| text (stringlengths 12 to 1.05M) | repo_name (stringlengths 5 to 86) | path (stringlengths 4 to 191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12 to 1.05M) | keyword (listlengths 1 to 23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
"""
Interval datatypes
"""
import pkg_resources
pkg_resources.require( "bx-python" )
import logging, os, sys, time, tempfile, shutil
import data
from galaxy import util
from galaxy.datatypes.sniff import *
from galaxy.web import url_for
from cgi import escape
import urllib
from bx.intervals.io import *
from galaxy.datatypes import metadata
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.tabular import Tabular
import math
log = logging.getLogger(__name__)
#
# Maps each metadata column to the header words that can identify it.
# Aliases on the right side of the ':' are listed in decreasing order of priority.
#
alias_spec = {
'chromCol' : [ 'chrom' , 'CHROMOSOME' , 'CHROM', 'Chromosome Name' ],
'startCol' : [ 'start' , 'START', 'chromStart', 'txStart', 'Start Position (bp)' ],
'endCol' : [ 'end' , 'END' , 'STOP', 'chromEnd', 'txEnd', 'End Position (bp)' ],
'strandCol' : [ 'strand', 'STRAND', 'Strand' ],
'nameCol' : [ 'name', 'NAME', 'Name', 'name2', 'NAME2', 'Name2', 'Ensembl Gene ID', 'Ensembl Transcript ID', 'Ensembl Peptide ID' ]
}
# a little faster lookup
alias_helper = {}
for key, value in alias_spec.items():
for elem in value:
alias_helper[elem] = key
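# Illustrative check (derived from alias_spec above): alias_helper inverts the
# mapping so each recognized header word points back to its metadata column, e.g.
#   alias_helper['chromStart'] == 'startCol'
#   alias_helper['txEnd']      == 'endCol'
#   alias_helper['Strand']     == 'strandCol'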
class Interval( Tabular ):
"""Tab delimited data containing interval information"""
file_ext = "interval"
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="nameCol", desc="Name/Identifier column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display apps"""
Tabular.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def init_meta( self, dataset, copy_from=None ):
Tabular.init_meta( self, dataset, copy_from=copy_from )
def set_peek( self, dataset, line_count=None, is_multi_byte=False ):
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte )
if line_count is None:
# See if line_count is stored in the metadata
if dataset.metadata.data_lines:
dataset.blurb = "%s regions" % util.commaify( str( dataset.metadata.data_lines ) )
else:
# Number of lines is not known ( this should not happen ), and auto-detect is
# needed to set metadata
dataset.blurb = "? regions"
else:
dataset.blurb = "%s regions" % util.commaify( str( line_count ) )
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def set_meta( self, dataset, overwrite = True, first_line_is_header = False, **kwd ):
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = 0 )
"""Tries to guess from the line the location number of the column for the chromosome, region start-end and strand"""
if dataset.has_data():
empty_line_count = 0
num_check_lines = 100 # only check up to this many non empty lines
for i, line in enumerate( file( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if line:
if ( first_line_is_header or line[0] == '#' ):
self.init_meta( dataset )
line = line.strip( '#' )
elems = line.split( '\t' )
valid = dict( alias_helper ) # shrinks
for index, col_name in enumerate( elems ):
if col_name in valid:
meta_name = valid[col_name]
if overwrite or not dataset.metadata.element_is_set( meta_name ):
setattr( dataset.metadata, meta_name, index+1 )
values = alias_spec[ meta_name ]
start = values.index( col_name )
for lower in values[ start: ]:
del valid[ lower ] # removes lower priority keys
break # Our metadata is set, so break out of the outer loop
else:
# Header lines in Interval files are optional. For example, BED is Interval but has no header.
# We'll make a best guess at the location of the metadata columns.
metadata_is_set = False
elems = line.split( '\t' )
if len( elems ) > 2:
for str in data.col1_startswith:
if line.lower().startswith( str ):
if overwrite or not dataset.metadata.element_is_set( 'chromCol' ):
dataset.metadata.chromCol = 1
try:
int( elems[1] )
if overwrite or not dataset.metadata.element_is_set( 'startCol' ):
dataset.metadata.startCol = 2
except:
pass # Metadata default will be used
try:
int( elems[2] )
if overwrite or not dataset.metadata.element_is_set( 'endCol' ):
dataset.metadata.endCol = 3
except:
pass # Metadata default will be used
#we no longer want to guess that this column is the 'name', name must now be set manually for interval files
#we will still guess at the strand, as we can make a more educated guess
#if len( elems ) > 3:
# try:
# int( elems[3] )
# except:
# if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
# dataset.metadata.nameCol = 4
if len( elems ) < 6 or elems[5] not in data.valid_strand:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 6
metadata_is_set = True
break
if metadata_is_set or ( i - empty_line_count ) > num_check_lines:
break # Our metadata is set or we examined 100 non-empty lines, so break out of the outer loop
else:
empty_line_count += 1
def get_estimated_display_viewport( self, dataset ):
"""Return a chrom, start, stop tuple for viewing a file."""
if dataset.has_data() and dataset.state == dataset.states.OK:
try:
c, s, e = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol
c, s, e = int(c)-1, int(s)-1, int(e)-1
peek = []
for idx, line in enumerate(file(dataset.file_name)):
if line[0] != '#':
peek.append( line.split() )
if idx > 10:
break
chr, start, stop = peek[0][c], int( peek[0][s] ), int( peek[0][e] )
for p in peek[1:]:
if p[0] == chr:
start = min( start, int( p[s] ) )
stop = max( stop, int( p[e] ) )
except Exception, exc:
#log.error( 'Viewport generation error -> %s ' % str(exc) )
(chr, start, stop) = 'chr1', 1, 1000
return (chr, str( start ), str( stop ))
else:
return ('', '', '')
def as_ucsc_display_file( self, dataset, **kwd ):
"""Returns file contents with only the bed data"""
fd, temp_name = tempfile.mkstemp()
c, s, e, t, n = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol or 0, dataset.metadata.nameCol or 0
c, s, e, t, n = int(c)-1, int(s)-1, int(e)-1, int(t)-1, int(n)-1
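# Note (added for clarity): metadata columns are 1-based, so the subtraction above
# yields 0-based indices; an unset strandCol/nameCol (no_value=0) becomes -1, which
# the checks below treat as "column absent".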
if t >= 0: # strand column (should) exists
for i, elems in enumerate( util.file_iter(dataset.file_name) ):
strand = "+"
name = "region_%i" % i
if n >= 0 and n < len( elems ): name = elems[n]
if t<len(elems): strand = elems[t]
tmp = [ elems[c], elems[s], elems[e], name, '0', strand ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
elif n >= 0: # name column (should) exists
for i, elems in enumerate( util.file_iter(dataset.file_name) ):
name = "region_%i" % i
if n >= 0 and n < len( elems ): name = elems[n]
tmp = [ elems[c], elems[s], elems[e], name ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
else:
for elems in util.file_iter(dataset.file_name):
tmp = [ elems[c], elems[s], elems[e] ]
os.write(fd, '%s\n' % '\t'.join(tmp) )
os.close(fd)
return open(temp_name)
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append('<tr>')
for i in range( 1, dataset.metadata.columns+1 ):
if i == dataset.metadata.chromCol:
out.append( '<th>%s.Chrom</th>' % i )
elif i == dataset.metadata.startCol:
out.append( '<th>%s.Start</th>' % i )
elif i == dataset.metadata.endCol:
out.append( '<th>%s.End</th>' % i )
elif dataset.metadata.strandCol and i == dataset.metadata.strandCol:
out.append( '<th>%s.Strand</th>' % i )
elif dataset.metadata.nameCol and i == dataset.metadata.nameCol:
out.append( '<th>%s.Name</th>' % i )
else:
out.append( '<th>%s</th>' % i )
out.append('</tr>')
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % str( exc )
return out
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data():
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
for site_name, site_url in util.get_ucsc_by_build(dataset.dbkey):
if site_name in app.config.ucsc_display_sites:
# HACK: UCSC doesn't support https, so force http even
# if our URL scheme is https. Making this work
# requires additional hackery in your upstream proxy.
# If UCSC ever supports https, remove this hack.
internal_url = "%s" % url_for( controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name )
if base_url.startswith( 'https://' ):
base_url = base_url.replace( 'https', 'http', 1 )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, url_for( controller='root' ), dataset.id, type) )
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % (site_url, dataset.dbkey, chrom, start, stop ) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( (site_name, link) )
return ret_val
def validate( self, dataset ):
"""Validate an interval file using the bx GenomicIntervalReader"""
errors = list()
c, s, e, t = dataset.metadata.chromCol, dataset.metadata.startCol, dataset.metadata.endCol, dataset.metadata.strandCol
c, s, e, t = int(c)-1, int(s)-1, int(e)-1, int(t)-1
infile = open(dataset.file_name, "r")
reader = GenomicIntervalReader(
infile,
chrom_col = c,
start_col = s,
end_col = e,
strand_col = t)
while True:
try:
reader.next()
except ParseError, e:
errors.append(e)
except StopIteration:
infile.close()
return errors
def repair_methods( self, dataset ):
"""Return options for removing errors along with a description"""
return [("lines","Remove erroneous lines")]
def sniff( self, filename ):
"""
Checks for 'intervalness'
This format is mostly used by galaxy itself. Valid interval files should include
a valid header comment, but this seems to be loosely regulated.
>>> fname = get_test_fname( 'test_space.txt' )
>>> Interval().sniff( fname )
False
>>> fname = get_test_fname( 'interval.interval' )
>>> Interval().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
"""
If we got here, we already know the file is_column_based and is not bed,
so we'll just look for some valid data.
"""
for hdr in headers:
if hdr and not hdr[0].startswith( '#' ):
if len(hdr) < 3:
return False
try:
# Assume chrom start and end are in column positions 1 and 2
# respectively ( for 0 based columns )
check = int( hdr[1] )
check = int( hdr[2] )
except:
return False
return True
except:
return False
def get_track_window(self, dataset, data, start, end):
"""
Assumes the incoming track data is sorted already.
"""
window = list()
for record in data:
fields = record.rstrip("\n\r").split("\t")
record_chrom = fields[dataset.metadata.chromCol-1]
record_start = int(fields[dataset.metadata.startCol-1])
record_end = int(fields[dataset.metadata.endCol-1])
if record_start < end and record_end > start:
window.append( (record_chrom, record_start, record_end) ) #Yes I did want to use a generator here, but it doesn't work downstream
return window
def get_track_resolution( self, dataset, start, end):
return None
def get_track_type( self ):
return "FeatureTrack"
class Bed( Interval ):
"""Tab delimited data in BED format"""
file_ext = "bed"
"""Add metadata elements"""
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="startCol", default=2, desc="Start column", param=metadata.ColumnParameter )
MetadataElement( name="endCol", default=3, desc="End column", param=metadata.ColumnParameter )
MetadataElement( name="strandCol", desc="Strand column (click box & select)", param=metadata.ColumnParameter, optional=True, no_value=0 )
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
### Do we need to repeat these? They are the same and should be inherited from the Interval type.
def set_meta( self, dataset, overwrite = True, **kwd ):
"""Sets the metadata information for datasets previously determined to be in bed format."""
i = 0
if dataset.has_data():
for i, line in enumerate( file(dataset.file_name) ):
metadata_set = False
line = line.rstrip('\r\n')
if line and not line.startswith('#'):
elems = line.split('\t')
if len(elems) > 2:
for startswith in data.col1_startswith:
if line.lower().startswith( startswith ):
if len( elems ) > 3:
if overwrite or not dataset.metadata.element_is_set( 'nameCol' ):
dataset.metadata.nameCol = 4
if len(elems) < 6:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 0
else:
if overwrite or not dataset.metadata.element_is_set( 'strandCol' ):
dataset.metadata.strandCol = 6
metadata_set = True
break
if metadata_set: break
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def as_ucsc_display_file( self, dataset, **kwd ):
"""Returns file contents with only the bed data. If bed 6+, treat as interval."""
for line in open(dataset.file_name):
line = line.strip()
if line == "" or line.startswith("#"):
continue
fields = line.split('\t')
"""check to see if this file doesn't conform to strict genome browser accepted bed"""
try:
if len(fields) > 12:
return Interval.as_ucsc_display_file(self, dataset) #too many fields
if len(fields) > 6:
int(fields[6])
if len(fields) > 7:
int(fields[7])
if len(fields) > 8:
if int(fields[8]) != 0:
return Interval.as_ucsc_display_file(self, dataset)
if len(fields) > 9:
int(fields[9])
if len(fields) > 10:
fields2 = fields[10].rstrip(",").split(",") #remove trailing comma and split on comma
for field in fields2:
int(field)
if len(fields) > 11:
fields2 = fields[11].rstrip(",").split(",") #remove trailing comma and split on comma
for field in fields2:
int(field)
except: return Interval.as_ucsc_display_file(self, dataset)
#only check first line for proper form
break
try: return open(dataset.file_name)
except: return "This item contains no content"
def sniff( self, filename ):
"""
Checks for 'bedness'
BED lines have three required fields and nine additional optional fields.
The number of fields per line must be consistent throughout any single set of data in
an annotation track. The order of the optional fields is binding: lower-numbered
fields must always be populated if higher-numbered fields are used. The data type of
all 12 columns is:
1-str, 2-int, 3-int, 4-str, 5-int, 6-str, 7-int, 8-int, 9-int or list, 10-int, 11-list, 12-list
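An illustrative, hypothetical 12-column line (tab-separated; not taken from the test files):
chr7  127471196  127472363  Pos1  0  +  127471196  127472363  255,0,0  2  100,200  0,900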
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format1
>>> fname = get_test_fname( 'test_tab.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'interval1.bed' )
>>> Bed().sniff( fname )
True
>>> fname = get_test_fname( 'complete.bed' )
>>> Bed().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if not headers: return False
for hdr in headers:
if (hdr[0] == '' or hdr[0].startswith( '#' )):
continue
valid_col1 = False
if len(hdr) < 3 or len(hdr) > 12:
return False
for str in data.col1_startswith:
if hdr[0].lower().startswith(str):
valid_col1 = True
break
if valid_col1:
try:
int( hdr[1] )
int( hdr[2] )
except:
return False
if len( hdr ) > 4:
#hdr[3] is a string, 'name', which defines the name of the BED line - difficult to test for this.
#hdr[4] is an int, 'score', a score between 0 and 1000.
try:
if int( hdr[4] ) < 0 or int( hdr[4] ) > 1000: return False
except:
return False
if len( hdr ) > 5:
#hdr[5] is strand
if hdr[5] not in data.valid_strand: return False
if len( hdr ) > 6:
#hdr[6] is thickStart, the starting position at which the feature is drawn thickly.
try: int( hdr[6] )
except: return False
if len( hdr ) > 7:
#hdr[7] is thickEnd, the ending position at which the feature is drawn thickly
try: int( hdr[7] )
except: return False
if len( hdr ) > 8:
#hdr[8] is itemRgb, an RGB value of the form R,G,B (e.g. 255,0,0). However, this could also be an int (e.g., 0)
try: int( hdr[8] )
except:
try: hdr[8].split(',')
except: return False
if len( hdr ) > 9:
#hdr[9] is blockCount, the number of blocks (exons) in the BED line.
try: block_count = int( hdr[9] )
except: return False
if len( hdr ) > 10:
#hdr[10] is blockSizes - A comma-separated list of the block sizes.
#Sometimes the block_sizes and block_starts lists end in extra commas
try: block_sizes = hdr[10].rstrip(',').split(',')
except: return False
if len( hdr ) > 11:
#hdr[11] is blockStarts - A comma-separated list of block starts.
try: block_starts = hdr[11].rstrip(',').split(',')
except: return False
if len(block_sizes) != block_count or len(block_starts) != block_count: return False
else: return False
return True
except: return False
class _RemoteCallMixin:
def _get_remote_call_url( self, redirect_url, site_name, dataset, type, app, base_url ):
"""Retrieve the URL to call out to an external site and retrieve data.
This routes our external URL through a local galaxy instance which makes
the data available, followed by redirecting to the remote site with a
link back to the available information.
"""
internal_url = "%s" % url_for( controller='dataset', dataset_id=dataset.id, action='display_at', filename='%s_%s' % ( type, site_name ) )
base_url = app.config.get( "display_at_callback", base_url )
if base_url.startswith( 'https://' ):
base_url = base_url.replace( 'https', 'http', 1 )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % \
( base_url, url_for( controller='root' ), dataset.id, type ) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
return link
class Gff( Tabular, _RemoteCallMixin ):
"""Tab delimited data in Gff format"""
file_ext = "gff"
column_names = [ 'Seqname', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand', 'Frame', 'Group' ]
"""Add metadata elements"""
MetadataElement( name="columns", default=9, desc="Number of columns", readonly=True, visible=False )
MetadataElement( name="column_types", default=['str','str','str','int','int','int','str','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
def __init__( self, **kwd ):
"""Initialize datatype, by adding GBrowse display app"""
Tabular.__init__(self, **kwd)
self.add_display_app( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
self.add_display_app( 'c_elegans', 'display in Wormbase', 'as_gbrowse_display_file', 'gbrowse_links' )
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
if len(elems) == 9:
try:
int( elems[3] )
int( elems[4] )
break
except:
pass
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def make_html_table( self, dataset, skipchars=[] ):
"""Create HTML table, used for displaying peek"""
out = ['<table cellspacing="0" cellpadding="3">']
comments = []
try:
# Generate column header
out.append( '<tr>' )
for i, name in enumerate( self.column_names ):
out.append( '<th>%s.%s</th>' % ( str( i+1 ), name ) )
out.append( self.make_html_peek_rows( dataset, skipchars=skipchars ) )
out.append( '</table>' )
out = "".join( out )
except Exception, exc:
out = "Can't create peek %s" % exc
return out
def get_estimated_display_viewport( self, dataset ):
"""
Return a chrom, start, stop tuple for viewing a file. There are slight differences between gff 2 and gff 3
formats. This function should correctly handle both...
"""
if dataset.has_data() and dataset.state == dataset.states.OK:
try:
seqid = ''
start = 2147483647 # Maximum value of a signed 32 bit integer ( 2**31 - 1 )
stop = 0
for i, line in enumerate( file( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if not line:
continue
if line.startswith( '##sequence-region' ): # ##sequence-region IV 6000000 6030000
elems = line.split()
seqid = elems[1] # IV
start = elems[2] # 6000000
stop = elems[3] # 6030000
break
# Allow UCSC style browser and track info in the GFF file
if line.startswith("browser position"):
pos_info = line.split()[-1]
seqid, startend = pos_info.split(":")
start, stop = startend.split("-")
break
if not line.startswith(('#', 'track', 'browser')) :
elems = line.split( '\t' )
if not seqid:
# We can only set the viewport for a single chromosome
seqid = elems[0]
if seqid == elems[0]:
# Make sure we have not spanned chromosomes
start = min( start, int( elems[3] ) )
stop = max( stop, int( elems[4] ) )
else:
# We've spanned a chromosome
break
if i > 10:
break
except:
seqid, start, stop = ( '', '', '' )
return ( seqid, str( start ), str( stop ) )
else:
return ( '', '', '' )
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data():
seqid, start, stop = self.get_estimated_display_viewport( dataset )
if seqid and start and stop:
for site_name, site_url in util.get_ucsc_by_build( dataset.dbkey ):
if site_name in app.config.ucsc_display_sites:
redirect_url = urllib.quote_plus(
"%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" %
( site_url, dataset.dbkey, seqid, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data():
viewport_tuple = self.get_estimated_display_viewport( dataset )
seqid = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
if seqid and start and stop:
for site_name, site_url in util.get_gbrowse_sites_by_build( dataset.dbkey ):
if site_name in app.config.gbrowse_display_sites:
redirect_url = urllib.quote_plus( "%s%s/?ref=%s&start=%s&stop=%s&eurl=%%s" %
( site_url, dataset.dbkey, seqid, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in gff format
GFF lines have nine required fields that must be tab-separated.
For complete details see http://genome.ucsc.edu/FAQ/FAQformat#format3
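An illustrative, hypothetical GFF line (nine tab-separated fields):
chr1  example  exon  1000  2000  .  +  .  gene_id "gene1"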
>>> fname = get_test_fname( 'gff_version_3.gff' )
>>> Gff().sniff( fname )
False
>>> fname = get_test_fname( 'test.gff' )
>>> Gff().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '2' ) < 0:
return False
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
int( hdr[4] )
except:
return False
if hdr[5] != '.':
try:
score = int(hdr[5])
except:
return False
if (score < 0 or score > 1000):
return False
if hdr[6] not in data.valid_strand:
return False
return True
except:
return False
class Gff3( Gff ):
"""Tab delimited data in Gff3 format"""
file_ext = "gff3"
valid_gff3_strand = ['+', '-', '.', '?']
valid_gff3_phase = ['.', '0', '1', '2']
column_names = [ 'Seqid', 'Source', 'Type', 'Start', 'End', 'Score', 'Strand', 'Phase', 'Attributes' ]
"""Add metadata elements"""
MetadataElement( name="column_types", default=['str','str','str','int','int','float','str','int','list'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False )
def __init__(self, **kwd):
"""Initialize datatype, by adding GBrowse display app"""
Gff.__init__(self, **kwd)
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
valid_start = False
valid_end = False
if len( elems ) == 9:
try:
start = int( elems[3] )
valid_start = True
except:
if elems[3] == '.':
valid_start = True
try:
end = int( elems[4] )
valid_end = True
except:
if elems[4] == '.':
valid_end = True
strand = elems[6]
phase = elems[7]
if valid_start and valid_end and start < end and strand in self.valid_gff3_strand and phase in self.valid_gff3_phase:
break
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def sniff( self, filename ):
"""
Determines whether the file is in gff version 3 format
GFF 3 format:
1) adds a mechanism for representing more than one level
of hierarchical grouping of features and subfeatures.
2) separates the ideas of group membership and feature name/id
3) constrains the feature type field to be taken from a controlled
vocabulary.
4) allows a single feature, such as an exon, to belong to more than
one group at a time.
5) provides an explicit convention for pairwise alignments
6) provides an explicit convention for features that occupy disjunct regions
The format consists of 9 columns, separated by tabs (NOT spaces).
Undefined fields are replaced with the "." character, as described in the original GFF spec.
For complete details see http://song.sourceforge.net/gff3.shtml
>>> fname = get_test_fname( 'test.gff' )
>>> Gff3().sniff( fname )
False
>>> fname = get_test_fname('gff_version_3.gff')
>>> Gff3().sniff( fname )
True
"""
headers = get_headers( filename, '\t' )
try:
if len(headers) < 2:
return False
for hdr in headers:
if hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '3' ) >= 0:
return True
elif hdr and hdr[0].startswith( '##gff-version' ) and hdr[0].find( '3' ) < 0:
return False
# Header comments may have been stripped, so inspect the data
if hdr and hdr[0] and not hdr[0].startswith( '#' ):
if len(hdr) != 9:
return False
try:
int( hdr[3] )
except:
if hdr[3] != '.':
return False
try:
int( hdr[4] )
except:
if hdr[4] != '.':
return False
if hdr[5] != '.':
try:
score = int(hdr[5])
except:
return False
if (score < 0 or score > 1000):
return False
if hdr[6] not in self.valid_gff3_strand:
return False
if hdr[7] not in self.valid_gff3_phase:
return False
return True
except:
return False
class Wiggle( Tabular, _RemoteCallMixin ):
"""Tab delimited data in wiggle format"""
file_ext = "wig"
MetadataElement( name="columns", default=3, desc="Number of columns", readonly=True, visible=False )
def __init__( self, **kwd ):
Tabular.__init__( self, **kwd )
self.add_display_app( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
self.add_display_app( 'gbrowse', 'display in Gbrowse', 'as_gbrowse_display_file', 'gbrowse_links' )
def get_estimated_display_viewport( self, dataset ):
value = ( "", "", "" )
num_check_lines = 100 # only check up to this many non empty lines
for i, line in enumerate( file( dataset.file_name ) ):
line = line.rstrip( '\r\n' )
if line and line.startswith( "browser" ):
chr_info = line.split()[-1]
wig_chr, coords = chr_info.split( ":" )
start, end = coords.split( "-" )
value = ( wig_chr, start, end )
break
if i > num_check_lines:
break
return value
def _get_viewer_range( self, dataset ):
"""Retrieve the chromosome, start, end for an external viewer."""
if dataset.has_data():
viewport_tuple = self.get_estimated_display_viewport( dataset )
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
return ( chrom, start, stop )
return ( None, None, None )
def gbrowse_links( self, dataset, type, app, base_url ):
ret_val = []
chrom, start, stop = self._get_viewer_range( dataset )
if chrom is not None:
for site_name, site_url in util.get_gbrowse_sites_by_build( dataset.dbkey ):
if site_name in app.config.gbrowse_display_sites:
redirect_url = urllib.quote_plus( "%s%s/?ref=%s&start=%s&stop=%s&eurl=%%s" % ( site_url, dataset.dbkey, chrom, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
chrom, start, stop = self._get_viewer_range( dataset )
if chrom is not None:
for site_name, site_url in util.get_ucsc_by_build( dataset.dbkey ):
if site_name in app.config.ucsc_display_sites:
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % ( site_url, dataset.dbkey, chrom, start, stop ) )
link = self._get_remote_call_url( redirect_url, site_name, dataset, type, app, base_url )
ret_val.append( ( site_name, link ) )
return ret_val
def make_html_table( self, dataset ):
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def set_meta( self, dataset, overwrite = True, **kwd ):
i = 0
for i, line in enumerate( file ( dataset.file_name ) ):
line = line.rstrip('\r\n')
if line and not line.startswith( '#' ):
elems = line.split( '\t' )
try:
float( elems[0] ) #"Wiggle track data values can be integer or real, positive or negative values"
break
except:
do_break = False
for str in data.col1_startswith:
if elems[0].lower().startswith(str):
do_break = True
break
if do_break:
break
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = i )
def sniff( self, filename ):
"""
Determines whether the file is in wiggle format
The .wig format is line-oriented. Wiggle data is preceded by a track definition line,
which adds a number of options for controlling the default display of this track.
Following the track definition line is the track data, which can be entered in several
different formats.
The track definition line begins with the word 'track' followed by the track type.
The track type with version is REQUIRED, and it currently must be wiggle_0. For example,
track type=wiggle_0...
For complete details see http://genome.ucsc.edu/goldenPath/help/wiggle.html
>>> fname = get_test_fname( 'interval1.bed' )
>>> Wiggle().sniff( fname )
False
>>> fname = get_test_fname( 'wiggle.wig' )
>>> Wiggle().sniff( fname )
True
"""
headers = get_headers( filename, None )
try:
for hdr in headers:
if len(hdr) > 1 and hdr[0] == 'track' and hdr[1].startswith('type=wiggle'):
return True
return False
except:
return False
def get_track_window(self, dataset, data, start, end):
"""
Assumes we have a numpy file.
"""
# Maybe if we import here people will still be able to use Galaxy when numpy kills it
pkg_resources.require("numpy>=1.2.1")
#from numpy.lib import format
import numpy
range = end - start
# Determine appropriate resolution to plot ~1000 points
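# Worked example (illustrative): for a 500,000 bp window, range/1000 = 500,
# ceil(log10(500)) = 3, so resolution = 10**3 = 1000 and roughly 500 points are returned.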
resolution = ( 10 ** math.ceil( math.log10( range / 1000 ) ) )
# Restrict to valid range
resolution = min( resolution, 100000 )
resolution = max( resolution, 1 )
# Memory map the array (don't load all the data)
data = numpy.load( data )
# Grab just what we need
t_start = math.floor( start / resolution )
t_end = math.ceil( end / resolution )
x = numpy.arange( t_start, t_end ) * resolution
y = data[ t_start : t_end ]
return zip(x.tolist(), y.tolist())
def get_track_resolution( self, dataset, start, end):
range = end - start
# Determine appropriate resolution to plot ~1000 points
resolution = math.ceil( 10 ** math.ceil( math.log10( range / 1000 ) ) )
# Restrict to valid range
resolution = min( resolution, 100000 )
resolution = max( resolution, 1 )
return resolution
def get_track_type( self ):
return "LineTrack"
class CustomTrack ( Tabular ):
"""UCSC CustomTrack"""
file_ext = "customtrack"
def __init__(self, **kwd):
"""Initialize interval datatype, by adding UCSC display app"""
Tabular.__init__(self, **kwd)
self.add_display_app ( 'ucsc', 'display at UCSC', 'as_ucsc_display_file', 'ucsc_links' )
def set_meta( self, dataset, overwrite = True, **kwd ):
Tabular.set_meta( self, dataset, overwrite = overwrite, skip = 1 )
def display_peek( self, dataset ):
"""Returns formated html of peek"""
return Tabular.make_html_table( self, dataset, skipchars=['track', '#'] )
def get_estimated_display_viewport( self, dataset ):
try:
wiggle_format = False
for line in open(dataset.file_name):
if (line.startswith("chr") or line.startswith("scaffold")):
start = line.split("\t")[1].replace(",","")
end = line.split("\t")[2].replace(",","")
if int(start) < int(end):
value = ( line.split("\t")[0], start, end )
else:
value = ( line.split("\t")[0], end, start )
break
elif (line.startswith('variableStep')):
# wiggle format
wiggle_format = True
wig_chr = line.split()[1].split('=')[1]
if not wig_chr.startswith("chr"):
value = ('', '', '')
break
elif wiggle_format:
# wiggle format
if line.split("\t")[0].isdigit():
start = line.split("\t")[0]
end = str(int(start) + 1)
value = (wig_chr, start, end)
else:
value = (wig_chr, '', '')
break
return value #returns the co-ordinates of the 1st track/dataset
except:
#return "."
return ('', '', '')
def ucsc_links( self, dataset, type, app, base_url ):
ret_val = []
if dataset.has_data():
viewport_tuple = self.get_estimated_display_viewport(dataset)
if viewport_tuple:
chrom = viewport_tuple[0]
start = viewport_tuple[1]
stop = viewport_tuple[2]
for site_name, site_url in util.get_ucsc_by_build(dataset.dbkey):
if site_name in app.config.ucsc_display_sites:
internal_url = "%s" % url_for( controller='dataset', dataset_id=dataset.id, action='display_at', filename='ucsc_' + site_name )
if base_url.startswith( 'https://' ):
base_url = base_url.replace( 'https', 'http', 1 )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at" % (base_url, url_for( controller='root' ), dataset.id, type) )
redirect_url = urllib.quote_plus( "%sdb=%s&position=%s:%s-%s&hgt.customText=%%s" % (site_url, dataset.dbkey, chrom, start, stop ) )
link = '%s?redirect_url=%s&display_url=%s' % ( internal_url, redirect_url, display_url )
ret_val.append( (site_name, link) )
return ret_val
def sniff( self, filename ):
"""
Determines whether the file is in customtrack format.
CustomTrack files are built within Galaxy and are basically bed or interval files with the first line looking
something like this.
track name="User Track" description="User Supplied Track (from Galaxy)" color=0,0,0 visibility=1
>>> fname = get_test_fname( 'complete.bed' )
>>> CustomTrack().sniff( fname )
False
>>> fname = get_test_fname( 'ucsc.customtrack' )
>>> CustomTrack().sniff( fname )
True
"""
headers = get_headers( filename, None )
first_line = True
for hdr in headers:
if first_line:
first_line = False
try:
if hdr[0].startswith('track'):
color_found = False
visibility_found = False
for elem in hdr[1:]:
if elem.startswith('color'): color_found = True
if elem.startswith('visibility'): visibility_found = True
if color_found and visibility_found: break
if not color_found or not visibility_found: return False
else: return False
except: return False
else:
try:
if hdr[0] and not hdr[0].startswith( '#' ):
if len( hdr ) < 3:
return False
try:
int( hdr[1] )
int( hdr[2] )
except:
return False
except:
return False
return True
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
| (source above) | volpino/Yeps-EURAC | lib/galaxy/datatypes/interval.py | Python | mit | 49,885 | ["Galaxy"] | 8876f9419789060c98d0de90d8ffbfcab061981644c8b0464921d65a881744f7 |
"""
An updated CRYSTAL log parser wrapping the standalone pycrystal parser
Authors: Evgeny Blokhin and Andrey Sobolev
"""
import os.path
from pycrystal import CRYSTOUT as _CRYSTOUT, CRYSTOUT_Error
from tilde.parsers import Output
class CRYSTOUT(Output):
def __init__(self, filename):
Output.__init__(self, filename)
try:
result = _CRYSTOUT(filename)
except CRYSTOUT_Error as ex:
raise RuntimeError(ex)
for key in self.info:
if result.info.get(key):
self.info[key] = result.info[key]
self.structures = result.info['structures']
self.convergence = result.info['convergence']
self.tresholds = result.info['optgeom']
self.ncycles = result.info['ncycles']
self.phonons = result.info['phonons']
self.electrons = result.info['electrons']
self.electrons['basis_set']['ps'] = self.electrons['basis_set']['ecp']
self.elastic = result.info['elastic']
self.info['framework'] = 0x3
self.info['ansatz'] = 0x3
self.related_files.append(filename)
cur_folder = os.path.dirname(filename)
check_files = []
if filename.endswith('.cryst.out'):
check_files = [filename.replace('.cryst.out', '') + '.d12', filename.replace('.cryst.out', '') + '.gui']
elif filename.endswith('.out'):
check_files = [filename.replace('.out', '') + '.d12', filename.replace('.out', '') + '.gui']
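# Illustrative: for a log named 'job.cryst.out' this looks for the matching
# 'job.d12' and 'job.gui' inputs next to the log and records them as related files.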
for check in check_files:
if os.path.exists(os.path.join(cur_folder, check)):
self.related_files.append(os.path.join(cur_folder, check))
err_file = os.path.join(cur_folder, 'fort.87')
if os.path.exists(err_file):
with open(err_file, 'r') as f:
err_msg = f.readline()
if err_msg:
self.info['warns'].append(err_msg)
@staticmethod
def fingerprints(test_string):
return _CRYSTOUT.detect(test_string)
| (source above) | tilde-lab/tilde | tilde/parsers/CRYSTAL/CRYSTAL.py | Python | mit | 2,021 | ["CRYSTAL"] | 7bbe20ecd42ed751fdcf064b7d08fdd4fc92cedbf04a6df7afacf5488b4c646e |
#! /usr/bin/env python
import json, urllib2, re
from urllib2 import Request, urlopen, URLError
from datetime import datetime, timedelta
def removeNonAscii(s):
s = s.replace("&", "and")
return "".join([x if ord(x) < 128 else '_' for x in s])
req = Request('http://api.tvmaze.com/schedule')
try:
print 'Visit www.tvmaze.com'
print 'Opening TVmaze connection'
response = urlopen(req)
except URLError as e:
if hasattr(e, 'reason'):
print 'Failed to reach TVmaze server'
print 'Reason: ', e.reason
elif hasattr(e, 'code'):
print 'TVmaze server could not fulfill request'
print 'Error code: ', e.code
else:
print 'Downloading TVmaze schedule'
schedule_json_wa = response.read()
schedule_json = removeNonAscii(schedule_json_wa)
schedule_dicts = json.loads(schedule_json)
with open('xmltv.xml', 'w') as xml_file:
xml_file.write('<?xml version="1.0" encoding="ISO-8859-1"?>'+'\n')
xml_file.write('<!DOCTYPE tv SYSTEM "xmltv.dtd">'+'\n')
xml_file.write('\n')
xml_file.write('<tv source-info-name="TVmaze" generator-info-name="tvmaze2xml.py">'+'\n')
for i in range(len(schedule_dicts)-1):
name = schedule_dicts[i]['show']['name']
time = schedule_dicts[i]['airstamp']
runtime = schedule_dicts[i]['runtime']
ch_id = str(schedule_dicts[i]['show']['network']['id'])
description = re.sub('<[^<]+?>', '', schedule_dicts[i]['summary'])
if name and time and runtime and ch_id:
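# Illustrative (assumes the TVmaze 'airstamp' is ISO 8601 with a UTC offset):
# '2015-07-13T20:00:00-04:00' -> start '20150713200000 -0400'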
start = time[0:4]+time[5:7]+time[8:10]+time[11:13]+time[14:16]+time[17:19]+' '+time[19:22]+time[23:25]
start_time = datetime.strptime(start[0:14], "%Y%m%d%H%M%S")
stop_time = start_time + timedelta(minutes=runtime)
stop = stop_time.strftime("%Y%m%d%H%M%S")+' '+time[19:22]+time[23:25]
xml_file.write(' <programme start="'+start+'" stop="'+stop+'" channel="'+ch_id+'">'+'\n')
xml_file.write(' <title lang="en">'+name+'</title>'+'\n')
xml_file.write(' <desc lang="en">'+description+'</desc>'+'\n')
xml_file.write(' </programme>'+'\n')
xml_file.write('</tv>')
| (source above) | heyted/tvmaze2xml | tvmaze2xml.py | Python | gpl-2.0 | 2,255 | ["VisIt"] | 8fb961a82830a7d60b4f8ba4e0ecc5d718886fb52fdb6012ef6fcfef4b68b2af |
'''
MFEM example 20
See c++ version in the MFEM library for more detail
'''
import os
import mfem.ser as mfem
from mfem.ser import intArray
from os.path import expanduser, join, dirname
import numpy as np
from numpy import sin, cos, exp, sqrt
m_ = 1.0
k_ = 1.0
def run(order=1,
prob=0,
nsteps=100,
dt=0.1,
sc=1.0,
visualization=False):
class GradT(mfem.Operator):
def __init__(self):
mfem.Operator.__init__(self, 1)
def Mult(self, x, y):
y.Set(1.0/m_, x)
class NegGradV(mfem.TimeDependentOperator):
def __init__(self):
mfem.TimeDependentOperator.__init__(self, 1)
def Mult(self, x, y):
if prob == 1:
y[0] = - k_ * sin(x[0])
elif prob == 2:
y[0] = - k_ * x[0] * exp(-0.5 * x[0] * x[0])
elif prob == 3:
y[0] = - k_ * (1.0 + 2.0 * x[0] * x[0]) * x[0]
elif prob == 4:
y[0] = - k_ * (1.0 - 0.25 * x[0] * x[0]) * x[0]
else:
y[0] = - k_ * x[0]
def hamiltonian(q, p, t):
h = 1.0 - 0.5 / m_ + 0.5 * p * p / m_
if prob == 1:
h += k_ * (1.0 - cos(q))
elif prob == 2:
h += k_ * (1.0 - exp(-0.5 * q * q))
elif prob == 3:
h += 0.5 * k_ * (1.0 + q * q) * q * q
elif prob == 4:
h += 0.5 * k_ * (1.0 - 0.125 * q * q) * q * q
else:
h += 0.5 * k_ * q * q
return h
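# Sanity check (illustrative, for the default prob=0 with m_ = k_ = 1):
# the initial state q=0, p=1 gives H = 1 - 0.5 + 0.5*1 + 0 = 1.0, and the
# symplectic integrator should keep the reported energy close to this value.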
# 2. Create and Initialize the Symplectic Integration Solver
siaSolver = mfem.SIAVSolver(order)
P = GradT()
F = NegGradV()
siaSolver.Init(P, F)
# 3. Set the initial conditions
t = 0.0
q = mfem.Vector(1)
p = mfem.Vector(1)
e = mfem.Vector(nsteps+1)
q[0] = 0.0
p[0] = 1.0
# 5. Create a Mesh for visualization in phase space
nverts = 2*(nsteps+1) if visualization else 0
nelems = nsteps if visualization else 0
mesh = mfem.Mesh(2, nverts, nelems, 0, 3)
x0 = mfem.Vector(3)
x0.Assign(0.0)
x1 = mfem.Vector(3)
x1.Assign(0.0)
v = mfem.intArray(4)
# 6. Perform time-stepping
e_mean = 0.0
for i in range(nsteps):
if i == 0:
e[0] = hamiltonian(q[0], p[0], t)
e_mean += e[0]
if visualization:
x1[0] = q[0]
x1[1] = p[0]
x1[2] = 0.0
mesh.AddVertex(x0)
# These are all same.
# mesh.AddVertex(x0.GetDataArray())
# mesh.AddVertex(x0,GetData())
mesh.AddVertex(x1)
# 6b. Advance the state of the system
t, dt = siaSolver.Step(q, p, t, dt)
e[i+1] = hamiltonian(q[0], p[0], t)
e_mean += e[i+1]
# 6d. Add results to GLVis visualization
if visualization:
x0[2] = t
x1[0] = q[0]
x1[1] = p[0]
x1[2] = t
mesh.AddVertex(x0)
mesh.AddVertex(x1)
v[0] = 2*i
v[1] = 2*(i+1)
v[2] = 2*(i+1)+1
v[3] = 2*i+1
mesh.AddQuad(v)
# this also works ;D
# mesh.AddQuad(v.ToList())
#mesh.AddQuad(np.array(v.ToList(), dtype=np.int32))
# 7. Compute and display mean and standard deviation of the energy
e_mean /= (nsteps + 1)
e_var = 0.0
for i in range(nsteps+1):
e_var += (e[i] - e_mean)**2
e_var /= (nsteps + 1)
print("\n".join(["",
"Mean and standard deviation of the energy",
"{:g}".format(e_mean) + "\t" + "{:g}".format(sqrt(e_var))]))
# 9. Finalize the GLVis output
if visualization:
mesh.FinalizeQuadMesh(1)
fec = mfem.H1_FECollection(1, 2)
fespace = mfem.FiniteElementSpace(mesh, fec)
energy = mfem.GridFunction(fespace)
energy.Assign(0.0)
for i in range(nsteps+1):
energy[2*i+0] = e[i]
energy[2*i+1] = e[i]
sock = mfem.socketstream("localhost", 19916)
sock.precision(8)
sock << "solution\n" << mesh << energy
sock << "window_title 'Energy in Phase Space'\n"
sock << "keys\n maac\n" << "axis_labels 'q' 'p' 't'\n"
sock.flush()
if __name__ == "__main__":
from mfem.common.arg_parser import ArgParser
parser = ArgParser(description='Ex20 (Symplectic ODE)')
parser.add_argument('-m', '--mesh',
default='star.mesh',
action='store', type=str,
help='Mesh file to use.')
parser.add_argument("-p",
"--problem-type",
action='store', type=int, default=0,
help=''.join(["Problem Type:\n",
"\t 0 - Simple Harmonic Oscillator\n",
"\t 1 - Pendulum\n",
"\t 2 - Gaussian Potential Well\n",
"\t 3 - Quartic Potential\n",
"\t 4 - Negative Quartic Potential", ]))
parser.add_argument('-o', '--order',
action='store', default=1, type=int,
help="Time integration order")
parser.add_argument('-n', '--number-of-steps',
action='store', default=100, type=int,
help="Number of time steps")
parser.add_argument('-dt', '--time-step',
action='store', default=0.1, type=float,
help="Time step size")
parser.add_argument('-k', '--spring-constant',
action='store', default=1, type=float,
help="Sprint constant")
parser.add_argument('-vis', '--visualization',
action='store_true',
default=True,
help='Enable GLVis visualization')
parser.add_argument('-no-gp', '--no-gnuplot',
action='store_true',
default=True,
help='Disable GnuPlot visualization')
args = parser.parse_args()
parser.print_options(args)
prob = args.problem_type
visualization = args.visualization
order = args.order
nsteps = args.number_of_steps
dt = args.time_step
sc = args.spring_constant
np_gp = args.no_gnuplot
run(order=order,
prob=prob,
nsteps=nsteps,
dt=dt,
sc=sc,
visualization=visualization)
| (source above) | mfem/PyMFEM | examples/ex20.py | Python | bsd-3-clause | 6,639 | ["Gaussian"] | 4a830bf12b62aa71e5793adf7061f23d1c02dd685044c2bbf2882e17b53c9c2d |
import glob
import os
from time import sleep
import hashlib
from subprocess import call
path_to_ase = "path\\to\\aseprite.exe"
def main():
while True:
for filename in glob.glob("*.ase"):
# Try to find an existing instance for that file
file = AseFile.find_file(filename)
if file:
# check if md5 changed between passes
new_file = AseFile(filename)
if new_file.hash != file.hash:
new_file.export()
AseFile.files.remove(file)
AseFile.files.append(new_file)
else:
# Create an instance and save it
file = AseFile(filename)
file.export()
AseFile.files.append(file)
sleep(2)
class AseFile:
files = []
def __init__(self, name):
self.name = name
self.hash = self.get_hash()
self.png_name = os.path.splitext(self.name)[0] + ".png"
def get_hash(self):
fullpath = os.path.abspath(self.name)
return hashlib.md5(open(fullpath, 'rb').read()).hexdigest()
def export(self):
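# Assumed Aseprite CLI behavior (verify against your Aseprite version):
# '--batch <file> --sheet <png>' exports the sprite sheet without opening the UI.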
call([path_to_ase, "--batch", self.name, "--sheet", self.png_name])
@classmethod
def find_file(cls, filename):
for file in cls.files:
if file.name == filename:
return file
return False
if __name__ == "__main__":
main()
| (source above) | scambier/aseprite-autoexport | converter.py | Python | mit | 1,450 | ["ASE"] | 9803c33bc98ce2282d32e95d1c846f62ab9e1abfb1ebe6b9100e4063d44cab46 |
import glob
from setuptools import setup, find_packages
setup(
name='seroba',
version='1.0.2',
description='SEROBA: Serotyping for illumina reads',
packages = find_packages(),
author='Lennard Epping',
author_email='path-help@sanger.ac.uk',
url='https://github.com/sanger-pathogens/seroba',
scripts=glob.glob('scripts/*'),
test_suite='nose.collector',
tests_require=['nose >= 1.3'],
install_requires=[
'ariba >= 2.9.1',
'pymummer==0.10.3',
'PyYAML>=3.12',
'biopython>=1.68',
'pyfastaq>=3.15.0'
],
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Programming Language :: Python :: 3 :: Only',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
],
)
| (source above) | sanger-pathogens/seroba | setup.py | Python | gpl-3.0 | 868 | ["Biopython"] | 4962afab145f1d9939c9639c45d3234b33bb380bd26ccfff0212ff8f45f75f28 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import numpy as np
from skbio.stats import p_value_to_str
class PValueToStrTests(TestCase):
def setUp(self):
self.p_value = 0.119123123123
def test_valid_input(self):
obs = p_value_to_str(self.p_value, 100)
self.assertEqual(obs, '0.12')
obs = p_value_to_str(self.p_value, 250)
self.assertEqual(obs, '0.12')
obs = p_value_to_str(self.p_value, 1000)
self.assertEqual(obs, '0.119')
obs = p_value_to_str(0.0055623489, 999)
self.assertEqual(obs, '0.006')
def test_too_few_permutations(self):
obs = p_value_to_str(self.p_value, 9)
self.assertEqual(obs, 'Too few permutations to compute p-value '
'(permutations = 9)')
obs = p_value_to_str(self.p_value, 1)
self.assertEqual(obs, 'Too few permutations to compute p-value '
'(permutations = 1)')
obs = p_value_to_str(self.p_value, 0)
self.assertEqual(obs, 'Too few permutations to compute p-value '
'(permutations = 0)')
def test_missing_or_invalid_p_value(self):
obs = p_value_to_str(None, 0)
self.assertEqual(obs, 'N/A')
obs = p_value_to_str(np.nan, 0)
self.assertEqual(obs, 'N/A')
if __name__ == '__main__':
main()
| (source above) | JWDebelius/scikit-bio | skbio/stats/tests/test_misc.py | Python | bsd-3-clause | 1,791 | ["scikit-bio"] | 5f0f7573e5d0e0998ea2344dc3ce5993a6a93c0b0a775faea9c691182c21841d |
#!/usr/local/bin/python3
#
# archlinux.py
from chrubix.distros import Distro
from chrubix.utils import system_or_die, chroot_this, wget, logme, do_a_sed, \
call_makepkg_or_die, abort_if_make_is_segfaulting
import os
class ArchlinuxDistro( Distro ):
def __init__( self , *args, **kwargs ):
super( ArchlinuxDistro, self ).__init__( *args, **kwargs )
self.name = 'archlinux'
self.architecture = 'armv7h'
self.list_of_mkfs_packages = ( 'cryptsetup', 'btrfs-progs', 'jfsutils', 'xfsprogs' )
assert( self.important_packages not in ( '', None ) )
self.important_packages += ' \
jre8-openjdk jdk8-openjdk phonon-qt4-gstreamer xorg-font-util \
mate-settings-daemon-pulseaudio libreoffice-fresh xf86-video-armsoc-chromium mesa-libgl libx264 \
xz mkinitcpio mutagen libconfig festival-us libxpm uboot-mkimage dtc mythes-en \
mesa pyqt gptfdisk bluez-libs alsa-plugins acpi sdl libcanberra perl-xml-parser \
libnotify talkfilters libxmu apache-ant junit zbar python2-setuptools python2-pip \
twisted python2-yaml python2-distutils-extra python2-gobject python2-cairo python2-poppler python2-pdfrw \
bcprov gtk-engine-unico gtk-engine-murrine gtk-engines xorg-fonts-encodings \
libxfixes xorg-server xorg-xinit xf86-input-synaptics xf86-video-fbdev xlockmore phonon \
mate-panel mate-netbook mate-extra mate-themes-extras mate-nettool gnome-mplayer mate-accountsdialog \
gtk2-perl automoc4 xorg-server-utils xorg-xmessage librsvg icedtea-web gconf \
hunspell-en chromium thunderbird windowmaker libdevmapper-dev \
' # mate cgpt
self.install_from_AUR = 'paman mintmenu ttf-ms-fonts gtk-theme-adwaita-x win-xp-theme wmsystemtray python2-pysqlite python2-pyptlib hachoir-core hachoir-parser mat obfsproxy java-service-wrapper i2p' # pulseaudio-ctl pasystray-git ssss florence
self.final_push_packages = Distro.final_push_packages + ' lxdm network-manager-applet'
def install_barebones_root_filesystem( self ):
logme( 'ArchlinuxDistro - install_barebones_root_filesystem() - starting' )
# wget( url = 'http://us.mirror.archlinuxarm.org/os/ArchLinuxARM-chromebook-latest.tar.gz', \
os.system( 'umount %s/dev &>/dev/null' % ( self.mountpoint ) )
my_url = 'http://us.mirror.archlinuxarm.org/os/ArchLinuxARM-chromebook-latest.tar.gz'
# my_url = 'https://dl.dropboxusercontent.com/u/59916027/chrubix/skeletons/ArchLinuxARM-chromebook-latest.tar.gz'
wget( url = my_url, \
extract_to_path = self.mountpoint, decompression_flag = 'z', \
title_str = self.title_str, status_lst = self.status_lst )
return 0
# def install_locale( self ):
# logme( 'ArchlinuxDistro - install_locale() - starting' )
# # chroot_this( self.mountpoint, 'yes 2> /dev/null | pacman -S locales locales-all', title_str = self.title_str, status_lst = self.status_lst ):
# super( ArchlinuxDistro, self ).install_locale()
def install_kernel_and_mkfs ( self ):
# Technically, this won't install Linux-latest, which wasn't built with makepkg's help anyway. However, it WILL install
# 3.4.0-ARCH (built w/ makepkg). Two kernels wait in PKGBUILDs/ore: one in linux-latest, the other in linux-chromebook.
logme( 'ArchlinuxDistro - install_kernel_and_mkfs() - starting' )
chroot_this( self.mountpoint, r'yes "" 2>/dev/null | pacman -U `find %s | grep -x ".*\.tar\.xz"`' % ( self.sources_basedir ), title_str = self.title_str, status_lst = self.status_lst )
# if self.use_latest_kernel:
# chroot_this( self.mountpoint, 'cd %s/linux-latest && make install && make modules_install' % ( self.sources_basedir ),
# title_str = self.title_str, status_lst = self.status_lst,
# on_fail = "Failed to install the standard ChromeOS kernel and/or modules" )
self.update_status_with_newline( '...kernel installed.' )
def install_package_manager_tweaks( self ):
logme( 'ArchlinuxDistro - install_package_manager_tweaks() - starting' )
do_a_sed( '%s/etc/pacman.d/mirrorlist' % ( self.mountpoint ), '#.*Server =', 'Server =' )
def update_and_upgrade_all( self ):
logme( 'ArchlinuxDistro - update_and_upgrade_all() - starting' )
# system_or_die( 'sync; sync; sync; sleep 1' )
system_or_die( 'rm -f %s/var/lib/pacman/db.lck; sync; sync; sync; sleep 2; sync; sync; sync; sleep 2' % ( self.mountpoint ) )
chroot_this( self.mountpoint, r'yes "" 2>/dev/null | pacman -Syu', "Failed to upgrade OS", attempts = 5, title_str = self.title_str, status_lst = self.status_lst )
system_or_die( 'sync; sync; sync; sleep 1; sync; sync; sync; sleep 1' )
def install_important_packages( self ):
logme( 'ArchlinuxDistro - install_important_packages() - starting' )
self.package_group_size = 2
os.system( 'clear' )
print( 'Chroot into the distro. Try running pacman -Syu. See if it works. Then, exit.' )
os.system( 'bash' )
chroot_this( self.mountpoint, 'yes "" 2> /dev/null | pacman -Syu', title_str = self.title_str, status_lst = self.status_lst )
chroot_this( self.mountpoint, 'yes "" 2>/dev/null | pacman -S --needed --force fakeroot', title_str = self.title_str, status_lst = self.status_lst )
system_or_die( 'rm -f %s/var/lib/pacman/db.lck; sync; sync; sync; sleep 2; sync; sync; sync; sleep 2' % ( self.mountpoint ) )
packages_lst = [ r for r in self.important_packages.split( ' ' ) if r != '']
list_of_groups = [ packages_lst[i:i + self.package_group_size] for i in range( 0, len( packages_lst ), self.package_group_size ) ]
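# Illustrative: with package_group_size = 2, ['a', 'b', 'c'] becomes [['a', 'b'], ['c']],
# so pacman is invoked once per pair of packages below.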
for lst in list_of_groups:
s = ''.join( [r + ' ' for r in lst] )
chroot_this( self.mountpoint, 'yes "" 2>/dev/null | pacman -Syu --needed ' + s, title_str = self.title_str, status_lst = self.status_lst,
on_fail = 'Failed to install %s' % ( ''.join( [' ' + r for r in lst] ) ) )
logme( 'Installed%s OK' % ( ''.join( [' ' + r for r in lst] ) ) )
self.update_status( '.' )
# self.update_and_upgrade_all()
# fix_perl_cpan( self.mountpoint )
# abort_if_make_is_segfaulting( self.mountpoint )
chroot_this( self.mountpoint, 'yes "" 2>/dev/null | pacman -Syu --needed --force fakeroot', title_str = self.title_str, status_lst = self.status_lst,
on_fail = 'Failed to install fakeroot' )
for pkg in ( 'shiboken', 'python-pyside' ):
abort_if_make_is_segfaulting( self.mountpoint )
self.update_status( '.' )
self.build_and_install_software_from_archlinux_source( pkg, quiet = False )
self.update_status_with_newline( 'installed.' )
chroot_this( self.mountpoint, 'yes "" 2>/dev/null | pacman -Syu --needed --force cgpt', title_str = self.title_str, status_lst = self.status_lst,
on_fail = 'Failed to install cgpt' )
system_or_die( 'rm -Rf %s/var/cache/apt/archives/*' % ( self.mountpoint ) )
def download_mkfs_sources( self ):
logme( 'ArchlinuxDistro - download_mkfs_sources() - starting' )
assert( self.list_of_mkfs_packages[0].find( 'btrfs' ) >= 0 )
assert( self.list_of_mkfs_packages[1].find( 'jfs' ) >= 0 )
assert( self.list_of_mkfs_packages[2].find( 'xfs' ) >= 0 )
self.download_package_source( self.list_of_mkfs_packages[0], ( 'PKGBUILD', 'btrfs-progs.install', 'initcpio-hook-btrfs', 'initcpio-install-btrfs' ) ) # , '01-fix-manpages.patch' ) )
self.download_package_source( self.list_of_mkfs_packages[1], ( 'PKGBUILD', 'inttypes.patch' ) )
self.download_package_source( self.list_of_mkfs_packages[2], ( 'PKGBUILD', ) )
def download_package_source( self, package_name, filenames_lst = None ):
logme( 'ArchlinuxDistro - download_package_source() - starting' )
# self.update_status(( [ "Downloading %s package into %s OS" % ( package_name, self.name ) ] )
system_or_die( 'mkdir -p %s/%s/%s' % ( self.mountpoint, self.sources_basedir, package_name ) )
os.chdir( '%s/%s/%s' % ( self.mountpoint, self.sources_basedir, package_name ) )
if os.path.isfile( '%s/%s/%s/PKGBUILD' % ( self.mountpoint, self.sources_basedir, package_name ) ):
self.update_status( '' ) # += "." # ..Still working" # No need to download anything. We have PKGBUILD already.
elif filenames_lst in ( None, [] ):
url = 'aur.archlinux.org/packages/%s/%s/%s.tar.gz' % ( package_name[:2], package_name, package_name )
wget( url = url, extract_to_path = '%s/%s' % ( self.mountpoint, self.sources_basedir ), quiet = True , title_str = self.title_str, status_lst = self.status_lst )
else:
for fname in filenames_lst:
file_to_download = '%s/%s/%s/%s' % ( self.mountpoint, self.sources_basedir, package_name, fname )
try:
os.unlink( file_to_download )
except IOError:
pass
wget( url = 'http://projects.archlinux.org/svntogit/packages.git/plain/trunk/%s?h=packages/%s' \
% ( fname, package_name ), save_as_file = file_to_download, attempts = 20,
quiet = True, title_str = self.title_str, status_lst = self.status_lst )
system_or_die( 'mv PKGBUILD PKGBUILD.ori' )
system_or_die( r"cat PKGBUILD.ori | sed s/march/phr34k/ | sed s/\'libutil-linux\'// | sed s/\'java-service-wrapper\'// | sed s/arch=\(.*/arch=\(\'%s\'\)/ | sed s/phr34k/march/ > PKGBUILD" % ( self.architecture ) )
chroot_this( self.mountpoint, 'chown -R guest %s/%s' % ( self.sources_basedir, package_name ) )
call_makepkg_or_die( mountpoint = self.mountpoint, \
package_path = '%s/%s' % ( self.sources_basedir, package_name ), \
cmd = 'cd %s/%s && makepkg --skipchecksums --nobuild -f' % ( self.sources_basedir, package_name ),
errtxt = 'Failed to download %s' % ( package_name ) )
return 0
def build_package( self, source_pathname ):
logme( 'ArchlinuxDistro - build_package() - starting' )
package_name = os.path.basename( source_pathname )
package_path = os.path.dirname( source_pathname )
str_to_add = "Kernel & rootfs" if package_name == 'linux-chromebook' else "%s" % ( package_name )
self.update_status( '...' + str_to_add )
chroot_this( self.mountpoint, 'chown -R guest %s/%s' % ( self.sources_basedir, package_name ) )
chroot_this( self.mountpoint, 'cd %s && makepkg --skipchecksums --noextract -f ' % ( source_pathname ), \
"Failed to chroot make %s within %s" % ( package_name, package_path ),
title_str = self.title_str, status_lst = self.status_lst ,
user = 'guest' )
chroot_this( self.mountpoint, 'chown -R root %s/%s' % ( self.sources_basedir, package_name ) )
self.update_status_with_newline( '...Built.' )
def install_i2p( self ):
logme( 'install_i2p() --- FYI, i2p was already installed as one of my packages in Phase A. Yay!' )
def configure_distrospecific_tweaks( self ):
logme( 'ArchlinuxDistro - configure_distrospecific_tweaks() - starting' )
self.update_status_with_newline( 'Installing distro-specific tweaks' )
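        # Add the locally built mkfs packages and the kernel package to IgnorePkg in
        # pacman.conf so a later 'pacman -Syu' does not replace them with repo builds.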
friendly_list_of_packages_to_exclude = ''.join( r + ' ' for r in self.list_of_mkfs_packages ) + os.path.basename( self.kernel_src_basedir )
do_a_sed( '%s/etc/pacman.conf' % ( self.mountpoint ), '#.*IgnorePkg.*', 'IgnorePkg = %s' % ( friendly_list_of_packages_to_exclude ) )
chroot_this( self.mountpoint, 'systemctl enable lxdm.service' )
# logme( 'FYI, ArchLinux has no distro-specific post-install tweaks at present' )
self.update_status_with_newline( '...tweaked.' )
# def downgrade_systemd_if_necessary( self, bad_verno ):
# if bad_verno == None or 0 == chroot_this( self.mountpoint, 'pacman -Q systemd | fgrep "systemd %s"' % ( bad_verno ) ):
# wget( url = 'https://dl.dropboxusercontent.com/u/59916027/chrubix/systemd-212-3-armv7h.pkg.tar.xz',
# # wget( url = 'http://rollback.adminempire.com/2014/06/03/armv7h/core/systemd-212-3-armv7h.pkg.tar.xz',
# save_as_file = '%s/tmp/systemd-212-3-armv7h.pkg.tar.xz' % ( self.mountpoint ),
# status_lst = self.status_lst, title_str = self.title_str )
# chroot_this( self.mountpoint, 'yes "" | pacman -U /tmp/systemd-212-3-armv7h.pkg.tar.xz',
# status_lst = self.status_lst, title_str = self.title_str,
# on_fail = 'Failed to downgrade systemd' )
# self.update_status(' (downgraded SystemD)'
#
def install_final_push_of_packages( self ):
logme( 'ArchlinuxDistro - install_final_push_of_packages() - starting' )
self.update_status( 'Installed' )
for my_fname in ( 'ssss-0.5-3-armv7h.pkg.tar.xz', 'florence-0.6.2-1-armv7h.pkg.tar.xz' ):
try:
system_or_die( 'cp /usr/local/bin/Chrubix/blobs/apps/%s /%s/tmp/' % ( my_fname, self.mountpoint ) )
except RuntimeError:
wget( url = 'https://dl.dropboxusercontent.com/u/59916027/chrubix/%s' % ( my_fname ), \
save_as_file = '%s/tmp/%s' % ( self.mountpoint, my_fname ), \
status_lst = self.status_lst, \
title_str = self.title_str )
if 0 == chroot_this( self.mountpoint, 'yes "" | pacman -U /tmp/%s' % ( my_fname ) ):
self.update_status( ' ' + my_fname.split( '-' )[0] )
# else:
# failed( 'Failed to install ' + my_fname.split( '-' )[0])
# perl-cpan-meta-check perl-class-load-xs perl-eval-closure perl-mro-compat perl-package-depreciationmanager perl-sub-name perl-task-weaken \
# perl-test-checkdeps perl-test-without-module perl-moose
failed_pkgs = self.install_from_AUR
attempts = 0
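        # Retry anything that failed to build from the AUR: up to 5 passes, with a
        # full update/upgrade between passes in case a missing dependency has since
        # become installable.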
while failed_pkgs != '' and attempts < 5:
self.update_and_upgrade_all()
attempts += 1
packages_to_install = failed_pkgs
failed_pkgs = ''
for pkg_name in packages_to_install.split( ' ' ):
if pkg_name in ( None, '', ' ' ):
continue
try:
self.build_and_install_software_from_archlinux_source( pkg_name, quiet = True )
self.update_status( ' %s' % ( pkg_name ) )
except RuntimeError:
failed_pkgs += ' %s' % ( pkg_name )
self.update_status( '...OK.' )
if failed_pkgs != '':
self.update_status( ['Warning - failed to install%s' % ( failed_pkgs )] )
# self.update_status(' etc. ')
# self.update_status(( ['Installing %s' % ( self.final_push_packages.replace( ' ', ' ' ).replace( ' ', ', ' ) )] )
chroot_this( self.mountpoint, 'yes "" 2>/dev/null | pacman -S --needed %s' % ( self.final_push_packages ), title_str = self.title_str, status_lst = self.status_lst,
on_fail = 'Failed to install final push of packages', attempts = 20 )
self.update_and_upgrade_all()
self.update_status_with_newline( '...complete.' )
|
ReubenAbrams/Chrubix
|
src/chrubix/distros/archlinux.py
|
Python
|
gpl-3.0
| 15,622
|
[
"MOOSE"
] |
29d80d2e15dfca001b9c28dc8ddd5487d548d578cb0c0387bcf951b41a0ea8e6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010, 2011 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import time
import zmq
from zmq.tests import BaseZMQTestCase
from zmq.utils.strtypes import bytes, unicode
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from nose import SkipTest
except ImportError:
class SkipTest(Exception):
pass
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestSocket(BaseZMQTestCase):
def test_create(self):
ctx = zmq.Context()
s = ctx.socket(zmq.PUB)
# Superluminal protocol not yet implemented
self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://a')
self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://a')
self.assertRaisesErrno(zmq.EINVAL, s.bind, 'tcp://')
s.close()
del ctx
def test_2_1_sockopts(self):
"test non-uint64 sockopts introduced in zeromq 2.1.0"
v = list(map(int, zmq.zmq_version().split('.', 2)[:2]))
        if tuple(v) < (2, 1):  # requires zeromq >= 2.1
raise SkipTest
p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
p.setsockopt(zmq.LINGER, 0)
self.assertEquals(p.getsockopt(zmq.LINGER), 0)
p.setsockopt(zmq.LINGER, -1)
self.assertEquals(p.getsockopt(zmq.LINGER), -1)
self.assertEquals(p.getsockopt(zmq.HWM), 0)
p.setsockopt(zmq.HWM, 11)
self.assertEquals(p.getsockopt(zmq.HWM), 11)
# p.setsockopt(zmq.EVENTS, zmq.POLLIN)
self.assertEquals(p.getsockopt(zmq.EVENTS), zmq.POLLOUT)
self.assertRaisesErrno(zmq.EINVAL, p.setsockopt,zmq.EVENTS, 2**7-1)
self.assertEquals(p.getsockopt(zmq.TYPE), p.socket_type)
self.assertEquals(p.getsockopt(zmq.TYPE), zmq.PUB)
self.assertEquals(s.getsockopt(zmq.TYPE), s.socket_type)
self.assertEquals(s.getsockopt(zmq.TYPE), zmq.SUB)
def test_sockopt_roundtrip(self):
"test set/getsockopt roundtrip."
p = self.context.socket(zmq.PUB)
self.assertEquals(p.getsockopt(zmq.HWM), 0)
p.setsockopt(zmq.HWM, 11)
self.assertEquals(p.getsockopt(zmq.HWM), 11)
def test_close(self):
ctx = zmq.Context()
s = ctx.socket(zmq.PUB)
s.close()
self.assertRaises(zmq.ZMQError, s.bind, ''.encode())
self.assertRaises(zmq.ZMQError, s.connect, ''.encode())
self.assertRaises(zmq.ZMQError, s.setsockopt, zmq.SUBSCRIBE, ''.encode())
self.assertRaises(zmq.ZMQError, s.send, 'asdf'.encode())
self.assertRaises(zmq.ZMQError, s.recv)
del ctx
|
svpcom/pyzmq-ctypes
|
zmq/tests/test_socket.py
|
Python
|
lgpl-3.0
| 3,625
|
[
"Brian"
] |
c8713cf3bc8e992efc224a192a535c2526d33a4b28d12c2c3a6fe86b2fda44bf
|
# -*- coding: utf-8 -*-
"""
Utilities for smoothing the 0.5 level-set of binary arrays.
"""
import logging
from typing import Tuple
import numpy as np
from scipy import sparse
from scipy import ndimage as ndi
__all__ = [
'smooth',
'smooth_constrained',
'smooth_gaussian',
'signed_distance_function'
]
def _build_variable_indices(band: np.ndarray) -> np.ndarray:
num_variables = np.count_nonzero(band)
variable_indices = np.full(band.shape, -1, dtype=np.int_)
variable_indices[band] = np.arange(num_variables)
return variable_indices
def _buildq3d(variable_indices: np.ndarray):
"""
    Build the smoothing matrix ``filterq`` for the given variables: the Gram
    matrix Q^T Q, where Q stacks one second-difference row per axis for each
    variable. 3D version.
"""
num_variables = variable_indices.max() + 1
filterq = sparse.lil_matrix((3*num_variables, num_variables))
# Pad variable_indices to simplify out-of-bounds accesses
variable_indices = np.pad(
variable_indices,
[(0, 1), (0, 1), (0, 1)],
mode='constant',
constant_values=-1
)
coords = np.nonzero(variable_indices >= 0)
for count, (i, j, k) in enumerate(zip(*coords)):
assert(variable_indices[i, j, k] == count)
filterq[3*count, count] = -2
neighbor = variable_indices[i-1, j, k]
if neighbor >= 0:
filterq[3*count, neighbor] = 1
else:
filterq[3*count, count] += 1
neighbor = variable_indices[i+1, j, k]
if neighbor >= 0:
filterq[3*count, neighbor] = 1
else:
filterq[3*count, count] += 1
filterq[3*count+1, count] = -2
neighbor = variable_indices[i, j-1, k]
if neighbor >= 0:
filterq[3*count+1, neighbor] = 1
else:
filterq[3*count+1, count] += 1
neighbor = variable_indices[i, j+1, k]
if neighbor >= 0:
filterq[3*count+1, neighbor] = 1
else:
filterq[3*count+1, count] += 1
filterq[3*count+2, count] = -2
neighbor = variable_indices[i, j, k-1]
if neighbor >= 0:
filterq[3*count+2, neighbor] = 1
else:
filterq[3*count+2, count] += 1
neighbor = variable_indices[i, j, k+1]
if neighbor >= 0:
filterq[3*count+2, neighbor] = 1
else:
filterq[3*count+2, count] += 1
filterq = filterq.tocsr()
return filterq.T.dot(filterq)
def _buildq2d(variable_indices: np.ndarray):
"""
    Build the smoothing matrix ``filterq`` for the given variables: the Gram
    matrix Q^T Q of per-axis second differences. 2D version.
"""
num_variables = variable_indices.max() + 1
    # Two second-difference rows (one per axis) for each variable in 2D.
    filterq = sparse.lil_matrix((2*num_variables, num_variables))
# Pad variable_indices to simplify out-of-bounds accesses
variable_indices = np.pad(
variable_indices,
[(0, 1), (0, 1)],
mode='constant',
constant_values=-1
)
coords = np.nonzero(variable_indices >= 0)
for count, (i, j) in enumerate(zip(*coords)):
assert(variable_indices[i, j] == count)
filterq[2*count, count] = -2
neighbor = variable_indices[i-1, j]
if neighbor >= 0:
filterq[2*count, neighbor] = 1
else:
filterq[2*count, count] += 1
neighbor = variable_indices[i+1, j]
if neighbor >= 0:
filterq[2*count, neighbor] = 1
else:
filterq[2*count, count] += 1
filterq[2*count+1, count] = -2
neighbor = variable_indices[i, j-1]
if neighbor >= 0:
filterq[2*count+1, neighbor] = 1
else:
filterq[2*count+1, count] += 1
neighbor = variable_indices[i, j+1]
if neighbor >= 0:
filterq[2*count+1, neighbor] = 1
else:
filterq[2*count+1, count] += 1
filterq = filterq.tocsr()
return filterq.T.dot(filterq)
def _jacobi(
filterq,
x0: np.ndarray,
lower_bound: np.ndarray,
upper_bound: np.ndarray,
max_iters: int = 10,
rel_tol: float = 1e-6,
weight: float = 0.5):
"""Jacobi method with constraints."""
jacobi_r = sparse.lil_matrix(filterq)
shp = jacobi_r.shape
jacobi_d = 1.0 / filterq.diagonal()
jacobi_r.setdiag((0,) * shp[0])
jacobi_r = jacobi_r.tocsr()
x = x0
# We check the stopping criterion each 10 iterations
check_each = 10
cum_rel_tol = 1 - (1 - rel_tol)**check_each
energy_now = np.dot(x, filterq.dot(x)) / 2
logging.debug("Energy at iter %d: %.6g", 0, energy_now)
for i in range(max_iters):
x_1 = - jacobi_d * jacobi_r.dot(x)
x = weight * x_1 + (1 - weight) * x
# Constraints.
x = np.maximum(x, lower_bound)
x = np.minimum(x, upper_bound)
# Stopping criterion
if (i + 1) % check_each == 0:
# Update energy
energy_before = energy_now
energy_now = np.dot(x, filterq.dot(x)) / 2
logging.debug("Energy at iter %d: %.6g", i + 1, energy_now)
# Check stopping criterion
cum_rel_improvement = (energy_before - energy_now) / energy_before
if cum_rel_improvement < cum_rel_tol:
break
return x
def signed_distance_function(
levelset: np.ndarray,
band_radius: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
    Return the distance to the 0.5 level-set of a function, the mask of the
    border (i.e., the nearest cells to the 0.5 level-set) and the mask of the
    band (i.e., the cells of the function whose distance to the 0.5 level-set
    is less than or equal to `band_radius`).
"""
    binary_array = np.asarray(levelset) > 0
# Compute the band and the border.
dist_func = ndi.distance_transform_edt
distance = np.where(
binary_array,
dist_func(binary_array) - 0.5,
-dist_func(~binary_array) + 0.5
)
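    # The +/-0.5 offset puts the zero crossing halfway between a True cell and its
    # nearest False neighbour, i.e. on the 0.5 level-set of the binary input.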
border = np.abs(distance) < 1
band = np.abs(distance) <= band_radius
return distance, border, band
def smooth_constrained(
binary_array: np.ndarray,
band_radius: int = 4,
max_iters: int = 500,
rel_tol: float = 1e-6
) -> np.ndarray:
"""
Implementation of the smoothing method from
"Surface Extraction from Binary Volumes with Higher-Order Smoothness"
Victor Lempitsky, CVPR10
"""
# # Compute the distance map, the border and the band.
logging.info("Computing distance transform...")
distance, _, band = signed_distance_function(binary_array, band_radius)
variable_indices = _build_variable_indices(band)
# Compute filterq.
logging.info("Building matrix filterq...")
if binary_array.ndim == 3:
filterq = _buildq3d(variable_indices)
elif binary_array.ndim == 2:
filterq = _buildq2d(variable_indices)
else:
raise ValueError("binary_array.ndim not in [2, 3]")
# Initialize the variables.
res = np.asarray(distance, dtype=np.double)
x = res[band]
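    # Constraints: cells that start negative keep their value as an upper bound and
    # positive cells keep theirs as a lower bound; within one voxel of the surface
    # the bound is relaxed to 0, so the zero level-set cannot leave the border cells.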
upper_bound = np.where(x < 0, x, np.inf)
lower_bound = np.where(x > 0, x, -np.inf)
upper_bound[np.abs(upper_bound) < 1] = 0
lower_bound[np.abs(lower_bound) < 1] = 0
# Solve.
logging.info("Minimizing energy...")
x = _jacobi(
filterq=filterq,
x0=x,
lower_bound=lower_bound,
upper_bound=upper_bound,
max_iters=max_iters,
rel_tol=rel_tol
)
res[band] = x
return res
def smooth_gaussian(binary_array: np.ndarray, sigma: float = 3) -> np.ndarray:
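    """
    Smooth by recentring the binary array around 0 (values -0.5/+0.5) and applying
    a Gaussian filter of width `sigma`; the smoothed surface is its 0 level-set.
    """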
vol = np.float_(binary_array) - 0.5
return ndi.gaussian_filter(vol, sigma=sigma)
def smooth(
binary_array: np.ndarray,
method: str = 'auto',
**kwargs
) -> np.ndarray:
"""
    Smooths the 0.5 level-set of a binary array. Returns a floating-point
    array with a smoothed version of the original level-set at the 0 isovalue.
This function can apply two different methods:
- A constrained smoothing method which preserves details and fine
structures, but it is slow and requires a large amount of memory. This
method is recommended when the input array is small (smaller than
(500, 500, 500)).
- A Gaussian filter applied over the binary array. This method is fast, but
not very precise, as it can destroy fine details. It is only recommended
when the input array is large and the 0.5 level-set does not contain
thin structures.
Parameters
----------
binary_array : ndarray
Input binary array with the 0.5 level-set to smooth.
method : str, one of ['auto', 'gaussian', 'constrained']
Smoothing method. If 'auto' is given, the method will be automatically
chosen based on the size of `binary_array`.
Parameters for 'gaussian'
-------------------------
sigma : float
Size of the Gaussian filter (default 3).
Parameters for 'constrained'
----------------------------
max_iters : positive integer
Number of iterations of the constrained optimization method
(default 500).
rel_tol: float
Relative tolerance as a stopping criterion (default 1e-6).
    Returns
    -------
res : ndarray
Floating-point array with a smoothed 0 level-set.
"""
binary_array = np.asarray(binary_array)
if method == 'auto':
if binary_array.size > 500**3:
method = 'gaussian'
else:
method = 'constrained'
if method == 'gaussian':
return smooth_gaussian(binary_array, **kwargs)
if method == 'constrained':
return smooth_constrained(binary_array, **kwargs)
raise ValueError("Unknown method '{}'".format(method))
|
pmneila/PyMCubes
|
mcubes/smoothing.py
|
Python
|
bsd-3-clause
| 9,699
|
[
"Gaussian"
] |
6cdbbbe3253b3c85f85a2b8048b2f5950248b48598d6cee9240f9de73b1632e5
|
# -*- coding: utf-8 -*-
#
# futures documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 3 19:35:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'futures'
copyright = u'2009-2011, Brian Quinlan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.1.2'
# The full version, including alpha/beta/rc tags.
release = '2.1.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'futuresdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'futures.tex', u'futures Documentation',
u'Brian Quinlan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
santegoeds/pythonfutures
|
docs/conf.py
|
Python
|
bsd-2-clause
| 6,302
|
[
"Brian"
] |
e194cf57e674a44a0586ba1e958da1e5b70c7b1f26b8b1d36015cf852dd212d8
|
from pylab import plot,grid,title,subplot,xlabel,ylabel,text,subplots_adjust,fill_between,mean,connect,show
from shogun.Kernel import GaussianKernel
from shogun.Classifier import LibSVM, LDA
from shogun.Evaluation import ROCEvaluation
import util
util.set_title('ROC example')
util.DISTANCE=0.5
subplots_adjust(hspace=0.3)
pos=util.get_realdata(True)
neg=util.get_realdata(False)
features=util.get_realfeatures(pos, neg)
labels=util.get_labels()
# classifiers
gk=GaussianKernel(features, features, 1.0)
svm = LibSVM(1000.0, gk, labels)
svm.train()
lda=LDA(1,features,labels)
lda.train()
## plot points
subplot(211)
plot(pos[0,:], pos[1,:], "r.")
plot(neg[0,:], neg[1,:], "b.")
grid(True)
title('Data',size=10)
# plot ROC for SVM
subplot(223)
ROC_evaluation=ROCEvaluation()
ROC_evaluation.evaluate(svm.apply(),labels)
roc = ROC_evaluation.get_ROC()
print(roc)
plot(roc[0], roc[1])
fill_between(roc[0],roc[1],0,alpha=0.1)
text(mean(roc[0])/2,mean(roc[1])/2,'auROC = %.5f' % ROC_evaluation.get_auROC())
grid(True)
xlabel('FPR')
ylabel('TPR')
title('LibSVM (Gaussian kernel, C=%.3f) ROC curve' % svm.get_C1(),size=10)
# plot ROC for LDA
subplot(224)
ROC_evaluation.evaluate(lda.apply(),labels)
roc = ROC_evaluation.get_ROC()
plot(roc[0], roc[1])
fill_between(roc[0],roc[1],0,alpha=0.1)
text(mean(roc[0])/2,mean(roc[1])/2,'auROC = %.5f' % ROC_evaluation.get_auROC())
grid(True)
xlabel('FPR')
ylabel('TPR')
title('LDA (gamma=%.3f) ROC curve' % lda.get_gamma(),size=10)
connect('key_press_event', util.quit)
show()
|
ratschlab/ASP
|
examples/undocumented/python_modular/graphical/roc.py
|
Python
|
gpl-2.0
| 1,515
|
[
"Gaussian"
] |
9a0d5b3512a3b586dbcc78f0b11b15de594498a675f310e64257107b67a324f6
|
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
from io import StringIO
import itertools
import mmap
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
AnyStr,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib, properties
from pandas._libs.lib import no_default
from pandas._typing import (
AggFuncType,
ArrayLike,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
Dtype,
FilePathOrBuffer,
FloatFormatType,
FormattersType,
FrameOrSeriesUnion,
IndexKeyFunc,
IndexLabel,
Label,
Level,
PythonFuncType,
Renamer,
StorageOptions,
Suffixes,
ValueKeyFunc,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
doc,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_2d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_datetimelike,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
validate_numeric_casting,
)
from pandas.core.dtypes.common import (
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_scalar,
is_sequence,
pandas_dtype,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, generic, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.aggregation import reconstruct_func, relabel_result, transform
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import extract_array, sanitize_masked_array
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import MultiIndex, maybe_droplevels
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
nested_data_to_arrays,
reorder_arrays,
sanitize_index,
to_arrays,
treat_as_nested,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.sorting import get_group_index, lexsort_indexer, nargsort
from pandas.io.common import get_handle
from pandas.io.formats import console, format as fmt
from pandas.io.formats.info import BaseInfo, DataFrameInfo
import pandas.plotting
if TYPE_CHECKING:
from typing import Literal
from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.resample import Resampler
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"inplace": """
inplace : boolean, default False
If True, performs operation inplace and returns None.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
@property
def _constructor(self) -> Type[DataFrame]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_hidden_attrs: FrozenSet[str] = NDFrame._hidden_attrs | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
# GH#31549 raising NotImplementedError on a property causes trouble
# for `inspect`
def constructor(*args, **kwargs):
raise NotImplementedError("Not supported for DataFrames!")
return constructor
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
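        # Constructor dispatch: BlockManager (fastpath), DataFrame (reuse its
        # manager), dict, masked array, ndarray/Series/Index, generic list-like,
        # and finally scalar input, each routed to the matching internals helper.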
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
data = sanitize_masked_array(data)
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif is_list_like(data):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if treat_as_nested(data):
arrays, columns, index = nested_data_to_arrays(
data, columns, index, dtype
)
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
# TODO(EA2D): special case not needed with 2D EAs
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
values = construct_2d_arraylike_from_scalar(
data, len(index), len(columns), dtype, copy
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape : Tuple of array dimensions.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if self._mgr.any_extension_types:
# TODO(EA2D) special case would be unnecessary with 2D EAs
return False
return len(self._mgr.blocks) == 1
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
In case of non-interactive session, no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
        if max_rows is not None:  # a display.max_rows limit is set; truncate before rendering
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(line) for line in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace("<", r"&lt;", 1)
            val = val.replace(">", r"&gt;", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
)
return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.FormattersType] = None,
float_format: Optional[fmt.FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Label, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Label, Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Label, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index: bool = True, name: Optional[str] = "Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other in a DataFrame or a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
        The dot method also works if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
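A minimal sketch showing the equivalence with :meth:`DataFrame.dot`:
>>> df = pd.DataFrame([[1, 2], [3, 4]])
>>> other = pd.DataFrame([[1, 0], [0, 1]])
>>> (df @ other).equals(df.dot(other))
True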
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(
self, dtype=None, copy: bool = False, na_value=lib.no_default
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the resulting dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
result = self._mgr.as_array(
transpose=self._AXIS_REVERSED, dtype=dtype, copy=copy, na_value=na_value
)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient: str = "dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c((k, v.tolist()) for k, v in self.items())
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "series":
return into_c((k, maybe_box_datetimelike(v)) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table: str,
project_id: Optional[str] = None,
chunksize: Optional[int] = None,
reauth: bool = False,
if_exists: str = "fail",
auth_local_webserver: bool = False,
table_schema: Optional[List[Dict[str, str]]] = None,
location: Optional[str] = None,
progress_bar: bool = True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame
columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
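Examples
--------
A hypothetical sketch; the table name and project ID are placeholders,
and running it requires the pandas-gbq package plus valid credentials:
>>> df = pd.DataFrame({'my_string': ['a', 'b', 'c']})
>>> df.to_gbq('my_dataset.my_table',
...           project_id='my-project')  # doctest: +SKIP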
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float: bool = False,
nrows=None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
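# Materialize up to `nrows` rows from the iterator, remembering the
# dtype if the first row is a structured (record) scalar so the
# collected rows can be rebuilt as a structured ndarray below.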
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns_list, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns)
if coerce_float:
for i, arr in enumerate(arrays):
if arr.dtype == object:
arrays[i] = lib.maybe_convert_objects(arr, try_float=True)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable).
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Optional[Dtype] = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The rows labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays, as they will be
stored in a block (numpy ndarray or ExtensionArray), have the same
length as and are aligned with the index, and that `columns` and
`index` are already Index objects.
Returns
-------
DataFrame
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
mgr = arrays_to_mgr(
arrays,
columns,
index,
columns,
dtype=dtype,
verify_integrity=verify_integrity,
)
return cls(mgr)
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePathOrBuffer,
convert_dates: Optional[Dict[Label, str]] = None,
write_index: bool = True,
byteorder: Optional[str] = None,
time_stamp: Optional[datetime.datetime] = None,
data_label: Optional[str] = None,
variable_labels: Optional[Dict[Label, str]] = None,
version: Optional[int] = 114,
convert_strl: Optional[Sequence[Label]] = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {{114, 117, 118, 119, None}}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
Version 119 should usually only be used when the number of
variables exceeds the capacity of dta format 118. Exporting
smaller datasets in format 119 may have unintended consequences,
and, as of November 2020, Stata SE cannot read version 119 files.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns in Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
compression : str or dict, default 'infer'
For on-the-fly compression of the output dta. If string, specifies
compression mode. If dict, value at key 'method' specifies
compression mode. Compression mode must be one of {{'infer', 'gzip',
'bz2', 'zip', 'xz', None}}. If compression mode is 'infer' and
`fname` is path-like, then detect compression from the following
extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no
compression). If dict and compression mode is one of {{'zip',
'gzip', 'bz2'}}, or inferred as one of the above, other entries
passed as additional compression options.
.. versionadded:: 1.1.0
{storage_options}
.. versionadded:: 1.2.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: Dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
# mypy: Too many arguments for "StataWriter"
writer = statawriter( # type: ignore[call-arg]
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePathOrBuffer[AnyStr], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str or file-like object
If a string, it will be used as Root Directory path.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
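Examples
--------
A minimal sketch; the file name is illustrative and pyarrow must be
installed:
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_feather('out.feather')  # doctest: +SKIP
>>> pd.read_feather('out.feather')  # doctest: +SKIP
   col1  col2
0     1     3
1     2     4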
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
storage_options=_shared_docs["storage_options"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+
""",
)
def to_markdown(
self,
buf: Optional[Union[IO[str], str]] = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[str]:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=2,
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
assert not isinstance(handles.handle, (str, mmap.mmap))
handles.handle.writelines(result)
return None
@doc(storage_options=generic._shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: Optional[FilePathOrBuffer] = None,
engine: str = "auto",
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
partition_cols: Optional[List[str]] = None,
storage_options: StorageOptions = None,
**kwargs,
) -> Optional[bytes]:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str or file-like object, default None
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handle
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects. If path is None,
a bytes object is returned.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use a io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[ColspaceArgType] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[FormattersType] = None,
float_format: Optional[FloatFormatType] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: Union[bool, str] = False,
decimal: str = ".",
bold_rows: bool = True,
classes: Optional[Union[str, List, Tuple]] = None,
escape: bool = True,
notebook: bool = False,
border: Optional[int] = None,
table_id: Optional[str] = None,
render_links: bool = False,
encoding: Optional[str] = None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
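Examples
--------
A minimal sketch (the exact markup depends on display options such as
``pd.options.display.html.border``):
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [4, 3]})
>>> html = df.to_html()
>>> html.startswith('<table')
True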
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter would be in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
# ----------------------------------------------------------------------
@Substitution(
klass="DataFrame",
type_sub=" and columns",
max_cols_sub=dedent(
"""\
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used."""
),
show_counts_sub=dedent(
"""\
show_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the DataFrame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
null_counts : bool, optional
.. deprecated:: 1.2.0
Use show_counts instead."""
),
examples_sub=dedent(
"""\
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 165.9 MB"""
),
see_also_sub=dedent(
"""\
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns."""
),
version_added_sub="",
)
@doc(BaseInfo.render)
def info(
self,
verbose: Optional[bool] = None,
buf: Optional[IO[str]] = None,
max_cols: Optional[int] = None,
memory_usage: Optional[Union[bool, str]] = None,
show_counts: Optional[bool] = None,
null_counts: Optional[bool] = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=2,
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
).append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._mgr.iget(i)
result = self._box_col_values(values, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
indexer = lib.maybe_indices_to_slice(
indexer.astype(np.intp, copy=False), len(self)
)
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
# GH#26490 using data[key] can cause RecursionError
data = data._get_item_cache(key)
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
try:
loc = engine.get_loc(index)
return series._values[loc]
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(value, DataFrame):
self._set_item_frame_value(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.iloc[indexer] = value
else:
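# `key` is a list of target column labels. If `value` is a DataFrame,
# assign it column by column; otherwise ensure the target columns
# exist and set them positionally via iloc.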
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
self.loc._ensure_listlike_indexer(key, axis=1, value=value)
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.iloc[:, indexer] = value
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _set_item_frame_value(self, key, value: "DataFrame") -> None:
self._ensure_valid_index(value)
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = _reindex_for_setitem(value, self.index)
value = value.T
self._set_item_mgr(key, value)
def _iset_item_mgr(self, loc: int, value) -> None:
self._mgr.iset(loc, value)
self._clear_item_cache()
def _set_item_mgr(self, key, value):
value = _maybe_atleast_2d(value)
try:
loc = self._info_axis.get_loc(key)
except KeyError:
# This item wasn't present, just insert at end
self._mgr.insert(len(self._info_axis), key, value)
else:
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _iset_item(self, loc: int, value):
value = self._sanitize_column(value)
value = _maybe_atleast_2d(value)
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
value = self._sanitize_column(value)
if (
key in self.columns
and value.ndim == 1
and not is_extension_array_dtype(value)
):
# broadcast across multiple columns if necessary
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
self._set_item_mgr(key, value)
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
"""
try:
if takeable is True:
series = self._ixs(col, axis=1)
series._set_value(index, value, takeable=True)
return
series = self._get_item_cache(col)
engine = self.index._engine
loc = engine.get_loc(index)
validate_numeric_casting(series.dtype, value)
series._values[loc] = value
# Note: trying to use series._set_value breaks tests in
# tests.frame.indexing.test_indexing and tests.indexing.test_partial
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
return klass(values, index=self.index, name=name, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr: str, inplace: bool = False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuation (besides underscores) or starting with digits must be
surrounded by backticks. (For example, a column named "Area (cm^2)" would
be referenced as `Area (cm^2)`). Column names which are Python keywords
(like "list", "for", "import", etc.) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
This excludes whitespace other than the space character,
but also the hashtag (as it is used for comments) and the backtick
itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
else:
return result
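# A small usage sketch ('threshold' is a hypothetical local variable):
# df.query('A > @threshold') resolves '@threshold' from the enclosing Python
# scope, while df.query('`C C` > 7') reaches a column whose name is not a
# valid Python identifier.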
def eval(self, expr: str, inplace: bool = False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
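# Helper: from the dtypes actually present in the frame, pick out those that
# are subclasses of a requested dtype (or, for extension dtypes flagged as
# numeric, covered by np.number in the requested set).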
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
if (
issubclass(
unique_dtype.type, tuple(dtypes_set) # type: ignore[arg-type]
)
or (
np.number in dtypes_set
and getattr(unique_dtype, "_is_numeric", False)
)
)
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates: bool = False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must satisfy 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional, default False
Allow duplicate column labels to be created.
See Also
--------
Index.insert : Insert new item by index.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df
col1 col2
0 1 3
1 2 4
>>> df.insert(1, "newcol", [99, 99])
>>> df
col1 newcol col2
0 1 99 3
1 2 99 4
>>> df.insert(0, "col1", [100, 100], allow_duplicates=True)
>>> df
col1 col1 newcol col2
0 100 1 99 3
1 100 2 99 4
"""
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
value = self._sanitize_column(value)
value = _maybe_atleast_2d(value)
self._mgr.insert(loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, value):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
value : scalar, Series, or array-like
Returns
-------
numpy.ndarray
"""
self._ensure_valid_index(value)
# We should never get here with DataFrame value
if isinstance(value, Series):
value = _reindex_for_setitem(value, self.index)
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
value = construct_1d_arraylike_from_scalar(value, len(self), dtype=None)
return value
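# _series: a plain dict of column label -> Series built directly from the
# manager blocks (no copy), used internally where columns need to be iterated
# as Series.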
@property
def _series(self):
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use DataFrame.melt and DataFrame.loc instead.
For an example see :meth:`~pandas.DataFrame.lookup`
in the user guide.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
msg = (
"The 'lookup' method is deprecated and will be"
"removed in a future version."
"You can use DataFrame.melt and DataFrame.loc"
"as a substitute."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
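# Reindexing helpers: _reindex_axes applies column then index reindexing, each
# of which computes an indexer against the target labels and dispatches to
# _reindex_with_indexers; _reindex_multi covers the case where both axes change
# at once and can take a single 2-D fancy-indexing fast path.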
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy: bool,
level: Level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy: bool,
level: Level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy: bool, fill_value) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join: str = "outer",
axis: Optional[Axis] = None,
level: Optional[Level] = None,
copy: bool = True,
fill_value=None,
method: Optional[str] = None,
limit=None,
fill_axis: Axis = 0,
broadcast_axis: Optional[Axis] = None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
def drop(
self,
labels=None,
axis: Axis = 0,
index=None,
columns=None,
level: Optional[Level] = None,
inplace: bool = False,
errors: str = "raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional[DataFrame]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value=None,
method: Optional[str] = None,
axis: Optional[Axis] = None,
inplace: bool = False,
limit=None,
downcast=None,
) -> Optional[DataFrame]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
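# Typical calls (usage sketch): df.fillna(0) replaces NA values with a scalar,
# while df.fillna(method='ffill') propagates the last valid observation forward.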
def pop(self, item: Label) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace: bool = False,
limit=None,
regex: bool = False,
method: str = "pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: Dict[Label, Tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res.iloc[:, i] = newobj
if inplace:
return
return res.__finalize__(self)
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self, periods=1, freq=None, axis: Axis = 0, fill_value=lib.no_default
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if axis == 1 and periods != 0 and fill_value is lib.no_default and ncols > 0:
# We will infer fill_value to match the closest column
# Use a column that we know is valid for our column's dtype GH#38434
label = self.columns[0]
if periods > 0:
result = self.iloc[:, :-periods]
for col in range(min(ncols, abs(periods))):
# TODO(EA2D): doing this in a loop unnecessary with 2D EAs
# Define filler inside loop so we get a copy
filler = self.iloc[:, 0].shift(len(self))
result.insert(0, label, filler, allow_duplicates=True)
else:
result = self.iloc[:, -periods:]
for col in range(min(ncols, abs(periods))):
# Define filler inside loop so we get a copy
filler = self.iloc[:, -1].shift(len(self))
result.insert(
len(result.columns), label, filler, allow_duplicates=True
)
result.columns = self.columns.copy()
return result
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
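# For axis=1 shifts without an explicit fill_value, the loop above rebuilds the
# frame column by column so each inserted filler keeps a dtype compatible with
# its neighbours; e.g. shifting two numeric columns right by one leaves the
# first column entirely NaN while preserving the original column labels.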
def set_index(
self,
keys,
drop: bool = True,
append: bool = False,
inplace: bool = False,
verify_integrity: bool = False,
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
If True, modifies the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Label] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: List[Label] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Label] = []
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
@overload
# https://github.com/python/mypy/issues/6580
# Overloaded function signatures 1 and 2 overlap with incompatible return types
def reset_index( # type: ignore[misc]
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
col_fill: Label = ...,
) -> DataFrame:
...
@overload
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = ...,
drop: bool = ...,
inplace: Literal[True] = ...,
col_level: Hashable = ...,
col_fill: Label = ...,
) -> None:
...
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Label = "",
) -> Optional[DataFrame]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, MultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = lev._values
if level_values.dtype == np.object_:
level_values = lib.maybe_convert_objects(level_values)
if lab is not None:
# if we have the codes, extract the values with a mask
level_values = algorithms.take(
level_values, lab, allow_fill=True, fill_value=lev._na_value
)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
return ~self.isna()
def dropna(
self,
axis: Axis = 0,
how: str = "any",
thresh=None,
subset=None,
inplace: bool = False,
):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. versionchanged:: 1.0.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional[DataFrame]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
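# -duplicated negates the boolean mask (equivalent to ~duplicated), keeping
# only the rows not flagged as duplicates.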
result = self[-duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series for each duplicated rows.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
from pandas._libs.hashtable import SIZE_HINT_LIMIT, duplicated_int64
if self.empty:
return self._constructor_sliced(dtype=bool)
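# Each column in the subset is factorized into integer codes; the per-column
# codes are combined into a single group index so that two rows get the same
# id exactly when all of their subset values match, and duplicated_int64 then
# flags repeats of that id according to `keep`.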
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
result = self._constructor_sliced(duplicated_int64(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis: Axis = 0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
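# Usage sketch of the `key` argument:
# df.sort_values(by='name', key=lambda col: col.str.lower()) sorts
# case-insensitively; the callable receives each 'by' column as a Series and
# must return a Series of the same length.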
def sort_index(
self,
axis: Axis = 0,
level: Optional[Level] = None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
):
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` and `stable` are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order; to sort in descending order,
use ``ascending=False``.
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis,
level,
ascending,
inplace,
kind,
na_position,
sort_remaining,
ignore_index,
key,
)
def value_counts(
self,
subset: Optional[Sequence[Label]] = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
def nlargest(self, n, columns, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
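Examples
--------
A minimal sketch for illustration; the level names ``outer`` and
``inner`` are hypothetical:
>>> mi = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [1, 2, 1]],
...                                names=['outer', 'inner'])
>>> df = pd.DataFrame({'v': [10, 20, 30]}, index=mi)
>>> df.swaplevel().index.names
FrozenList(['inner', 'outer'])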
"""
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
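Examples
--------
A minimal sketch for illustration; the level names ``x``, ``y`` and
``z`` are hypothetical:
>>> mi = pd.MultiIndex.from_arrays([[1, 1], ['a', 'b'], [True, False]],
...                                names=['x', 'y', 'z'])
>>> df = pd.DataFrame({'v': [0, 1]}, index=mi)
>>> df.reorder_levels(['z', 'x', 'y']).index.names
FrozenList(['z', 'x', 'y'])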
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
def _dispatch_frame_op(self, right, func, axis: Optional[int] = None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
right : scalar, Series, or DataFrame
func : arithmetic or comparison operator
axis : {None, 0, 1}
Returns
-------
DataFrame
"""
# Get the appropriate array-op to apply to each column/block's values.
array_op = ops.get_array_op(func)
right = lib.item_from_zerodim(right)
if not is_list_like(right):
# i.e. scalar, faster than checking np.ndim(right) == 0
bm = self._mgr.apply(array_op, right=right)
return type(self)(bm)
elif isinstance(right, DataFrame):
assert self.index.equals(right.index)
assert self.columns.equals(right.columns)
# TODO: The previous assertion `assert right._indexed_same(self)`
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
bm = self._mgr.operate_blockwise(right._mgr, array_op)
return type(self)(bm)
elif isinstance(right, Series) and axis == 1:
# axis=1 means we want to operate row-by-row
assert right.index.equals(self.columns)
right = right._values
# maybe_align_as_frame ensures we do not have an ndarray here
assert not isinstance(right, np.ndarray)
arrays = [
array_op(_left, _right)
for _left, _right in zip(self._iter_column_arrays(), right)
]
elif isinstance(right, Series):
assert right.index.equals(self.index) # Handle other cases later
right = right._values
arrays = [array_op(left, right) for left in self._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
return type(self)._from_arrays(
arrays, self.columns, self.index, verify_integrity=False
)
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
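# e.g. ``divmod(df, 3)`` yields ``(df // 3, df % 3)``, computed here as
# ``(df // 3, df - (df // 3) * 3)``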
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> Tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
# ----------------------------------------------------------------------
# Combination-Related
@doc(
_shared_docs["compare"],
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames.
Examples
--------
>>> df = pd.DataFrame(
... {{
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... }},
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
""",
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite: bool = True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and returns a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote series, which is all NaN, as other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y):
mask = extract_array(isna(x))
x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(
self,
other,
join: str = "left",
overwrite: bool = True,
filter_func=None,
errors: str = "ignore",
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose whether to include NA in the group keys by setting
the `dropna` parameter; the default is `True`:
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis: Axis = 0,
level: Optional[Level] = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation; multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of column names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add row / column margins (e.g. subtotals / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level: Level = -1, dropna: bool = True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
result = stack_multiple(self, level, dropna=dropna)
else:
result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
def explode(
self, column: Union[str, Tuple], ignore_index: bool = False
) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a Series of list-likes to long format.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
if ignore_index:
result.index = ibase.default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
result = unstack(self, level, fill_value)
return result.__finalize__(self, method="unstack")
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level: Optional[Level] = None,
ignore_index=True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
)
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
klass="Dataframe",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not isinstance(periods, int):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
bm_axis = self._get_block_manager_axis(axis)
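# For a DataFrame the block manager stores data transposed, so the
# user-facing ``axis=1`` (column-wise diff) maps to ``bm_axis == 0``
# and is handled by the shift-and-subtract fallback below.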
if bm_axis == 0 and periods != 0:
return self - self.shift(periods, axis=axis)
new_data = self._mgr.diff(n=periods, axis=bm_axis)
return self._constructor(new_data).__finalize__(self, "diff")
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[Label, List[Label]],
ndim: int,
subset: Optional[FrameOrSeriesUnion] = None,
) -> FrameOrSeriesUnion:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
result = None
try:
result, how = self._aggregate(func, axis, *args, **kwargs)
except TypeError as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
if relabeling:
# This is to keep the order to columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
def _aggregate(self, arg, axis: Axis = 0, *args, **kwargs):
from pandas.core.apply import frame_apply
op = frame_apply(
self if axis == 0 else self.T,
how="agg",
func=arg,
axis=0,
args=args,
kwds=kwargs,
)
result, how = op.get_result()
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result = result.T if result is not None else result
return result, how
agg = aggregate
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
result = transform(self, func, axis, *args, **kwargs)
assert isinstance(result, DataFrame)
return result
def apply(
self,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type=None,
args=(),
**kwds,
):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
how="apply",
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
def applymap(
self, func: PythonFuncType, na_action: Optional[str] = None
) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values without passing them to func.
.. versionadded:: 1.2
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 <NA> 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer).__finalize__(self, "applymap")
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self,
other,
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return (
concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
).__finalize__(self, method="append")
def join(
self,
other: FrameOrSeriesUnion,
on: Optional[IndexLabel] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling frame's index.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
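A list of DataFrames can also be joined on their indexes in a single call;
a minimal sketch reusing the frames above (the `on`, `lsuffix` and `rsuffix`
parameters are not available in this form):
>>> joined = df.set_index('key').join([other.set_index('key')])
The result is identical to ``df.set_index('key').join(other.set_index('key'))``
shown above.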
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self,
other: FrameOrSeriesUnion,
on: Optional[IndexLabel] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right: FrameOrSeriesUnion,
how: str = "inner",
on: Optional[IndexLabel] = None,
left_on: Optional[IndexLabel] = None,
right_on: Optional[IndexLabel] = None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes: Suffixes = ("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate: Optional[str] = None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
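A minimal illustration of ``min_periods`` (the frame below is constructed
ad hoc): column pairs with fewer than ``min_periods`` overlapping
observations yield ``NaN``.
>>> df = pd.DataFrame([(1, 1), (2, np.nan), (np.nan, 3), (4, 4)],
... columns=['dogs', 'cats'])
>>> df.corr(min_periods=3)
      dogs  cats
dogs   1.0   NaN
cats   NaN   1.0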
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
def cov(
self, min_periods: Optional[int] = None, ddof: Optional[int] = 1
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__
for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.to_numpy(dtype=float, na_value=np.nan, copy=False)
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
def corrwith(self, other, axis: Axis = 0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
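Examples
--------
A minimal sketch (the two frames below are constructed so that every shared
column is perfectly linearly related, hence each correlation equals 1.0):
>>> index = ['a', 'b', 'c', 'd', 'e']
>>> columns = ['one', 'two', 'three', 'four']
>>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4), index=index, columns=columns)
>>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4), index=index[:4], columns=columns)
>>> df1.corrwith(df2)
one      1.0
two      1.0
three    1.0
four     1.0
dtype: float64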
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(
self, axis: Axis = 0, level: Optional[Level] = None, numeric_only: bool = False
):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64")
def _count_level(self, level: Level, axis: Axis = 0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
*,
axis: Axis = 0,
skipna: bool = True,
numeric_only: Optional[bool] = None,
filter_type=None,
**kwds,
):
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
own_dtypes = [arr.dtype for arr in self._iter_column_arrays()]
dtype_is_dt = np.array(
[is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
dtype=bool,
)
if numeric_only is None and name in ["mean", "median"] and dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=5,
)
cols = self.columns[~dtype_is_dt]
self = self[cols]
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values: np.ndarray):
# We only use this in the case that operates on self.values
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values):
if isinstance(values, ExtensionArray):
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=1, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
# For numeric_only=None, only the case with axis==0 and no object
# dtypes is unambiguous and can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
if numeric_only is True:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, indexer = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
# Even if we are object dtype, follow numpy and return
# float64, see test_apply_funcs_over_empty
out = out.astype(np.float64)
return out
assert numeric_only is None
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
pass
result = self._constructor_sliced(result, index=labels)
return result
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin : Return index of the minimum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the minimum value in each column.
>>> df.idxmin()
consumption Pork
co2_emissions Wheat Products
dtype: object
To return the index for the minimum value in each row, use ``axis="columns"``.
>>> df.idxmin(axis="columns")
Pork consumption
Wheat Products co2_emissions
Beef consumption
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def idxmax(self, axis: Axis = 0, skipna: bool = True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax : Return index of the maximum element.
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
Examples
--------
Consider a dataset containing food consumption in Argentina.
>>> df = pd.DataFrame({'consumption': [10.51, 103.11, 55.48],
... 'co2_emissions': [37.2, 19.66, 1712]},
... index=['Pork', 'Wheat Products', 'Beef'])
>>> df
consumption co2_emissions
Pork 10.51 37.20
Wheat Products 103.11 19.66
Beef 55.48 1712.00
By default, it returns the index for the maximum value in each column.
>>> df.idxmax()
consumption Wheat Products
co2_emissions Beef
dtype: object
To return the index for the maximum value in each row, use ``axis="columns"``.
>>> df.idxmax(axis="columns")
Pork co2_emissions
Wheat Products consumption
Beef co2_emissions
dtype: object
"""
axis = self._get_axis_number(axis)
res = self._reduce(
nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return self._constructor_sliced(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(
self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True
) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of ``wings``
are both 0 and 2. Because the resulting DataFrame has two rows,
the second row of ``species`` and ``legs`` contains ``NaN``.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
With ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
data = data.apply(f, axis=axis)
# Ensure index is type stable (should always use int index)
if data.empty:
data.index = ibase.default_index(0)
return data
def quantile(
self,
q=0.5,
axis: Axis = 0,
numeric_only: bool = True,
interpolation: str = "linear",
):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
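The ``interpolation`` keyword selects how a quantile lying between two data
points is computed; a minimal sketch (the numeric frame from the first
example is re-created because ``df`` was replaced above):
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> lower = df.quantile(.1, interpolation='lower')      # picks the smaller bracketing value
>>> nearest = df.quantile(.1, interpolation='nearest')  # picks the closer bracketing value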
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._mgr.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
@doc(NDFrame.asfreq, **_shared_doc_kwargs)
def asfreq(
self,
freq,
method=None,
how: Optional[str] = None,
normalize: bool = False,
fill_value=None,
) -> "DataFrame":
return super().asfreq(
freq=freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
@doc(NDFrame.resample, **_shared_doc_kwargs)
def resample(
self,
rule,
axis=0,
closed: Optional[str] = None,
label: Optional[str] = None,
convention: str = "start",
kind: Optional[str] = None,
loffset=None,
base: Optional[int] = None,
on=None,
level=None,
origin: Union[str, "TimestampConvertibleTypes"] = "start_day",
offset: Optional["TimedeltaConvertibleTypes"] = None,
) -> "Resampler":
return super().resample(
rule=rule,
axis=axis,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
base=base,
on=on,
level=level,
origin=origin,
offset=offset,
)
def to_timestamp(
self, freq=None, how: str = "start", axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
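A minimal sketch (the data below is illustrative):
>>> idx = pd.period_range('2020Q1', periods=2, freq='Q')
>>> df = pd.DataFrame({'sales': [10, 20]}, index=idx)
>>> ts_df = df.to_timestamp()  # the PeriodIndex becomes a DatetimeIndex starting 2020-01-01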
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, PeriodIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(self, freq=None, axis: Axis = 0, copy: bool = True) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, default
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
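A minimal sketch (the data below is illustrative):
>>> idx = pd.date_range('2020-01-31', periods=3, freq='M')
>>> df = pd.DataFrame({'sales': [10, 20, 30]}, index=idx)
>>> p_df = df.to_period('M')  # the DatetimeIndex becomes a monthly PeriodIndex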
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings).
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
return self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: Dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_REVERSED = True
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> Dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> Dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
def _reindex_for_setitem(value: FrameOrSeriesUnion, index: Index) -> ArrayLike:
# reindex if necessary
if value.index.equals(index) or not len(index):
return value._values.copy()
# GH#4107
try:
reindexed_value = value.reindex(index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return reindexed_value
def _maybe_atleast_2d(value):
# TODO(EA2D): not needed with 2D EAs
if is_extension_array_dtype(value):
return value
return np.atleast_2d(np.asarray(value))
|
jreback/pandas
|
pandas/core/frame.py
|
Python
|
bsd-3-clause
| 329,625
|
[
"Elk"
] |
b4dcc302d1221ea7a38194c21e20605df297d62a4e9b9e835d591084866b509d
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
**espressopp.esutil.GammaVariate**
**********************************
.. function:: espressopp.esutil.GammaVariate(alpha, beta)
:param alpha:
:param beta:
:type alpha:
:type beta:
"""
from espressopp import pmi
from espressopp.esutil import cxxinit  # required by GammaVariateLocal.__init__ below
from _espressopp import esutil_GammaVariate
class GammaVariateLocal(esutil_GammaVariate):
def __init__(self, alpha, beta):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, esutil_GammaVariate, alpha, beta)
if pmi.isController:
class GammaVariate(object):
__metaclass__ = pmi.Proxy
"""A random gamma variate."""
pmiproxydefs = dict(
cls = 'espressopp.esutil.GammaVariateLocal',
localcall = [ '__call__' ],
)
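# Illustrative usage sketch (not part of the original module; the parameter
# values are arbitrary and the draw-by-call behaviour is assumed from the
# 'localcall' forwarding of '__call__' above):
#
#   gamma = espressopp.esutil.GammaVariate(2.0, 1.0)
#   x = gamma()  # forwarded to the local C++ esutil_GammaVariate object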
|
junghans/espressopp
|
src/esutil/GammaVariate.py
|
Python
|
gpl-3.0
| 1,699
|
[
"ESPResSo"
] |
ccddc97412530b126e7cb2909c2f5afa1bd53168cd22dae1effff91fe912058f
|
# -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2019 EventGhost Project <http://www.eventghost.org/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import threading
import webbrowser
import wx
from agithub.GitHub import GitHub
from pkg_resources import parse_version
# Local imports
import eg
class Text(eg.TranslatableStrings):
newVersionMesg = \
"A new version of EventGhost has been released!\n\n"\
"Your version:\t%s\n"\
"Newest version:\t%s\n\n"\
"Do you want to visit the download page now?"
waitMesg = "Please wait while EventGhost retrieves update information."
ManOkMesg = "There is currently no newer version of EventGhost available."
ManErrorMesg = \
"It wasn't possible to get the information from the EventGhost "\
"website.\n\n"\
"Please try it again later."
wipUpdateMsg = "Update check not available when running from source."
class CheckUpdate:
@classmethod
@eg.LogIt
def Start(cls):
threading.Thread(target=_checkUpdate, name="CheckUpdate").start()
@classmethod
def CheckUpdateManually(cls):
_checkUpdate(manually=True)
class MessageDialog(eg.Dialog):
def __init__(self, version, url):
self.url = url
eg.Dialog.__init__(self, None, -1, eg.APP_NAME)
bmp = wx.ArtProvider.GetBitmap(
wx.ART_INFORMATION,
wx.ART_MESSAGE_BOX,
(32, 32)
)
staticBitmap = wx.StaticBitmap(self, -1, bmp)
staticText = self.StaticText(
Text.newVersionMesg % (eg.Version.string, version)
)
downloadButton = wx.Button(self, -1, eg.text.General.ok)
downloadButton.Bind(wx.EVT_BUTTON, self.OnOk)
cancelButton = wx.Button(self, -1, eg.text.General.cancel)
cancelButton.Bind(wx.EVT_BUTTON, self.OnCancel)
sizer2 = eg.HBoxSizer(
(staticBitmap, 0, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 10),
((5, 5), 0),
(
staticText,
0,
wx.TOP | wx.RIGHT | wx.BOTTOM | wx.ALIGN_CENTER_VERTICAL,
10
),
)
self.SetSizerAndFit(
eg.VBoxSizer(
(sizer2),
((5, 5), 1),
(
eg.HBoxSizer(
(downloadButton),
((5, 5), 0),
(cancelButton),
), 0, wx.ALIGN_CENTER_HORIZONTAL
),
((2, 10), 0),
)
)
self.ShowModal()
def OnCancel(self, event):
self.Close()
def OnOk(self, event):
webbrowser.open(self.url, True, True)
self.Close()
def CenterOnParent(self):
parent = eg.document.frame
if parent is None:
return
x, y = parent.GetPosition()
parentWidth, parentHeight = parent.GetSize()
width, height = self.GetSize()
self.SetPosition(
((parentWidth - width) / 2 + x, (parentHeight - height) / 2 + y)
)
def ShowWaitDialog():
dialog = wx.Dialog(None, style=wx.THICK_FRAME | wx.DIALOG_NO_PARENT)
staticText = wx.StaticText(dialog, -1, Text.waitMesg)
sizer = wx.BoxSizer(wx.HORIZONTAL)
sizer.Add(staticText, 1, wx.ALL, 20)
dialog.SetSizerAndFit(sizer)
CenterOnParent(dialog)
dialog.Show()
wx.GetApp().Yield()
return dialog
def _checkUpdate(manually=False):
if eg.Version.string == "WIP":
if manually:
wx.MessageBox(Text.wipUpdateMsg, eg.APP_NAME)
return
dialog = None
try:
if manually:
dialog = ShowWaitDialog()
gh = GitHub()
rc, data = gh.repos["EventGhost"]["EventGhost"].releases.get()
if rc == 200:
for rel in data:
if rel["prerelease"]:
if eg.config.checkPreRelease or "-" in eg.Version.string:
break
else:
break
if dialog:
dialog.Destroy()
dialog = None
ver = rel["name"].lstrip("v")
url = rel["html_url"]
if (
rc == 200 and
parse_version(ver) > parse_version(eg.Version.string) and
(manually or ver != eg.config.lastUpdateCheckVersion)
):
eg.config.lastUpdateCheckVersion = ver
wx.CallAfter(MessageDialog, ver, url)
else:
if manually:
dlg = wx.MessageDialog(
None,
Text.ManOkMesg,
eg.APP_NAME,
style=wx.OK | wx.ICON_INFORMATION
)
dlg.ShowModal()
dlg.Destroy()
except:
if dialog:
dialog.Destroy()
if manually:
dlg = wx.MessageDialog(
None,
Text.ManErrorMesg,
eg.APP_NAME,
style=wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
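# Entry points, as defined above (an illustrative note, not part of the
# original file): CheckUpdate.Start() runs _checkUpdate in a background
# thread and stays silent unless a newer release is found, while
# CheckUpdate.CheckUpdateManually() runs it synchronously and always
# reports the outcome with a dialog.
#
#   CheckUpdate.Start()                 # silent background check
#   CheckUpdate.CheckUpdateManually()   # interactive check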
|
topic2k/EventGhost
|
eg/Classes/CheckUpdate.py
|
Python
|
gpl-2.0
| 5,663
|
[
"VisIt"
] |
36e8a38eaae20d99f153202a8755970b7207558c5aa00219b8d023dbd9552fcd
|
from pathlib import Path
import xarray
from typing import Union, Tuple, Dict, Sequence
from typing.io import TextIO
from datetime import datetime, timedelta
import logging
from .rio import rinexinfo
from .obs2 import rinexobs2
from .obs3 import rinexobs3
from .nav2 import rinexnav2
from .nav3 import rinexnav3
from .sp3 import load_sp3
from .utils import _tlim
# for NetCDF compression. too high slows down with little space savings.
ENC = {"zlib": True, "complevel": 1, "fletcher32": True}
def load(
rinexfn: Union[TextIO, str, Path],
out: Path = None,
use: Sequence[str] = None,
tlim: Tuple[datetime, datetime] = None,
useindicators: bool = False,
meas: Sequence[str] = None,
verbose: bool = False,
*,
overwrite: bool = False,
fast: bool = True,
interval: Union[float, int, timedelta] = None,
) -> Union[xarray.Dataset, Dict[str, xarray.Dataset]]:
"""
Reads OBS, NAV in RINEX 2.x and 3.x
Files / StringIO input may be plain ASCII text or compressed (including Hatanaka)
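Illustrative usage (file names, paths and times below are hypothetical):
import georinex as gr
from datetime import datetime
obs = gr.load("site0010.20o",
tlim=(datetime(2020, 1, 1, 0), datetime(2020, 1, 1, 1)))
nav = gr.load("site0010.20n", out="~/data")  # also writes ~/data/site0010.20n.nc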
"""
if verbose:
logging.basicConfig(level=logging.INFO)
if isinstance(rinexfn, (str, Path)):
rinexfn = Path(rinexfn).expanduser()
# %% determine if/where to write NetCDF4/HDF5 output
outfn = None
if out:
out = Path(out).expanduser()
if out.is_dir():
outfn = out / (
rinexfn.name + ".nc"
) # not with_suffix to keep unique RINEX 2 filenames
elif out.suffix == ".nc":
outfn = out
else:
raise ValueError(f"not sure what output is wanted: {out}")
# %% main program
if tlim is not None:
if len(tlim) != 2:
raise ValueError("time bounds are specified as start stop")
if tlim[1] < tlim[0]:
raise ValueError("stop time must be after start time")
try:
info = rinexinfo(rinexfn)
except RuntimeError:
logging.error(
f"could not read {rinexfn} header. It may not be a known type of RINEX file."
)
return None
if info["rinextype"] == "sp3":
return load_sp3(rinexfn, outfn)
elif info["rinextype"] == "nav":
return rinexnav(rinexfn, outfn, use=use, tlim=tlim, overwrite=overwrite)
elif info["rinextype"] == "obs":
return rinexobs(
rinexfn,
outfn,
use=use,
tlim=tlim,
useindicators=useindicators,
meas=meas,
verbose=verbose,
overwrite=overwrite,
fast=fast,
interval=interval,
)
elif rinexfn.suffix == ".nc":
# outfn not used here, because we already have the converted file!
try:
nav = rinexnav(rinexfn)
except LookupError:
nav = None
try:
obs = rinexobs(rinexfn)
except LookupError:
obs = None
if nav is not None and obs is not None:
return {"nav": nav, "obs": rinexobs(rinexfn)}
elif nav is not None:
return nav
elif obs is not None:
return obs
else:
raise ValueError(f"No data of known format found in {rinexfn}")
else:
raise ValueError(f"What kind of RINEX file is: {rinexfn}")
def batch_convert(
path: Path,
glob: str,
out: Path,
use: Sequence[str] = None,
tlim: Tuple[datetime, datetime] = None,
useindicators: bool = False,
meas: Sequence[str] = None,
verbose: bool = False,
*,
fast: bool = True,
):
path = Path(path).expanduser()
flist = (f for f in path.glob(glob) if f.is_file())
for fn in flist:
try:
load(
fn,
out,
use=use,
tlim=tlim,
useindicators=useindicators,
meas=meas,
verbose=verbose,
fast=fast,
)
except ValueError as e:
logging.error(f"{fn.name}: {e}")
def rinexnav(
fn: Union[TextIO, str, Path],
outfn: Path = None,
use: Sequence[str] = None,
group: str = "NAV",
tlim: Tuple[datetime, datetime] = None,
*,
overwrite: bool = False,
) -> xarray.Dataset:
""" Read RINEX 2 or 3 NAV files"""
if isinstance(fn, (str, Path)):
fn = Path(fn).expanduser()
if fn.suffix == ".nc":
try:
return xarray.open_dataset(fn, group=group)
except OSError as e:
raise LookupError(f"Group {group} not found in {fn} {e}")
tlim = _tlim(tlim)
info = rinexinfo(fn)
if int(info["version"]) == 2:
nav = rinexnav2(fn, tlim=tlim)
elif int(info["version"]) == 3:
nav = rinexnav3(fn, use=use, tlim=tlim)
else:
raise LookupError(f"unknown RINEX {info} {fn}")
# %% optional output write
if outfn:
outfn = Path(outfn).expanduser()
wmode = _groupexists(outfn, group, overwrite)
enc = {k: ENC for k in nav.data_vars}
nav.to_netcdf(outfn, group=group, mode=wmode, encoding=enc)
return nav
# %% Observation File
def rinexobs(
fn: Union[TextIO, str, Path],
outfn: Path = None,
use: Sequence[str] = None,
group: str = "OBS",
tlim: Tuple[datetime, datetime] = None,
useindicators: bool = False,
meas: Sequence[str] = None,
verbose: bool = False,
*,
overwrite: bool = False,
fast: bool = True,
interval: Union[float, int, timedelta] = None,
) -> xarray.Dataset:
"""
Read RINEX 2.x and 3.x OBS files in ASCII or GZIP (or Hatanaka)
"""
if isinstance(fn, (str, Path)):
fn = Path(fn).expanduser()
# %% NetCDF4
if fn.suffix == ".nc":
try:
return xarray.open_dataset(fn, group=group)
except OSError as e:
raise LookupError(f"Group {group} not found in {fn} {e}")
tlim = _tlim(tlim)
# %% version selection
info = rinexinfo(fn)
if int(info["version"]) in (1, 2):
obs = rinexobs2(
fn,
use,
tlim=tlim,
useindicators=useindicators,
meas=meas,
verbose=verbose,
fast=fast,
interval=interval,
)
elif int(info["version"]) == 3:
obs = rinexobs3(
fn,
use,
tlim=tlim,
useindicators=useindicators,
meas=meas,
verbose=verbose,
fast=fast,
interval=interval,
)
else:
raise ValueError(f"unknown RINEX {info} {fn}")
# %% optional output write
if outfn:
outfn = Path(outfn).expanduser()
wmode = _groupexists(outfn, group, overwrite)
enc = {k: ENC for k in obs.data_vars}
# Pandas >= 0.25.0 requires this, regardless of xarray version
if obs.time.dtype != "datetime64[ns]":
obs["time"] = obs.time.astype("datetime64[ns]")
obs.to_netcdf(outfn, group=group, mode=wmode, encoding=enc)
return obs
def _groupexists(fn: Path, group: str, overwrite: bool) -> str:
print(f"saving {group}:", fn)
if overwrite or not fn.is_file():
return "w"
# be sure there isn't already NAV in it
try:
xarray.open_dataset(fn, group=group)
raise ValueError(f"{group} already in {fn}")
except OSError:
pass
return "a"
|
scienceopen/pyrinex
|
src/georinex/base.py
|
Python
|
mit
| 7,443
|
[
"NetCDF"
] |
878e070904036c8b8a0382a95a908f7fe5ded9158afef4169cdf9a8f639b35bc
|
from neuron import h
h.nrn_load_dll("E:\\Google Drive\\Github\\Spinal-Cord-Modeling\\nrnmech.dll")
import helper_functions as hf
from Ia_template import Ia
from Mn_template import Mn
# get_spikes() below uses a 'spiketrain' helper that this file never imports;
# the NEURON Python tutorials provide it as neuronpy.util.spiketrain (assumed here).
from neuronpy.util import spiketrain
class Ia_network:
def __init__(self, N = 2, syn_w = 0.15, syn_delay = 1):
self._N = N;
self.cells = [] # Cells in the net
self.nclist = [] # NetCon list
self.syn_w = syn_w # Synaptic weight
self.syn_delay = syn_delay # Synaptic delay
self.t_vec = h.Vector() # Spike time of all cells
self.id_vec = h.Vector() # Ids of spike times
self.set_numcells(N) # Actually build the net.
#
def set_numcells(self, N):
"""Create, layout, and connect N cells."""
self._N = N
self.create_cells(N)
self.connect_cells()
#
def create_cells(self, N):
"""Create and layout N cells in the network."""
self.cells = []
r = 50 # Radius of cell locations from origin (0,0,0) in microns
N = self._N
position_factor = 5e3;
sim_params = hf.get_net_params(hf.get_tempdata_address())
mn_pos_x = sim_params[10]
mn_pos_y = sim_params[11]
mn_pos_z = sim_params[12]
for i in range(N):
cell = Mn()
cell.set_position(mn_pos_x[0]+i * position_factor,mn_pos_y[0]+i * position_factor,mn_pos_z[0]+i * position_factor)
self.cells.append(cell)
for i in range(N):
cell = Ia()
cell.set_position(i * position_factor,i * position_factor,i * position_factor)
self.cells.append(cell)
#
def connect_cells(self):
"""Connect cell i to cell i + N."""
self.nclist = []
N = self._N
for i in range(N):
src = self.cells[N+i]
tgt_syn = self.cells[i].synlist[0]
nc = src.connect2target(src.Ia_node[0], tgt_syn)
nc.weight[0] = self.syn_w
nc.delay = self.syn_delay
nc.record(self.t_vec, self.id_vec, i)
self.nclist.append(nc)
#
'''def connect_stim(self):
"""Connect a spiking generator to the first cell to get
the network going."""
self.stim = h.NetStim()
self.stim.number = self.stim_number
self.stim.start = 9
self.ncstim = h.NetCon(self.stim, self.cells[0].synlist[0])
self.ncstim.delay = 1
self.ncstim.weight[0] = self.stim_w # NetCon weight is a vector.'''
#
def get_spikes(self):
"""Get the spikes as a list of lists."""
return spiketrain.netconvecs_to_listoflists(self.t_vec, self.id_vec)
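# Illustrative usage sketch (not part of the original file; assumes NEURON's
# standard run system, e.g. h.load_file('stdrun.hoc'), is available):
#   net = Ia_network(N=2, syn_w=0.15, syn_delay=1)
#   h.tstop = 100
#   h.run()
#   spikes = net.get_spikes()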
|
penguinscontrol/Spinal-Cord-Modeling
|
Python/Ia_network.py
|
Python
|
gpl-2.0
| 2,666
|
[
"NEURON"
] |
d839eac962f06b714573fae5a220a125a105c8eabb4285c9b88d6fa04088073e
|
# encoding: utf-8
'''
Created on Nov 26, 2015
@author: tal
Based in part on:
Learn math - https://github.com/fchollet/keras/blob/master/examples/addition_rnn.py
See https://medium.com/@majortal/deep-spelling-9ffef96a24f6#.2c9pu8nlm
'''
from __future__ import print_function, division, unicode_literals
import os
import errno
from collections import Counter
from hashlib import sha256
import re
import json
import itertools
import logging
import requests
import numpy as np
from numpy.random import choice as random_choice, randint as random_randint, shuffle as random_shuffle, seed as random_seed, rand
from numpy import zeros as np_zeros # pylint:disable=no-name-in-module
from keras.models import Sequential, load_model
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, Dropout, recurrent
from keras.callbacks import Callback
# Set a logger for the module
LOGGER = logging.getLogger(__name__) # Every log will use the module name
LOGGER.addHandler(logging.StreamHandler())
LOGGER.setLevel(logging.DEBUG)
random_seed(123) # Reproducibility
class Configuration(object):
"""Dump stuff here"""
CONFIG = Configuration()
#pylint:disable=attribute-defined-outside-init
# Parameters for the model:
CONFIG.input_layers = 2
CONFIG.output_layers = 2
CONFIG.amount_of_dropout = 0.2
CONFIG.hidden_size = 500
CONFIG.initialization = "he_normal" # : Gaussian initialization scaled by fan-in (He et al., 2014)
CONFIG.number_of_chars = 100
CONFIG.max_input_len = 60
CONFIG.inverted = True
# parameters for the training:
CONFIG.batch_size = 100 # As the model changes in size, play with the batch size to best fit the process in memory
CONFIG.epochs = 500 # due to mini-epochs.
CONFIG.steps_per_epoch = 1000 # This is a mini-epoch. Using News 2013 an epoch would need to be ~60K.
CONFIG.validation_steps = 10
CONFIG.number_of_iterations = 10
#pylint:enable=attribute-defined-outside-init
DIGEST = sha256(json.dumps(CONFIG.__dict__, sort_keys=True)).hexdigest()
# Parameters for the dataset
MIN_INPUT_LEN = 5
AMOUNT_OF_NOISE = 0.2 / CONFIG.max_input_len
CHARS = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .")
PADDING = "☕"
DATA_FILES_PATH = "~/Downloads/data"
DATA_FILES_FULL_PATH = os.path.expanduser(DATA_FILES_PATH)
DATA_FILES_URL = "http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2013.en.shuffled.gz"
NEWS_FILE_NAME_COMPRESSED = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.shuffled.gz") # 1.1 GB
NEWS_FILE_NAME_ENGLISH = "news.2013.en.shuffled"
NEWS_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, NEWS_FILE_NAME_ENGLISH)
NEWS_FILE_NAME_CLEAN = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.clean")
NEWS_FILE_NAME_FILTERED = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.filtered")
NEWS_FILE_NAME_SPLIT = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.split")
NEWS_FILE_NAME_TRAIN = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.train")
NEWS_FILE_NAME_VALIDATE = os.path.join(DATA_FILES_FULL_PATH, "news.2013.en.validate")
CHAR_FREQUENCY_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, "char_frequency.json")
SAVED_MODEL_FILE_NAME = os.path.join(DATA_FILES_FULL_PATH, "keras_spell_e{}.h5") # an HDF5 file
# Some cleanup:
NORMALIZE_WHITESPACE_REGEX = re.compile(r'[^\S\n]+', re.UNICODE) # match all whitespace except newlines
RE_DASH_FILTER = re.compile(r'[\-\˗\֊\‐\‑\‒\–\—\⁻\₋\−\﹣\-]', re.UNICODE)
RE_APOSTROPHE_FILTER = re.compile(r"[ʼ՚'‘’‛❛❜ߴߵ`‵´ˊˋ{}{}{}{}{}{}{}{}{}]".format(unichr(768), unichr(769), unichr(832),
                                                                                unichr(833), unichr(2387), unichr(5151),
                                                                                unichr(5152), unichr(65344), unichr(8242)),
                                  re.UNICODE)
RE_LEFT_PARENTH_FILTER = re.compile(r'[\(\[\{\⁽\₍\❨\❪\﹙\(]', re.UNICODE)
RE_RIGHT_PARENTH_FILTER = re.compile(r'[\)\]\}\⁾\₎\❩\❫\﹚\)]', re.UNICODE)
ALLOWED_CURRENCIES = """¥£₪$€฿₨"""
ALLOWED_PUNCTUATION = """-!?/;"'%&<>.()[]{}@#:,|=*"""
RE_BASIC_CLEANER = re.compile(r'[^\w\s{}{}]'.format(re.escape(ALLOWED_CURRENCIES), re.escape(ALLOWED_PUNCTUATION)), re.UNICODE)
# pylint:disable=invalid-name
def download_the_news_data():
"""Download the news data"""
LOGGER.info("Downloading")
try:
os.makedirs(os.path.dirname(NEWS_FILE_NAME_COMPRESSED))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
with open(NEWS_FILE_NAME_COMPRESSED, "wb") as output_file:
response = requests.get(DATA_FILES_URL, stream=True)
total_length = response.headers.get('content-length')
downloaded = percentage = 0
print("»"*100)
total_length = int(total_length)
for data in response.iter_content(chunk_size=4096):
downloaded += len(data)
output_file.write(data)
new_percentage = 100 * downloaded // total_length
if new_percentage > percentage:
print("☑", end="")
percentage = new_percentage
print()
def uncompress_data():
"""Uncompress the data files"""
import gzip
with gzip.open(NEWS_FILE_NAME_COMPRESSED, 'rb') as compressed_file:
with open(NEWS_FILE_NAME_COMPRESSED[:-3], 'wb') as outfile:
outfile.write(compressed_file.read())
def add_noise_to_string(a_string, amount_of_noise):
"""Add some artificial spelling mistakes to the string"""
if rand() < amount_of_noise * len(a_string):
# Replace a character with a random character
random_char_position = random_randint(len(a_string))
a_string = a_string[:random_char_position] + random_choice(CHARS[:-1]) + a_string[random_char_position + 1:]
if rand() < amount_of_noise * len(a_string):
# Delete a character
random_char_position = random_randint(len(a_string))
a_string = a_string[:random_char_position] + a_string[random_char_position + 1:]
if len(a_string) < CONFIG.max_input_len and rand() < amount_of_noise * len(a_string):
# Add a random character
random_char_position = random_randint(len(a_string))
a_string = a_string[:random_char_position] + random_choice(CHARS[:-1]) + a_string[random_char_position:]
if rand() < amount_of_noise * len(a_string):
# Transpose 2 characters
random_char_position = random_randint(len(a_string) - 1)
a_string = (a_string[:random_char_position] + a_string[random_char_position + 1] + a_string[random_char_position] +
a_string[random_char_position + 2:])
return a_string
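# A rough sketch of how the noise model behaves: each of the four corruptions
# (substitute, delete, insert, transpose) fires with probability
# amount_of_noise * len(a_string), i.e. roughly 20% each for a 60-character line
# with the default AMOUNT_OF_NOISE, so a line picks up a bit under one edit on
# average. The helper below is illustrative only and is not used by training.
def _demo_add_noise(sample="This is a clean sentence for the speller."):
    """Print a few noisy variants of `sample` to eyeball the corruption rate."""
    for _ in range(3):
        print(add_noise_to_string(sample, AMOUNT_OF_NOISE))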
def _vectorize(questions, answers, ctable):
"""Vectorize the data as numpy arrays"""
len_of_questions = len(questions)
X = np_zeros((len_of_questions, CONFIG.max_input_len, ctable.size), dtype=np.bool)
for i in xrange(len(questions)):
sentence = questions.pop()
for j, c in enumerate(sentence):
try:
X[i, j, ctable.char_indices[c]] = 1
except KeyError:
pass # Padding
y = np_zeros((len_of_questions, CONFIG.max_input_len, ctable.size), dtype=np.bool)
for i in xrange(len(answers)):
sentence = answers.pop()
for j, c in enumerate(sentence):
try:
y[i, j, ctable.char_indices[c]] = 1
except KeyError:
pass # Padding
return X, y
def slice_X(X, start=None, stop=None):
"""This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
        - [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
"""
if isinstance(X, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
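# Quick illustration of slice_X (values are hypothetical): a single array is just
# sliced, a list of arrays gets the same slice applied to each element, and an
# index array selects rows:
#
#     slice_X(np.arange(10), 2, 5)                       # -> array([2, 3, 4])
#     slice_X([np.arange(10), np.arange(10) * 2], 0, 3)  # -> [array([0, 1, 2]), array([0, 2, 4])]
#     slice_X(np.arange(10), np.array([1, 3, 5]))        # -> array([1, 3, 5])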
def vectorize(questions, answers, chars=None):
"""Vectorize the questions and expected answers"""
print('Vectorization...')
chars = chars or CHARS
ctable = CharacterTable(chars)
X, y = _vectorize(questions, answers, ctable)
# Explicitly set apart 10% for validation data that we never train over
split_at = int(len(X) - len(X) / 10)
(X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at))
(y_train, y_val) = (y[:split_at], y[split_at:])
print(X_train.shape)
print(y_train.shape)
return X_train, X_val, y_train, y_val, CONFIG.max_input_len, ctable
def generate_model(output_len, chars=None):
"""Generate the model"""
print('Build model...')
chars = chars or CHARS
model = Sequential()
# "Encode" the input sequence using an RNN, producing an output of hidden_size
# note: in a situation where your input sequences have a variable length,
# use input_shape=(None, nb_feature).
for layer_number in range(CONFIG.input_layers):
model.add(recurrent.LSTM(CONFIG.hidden_size, input_shape=(None, len(chars)), kernel_initializer=CONFIG.initialization,
return_sequences=layer_number + 1 < CONFIG.input_layers))
model.add(Dropout(CONFIG.amount_of_dropout))
# For the decoder's input, we repeat the encoded input for each time step
model.add(RepeatVector(output_len))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in range(CONFIG.output_layers):
model.add(recurrent.LSTM(CONFIG.hidden_size, return_sequences=True, kernel_initializer=CONFIG.initialization))
model.add(Dropout(CONFIG.amount_of_dropout))
    # For each step of the output sequence, decide which character should be chosen
model.add(TimeDistributed(Dense(len(chars), kernel_initializer=CONFIG.initialization)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
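# Shape walk-through for the model built above (illustrative, default CONFIG):
#   input                              (batch, time, n_chars)    one-hot characters
#   encoder LSTM x input_layers        (batch, hidden_size)      only the last layer drops the time axis
#   RepeatVector(output_len)           (batch, output_len, hidden_size)
#   decoder LSTM x output_layers       (batch, output_len, hidden_size)
#   TimeDistributed(Dense) + softmax   (batch, output_len, n_chars)  per-step character distribution
#
#     model = generate_model(CONFIG.max_input_len, chars=CHARS)
#     model.summary()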
class Colors(object):
"""For nicer printouts"""
green = '\033[92m'
red = '\033[91m'
close = '\033[0m'
class CharacterTable(object):
"""
Given a set of characters:
+ Encode them to a one hot integer representation
+ Decode the one hot integer representation to their character output
+ Decode a vector of probabilities to their character output
"""
def __init__(self, chars):
self.chars = sorted(set(chars))
self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
@property
def size(self):
"""The number of chars"""
return len(self.chars)
def encode(self, C, maxlen):
"""Encode as one-hot"""
X = np_zeros((maxlen, len(self.chars)), dtype=np.bool) # pylint:disable=no-member
for i, c in enumerate(C):
X[i, self.char_indices[c]] = 1
return X
def decode(self, X, calc_argmax=True):
"""Decode from one-hot"""
if calc_argmax:
X = X.argmax(axis=-1)
return ''.join(self.indices_char[x] for x in X if x)
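# Minimal round-trip sketch for CharacterTable (illustrative helper, mirroring how
# _vectorize and print_random_predictions use it). Note that decode() skips index 0
# (the character that sorts first, here the space), so the sample avoids spaces.
def _demo_character_table(text="hello"):
    ctable = CharacterTable(CHARS)
    one_hot = ctable.encode(text, maxlen=len(text))  # (len(text), ctable.size) boolean matrix
    assert ctable.decode(one_hot) == text            # argmax undoes the one-hot encoding
    return one_hot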
def generator(file_name):
"""Returns a tuple (inputs, targets)
All arrays should contain the same number of samples.
The generator is expected to loop over its data indefinitely.
An epoch finishes when samples_per_epoch samples have been seen by the model.
"""
ctable = CharacterTable(read_top_chars())
batch_of_answers = []
while True:
with open(file_name) as answers:
for answer in answers:
batch_of_answers.append(answer.strip().decode('utf-8'))
if len(batch_of_answers) == CONFIG.batch_size:
random_shuffle(batch_of_answers)
batch_of_questions = []
for answer_index, answer in enumerate(batch_of_answers):
question, answer = generate_question(answer)
batch_of_answers[answer_index] = answer
assert len(answer) == CONFIG.max_input_len
question = question[::-1] if CONFIG.inverted else question
batch_of_questions.append(question)
X, y = _vectorize(batch_of_questions, batch_of_answers, ctable)
yield X, y
batch_of_answers = []
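# The generator above is meant to be handed straight to Keras' fit_generator
# (see iterative_train below); pulling a single batch by hand looks like this:
#
#     X, y = next(generator(NEWS_FILE_NAME_TRAIN))
#     # X.shape == y.shape == (CONFIG.batch_size, CONFIG.max_input_len, n_top_chars)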
def print_random_predictions(model, ctable, X_val, y_val):
"""Select 10 samples from the validation set at random so we can visualize errors"""
print()
for _ in range(10):
ind = random_randint(0, len(X_val))
rowX, rowy = X_val[np.array([ind])], y_val[np.array([ind])] # pylint:disable=no-member
preds = model.predict_classes(rowX, verbose=0)
q = ctable.decode(rowX[0])
correct = ctable.decode(rowy[0])
guess = ctable.decode(preds[0], calc_argmax=False)
if CONFIG.inverted:
print('Q', q[::-1]) # inverted back!
else:
print('Q', q)
print('A', correct)
print(Colors.green + '☑' + Colors.close if correct == guess else Colors.red + '☒' + Colors.close, guess)
print('---')
print()
class OnEpochEndCallback(Callback):
"""Execute this every end of epoch"""
def on_epoch_end(self, epoch, logs=None):
"""On Epoch end - do some stats"""
ctable = CharacterTable(read_top_chars())
X_val, y_val = next(generator(NEWS_FILE_NAME_VALIDATE))
print_random_predictions(self.model, ctable, X_val, y_val)
self.model.save(SAVED_MODEL_FILE_NAME.format(epoch))
ON_EPOCH_END_CALLBACK = OnEpochEndCallback()
def iterative_train(model):
"""
Iterative training of the model
- To allow for finite RAM...
    - To allow infinite training data as the training noise is injected at runtime
"""
model.fit_generator(generator(NEWS_FILE_NAME_TRAIN), steps_per_epoch=CONFIG.steps_per_epoch,
epochs=CONFIG.epochs,
verbose=1, callbacks=[ON_EPOCH_END_CALLBACK, ], validation_data=generator(NEWS_FILE_NAME_VALIDATE),
validation_steps=CONFIG.validation_steps,
class_weight=None, max_q_size=10, workers=1,
pickle_safe=False, initial_epoch=0)
def iterate_training(model, X_train, y_train, X_val, y_val, ctable):
"""Iterative Training"""
# Train the model each generation and show predictions against the validation dataset
for iteration in range(1, CONFIG.number_of_iterations):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X_train, y_train, batch_size=CONFIG.batch_size, epochs=CONFIG.epochs,
validation_data=(X_val, y_val))
print_random_predictions(model, ctable, X_val, y_val)
def clean_text(text):
"""Clean the text - remove unwanted chars, fold punctuation etc."""
result = NORMALIZE_WHITESPACE_REGEX.sub(' ', text.strip())
result = RE_DASH_FILTER.sub('-', result)
result = RE_APOSTROPHE_FILTER.sub("'", result)
result = RE_LEFT_PARENTH_FILTER.sub("(", result)
result = RE_RIGHT_PARENTH_FILTER.sub(")", result)
result = RE_BASIC_CLEANER.sub('', result)
return result
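# Example of what the cleanup folds together (the input string is made up):
#
#     clean_text("Don’t  worry — it's fine [really]")
#     # -> "Don't worry - it's fine (really)"
#     # curly apostrophes become ', dash variants become -, runs of spaces collapse,
#     # bracket variants become ASCII parentheses, and disallowed symbols are stripped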
def preprocesses_data_clean():
"""Pre-process the data - step 1 - cleanup"""
with open(NEWS_FILE_NAME_CLEAN, "wb") as clean_data:
for line in open(NEWS_FILE_NAME):
decoded_line = line.decode('utf-8')
cleaned_line = clean_text(decoded_line)
encoded_line = cleaned_line.encode("utf-8")
clean_data.write(encoded_line + b"\n")
def preprocesses_data_analyze_chars():
"""Pre-process the data - step 2 - analyze the characters"""
counter = Counter()
LOGGER.info("Reading data:")
for line in open(NEWS_FILE_NAME_CLEAN):
decoded_line = line.decode('utf-8')
counter.update(decoded_line)
# data = open(NEWS_FILE_NAME_CLEAN).read().decode('utf-8')
# LOGGER.info("Read.\nCounting characters:")
# counter = Counter(data.replace("\n", ""))
LOGGER.info("Done.\nWriting to file:")
with open(CHAR_FREQUENCY_FILE_NAME, 'wb') as output_file:
output_file.write(json.dumps(counter))
most_popular_chars = {key for key, _value in counter.most_common(CONFIG.number_of_chars)}
LOGGER.info("The top %s chars are:", CONFIG.number_of_chars)
LOGGER.info("".join(sorted(most_popular_chars)))
def read_top_chars():
"""Read the top chars we saved to file"""
chars = json.loads(open(CHAR_FREQUENCY_FILE_NAME).read())
counter = Counter(chars)
most_popular_chars = {key for key, _value in counter.most_common(CONFIG.number_of_chars)}
return most_popular_chars
def preprocesses_data_filter():
"""Pre-process the data - step 3 - filter only sentences with the right chars"""
most_popular_chars = read_top_chars()
LOGGER.info("Reading and filtering data:")
with open(NEWS_FILE_NAME_FILTERED, "wb") as output_file:
for line in open(NEWS_FILE_NAME_CLEAN):
decoded_line = line.decode('utf-8')
if decoded_line and not bool(set(decoded_line) - most_popular_chars):
output_file.write(line)
LOGGER.info("Done.")
def read_filtered_data():
"""Read the filtered data corpus"""
LOGGER.info("Reading filtered data:")
lines = open(NEWS_FILE_NAME_FILTERED).read().decode('utf-8').split("\n")
LOGGER.info("Read filtered data - %s lines", len(lines))
return lines
def preprocesses_split_lines():
"""Preprocess the text by splitting the lines between min-length and max_length
I don't like this step:
I think the start-of-sentence is important.
I think the end-of-sentence is important.
Sometimes the stripped down sub-sentence is missing crucial context.
Important NGRAMs are cut (though given enough data, that might be moot).
I do this to enable batch-learning by padding to a fixed length.
"""
LOGGER.info("Reading filtered data:")
answers = set()
with open(NEWS_FILE_NAME_SPLIT, "wb") as output_file:
for _line in open(NEWS_FILE_NAME_FILTERED):
line = _line.decode('utf-8')
while len(line) > MIN_INPUT_LEN:
if len(line) <= CONFIG.max_input_len:
answer = line
line = ""
else:
space_location = line.rfind(" ", MIN_INPUT_LEN, CONFIG.max_input_len - 1)
if space_location > -1:
answer = line[:space_location]
line = line[len(answer) + 1:]
else:
space_location = line.rfind(" ") # no limits this time
if space_location == -1:
break # we are done with this line
else:
line = line[space_location + 1:]
continue
answers.add(answer)
output_file.write(answer.encode('utf-8') + b"\n")
def preprocesses_split_lines2():
"""Preprocess the text by splitting the lines between min-length and max_length
Alternative split.
"""
LOGGER.info("Reading filtered data:")
answers = set()
for encoded_line in open(NEWS_FILE_NAME_FILTERED):
line = encoded_line.decode('utf-8')
if CONFIG.max_input_len >= len(line) > MIN_INPUT_LEN:
answers.add(line)
LOGGER.info("There are %s 'answers' (sub-sentences)", len(answers))
LOGGER.info("Here are some examples:")
for answer in itertools.islice(answers, 10):
LOGGER.info(answer)
with open(NEWS_FILE_NAME_SPLIT, "wb") as output_file:
output_file.write("".join(answers).encode('utf-8'))
def preprocesses_split_lines3():
"""Preprocess the text by selecting only max n-grams
Alternative split.
"""
LOGGER.info("Reading filtered data:")
answers = set()
for encoded_line in open(NEWS_FILE_NAME_FILTERED):
line = encoded_line.decode('utf-8')
if line.count(" ") < 5:
answers.add(line)
LOGGER.info("There are %s 'answers' (sub-sentences)", len(answers))
LOGGER.info("Here are some examples:")
for answer in itertools.islice(answers, 10):
LOGGER.info(answer)
with open(NEWS_FILE_NAME_SPLIT, "wb") as output_file:
output_file.write("".join(answers).encode('utf-8'))
def preprocesses_split_lines4():
"""Preprocess the text by selecting only sentences with most-common words AND not too long
Alternative split.
"""
LOGGER.info("Reading filtered data:")
from gensim.models.word2vec import Word2Vec
FILTERED_W2V = "fw2v.bin"
model = Word2Vec.load_word2vec_format(FILTERED_W2V, binary=True) # C text format
print(len(model.wv.index2word))
# answers = set()
# for encoded_line in open(NEWS_FILE_NAME_FILTERED):
# line = encoded_line.decode('utf-8')
# if line.count(" ") < 5:
# answers.add(line)
# LOGGER.info("There are %s 'answers' (sub-sentences)", len(answers))
# LOGGER.info("Here are some examples:")
# for answer in itertools.islice(answers, 10):
# LOGGER.info(answer)
# with open(NEWS_FILE_NAME_SPLIT, "wb") as output_file:
# output_file.write("".join(answers).encode('utf-8'))
def preprocess_partition_data():
"""Set asside data for validation"""
answers = open(NEWS_FILE_NAME_SPLIT).read().decode('utf-8').split("\n")
print('shuffle', end=" ")
random_shuffle(answers)
print("Done")
# Explicitly set apart 10% for validation data that we never train over
split_at = len(answers) - len(answers) // 10
with open(NEWS_FILE_NAME_TRAIN, "wb") as output_file:
output_file.write("\n".join(answers[:split_at]).encode('utf-8'))
with open(NEWS_FILE_NAME_VALIDATE, "wb") as output_file:
output_file.write("\n".join(answers[split_at:]).encode('utf-8'))
def generate_question(answer):
"""Generate a question by adding noise"""
question = add_noise_to_string(answer, AMOUNT_OF_NOISE)
# Add padding:
question += PADDING * (CONFIG.max_input_len - len(question))
answer += PADDING * (CONFIG.max_input_len - len(answer))
return question, answer
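# What one padded training pair looks like (the sentence is an arbitrary example):
#
#     question, answer = generate_question("The cat sat on the mat")
#     # answer   == "The cat sat on the mat" + PADDING * 38   (padded to max_input_len == 60)
#     # question == the same text with a few random edits, padded to 60 characters
#     # (generate_news_data / generator then reverse `question` when CONFIG.inverted is True)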
def generate_news_data():
"""Generate some news data"""
print ("Generating Data")
answers = open(NEWS_FILE_NAME_SPLIT).read().decode('utf-8').split("\n")
questions = []
print('shuffle', end=" ")
random_shuffle(answers)
print("Done")
for answer_index, answer in enumerate(answers):
question, answer = generate_question(answer)
answers[answer_index] = answer
assert len(answer) == CONFIG.max_input_len
if random_randint(100000) == 8: # Show some progress
print (len(answers))
print ("answer: '{}'".format(answer))
print ("question: '{}'".format(question))
print ()
question = question[::-1] if CONFIG.inverted else question
questions.append(question)
return questions, answers
def train_speller_w_all_data():
"""Train the speller if all data fits into RAM"""
questions, answers = generate_news_data()
chars_answer = set.union(*(set(answer) for answer in answers))
chars_question = set.union(*(set(question) for question in questions))
chars = list(set.union(chars_answer, chars_question))
X_train, X_val, y_train, y_val, y_maxlen, ctable = vectorize(questions, answers, chars)
print ("y_maxlen, chars", y_maxlen, "".join(chars))
model = generate_model(y_maxlen, chars)
iterate_training(model, X_train, y_train, X_val, y_val, ctable)
def train_speller(from_file=None):
"""Train the speller"""
if from_file:
model = load_model(from_file)
else:
model = generate_model(CONFIG.max_input_len, chars=read_top_chars())
    iterative_train(model)
if __name__ == '__main__':
# download_the_news_data()
# uncompress_data()
# preprocesses_data_clean()
# preprocesses_data_analyze_chars()
# preprocesses_data_filter()
# preprocesses_split_lines() --- Choose this step or:
# preprocesses_split_lines2()
# preprocesses_split_lines4()
# preprocess_partition_data()
# train_speller(os.path.join(DATA_FILES_FULL_PATH, "keras_spell_e15.h5"))
train_speller()
|
MajorTal/DeepSpell
|
keras_spell.py
|
Python
|
mit
| 24,924
|
[
"Gaussian"
] |
f46d65c5fdbb66c1e9f12d910226c413498dd23abd89b7ac8de8f5e659ec9383
|
#!/usr/bin/env python
import sys
import os
import PyQt4.QtGui
import vtk
from vtk.test import Testing
class TestvtkQtTableView(Testing.vtkTest):
def testvtkQtTableView(self):
sphereSource = vtk.vtkSphereSource()
tableConverter = vtk.vtkDataObjectToTable()
tableConverter.SetInput(sphereSource.GetOutput())
tableConverter.SetFieldType(1)
tableConverter.Update()
pointTable = tableConverter.GetOutput();
tableView = vtk.vtkQtTableView()
tableView.SetSplitMultiComponentColumns(1);
tableView.AddRepresentationFromInput(pointTable);
tableView.Update();
w = tableView.GetWidget()
w.show();
if Testing.isInteractive():
PyQt4.QtGui.qApp.exec_()
if __name__ == "__main__":
app = PyQt4.QtGui.QApplication(sys.argv)
Testing.main([(TestvtkQtTableView, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/GUISupport/Qt/Testing/Python/TestvtkQtTableView.py
|
Python
|
gpl-3.0
| 817
|
[
"VTK"
] |
95aa2081b2fec025fa0622583dfca1f8da171106f156fdcafcc8665017e7868c
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Matti Hämäläinen <msh@nmr.mgh.harvard.edu>
# Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
from copy import deepcopy
from distutils.version import LooseVersion
import itertools as itt
from math import log
import os
import numpy as np
from .defaults import _EXTRAPOLATE_DEFAULT, _BORDER_DEFAULT, DEFAULTS
from .io.write import start_file, end_file
from .io.proj import (make_projector, _proj_equal, activate_proj,
_check_projs, _needs_eeg_average_ref_proj,
_has_eeg_average_ref_proj, _read_proj, _write_proj)
from .io import fiff_open, RawArray
from .io.pick import (pick_types, pick_channels_cov, pick_channels, pick_info,
_picks_by_type, _pick_data_channels, _picks_to_idx,
_DATA_CH_TYPES_SPLIT)
from .io.constants import FIFF
from .io.meas_info import _read_bad_channels, create_info
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.write import (start_block, end_block, write_int, write_name_list,
write_double, write_float_matrix, write_string)
from .defaults import _handle_default
from .epochs import Epochs
from .event import make_fixed_length_events
from .evoked import EvokedArray
from .rank import compute_rank
from .utils import (check_fname, logger, verbose, check_version, _time_mask,
warn, copy_function_doc_to_method_doc, _pl,
_undo_scaling_cov, _scaled_array, _validate_type,
_check_option, eigh, fill_doc, _on_missing,
_check_on_missing)
from . import viz
from .fixes import (BaseEstimator, EmpiricalCovariance, _logdet,
empirical_covariance, log_likelihood)
def _check_covs_algebra(cov1, cov2):
if cov1.ch_names != cov2.ch_names:
raise ValueError('Both Covariance do not have the same list of '
'channels.')
projs1 = [str(c) for c in cov1['projs']]
    projs2 = [str(c) for c in cov2['projs']]
if projs1 != projs2:
raise ValueError('Both Covariance do not have the same list of '
'SSP projections.')
def _get_tslice(epochs, tmin, tmax):
"""Get the slice."""
mask = _time_mask(epochs.times, tmin, tmax, sfreq=epochs.info['sfreq'])
tstart = np.where(mask)[0][0] if tmin is not None else None
tend = np.where(mask)[0][-1] + 1 if tmax is not None else None
tslice = slice(tstart, tend, None)
return tslice
@fill_doc
class Covariance(dict):
"""Noise covariance matrix.
.. warning:: This class should not be instantiated directly, but
instead should be created using a covariance reading or
computation function.
Parameters
----------
data : array-like
The data.
names : list of str
Channel names.
bads : list of str
Bad channels.
projs : list
Projection vectors.
nfree : int
Degrees of freedom.
eig : array-like | None
Eigenvalues.
eigvec : array-like | None
Eigenvectors.
method : str | None
The method used to compute the covariance.
loglik : float
The log likelihood.
%(verbose_meth)s
Attributes
----------
data : array of shape (n_channels, n_channels)
The covariance.
ch_names : list of str
List of channels' names.
nfree : int
Number of degrees of freedom i.e. number of time points used.
dim : int
The number of channels ``n_channels``.
See Also
--------
compute_covariance
compute_raw_covariance
make_ad_hoc_cov
read_cov
"""
def __init__(self, data, names, bads, projs, nfree, eig=None, eigvec=None,
method=None, loglik=None, verbose=None):
"""Init of covariance."""
diag = (data.ndim == 1)
projs = _check_projs(projs)
self.update(data=data, dim=len(data), names=names, bads=bads,
nfree=nfree, eig=eig, eigvec=eigvec, diag=diag,
projs=projs, kind=FIFF.FIFFV_MNE_NOISE_COV)
if method is not None:
self['method'] = method
if loglik is not None:
self['loglik'] = loglik
self.verbose = verbose
@property
def data(self):
"""Numpy array of Noise covariance matrix."""
return self['data']
@property
def ch_names(self):
"""Channel names."""
return self['names']
@property
def nfree(self):
"""Number of degrees of freedom."""
return self['nfree']
def save(self, fname):
"""Save covariance matrix in a FIF file.
Parameters
----------
fname : str
Output filename.
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz',
'_cov.fif', '_cov.fif.gz'))
fid = start_file(fname)
try:
_write_cov(fid, self)
except Exception:
fid.close()
os.remove(fname)
raise
end_file(fid)
def copy(self):
"""Copy the Covariance object.
Returns
-------
cov : instance of Covariance
The copied object.
"""
return deepcopy(self)
def as_diag(self):
"""Set covariance to be processed as being diagonal.
Returns
-------
cov : dict
The covariance.
Notes
-----
This function allows creation of inverse operators
equivalent to using the old "--diagnoise" mne option.
This function operates in place.
"""
if self['diag']:
return self
self['diag'] = True
self['data'] = np.diag(self['data'])
self['eig'] = None
self['eigvec'] = None
return self
def _as_square(self):
# This is a hack but it works because np.diag() behaves nicely
if self['diag']:
self['diag'] = False
self.as_diag()
self['diag'] = False
return self
def _get_square(self):
if self['diag'] != (self.data.ndim == 1):
raise RuntimeError(
'Covariance attributes inconsistent, got data with '
'dimensionality %d but diag=%s'
% (self.data.ndim, self['diag']))
return np.diag(self.data) if self['diag'] else self.data.copy()
def __repr__(self): # noqa: D105
if self.data.ndim == 2:
s = 'size : %s x %s' % self.data.shape
else: # ndim == 1
s = 'diagonal : %s' % self.data.size
s += ", n_samples : %s" % self.nfree
s += ", data : %s" % self.data
return "<Covariance | %s>" % s
def __add__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
this_cov = cov.copy()
this_cov['data'] = (((this_cov['data'] * this_cov['nfree']) +
(self['data'] * self['nfree'])) /
(self['nfree'] + this_cov['nfree']))
this_cov['nfree'] += self['nfree']
this_cov['bads'] = list(set(this_cov['bads']).union(self['bads']))
return this_cov
def __iadd__(self, cov):
"""Add Covariance taking into account number of degrees of freedom."""
_check_covs_algebra(self, cov)
self['data'][:] = (((self['data'] * self['nfree']) +
(cov['data'] * cov['nfree'])) /
(self['nfree'] + cov['nfree']))
self['nfree'] += cov['nfree']
self['bads'] = list(set(self['bads']).union(cov['bads']))
return self
@verbose
@copy_function_doc_to_method_doc(viz.misc.plot_cov)
def plot(self, info, exclude=[], colorbar=True, proj=False, show_svd=True,
show=True, verbose=None):
return viz.misc.plot_cov(self, info, exclude, colorbar, proj, show_svd,
show, verbose)
@verbose
def plot_topomap(self, info, ch_type=None, vmin=None,
vmax=None, cmap=None, sensors=True, colorbar=True,
scalings=None, units=None, res=64,
size=1, cbar_fmt="%3.1f",
proj=False, show=True, show_names=False, title=None,
mask=None, mask_params=None, outlines='head',
contours=6, image_interp='bilinear',
axes=None, extrapolate=_EXTRAPOLATE_DEFAULT, sphere=None,
border=_BORDER_DEFAULT,
noise_cov=None, verbose=None):
"""Plot a topomap of the covariance diagonal.
Parameters
----------
info : instance of Info
The measurement information.
%(topomap_ch_type)s
%(topomap_vmin_vmax)s
%(topomap_cmap)s
%(topomap_sensors)s
%(topomap_colorbar)s
%(topomap_scalings)s
%(topomap_units)s
%(topomap_res)s
%(topomap_size)s
%(topomap_cbar_fmt)s
%(plot_proj)s
%(show)s
%(topomap_show_names)s
%(title_None)s
%(topomap_mask)s
%(topomap_mask_params)s
%(topomap_outlines)s
%(topomap_contours)s
%(topomap_image_interp)s
%(topomap_axes)s
%(topomap_extrapolate)s
%(topomap_sphere_auto)s
%(topomap_border)s
noise_cov : instance of Covariance | None
If not None, whiten the instance with ``noise_cov`` before
plotting.
%(verbose)s
Returns
-------
fig : instance of Figure
The matplotlib figure.
Notes
-----
.. versionadded:: 0.21
"""
from .viz.misc import _index_info_cov
info, C, _, _ = _index_info_cov(info, self, exclude=())
evoked = EvokedArray(np.diag(C)[:, np.newaxis], info)
if noise_cov is not None:
# need to left and right multiply whitener, which for the diagonal
# entries is the same as multiplying twice
evoked = whiten_evoked(whiten_evoked(evoked, noise_cov), noise_cov)
if units is None:
units = 'AU'
if scalings is None:
scalings = 1.
if units is None:
units = {k: f'({v})²' for k, v in DEFAULTS['units'].items()}
if scalings is None:
scalings = {k: v * v for k, v in DEFAULTS['scalings'].items()}
return evoked.plot_topomap(
times=[0], ch_type=ch_type, vmin=vmin, vmax=vmax, cmap=cmap,
sensors=sensors, colorbar=colorbar, scalings=scalings,
units=units, res=res, size=size, cbar_fmt=cbar_fmt,
proj=proj, show=show, show_names=show_names, title=title,
mask=mask, mask_params=mask_params, outlines=outlines,
contours=contours, image_interp=image_interp, axes=axes,
extrapolate=extrapolate, sphere=sphere, border=border,
time_format='')
def pick_channels(self, ch_names, ordered=False):
"""Pick channels from this covariance matrix.
Parameters
----------
ch_names : list of str
List of channels to keep. All other channels are dropped.
ordered : bool
If True (default False), ensure that the order of the channels
matches the order of ``ch_names``.
Returns
-------
cov : instance of Covariance.
The modified covariance matrix.
Notes
-----
Operates in-place.
.. versionadded:: 0.20.0
"""
return pick_channels_cov(self, ch_names, exclude=[], ordered=ordered,
copy=False)
###############################################################################
# IO
@verbose
def read_cov(fname, verbose=None):
"""Read a noise covariance from a FIF file.
Parameters
----------
fname : str
The name of file containing the covariance matrix. It should end with
-cov.fif or -cov.fif.gz.
%(verbose)s
Returns
-------
cov : Covariance
The noise covariance matrix.
See Also
--------
write_cov, compute_covariance, compute_raw_covariance
"""
check_fname(fname, 'covariance', ('-cov.fif', '-cov.fif.gz',
'_cov.fif', '_cov.fif.gz'))
f, tree = fiff_open(fname)[:2]
with f as fid:
return Covariance(**_read_cov(fid, tree, FIFF.FIFFV_MNE_NOISE_COV,
limited=True))
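# Typical round trip (the file name is illustrative):
#
#     cov = read_cov('sample_audvis-cov.fif')
#     cov.save('sample_audvis-copy-cov.fif')   # must end in -cov.fif(.gz) or _cov.fif(.gz)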
###############################################################################
# Estimate from data
@verbose
def make_ad_hoc_cov(info, std=None, verbose=None):
"""Create an ad hoc noise covariance.
Parameters
----------
info : instance of Info
Measurement info.
std : dict of float | None
        Standard deviation of the diagonal elements. If dict, keys should be
``'grad'`` for gradiometers, ``'mag'`` for magnetometers and ``'eeg'``
for EEG channels. If None, default values will be used (see Notes).
%(verbose)s
Returns
-------
cov : instance of Covariance
The ad hoc diagonal noise covariance for the M/EEG data channels.
Notes
-----
The default noise values are 5 fT/cm, 20 fT, and 0.2 µV for gradiometers,
magnetometers, and EEG channels respectively.
.. versionadded:: 0.9.0
"""
picks = pick_types(info, meg=True, eeg=True, exclude=())
std = _handle_default('noise_std', std)
data = np.zeros(len(picks))
for meg, eeg, val in zip(('grad', 'mag', False), (False, False, True),
(std['grad'], std['mag'], std['eeg'])):
these_picks = pick_types(info, meg=meg, eeg=eeg)
data[np.searchsorted(picks, these_picks)] = val * val
ch_names = [info['ch_names'][pick] for pick in picks]
return Covariance(data, ch_names, info['bads'], info['projs'], nfree=0)
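# Minimal usage sketch (the file name is illustrative; any Info works):
#
#     info = mne.io.read_info('sample_audvis_raw.fif')
#     cov = make_ad_hoc_cov(info)                                            # default noise levels
#     cov = make_ad_hoc_cov(info, std=dict(grad=4e-13, mag=2e-14, eeg=1e-7))
#     # the diagonal stores variances, i.e. std ** 2 per channel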
def _check_n_samples(n_samples, n_chan):
"""Check to see if there are enough samples for reliable cov calc."""
n_samples_min = 10 * (n_chan + 1) // 2
if n_samples <= 0:
raise ValueError('No samples found to compute the covariance matrix')
if n_samples < n_samples_min:
warn('Too few samples (required : %d got : %d), covariance '
'estimate may be unreliable' % (n_samples_min, n_samples))
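# Rule of thumb encoded above: n_samples_min = 10 * (n_chan + 1) // 2, i.e. roughly
# five samples per channel. For example, 60 channels need at least
# 10 * 61 // 2 = 305 samples before the estimate is considered reliable.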
@verbose
def compute_raw_covariance(raw, tmin=0, tmax=None, tstep=0.2, reject=None,
flat=None, picks=None, method='empirical',
method_params=None, cv=3, scalings=None, n_jobs=1,
return_estimators=False, reject_by_annotation=True,
rank=None, verbose=None):
"""Estimate noise covariance matrix from a continuous segment of raw data.
It is typically useful to estimate a noise covariance from empty room
data or time intervals before starting the stimulation.
.. note:: To estimate the noise covariance from epoched data, use
:func:`mne.compute_covariance` instead.
Parameters
----------
raw : instance of Raw
Raw data.
tmin : float
Beginning of time interval in seconds. Defaults to 0.
tmax : float | None (default None)
End of time interval in seconds. If None (default), use the end of the
recording.
tstep : float (default 0.2)
Length of data chunks for artifact rejection in seconds.
Can also be None to use a single epoch of (tmax - tmin)
duration. This can use a lot of memory for large ``Raw``
instances.
reject : dict | None (default None)
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None (default None)
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
%(picks_good_data_noref)s
method : str | list | None (default 'empirical')
The method used for covariance estimation.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
method_params : dict | None (default None)
Additional parameters to the estimation procedure.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.12
cv : int | sklearn.model_selection object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger by default :class:`sklearn.model_selection.KFold`
with 3 splits.
.. versionadded:: 0.12
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale magnetometers and gradiometers
at the same unit.
.. versionadded:: 0.12
%(n_jobs)s
.. versionadded:: 0.12
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False.
.. versionadded:: 0.12
%(reject_by_annotation_epochs)s
.. versionadded:: 0.14
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
%(verbose)s
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_covariance : Estimate noise covariance matrix from epoched data.
Notes
-----
This function will:
1. Partition the data into evenly spaced, equal-length epochs.
2. Load them into memory.
3. Subtract the mean across all time points and epochs for each channel.
4. Process the :class:`Epochs` by :func:`compute_covariance`.
This will produce a slightly different result compared to using
:func:`make_fixed_length_events`, :class:`Epochs`, and
:func:`compute_covariance` directly, since that would (with the recommended
baseline correction) subtract the mean across time *for each epoch*
(instead of across epochs) for each channel.
"""
tmin = 0. if tmin is None else float(tmin)
dt = 1. / raw.info['sfreq']
tmax = raw.times[-1] + dt if tmax is None else float(tmax)
tstep = tmax - tmin if tstep is None else float(tstep)
tstep_m1 = tstep - dt # inclusive!
events = make_fixed_length_events(raw, 1, tmin, tmax, tstep)
logger.info('Using up to %s segment%s' % (len(events), _pl(events)))
# don't exclude any bad channels, inverses expect all channels present
if picks is None:
# Need to include all channels e.g. if eog rejection is to be used
picks = np.arange(raw.info['nchan'])
pick_mask = np.in1d(
picks, _pick_data_channels(raw.info, with_ref_meg=False))
else:
pick_mask = slice(None)
picks = _picks_to_idx(raw.info, picks)
epochs = Epochs(raw, events, 1, 0, tstep_m1, baseline=None,
picks=picks, reject=reject, flat=flat, verbose=False,
preload=False, proj=False,
reject_by_annotation=reject_by_annotation)
if method is None:
method = 'empirical'
if isinstance(method, str) and method == 'empirical':
# potentially *much* more memory efficient to do it the iterative way
picks = picks[pick_mask]
data = 0
n_samples = 0
mu = 0
# Read data in chunks
for raw_segment in epochs:
raw_segment = raw_segment[pick_mask]
mu += raw_segment.sum(axis=1)
data += np.dot(raw_segment, raw_segment.T)
n_samples += raw_segment.shape[1]
_check_n_samples(n_samples, len(picks))
data -= mu[:, None] * (mu[None, :] / n_samples)
data /= (n_samples - 1.0)
logger.info("Number of samples used : %d" % n_samples)
logger.info('[done]')
ch_names = [raw.info['ch_names'][k] for k in picks]
bads = [b for b in raw.info['bads'] if b in ch_names]
return Covariance(data, ch_names, bads, raw.info['projs'],
nfree=n_samples - 1)
del picks, pick_mask
# This makes it equivalent to what we used to do (and do above for
# empirical mode), treating all epochs as if they were a single long one
epochs.load_data()
ch_means = epochs._data.mean(axis=0).mean(axis=1)
epochs._data -= ch_means[np.newaxis, :, np.newaxis]
# fake this value so there are no complaints from compute_covariance
epochs.baseline = (None, None)
return compute_covariance(epochs, keep_sample_mean=True, method=method,
method_params=method_params, cv=cv,
scalings=scalings, n_jobs=n_jobs,
return_estimators=return_estimators,
rank=rank)
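# Minimal usage sketch (the file name is illustrative): estimate the noise
# covariance from an empty-room recording, optionally letting cross-validation
# pick among several regularized estimators:
#
#     raw_erm = mne.io.read_raw_fif('ernoise_raw.fif', preload=True)
#     cov = compute_raw_covariance(raw_erm)                  # plain empirical estimate
#     cov = compute_raw_covariance(raw_erm, method='auto')   # best estimator by log-likelihood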
def _check_method_params(method, method_params, keep_sample_mean=True,
name='method', allow_auto=True, rank=None):
"""Check that method and method_params are usable."""
accepted_methods = ('auto', 'empirical', 'diagonal_fixed', 'ledoit_wolf',
'oas', 'shrunk', 'pca', 'factor_analysis', 'shrinkage')
_method_params = {
'empirical': {'store_precision': False, 'assume_centered': True},
'diagonal_fixed': {'store_precision': False, 'assume_centered': True},
'ledoit_wolf': {'store_precision': False, 'assume_centered': True},
'oas': {'store_precision': False, 'assume_centered': True},
'shrinkage': {'shrinkage': 0.1, 'store_precision': False,
'assume_centered': True},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30),
'store_precision': False, 'assume_centered': True},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}
}
for ch_type in _DATA_CH_TYPES_SPLIT:
_method_params['diagonal_fixed'][ch_type] = 0.1
if isinstance(method_params, dict):
for key, values in method_params.items():
if key not in _method_params:
raise ValueError('key (%s) must be "%s"' %
(key, '" or "'.join(_method_params)))
_method_params[key].update(method_params[key])
shrinkage = method_params.get('shrinkage', {}).get('shrinkage', 0.1)
if not 0 <= shrinkage <= 1:
raise ValueError('shrinkage must be between 0 and 1, got %s'
% (shrinkage,))
was_auto = False
if method is None:
method = ['empirical']
elif method == 'auto' and allow_auto:
was_auto = True
method = ['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
if not isinstance(method, (list, tuple)):
method = [method]
if not all(k in accepted_methods for k in method):
raise ValueError(
'Invalid {name} ({method}). Accepted values (individually or '
'in a list) are any of "{accepted_methods}" or None.'.format(
name=name, method=method, accepted_methods=accepted_methods))
if not (isinstance(rank, str) and rank == 'full'):
if was_auto:
method.pop(method.index('factor_analysis'))
for method_ in method:
if method_ in ('pca', 'factor_analysis'):
raise ValueError('%s can so far only be used with rank="full",'
' got rank=%r' % (method_, rank))
if not keep_sample_mean:
if len(method) != 1 or 'empirical' not in method:
            raise ValueError('`keep_sample_mean=False` is only supported '
'with %s="empirical"' % (name,))
for p, v in _method_params.items():
if v.get('assume_centered', None) is False:
raise ValueError('`assume_centered` must be True'
' if `keep_sample_mean` is False')
return method, _method_params
@verbose
def compute_covariance(epochs, keep_sample_mean=True, tmin=None, tmax=None,
projs=None, method='empirical', method_params=None,
cv=3, scalings=None, n_jobs=1, return_estimators=False,
on_mismatch='raise', rank=None, verbose=None):
"""Estimate noise covariance matrix from epochs.
The noise covariance is typically estimated on pre-stimulus periods
when the stimulus onset is defined from events.
If the covariance is computed for multiple event types (events
with different IDs), the following two options can be used and combined:
1. either an Epochs object for each event type is created and
a list of Epochs is passed to this function.
2. an Epochs object is created for multiple events and passed
to this function.
.. note:: To estimate the noise covariance from non-epoched raw data, such
as an empty-room recording, use
:func:`mne.compute_raw_covariance` instead.
Parameters
----------
epochs : instance of Epochs, or list of Epochs
The epochs.
keep_sample_mean : bool (default True)
If False, the average response over epochs is computed for
each event type and subtracted during the covariance
computation. This is useful if the evoked response from a
previous stimulus extends into the baseline period of the next.
Note. This option is only implemented for method='empirical'.
tmin : float | None (default None)
Start time for baseline. If None start at first sample.
tmax : float | None (default None)
End time for baseline. If None end at last sample.
projs : list of Projection | None (default None)
List of projectors to use in covariance calculation, or None
to indicate that the projectors from the epochs should be
inherited. If None, then projectors from all epochs must match.
method : str | list | None (default 'empirical')
The method used for covariance estimation. If 'empirical' (default),
the sample covariance will be computed. A list can be passed to
perform estimates using multiple methods.
If 'auto' or a list of methods, the best estimator will be determined
based on log-likelihood and cross-validation on unseen data as
described in :footcite:`EngemannGramfort2015`. Valid methods are
'empirical', 'diagonal_fixed', 'shrunk', 'oas', 'ledoit_wolf',
'factor_analysis', 'shrinkage', and 'pca' (see Notes). If ``'auto'``,
it expands to::
['shrunk', 'diagonal_fixed', 'empirical', 'factor_analysis']
``'factor_analysis'`` is removed when ``rank`` is not 'full'.
The ``'auto'`` mode is not recommended if there are many
segments of data, since computation can take a long time.
.. versionadded:: 0.9.0
method_params : dict | None (default None)
Additional parameters to the estimation procedure. Only considered if
method is not None. Keys must correspond to the value(s) of ``method``.
If None (default), expands to the following (with the addition of
        ``{'store_precision': False, 'assume_centered': True}`` for all methods
except ``'factor_analysis'`` and ``'pca'``)::
{'diagonal_fixed': {'grad': 0.1, 'mag': 0.1, 'eeg': 0.1, ...},
             'shrinkage': {'shrinkage': 0.1},
'shrunk': {'shrinkage': np.logspace(-4, 0, 30)},
'pca': {'iter_n_components': None},
'factor_analysis': {'iter_n_components': None}}
cv : int | sklearn.model_selection object (default 3)
The cross validation method. Defaults to 3, which will
internally trigger by default :class:`sklearn.model_selection.KFold`
with 3 splits.
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale data to roughly the same order of
magnitude.
%(n_jobs)s
return_estimators : bool (default False)
Whether to return all estimators or the best. Only considered if
method equals 'auto' or is a list of str. Defaults to False.
on_mismatch : str
What to do when the MEG<->Head transformations do not match between
epochs. If "raise" (default) an error is raised, if "warn" then a
warning is emitted, if "ignore" then nothing is printed. Having
mismatched transforms can in some cases lead to unexpected or
unstable results in covariance calculation, e.g. when data
have been processed with Maxwell filtering but not transformed
to the same head position.
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
%(verbose)s
Returns
-------
cov : instance of Covariance | list
The computed covariance. If method equals 'auto' or is a list of str
and return_estimators equals True, a list of covariance estimators is
returned (sorted by log-likelihood, from high to low, i.e. from best
to worst).
See Also
--------
compute_raw_covariance : Estimate noise covariance from raw data, such as
empty-room recordings.
Notes
-----
Baseline correction or sufficient high-passing should be used
when creating the :class:`Epochs` to ensure that the data are zero mean,
otherwise the computed covariance matrix will be inaccurate.
Valid ``method`` strings are:
* ``'empirical'``
The empirical or sample covariance (default)
* ``'diagonal_fixed'``
A diagonal regularization based on channel types as in
:func:`mne.cov.regularize`.
* ``'shrinkage'``
Fixed shrinkage.
.. versionadded:: 0.16
* ``'ledoit_wolf'``
The Ledoit-Wolf estimator, which uses an
empirical formula for the optimal shrinkage value
:footcite:`LedoitWolf2004`.
* ``'oas'``
The OAS estimator :footcite:`ChenEtAl2010`, which uses a different
        empirical formula for the optimal shrinkage value.
.. versionadded:: 0.16
* ``'shrunk'``
Like 'ledoit_wolf', but with cross-validation
for optimal alpha.
* ``'pca'``
Probabilistic PCA with low rank :footcite:`TippingBishop1999`.
* ``'factor_analysis'``
Factor analysis with low rank :footcite:`Barber2012`.
``'ledoit_wolf'`` and ``'pca'`` are similar to ``'shrunk'`` and
``'factor_analysis'``, respectively, except that they use
cross validation (which is useful when samples are correlated, which
is often the case for M/EEG data). The former two are not included in
the ``'auto'`` mode to avoid redundancy.
For multiple event types, it is also possible to create a
single :class:`Epochs` object with events obtained using
:func:`mne.merge_events`. However, the resulting covariance matrix
will only be correct if ``keep_sample_mean is True``.
The covariance can be unstable if the number of samples is small.
In that case it is common to regularize the covariance estimate.
    The ``method`` parameter allows regularizing the covariance in an
    automated way. It also allows selecting between different alternative
estimation algorithms which themselves achieve regularization.
Details are described in :footcite:`EngemannGramfort2015`.
For more information on the advanced estimation methods, see
:ref:`the sklearn manual <sklearn:covariance>`.
References
----------
.. footbibliography::
"""
# scale to natural unit for best stability with MEG/EEG
scalings = _check_scalings_user(scalings)
method, _method_params = _check_method_params(
method, method_params, keep_sample_mean, rank=rank)
del method_params
    # for multi-condition support, `epochs` is required to refer to a list of
    # Epochs objects
def _unpack_epochs(epochs):
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
if not isinstance(epochs, list):
epochs = _unpack_epochs(epochs)
else:
epochs = sum([_unpack_epochs(epoch) for epoch in epochs], [])
# check for baseline correction
if any(epochs_t.baseline is None and epochs_t.info['highpass'] < 0.5 and
keep_sample_mean for epochs_t in epochs):
warn('Epochs are not baseline corrected, covariance '
'matrix may be inaccurate')
orig = epochs[0].info['dev_head_t']
_check_on_missing(on_mismatch, 'on_mismatch')
for ei, epoch in enumerate(epochs):
epoch.info._check_consistency()
if (orig is None) != (epoch.info['dev_head_t'] is None) or \
(orig is not None and not
np.allclose(orig['trans'],
epoch.info['dev_head_t']['trans'])):
msg = ('MEG<->Head transform mismatch between epochs[0]:\n%s\n\n'
'and epochs[%s]:\n%s'
% (orig, ei, epoch.info['dev_head_t']))
_on_missing(on_mismatch, msg, 'on_mismatch')
bads = epochs[0].info['bads']
if projs is None:
projs = epochs[0].info['projs']
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.proj != epochs[0].proj:
raise ValueError('Epochs must agree on the use of projections')
for proj_a, proj_b in zip(epochs_t.info['projs'], projs):
if not _proj_equal(proj_a, proj_b):
raise ValueError('Epochs must have same projectors')
projs = _check_projs(projs)
ch_names = epochs[0].ch_names
# make sure Epochs are compatible
for epochs_t in epochs[1:]:
if epochs_t.info['bads'] != bads:
raise ValueError('Epochs must have same bad channels')
if epochs_t.ch_names != ch_names:
raise ValueError('Epochs must have same channel names')
picks_list = _picks_by_type(epochs[0].info)
picks_meeg = np.concatenate([b for _, b in picks_list])
picks_meeg = np.sort(picks_meeg)
ch_names = [epochs[0].ch_names[k] for k in picks_meeg]
info = epochs[0].info # we will overwrite 'epochs'
if not keep_sample_mean:
# prepare mean covs
n_epoch_types = len(epochs)
data_mean = [0] * n_epoch_types
n_samples = np.zeros(n_epoch_types, dtype=np.int64)
n_epochs = np.zeros(n_epoch_types, dtype=np.int64)
for ii, epochs_t in enumerate(epochs):
tslice = _get_tslice(epochs_t, tmin, tmax)
for e in epochs_t:
e = e[picks_meeg, tslice]
if not keep_sample_mean:
data_mean[ii] += e
n_samples[ii] += e.shape[1]
n_epochs[ii] += 1
n_samples_epoch = n_samples // n_epochs
norm_const = np.sum(n_samples_epoch * (n_epochs - 1))
data_mean = [1.0 / n_epoch * np.dot(mean, mean.T) for n_epoch, mean
in zip(n_epochs, data_mean)]
info = pick_info(info, picks_meeg)
tslice = _get_tslice(epochs[0], tmin, tmax)
epochs = [ee.get_data(picks=picks_meeg)[..., tslice] for ee in epochs]
picks_meeg = np.arange(len(picks_meeg))
picks_list = _picks_by_type(info)
if len(epochs) > 1:
epochs = np.concatenate(epochs, 0)
else:
epochs = epochs[0]
epochs = np.hstack(epochs)
n_samples_tot = epochs.shape[-1]
_check_n_samples(n_samples_tot, len(picks_meeg))
epochs = epochs.T # sklearn | C-order
cov_data = _compute_covariance_auto(
epochs, method=method, method_params=_method_params, info=info,
cv=cv, n_jobs=n_jobs, stop_early=True, picks_list=picks_list,
scalings=scalings, rank=rank)
if keep_sample_mean is False:
cov = cov_data['empirical']['data']
# undo scaling
cov *= (n_samples_tot - 1)
# ... apply pre-computed class-wise normalization
for mean_cov in data_mean:
cov -= mean_cov
cov /= norm_const
covs = list()
for this_method, data in cov_data.items():
cov = Covariance(data.pop('data'), ch_names, info['bads'], projs,
nfree=n_samples_tot - 1)
# add extra info
cov.update(method=this_method, **data)
covs.append(cov)
logger.info('Number of samples used : %d' % n_samples_tot)
covs.sort(key=lambda c: c['loglik'], reverse=True)
if len(covs) > 1:
msg = ['log-likelihood on unseen data (descending order):']
for c in covs:
msg.append('%s: %0.3f' % (c['method'], c['loglik']))
logger.info('\n '.join(msg))
if return_estimators:
out = covs
else:
out = covs[0]
logger.info('selecting best estimator: {}'.format(out['method']))
else:
out = covs[0]
logger.info('[done]')
return out
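# Minimal usage sketch (assumes baseline-corrected `epochs` built upstream): use the
# pre-stimulus period and either keep the single best estimator or all of them:
#
#     cov = compute_covariance(epochs, tmax=0.0, method='auto')
#     covs = compute_covariance(epochs, tmax=0.0, method=['empirical', 'shrunk'],
#                               return_estimators=True)   # sorted best -> worst by log-likelihood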
def _check_scalings_user(scalings):
if isinstance(scalings, dict):
for k, v in scalings.items():
_check_option('the keys in `scalings`', k, ['mag', 'grad', 'eeg'])
elif scalings is not None and not isinstance(scalings, np.ndarray):
raise TypeError('scalings must be a dict, ndarray, or None, got %s'
% type(scalings))
scalings = _handle_default('scalings', scalings)
return scalings
def _eigvec_subspace(eig, eigvec, mask):
"""Compute the subspace from a subset of eigenvectors."""
# We do the same thing we do with projectors:
P = np.eye(len(eigvec)) - np.dot(eigvec[~mask].conj().T, eigvec[~mask])
eig, eigvec = eigh(P)
eigvec = eigvec.conj().T
return eig, eigvec
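# In words: form the projector onto the span of the kept eigenvectors,
#   P = I - E_dropped.conj().T @ E_dropped   (E_dropped = rows of `eigvec` where mask is False),
# then re-diagonalize P so the returned (eig, eigvec) describe that reduced subspace.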
def _get_iid_kwargs():
import sklearn
kwargs = dict()
if LooseVersion(sklearn.__version__) < LooseVersion('0.22'):
kwargs['iid'] = False
return kwargs
def _compute_covariance_auto(data, method, info, method_params, cv,
scalings, n_jobs, stop_early, picks_list, rank):
"""Compute covariance auto mode."""
# rescale to improve numerical stability
orig_rank = rank
rank = compute_rank(RawArray(data.T, info, copy=None, verbose=False),
rank, scalings, info)
with _scaled_array(data.T, picks_list, scalings):
C = np.dot(data.T, data)
_, eigvec, mask = _smart_eigh(C, info, rank, proj_subspace=True,
do_compute_rank=False)
eigvec = eigvec[mask]
data = np.dot(data, eigvec.T)
used = np.where(mask)[0]
sub_picks_list = [(key, np.searchsorted(used, picks))
for key, picks in picks_list]
sub_info = pick_info(info, used) if len(used) != len(mask) else info
logger.info('Reducing data rank from %s -> %s'
% (len(mask), eigvec.shape[0]))
estimator_cov_info = list()
msg = 'Estimating covariance using %s'
ok_sklearn = check_version('sklearn')
if not ok_sklearn and (len(method) != 1 or method[0] != 'empirical'):
raise ValueError('scikit-learn is not installed, `method` must be '
'`empirical`, got %s' % (method,))
for method_ in method:
data_ = data.copy()
name = method_.__name__ if callable(method_) else method_
logger.info(msg % name.upper())
mp = method_params[method_]
_info = {}
if method_ == 'empirical':
est = EmpiricalCovariance(**mp)
est.fit(data_)
estimator_cov_info.append((est, est.covariance_, _info))
del est
elif method_ == 'diagonal_fixed':
est = _RegCovariance(info=sub_info, **mp)
est.fit(data_)
estimator_cov_info.append((est, est.covariance_, _info))
del est
elif method_ == 'ledoit_wolf':
from sklearn.covariance import LedoitWolf
shrinkages = []
lw = LedoitWolf(**mp)
for ch_type, picks in sub_picks_list:
lw.fit(data_[:, picks])
shrinkages.append((ch_type, lw.shrinkage_, picks))
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del lw, sc
elif method_ == 'oas':
from sklearn.covariance import OAS
shrinkages = []
oas = OAS(**mp)
for ch_type, picks in sub_picks_list:
oas.fit(data_[:, picks])
shrinkages.append((ch_type, oas.shrinkage_, picks))
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del oas, sc
elif method_ == 'shrinkage':
sc = _ShrunkCovariance(**mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del sc
elif method_ == 'shrunk':
from sklearn.model_selection import GridSearchCV
from sklearn.covariance import ShrunkCovariance
shrinkage = mp.pop('shrinkage')
tuned_parameters = [{'shrinkage': shrinkage}]
shrinkages = []
gs = GridSearchCV(ShrunkCovariance(**mp),
tuned_parameters, cv=cv, **_get_iid_kwargs())
for ch_type, picks in sub_picks_list:
gs.fit(data_[:, picks])
shrinkages.append((ch_type, gs.best_estimator_.shrinkage,
picks))
shrinkages = [c[0] for c in zip(shrinkages)]
sc = _ShrunkCovariance(shrinkage=shrinkages, **mp)
sc.fit(data_)
estimator_cov_info.append((sc, sc.covariance_, _info))
del shrinkage, sc
elif method_ == 'pca':
assert orig_rank == 'full'
pca, _info = _auto_low_rank_model(
data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv,
stop_early=stop_early)
pca.fit(data_)
estimator_cov_info.append((pca, pca.get_covariance(), _info))
del pca
elif method_ == 'factor_analysis':
assert orig_rank == 'full'
fa, _info = _auto_low_rank_model(
data_, method_, n_jobs=n_jobs, method_params=mp, cv=cv,
stop_early=stop_early)
fa.fit(data_)
estimator_cov_info.append((fa, fa.get_covariance(), _info))
del fa
else:
raise ValueError('Oh no! Your estimator does not have'
' a .fit method')
logger.info('Done.')
if len(method) > 1:
logger.info('Using cross-validation to select the best estimator.')
out = dict()
for ei, (estimator, cov, runtime_info) in \
enumerate(estimator_cov_info):
if len(method) > 1:
loglik = _cross_val(data, estimator, cv, n_jobs)
else:
loglik = None
# project back
cov = np.dot(eigvec.T, np.dot(cov, eigvec))
# undo bias
cov *= data.shape[0] / (data.shape[0] - 1)
# undo scaling
_undo_scaling_cov(cov, picks_list, scalings)
method_ = method[ei]
name = method_.__name__ if callable(method_) else method_
out[name] = dict(loglik=loglik, data=cov, estimator=estimator)
out[name].update(runtime_info)
return out
def _gaussian_loglik_scorer(est, X, y=None):
"""Compute the Gaussian log likelihood of X under the model in est."""
# compute empirical covariance of the test set
precision = est.get_precision()
n_samples, n_features = X.shape
log_like = -.5 * (X * (np.dot(X, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) - _logdet(precision))
out = np.mean(log_like)
return out
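# The score above is the mean Gaussian log-likelihood of the test samples under the
# fitted model's precision matrix P = est.get_precision():
#   log p(x) = -0.5 * (x^T P x + d * log(2 * pi) - log det(P)),   d = n_features
# averaged over the rows of X. Higher is better; _cross_val ranks estimators on it.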
def _cross_val(data, est, cv, n_jobs):
"""Compute cross validation."""
from sklearn.model_selection import cross_val_score
return np.mean(cross_val_score(est, data, cv=cv, n_jobs=n_jobs,
scoring=_gaussian_loglik_scorer))
def _auto_low_rank_model(data, mode, n_jobs, method_params, cv,
stop_early=True, verbose=None):
"""Compute latent variable models."""
method_params = deepcopy(method_params)
iter_n_components = method_params.pop('iter_n_components')
if iter_n_components is None:
iter_n_components = np.arange(5, data.shape[1], 5)
from sklearn.decomposition import PCA, FactorAnalysis
if mode == 'factor_analysis':
est = FactorAnalysis
else:
assert mode == 'pca'
est = PCA
est = est(**method_params)
est.n_components = 1
scores = np.empty_like(iter_n_components, dtype=np.float64)
scores.fill(np.nan)
# make sure we don't empty the thing if it's a generator
max_n = max(list(deepcopy(iter_n_components)))
if max_n > data.shape[1]:
warn('You are trying to estimate %i components on matrix '
'with %i features.' % (max_n, data.shape[1]))
for ii, n in enumerate(iter_n_components):
est.n_components = n
try: # this may fail depending on rank and split
score = _cross_val(data=data, est=est, cv=cv, n_jobs=n_jobs)
except ValueError:
score = np.inf
if np.isinf(score) or score > 0:
logger.info('... infinite values encountered. stopping estimation')
break
logger.info('... rank: %i - loglik: %0.3f' % (n, score))
if score != -np.inf:
scores[ii] = score
if (ii >= 3 and np.all(np.diff(scores[ii - 3:ii]) < 0) and stop_early):
# early stop search when loglik has been going down 3 times
logger.info('early stopping parameter search.')
break
    # happens if the rank is too low right from the beginning
if np.isnan(scores).all():
raise RuntimeError('Oh no! Could not estimate covariance because all '
'scores were NaN. Please contact the MNE-Python '
'developers.')
i_score = np.nanargmax(scores)
best = est.n_components = iter_n_components[i_score]
logger.info('... best model at rank = %i' % best)
runtime_info = {'ranks': np.array(iter_n_components),
'scores': scores,
'best': best,
'cv': cv}
return est, runtime_info
###############################################################################
# Sklearn Estimators
class _RegCovariance(BaseEstimator):
"""Aux class."""
def __init__(self, info, grad=0.1, mag=0.1, eeg=0.1, seeg=0.1,
ecog=0.1, hbo=0.1, hbr=0.1, fnirs_cw_amplitude=0.1,
fnirs_fd_ac_amplitude=0.1, fnirs_fd_phase=0.1, fnirs_od=0.1,
csd=0.1, dbs=0.1, store_precision=False,
assume_centered=False):
self.info = info
# For sklearn compat, these cannot (easily?) be combined into
# a single dictionary
self.grad = grad
self.mag = mag
self.eeg = eeg
self.seeg = seeg
self.dbs = dbs
self.ecog = ecog
self.hbo = hbo
self.hbr = hbr
self.fnirs_cw_amplitude = fnirs_cw_amplitude
self.fnirs_fd_ac_amplitude = fnirs_fd_ac_amplitude
self.fnirs_fd_phase = fnirs_fd_phase
self.fnirs_od = fnirs_od
self.csd = csd
self.store_precision = store_precision
self.assume_centered = assume_centered
def fit(self, X):
"""Fit covariance model with classical diagonal regularization."""
self.estimator_ = EmpiricalCovariance(
store_precision=self.store_precision,
assume_centered=self.assume_centered)
self.covariance_ = self.estimator_.fit(X).covariance_
self.covariance_ = 0.5 * (self.covariance_ + self.covariance_.T)
cov_ = Covariance(
data=self.covariance_, names=self.info['ch_names'],
bads=self.info['bads'], projs=self.info['projs'],
nfree=len(self.covariance_))
cov_ = regularize(
cov_, self.info, proj=False, exclude='bads',
grad=self.grad, mag=self.mag, eeg=self.eeg,
ecog=self.ecog, seeg=self.seeg, dbs=self.dbs,
hbo=self.hbo, hbr=self.hbr, rank='full')
self.estimator_.covariance_ = self.covariance_ = cov_.data
return self
def score(self, X_test, y=None):
"""Delegate call to modified EmpiricalCovariance instance."""
return self.estimator_.score(X_test, y=y)
def get_precision(self):
"""Delegate call to modified EmpiricalCovariance instance."""
return self.estimator_.get_precision()
class _ShrunkCovariance(BaseEstimator):
"""Aux class."""
def __init__(self, store_precision, assume_centered,
shrinkage=0.1):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.shrinkage = shrinkage
def fit(self, X):
"""Fit covariance model with oracle shrinkage regularization."""
from sklearn.covariance import shrunk_covariance
self.estimator_ = EmpiricalCovariance(
store_precision=self.store_precision,
assume_centered=self.assume_centered)
cov = self.estimator_.fit(X).covariance_
if not isinstance(self.shrinkage, (list, tuple)):
shrinkage = [('all', self.shrinkage, np.arange(len(cov)))]
else:
shrinkage = self.shrinkage
zero_cross_cov = np.zeros_like(cov, dtype=bool)
for a, b in itt.combinations(shrinkage, 2):
picks_i, picks_j = a[2], b[2]
ch_ = a[0], b[0]
if 'eeg' in ch_:
zero_cross_cov[np.ix_(picks_i, picks_j)] = True
zero_cross_cov[np.ix_(picks_j, picks_i)] = True
self.zero_cross_cov_ = zero_cross_cov
# Apply shrinkage to blocks
for ch_type, c, picks in shrinkage:
sub_cov = cov[np.ix_(picks, picks)]
cov[np.ix_(picks, picks)] = shrunk_covariance(sub_cov,
shrinkage=c)
# Apply shrinkage to cross-cov
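        # Cross-covariance blocks between different channel types are rescaled
        # by sqrt((1 - shrinkage_i) * (1 - shrinkage_j)) below, so they remain
        # consistent with the per-block shrinkage applied above.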
for a, b in itt.combinations(shrinkage, 2):
shrinkage_i, shrinkage_j = a[1], b[1]
picks_i, picks_j = a[2], b[2]
c_ij = np.sqrt((1. - shrinkage_i) * (1. - shrinkage_j))
cov[np.ix_(picks_i, picks_j)] *= c_ij
cov[np.ix_(picks_j, picks_i)] *= c_ij
# Set to zero the necessary cross-cov
if np.any(zero_cross_cov):
cov[zero_cross_cov] = 0.0
self.estimator_.covariance_ = self.covariance_ = cov
return self
def score(self, X_test, y=None):
"""Delegate to modified EmpiricalCovariance instance."""
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.estimator_.location_,
assume_centered=True)
if np.any(self.zero_cross_cov_):
test_cov[self.zero_cross_cov_] = 0.
res = log_likelihood(test_cov, self.estimator_.get_precision())
return res
def get_precision(self):
"""Delegate to modified EmpiricalCovariance instance."""
return self.estimator_.get_precision()
###############################################################################
# Writing
def write_cov(fname, cov):
"""Write a noise covariance matrix.
Parameters
----------
fname : str
The name of the file. It should end with -cov.fif or -cov.fif.gz.
cov : Covariance
The noise covariance matrix.
See Also
--------
read_cov
"""
cov.save(fname)
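# A minimal usage sketch (illustrative only; the file names are hypothetical):
#
#     cov = read_cov('sample-cov.fif')
#     write_cov('sample-copy-cov.fif', cov)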
###############################################################################
# Prepare for inverse modeling
def _unpack_epochs(epochs):
"""Aux Function."""
if len(epochs.event_id) > 1:
epochs = [epochs[k] for k in epochs.event_id]
else:
epochs = [epochs]
return epochs
def _get_ch_whitener(A, pca, ch_type, rank):
"""Get whitener params for a set of channels."""
# whitening operator
eig, eigvec = eigh(A, overwrite_a=True)
eigvec = eigvec.conj().T
mask = np.ones(len(eig), bool)
eig[:-rank] = 0.0
mask[:-rank] = False
logger.info(' Setting small %s eigenvalues to zero (%s)'
% (ch_type, 'using PCA' if pca else 'without PCA'))
if pca: # No PCA case.
# This line will reduce the actual number of variables in data
# and leadfield to the true rank.
eigvec = eigvec[:-rank].copy()
return eig, eigvec, mask
@verbose
def prepare_noise_cov(noise_cov, info, ch_names=None, rank=None,
scalings=None, on_rank_mismatch='ignore', verbose=None):
"""Prepare noise covariance matrix.
Parameters
----------
noise_cov : instance of Covariance
The noise covariance to process.
info : dict
The measurement info (used to get channel types and bad channels).
ch_names : list | None
The channel names to be considered. Can be None to use
``info['ch_names']``.
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
If dict, it will override the following dict (default if None)::
dict(mag=1e12, grad=1e11, eeg=1e5)
%(on_rank_mismatch)s
%(verbose)s
Returns
-------
cov : instance of Covariance
A copy of the covariance with the good channels subselected
and parameters updated.
"""
# reorder C and info to match ch_names order
noise_cov_idx = list()
missing = list()
ch_names = info['ch_names'] if ch_names is None else ch_names
for c in ch_names:
# this could be try/except ValueError, but it is not the preferred way
if c in noise_cov.ch_names:
noise_cov_idx.append(noise_cov.ch_names.index(c))
else:
missing.append(c)
if len(missing):
raise RuntimeError('Not all channels present in noise covariance:\n%s'
% missing)
C = noise_cov._get_square()[np.ix_(noise_cov_idx, noise_cov_idx)]
info = pick_info(info, pick_channels(info['ch_names'], ch_names))
projs = info['projs'] + noise_cov['projs']
noise_cov = Covariance(
data=C, names=ch_names, bads=list(noise_cov['bads']),
projs=deepcopy(noise_cov['projs']), nfree=noise_cov['nfree'],
method=noise_cov.get('method', None),
loglik=noise_cov.get('loglik', None))
eig, eigvec, _ = _smart_eigh(noise_cov, info, rank, scalings, projs,
ch_names, on_rank_mismatch=on_rank_mismatch)
noise_cov.update(eig=eig, eigvec=eigvec)
return noise_cov
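# Hedged example of a typical call (illustrative; ``noise_cov`` and ``evoked``
# are assumed to exist):
#
#     cov_prepared = prepare_noise_cov(noise_cov, evoked.info,
#                                      ch_names=evoked.ch_names, rank=None)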
@verbose
def _smart_eigh(C, info, rank, scalings=None, projs=None,
ch_names=None, proj_subspace=False, do_compute_rank=True,
on_rank_mismatch='ignore', verbose=None):
"""Compute eigh of C taking into account rank and ch_type scalings."""
scalings = _handle_default('scalings_cov_rank', scalings)
projs = info['projs'] if projs is None else projs
ch_names = info['ch_names'] if ch_names is None else ch_names
if info['ch_names'] != ch_names:
info = pick_info(info, [info['ch_names'].index(c) for c in ch_names])
assert info['ch_names'] == ch_names
n_chan = len(ch_names)
# Create the projection operator
proj, ncomp, _ = make_projector(projs, ch_names)
if isinstance(C, Covariance):
C = C['data']
if ncomp > 0:
logger.info(' Created an SSP operator (subspace dimension = %d)'
% ncomp)
C = np.dot(proj, np.dot(C, proj.T))
noise_cov = Covariance(C, ch_names, [], projs, 0)
if do_compute_rank: # if necessary
rank = compute_rank(
noise_cov, rank, scalings, info, on_rank_mismatch=on_rank_mismatch)
assert C.ndim == 2 and C.shape[0] == C.shape[1]
# time saving short-circuit
if proj_subspace and sum(rank.values()) == C.shape[0]:
return np.ones(n_chan), np.eye(n_chan), np.ones(n_chan, bool)
dtype = complex if C.dtype == np.complex_ else float
eig = np.zeros(n_chan, dtype)
eigvec = np.zeros((n_chan, n_chan), dtype)
mask = np.zeros(n_chan, bool)
for ch_type, picks in _picks_by_type(info, meg_combined=True,
ref_meg=False, exclude='bads'):
if len(picks) == 0:
continue
this_C = C[np.ix_(picks, picks)]
if ch_type not in rank and ch_type in ('mag', 'grad'):
this_rank = rank['meg'] # if there is only one or the other
else:
this_rank = rank[ch_type]
e, ev, m = _get_ch_whitener(this_C, False, ch_type.upper(), this_rank)
if proj_subspace:
# Choose the subspace the same way we do for projections
e, ev = _eigvec_subspace(e, ev, m)
eig[picks], eigvec[np.ix_(picks, picks)], mask[picks] = e, ev, m
# XXX : also handle ref for sEEG and ECoG
if ch_type == 'eeg' and _needs_eeg_average_ref_proj(info) and not \
_has_eeg_average_ref_proj(projs):
warn('No average EEG reference present in info["projs"], '
'covariance may be adversely affected. Consider recomputing '
                 'covariance with an average EEG reference projector '
'added.')
return eig, eigvec, mask
@verbose
def regularize(cov, info, mag=0.1, grad=0.1, eeg=0.1, exclude='bads',
proj=True, seeg=0.1, ecog=0.1, hbo=0.1, hbr=0.1,
fnirs_cw_amplitude=0.1, fnirs_fd_ac_amplitude=0.1,
fnirs_fd_phase=0.1, fnirs_od=0.1, csd=0.1, dbs=0.1,
rank=None, scalings=None, verbose=None):
"""Regularize noise covariance matrix.
This method works by adding a constant to the diagonal for each
channel type separately. Special care is taken to keep the
rank of the data constant.
.. note:: This function is kept for reasons of backward-compatibility.
Please consider explicitly using the ``method`` parameter in
:func:`mne.compute_covariance` to directly combine estimation
with regularization in a data-driven fashion. See the `faq
<http://mne.tools/dev/overview/faq.html#how-should-i-regularize-the-covariance-matrix>`_
for more information.
Parameters
----------
cov : Covariance
The noise covariance matrix.
info : dict
The measurement info (used to get channel types and bad channels).
mag : float (default 0.1)
Regularization factor for MEG magnetometers.
grad : float (default 0.1)
Regularization factor for MEG gradiometers. Must be the same as
``mag`` if data have been processed with SSS.
eeg : float (default 0.1)
Regularization factor for EEG.
exclude : list | 'bads' (default 'bads')
List of channels to mark as bad. If 'bads', bads channels
are extracted from both info['bads'] and cov['bads'].
proj : bool (default True)
Apply projections to keep rank of data.
seeg : float (default 0.1)
Regularization factor for sEEG signals.
ecog : float (default 0.1)
Regularization factor for ECoG signals.
hbo : float (default 0.1)
Regularization factor for HBO signals.
hbr : float (default 0.1)
Regularization factor for HBR signals.
fnirs_cw_amplitude : float (default 0.1)
Regularization factor for fNIRS CW raw signals.
fnirs_fd_ac_amplitude : float (default 0.1)
Regularization factor for fNIRS FD AC raw signals.
fnirs_fd_phase : float (default 0.1)
Regularization factor for fNIRS raw phase signals.
fnirs_od : float (default 0.1)
Regularization factor for fNIRS optical density signals.
csd : float (default 0.1)
Regularization factor for EEG-CSD signals.
dbs : float (default 0.1)
Regularization factor for DBS signals.
%(rank_None)s
.. versionadded:: 0.17
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
Data will be rescaled before rank estimation to improve accuracy.
See :func:`mne.compute_covariance`.
.. versionadded:: 0.17
%(verbose)s
Returns
-------
reg_cov : Covariance
The regularized covariance matrix.
See Also
--------
mne.compute_covariance
""" # noqa: E501
from scipy import linalg
cov = cov.copy()
info._check_consistency()
scalings = _handle_default('scalings_cov_rank', scalings)
regs = dict(eeg=eeg, seeg=seeg, dbs=dbs, ecog=ecog, hbo=hbo, hbr=hbr,
fnirs_cw_amplitude=fnirs_cw_amplitude,
fnirs_fd_ac_amplitude=fnirs_fd_ac_amplitude,
fnirs_fd_phase=fnirs_fd_phase, fnirs_od=fnirs_od, csd=csd)
if exclude is None:
raise ValueError('exclude must be a list of strings or "bads"')
if exclude == 'bads':
exclude = info['bads'] + cov['bads']
picks_dict = {ch_type: [] for ch_type in _DATA_CH_TYPES_SPLIT}
meg_combined = 'auto' if rank != 'full' else False
picks_dict.update(dict(_picks_by_type(
info, meg_combined=meg_combined, exclude=exclude, ref_meg=False)))
if len(picks_dict.get('meg', [])) > 0 and rank != 'full': # combined
if mag != grad:
raise ValueError('On data where magnetometers and gradiometers '
'are dependent (e.g., SSSed data), mag (%s) must '
'equal grad (%s)' % (mag, grad))
logger.info('Regularizing MEG channels jointly')
regs['meg'] = mag
else:
regs.update(mag=mag, grad=grad)
if rank != 'full':
rank = compute_rank(cov, rank, scalings, info)
info_ch_names = info['ch_names']
ch_names_by_type = dict()
for ch_type, picks_type in picks_dict.items():
ch_names_by_type[ch_type] = [info_ch_names[i] for i in picks_type]
# This actually removes bad channels from the cov, which is not backward
# compatible, so let's leave all channels in
cov_good = pick_channels_cov(cov, include=info_ch_names, exclude=exclude)
ch_names = cov_good.ch_names
# Now get the indices for each channel type in the cov
idx_cov = {ch_type: [] for ch_type in ch_names_by_type}
for i, ch in enumerate(ch_names):
for ch_type in ch_names_by_type:
if ch in ch_names_by_type[ch_type]:
idx_cov[ch_type].append(i)
break
else:
            raise Exception('channel %s is of unknown type' % ch)
C = cov_good['data']
assert len(C) == sum(map(len, idx_cov.values()))
if proj:
projs = info['projs'] + cov_good['projs']
projs = activate_proj(projs)
for ch_type in idx_cov:
desc = ch_type.upper()
idx = idx_cov[ch_type]
if len(idx) == 0:
continue
reg = regs[ch_type]
if reg == 0.0:
logger.info(" %s regularization : None" % desc)
continue
logger.info(" %s regularization : %s" % (desc, reg))
this_C = C[np.ix_(idx, idx)]
U = np.eye(this_C.shape[0])
this_ch_names = [ch_names[k] for k in idx]
if rank == 'full':
if proj:
P, ncomp, _ = make_projector(projs, this_ch_names)
if ncomp > 0:
# This adjustment ends up being redundant if rank is None:
U = linalg.svd(P)[0][:, :-ncomp]
logger.info(' Created an SSP operator for %s '
'(dimension = %d)' % (desc, ncomp))
else:
this_picks = pick_channels(info['ch_names'], this_ch_names)
this_info = pick_info(info, this_picks)
# Here we could use proj_subspace=True, but this should not matter
# since this is already in a loop over channel types
_, eigvec, mask = _smart_eigh(this_C, this_info, rank)
U = eigvec[mask].T
this_C = np.dot(U.T, np.dot(this_C, U))
sigma = np.mean(np.diag(this_C))
this_C.flat[::len(this_C) + 1] += reg * sigma # modify diag inplace
this_C = np.dot(U, np.dot(this_C, U.T))
C[np.ix_(idx, idx)] = this_C
# Put data back in correct locations
idx = pick_channels(cov.ch_names, info_ch_names, exclude=exclude)
cov['data'][np.ix_(idx, idx)] = C
return cov
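# Illustrative call (object names are hypothetical; as the docstring notes,
# prefer the ``method`` parameter of :func:`mne.compute_covariance` where
# possible):
#
#     cov_reg = regularize(noise_cov, raw.info, mag=0.1, grad=0.1, eeg=0.1)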
def _regularized_covariance(data, reg=None, method_params=None, info=None,
rank=None):
"""Compute a regularized covariance from data using sklearn.
This is a convenience wrapper for mne.decoding functions, which
adopted a slightly different covariance API.
Returns
-------
cov : ndarray, shape (n_channels, n_channels)
The covariance matrix.
"""
_validate_type(reg, (str, 'numeric', None))
if reg is None:
reg = 'empirical'
elif not isinstance(reg, str):
reg = float(reg)
if method_params is not None:
raise ValueError('If reg is a float, method_params must be None '
'(got %s)' % (type(method_params),))
method_params = dict(shrinkage=dict(
shrinkage=reg, assume_centered=True, store_precision=False))
reg = 'shrinkage'
method, method_params = _check_method_params(
reg, method_params, name='reg', allow_auto=False, rank=rank)
# use mag instead of eeg here to avoid the cov EEG projection warning
info = create_info(data.shape[-2], 1000., 'mag') if info is None else info
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', None)
cov = _compute_covariance_auto(
data.T, method=method, method_params=method_params,
info=info, cv=None, n_jobs=1, stop_early=True,
picks_list=picks_list, scalings=scalings,
rank=rank)[reg]['data']
return cov
@verbose
def compute_whitener(noise_cov, info=None, picks=None, rank=None,
scalings=None, return_rank=False, pca=False,
return_colorer=False, on_rank_mismatch='warn',
verbose=None):
"""Compute whitening matrix.
Parameters
----------
noise_cov : Covariance
The noise covariance.
info : dict | None
The measurement info. Can be None if ``noise_cov`` has already been
prepared with :func:`prepare_noise_cov`.
%(picks_good_data_noref)s
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None
The rescaling method to be applied. See documentation of
``prepare_noise_cov`` for details.
return_rank : bool
If True, return the rank used to compute the whitener.
.. versionadded:: 0.15
pca : bool | str
Space to project the data into. Options:
:data:`python:True`
Whitener will be shape (n_nonzero, n_channels).
``'white'``
Whitener will be shape (n_channels, n_channels), potentially rank
deficient, and have the first ``n_channels - n_nonzero`` rows and
columns set to zero.
:data:`python:False` (default)
Whitener will be shape (n_channels, n_channels), potentially rank
deficient, and rotated back to the space of the original data.
.. versionadded:: 0.18
return_colorer : bool
If True, return the colorer as well.
%(on_rank_mismatch)s
%(verbose)s
Returns
-------
W : ndarray, shape (n_channels, n_channels) or (n_nonzero, n_channels)
The whitening matrix.
ch_names : list
The channel names.
rank : int
Rank reduction of the whitener. Returned only if return_rank is True.
colorer : ndarray, shape (n_channels, n_channels) or (n_channels, n_nonzero)
The coloring matrix.
""" # noqa: E501
_validate_type(pca, (str, bool), 'space')
_valid_pcas = (True, 'white', False)
if pca not in _valid_pcas:
raise ValueError('space must be one of %s, got %s'
% (_valid_pcas, pca))
if info is None:
if 'eig' not in noise_cov:
raise ValueError('info can only be None if the noise cov has '
'already been prepared with prepare_noise_cov')
ch_names = deepcopy(noise_cov['names'])
else:
picks = _picks_to_idx(info, picks, with_ref_meg=False)
ch_names = [info['ch_names'][k] for k in picks]
del picks
noise_cov = prepare_noise_cov(
noise_cov, info, ch_names, rank, scalings,
on_rank_mismatch=on_rank_mismatch)
n_chan = len(ch_names)
assert n_chan == len(noise_cov['eig'])
# Omit the zeroes due to projection
eig = noise_cov['eig'].copy()
nzero = (eig > 0)
eig[~nzero] = 0. # get rid of numerical noise (negative) ones
if noise_cov['eigvec'].dtype.kind == 'c':
dtype = np.complex128
else:
dtype = np.float64
W = np.zeros((n_chan, 1), dtype)
W[nzero, 0] = 1.0 / np.sqrt(eig[nzero])
# Rows of eigvec are the eigenvectors
W = W * noise_cov['eigvec'] # C ** -0.5
C = np.sqrt(eig) * noise_cov['eigvec'].conj().T # C ** 0.5
n_nzero = nzero.sum()
logger.info(' Created the whitener using a noise covariance matrix '
'with rank %d (%d small eigenvalues omitted)'
% (n_nzero, noise_cov['dim'] - n_nzero))
# Do the requested projection
if pca is True:
W = W[nzero]
C = C[:, nzero]
elif pca is False:
W = np.dot(noise_cov['eigvec'].conj().T, W)
C = np.dot(C, noise_cov['eigvec'])
# Triage return
out = W, ch_names
if return_rank:
out += (n_nzero,)
if return_colorer:
out += (C,)
return out
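# Hedged usage sketch (assumes ``noise_cov`` and ``epochs`` exist):
#
#     W, ch_names, rank = compute_whitener(noise_cov, epochs.info,
#                                          return_rank=True)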
@verbose
def whiten_evoked(evoked, noise_cov, picks=None, diag=None, rank=None,
scalings=None, verbose=None):
"""Whiten evoked data using given noise covariance.
Parameters
----------
evoked : instance of Evoked
The evoked data.
noise_cov : instance of Covariance
The noise covariance.
%(picks_good_data)s
diag : bool (default False)
If True, whiten using only the diagonal of the covariance.
%(rank_None)s
.. versionadded:: 0.18
Support for 'info' mode.
scalings : dict | None (default None)
To achieve reliable rank estimation on multiple sensors,
sensors have to be rescaled. This parameter controls the
rescaling. If dict, it will override the
following default dict (default if None):
dict(mag=1e12, grad=1e11, eeg=1e5)
%(verbose)s
Returns
-------
evoked_white : instance of Evoked
The whitened evoked data.
"""
evoked = evoked.copy()
picks = _picks_to_idx(evoked.info, picks)
if diag:
noise_cov = noise_cov.as_diag()
W, _ = compute_whitener(noise_cov, evoked.info, picks=picks,
rank=rank, scalings=scalings)
evoked.data[picks] = np.sqrt(evoked.nave) * np.dot(W, evoked.data[picks])
return evoked
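# Illustrative use (assumes ``evoked`` and ``noise_cov`` exist):
#
#     evoked_white = whiten_evoked(evoked, noise_cov)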
@verbose
def _read_cov(fid, node, cov_kind, limited=False, verbose=None):
"""Read a noise covariance matrix."""
# Find all covariance matrices
from scipy import sparse
covs = dir_tree_find(node, FIFF.FIFFB_MNE_COV)
if len(covs) == 0:
raise ValueError('No covariance matrices found')
# Is any of the covariance matrices a noise covariance
for p in range(len(covs)):
tag = find_tag(fid, covs[p], FIFF.FIFF_MNE_COV_KIND)
if tag is not None and int(tag.data) == cov_kind:
this = covs[p]
# Find all the necessary data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIM)
if tag is None:
raise ValueError('Covariance matrix dimension not found')
dim = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_NFREE)
if tag is None:
nfree = -1
else:
nfree = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_METHOD)
if tag is None:
method = None
else:
method = tag.data
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_SCORE)
if tag is None:
score = None
else:
score = tag.data[0]
tag = find_tag(fid, this, FIFF.FIFF_MNE_ROW_NAMES)
if tag is None:
names = []
else:
names = tag.data.split(':')
if len(names) != dim:
raise ValueError('Number of names does not match '
'covariance matrix dimension')
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_MNE_COV_DIAG)
if tag is None:
raise ValueError('No covariance matrix data found')
else:
# Diagonal is stored
data = tag.data
diag = True
logger.info(' %d x %d diagonal covariance (kind = '
'%d) found.' % (dim, dim, cov_kind))
else:
if not sparse.issparse(tag.data):
# Lower diagonal is stored
vals = tag.data
data = np.zeros((dim, dim))
data[np.tril(np.ones((dim, dim))) > 0] = vals
data = data + data.T
data.flat[::dim + 1] /= 2.0
diag = False
logger.info(' %d x %d full covariance (kind = %d) '
'found.' % (dim, dim, cov_kind))
else:
diag = False
data = tag.data
logger.info(' %d x %d sparse covariance (kind = %d)'
' found.' % (dim, dim, cov_kind))
# Read the possibly precomputed decomposition
tag1 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVALUES)
tag2 = find_tag(fid, this, FIFF.FIFF_MNE_COV_EIGENVECTORS)
if tag1 is not None and tag2 is not None:
eig = tag1.data
eigvec = tag2.data
else:
eig = None
eigvec = None
# Read the projection operator
projs = _read_proj(fid, this)
# Read the bad channel list
bads = _read_bad_channels(fid, this, None)
# Put it together
assert dim == len(data)
assert data.ndim == (1 if diag else 2)
cov = dict(kind=cov_kind, diag=diag, dim=dim, names=names,
data=data, projs=projs, bads=bads, nfree=nfree, eig=eig,
eigvec=eigvec)
if score is not None:
cov['loglik'] = score
if method is not None:
cov['method'] = method
if limited:
del cov['kind'], cov['dim'], cov['diag']
return cov
logger.info(' Did not find the desired covariance matrix (kind = %d)'
% cov_kind)
return None
def _write_cov(fid, cov):
"""Write a noise covariance matrix."""
start_block(fid, FIFF.FIFFB_MNE_COV)
# Dimensions etc.
write_int(fid, FIFF.FIFF_MNE_COV_KIND, cov['kind'])
write_int(fid, FIFF.FIFF_MNE_COV_DIM, cov['dim'])
if cov['nfree'] > 0:
write_int(fid, FIFF.FIFF_MNE_COV_NFREE, cov['nfree'])
# Channel names
if cov['names'] is not None and len(cov['names']) > 0:
write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, cov['names'])
# Data
if cov['diag']:
write_double(fid, FIFF.FIFF_MNE_COV_DIAG, cov['data'])
else:
# Store only lower part of covariance matrix
dim = cov['dim']
mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0
vals = cov['data'][mask].ravel()
write_double(fid, FIFF.FIFF_MNE_COV, vals)
# Eigenvalues and vectors if present
if cov['eig'] is not None and cov['eigvec'] is not None:
write_float_matrix(fid, FIFF.FIFF_MNE_COV_EIGENVECTORS, cov['eigvec'])
write_double(fid, FIFF.FIFF_MNE_COV_EIGENVALUES, cov['eig'])
# Projection operator
if cov['projs'] is not None and len(cov['projs']) > 0:
_write_proj(fid, cov['projs'])
# Bad channels
if cov['bads'] is not None and len(cov['bads']) > 0:
start_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
write_name_list(fid, FIFF.FIFF_MNE_CH_NAME_LIST, cov['bads'])
end_block(fid, FIFF.FIFFB_MNE_BAD_CHANNELS)
# estimator method
if 'method' in cov:
write_string(fid, FIFF.FIFF_MNE_COV_METHOD, cov['method'])
# negative log-likelihood score
if 'loglik' in cov:
write_double(
fid, FIFF.FIFF_MNE_COV_SCORE, np.array(cov['loglik']))
# Done!
end_block(fid, FIFF.FIFFB_MNE_COV)
|
rkmaddox/mne-python
|
mne/cov.py
|
Python
|
bsd-3-clause
| 79,191
|
[
"Gaussian"
] |
52cf8276a909f1c49c5902078aa4493a49aaa9bb9f0febe2843519cbea6060bf
|
#!/usr/bin/env python
#JSON {"lot": "UKS/6-31G*",
#JSON "scf": "CDIISSCFSolver",
#JSON "linalg": "CholeskyLinalgFactory",
#JSON "difficulty": 6,
#JSON "description": "UKS DFT example with LDA and numerical Hartree"}
from horton import *
# Load the coordinates from file.
# Use the XYZ file from HORTON's test data directory.
fn_xyz = context.get_fn('test/methyl.xyz')
mol = IOData.from_file(fn_xyz)
# Create a Gaussian basis set
obasis = get_gobasis(mol.coordinates, mol.numbers, '6-31g*')
# Create a linalg factory
lf = DenseLinalgFactory(obasis.nbasis)
# Compute Gaussian integrals (not the ERI!)
olp = obasis.compute_overlap(lf)
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, lf)
# Define a numerical integration grid needed for the XC functionals. The
# mode='keep' option is needed for the numerical Becke-Poisson solver.
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, mode='keep')
# Create alpha and beta orbital expansions
exp_alpha = lf.create_expansion()
exp_beta = lf.create_expansion()
# Initial guess
guess_core_hamiltonian(olp, kin, na, exp_alpha, exp_beta)
# Construct the unrestricted Kohn-Sham (UKS) effective Hamiltonian
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
UTwoIndexTerm(kin, 'kin'),
UGridGroup(obasis, grid, [
UBeckeHartree(lmax=8),
ULibXCLDA('x'),
ULibXCLDA('c_vwn'),
]),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
# Decide how to occupy the orbitals (5 alpha electrons, 4 beta electrons)
occ_model = AufbauOccModel(5, 4)
# Converge WFN with CDIIS SCF
# - Construct the initial density matrix (needed for CDIIS).
occ_model.assign(exp_alpha, exp_beta)
dm_alpha = exp_alpha.to_dm()
dm_beta = exp_beta.to_dm()
# - SCF solver
scf_solver = CDIISSCFSolver(1e-6)
scf_solver(ham, lf, olp, occ_model, dm_alpha, dm_beta)
# Derive orbitals (coeffs, energies and occupations) from the Fock and density
# matrices. The energy is also computed to store it in the output file below.
fock_alpha = lf.create_two_index()
fock_beta = lf.create_two_index()
ham.reset(dm_alpha, dm_beta)
ham.compute_energy()
ham.compute_fock(fock_alpha, fock_beta)
exp_alpha.from_fock_and_dm(fock_alpha, dm_alpha, olp)
exp_beta.from_fock_and_dm(fock_beta, dm_beta, olp)
# Assign results to the molecule object and write it to a file, e.g. for
# later analysis. Note that the CDIIS algorithm can only really construct an
# optimized density matrix and no orbitals.
mol.title = 'UKS computation on methyl'
mol.energy = ham.cache['energy']
mol.obasis = obasis
mol.exp_alpha = exp_alpha
mol.exp_beta = exp_beta
mol.dm_alpha = dm_alpha
mol.dm_beta = dm_beta
# useful for post-processing (results stored in double precision):
mol.to_file('methyl.h5')
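# Illustrative follow-up (not part of the original example): the HDF5 file
# written above could later be reloaded for post-processing, e.g.
#
#     mol_check = IOData.from_file('methyl.h5')
#     print mol_check.energy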
|
eustislab/horton
|
data/examples/hf_dft/uks_methyl_numlda.py
|
Python
|
gpl-3.0
| 2,789
|
[
"Gaussian"
] |
bf1cbd905c42925b394999f2df72085af94c68996d679febecde124088c09426
|
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.mail import send_mail, mail_managers, EmailMessage
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from OpenDataCatalog.contest.models import *
from datetime import datetime
def get_entries(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True)
    if 'sort' not in request.GET:
entries = entries.order_by('-vote_count')
return render_to_response('contest/entries.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_entries_table(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest)
    if 'sort' not in request.GET:
entries = entries.order_by('-vote_count')
return render_to_response('contest/entry_table.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_winners(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
entries = Entry.objects.filter(contest=contest, is_visible=True).order_by('-vote_count')
return render_to_response('contest/winners.html', {'contest': contest, 'entries': entries}, context_instance=RequestContext(request))
def get_rules(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
return render_to_response('contest/rules.html', {'contest': contest}, context_instance=RequestContext(request))
def get_entry(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
return render_to_response('contest/entry.html', {'contest': entry.contest, 'entry': entry}, context_instance=RequestContext(request))
#@login_required
def add_entry(request, contest_id=1):
contest = Contest.objects.get(pk=contest_id)
if request.method == 'POST':
form = EntryForm(request.POST)
form.contest = contest_id
if form.is_valid():
data = {
#"submitter": request.user.username,
"submit_date": datetime.now(),
"org_name": form.cleaned_data.get("org_name"),
"org_url": form.cleaned_data.get("org_url"),
"contact_person": form.cleaned_data.get("contact_person"),
"contact_phone": form.cleaned_data.get("contact_phone"),
"contact_email": form.cleaned_data.get("contact_email"),
"data_set": form.cleaned_data.get("data_set"),
"data_use": form.cleaned_data.get("data_use"),
"data_mission": form.cleaned_data.get("data_mission")
}
subject = 'OpenDataPhilly - Contest Submission'
user_email = form.cleaned_data.get("contact_email")
text_content = render_to_string('contest/submit_email.txt', data)
text_content_copy = render_to_string('contest/submit_email_copy.txt', data)
mail_managers(subject, text_content)
msg = EmailMessage(subject, text_content_copy, to=[user_email])
msg.send()
return render_to_response('contest/thanks.html', {'contest': contest}, context_instance=RequestContext(request))
else:
form = EntryForm()
return render_to_response('contest/submit_entry.html', {'contest': contest, 'form': form}, context_instance=RequestContext(request))
@login_required
def add_vote(request, entry_id):
entry = Entry.objects.get(pk=entry_id)
contest = entry.contest
user = User.objects.get(username=request.user)
if contest.user_can_vote(user):
new_vote = Vote(user=user, entry=entry)
new_vote.save()
entry.vote_count = entry.vote_set.count()
entry.save()
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>Thank you for your vote! You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.success(request, '<div style="font-weight:bold;">Your vote has been recorded.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
next_vote_date = contest.get_next_vote_date(user)
if next_vote_date > contest.end_date:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You will not be able to vote again before the end of the contest. <br><br>Please encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
else:
messages.error(request, '<div style="font-weight:bold;">You have already voted.</div>You may vote once per week, so come back and visit us again on ' + next_vote_date.strftime('%A, %b %d %Y, %I:%M%p') + '. <br><br>Until then, encourage others to visit <a href="/">OpenDataPhilly</a> and to join the race toward more open data!')
return redirect('/contest/?sort=vote_count')
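# Illustrative URLconf wiring for these views (hypothetical; the project's real
# urls.py may differ):
#
#     urlpatterns = patterns('OpenDataCatalog.contest.views',
#         url(r'^$', 'get_entries'),
#         url(r'^entry/(?P<entry_id>\d+)/$', 'get_entry'),
#         url(r'^entry/(?P<entry_id>\d+)/vote/$', 'add_vote'),
#     )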
|
azavea/Open-Data-Catalog
|
OpenDataCatalog/contest/views.py
|
Python
|
mit
| 5,512
|
[
"VisIt"
] |
a24d34326865a8c85ebbcc979b6756c834896e75ccea6570d3e60d0deb45df56
|
"""
Migration script to rename the sequencer information form type to external service information form
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
from sqlalchemy.exc import *
from galaxy.model.custom_types import *
import datetime
now = datetime.datetime.utcnow
import logging
log = logging.getLogger( __name__ )
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
current_form_type = 'Sequencer Information Form'
new_form_type = "External Service Information Form"
cmd = "update form_definition set type='%s' where type='%s'" % ( new_form_type, current_form_type )
migrate_engine.execute( cmd )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
new_form_type = 'Sequencer Information Form'
current_form_type = "External Service Information Form"
cmd = "update form_definition set type='%s' where type='%s'" % ( new_form_type, current_form_type )
migrate_engine.execute( cmd )
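# For reference, the upgrade step issues SQL equivalent to (illustrative):
#
#   UPDATE form_definition
#   SET type = 'External Service Information Form'
#   WHERE type = 'Sequencer Information Form';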
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0069_rename_sequencer_form_type.py
|
Python
|
gpl-3.0
| 1,102
|
[
"Galaxy"
] |
2761fe721d9f4756c74a6a9f81032751c4978531e176ab8757be50f2beef8d12
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from pprint import pprint
# Visit https://apps.twitter.com/ to obtain the data
#import credentials.py
import tweepy
import importlib.machinery
import sys
sys.path.insert(0, "secret")
try:
from credentials import *
except ImportError:
print('No Import')
auth = tweepy.OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
api = tweepy.API(auth)
pprint(api)
user = api.me()
pprint(user)
#api.update_status('Hello Python Central!')
public_tweets = api.home_timeline()
for tweet in public_tweets:
print(tweet.text)
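# A possible extension (illustrative): page through more of the home timeline
# with a Cursor instead of a single home_timeline() call.
#
#     for older_tweet in tweepy.Cursor(api.home_timeline).items(50):
#         print(older_tweet.text)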
|
davidam/python-examples
|
nlp/tweepy-example.py
|
Python
|
gpl-3.0
| 693
|
[
"VisIt"
] |
461a0f0815b7045efa89d080d43a44228c3a3b348107e147a2e95f79184f30f4
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
FIXME: Proper module docstring
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Nov 9, 2012"
import unittest
from pathlib import Path
from monty.serialization import loadfn, dumpfn
import os
from pymatgen.core.periodic_table import Element
from pymatgen.entries.entry_tools import group_entries_by_structure, EntrySet
test_dir = Path(__file__).absolute().parent / ".." / ".." / ".." / 'test_files'
class FuncTest(unittest.TestCase):
def test_group_entries_by_structure(self):
entries = loadfn(str(test_dir / "TiO2_entries.json"))
groups = group_entries_by_structure(entries)
self.assertEqual(sorted([len(g) for g in groups]),
[1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 4])
self.assertLess(len(groups), len(entries))
# Make sure no entries are left behind
self.assertEqual(sum([len(g) for g in groups]), len(entries))
class EntrySetTest(unittest.TestCase):
def setUp(self):
entries = loadfn(str(test_dir / "Li-Fe-P-O_entries.json"))
self.entry_set = EntrySet(entries)
def test_chemsys(self):
self.assertEqual(self.entry_set.chemsys, {'Fe', 'Li', 'O', 'P'})
def test_get_subset(self):
entries = self.entry_set.get_subset_in_chemsys(["Li", "O"])
for e in entries:
self.assertTrue(set([Element.Li, Element.O]).issuperset(e.composition.keys()))
self.assertRaises(ValueError, self.entry_set.get_subset_in_chemsys, ["Fe", "F"])
def test_remove_non_ground_states(self):
l = len(self.entry_set)
self.entry_set.remove_non_ground_states()
self.assertLess(len(self.entry_set), l)
def test_as_dict(self):
dumpfn(self.entry_set, "temp_entry_set.json")
entry_set = loadfn("temp_entry_set.json")
self.assertEqual(len(entry_set), len(self.entry_set))
os.remove("temp_entry_set.json")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
dongsenfo/pymatgen
|
pymatgen/entries/tests/test_entry_tools.py
|
Python
|
mit
| 2,233
|
[
"pymatgen"
] |
15cef8eb84ed387ac3e8d642ca0d5dd5c702206b4f66df179ba6af55854d3658
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from collections import OrderedDict
import inspect
import os
import re
import signal
from .utils import is_valid_type
try:
import cPickle as pickle
except ImportError:
import pickle
# Convenient Checkpointing for ESPResSo
class Checkpoint:
"""Checkpoint handling (reading and writing).
Parameters
----------
checkpoint_id : :obj:`str`
A string identifying a specific checkpoint.
checkpoint_path : :obj:`str`, optional
Path for reading and writing the checkpoint.
If not given, the CWD is used.
"""
def __init__(self, checkpoint_id=None, checkpoint_path="."):
# check if checkpoint_id is valid (only allow a-z A-Z 0-9 _ -)
if not isinstance(checkpoint_id, str) or bool(
re.compile(r"[^a-zA-Z0-9_\-]").search(checkpoint_id)):
raise ValueError("Invalid checkpoint id.")
if not isinstance(checkpoint_path, str):
raise ValueError("Invalid checkpoint path.")
self.checkpoint_objects = []
self.checkpoint_signals = []
frm = inspect.stack()[1]
self.calling_module = inspect.getmodule(frm[0])
checkpoint_path = os.path.join(checkpoint_path, checkpoint_id)
self.checkpoint_dir = os.path.realpath(checkpoint_path)
if not os.path.isdir(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
# update checkpoint counter
self.counter = 0
while os.path.isfile(os.path.join(
self.checkpoint_dir, f"{self.counter}.checkpoint")):
self.counter += 1
# init signals
for signum in self.read_signals():
self.register_signal(signum)
def __getattr_submodule(self, obj, name, default):
"""
Generalization of getattr(). __getattr_submodule(object,
"name1.sub1.sub2", None) will return attribute sub2 if available
otherwise None.
"""
names = name.split('.')
for i in range(len(names) - 1):
obj = getattr(obj, names[i], default)
return getattr(obj, names[-1], default)
def __setattr_submodule(self, obj, name, value):
"""
Generalization of setattr(). __setattr_submodule(object,
"name1.sub1.sub2", value) will set attribute sub2 to value. Will raise
exception if parent modules do not exist.
"""
names = name.split('.')
for i in range(len(names) - 1):
obj = getattr(obj, names[i], None)
if obj is None:
raise Exception(
"Cannot set attribute of non existing submodules: {}\nCheck the order you registered objects for checkpointing.".format(name))
setattr(obj, names[-1], value)
def __hasattr_submodule(self, obj, name):
"""
Generalization of hasattr(). __hasattr_submodule(object,
"name1.sub1.sub2") will return True if submodule sub1 has the attribute
sub2.
"""
names = name.split('.')
for i in range(len(names) - 1):
obj = getattr(obj, names[i], None)
return hasattr(obj, names[-1])
def register(self, *args):
"""Register python objects for checkpointing.
Parameters
----------
args : list of :obj:`str`
Names of python objects to be registered for checkpointing.
"""
for a in args:
if not isinstance(a, str):
raise ValueError(
"The object that should be checkpointed is identified with its name given as a string.")
# if not a in dir(self.calling_module):
if not self.__hasattr_submodule(self.calling_module, a):
raise KeyError(
f"The given object '{a}' was not found in the current scope.")
if a in self.checkpoint_objects:
raise KeyError(
f"The given object '{a}' is already registered for checkpointing.")
self.checkpoint_objects.append(a)
def unregister(self, *args):
"""Unregister python objects for checkpointing.
Parameters
----------
args : list of :obj:`str`
Names of python objects to be unregistered for checkpointing.
"""
for a in args:
if not isinstance(a, str) or a not in self.checkpoint_objects:
raise KeyError(
f"The given object '{a}' was not registered for checkpointing yet.")
self.checkpoint_objects.remove(a)
def get_registered_objects(self):
"""
Returns a list of all object names that are registered for
checkpointing.
"""
return self.checkpoint_objects
def has_checkpoints(self):
"""Check for checkpoints.
Returns
-------
:obj:`bool`
``True`` if any checkpoints exist that match ``checkpoint_id`` and
``checkpoint_path`` otherwise ``False``.
"""
return self.counter > 0
def get_last_checkpoint_index(self):
"""
Returns the last index of the given checkpoint id. Will raise exception
if no checkpoints are found.
"""
if not self.has_checkpoints():
raise Exception(
"No checkpoints found. Cannot return index for last checkpoint.")
return self.counter - 1
def save(self, checkpoint_index=None):
"""
Saves all registered python objects in the given checkpoint directory
using cPickle.
"""
# get attributes of registered objects
checkpoint_data = OrderedDict()
for obj_name in self.checkpoint_objects:
checkpoint_data[obj_name] = self.__getattr_submodule(
self.calling_module, obj_name, None)
if checkpoint_index is None:
checkpoint_index = self.counter
filename = os.path.join(
self.checkpoint_dir, f"{checkpoint_index}.checkpoint")
tmpname = filename + ".__tmp__"
with open(tmpname, "wb") as checkpoint_file:
pickle.dump(checkpoint_data, checkpoint_file, -1)
os.rename(tmpname, filename)
def load(self, checkpoint_index=None):
"""
Loads the python objects using (c)Pickle and sets them in the calling
module.
Parameters
----------
checkpoint_index : :obj:`int`, optional
If not given, the last ``checkpoint_index`` will be used.
"""
if checkpoint_index is None:
checkpoint_index = self.get_last_checkpoint_index()
filename = os.path.join(
self.checkpoint_dir, f"{checkpoint_index}.checkpoint")
with open(filename, "rb") as f:
checkpoint_data = pickle.load(f)
for key in checkpoint_data:
self.__setattr_submodule(
self.calling_module, key, checkpoint_data[key])
self.checkpoint_objects.append(key)
def __signal_handler(self, signum, frame): # pylint: disable=unused-argument
"""
Will be called when a registered signal was sent.
"""
self.save()
exit(signum)
def read_signals(self):
"""
Reads all registered signals from the signal file and returns a list of
integers.
"""
if not os.path.isfile(os.path.join(self.checkpoint_dir, "signals")):
return []
with open(os.path.join(self.checkpoint_dir, "signals"), "r") as signal_file:
signals = signal_file.readline().strip().split()
signals = [int(i)
for i in signals] # will raise exception if signal file contains invalid entries
return signals
def __write_signal(self, signum=None):
"""Writes the given signal integer signum to the signal file.
"""
signum = int(signum)
if not is_valid_type(signum, int):
raise ValueError("Signal must be an integer number.")
signals = self.read_signals()
if signum not in signals:
signals.append(signum)
signals = " ".join(str(i) for i in signals)
with open(os.path.join(self.checkpoint_dir, "signals"), "w") as signal_file:
signal_file.write(signals)
def register_signal(self, signum=None):
"""Register a signal that will trigger the signal handler.
Parameters
----------
signum : :obj:`int`
Signal to be registered.
"""
if not is_valid_type(signum, int):
raise ValueError("Signal must be an integer number.")
if signum in self.checkpoint_signals:
raise KeyError(
f"The signal {signum} is already registered for checkpointing.")
signal.signal(signum, self.__signal_handler)
self.checkpoint_signals.append(signum)
self.__write_signal(signum)
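# Minimal usage sketch (illustrative; assumes the calling script defines a
# variable named ``system`` that should be checkpointed):
#
#     from espressomd import checkpointing
#     checkpoint = checkpointing.Checkpoint(checkpoint_id="mycheckpoint")
#     checkpoint.register("system")
#     checkpoint.save()
#     # ...and in a later run:
#     if checkpoint.has_checkpoints():
#         checkpoint.load()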
|
fweik/espresso
|
src/python/espressomd/checkpointing.py
|
Python
|
gpl-3.0
| 9,661
|
[
"ESPResSo"
] |
112e4e6269c5c9cf0e608f411444cfa0ba2febdfcc54fcfb6f41c70ff2509fc9
|
#################################################################
# $HeadURL$
#################################################################
"""
Usage of ThreadPool
ThreadPool creates a pool of worker threads to process a queue of tasks
much like the producers/consumers paradigm. Users just need to fill the queue
with tasks to be executed and worker threads will execute them
To start working with the ThreadPool first it has to be instanced::
threadPool = ThreadPool( minThreads, maxThreads, maxQueuedRequests )
minThreads -> at all times no less than <minThreads> workers will be alive
maxThreads -> at all times no more than <maxThreads> workers will be alive
maxQueuedRequests -> No more than <maxQueuedRequests> can be waiting to be executed
                     If another request is added when the queue is full, the calling
                     thread will block until a request is taken out of the queue.
The ThreadPool will automatically increase and decrease the pool of workers as needed
To add requests to the queue::
threadPool.generateJobAndQueueIt( <functionToExecute>,
args = ( arg1, arg2, ... ),
oCallback = <resultCallbackFunction> )
or::
request = ThreadedJob( <functionToExecute>,
args = ( arg1, arg2, ... )
oCallback = <resultCallbackFunction> )
threadPool.queueJob( request )
The result callback and the parameters are optional arguments.
Once the requests have been added to the pool, they will be executed as soon as possible.
Worker threads automatically return the return value of the requests. To run the result callback
functions execute::
  threadPool.processResults()
This method will process the existing return values of the requests. Even if the requests do not return
anything, this method (or any of the result-processing methods) has to be called to clean the result queues.
To wait until all the requests are finished and process their results, call::
  threadPool.processAllResults()
This function will block until all requests are finished and their result values have been processed.
It is also possible to set the threadPool in auto processing results mode. It'll process the results as
soon as the requests have finished. To enable this mode call::
threadPool.daemonize()
"""
__RCSID__ = "$Id$"
import time
import sys
import Queue
import threading
try:
from DIRAC.FrameworkSystem.Client.Logger import gLogger
except:
gLogger = False
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
class WorkingThread( threading.Thread ):
def __init__( self, oPendingQueue, oResultsQueue, **kwargs ):
threading.Thread.__init__( self, **kwargs )
self.setDaemon( 1 )
self.__pendingQueue = oPendingQueue
self.__resultsQueue = oResultsQueue
self.__threadAlive = True
self.__working = False
self.start()
def isWorking( self ):
return self.__working
def kill( self ):
self.__threadAlive = False
def run( self ):
while self.__threadAlive:
oJob = self.__pendingQueue.get( block = True )
if not self.__threadAlive:
self.__pendingQueue.put( oJob )
break
self.__working = True
oJob.process()
self.__working = False
if oJob.hasCallback():
self.__resultsQueue.put( oJob, block = True )
class ThreadedJob:
def __init__( self,
oCallable,
args = None,
kwargs = None,
sTJId = None,
oCallback = None,
oExceptionCallback = None ):
self.__jobFunction = oCallable
self.__jobArgs = args or []
self.__jobKwArgs = kwargs or {}
self.__tjID = sTJId
self.__resultCallback = oCallback
self.__exceptionCallback = oExceptionCallback
self.__done = False
self.__exceptionRaised = False
self.__jobResult = None
self.__jobException = None
def __showException( self, threadedJob, exceptionInfo ):
if gLogger:
gLogger.exception( "Exception in thread", lExcInfo = exceptionInfo )
def jobId( self ):
return self.__tjID
def hasCallback( self ):
return self.__resultCallback or self.__exceptionCallback
def exceptionRaised( self ):
return self.__exceptionRaised
def doExceptionCallback( self ):
if self.__done and self.__exceptionRaised and self.__exceptionCallback:
self.__exceptionCallback( self, self.__jobException )
def doCallback( self ):
if self.__done and not self.__exceptionRaised and self.__resultCallback:
self.__resultCallback( self, self.__jobResult )
def process( self ):
self.__done = True
try:
self.__jobResult = self.__jobFunction( *self.__jobArgs, **self.__jobKwArgs )
except Exception as lException:
self.__exceptionRaised = True
if not self.__exceptionCallback:
if gLogger:
gLogger.exception( "Exception in thread", lException = lException )
else:
self.__jobException = sys.exc_info()
class ThreadPool( threading.Thread ):
def __init__( self, iMinThreads, iMaxThreads = 0, iMaxQueuedRequests = 0, strictLimits = True ):
threading.Thread.__init__( self )
if iMinThreads < 1:
self.__minThreads = 1
else:
self.__minThreads = iMinThreads
if iMaxThreads < self.__minThreads:
self.__maxThreads = self.__minThreads
else:
self.__maxThreads = iMaxThreads
self.__strictLimits = strictLimits
self.__pendingQueue = Queue.Queue( iMaxQueuedRequests )
self.__resultsQueue = Queue.Queue( iMaxQueuedRequests + iMaxThreads )
self.__workingThreadsList = []
self.__spawnNeededWorkingThreads()
def getMaxThreads( self ):
return self.__maxThreads
def getMinThreads( self ):
return self.__minThreads
def numWorkingThreads( self ):
return self.__countWorkingThreads()
def numWaitingThreads( self ):
return self.__countWaitingThreads()
def __spawnWorkingThread( self ):
self.__workingThreadsList.append( WorkingThread( self.__pendingQueue, self.__resultsQueue ) )
def __killWorkingThread( self ):
if self.__strictLimits:
for i in range( len( self.__workingThreadsList ) ):
wT = self.__workingThreadsList[i]
if not wT.isWorking():
wT.kill()
del self.__workingThreadsList[i]
break
else:
self.__workingThreadsList[0].kill()
del self.__workingThreadsList[0]
def __countWaitingThreads( self ):
iWaitingThreads = 0
for oWT in self.__workingThreadsList:
if not oWT.isWorking():
iWaitingThreads += 1
return iWaitingThreads
def __countWorkingThreads( self ):
iWorkingThreads = 0
for oWT in self.__workingThreadsList:
if oWT.isWorking():
iWorkingThreads += 1
return iWorkingThreads
def __spawnNeededWorkingThreads( self ):
while len( self.__workingThreadsList ) < self.__minThreads:
self.__spawnWorkingThread()
while self.__countWaitingThreads() == 0 and \
len( self.__workingThreadsList ) < self.__maxThreads:
self.__spawnWorkingThread()
def __killExceedingWorkingThreads( self ):
threadsToKill = len( self.__workingThreadsList ) - self.__maxThreads
for i in range ( max( threadsToKill, 0 ) ):
self.__killWorkingThread()
threadsToKill = self.__countWaitingThreads() - self.__minThreads
for i in range ( max( threadsToKill, 0 ) ):
self.__killWorkingThread()
def queueJob( self, oTJob, blocking = True ):
if not isinstance( oTJob, ThreadedJob ):
raise TypeError( "Jobs added to the thread pool must be ThreadedJob instances" )
try:
self.__pendingQueue.put( oTJob, block = blocking )
except Queue.Full:
return S_ERROR( "Queue is full" )
return S_OK()
def generateJobAndQueueIt( self,
oCallable,
args = None,
kwargs = None,
sTJId = None,
oCallback = None,
oExceptionCallback = None,
blocking = True ):
oTJ = ThreadedJob( oCallable, args, kwargs, sTJId, oCallback, oExceptionCallback )
return self.queueJob( oTJ, blocking )
def pendingJobs( self ):
return self.__pendingQueue.qsize()
def isFull( self ):
return self.__pendingQueue.full()
def isWorking( self ):
return not self.__pendingQueue.empty() or self.__countWorkingThreads()
def processResults( self ):
iProcessed = 0
while True:
self.__spawnNeededWorkingThreads()
if self.__resultsQueue.empty():
self.__killExceedingWorkingThreads()
break
oJob = self.__resultsQueue.get()
oJob.doExceptionCallback()
oJob.doCallback()
iProcessed += 1
self.__killExceedingWorkingThreads()
return iProcessed
def processAllResults( self ):
while not self.__pendingQueue.empty() or self.__countWorkingThreads():
self.processResults()
time.sleep( 0.1 )
self.processResults()
def daemonize( self ):
self.setDaemon( 1 )
self.start()
#This is the ThreadPool threaded function. YOU ARE NOT SUPPOSED TO CALL THIS FUNCTION!!!
def run( self ):
while True:
self.processResults()
time.sleep( 1 )
gThreadPool = False
def getGlobalThreadPool():
global gThreadPool
if not gThreadPool:
gThreadPool = ThreadPool( 1, 500 )
gThreadPool.daemonize()
return gThreadPool
if __name__ == "__main__":
import random
def doSomething( iNumber ):
time.sleep( random.randint( 1, 5 ) )
fResult = random.random() * iNumber
if fResult > 3:
raise Exception( "TEST EXCEPTION" )
return fResult
def showResult( oTJ, fResult ):
print "Result %s from %s" % ( fResult, oTJ )
def showException( oTJ, exc_info ):
print "Exception %s from %s" % ( exc_info[1], oTJ )
OTP = ThreadPool( 5, 10 )
def generateWork( iWorkUnits ):
for iNumber in [ random.randint( 1, 20 ) for uNothing in range( iWorkUnits ) ]:
oTJ = ThreadedJob( doSomething,
args = ( iNumber, ),
oCallback = showResult,
oExceptionCallback = showException )
OTP.queueJob( oTJ )
print 'MaxThreads =', OTP.getMaxThreads()
print 'MinThreads =', OTP.getMinThreads()
generateWork( 30 )
while True:
time.sleep( 1 )
gIResult = OTP.processResults()
gINew = gIResult + random.randint( -3, 2 )
print "Processed %s, generating %s.." % ( gIResult, gINew )
generateWork( gINew )
print "Threads %s" % OTP.numWorkingThreads(), OTP.pendingJobs()
|
Andrew-McNab-UK/DIRAC
|
Core/Utilities/ThreadPool.py
|
Python
|
gpl-3.0
| 11,411
|
[
"DIRAC"
] |
8d638e2cebd563fa11d99335992d21e8a5f915f3118be927fdba7b2b00cafc5b
|
# -*- coding: utf-8 -*-
# Test scripts are always called from BUILD directory by cmake.
import os
import sys
import numpy as np
import time
import test_difshells as td
import chan_proto
import param_chan
import moose
print('Using moose from %s' % moose.__file__ )
difshell_no = 3
difbuf_no = 0
script_dir_ = os.path.dirname( os.path.abspath( __file__ ) )
p_file = os.path.join( script_dir_, "soma.p" )
cond = {'CaL12':30*0.35e-5, 'SK':0.5*0.35e-6}
def assert_stat( vec, expected ):
min_, max_ = np.min( vec ), np.max( vec )
mean, std = np.mean( vec ), np.std( vec )
computed = [ min_, max_, mean, std ]
assert np.allclose( computed, expected ), \
"Got %s expected %s" % (computed, expected)
def test_hsolve_calcium():
for tick in range(0, 7):
moose.setClock(tick,10e-6)
moose.setClock(8, 0.005)
lib = moose.Neutral('/library')
model = moose.loadModel(p_file,'neuron')
pulse = moose.PulseGen('/neuron/pulse')
inject = 100e-10
chan_proto.chan_proto('/library/SK',param_chan.SK)
chan_proto.chan_proto('/library/CaL12',param_chan.Cal)
pulse.delay[0] = 8.
pulse.width[0] = 500e-12
pulse.level[0] = inject
pulse.delay[1] = 1e9
for comp in moose.wildcardFind('/neuron/#[TYPE=Compartment]'):
new_comp = moose.element(comp)
new_comp.initVm = -.08
difs, difb = td.add_difshells_and_buffers(new_comp,difshell_no,difbuf_no)
for name in cond:
chan = td.addOneChan(name,cond[name],new_comp)
if 'Ca' in name:
moose.connect(chan, "IkOut", difs[0], "influx")
if 'SK' in name:
moose.connect(difs[0], 'concentrationOut', chan, 'concen')
data = moose.Neutral('/data')
vmtab = moose.Table('/data/Vm')
shelltab = moose.Table('/data/Ca')
caltab = moose.Table('/data/CaL_Gk')
sktab = moose.Table('/data/SK_Gk')
    moose.connect(vmtab, 'requestOut', moose.element('/neuron/soma'), 'getVm')
    moose.connect(shelltab, 'requestOut', difs[0], 'getC')
    moose.connect(caltab, 'requestOut', moose.element('/neuron/soma/CaL12'), 'getGk')
    moose.connect(sktab, 'requestOut', moose.element('/neuron/soma/SK'), 'getGk')
hsolve = moose.HSolve('/neuron/hsolve')
hsolve.dt = 10e-6
    hsolve.target = '/neuron/soma'
t_stop = 10.
moose.reinit()
moose.start(t_stop)
vec1 = sktab.vector
vec2 = shelltab.vector
assert_stat( vec1, [ 0.0, 5.102834e-22, 4.79066e-22, 2.08408e-23 ] )
assert_stat( vec2, [ 5.0e-5, 5.075007e-5, 5.036985e-5, 2.1950117e-7] )
assert len(np.where(sktab.vector<1e-19)[0]) == 2001
assert len(np.where(shelltab.vector>50e-6)[0]) == 2000
def main():
test_hsolve_calcium()
if __name__ == '__main__':
main()
|
dilawar/moose-core
|
tests/core/test_hsolve_externalCalcium.py
|
Python
|
gpl-3.0
| 2,762
|
[
"MOOSE",
"NEURON"
] |
3812899b0f5ae3b223ac6fffa368664c9d45198323e2451b432bacf358ae5a5b
|
import functools
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import DictCache
from inspect import getcallargs
from DIRAC.Core.DISET.RPCClient import RPCClient
__remoteMethods__ = []
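# Decorator for OAToken methods: run against the local DB when local access is
# available, otherwise forward the call to the remote OATokenStore service over RPC.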
def RemoteMethod( method ):
__remoteMethods__.append( method.__name__ )
@functools.wraps( method )
def wrapper( self, *args, **kwargs ):
if self.localAccess:
if not self.dbOK:
return S_ERROR( "Could not connect to the database" )
return method( self, *args, **kwargs )
rpc = self._getTokenStoreClient()
fName = method.__name__
return getattr( rpc, fName )( ( args, kwargs ) )
wrapper.__sneakybastard__ = method
return wrapper
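# Method decorator that caches successful (S_OK) results in a shared, named
# DictCache, keyed either by a single named argument or by all call arguments.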
class Cache( object ):
__caches = {}
def __init__( self, cName, atName = False, cacheTime = 300 ):
if cName not in self.__caches:
try:
self.__caches[ cName ] = DictCache()
except:
self.__caches[ cName ] = DictCache.DictCache()
self.__cache = self.__caches[ cName ]
self.__atName = atName
self.__cacheTime = cacheTime
@classmethod
def getCache( cls, cName ):
return cls.__caches[ cName ]
def __call__( self, method ):
def wrapped( rSelf, *args, **kwargs ):
try:
rMethod = method.__sneakybastard__
except AttributeError:
rMethod = method
fArgs = getcallargs( rMethod, rSelf, *args, **kwargs )
if self.__atName:
cKey = fArgs[ self.__atName ]
else:
cKey = tuple( str( fArgs[k] ) for k in sorted( fArgs ) if k != 'self' )
value = self.__cache.get( cKey )
if value:
return value
value = rMethod( **fArgs )
if not value[ 'OK' ]:
return value
self.__cache.add( cKey, self.__cacheTime, value )
return value
return wrapped
class OAToken( object ):
class DBHold:
def __init__( self ):
self.checked = False
self.reset()
def reset( self ):
self.token = False
__db = DBHold()
try:
__cache = DictCache()
except:
__cache = DictCache.DictCache()
_sDisableLocal = False
def __init__( self, forceLocal = False, getRPCFunctor = False ):
self.__forceLocal = forceLocal
if getRPCFunctor:
self.__getRPCFunctor = getRPCFunctor
else:
self.__getRPCFunctor = RPCClient
    # Initialize the local DB connection if it is available
if not OAToken.__db.checked:
OAToken.__db.checked = True
for varName, dbName in ( ( 'token', 'OATokenDB' ), ):
try:
dbImp = "RESTDIRAC.RESTSystem.DB.%s" % dbName
dbMod = __import__( dbImp, fromlist = [ dbImp ] )
dbClass = getattr( dbMod, dbName )
dbInstance = dbClass()
setattr( OAToken.__db, varName, dbInstance )
result = dbInstance._getConnection()
if not result[ 'OK' ]:
gLogger.warn( "Could not connect to %s (%s). Resorting to RPC" % ( dbName, result[ 'Message' ] ) )
OAToken.__db.reset()
break
else:
result[ 'Value' ].close()
except ( ImportError, RuntimeError ), excp:
gLogger.exception( "" )
if self.__forceLocal:
raise
OAToken.__db.reset()
break
@property
def localAccess( self ):
if OAToken._sDisableLocal:
return False
if OAToken.__db.token or self.__forceLocal:
return True
return False
@property
def dbOK( self ):
if OAToken.__db.token:
return True
return False
def __getDB( self ):
db = OAToken.__db.token
if db:
return db
#TODO: Return proper thing to generate S_ERROR
return db
def _getTokenStoreClient( self ):
return self.__getRPCFunctor( "REST/OATokenStore" )
#Client creation
@Cache( 'client', 'name' )
@RemoteMethod
def registerClient( self, name, redirect, url, icon ):
return self.__getDB().registerClient( name, redirect, url, icon )
@Cache( 'client' )
@RemoteMethod
def getClientDataByID( self, cid ):
return self.__getDB().getClientDataByID( cid )
@Cache( 'client' )
@RemoteMethod
def getClientDataByName( self, name ):
return self.__getDB().getClientDataByName( name )
@RemoteMethod
def getClientsData( self, condDict = None ):
return self.__getDB().getClientsData( condDict )
@RemoteMethod
def deleteClientByID( self, cid ):
return self.__getDB().deleteClientByID( cid )
@RemoteMethod
def deleteClientByName( self, name ):
return self.__getDB().deleteClientByName( name )
#Codes
@RemoteMethod
def generateCode( self, cid, userDN, userGroup, userSetup, lifeTime, scope = "", redirect = "" ):
return self.__getDB().generateCode( cid, userDN, userGroup, userSetup, lifeTime, scope, redirect )
@RemoteMethod
def getCodeData( self, code ):
return self.__getDB().getCodeData( code )
@RemoteMethod
def deleteCode( self, code ):
return self.__getDB().deleteCode( code )
#Tokens
@RemoteMethod
def generateTokenFromCode( self, cid, code, redirect = False, secret = False, renewable = True ):
return self.__getDB().generateTokenFromCode( cid, code, redirect, secret, renewable )
@RemoteMethod
def generateToken( self, user, group, setup, scope = "", cid = False, secret = False, renewable = True, lifeTime = 86400 ):
return self.__getDB().generateToken( user, group, setup, scope, cid, secret, renewable, lifeTime )
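  # Look the token up in the 'token' cache; on a miss fetch all tokens once,
  # warm the cache with them and resolve the requested token from that result.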
def getCachedToken( self, token ):
cacheDict = Cache.getCache( 'token' )
cKey = ( token, )
value = cacheDict.get( cKey )
if value:
return value
result = self.getTokensData( {} )
if not result[ 'OK' ]:
return result
tokenData = result[ 'Value' ]
for tokenKey in tokenData:
cacheDict.add( ( tokenKey, ), 300, S_OK( tokenData[ tokenKey ] ) )
if token in tokenData:
return S_OK( tokenData[ token ] )
return S_ERROR( "Unknown token" )
@Cache( 'token' )
@RemoteMethod
def getTokenData( self, token ):
return self.__getDB().getTokenData( token )
@RemoteMethod
def getTokensData( self, condDict ):
return self.__getDB().getTokensData( condDict )
@RemoteMethod
def revokeToken( self, token ):
return self.__getDB().revokeToken( token )
@RemoteMethod
def revokeTokens( self, condDict ):
return self.__getDB().revokeTokens( condDict )
|
DIRACGrid/RESTDIRAC
|
RESTSystem/Client/OAToken.py
|
Python
|
gpl-3.0
| 6,296
|
[
"DIRAC"
] |
35d0f55eabb70c2bf92b75e4cec4f2d5ae93b3e48f26c72f22711ef0956ec933
|
# license-expression is a free software tool from nexB Inc. and others.
# Visit https://github.com/nexB/license-expression for support and download.
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and http://aboutcode.org
#
# This software is licensed under the Apache License version 2.0.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
from unittest import TestCase
import sys
from boolean.boolean import PARSE_UNBALANCED_CLOSING_PARENS
from boolean.boolean import PARSE_INVALID_SYMBOL_SEQUENCE
from license_expression import PARSE_INVALID_EXPRESSION
from license_expression import PARSE_INVALID_NESTING
from license_expression import PARSE_INVALID_EXCEPTION
from license_expression import PARSE_INVALID_SYMBOL_AS_EXCEPTION
from license_expression import ExpressionError
from license_expression import Keyword
from license_expression import Licensing
from license_expression import LicenseExpression
from license_expression import LicenseSymbol
from license_expression import LicenseWithExceptionSymbol
from license_expression import ParseError
from license_expression import Result
from license_expression import Output
from license_expression import group_results_for_with_subexpression
from license_expression import splitter
from license_expression import strip_and_skip_spaces
from license_expression import validate_symbols
from license_expression import TOKEN_AND
from license_expression import TOKEN_LPAR
from license_expression import TOKEN_OR
from license_expression import TOKEN_RPAR
from license_expression import TOKEN_SYMBOL
from license_expression import TOKEN_WITH
def _parse_error_as_dict(pe):
"""
Return a dict for a ParseError.
"""
return dict(
token_type=pe.token_type,
token_string=pe.token_string,
position=pe.position,
error_code=pe.error_code,
)
class LicenseSymbolTest(TestCase):
def test_LicenseSymbol(self):
sym1 = LicenseSymbol('MIT', ['MIT license'])
assert sym1 == sym1
assert 'MIT' == sym1.key
assert ('MIT license',) == sym1.aliases
sym2 = LicenseSymbol('mit', ['MIT license'])
assert 'mit' == sym2.key
assert ('MIT license',) == sym2.aliases
assert not sym2.is_exception
assert sym1 != sym2
assert sym1 is not sym2
sym3 = LicenseSymbol('mit', ['MIT license'], is_exception=True)
assert 'mit' == sym3.key
assert ('MIT license',) == sym3.aliases
assert sym3.is_exception
assert sym2 != sym3
sym4 = LicenseSymbol('mit', ['MIT license'])
assert 'mit' == sym4.key
assert ('MIT license',) == sym4.aliases
# symbol equality is based ONLY on the key
assert sym2 == sym4
assert sym1 != sym4
sym5 = LicenseWithExceptionSymbol(sym2, sym3)
assert sym2 == sym5.license_symbol
assert sym3 == sym5.exception_symbol
sym6 = LicenseWithExceptionSymbol(sym4, sym3)
        # symbol equality is based ONLY on the key
assert sym5 == sym6
class LicensingTest(TestCase):
def test_Licensing_create(self):
Licensing()
Licensing(None)
Licensing(list())
class LicensingTokenizeWithoutSymbolsTest(TestCase):
def test_tokenize_plain1(self):
licensing = Licensing()
expected = [
(TOKEN_LPAR, '(', 1),
(LicenseSymbol(key='mit'), 'mit', 3),
(TOKEN_RPAR, ')', 7),
(TOKEN_AND, 'and', 9),
(LicenseSymbol(key='gpl'), 'gpl', 13)
]
assert expected == list(licensing.tokenize(' ( mit ) and gpl'))
def test_tokenize_plain2(self):
licensing = Licensing()
expected = [
(TOKEN_LPAR, '(', 0),
(LicenseSymbol(key='mit'), 'mit', 1),
(TOKEN_AND, 'and', 5),
(LicenseSymbol(key='gpl'), 'gpl', 9),
(TOKEN_RPAR, ')', 12)
]
assert expected == list(licensing.tokenize('(mit and gpl)'))
def test_tokenize_plain3(self):
licensing = Licensing()
expected = [
(LicenseSymbol(key='mit'), 'mit', 0),
(TOKEN_AND, 'AND', 4),
(LicenseSymbol(key='gpl'), 'gpl', 8),
(TOKEN_OR, 'or', 12),
(LicenseSymbol(key='gpl'), 'gpl', 15)
]
assert expected == list(licensing.tokenize('mit AND gpl or gpl'))
def test_tokenize_plain4(self):
licensing = Licensing()
expected = [
(TOKEN_LPAR, '(', 0),
(TOKEN_LPAR, '(', 1),
(LicenseSymbol(key=u'l-a+'), u'l-a+', 2),
(TOKEN_AND, 'AND', 7),
(LicenseSymbol(key=u'l-b'), u'l-b', 11),
(TOKEN_RPAR, ')', 14),
(TOKEN_OR, 'OR', 16),
(TOKEN_LPAR, '(', 19),
(LicenseSymbol(key='l-c+'), 'l-c+', 20),
(TOKEN_RPAR, ')', 24),
(TOKEN_RPAR, ')', 25)
]
assert expected == list(licensing.tokenize('((l-a+ AND l-b) OR (l-c+))'))
def test_tokenize_plain5(self):
licensing = Licensing()
expected = [
(TOKEN_LPAR, '(', 0),
(TOKEN_LPAR, '(', 1),
(LicenseSymbol(key='l-a+'), 'l-a+', 2),
(TOKEN_AND, 'AND', 7),
(LicenseSymbol(key='l-b'), 'l-b', 11),
(TOKEN_RPAR, ')', 14),
(TOKEN_OR, 'OR', 16),
(TOKEN_LPAR, '(', 19),
(LicenseSymbol(key='l-c+'), 'l-c+', 20),
(TOKEN_RPAR, ')', 24),
(TOKEN_RPAR, ')', 25),
(TOKEN_AND, 'and', 27),
(LicenseWithExceptionSymbol(
license_symbol=LicenseSymbol(key='gpl'),
exception_symbol=LicenseSymbol(key='classpath')),
'gpl with classpath', 31
)
]
assert expected == list(licensing.tokenize('((l-a+ AND l-b) OR (l-c+)) and gpl with classpath'))
class LicensingTokenizeWithSymbolsTest(TestCase):
def get_symbols_and_licensing(self):
gpl_20 = LicenseSymbol('GPL-2.0', ['The GNU GPL 20'])
gpl_20_plus = LicenseSymbol('gpl-2.0+',
['The GNU GPL 20 or later', 'GPL-2.0 or later', 'GPL v2.0 or later'])
lgpl_21 = LicenseSymbol('LGPL-2.1', ['LGPL v2.1'])
mit = LicenseSymbol('MIT', ['MIT license'])
symbols = [gpl_20, gpl_20_plus, lgpl_21, mit]
licensing = Licensing(symbols)
return gpl_20, gpl_20_plus, lgpl_21, mit, licensing
def test_tokenize_1(self):
gpl_20, _gpl_20_plus, lgpl_21, mit, licensing = self.get_symbols_and_licensing()
result = licensing.tokenize('The GNU GPL 20 or LGPL-2.1 and mit')
expected = [
(gpl_20, 'The GNU GPL 20', 0),
(TOKEN_OR, ' or ', 14),
(lgpl_21, 'LGPL-2.1', 18),
(TOKEN_AND, ' and ', 26),
(mit, 'mit', 31)]
assert expected == list(result)
def test_tokenize_with_trailing_unknown(self):
gpl_20, _gpl_20_plus, lgpl_21, mit, licensing = self.get_symbols_and_licensing()
result = licensing.tokenize('The GNU GPL 20 or LGPL-2.1 and mit2')
expected = [
(gpl_20, 'The GNU GPL 20', 0),
(TOKEN_OR, ' or ', 14),
(lgpl_21, 'LGPL-2.1', 18),
(TOKEN_AND, ' and ', 26),
(mit, 'mit', 31),
(LicenseSymbol(key='2'), '2', 34)
]
assert expected == list(result)
def test_tokenize_3(self):
gpl_20, gpl_20_plus, lgpl_21, mit, licensing = self.get_symbols_and_licensing()
result = licensing.tokenize('The GNU GPL 20 or later or (LGPL-2.1 and mit) or The GNU GPL 20 or mit')
expected = [
(gpl_20_plus, 'The GNU GPL 20 or later', 0),
(TOKEN_OR, ' or ', 23),
(TOKEN_LPAR, '(', 27),
(lgpl_21, 'LGPL-2.1', 28),
(TOKEN_AND, ' and ', 36),
(mit, 'mit', 41),
(TOKEN_RPAR, ')', 44),
(TOKEN_OR, ' or ', 45),
            (gpl_20, 'The GNU GPL 20', 49),
            (TOKEN_OR, ' or ', 63),
(mit, 'mit', 67)
]
assert expected == list(result)
def test_tokenize_unknown_as_trailing_single_attached_character(self):
symbols = [LicenseSymbol('MIT', ['MIT license'])]
l = Licensing(symbols)
result = list(l.tokenize('mit2'))
expected = [
(LicenseSymbol(key='MIT', aliases=('MIT license',)), 'mit', 0),
(LicenseSymbol(key='2'), '2', 3),
]
assert expected == result
class LicensingParseTest(TestCase):
def test_parse_does_not_raise_error_for_empty_expression(self):
licensing = Licensing()
assert None == licensing.parse('')
def test_parse(self):
expression = ' ( (( gpl and bsd ) or lgpl) and gpl-exception) '
expected = '((gpl AND bsd) OR lgpl) AND gpl-exception'
licensing = Licensing()
self.assertEqual(expected, str(licensing.parse(expression)))
def test_parse_raise_ParseError(self):
expression = ' ( (( gpl and bsd ) or lgpl) and gpl-exception)) '
licensing = Licensing()
try:
licensing.parse(expression)
self.fail('ParseError should be raised')
except ParseError as pe:
expected = {'error_code': PARSE_UNBALANCED_CLOSING_PARENS, 'position': 48, 'token_string': ')', 'token_type': TOKEN_RPAR}
assert expected == _parse_error_as_dict(pe)
def test_parse_raise_ExpressionError_when_validating(self):
expression = 'gpl and bsd or lgpl with exception'
licensing = Licensing()
try:
licensing.parse(expression, validate=True)
except ExpressionError as ee:
assert 'Unknown license key(s): gpl, bsd, lgpl, exception' == str(ee)
def test_parse_raise_ExpressionError_when_validating_strict(self):
expression = 'gpl and bsd or lgpl with exception'
licensing = Licensing()
try:
licensing.parse(expression, validate=True, strict=True)
except ExpressionError as ee:
assert str(ee).startswith('exception_symbol must be an exception with "is_exception" set to True:')
def test_parse_in_strict_mode_for_solo_symbol(self):
expression = 'lgpl'
licensing = Licensing()
licensing.parse(expression, strict=True)
def test_parse_invalid_expression_raise_expression(self):
licensing = Licensing()
expr = 'wrong'
licensing.parse(expr)
expr = 'l-a AND none'
licensing.parse(expr)
expr = '(l-a + AND l-b'
try:
licensing.parse(expr)
self.fail("Exception not raised when validating '%s'" % expr)
except ParseError:
pass
expr = '(l-a + AND l-b))'
try:
licensing.parse(expr)
self.fail("Exception not raised when validating '%s'" % expr)
except ParseError:
pass
expr = 'l-a AND'
try:
licensing.parse(expr)
self.fail("Exception not raised when validating '%s'" % expr)
except ParseError:
pass
expr = 'OR l-a'
try:
licensing.parse(expr)
self.fail("Exception not raised when validating '%s'" % expr)
except ParseError:
pass
expr = '+l-a'
licensing.parse(expr)
def test_parse_can_parse(self):
licensing = Licensing()
expr = ' GPL-2.0 or LGPL2.1 and mit '
parsed = licensing.parse(expr)
gpl2 = LicenseSymbol('GPL-2.0')
lgpl = LicenseSymbol('LGPL2.1')
mit = LicenseSymbol('mit')
expected = [gpl2, lgpl, mit]
self.assertEqual(expected, licensing.license_symbols(parsed))
self.assertEqual(expected, licensing.license_symbols(expr))
self.assertEqual('GPL-2.0 OR (LGPL2.1 AND mit)', str(parsed))
expected = licensing.OR(gpl2, licensing.AND(lgpl, mit))
assert expected == parsed
def test_parse_errors_catch_invalid_nesting(self):
licensing = Licensing()
try:
licensing.parse('mit (and LGPL 2.1)')
self.fail('Exception not raised')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_NESTING, 'position': 4, 'token_string': '(', 'token_type': TOKEN_LPAR}
assert expected == _parse_error_as_dict(pe)
def test_parse_errors_catch_invalid_expression_with_bare_and(self):
licensing = Licensing()
try:
licensing.parse('and')
self.fail('Exception not raised')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_EXPRESSION, 'position':-1, 'token_string': '', 'token_type': None}
assert expected == _parse_error_as_dict(pe)
def test_parse_errors_catch_invalid_expression_with_or_and_no_other(self):
licensing = Licensing()
try:
licensing.parse('or that')
self.fail('Exception not raised')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_EXPRESSION, 'position':-1, 'token_string': '', 'token_type': None}
assert expected == _parse_error_as_dict(pe)
def test_parse_errors_catch_invalid_expression_with_empty_parens(self):
licensing = Licensing()
try:
licensing.parse('with ( )this')
self.fail('Exception not raised')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_EXPRESSION, 'position': 0, 'token_string': 'with', 'token_type': TOKEN_WITH}
assert expected == _parse_error_as_dict(pe)
def test_parse_errors_catch_invalid_non_unicode_byte_strings_on_python3(self):
py2 = sys.version_info[0] == 2
py3 = sys.version_info[0] == 3
licensing = Licensing()
if py2:
extra_bytes = bytes(chr(0) + chr(12) + chr(255))
try:
licensing.parse('mit (and LGPL 2.1)'.encode('utf-8') + extra_bytes)
self.fail('Exception not raised')
except ExpressionError as ee:
assert str(ee).startswith('expression must be a string and')
if py3:
extra_bytes = bytes(chr(0) + chr(12) + chr(255), encoding='utf-8')
try:
licensing.parse('mit (and LGPL 2.1)'.encode('utf-8') + extra_bytes)
self.fail('Exception not raised')
except ExpressionError as ee:
assert str(ee).startswith('Invalid license key')
def test_parse_errors_does_not_raise_error_on_plain_non_unicode_raw_string(self):
# plain non-unicode string does not raise error
licensing = Licensing()
x = licensing.parse(r'mit and (LGPL-2.1)')
self.assertTrue(isinstance(x, LicenseExpression))
def test_parse_simplify_and_contain_and_equal(self):
licensing = Licensing()
expr = licensing.parse(' GPL-2.0 or LGPL2.1 and mit ')
expr2 = licensing.parse(' (mit and LGPL2.1) or GPL-2.0 ')
self.assertEqual(expr2.simplify(), expr.simplify())
self.assertEqual(expr2, expr)
expr3 = licensing.parse('mit and LGPL2.1')
self.assertTrue(expr3 in expr2)
def test_license_expression_is_equivalent(self):
lic = Licensing()
is_equiv = lic.is_equivalent
self.assertTrue(is_equiv(lic.parse('mit AND gpl'), lic.parse('mit AND gpl')))
self.assertTrue(is_equiv(lic.parse('mit AND gpl'), lic.parse('gpl AND mit')))
self.assertTrue(is_equiv(lic.parse('mit AND gpl and apache'), lic.parse('apache and gpl AND mit')))
self.assertTrue(is_equiv(lic.parse('mit AND (gpl AND apache)'), lic.parse('(mit AND gpl) AND apache')))
# same but without parsing:
self.assertTrue(is_equiv('mit AND gpl', 'mit AND gpl'))
self.assertTrue(is_equiv('mit AND gpl', 'gpl AND mit'))
self.assertTrue(is_equiv('mit AND gpl and apache', 'apache and gpl AND mit'))
self.assertTrue(is_equiv('mit AND (gpl AND apache)', '(mit AND gpl) AND apache'))
# Real-case example of generated expression vs. stored expression:
ex1 = '''Commercial
AND apache-1.1 AND apache-2.0 AND aslr AND bsd-new
AND cpl-1.0 AND epl-1.0
AND ibm-icu AND ijg AND jdom AND lgpl-2.1
AND mit-open-group AND mpl-1.1 AND sax-pd AND unicode AND w3c AND
w3c-documentation'''
ex2 = '''
apache-1.1 AND apache-2.0 AND aslr AND bsd-new
AND cpl-1.0 AND epl-1.0
AND lgpl-2.1 AND ibm-icu AND ijg
AND jdom AND mit-open-group
AND mpl-1.1 AND Commercial AND sax-pd AND unicode
AND w3c-documentation AND w3c'''
self.assertTrue(is_equiv(lic.parse(ex1), lic.parse(ex2)))
self.assertFalse(is_equiv(lic.parse('mit AND gpl'), lic.parse('mit OR gpl')))
self.assertFalse(is_equiv(lic.parse('mit AND gpl'), lic.parse('gpl OR mit')))
def test_license_expression_license_keys(self):
licensing = Licensing()
assert ['mit', 'gpl'] == licensing.license_keys(licensing.parse(' ( mit ) and gpl'))
assert ['mit', 'gpl'] == licensing.license_keys(licensing.parse('(mit and gpl)'))
        # these two may look surprising: because the expression is a logical
        # expression, the key order can differ for more complex expressions
assert ['mit', 'gpl'] == licensing.license_keys(licensing.parse('mit AND gpl or gpl'))
assert ['l-a+', 'l-b', '+l-c'] == licensing.license_keys(licensing.parse('((l-a+ AND l-b) OR (+l-c))'))
# same without parsing
assert ['mit', 'gpl'] == licensing.license_keys('mit AND gpl or gpl')
assert ['l-a+', 'l-b', 'l-c+'] == licensing.license_keys('((l-a+ AND l-b) OR (l-c+))')
def test_end_to_end(self):
        # these were formerly doctests, ported here to real code tests
l = Licensing()
expr = l.parse(' GPL-2.0 or LGPL-2.1 and mit ')
expected = 'GPL-2.0 OR (LGPL-2.1 AND mit)'
assert expected == str(expr)
expected = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LGPL-2.1'),
LicenseSymbol('mit'),
]
assert expected == l.license_symbols(expr)
def test_pretty(self):
l = Licensing()
expr = l.parse(' GPL-2.0 or LGPL2.1 and mit ')
expected = '''OR(
LicenseSymbol('GPL-2.0'),
AND(
LicenseSymbol('LGPL2.1'),
LicenseSymbol('mit')
)
)'''
assert expected == expr.pretty()
def test_simplify_and_contains(self):
l = Licensing()
expr = l.parse(' GPL-2.0 or LGPL2.1 and mit ')
expr2 = l.parse(' GPL-2.0 or (mit and LGPL2.1) ')
assert expr2.simplify() == expr.simplify()
expr3 = l.parse('mit and LGPL2.1')
assert expr3 in expr2
def test_simplify_and_equivalent_and_contains(self):
l = Licensing()
expr2 = l.parse(' GPL-2.0 or (mit and LGPL-2.1) or bsd Or GPL-2.0 or (mit and LGPL-2.1)')
# note thats simplification does SORT the symbols such that they can
# eventually be compared sequence-wise. This sorting is based on license key
expected = 'GPL-2.0 OR bsd OR (LGPL-2.1 AND mit)'
assert expected == str(expr2.simplify())
# Two expressions can be compared for equivalence:
expr1 = l.parse(' GPL-2.0 or (LGPL-2.1 and mit) ')
assert 'GPL-2.0 OR (LGPL-2.1 AND mit)' == str(expr1)
expr2 = l.parse(' (mit and LGPL-2.1) or GPL-2.0 ')
assert '(mit AND LGPL-2.1) OR GPL-2.0' == str(expr2)
assert l.is_equivalent(expr1, expr2)
assert 'GPL-2.0 OR (LGPL-2.1 AND mit)' == str(expr1.simplify())
assert 'GPL-2.0 OR (LGPL-2.1 AND mit)' == str(expr2.simplify())
assert expr1.simplify() == expr2.simplify()
expr3 = l.parse(' GPL-2.0 or mit or LGPL-2.1')
assert not l.is_equivalent(expr2, expr3)
expr4 = l.parse('mit and LGPL-2.1')
assert expr4.simplify() in expr2.simplify()
assert l.contains(expr2, expr4)
def test_create_from_python(self):
# Expressions can be built from Python expressions, using bitwise operators
# between Licensing objects, but use with caution. The behavior is not as
        # well specified as when using a text expression and parse()
licensing = Licensing()
expr1 = (licensing.LicenseSymbol('GPL-2.0')
| (licensing.LicenseSymbol('mit')
& licensing.LicenseSymbol('LGPL-2.1')))
expr2 = licensing.parse(' GPL-2.0 or (mit and LGPL-2.1) ')
assert 'GPL-2.0 OR (LGPL-2.1 AND mit)' == str(expr1.simplify())
assert 'GPL-2.0 OR (LGPL-2.1 AND mit)' == str(expr2.simplify())
assert licensing.is_equivalent(expr1, expr2)
a = licensing.OR(
LicenseSymbol(key='gpl-2.0'),
licensing.AND(LicenseSymbol(key='mit'),
LicenseSymbol(key='lgpl-2.1')
)
)
b = licensing.OR(
LicenseSymbol(key='gpl-2.0'),
licensing.AND(LicenseSymbol(key='mit'),
LicenseSymbol(key='lgpl-2.1')
)
)
assert a == b
def test_parse_with_repeated_or_later_raise_parse_error(self):
l = Licensing()
expr = 'LGPL2.1+ + and mit'
try:
l.parse(expr)
self.fail('Exception not raised')
except ParseError as ee:
expected = 'Invalid symbols sequence such as (A B) for token: "+" at position: 9'
assert expected == str(ee)
def test_render_complex(self):
licensing = Licensing()
expression = '''
EPL-1.0 AND Apache-1.1 AND Apache-2.0 AND BSD-Modified AND CPL-1.0 AND
ICU-Composite-License AND JPEG-License AND JDOM-License AND LGPL-2.0 AND
MIT-Open-Group AND MPL-1.1 AND SAX-PD AND Unicode-Inc-License-Agreement
AND W3C-Software-Notice and License AND W3C-Documentation-License'''
result = licensing.parse(expression)
expected = ('EPL-1.0 AND Apache-1.1 AND Apache-2.0 AND BSD-Modified '
'AND CPL-1.0 AND ICU-Composite-License AND JPEG-License '
'AND JDOM-License AND LGPL-2.0 AND MIT-Open-Group AND MPL-1.1 '
'AND SAX-PD AND Unicode-Inc-License-Agreement '
'AND W3C-Software-Notice AND License AND W3C-Documentation-License')
assert expected == result.render('{symbol.key}')
expectedkey = ('EPL-1.0 AND Apache-1.1 AND Apache-2.0 AND BSD-Modified AND '
'CPL-1.0 AND ICU-Composite-License AND JPEG-License AND JDOM-License AND '
'LGPL-2.0 AND MIT-Open-Group AND MPL-1.1 AND SAX-PD AND '
'Unicode-Inc-License-Agreement AND W3C-Software-Notice AND License AND'
' W3C-Documentation-License')
assert expectedkey == result.render('{symbol.key}')
def test_render_with(self):
licensing = Licensing()
expression = 'GPL-2.0 with Classpath-2.0 OR BSD-new'
result = licensing.parse(expression)
expected = 'GPL-2.0 WITH Classpath-2.0 OR BSD-new'
assert expected == result.render('{symbol.key}')
expected_html = (
'<a href="path/GPL-2.0">GPL-2.0</a> WITH '
'<a href="path/Classpath-2.0">Classpath-2.0</a> '
'OR <a href="path/BSD-new">BSD-new</a>')
assert expected_html == result.render('<a href="path/{symbol.key}">{symbol.key}</a>')
expected = 'GPL-2.0 WITH Classpath-2.0 OR BSD-new'
assert expected == result.render('{symbol.key}')
def test_parse_complex(self):
licensing = Licensing()
expression = ' GPL-2.0 or later with classpath-Exception and mit or LPL-2.1 and mit or later '
result = licensing.parse(expression)
# this may look weird, but we did not provide symbols hence in "or later",
# "later" is treated as if it were a license
expected = 'GPL-2.0 OR (later WITH classpath-Exception AND mit) OR (LPL-2.1 AND mit) OR later'
assert expected == result.render('{symbol.key}')
def test_parse_complex2(self):
licensing = Licensing()
expr = licensing.parse(" GPL-2.0 or LGPL-2.1 and mit ")
expected = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LGPL-2.1'),
LicenseSymbol('mit')
]
assert expected == sorted(licensing.license_symbols(expr))
expected = 'GPL-2.0 OR (LGPL-2.1 AND mit)'
assert expected == expr.render('{symbol.key}')
def test_Licensing_can_scan_valid_expressions_with_symbols_that_contain_and_with_or(self):
licensing = Licensing()
expression = 'orgpl or withbsd with orclasspath and andmit or andlgpl and ormit or withme'
result = [r.string for r in licensing.get_scanner().scan(expression)]
expected = [
'orgpl', ' or ', 'withbsd', ' with ', 'orclasspath',
' and ', 'andmit', ' or ', 'andlgpl', ' and ', 'ormit',
' or ', 'withme'
]
assert expected == result
def test_Licensing_can_tokenize_valid_expressions_with_symbols_that_contain_and_with_or(self):
licensing = Licensing()
expression = 'orgpl or withbsd with orclasspath and andmit or anlgpl and ormit or withme'
result = list(licensing.tokenize(expression))
expected = [
(LicenseSymbol(key='orgpl'), 'orgpl', 0),
(2, 'or', 6),
(LicenseWithExceptionSymbol(
license_symbol=LicenseSymbol(key='withbsd'),
exception_symbol=LicenseSymbol(key='orclasspath')),
'withbsd with orclasspath', 9),
(1, 'and', 34),
(LicenseSymbol(key='andmit'), 'andmit', 38),
(2, 'or', 45),
(LicenseSymbol(key='anlgpl'), 'anlgpl', 48),
(1, 'and', 55),
(LicenseSymbol(key='ormit'), 'ormit', 59),
(2, 'or', 65),
(LicenseSymbol(key='withme'), 'withme', 68)
]
assert expected == result
def test_Licensing_can_parse_valid_expressions_with_symbols_that_contain_and_with_or(self):
licensing = Licensing()
expression = 'orgpl or withbsd with orclasspath and andmit or anlgpl and ormit or withme'
result = licensing.parse(expression)
expected = 'orgpl OR (withbsd WITH orclasspath AND andmit) OR (anlgpl AND ormit) OR withme'
assert expected == result.render('{symbol.key}')
class LicensingParseWithSymbolsSimpleTest(TestCase):
def test_Licensing_with_illegal_symbols_raise_Exception(self):
try:
Licensing([
'GPL-2.0 or LATER',
'classpath Exception',
'something with else+',
'mit',
'LGPL 2.1',
'mit or later'
])
except ExpressionError as ee:
expected = ('Invalid license key: "or later" words are reserved and '
'cannot be used in a key: "GPL-2.0 or LATER"')
assert expected == str(ee)
def get_syms_and_licensing(self):
a = LicenseSymbol('l-a')
ap = LicenseSymbol('L-a+', ['l-a +'])
b = LicenseSymbol('l-b')
c = LicenseSymbol('l-c')
symbols = [a, ap, b, c]
return a, ap, b, c, Licensing(symbols)
def test_parse_license_expression1(self):
a, _ap, _b, _c, licensing = self.get_syms_and_licensing()
express_string = 'l-a'
result = licensing.parse(express_string)
assert express_string == str(result)
expected = a
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression_with_alias(self):
_a, ap, _b, _c, licensing = self.get_syms_and_licensing()
express_string = 'l-a +'
result = licensing.parse(express_string)
assert 'L-a+' == str(result)
expected = ap
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression3(self):
_a, ap, _b, _c, licensing = self.get_syms_and_licensing()
express_string = 'l-a+'
result = licensing.parse(express_string)
assert 'L-a+' == str(result)
expected = ap
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression4(self):
_a, _ap, _b, _c, licensing = self.get_syms_and_licensing()
express_string = '(l-a)'
result = licensing.parse(express_string)
assert 'l-a' == str(result)
expected = LicenseSymbol(key='l-a', aliases=())
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression5(self):
_a, ap, b, c, licensing = self.get_syms_and_licensing()
express_string = '((l-a+ AND l-b) OR (l-c))'
result = licensing.parse(express_string)
assert '(L-a+ AND l-b) OR l-c' == str(result)
expected = licensing.OR(licensing.AND(ap, b), c)
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression6(self):
a, _ap, b, _c, licensing = self.get_syms_and_licensing()
express_string = 'l-a and l-b'
result = licensing.parse(express_string)
assert 'l-a AND l-b' == str(result)
expected = licensing.AND(a, b)
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression7(self):
a, _ap, b, _c, licensing = self.get_syms_and_licensing()
express_string = 'l-a or l-b'
result = licensing.parse(express_string)
assert 'l-a OR l-b' == str(result)
expected = licensing.OR(a, b)
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression8(self):
a, _ap, b, c, licensing = self.get_syms_and_licensing()
express_string = 'l-a and l-b OR l-c'
result = licensing.parse(express_string)
assert '(l-a AND l-b) OR l-c' == str(result)
expected = licensing.OR(licensing.AND(a, b), c)
assert expected == result
assert [] == licensing.unknown_license_keys(result)
def test_parse_license_expression8_twice(self):
_a, _ap, _b, _c, licensing = self.get_syms_and_licensing()
express_string = 'l-a and l-b OR l-c'
result = licensing.parse(express_string)
assert '(l-a AND l-b) OR l-c' == str(result)
        # there were some issues with reusing a Licensing object
result = licensing.parse(express_string)
assert '(l-a AND l-b) OR l-c' == str(result)
def test_parse_license_expression_with_trailing_space_plus(self):
symbols = [
LicenseSymbol('l-a'),
LicenseSymbol('L-a+', ['l-a +']),
LicenseSymbol('l-b'),
LicenseSymbol('l-c'),
]
licensing = Licensing(symbols)
expresssion_str = 'l-a'
result = licensing.parse(expresssion_str)
assert expresssion_str == str(result)
assert [] == licensing.unknown_license_keys(result)
        # the plus sign is separated from the symbol by a space and is matched through an alias
expresssion_str = 'l-a +'
result = licensing.parse(expresssion_str)
assert 'l-a+' == str(result).lower()
assert [] == licensing.unknown_license_keys(result)
expresssion_str = '(l-a)'
result = licensing.parse(expresssion_str)
assert 'l-a' == str(result).lower()
assert [] == licensing.unknown_license_keys(result)
expresssion_str = '((l-a+ AND l-b) OR (l-c))'
result = licensing.parse(expresssion_str)
assert '(L-a+ AND l-b) OR l-c' == str(result)
assert [] == licensing.unknown_license_keys(result)
expresssion_str = 'l-a and l-b'
result = licensing.parse(expresssion_str)
assert 'l-a AND l-b' == str(result)
assert [] == licensing.unknown_license_keys(result)
expresssion_str = 'l-a or l-b'
result = licensing.parse(expresssion_str)
assert 'l-a OR l-b' == str(result)
assert [] == licensing.unknown_license_keys(result)
expresssion_str = 'l-a and l-b OR l-c'
result = licensing.parse(expresssion_str)
assert '(l-a AND l-b) OR l-c' == str(result)
assert [] == licensing.unknown_license_keys(result)
def test_parse_of_side_by_side_symbols_raise_exception(self):
gpl2 = LicenseSymbol('gpl')
l = Licensing([gpl2])
try:
l.parse('gpl mit')
self.fail('ParseError not raised')
except ParseError:
pass
def test_validate_symbols(self):
symbols = [
LicenseSymbol('l-a', is_exception=True),
LicenseSymbol('l-a'),
LicenseSymbol('l-b'),
LicenseSymbol('l-c'),
]
warnings, errors = validate_symbols(symbols)
expectedw = []
assert expectedw == warnings
expectede = [
'Invalid duplicated license key: l-a.',
]
assert expectede == errors
class LicensingParseWithSymbolsTest(TestCase):
def test_parse_raise_ParseError_when_validating_strict_with_non_exception_symbols(self):
licensing = Licensing(['gpl', 'bsd', 'lgpl', 'exception'])
expression = 'gpl and bsd or lgpl with exception'
try:
licensing.parse(expression, validate=True, strict=True)
except ParseError as pe:
expected = {
'error_code': PARSE_INVALID_SYMBOL_AS_EXCEPTION,
'position': 25,
'token_string': 'exception',
'token_type': TOKEN_SYMBOL}
assert expected == _parse_error_as_dict(pe)
def test_parse_raise_ParseError_when_validating_strict_with_exception_symbols_in_incorrect_spot(self):
licensing = Licensing([LicenseSymbol('gpl', is_exception=False),
LicenseSymbol('exception', is_exception=True)])
licensing.parse('gpl with exception', validate=True, strict=True)
try:
licensing.parse('exception with gpl', validate=True, strict=True)
except ParseError as pe:
expected = {
'error_code': PARSE_INVALID_EXCEPTION,
'position': 0,
'token_string': 'exception',
'token_type': TOKEN_SYMBOL}
assert expected == _parse_error_as_dict(pe)
try:
licensing.parse('gpl with gpl', validate=True, strict=True)
except ParseError as pe:
expected = {
'error_code': PARSE_INVALID_SYMBOL_AS_EXCEPTION,
'position': 9,
'token_string': 'gpl',
'token_type': TOKEN_SYMBOL}
assert expected == _parse_error_as_dict(pe)
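# Tests for LicenseExpression.subs(): replacing license symbols or whole
# subexpressions inside a parsed expression.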
class LicensingSymbolsReplacement(TestCase):
def get_symbols_and_licensing(self):
gpl2 = LicenseSymbol('gpl-2.0', ['The GNU GPL 20', 'GPL-2.0', 'GPL v2.0'])
gpl2plus = LicenseSymbol('gpl-2.0+', ['The GNU GPL 20 or later', 'GPL-2.0 or later', 'GPL v2.0 or later'])
lgpl = LicenseSymbol('LGPL-2.1', ['LGPL v2.1'])
mit = LicenseSymbol('MIT', ['MIT license'])
mitand2 = LicenseSymbol('mitand2', ['mitand2', 'mitand2 license'])
symbols = [gpl2, gpl2plus, lgpl, mit, mitand2]
licensing = Licensing(symbols)
return gpl2, gpl2plus, lgpl, mit, mitand2, licensing
def test_simple_substitution(self):
gpl2, gpl2plus, _lgpl, _mit, _mitand2, licensing = self.get_symbols_and_licensing()
subs = {gpl2plus: gpl2}
expr = licensing.parse('gpl-2.0 or gpl-2.0+')
result = expr.subs(subs)
assert 'gpl-2.0 OR gpl-2.0' == result.render()
def test_advanced_substitution(self):
_gpl2, _gpl2plus, lgpl, _mit, _mitand2, licensing = self.get_symbols_and_licensing()
source = licensing.parse('gpl-2.0+ and mit')
target = lgpl
subs = {source: target}
expr = licensing.parse('gpl-2.0 or gpl-2.0+ and mit')
result = expr.subs(subs)
assert 'gpl-2.0 OR LGPL-2.1' == result.render()
def test_multiple_substitutions(self):
gpl2, gpl2plus, lgpl, mit, _mitand2, licensing = self.get_symbols_and_licensing()
source1 = licensing.parse('gpl-2.0+ and mit')
target1 = lgpl
source2 = licensing.parse('mitand2')
target2 = mit
source3 = gpl2
target3 = gpl2plus
subs = OrderedDict([
(source1, target1),
(source2, target2),
(source3, target3),
])
expr = licensing.parse('gpl-2.0 or gpl-2.0+ and mit')
# step 1: yields 'gpl-2.0 or lgpl'
# step 2: yields 'gpl-2.0+ or LGPL-2.1'
result = expr.subs(subs)
assert 'gpl-2.0+ OR LGPL-2.1' == result.render()
def test_multiple_substitutions_complex(self):
gpl2, gpl2plus, lgpl, mit, _mitand2, licensing = self.get_symbols_and_licensing()
source1 = licensing.parse('gpl-2.0+ and mit')
target1 = lgpl
source2 = licensing.parse('mitand2')
target2 = mit
source3 = gpl2
target3 = gpl2plus
subs = OrderedDict([
(source1, target1),
(source2, target2),
(source3, target3),
])
expr = licensing.parse('(gpl-2.0 or gpl-2.0+ and mit) and (gpl-2.0 or gpl-2.0+ and mit)')
# step 1: yields 'gpl-2.0 or lgpl'
# step 2: yields 'gpl-2.0+ or LGPL-2.1'
result = expr.subs(subs)
assert '(gpl-2.0+ OR LGPL-2.1) AND (gpl-2.0+ OR LGPL-2.1)' == result.render()
expr = licensing.parse('(gpl-2.0 or mit and gpl-2.0+) and (gpl-2.0 or gpl-2.0+ and mit)')
# step 1: yields 'gpl-2.0 or lgpl'
# step 2: yields 'gpl-2.0+ or LGPL-2.1'
result = expr.subs(subs)
assert '(gpl-2.0+ OR LGPL-2.1) AND (gpl-2.0+ OR LGPL-2.1)' == result.render()
class LicensingParseWithSymbolsAdvancedTest(TestCase):
def get_symbols_and_licensing(self):
gpl2 = LicenseSymbol('gpl-2.0', ['The GNU GPL 20', 'GPL-2.0', 'GPL v2.0'])
gpl2plus = LicenseSymbol('gpl-2.0+', ['The GNU GPL 20 or later', 'GPL-2.0 or later', 'GPL v2.0 or later'])
lgpl = LicenseSymbol('LGPL-2.1', ['LGPL v2.1'])
mit = LicenseSymbol('MIT', ['MIT license'])
mitand2 = LicenseSymbol('mitand2', ['mitand2', 'mitand2 license'])
symbols = [gpl2, gpl2plus, lgpl, mit, mitand2]
licensing = Licensing(symbols)
return gpl2, gpl2plus, lgpl, mit, mitand2, licensing
def test_parse_trailing_char_raise_exception(self):
_gpl2, _gpl2plus, _lgpl, _mit, _mitand2, licensing = self.get_symbols_and_licensing()
try:
licensing.parse('The GNU GPL 20 or LGPL-2.1 and mit2')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_SYMBOL_SEQUENCE, 'position': 34,
'token_string': '2', 'token_type': LicenseSymbol('2')}
assert expected == _parse_error_as_dict(pe)
def test_parse_expression_with_trailing_unknown_should_raise_exception(self):
gpl2, gpl2plus, lgpl, mit, _mitand2, licensing = self.get_symbols_and_licensing()
unknown = LicenseSymbol(key='123')
tokens = list(licensing.tokenize('The GNU GPL 20 or later or (LGPL-2.1 and mit) or The GNU GPL 20 or mit 123'))
expected = [
(gpl2plus, 'The GNU GPL 20 or later', 0),
(TOKEN_OR, ' or ', 23),
(TOKEN_LPAR, '(', 27),
(lgpl, 'LGPL-2.1', 28),
(TOKEN_AND, ' and ', 36),
(mit, 'mit', 41),
(TOKEN_RPAR, ')', 44),
(TOKEN_OR, ' or ', 45),
(gpl2, 'The GNU GPL 20', 49),
(TOKEN_OR, ' or ', 63),
(mit, 'mit', 67),
(unknown, ' 123', 70)
]
assert expected == tokens
try:
licensing.parse('The GNU GPL 20 or later or (LGPL-2.1 and mit) or The GNU GPL 20 or mit 123')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_SYMBOL_SEQUENCE, 'position': 70,
'token_string': ' 123', 'token_type': unknown}
assert expected == _parse_error_as_dict(pe)
def test_parse_expression_with_trailing_unknown_should_raise_exception2(self):
_gpl2, _gpl2_plus, _lgpl, _mit, _mitand2, licensing = self.get_symbols_and_licensing()
unknown = LicenseSymbol(key='123')
try:
licensing.parse('The GNU GPL 20 or mit 123')
except ParseError as pe:
expected = {'error_code': PARSE_INVALID_SYMBOL_SEQUENCE, 'position': 21,
'token_string': ' 123', 'token_type': unknown}
assert expected == _parse_error_as_dict(pe)
def test_parse_expression_with_WITH(self):
gpl2, _gpl2plus, lgpl, mit, mitand2, _ = self.get_symbols_and_licensing()
mitexp = LicenseSymbol('mitexp', ('mit exp',), is_exception=True)
gpl_20_or_later = LicenseSymbol('GPL-2.0+', ['The GNU GPL 20 or later'])
symbols = [gpl2, lgpl, mit, mitand2, mitexp, gpl_20_or_later]
licensing = Licensing(symbols)
expr = 'The GNU GPL 20 or later or (LGPL-2.1 and mit) or The GNU GPL 20 or mit with mit exp'
tokens = list(licensing.tokenize(expr))
expected = [
(gpl_20_or_later, 'The GNU GPL 20 or later', 0),
(TOKEN_OR, ' or ', 23),
(TOKEN_LPAR, '(', 27),
(lgpl, 'LGPL-2.1', 28),
(TOKEN_AND, ' and ', 36),
(mit, 'mit', 41),
(TOKEN_RPAR, ')', 44),
(TOKEN_OR, ' or ', 45),
(gpl2, 'The GNU GPL 20', 49),
(TOKEN_OR, ' or ', 63),
(LicenseWithExceptionSymbol(mit, mitexp), 'mit with mit exp', 67)
]
assert expected == tokens
parsed = licensing.parse(expr)
expected = 'GPL-2.0+ OR (LGPL-2.1 AND MIT) OR gpl-2.0 OR MIT WITH mitexp'
assert expected == str(parsed)
expected = 'GPL-2.0+ OR (LGPL-2.1 AND MIT) OR gpl-2.0 OR MIT WITH mitexp'
assert expected == parsed.render()
def test_parse_expression_with_WITH_and_unknown_symbol(self):
gpl2, _gpl2plus, lgpl, mit, mitand2, _ = self.get_symbols_and_licensing()
mitexp = LicenseSymbol('mitexp', ('mit exp',), is_exception=True)
gpl_20_or_later = LicenseSymbol('GPL-2.0+', ['The GNU GPL 20 or later'])
symbols = [gpl2, lgpl, mit, mitand2, mitexp, gpl_20_or_later]
licensing = Licensing(symbols)
expr = 'The GNU GPL 20 or later or (LGPL-2.1 and mit) or The GNU GPL 20 or mit with 123'
parsed = licensing.parse(expr)
assert ['123'] == licensing.unknown_license_keys(parsed)
assert ['123'] == licensing.unknown_license_keys(expr)
def test_unknown_keys(self):
_gpl2, _gpl2plus, _lgpl, _mit, _mitand2, licensing = self.get_symbols_and_licensing()
expr = 'The GNU GPL 20 or LGPL-2.1 and mit'
parsed = licensing.parse(expr)
expected = 'gpl-2.0 OR (LGPL-2.1 AND MIT)'
assert expected == str(parsed)
assert 'gpl-2.0 OR (LGPL-2.1 AND MIT)' == parsed.render('{symbol.key}')
assert [] == licensing.unknown_license_keys(parsed)
assert [] == licensing.unknown_license_keys(expr)
def test_unknown_keys_with_trailing_char(self):
gpl2, _gpl2plus, lgpl, _mit, mitand2, licensing = self.get_symbols_and_licensing()
expr = 'The GNU GPL 20 or LGPL-2.1 and mitand2'
parsed = licensing.parse(expr)
expected = [gpl2, lgpl, mitand2]
assert expected == licensing.license_symbols(parsed)
assert expected == licensing.license_symbols(licensing.parse(parsed))
assert expected == licensing.license_symbols(expr)
assert [] == licensing.unknown_license_keys(parsed)
assert [] == licensing.unknown_license_keys(expr)
def test_unknown_keys_with_trailing_char_2(self):
_gpl2, _gpl2plus, _lgpl, _mit, _mitand2, licensing = self.get_symbols_and_licensing()
expr = 'The GNU GPL 20 or LGPL-2.1 and mitand3'
try:
licensing.parse(expr)
self.fail('ParseError should be raised')
except ParseError as pe:
expected = {'error_code': 5, 'position': 34, 'token_string': u'and3', 'token_type': LicenseSymbol(key=u'and3')}
assert expected == _parse_error_as_dict(pe)
def test_parse_with_overlapping_key_with_licensing(self):
symbols = [
LicenseSymbol('MIT', ['MIT license']),
LicenseSymbol('LGPL-2.1', ['LGPL v2.1']),
LicenseSymbol('zlib', ['zlib']),
LicenseSymbol('d-zlib', ['D zlib']),
LicenseSymbol('mito', ['mit o']),
LicenseSymbol('hmit', ['h verylonglicense']),
]
licensing = Licensing(symbols)
expression = 'mit or mit AND zlib or mit or mit with verylonglicense'
results = str(licensing.parse(expression))
expected = 'mit OR (MIT AND zlib) OR mit OR MIT WITH verylonglicense'
self.assertEqual(expected, results)
class LicensingSymbolsTest(TestCase):
def test_get_license_symbols(self):
symbols = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('mit'),
LicenseSymbol('LGPL 2.1')
]
l = Licensing(symbols)
assert symbols == l.license_symbols(l.parse(' GPL-2.0 and mit or LGPL 2.1 and mit '))
def test_get_license_symbols2(self):
symbols = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LATER'),
LicenseSymbol('mit'),
LicenseSymbol('LGPL 2.1+'),
LicenseSymbol('Foo exception', is_exception=True),
]
l = Licensing(symbols)
expr = ' GPL-2.0 or LATER and mit or LGPL 2.1+ and mit with Foo exception '
expected = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LATER'),
LicenseSymbol('mit'),
LicenseSymbol('LGPL 2.1+'),
LicenseSymbol('mit'),
LicenseSymbol('Foo exception', is_exception=True),
]
assert expected == l.license_symbols(l.parse(expr), unique=False)
def test_get_license_symbols3(self):
symbols = [
LicenseSymbol('mit'),
LicenseSymbol('LGPL 2.1+'),
LicenseSymbol('Foo exception', is_exception=True),
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LATER'),
]
l = Licensing(symbols)
expr = 'mit or LGPL 2.1+ and mit with Foo exception or GPL-2.0 or LATER '
assert symbols == l.license_symbols(l.parse(expr))
def test_get_license_symbols4(self):
symbols = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LATER'),
LicenseSymbol('big exception', is_exception=True),
LicenseSymbol('mit'),
LicenseSymbol('LGPL 2.1+'),
LicenseSymbol('Foo exception', is_exception=True),
]
l = Licensing(symbols)
expr = (' GPL-2.0 or LATER with big exception and mit or '
'LGPL 2.1+ and mit or later with Foo exception ')
expected = [
LicenseSymbol('GPL-2.0'),
LicenseSymbol('LATER'),
LicenseSymbol('big exception', is_exception=True),
LicenseSymbol('mit'),
LicenseSymbol('LGPL 2.1+'),
LicenseSymbol('mit'),
LicenseSymbol('LATER'),
LicenseSymbol('Foo exception', is_exception=True),
]
assert expected == l.license_symbols(l.parse(expr), unique=False)
def test_license_symbols(self):
licensing = Licensing([
'GPL-2.0 or LATER',
'classpath Exception',
'something with else+',
'mit',
'LGPL 2.1',
'mit or later'
])
expr = (' GPL-2.0 or LATER with classpath Exception and mit and '
'mit with SOMETHING with ELSE+ or LGPL 2.1 and '
'GPL-2.0 or LATER with classpath Exception and '
'mit or later or LGPL 2.1 or mit or GPL-2.0 or LATER '
'with SOMETHING with ELSE+ and lgpl 2.1')
gpl2plus = LicenseSymbol(key='GPL-2.0 or LATER')
cpex = LicenseSymbol(key='classpath Exception')
someplus = LicenseSymbol(key='something with else+')
mitplus = LicenseSymbol(key='mit or later')
mit = LicenseSymbol(key='mit')
lgpl = LicenseSymbol(key='LGPL 2.1')
gpl_with_cp = LicenseWithExceptionSymbol(license_symbol=gpl2plus, exception_symbol=cpex)
mit_with_some = LicenseWithExceptionSymbol(license_symbol=mit, exception_symbol=someplus)
gpl2_with_someplus = LicenseWithExceptionSymbol(license_symbol=gpl2plus, exception_symbol=someplus)
parsed = licensing.parse(expr)
expected = [gpl_with_cp, mit, mit_with_some, lgpl, gpl_with_cp, mitplus, lgpl, mit, gpl2_with_someplus, lgpl]
assert expected == licensing.license_symbols(parsed, unique=False, decompose=False)
expected = [gpl_with_cp, mit, mit_with_some, lgpl, mitplus, gpl2_with_someplus]
assert expected == licensing.license_symbols(parsed, unique=True, decompose=False)
expected = [gpl2plus, cpex, mit, mit, someplus, lgpl, gpl2plus, cpex, mitplus, lgpl, mit, gpl2plus, someplus, lgpl]
assert expected == licensing.license_symbols(parsed, unique=False, decompose=True)
expected = [gpl2plus, cpex, mit, someplus, lgpl, mitplus]
assert expected == licensing.license_symbols(parsed, unique=True, decompose=True)
def test_primary_license_symbol_and_primary_license_key(self):
licensing = Licensing([
'GPL-2.0 or LATER',
'classpath Exception',
'mit',
'LGPL 2.1',
'mit or later'
])
expr = ' GPL-2.0 or LATER with classpath Exception and mit or LGPL 2.1 and mit or later '
gpl = LicenseSymbol('GPL-2.0 or LATER')
cpex = LicenseSymbol('classpath Exception')
expected = LicenseWithExceptionSymbol(gpl, cpex)
parsed = licensing.parse(expr)
assert expected == licensing.primary_license_symbol(parsed, decompose=False)
assert gpl == licensing.primary_license_symbol(parsed, decompose=True)
assert 'GPL-2.0 or LATER' == licensing.primary_license_key(parsed)
expr = ' GPL-2.0 or later with classpath Exception and mit or LGPL 2.1 and mit or later '
expected = 'GPL-2.0 or LATER WITH classpath Exception'
assert expected == licensing.primary_license_symbol(
parsed, decompose=False).render('{symbol.key}')
class SplitAndTokenizeTest(TestCase):
def test_splitter(self):
expr = (' GPL-2.0 or later with classpath Exception and mit and '
'mit with SOMETHING with ELSE+ or LGPL 2.1 and '
'GPL-2.0 or LATER with (Classpath Exception and '
'mit or later) or LGPL 2.1 or mit or GPL-2.0 or LATER '
'with SOMETHING with ELSE+ and lgpl 2.1')
results = list(splitter(expr))
expected = [
Result(0, 0, ' ', None),
Result(1, 7, 'GPL-2.0', Output('GPL-2.0', LicenseSymbol(key='GPL-2.0',))),
Result(8, 8, ' ', None),
Result(9, 10, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(11, 11, ' ', None),
Result(12, 16, 'later', Output('later', LicenseSymbol(key='later',))),
Result(17, 17, ' ', None),
Result(18, 21, 'with', Output('with', Keyword(value='with', type=TOKEN_WITH))),
Result(22, 22, ' ', None),
Result(23, 31, 'classpath', Output('classpath', LicenseSymbol(key='classpath',))),
Result(32, 32, ' ', None),
Result(33, 41, 'Exception', Output('Exception', LicenseSymbol(key='Exception',))),
Result(42, 42, ' ', None),
Result(43, 45, 'and', Output('and', Keyword(value='and', type=TOKEN_AND))),
Result(46, 46, ' ', None),
Result(47, 49, 'mit', Output('mit', LicenseSymbol(key='mit',))),
Result(50, 50, ' ', None),
Result(51, 53, 'and', Output('and', Keyword(value='and', type=TOKEN_AND))),
Result(54, 54, ' ', None),
Result(55, 57, 'mit', Output('mit', LicenseSymbol(key='mit',))),
Result(58, 58, ' ', None),
Result(59, 62, 'with', Output('with', Keyword(value='with', type=TOKEN_WITH))),
Result(63, 63, ' ', None),
Result(64, 72, 'SOMETHING', Output('SOMETHING', LicenseSymbol(key='SOMETHING',))),
Result(73, 73, ' ', None),
Result(74, 77, 'with', Output('with', Keyword(value='with', type=TOKEN_WITH))),
Result(78, 78, ' ', None),
Result(79, 83, 'ELSE+', Output('ELSE+', LicenseSymbol(key='ELSE+',))),
Result(84, 84, ' ', None),
Result(85, 86, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(87, 87, ' ', None),
Result(88, 91, 'LGPL', Output('LGPL', LicenseSymbol(key='LGPL',))),
Result(92, 92, ' ', None),
Result(93, 95, '2.1', Output('2.1', LicenseSymbol(key='2.1',))),
Result(96, 96, ' ', None),
Result(97, 99, 'and', Output('and', Keyword(value='and', type=TOKEN_AND))),
Result(100, 100, ' ', None),
Result(101, 107, 'GPL-2.0', Output('GPL-2.0', LicenseSymbol(key='GPL-2.0',))),
Result(108, 108, ' ', None),
Result(109, 110, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(111, 111, ' ', None),
Result(112, 116, 'LATER', Output('LATER', LicenseSymbol(key='LATER',))),
Result(117, 117, ' ', None),
Result(118, 121, 'with', Output('with', Keyword(value='with', type=TOKEN_WITH))),
Result(122, 122, ' ', None),
Result(123, 123, '(', Output('(', Keyword(value='(', type=TOKEN_LPAR))),
Result(124, 132, 'Classpath', Output('Classpath', LicenseSymbol(key='Classpath',))),
Result(133, 133, ' ', None),
Result(134, 142, 'Exception', Output('Exception', LicenseSymbol(key='Exception',))),
Result(143, 143, ' ', None),
Result(144, 146, 'and', Output('and', Keyword(value='and', type=TOKEN_AND))),
Result(147, 147, ' ', None),
Result(148, 150, 'mit', Output('mit', LicenseSymbol(key='mit',))),
Result(151, 151, ' ', None),
Result(152, 153, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(154, 154, ' ', None),
Result(155, 159, 'later', Output('later', LicenseSymbol(key='later',))),
Result(160, 160, ')', Output(')', Keyword(value=')', type=TOKEN_RPAR))),
Result(161, 161, ' ', None),
Result(162, 163, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(164, 164, ' ', None),
Result(165, 168, 'LGPL', Output('LGPL', LicenseSymbol(key='LGPL',))),
Result(169, 169, ' ', None),
Result(170, 172, '2.1', Output('2.1', LicenseSymbol(key='2.1',))),
Result(173, 173, ' ', None),
Result(174, 175, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(176, 176, ' ', None),
Result(177, 179, 'mit', Output('mit', LicenseSymbol(key='mit',))),
Result(180, 180, ' ', None),
Result(181, 182, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(183, 183, ' ', None),
Result(184, 190, 'GPL-2.0', Output('GPL-2.0', LicenseSymbol(key='GPL-2.0',))),
Result(191, 191, ' ', None),
Result(192, 193, 'or', Output('or', Keyword(value='or', type=TOKEN_OR))),
Result(194, 194, ' ', None),
Result(195, 199, 'LATER', Output('LATER', LicenseSymbol(key='LATER',))),
Result(200, 200, ' ', None),
Result(201, 204, 'with', Output('with', Keyword(value='with', type=TOKEN_WITH))),
Result(205, 205, ' ', None),
Result(206, 214, 'SOMETHING', Output('SOMETHING', LicenseSymbol(key='SOMETHING',))),
Result(215, 215, ' ', None),
Result(216, 219, 'with', Output('with', Keyword(value='with', type=TOKEN_WITH))),
Result(220, 220, ' ', None),
Result(221, 225, 'ELSE+', Output('ELSE+', LicenseSymbol(key='ELSE+',))),
Result(226, 226, ' ', None),
Result(227, 229, 'and', Output('and', Keyword(value='and', type=TOKEN_AND))),
Result(230, 230, ' ', None),
Result(231, 234, 'lgpl', Output('lgpl', LicenseSymbol(key='lgpl',))),
Result(235, 235, ' ', None),
Result(236, 238, '2.1', Output('2.1', LicenseSymbol(key='2.1',)))
]
assert expected == results
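    # Exercise scanning, space stripping and WITH-grouping one step at a time
    # before re-checking the same expression through tokenize().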
def test_tokenize_step_by_step_does_not_munge_trailing_symbols(self):
gpl2 = LicenseSymbol(key='GPL-2.0')
gpl2plus = LicenseSymbol(key='GPL-2.0 or LATER')
cpex = LicenseSymbol(key='classpath Exception', is_exception=True)
mitthing = LicenseSymbol(key='mithing')
mitthing_with_else = LicenseSymbol(key='mitthing with else+', is_exception=False)
mit = LicenseSymbol(key='mit')
mitplus = LicenseSymbol(key='mit or later')
elsish = LicenseSymbol(key='else')
elsishplus = LicenseSymbol(key='else+')
lgpl = LicenseSymbol(key='LGPL 2.1')
licensing = Licensing([
gpl2,
gpl2plus,
cpex,
mitthing,
mitthing_with_else,
mit,
mitplus,
elsish,
elsishplus,
lgpl,
])
expr = (' GPL-2.0 or later with classpath Exception and mit and '
'mit with mitthing with ELSE+ or LGPL 2.1 and '
'GPL-2.0 or LATER with Classpath Exception and '
'mit or later or LGPL 2.1 or mit or GPL-2.0 or LATER '
'with mitthing with ELSE+ and lgpl 2.1 or gpl-2.0')
        # first scan
scanner = licensing.get_scanner()
result = list(scanner.scan(expr))
WITH_KW = Keyword(value=' with ', type=10)
AND_KW = Keyword(value=' and ', type=1)
OR_KW = Keyword(value=' or ', type=2)
expected = [
Result(0, 0, ' ', None),
Result(1, 16, 'GPL-2.0 or later', Output('GPL-2.0 or LATER', gpl2plus, 1)),
Result(17, 22, ' with ', Output(' with ', WITH_KW, 0)),
Result(23, 41, 'classpath Exception', Output('classpath Exception', cpex, 1)),
Result(42, 46, ' and ', Output(' and ', AND_KW, 0)),
Result(47, 49, 'mit', Output('mit', mit, 1)),
Result(50, 54, ' and ', Output(' and ', AND_KW, 0)),
Result(55, 57, 'mit', Output('mit', mit, 1)),
Result(58, 63, ' with ', Output(' with ', WITH_KW, 0)),
Result(64, 82, 'mitthing with ELSE+', Output('mitthing with else+', mitthing_with_else, 1)),
Result(83, 86, ' or ', Output(' or ', OR_KW, 0)),
Result(87, 94, 'LGPL 2.1', Output('LGPL 2.1', lgpl, 1)),
Result(95, 99, ' and ', Output(' and ', AND_KW, 0)),
Result(100, 115, 'GPL-2.0 or LATER', Output('GPL-2.0 or LATER', gpl2plus, 1)),
Result(116, 121, ' with ', Output(' with ', WITH_KW, 0)),
Result(122, 140, 'Classpath Exception', Output('classpath Exception', cpex, 1)),
Result(141, 145, ' and ', Output(' and ', AND_KW, 0)),
Result(146, 157, 'mit or later', Output('mit or later', mitplus, 1)),
Result(158, 161, ' or ', Output(' or ', OR_KW, 0)),
Result(162, 169, 'LGPL 2.1', Output('LGPL 2.1', lgpl, 1)),
Result(170, 173, ' or ', Output(' or ', OR_KW, 0)),
Result(174, 176, 'mit', Output('mit', mit, 1)),
Result(177, 180, ' or ', Output(' or ', OR_KW, 0)),
Result(181, 196, 'GPL-2.0 or LATER', Output('GPL-2.0 or LATER', gpl2plus, 1)),
Result(197, 202, ' with ', Output(' with ', WITH_KW, 0)),
Result(203, 221, 'mitthing with ELSE+', Output('mitthing with else+', mitthing_with_else, 1)),
Result(222, 226, ' and ', Output(' and ', AND_KW, 0)),
Result(227, 234, 'lgpl 2.1', Output('LGPL 2.1', lgpl, 1)),
Result(235, 238, ' or ', Output(' or ', OR_KW, 0)),
Result(239, 245, 'gpl-2.0', Output('GPL-2.0', gpl2, 1))
]
assert expected == result
assert 246 == expected[-1].end + 1
assert 246 == sum(len(r.string) for r in result)
# skip spaces
result = list(strip_and_skip_spaces(result))
# here only the first token is a space
assert expected[1:] == result
# group results
gpl2pluso = Output('GPL-2.0 or LATER', LicenseSymbol('GPL-2.0 or LATER', is_exception=False), 1)
cpex0 = Output('classpath Exception', LicenseSymbol('classpath Exception', is_exception=True), 1)
mito = Output('mit', LicenseSymbol('mit', is_exception=False), 1)
mieo1 = Output('mitthing with else+', LicenseSymbol('mitthing with else+', is_exception=False), 1)
lgplo = Output('LGPL 2.1', LicenseSymbol('LGPL 2.1', is_exception=False), 1)
mitoo = Output('mit or later', LicenseSymbol('mit or later', is_exception=False), 1)
gpl202 = Output('GPL-2.0', LicenseSymbol('GPL-2.0', is_exception=False), 1)
with_kw = Output(' with ', WITH_KW, 0)
and_kw = Output(' and ', AND_KW, 0)
or_kw = Output(' or ', OR_KW, 0)
expected_groups = [
(Result(1, 16, 'GPL-2.0 or later', gpl2pluso),
Result(17, 22, ' with ', with_kw),
Result(23, 41, 'classpath Exception', cpex0)),
(Result(42, 46, ' and ', and_kw),),
(Result(47, 49, 'mit', mito),),
(Result(50, 54, ' and ', and_kw),),
(Result(55, 57, 'mit', mito),
Result(58, 63, ' with ', with_kw),
Result(64, 82, 'mitthing with ELSE+', mieo1)),
(Result(83, 86, ' or ', or_kw),),
(Result(87, 94, 'LGPL 2.1', lgplo),),
(Result(95, 99, ' and ', and_kw),),
(Result(100, 115, 'GPL-2.0 or LATER', gpl2pluso),
Result(116, 121, ' with ', with_kw),
Result(122, 140, 'Classpath Exception', cpex0)),
(Result(141, 145, ' and ', and_kw),),
(Result(146, 157, 'mit or later', mitoo),),
(Result(158, 161, ' or ', or_kw),),
(Result(162, 169, 'LGPL 2.1', lgplo),),
(Result(170, 173, ' or ', or_kw),),
(Result(174, 176, 'mit', mito),),
(Result(177, 180, ' or ', or_kw),),
(Result(181, 196, 'GPL-2.0 or LATER', gpl2pluso),
Result(197, 202, ' with ', with_kw),
Result(203, 221, 'mitthing with ELSE+', mieo1)),
(Result(222, 226, ' and ', and_kw),),
(Result(227, 234, 'lgpl 2.1', lgplo),),
(Result(235, 238, ' or ', or_kw),),
(Result(239, 245, 'gpl-2.0', gpl202),)
]
result_groups = list(group_results_for_with_subexpression(result))
assert expected_groups == result_groups
# finally retest it all with tokenize
gpl2plus_with_cpex = LicenseWithExceptionSymbol(license_symbol=gpl2plus, exception_symbol=cpex)
gpl2plus_with_someplus = LicenseWithExceptionSymbol(license_symbol=gpl2plus, exception_symbol=mitthing_with_else)
mit_with_mitthing_with_else = LicenseWithExceptionSymbol(license_symbol=mit, exception_symbol=mitthing_with_else)
expected = [
(gpl2plus_with_cpex, 'GPL-2.0 or later with classpath Exception', 1),
(TOKEN_AND, ' and ', 42),
(mit, 'mit', 47),
(TOKEN_AND, ' and ', 50),
(mit_with_mitthing_with_else, 'mit with mitthing with ELSE+', 55),
(TOKEN_OR, ' or ', 83),
(lgpl, 'LGPL 2.1', 87),
(TOKEN_AND, ' and ', 95),
(gpl2plus_with_cpex, 'GPL-2.0 or LATER with Classpath Exception', 100),
(TOKEN_AND, ' and ', 141),
(mitplus, 'mit or later', 146),
(TOKEN_OR, ' or ', 158),
(lgpl, 'LGPL 2.1', 162),
(TOKEN_OR, ' or ', 170),
(mit, 'mit', 174),
(TOKEN_OR, ' or ', 177),
(gpl2plus_with_someplus, 'GPL-2.0 or LATER with mitthing with ELSE+', 181),
(TOKEN_AND, ' and ', 222),
(lgpl, 'lgpl 2.1', 227),
(TOKEN_OR, ' or ', 235),
(gpl2, 'gpl-2.0', 239),
]
assert expected == list(licensing.tokenize(expr))
|
all3fox/license-expression
|
tests/test_license_expression.py
|
Python
|
apache-2.0
| 65,726
|
[
"VisIt"
] |
6b2f58527a7c4c8665f6f59ec90adc2720ebc9a88d2a94e7604c093941422507
|
#!/usr/bin/env python
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import glob
import os
import sys
from setuptools import setup
from galaxy.common import version
# Paths we'll use later
etcpath = "/etc/galaxy"
homedir = "/var/lib/galaxy"
if os.path.exists("/etc/debian_version"):
webconfig = "/etc/apache2/conf.d"
else:
webconfig = "/etc/httpd/conf.d"
if (os.environ.get('USER', '') == 'vagrant'
or os.environ.get('SUDO_USER', '') == 'vagrant'):
del os.link
#####################################################################
# Helper Functions
def explode_glob_path(path):
"""Take a glob and hand back the full recursive expansion,
ignoring links.
"""
result = []
includes = glob.glob(path)
for item in includes:
if os.path.isdir(item) and not os.path.islink(item):
result.extend(explode_glob_path(os.path.join(item, "*")))
else:
result.append(item)
return result
def proc_data_files(data_files):
"""Because data_files doesn't natively support globs...
let's add them.
"""
result = []
# If running in a virtualenv, don't return data files that would install to
# system paths (mainly useful for running tests via tox).
if hasattr(sys, 'real_prefix'):
return result
for dir, files in data_files:
includes = []
for item in files:
includes.extend(explode_glob_path(item))
result.append((dir, includes))
return result
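# Illustrative sketch (not used by the setup() call below): outside a
# virtualenv, proc_data_files expands glob-style entries into concrete file
# lists, e.g. a hypothetical
#   proc_data_files([("/var/lib/galaxy", ["galaxy/static/*"])])
# becomes [("/var/lib/galaxy", ["galaxy/static/favicon.ico", ...])], with
# directories recursed into by explode_glob_path (symlinked dirs not followed).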
#####################################################################
setup(
name='galaxy',
version=version.get_git_version(),
author='Ansible, Inc.',
author_email='support@ansible.com',
description='Galaxy: Find, reuse and share the best Ansible content.',
long_description='Galaxy is a web site and command line tool for '
'creating and sharing Ansible roles.',
license='Apache-2.0',
keywords='ansible galaxy',
url='http://github.com/ansible/galaxy',
packages=['galaxy'],
include_package_data=True,
zip_safe=False,
setup_requires=[],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Systems Administration',
],
entry_points={
'console_scripts': ['galaxy-manage = galaxy:manage'],
},
data_files=proc_data_files([
("%s" % homedir, ["config/wsgi.py",
"galaxy/static/favicon.ico"])]
),
)
|
chouseknecht/galaxy
|
setup.py
|
Python
|
apache-2.0
| 3,620
|
[
"Galaxy"
] |
43419e4b4e8486a615c8a72f7e1f8809cf62b924e010b8df7df20b89b453d626
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
__all__ = [
"ndtr",
"ndtri",
"log_ndtr",
"log_cdf_laplace",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower](lower, upper](upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = -20
LOGNDTR_FLOAT32_LOWER = -10
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly
# conservative, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = 8
LOGNDTR_FLOAT32_UPPER = 5
def ndtr(x, name="ndtr"):
"""Normal distribution function.
Returns the area under the Gaussian probability density function, integrated
from minus infinity to x:
```
1 / x
ndtr(x) = ---------- | exp(-0.5 t**2) dt
sqrt(2 pi) /-inf
= 0.5 (1 + erf(x / sqrt(2)))
= 0.5 erfc(x / sqrt(2))
```
Args:
x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"x.dtype=%s is not handled, see docstring for supported types."
% x.dtype)
return _ndtr(x)
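# Minimal usage sketch (illustrative, not part of the original module); the
# values quoted are the familiar standard-normal CDF values, e.g. ndtr(0) = 0.5.
def _example_ndtr_usage():
  x = constant_op.constant([-1.96, 0., 1.96], dtype=np.float64)
  return ndtr(x)  # roughly [0.025, 0.5, 0.975] when evaluated in a session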
def _ndtr(x):
"""Implements ndtr core logic."""
half_sqrt_2 = constant_op.constant(
0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
w = x * half_sqrt_2
z = math_ops.abs(w)
y = array_ops.where(math_ops.less(z, half_sqrt_2),
1. + math_ops.erf(w),
array_ops.where(math_ops.greater(w, 0.),
2. - math_ops.erfc(z),
math_ops.erfc(z)))
return 0.5 * y
def ndtri(p, name="ndtri"):
"""The inverse of the CDF of the Normal distribution function.
Returns x such that the area under the pdf from minus infinity to x is equal
to p.
A piece-wise rational approximation is done for the function.
This is a port of the implementation in netlib.
Args:
p: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtri").
Returns:
x: `Tensor` with `dtype=p.dtype`.
Raises:
TypeError: if `p` is not floating-type.
"""
with ops.name_scope(name, values=[p]):
p = ops.convert_to_tensor(p, name="p")
if p.dtype.as_numpy_dtype not in [np.float32, np.float64]:
raise TypeError(
"p.dtype=%s is not handled, see docstring for supported types."
% p.dtype)
return _ndtri(p)
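# Illustrative sketch (not part of the original module): ndtri inverts ndtr,
# so ndtri(0.5) = 0 and ndtri(0.975) is roughly 1.96.
def _example_ndtri_usage():
  p = constant_op.constant([0.025, 0.5, 0.975], dtype=np.float64)
  return ndtri(p)  # roughly [-1.96, 0., 1.96] when evaluated in a session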
def _ndtri(p):
"""Implements ndtri core logic."""
# Constants used in piece-wise rational approximations. Taken from the cephes
# library:
# https://github.com/scipy/scipy/blob/master/scipy/special/cephes/ndtri.c
p0 = list(reversed([-5.99633501014107895267E1,
9.80010754185999661536E1,
-5.66762857469070293439E1,
1.39312609387279679503E1,
-1.23916583867381258016E0]))
q0 = list(reversed([1.0,
1.95448858338141759834E0,
4.67627912898881538453E0,
8.63602421390890590575E1,
-2.25462687854119370527E2,
2.00260212380060660359E2,
-8.20372256168333339912E1,
1.59056225126211695515E1,
-1.18331621121330003142E0]))
p1 = list(reversed([4.05544892305962419923E0,
3.15251094599893866154E1,
5.71628192246421288162E1,
4.40805073893200834700E1,
1.46849561928858024014E1,
2.18663306850790267539E0,
-1.40256079171354495875E-1,
-3.50424626827848203418E-2,
-8.57456785154685413611E-4]))
q1 = list(reversed([1.0,
1.57799883256466749731E1,
4.53907635128879210584E1,
4.13172038254672030440E1,
1.50425385692907503408E1,
2.50464946208309415979E0,
-1.42182922854787788574E-1,
-3.80806407691578277194E-2,
-9.33259480895457427372E-4]))
p2 = list(reversed([3.23774891776946035970E0,
6.91522889068984211695E0,
3.93881025292474443415E0,
1.33303460815807542389E0,
2.01485389549179081538E-1,
1.23716634817820021358E-2,
3.01581553508235416007E-4,
2.65806974686737550832E-6,
6.23974539184983293730E-9]))
q2 = list(reversed([1.0,
6.02427039364742014255E0,
3.67983563856160859403E0,
1.37702099489081330271E0,
2.16236993594496635890E-1,
1.34204006088543189037E-2,
3.28014464682127739104E-4,
2.89247864745380683936E-6,
6.79019408009981274425E-9]))
def _create_polynomial(var, coeffs):
"""Compute n_th order polynomial via Horner's method."""
if not coeffs:
return 0.
return coeffs[0] + _create_polynomial(var, coeffs[1:]) * var
maybe_complement_p = array_ops.where(p > 1. - np.exp(-2.), 1. - p, p)
# Write in an arbitrary value in place of 0 for p since 0 will cause NaNs
# later on. The result from the computation when p == 0 is not used so any
# number that doesn't result in NaNs is fine.
sanitized_mcp = array_ops.where(
maybe_complement_p <= 0.,
constant_op.constant(0.5, dtype=p.dtype, shape=p.shape),
maybe_complement_p)
# Compute x for p > exp(-2): x/sqrt(2pi) = w + w**3 P0(w**2)/Q0(w**2).
w = sanitized_mcp - 0.5
ww = w ** 2
x_for_big_p = w + w * ww * (_create_polynomial(ww, p0)
/ _create_polynomial(ww, q0))
x_for_big_p *= -np.sqrt(2. * np.pi)
# Compute x for p <= exp(-2): x = z - log(z)/z - (1/z) P(1/z) / Q(1/z),
# where z = sqrt(-2. * log(p)), and P/Q are chosen between two different
  # arrays based on whether p < exp(-32).
z = math_ops.sqrt(-2. * math_ops.log(sanitized_mcp))
first_term = z - math_ops.log(z) / z
second_term_small_p = (_create_polynomial(1. / z, p2)
/ _create_polynomial(1. / z, q2)) / z
second_term_otherwise = (_create_polynomial(1. / z, p1)
/ _create_polynomial(1. / z, q1)) / z
x_for_small_p = first_term - second_term_small_p
x_otherwise = first_term - second_term_otherwise
x = array_ops.where(sanitized_mcp > np.exp(-2.),
x_for_big_p,
array_ops.where(z >= 8.0, x_for_small_p, x_otherwise))
x = array_ops.where(p > 1. - np.exp(-2.), x, -x)
infinity = constant_op.constant(np.inf, dtype=x.dtype, shape=x.shape)
x_nan_replaced = array_ops.where(
p <= 0.0, -infinity, array_ops.where(p >= 1.0, infinity, x))
return x_nan_replaced
def log_ndtr(x, series_order=3, name="log_ndtr"):
"""Log Normal distribution function.
For details of the Normal distribution function see `ndtr`.
This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
using an asymptotic series. Specifically:
- For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
`log(1-x) ~= -x, x << 1`.
- For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
and take a log.
- For `x <= lower_segment`, we use the series approximation of erf to compute
the log CDF directly.
The `lower_segment` is set based on the precision of the input:
```
lower_segment = { -20, x.dtype=float64
{ -10, x.dtype=float32
upper_segment = { 8, x.dtype=float64
{ 5, x.dtype=float32
```
When `x < lower_segment`, the `ndtr` asymptotic series approximation is:
```
ndtr(x) = scale * (1 + sum) + R_N
scale = exp(-0.5 x**2) / (-x sqrt(2 pi))
sum = Sum{(-1)^n (2n-1)!! / (x**2)^n, n=1:N}
R_N = O(exp(-0.5 x**2) (2N+1)!! / |x|^{2N+3})
```
where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
[double-factorial](https://en.wikipedia.org/wiki/Double_factorial).
Args:
x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
    TypeError: if `series_order` is not a Python `integer`.
ValueError: if `series_order` is not in `[0, 30]`.
"""
if not isinstance(series_order, int):
raise TypeError("series_order must be a Python integer.")
if series_order < 0:
raise ValueError("series_order must be non-negative.")
if series_order > 30:
raise ValueError("series_order must be <= 30.")
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.as_numpy_dtype == np.float64:
lower_segment = LOGNDTR_FLOAT64_LOWER
upper_segment = LOGNDTR_FLOAT64_UPPER
elif x.dtype.as_numpy_dtype == np.float32:
lower_segment = LOGNDTR_FLOAT32_LOWER
upper_segment = LOGNDTR_FLOAT32_UPPER
else:
raise TypeError("x.dtype=%s is not supported." % x.dtype)
# The basic idea here was ported from py/scipy/special/cephes/ndtr.c.
# We copy the main idea, with a few changes
# * For x >> 1, and X ~ Normal(0, 1),
# Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
# which extends the range of validity of this function.
# * We use one fixed series_order for all of 'x', rather than adaptive.
# * Our docstring properly reflects that this is an asymptotic series, not a
# Taylor series. We also provided a correct bound on the remainder.
# * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
# x=0. This happens even though the branch is unchosen because when x=0
# the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
# regardless of whether dy is finite. Note that the minimum is a NOP if
# the branch is chosen.
return array_ops.where(
math_ops.greater(x, upper_segment),
-_ndtr(-x), # log(1-x) ~= -x, x << 1
array_ops.where(math_ops.greater(x, lower_segment),
math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
_log_ndtr_lower(math_ops.minimum(x, lower_segment),
series_order)))
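# Sketch of the three regimes described above (illustrative, not part of the
# original module): for float64, x <= -20 takes the asymptotic-series branch,
# -20 < x <= 8 takes log(ndtr(x)), and x > 8 takes the -ndtr(-x) branch.
def _example_log_ndtr_regimes():
  x = constant_op.constant([-25., 0., 10.], dtype=np.float64)
  return log_ndtr(x)  # one value from each branch when evaluated in a session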
def _log_ndtr_lower(x, series_order):
"""Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
x_2 = math_ops.square(x)
# Log of the term multiplying (1 + sum)
log_scale = -0.5 * x_2 - math_ops.log(-x) - 0.5 * math.log(2. * math.pi)
return log_scale + math_ops.log(_log_ndtr_asymptotic_series(x, series_order))
def _log_ndtr_asymptotic_series(x, series_order):
"""Calculates the asymptotic series used in log_ndtr."""
if series_order <= 0:
return 1.
x_2 = math_ops.square(x)
even_sum = 0.
odd_sum = 0.
x_2n = x_2 # Start with x^{2*1} = x^{2*n} with n = 1.
for n in range(1, series_order + 1):
if n % 2:
odd_sum += _double_factorial(2 * n - 1) / x_2n
else:
even_sum += _double_factorial(2 * n - 1) / x_2n
x_2n *= x_2
return 1. + even_sum - odd_sum
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
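# Worked example for the helper above (illustrative): the double factorial of
# 5 is 5 * 3 * 1 = 15 and of 7 is 7 * 5 * 3 * 1 = 105, matching
# np.prod(np.arange(n, 1, -2)).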
def log_cdf_laplace(x, name="log_cdf_laplace"):
"""Log Laplace distribution function.
This function calculates `Log[L(x)]`, where `L(x)` is the cumulative
distribution function of the Laplace distribution, i.e.
```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt```
For numerical accuracy, `L(x)` is computed in different ways depending on `x`,
```
x <= 0:
Log[L(x)] = Log[0.5] + x, which is exact
0 < x:
Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact
```
Args:
x: `Tensor` of type `float32`, `float64`.
    name: Python string. A name for the operation (default="log_cdf_laplace").
Returns:
`Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
# For x < 0, L(x) = 0.5 * exp{x} exactly, so Log[L(x)] = log(0.5) + x.
lower_solution = -np.log(2.) + x
# safe_exp_neg_x = exp{-x} for x > 0, but is
# bounded above by 1, which avoids
# log[1 - 1] = -inf for x = log(1/2), AND
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
    # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
return array_ops.where(x < 0., lower_solution, upper_solution)
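# Minimal check of the two branches (illustrative, not part of the original
# module): for x <= 0 the exact value is log(0.5) + x, so log_cdf_laplace(0.)
# is about -0.693; for x > 0 the log1p branch gives log(1 - 0.5 * exp(-x)).
def _example_log_cdf_laplace():
  x = constant_op.constant([-2., 0., 2.], dtype=np.float64)
  return log_cdf_laplace(x)  # roughly [-2.693, -0.693, -0.070] in a session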
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/python/ops/distributions/special_math.py
|
Python
|
apache-2.0
| 14,747
|
[
"Gaussian"
] |
331c8a913a0ebe85b7a7db4768352b24b3383f6438936645599ebc5948d89fd6
|
#!/usr/bin/env python
#
# Copyright 2013 Tristan Bereau and Christian Kramer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################
# Write R script to generate the field pictures using ggplot2
#
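#
# Usage sketch (inferred from the argv handling below; the name is a
# placeholder):
#   python draw_pics.py <filename_body>
# writes <filename_body>_makepics.r, which is then run with
#   R CMD BATCH <filename_body>_makepics.r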
from sys import *
import subprocess,os,time
import os.path
filename_body = argv[1]
fp = open(filename_body+"_makepics.r","w")
fp.write("library(ggplot2)\n") # Diff Fields first
fp.write("xydiffs = read.table('"+filename_body+"_xydiffs.txt',na.strings='********')\n")
fp.write("colnames(xydiffs) = c('X','Y','Diff')\n")
fp.write("postscript('"+filename_body+"_xydiffs.eps')\n")
fp.write("v <- ggplot(xydiffs, aes(Y,X,z=Diff))\n")
fp.write("v + geom_tile(aes(fill=Diff)) + scale_fill_gradientn(colour=c('white','yellow','green','orange','red'),limits=c(0,40)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xzdiffs = read.table('"+filename_body+"_xzdiffs.txt',na.strings='********')\n")
fp.write("colnames(xzdiffs) = c('X','Z','Diff')\n")
fp.write("postscript('"+filename_body+"_xzdiffs.eps')\n")
fp.write("v <- ggplot(xzdiffs, aes(Z,X,z=Diff))\n")
fp.write("v + geom_tile(aes(fill=Diff)) + scale_fill_gradientn(colour=c('white','yellow','green','orange','red'),limits=c(0,40)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("yzdiffs = read.table('"+filename_body+"_yzdiffs.txt',na.strings='********')\n")
fp.write("colnames(yzdiffs) = c('Y','Z','Diff')\n")
fp.write("postscript('"+filename_body+"_yzdiffs.eps')\n")
fp.write("v <- ggplot(yzdiffs, aes(Z,Y,z=Diff))\n")
fp.write("v + geom_tile(aes(fill=Diff)) + scale_fill_gradientn(colour=c('white','yellow','green','orange','red'),limits=c(0,40)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xy_gauss = read.table('"+filename_body+"_xy-gauss-en.txt',na.strings='********')\n") # Potential_fields from Gaussian second
fp.write("colnames(xy_gauss) = c('X','Y','MEP')\n")
fp.write("postscript('"+filename_body+"_xy_gauss.eps')\n")
fp.write("v <- ggplot(xy_gauss, aes(Y,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xz_gauss = read.table('"+filename_body+"_xz-gauss-en.txt',na.strings='********')\n")
fp.write("colnames(xz_gauss) = c('X','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_xz_gauss.eps')\n")
fp.write("v <- ggplot(xz_gauss, aes(Z,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("yz_gauss = read.table('"+filename_body+"_yz-gauss-en.txt',na.strings='********')\n")
fp.write("colnames(yz_gauss) = c('Y','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_yz_gauss.eps')\n")
fp.write("v <- ggplot(yz_gauss, aes(Z,Y,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xy_mtp = read.table('"+filename_body+"_xy-mult-en.txt',na.strings='********')\n") # Potential_fields from Multipoles last
fp.write("colnames(xy_mtp) = c('X','Y','MEP')\n")
fp.write("postscript('"+filename_body+"_xy_mtp.eps')\n")
fp.write("v <- ggplot(xy_mtp, aes(Y,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("xz_mtp = read.table('"+filename_body+"_xz-mult-en.txt',na.strings='********')\n")
fp.write("colnames(xz_mtp) = c('X','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_xz_mtp.eps')\n")
fp.write("v <- ggplot(xz_mtp, aes(Z,X,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("yz_mtp = read.table('"+filename_body+"_yz-mult-en.txt',na.strings='********')\n")
fp.write("colnames(yz_mtp) = c('Y','Z','MEP')\n")
fp.write("postscript('"+filename_body+"_yz_mtp.eps')\n")
fp.write("v <- ggplot(yz_mtp, aes(Z,Y,z=MEP))\n")
fp.write("v + geom_tile(aes(fill=MEP)) + scale_fill_gradientn(colour=rainbow(10),limits=c(-50,50)) + stat_contour() + xlab('Z [Bohr]') + ylab('Y [Bohr]') + opts(axis.title.x = theme_text(face = 'bold',size = 14),axis.title.y = theme_text(face = 'bold',size = 14,angle = 90),axis.text.x = theme_text(size = 14,vjust=1),axis.text.y = theme_text(size = 14,hjust = 1),legend.text = theme_text(size = 12),legend.title = theme_text(size = 12, face = 'bold', hjust = 0))\n")
fp.write("dev.off()\n")
fp.write("q()\n")
fp.close()
print
print " Run 'R CMD BATCH "+filename_body+"_makepics.r' to generate the pictures"
print
exit(0)
|
MMunibas/FittingWizard
|
scripts/draw_pics.py
|
Python
|
bsd-3-clause
| 8,080
|
[
"Gaussian"
] |
c0fbbff279283a90d008d0805aceeeaab0783dca78447cf01342f29779ecdbec
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2014 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors:
#
# This code for reading Amber restart and inpcrd files was taken from ParmEd,
# which is released under the GNU Lesser General Public License
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
"""
This module provides the ability to read Amber inpcrd/restart files as well as
Amber NetCDF restart files. This code was taken from ParmEd and simplified by
removing the functionality that is not needed.
"""
from __future__ import print_function, division
from distutils.version import StrictVersion
from math import ceil
import os
import warnings
import numpy as np
from mdtraj import version
from mdtraj.formats.registry import FormatRegistry
from mdtraj.utils import ensure_type, import_, in_units_of, cast_indices, six
__all__ = ['AmberRestartFile', 'load_restrt', 'AmberNetCDFRestartFile',
'load_ncrestrt']
range = six.moves.range
@FormatRegistry.register_loader('.rst7')
@FormatRegistry.register_loader('.restrt')
@FormatRegistry.register_loader('.inpcrd')
def load_restrt(filename, top=None, atom_indices=None):
"""Load an AMBER ASCII restart/inpcrd file. Since this file doesn't contain
information to specify the topology, you need to supply a topology
Parameters
----------
filename : str
name of the AMBER restart file
top : {str, Trajectory, Topology}
Pass in either the path to a file containing topology information (e.g.,
a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
supply the necessary topology information that is not present in these
files
atom_indices : array_like, optional
        If not None, then read only a subset of the atoms' coordinates from the
file.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object
See Also
--------
mdtraj.AmberRestartFile : Low level interface to AMBER restart files
"""
from mdtraj.core.trajectory import _parse_topology
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
with AmberRestartFile(filename) as f:
return f.read_as_traj(topology, atom_indices=atom_indices)
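# Minimal usage sketch (hypothetical file names, not part of the original
# module): an ASCII restart always yields a single-frame trajectory.
def _example_load_restrt():
    traj = load_restrt('system.rst7', top='system.prmtop')
    return traj.n_frames  # -> 1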
@FormatRegistry.register_fileobject('.rst7')
@FormatRegistry.register_fileobject('.restrt')
@FormatRegistry.register_fileobject('.inpcrd')
class AmberRestartFile(object):
"""Interface for reading and writing AMBER ASCII restart files. This is a
file-like object, that supports both reading and writing depending on the
`mode` flag. It implements the context manager protocol, so you can also
use it with the python 'with' statement.
Parameters
----------
filename : str
The name of the file to open
mode : {'r', 'w'}, default='r'
The mode in which to open the file. Valid options are 'r' or 'w' for
'read' or 'write'
force_overwrite : bool, default=False
In write mode, if a file named `filename` already exists, clobber it and
overwrite it
See Also
--------
md.AmberNetCDFRestartFile : Low level interface to AMBER NetCDF-format
restart files
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=True):
self._closed = True
self._mode = mode
self._filename = filename
if mode not in ('r', 'w'):
raise ValueError("mode must be one of ['r', 'w']")
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
if mode == 'w':
self._needs_initialization = True
self._handle = open(filename, mode)
self._closed = False
elif mode == 'r':
with open(filename, mode) as f:
f.readline()
words = f.readline().split()
try:
self._n_atoms = int(words[0])
except (IndexError, ValueError):
raise TypeError('"%s" is not a recognized Amber restart' %
filename)
self._needs_initialization = False
else:
raise RuntimeError()
@property
def n_atoms(self):
self._validate_open()
if self._needs_initialization:
raise IOError('The file is uninitialized')
return self._n_atoms
@property
def n_frames(self):
return 1 # always 1 frame
def _validate_open(self):
if self._closed:
raise IOError('The file is closed.')
def _parse(self, lines):
""" Parses the file """
self._time = None
try:
words = lines[1].split()
self._n_atoms = natom = int(words[0])
except (IndexError, ValueError):
raise TypeError('not a recognized Amber restart')
time = None
if len(words) >= 2:
time = float(words[1])
lines_per_frame = int(ceil(natom / 2))
if len(lines) == lines_per_frame + 2:
hasbox = hasvels = False
elif natom in (1, 2) and len(lines) == 4:
# This is the _only_ case where line counting does not work -- there
# is either 1 or 2 atoms and there are 4 lines. The 1st 3 lines are
            # the title, natom/time, and coordinates. The 4th is almost always
# velocities since it's hard to have a periodic system this small.
# However, velocities (which are scaled down by 20.445) have a ~0%
# chance of being 60+, so we can pretty easily tell if the last line
# has box dimensions and angles or velocities. I cannot envision a
# plausible scenario where the detection here will ever fail
line = lines[3]
if natom == 1:
tmp = [line[i:i+12] for i in range(0, 72, 12) if
line[i:i+12].strip()]
if len(tmp) == 3:
hasvels = True
hasbox = False
elif len(tmp) == 6:
hasbox = True
hasvels = False
else:
raise TypeError('not a recognized Amber restart')
else:
# Ambiguous case
tmp = [float(line[i:i+12]) >= 60.0 for i in range(0, 72, 12)]
if any(tmp):
hasbox = True
hasvels = False
else:
hasvels = True
hasbox = False
elif len(lines) == lines_per_frame + 3:
hasbox = True
hasvels = False
elif len(lines) == 2*lines_per_frame + 2:
hasbox = False
hasvels = True
elif len(lines) == 2*lines_per_frame + 3:
hasbox = hasvels = True
else:
raise TypeError('Badly formatted restart file. Has %d lines for '
'%d atoms' % (len(lines), natom))
coordinates = np.zeros((1, natom, 3))
if time is None:
time = np.zeros(1)
else:
time = np.asarray((time,))
# Fill the coordinates
for i in range(lines_per_frame):
line = lines[i+2] # Skip first two lines
i2 = i * 2
coordinates[0,i2,:] = [float(line[j:j+12]) for j in range(0,36,12)]
i2 += 1
if i2 < natom:
coordinates[0,i2,:] = [float(line[j:j+12]) for j in
range(36,72,12)]
if hasbox:
cell_lengths = np.zeros((1,3))
cell_angles = np.zeros((1,3))
line = lines[-1]
cell_lengths[0,:] = [float(line[i:i+12]) for i in range(0,36,12)]
cell_angles[0,:] = [float(line[i:i+12]) for i in range(36,72,12)]
else:
cell_lengths = cell_angles = None
return coordinates, time, cell_lengths, cell_angles
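    # Worked example of the line counting in _parse (illustrative note, not
    # original code): for natom = 5, lines_per_frame = ceil(5 / 2) = 3, so a
    # restart with 5 lines has neither box nor velocities, 6 lines adds a box,
    # 8 lines adds velocities, and 9 lines has both.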
def read_as_traj(self, topology, atom_indices=None):
"""Read an AMBER ASCII restart file as a trajectory.
Parameters
----------
topology : Topology
The system topology
atom_indices : array_like, optional
            If not None, then read only a subset of the atoms' coordinates from
            the file. This may be slightly slower than the standard read because
            it requires an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object with 1 frame created from the file.
"""
from mdtraj.core.trajectory import Trajectory
if atom_indices is not None:
topology = topology.subset(atom_indices)
xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
inplace=True)
cell_lengths = in_units_of(cell_lengths, self.distance_unit,
Trajectory._distance_unit, inplace=True)
return Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
def read(self, atom_indices=None):
"""Read data from an AMBER ASCII restart file
Parameters
----------
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(1, n_atoms, 3)
The cartesian coordinates of the atoms, in units of angstroms. These
files only ever contain 1 frame
time : np.ndarray, None
The time corresponding to the frame, in units of picoseconds, or
None if no time information is present
cell_lengths : np.ndarray, None
The lengths (a, b, c) of the unit cell for the frame in angstroms,
or None if the information is not present in the file
cell_angles : np.ndarray, None
The angles (\alpha, \beta, \gamma) defining the unit cell for each
frame, or None if the information is not present in the file.
"""
if self._mode != 'r':
raise IOError('The file was opened in mode=%s. Reading is not '
'allowed.' % self._mode)
with open(self._filename, 'r') as f:
lines = f.readlines()
coordinates, time, cell_lengths, cell_angles = self._parse(lines)
if atom_indices is not None:
atom_slice = ensure_type(atom_indices, dtype=np.int, ndim=1,
name='atom_indices', warn_on_cast=False)
            if not np.all(atom_slice >= 0):
raise ValueError('Entries in atom_slice must be >= 0')
coordinates = coordinates[:, atom_slice, :]
return coordinates, time, cell_lengths, cell_angles
def write(self, coordinates, time=None, cell_lengths=None,
cell_angles=None):
"""Write one frame of a MD trajectory to disk in the AMBER ASCII restart
file format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
The cartesian coordinates of each atom, in units of angstroms. Must
be only a single frame (shape can be (1,N,3) or (N,3) where N is
the number of atoms)
time : array-like with 1 element or float, optional
            The time corresponding to this frame. If not specified, a
            placeholder of 0 will be written
cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
The lengths (a,b,c) of the unit cell for the frame in Angstroms
cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
The angles between the unit cell vectors for the frame in Degrees
"""
if self._mode != 'w':
raise IOError('The file was opened in mode=%s. Writing not allowed.'
% self._mode)
if not self._needs_initialization:
# Must have already been written -- can only write once
raise RuntimeError('restart file has already been written -- can '
'only write one frame to restart files.')
# These are no-ops.
# coordinates = in_units_of(coordinates, None, 'angstroms')
# time = in_units_of(time, None, 'picoseconds')
# cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
# cell_angles = in_units_of(cell_angles, None, 'degrees')
# typecheck all of the input arguments rigorously
coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
length=None, can_be_none=False,
shape=(1,None,3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
n_frames, self._n_atoms = coordinates.shape[0], coordinates.shape[1]
if n_frames != 1:
raise ValueError('Can only write 1 frame to a restart file!')
if time is not None:
try:
time = float(time)
except TypeError:
raise TypeError('Can only provide a single time')
else:
time = 0.0
cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
if ((cell_lengths is None and cell_angles is not None) or
(cell_lengths is not None and cell_angles is None)):
prov, negl = 'cell_lengths', 'cell_angles'
if cell_lengths is None:
prov, negl = negl, prov
raise ValueError('You provided the variable "%s" but did not '
'provide "%s". Either provide both or neither -- '
'one without the other is meaningless.' %
(prov, negl))
self._handle.write('Amber restart file (without velocities) written by '
'MDTraj\n')
self._handle.write('%5d%15.7e\n' % (self._n_atoms, time))
fmt = '%12.7f%12.7f%12.7f'
for i in range(self._n_atoms):
acor = coordinates[0, i, :]
self._handle.write(fmt % (acor[0], acor[1], acor[2]))
if i % 2 == 1: self._handle.write('\n')
if self._n_atoms % 2 == 1: self._handle.write('\n')
if cell_lengths is not None:
self._handle.write(fmt % (cell_lengths[0,0], cell_lengths[0,1],
cell_lengths[0,2]))
self._handle.write(fmt % (cell_angles[0,0], cell_angles[0,1],
cell_angles[0,2]) + '\n')
self._handle.flush()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
if not self._closed and hasattr(self, '_handle'):
self._handle.close()
self._closed = True
def __del__(self):
self.close()
def __len__(self):
return 1 # All restarts have only 1 frame
@FormatRegistry.register_loader('.ncrst')
def load_ncrestrt(filename, top=None, atom_indices=None):
"""Load an AMBER NetCDF restart/inpcrd file. Since this file doesn't
contain information to specify the topology, you need to supply a topology
Parameters
----------
filename : str
name of the AMBER restart file
top : {str, Trajectory, Topology}
Pass in either the path to a file containing topology information (e.g.,
a PDB, an AMBER prmtop, or certain types of Trajectory objects) to
supply the necessary topology information that is not present in these
files
atom_indices : array_like, optional
        If not None, then read only a subset of the atoms' coordinates from the
file.
Returns
-------
trajectory : md.Trajectory
The resulting trajectory, as an md.Trajectory object
See Also
--------
mdtraj.AmberRestartFile : Low level interface to AMBER restart files
"""
from mdtraj.core.trajectory import _parse_topology
topology = _parse_topology(top)
atom_indices = cast_indices(atom_indices)
with AmberNetCDFRestartFile(filename) as f:
return f.read_as_traj(topology, atom_indices=atom_indices)
@FormatRegistry.register_fileobject('.ncrst')
class AmberNetCDFRestartFile(object):
"""Interface for reading and writing AMBER NetCDF files. This is a file-like
object, that supports both reading and writing depending on the `mode` flag.
It implements the context manager protocol, so you can also use it with the
python 'with' statement.
Parameters
----------
filename : str
The name of the file to open
mode : {'r', 'w'}, default='r'
The mode in which to open the file. Valid options are 'r' or 'w' for
'read' or 'write'
force_overwrite : bool, default=False
In write mode, if a file named `filename` already exists, clobber it and
overwrite it
"""
distance_unit = 'angstroms'
def __init__(self, filename, mode='r', force_overwrite=False):
self._closed = True
self._mode = mode
if StrictVersion(import_('scipy.version').short_version) < StrictVersion('0.12.0'):
raise ImportError('MDTraj NetCDF support requires scipy>=0.12.0. '
'You have %s' % import_('scipy.version').short_version)
netcdf = import_('scipy.io').netcdf_file
if mode not in ('r', 'w'):
raise ValueError("mode must be one of ['r', 'w']")
if mode == 'w' and not force_overwrite and os.path.exists(filename):
raise IOError('"%s" already exists' % filename)
# AMBER uses the NetCDF3 format, with 64 bit encodings, which for
# scipy.io.netcdf_file is "version=2"
self._handle = netcdf(filename, mode=mode, version=2)
self._closed = False
if mode == 'w':
self._needs_initialization = True
elif mode == 'r':
self._needs_initialization = False
else:
raise RuntimeError()
@property
def n_atoms(self):
self._validate_open()
if self._needs_initialization:
raise IOError('The file is uninitialized')
return self._handle.dimensions['atom']
@property
def n_frames(self):
return 1 # always 1 frame
def _validate_open(self):
if self._closed:
raise IOError('The file is closed.')
def read_as_traj(self, topology, atom_indices=None):
"""Read an AMBER ASCII restart file as a trajectory.
Parameters
----------
topology : Topology
The system topology
atom_indices : array_like, optional
            If not None, then read only a subset of the atoms' coordinates from
            the file. This may be slightly slower than the standard read because
            it requires an extra copy, but will save memory.
Returns
-------
trajectory : Trajectory
A trajectory object with 1 frame created from the file.
"""
from mdtraj.core.trajectory import Trajectory
if atom_indices is not None:
topology = topology.subset(atom_indices)
xyz, time, cell_lengths, cell_angles = self.read(atom_indices=atom_indices)
xyz = in_units_of(xyz, self.distance_unit, Trajectory._distance_unit,
inplace=True)
cell_lengths = in_units_of(cell_lengths, self.distance_unit,
Trajectory._distance_unit, inplace=True)
return Trajectory(xyz=xyz, topology=topology, time=time,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
def read(self, atom_indices=None):
"""Read data from an AMBER NetCDF restart file
Parameters
----------
atom_indices : np.ndarray, dtype=int, optional
The specific indices of the atoms you'd like to retrieve. If not
supplied, all of the atoms will be retrieved.
Returns
-------
coordinates : np.ndarray, shape=(1, n_atoms, 3)
The cartesian coordinates of the atoms, in units of angstroms. These
files only ever contain 1 frame
time : np.ndarray, None
The time corresponding to the frame, in units of picoseconds, or
None if no time information is present
cell_lengths : np.ndarray, None
The lengths (a, b, c) of the unit cell for the frame in angstroms,
or None if the information is not present in the file
cell_angles : np.ndarray, None
The angles (\alpha, \beta, \gamma) defining the unit cell for each
frame, or None if the information is not present in the file.
Notes
-----
If the file is not a NetCDF file with the appropriate convention, a
TypeError is raised. If variables that are needed do not exist or if
illegal values are passed in for parameters, ValueError is raised. If
I/O errors occur, IOError is raised.
"""
if self._mode != 'r':
raise IOError('The file was opened in mode=%s. Reading is not '
'allowed.' % self._mode)
if 'coordinates' not in self._handle.variables:
raise ValueError('No coordinates found in the NetCDF file.')
# Check that conventions are correct
try:
conventions = self._handle.Conventions.decode('ascii')
except UnicodeDecodeError:
raise TypeError('NetCDF file does not have correct Conventions')
try:
convention_version = self._handle.ConventionVersion.decode('ascii')
except UnicodeDecodeError:
raise ValueError('NetCDF file does not have correct ConventionVersion')
except AttributeError:
raise TypeError('NetCDF file does not have ConventionVersion')
if (not hasattr(self._handle, 'Conventions') or
conventions != 'AMBERRESTART'):
raise TypeError('NetCDF file does not have correct Conventions')
if convention_version != '1.0':
raise ValueError('NetCDF restart has ConventionVersion %s. Only '
'Version 1.0 is supported.' % convention_version)
if atom_indices is not None:
atom_slice = ensure_type(atom_indices, dtype=np.int, ndim=1,
name='atom_indices', warn_on_cast=False)
            if not np.all(atom_slice >= 0):
raise ValueError('Entries in atom_slice must be >= 0')
coordinates = self._handle.variables['coordinates'][atom_slice, :]
else:
coordinates = self._handle.variables['coordinates'][:, :]
# Get unit cell parameters
if 'cell_lengths' in self._handle.variables:
cell_lengths = self._handle.variables['cell_lengths'][:]
else:
cell_lengths = None
if 'cell_angles' in self._handle.variables:
cell_angles = self._handle.variables['cell_angles'][:]
else:
cell_angles = None
        if cell_lengths is None and cell_angles is not None:
            warnings.warn('cell_angles were found, but no cell_lengths')
        if cell_lengths is not None and cell_angles is None:
            warnings.warn('cell_lengths were found, but no cell_angles')
if 'time' in self._handle.variables:
time = self._handle.variables['time'].getValue()
else:
time = None
# scipy.io.netcdf variables are mem-mapped, and are only backed by valid
# memory while the file handle is open. This is _bad_ because we need to
# support the user opening the file, reading the coordinates, and then
# closing it, and still having the coordinates be a valid memory
# segment.
# https://github.com/mdtraj/mdtraj/issues/440
if coordinates is not None and not coordinates.flags['WRITEABLE']:
coordinates = np.array(coordinates, copy=True)
if cell_lengths is not None and not cell_lengths.flags['WRITEABLE']:
cell_lengths = np.array(cell_lengths, copy=True)
if cell_angles is not None and not cell_angles.flags['WRITEABLE']:
cell_angles = np.array(cell_angles, copy=True)
# The leading frame dimension is missing on all of these arrays since
# restart files have only one frame. Reshape them to add this extra
# dimension
coordinates = coordinates[np.newaxis,:]
if cell_lengths is not None:
cell_lengths = cell_lengths[np.newaxis,:]
if cell_angles is not None:
cell_angles = cell_angles[np.newaxis,:]
if time is not None:
time = np.asarray([time,])
return coordinates, time, cell_lengths, cell_angles
def write(self, coordinates, time=None, cell_lengths=None,
cell_angles=None):
"""Write one frame of a MD trajectory to disk in the AMBER NetCDF
restart file format.
Parameters
----------
coordinates : np.ndarray, dtype=np.float32, shape=([1,] n_atoms, 3)
The cartesian coordinates of each atom, in units of angstroms. Must
be only a single frame (shape can be (1,N,3) or (N,3) where N is
the number of atoms)
time : array-like with 1 element or float, optional
            The time corresponding to this frame. If not specified, a
            placeholder of 0 will be written
cell_lengths : np.ndarray, dtype=np.double, shape=([1,] 3)
The lengths (a,b,c) of the unit cell for the frame in Angstroms
cell_angles : np.ndarray, dtype=np.double, shape=([1,] 3)
The angles between the unit cell vectors for the frame in Degrees
Notes
-----
You must only have one frame to write to this file.
"""
if self._mode != 'w':
raise IOError('The file was opened in mode=%s. Writing not allowed.'
% self._mode)
if not self._needs_initialization:
# Must have already been written -- can only write once
raise RuntimeError('NetCDF restart file has already been written '
'-- can only write one frame to restart files.')
# these are no-ops
# coordinates = in_units_of(coordinates, None, 'angstroms')
# time = in_units_of(time, None, 'picoseconds')
# cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
# cell_angles = in_units_of(cell_angles, None, 'degrees')
# typecheck all of the input arguments rigorously
coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates',
length=None, can_be_none=False,
shape=(1,None,3), warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
n_frames, n_atoms = coordinates.shape[0], coordinates.shape[1]
if n_frames != 1:
raise ValueError('Can only write 1 frame to a restart file!')
if time is not None:
try:
time = float(time)
except TypeError:
raise TypeError('Can only provide a single time')
else:
time = 0.0
cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles',
length=1, can_be_none=True,
warn_on_cast=False,
add_newaxis_on_deficient_ndim=True)
if ((cell_lengths is None and cell_angles is not None) or
(cell_lengths is not None and cell_angles is None)):
prov, negl = 'cell_lengths', 'cell_angles'
if cell_lengths is None:
prov, negl = negl, prov
raise ValueError('You provided the variable "%s" but did not '
'provide "%s". Either provide both or neither -- '
'one without the other is meaningless.' %
(prov, negl))
self._initialize_headers(n_atoms=n_atoms,
set_coordinates=True,
set_time=(time is not None),
set_cell=(cell_lengths is not None))
self._needs_initialization = False
# Write the time, coordinates, and box info
if time is not None:
self._handle.variables['time'][0] = float(time)
self._handle.variables['coordinates'][:,:] = coordinates[0,:,:]
if cell_lengths is not None:
self._handle.variables['cell_angles'][:] = cell_angles[0,:]
self._handle.variables['cell_lengths'][:] = cell_lengths[0,:]
self.flush()
def _initialize_headers(self, n_atoms, set_coordinates, set_time, set_cell):
"""Initialize the headers and convention properties of the NetCDF
restart file
"""
ncfile = self._handle
ncfile.Conventions = 'AMBERRESTART'
ncfile.ConventionVersion = "1.0"
ncfile.title = 'NetCDF Restart file written by MDTraj w/out velocities'
ncfile.application = 'Omnia'
ncfile.program = 'MDTraj'
ncfile.programVersion = version.short_version
# Dimensions
ncfile.createDimension('spatial', 3)
ncfile.createDimension('atom', n_atoms)
if set_cell:
ncfile.createDimension('cell_spatial', 3)
ncfile.createDimension('label', 5)
ncfile.createDimension('cell_angular', 3)
if set_time:
ncfile.createDimension('time', 1)
# Variables
v = ncfile.createVariable('spatial', 'c', ('spatial',))
v[:] = np.asarray(list('xyz'))
v = ncfile.createVariable('coordinates', 'd', ('atom', 'spatial'))
v.units = 'angstrom'
if set_cell:
v = ncfile.createVariable('cell_angular', 'c',
('cell_angular', 'label'))
v[0] = np.asarray(list('alpha'))
v[1] = np.asarray(list('beta '))
v[2] = np.asarray(list('gamma'))
v = ncfile.createVariable('cell_spatial', 'c', ('cell_spatial',))
v[:] = np.asarray(list('abc'))
v = ncfile.createVariable('cell_lengths', 'd', ('cell_spatial',))
v.units = 'angstrom'
v = ncfile.createVariable('cell_angles', 'd', ('cell_angular',))
v.units = 'degree'
if set_time:
v = ncfile.createVariable('time', 'd', ('time',))
v.units = 'picosecond'
self.flush()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
if not self._closed and hasattr(self, '_handle'):
self._handle.close()
self._closed = True
def __del__(self):
self.close()
def __len__(self):
return 1 # All restarts have only 1 frame
def flush(self):
self._validate_open()
if self._mode != 'w':
raise IOError('Cannot flush a file opened for reading')
self._handle.flush()
|
msultan/mdtraj
|
mdtraj/formats/amberrst.py
|
Python
|
lgpl-2.1
| 33,271
|
[
"Amber",
"MDTraj",
"NetCDF"
] |
bc4d93d27f3ebab7543e790fb27a72c3d11e4bab0a2857bfe00361e9cef06a2d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import inspect
from grafo import Grafo
def foo(a): # function under test
b = 2 + a
if (b > 2):
print a
print b
if (a > b):
print 3
if(a == 4):
print "asd"
else:
print a
print "asfsad"
a = 123
b = 234
if (b > a):
b = 4
pass
class Ast_walker(ast.NodeVisitor):
def __init__(self, grafo):
self.grafo = grafo
def visit_Module(self, node):
        # ASTs always start with a Module node, so this just visits the rest
self.generic_visit(node)
def visit_If(self, node):
        '''Every If node has the fields test (the condition),
        body (condition satisfied) and orelse (condition not satisfied).
        The nodes for these fields live inside those attributes (they are lists).
        '''
self.grafo.criaNo("If", node.lineno)
grafo.defCampo("body")
if (len(node.body) == 0):
self.grafo.criaNo("bodyVazio", node.lineno)
for no in node.body:
self.visit(no)
grafo.defCampo("orelse")
if (len(node.orelse) == 0):
self.grafo.criaNo("orelseVazio", node.lineno)
for no in node.orelse:
            self.visit(no) # calling generic_visit here would bypass the restrictions
grafo.defCampo("fimOrelse")
def generic_visit(self, node):
'''
        Nodes of types whose visits were not redefined
        by the methods above are visited by this method.
'''
lineno = -1
        # not every node has a lineno attribute, but all the useful ones do
if hasattr(node, "lineno"):
lineno = node.lineno
self.grafo.criaNo(type(node).__name__, lineno)
ast.NodeVisitor.generic_visit(self, node)
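# Illustrative note (not part of the original script): for a snippet such as
#   if x > 0:
#       print x
# ast.parse yields Module -> If(test=Compare, body=[Print], orelse=[]), so
# visit_If records the If node and then walks its body and orelse in order.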
grafo = Grafo()
walker = Ast_walker(grafo)
codeAst = ast.parse(inspect.getsource(foo))
walker.visit(codeAst)
# grafo.printGrafo()
grafo.geraDot()
|
hugo-tavares/python-cfg
|
ast_walker.py
|
Python
|
gpl-3.0
| 1,991
|
[
"VisIt"
] |
6fdcfc32239a9684f95d569787249fa9c919f6fe43ef39db1ddec1fbf310b86f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Liang Wang <liang.wang@cs.helsinki.fi>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
#
# Liang Wang @ CS Dept, Helsinki Univ, Finland
# 2014.05.01
#
import logging
import numpy
import tempfile
from scipy import linalg
from scipy.spatial import distance
logger = logging.getLogger('panns.utils')
class Node():
__slots__ = ['proj', 'ofst', 'lchd', 'rchd', 'nlst']
pass
class NaiveTree(object):
def __init__(self):
self.root = Node()
pass
pass
class Metric():
"""
The basic metric class used in Panns index building. Super class
of MetricEuclidean and MetricCosine.
"""
@staticmethod
def split(u, idxs, mtx):
"""
Project the data points onto a random vector and return the
average projection value, used as the split offset.
Parameters:
u: random vector.
idxs: data points to project.
mtx: data set.
"""
v = numpy.zeros(len(u), u.dtype)
for i in idxs:
v += mtx[i]
a = numpy.dot(u, v) / len(idxs)
return a
@staticmethod
def side(u, v, offset):
"""
Project v on u then check which side it falls in given the
offset.
Parameters:
u: random vector.
v: data point to project.
"""
r = None
x = numpy.dot(u, v) - offset
### Todo: need to be fixed for small value
if abs(x) < 1e-08:
r = ( numpy.random.uniform(0,1,1)[0] > 0.5 )
else:
r = ( x > 0 )
return r
pass
class MetricEuclidean(Metric):
"""
Metric class for Euclidean index.
"""
@staticmethod
def distance(u, v):
return distance.euclidean(u, v)
pass
class MetricCosine(Metric):
"""
Metric class for cosine index.
"""
@staticmethod
def distance(u, v):
return 1.0 - numpy.dot(u, v)
pass
def gaussian_vector(size, normalize=False, dtype='float32', seed=None):
"""
Returns a (normalized) Gaussian random vector.
Parameters:
normalize: the vector length is normalized to 1 if True.
"""
numpy.random.seed(seed)
v = numpy.random.normal(0,1,size)
if normalize:
v = v / linalg.norm(v)
return v
def precision(relevant, retrieved):
"""
Return the precision of the search result.
Parameters:
relevant: the relevant data points.
retrieved: the retrieved data points.
"""
r = 1.0 * len(set(relevant) & set(retrieved)) / len(retrieved)
return r
def recall(relevant, retrieved):
"""
Return the recall of the search result.
Parameters:
relevant: the relevant data points.
retrieved: the retrieved data points.
"""
r = 1.0 * len(set(relevant) & set(retrieved)) / len(relevant)
return r
def build_parallel(mtx, shape_mtx, K, dtype, t):
"""
The function used to build an index tree in parallel. Implemented at
module level because the default Python serialization cannot pickle instance methods.
Parameters:
mtx: a row-based data set, should be an numpy matrix.
K: max number of data points on a leaf.
t: index of binary trees.
"""
logger.info('pass %i ...' % t)
mtx = numpy.memmap(mtx, dtype=dtype, mode='r', shape=shape_mtx)
numpy.random.seed(t**2)
tree = NaiveTree()
children = range(len(mtx))
make_tree_parallel(tree.root, children, mtx, shape_mtx[1], dtype, K)
return tree
def make_tree_parallel(parent, children, mtx, dim, dtype, K, lvl=0):
"""
Builds up a binary tree recursively, for parallel building.
Parameters:
parent: parent node index.
children: a list of children node indices.
mtx: a row-based data set.
K: max number of data points on a leaf.
"""
if len(children) <= max(K, lvl):
parent.nlst = children
return
l_child, r_child = None, None
for attempt in xrange(16):
parent.proj = numpy.random.randint(2**32-1)
u = gaussian_vector(dim, True, dtype, parent.proj)
parent.ofst = Metric.split(u, children, mtx)
l_child, r_child = [], []
for i in children:
if Metric.side(mtx[i], u, parent.ofst):
r_child.append(i)
else:
l_child.append(i)
if len(l_child) > 0 and len(r_child) > 0:
break
parent.lchd = Node()
parent.rchd = Node()
make_tree_parallel(parent.lchd, l_child, mtx, dim, dtype, K)
make_tree_parallel(parent.rchd, r_child, mtx, dim, dtype, K)
return
def make_mmap(mtx, shape, dtype, fname=None):
m, n = shape
if fname is None:
fname = tempfile.mkstemp()[1]
logger.info('mmaping the data to %s ...' % fname)
fpw = numpy.memmap(fname, dtype=dtype, mode='w+', shape=(m,n))
for i in xrange(m):
fpw[i] = mtx[i]
del fpw
return fname
def load_mmap(fname, shape, dtype):
mtx = numpy.memmap(fname, dtype=dtype, mode='r', shape=shape)
return mtx
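# --- Illustrative sketch (not part of the original module) ---------------------
# A tiny, self-contained walk-through of the helpers above: draw a random
# projection vector, split a toy data set with Metric.split/Metric.side, and
# score a made-up retrieval result with precision()/recall().  Guarded so it
# only runs when this file is executed directly.
if __name__ == '__main__':
    mtx = numpy.random.normal(0, 1, (100, 16))       # 100 points in 16 dimensions
    u = gaussian_vector(16, normalize=True, seed=42)
    idxs = range(len(mtx))
    offset = Metric.split(u, idxs, mtx)               # average projection value
    left = [i for i in idxs if not Metric.side(u, mtx[i], offset)]
    right = [i for i in idxs if Metric.side(u, mtx[i], offset)]
    print('split: %d left / %d right' % (len(left), len(right)))
    relevant, retrieved = [1, 2, 3, 4], [2, 3, 5]
    print('precision=%.2f recall=%.2f' % (precision(relevant, retrieved),
                                           recall(relevant, retrieved)))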
|
beni55/panns
|
panns/utils.py
|
Python
|
gpl-2.0
| 5,074
|
[
"Gaussian"
] |
5ce2357d5c71f8a37c872285bda24875c52a368033b9436a4bc877bbd5c91976
|
import os
import warnings
import numpy as np
from ..core import indexing
from ..core.dataarray import DataArray
from ..core.utils import is_scalar
from .common import BackendArray
from .file_manager import CachingFileManager
from .locks import SerializableLock
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
_ERROR_MSG = (
"The kind of indexing operation you are trying to do is not "
"valid on rasterio files. Try to load your data with ds.load()"
"first."
)
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
def __init__(self, manager, lock, vrt_params=None):
from rasterio.vrt import WarpedVRT
self.manager = manager
self.lock = lock
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError("All bands should have the same dtype")
self._dtype = np.dtype(dtypes[0])
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
def _get_indexer(self, key):
"""Get indexer for rasterio array.
Parameters
----------
key : tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See Also
--------
indexing.decompose_indexer
"""
assert len(key) == 3, "rasterio datasets should always be 3D"
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(k, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = k.indices(n)
np_inds.append(slice(None, None, step))
elif is_scalar(k):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(-(2 - i))
start = k
stop = k + 1
else:
start, stop = np.min(k), np.max(k) + 1
np_inds.append(k - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
def _getitem(self, key):
from rasterio.vrt import WarpedVRT
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(s):
return np.fromstring(s.strip("{}"), dtype="float", sep=",")
def default(s):
return s.strip("{}")
parse = {"wavelength": parsevec, "fwhm": parsevec}
parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
return parsed_meta
def open_rasterio(
filename,
parse_coordinates=None,
chunks=None,
cache=None,
lock=None,
**kwargs,
):
"""Open a file with rasterio.
.. deprecated:: 0.20.0
Deprecated in favor of rioxarray.
For information about transitioning, see:
https://corteva.github.io/rioxarray/stable/getting_started/getting_started.html
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
You can generate 2D coordinates from the file's attributes with::
>>> from affine import Affine
>>> da = xr.open_rasterio(
... "https://github.com/rasterio/rasterio/raw/1.2.1/tests/data/RGB.byte.tif"
... )
>>> da
<xarray.DataArray (band: 3, y: 718, x: 791)>
[1703814 values with dtype=uint8]
Coordinates:
* band (band) int64 1 2 3
* y (y) float64 2.827e+06 2.826e+06 2.826e+06 ... 2.612e+06 2.612e+06
* x (x) float64 1.021e+05 1.024e+05 1.027e+05 ... 3.389e+05 3.392e+05
Attributes:
transform: (300.0379266750948, 0.0, 101985.0, 0.0, -300.041782729805...
crs: +init=epsg:32618
res: (300.0379266750948, 300.041782729805)
is_tiled: 0
nodatavals: (0.0, 0.0, 0.0)
scales: (1.0, 1.0, 1.0)
offsets: (0.0, 0.0, 0.0)
AREA_OR_POINT: Area
>>> transform = Affine(*da.attrs["transform"])
>>> transform
Affine(300.0379266750948, 0.0, 101985.0,
0.0, -300.041782729805, 2826915.0)
>>> nx, ny = da.sizes["x"], da.sizes["y"]
>>> x, y = transform * np.meshgrid(np.arange(nx) + 0.5, np.arange(ny) + 0.5)
>>> x
array([[102135.01896334, 102435.05689001, 102735.09481669, ...,
338564.90518331, 338864.94310999, 339164.98103666],
[102135.01896334, 102435.05689001, 102735.09481669, ...,
338564.90518331, 338864.94310999, 339164.98103666],
[102135.01896334, 102435.05689001, 102735.09481669, ...,
338564.90518331, 338864.94310999, 339164.98103666],
...,
[102135.01896334, 102435.05689001, 102735.09481669, ...,
338564.90518331, 338864.94310999, 339164.98103666],
[102135.01896334, 102435.05689001, 102735.09481669, ...,
338564.90518331, 338864.94310999, 339164.98103666],
[102135.01896334, 102435.05689001, 102735.09481669, ...,
338564.90518331, 338864.94310999, 339164.98103666]])
Parameters
----------
filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates : bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used to avoid issues with concurrent access to the same file when using
dask's multithreaded backend.
Returns
-------
data : DataArray
The newly created DataArray.
"""
warnings.warn(
"open_rasterio is Deprecated in favor of rioxarray. "
"For information about transitioning, see: "
"https://corteva.github.io/rioxarray/stable/getting_started/getting_started.html",
DeprecationWarning,
stacklevel=2,
)
import rasterio
from rasterio.vrt import WarpedVRT
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(
src_crs=vrt.src_crs.to_string(),
crs=vrt.crs.to_string(),
resampling=vrt.resampling,
tolerance=vrt.tolerance,
src_nodata=vrt.src_nodata,
nodata=vrt.nodata,
width=vrt.width,
height=vrt.height,
src_transform=vrt.src_transform,
transform=vrt.transform,
dtype=vrt.working_dtype,
warp_extras=vrt.warp_extras,
)
if lock is None:
lock = RASTERIO_LOCK
manager = CachingFileManager(
rasterio.open,
filename,
lock=lock,
mode="r",
kwargs=kwargs,
)
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
coords = {}
# Get bands
if riods.count < 1:
raise ValueError("Unknown dims")
coords["band"] = np.asarray(riods.indexes)
# Get coordinates
if riods.transform.is_rectilinear:
# 1d coordinates
parse = True if parse_coordinates is None else parse_coordinates
if parse:
nx, ny = riods.width, riods.height
# xarray coordinates are pixel centered
x, _ = riods.transform * (np.arange(nx) + 0.5, np.zeros(nx) + 0.5)
_, y = riods.transform * (np.zeros(ny) + 0.5, np.arange(ny) + 0.5)
coords["y"] = y
coords["x"] = x
else:
# 2d coordinates
parse = False if (parse_coordinates is None) else parse_coordinates
if parse:
warnings.warn(
"The file coordinates' transformation isn't "
"rectilinear: xarray won't parse the coordinates "
"in this case. Set `parse_coordinates=False` to "
"suppress this warning.",
RuntimeWarning,
stacklevel=3,
)
# Attributes
attrs = {}
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
attrs["transform"] = tuple(riods.transform)[:6]
if hasattr(riods, "crs") and riods.crs:
# CRS is a dict-like object specific to rasterio
# If CRS is not None, we convert it back to a PROJ4 string using
# rasterio itself
try:
attrs["crs"] = riods.crs.to_proj4()
except AttributeError:
attrs["crs"] = riods.crs.to_string()
if hasattr(riods, "res"):
# (width, height) tuple of pixels in units of CRS
attrs["res"] = riods.res
if hasattr(riods, "is_tiled"):
# Is the TIF tiled? (bool)
# We cast it to an int for netCDF compatibility
attrs["is_tiled"] = np.uint8(riods.is_tiled)
if hasattr(riods, "nodatavals"):
# The nodata values for the raster bands
attrs["nodatavals"] = tuple(
np.nan if nodataval is None else nodataval for nodataval in riods.nodatavals
)
if hasattr(riods, "scales"):
# The scale values for the raster bands
attrs["scales"] = riods.scales
if hasattr(riods, "offsets"):
# The offset values for the raster bands
attrs["offsets"] = riods.offsets
if hasattr(riods, "descriptions") and any(riods.descriptions):
# Descriptions for each dataset band
attrs["descriptions"] = riods.descriptions
if hasattr(riods, "units") and any(riods.units):
# A list of units string for each dataset band
attrs["units"] = riods.units
# Parse extra metadata from tags, if supported
parsers = {"ENVI": _parse_envi, "GTiff": lambda m: m}
driver = riods.driver
if driver in parsers:
if driver == "GTiff":
meta = parsers[driver](riods.tags())
else:
meta = parsers[driver](riods.tags(ns=driver))
for k, v in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if isinstance(v, (list, np.ndarray)) and len(v) == riods.count:
coords[k] = ("band", np.asarray(v))
else:
attrs[k] = v
data = indexing.LazilyIndexedArray(RasterioArrayWrapper(manager, lock, vrt_params))
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=("band", "y", "x"), coords=coords, attrs=attrs)
if chunks is not None:
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
token = tokenize(filename, mtime, chunks)
name_prefix = f"open_rasterio-{token}"
result = result.chunk(chunks, name_prefix=name_prefix, token=token)
# Make the file closeable
result.set_close(manager.close)
return result
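# --- Illustrative sketch (not part of the original module) ---------------------
# _parse_envi() above turns the string-valued ENVI tags reported by rasterio
# into numpy vectors (for "wavelength"/"fwhm") and plain strings otherwise.
# A minimal, self-contained example with made-up tag values:
def _parse_envi_demo():
    meta = {
        "wavelength": "{450.0, 550.0, 650.0}",  # hypothetical band centres, nm
        "fwhm": "{10.0, 10.0, 10.0}",
        "description": "{Toy three-band cube}",
    }
    parsed = _parse_envi(meta)
    # parsed["wavelength"] -> array([450., 550., 650.]); parsed["description"] -> plain str
    return parsed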
|
pydata/xarray
|
xarray/backends/rasterio_.py
|
Python
|
apache-2.0
| 15,484
|
[
"NetCDF"
] |
c9332eb9787854ab132f0f08764011adcfde2de0db81c181b9d5b0465b2caa7f
|
import numpy as np
from ase import Atom, Atoms
from ase.units import Hartree
from gpaw.mpi import size
from gpaw import GPAW
from gpaw.response.bse import BSE
GS = 1
bse = 1
casida = 1
compare = 1
if GS:
d = 2.89
cluster = Atoms([Atom('Na', (0, 0, 0)),
Atom('Na', (0, 0, d)),
], pbc=True)
cluster.set_cell((15.,15.,18.), scale_atoms=False)
cluster.center()
calc = GPAW(h=0.3, nbands=8, setups={'Na': '1'})
cluster.set_calculator(calc)
cluster.get_potential_energy()
calc.write('Na2.gpw','all')
if bse:
bse = BSE('Na2.gpw',
w=np.linspace(0,15,151),
nv=[0,8],
nc=[0,8],
mode='RPA',
coupling=True,
q=np.array([0,0,0.0001]),
optical_limit=True,
ecut=50.,
nbands=8)
bse.initialize()
H_SS = bse.calculate()
bse.diagonalize(H_SS)
w = np.real(bse.w_S) * Hartree
print np.shape(w)
energies = np.sort(w)[len(w)/2:]
print 'BSE:', energies
if casida:
from gpaw.lrtddft import LrTDDFT
from gpaw.lrtddft import photoabsorption_spectrum
calc = GPAW('Na2.gpw',txt=None)
lr = LrTDDFT(calc, xc=None, istart=0, jend=7, nspins=1)
lr.diagonalize()
photoabsorption_spectrum(lr, 'Na2_spectrum.dat', width=0.05)
energies_lrtddft = lr.get_energies() * Hartree
print 'lrTDDFT:', energies_lrtddft
if compare:
assert (np.abs(energies - energies_lrtddft)).max() < 3*1e-3
|
robwarm/gpaw-symm
|
gpaw/test/bse_vs_lrtddft.py
|
Python
|
gpl-3.0
| 1,536
|
[
"ASE",
"GPAW"
] |
d6401351e702652f2bcb77e73a64ac9924172a03662990727acbfa28aa3fdd33
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
import abc
import asyncio
import base64
import hashlib
import os
import sys
import struct
import tornado.escape
import tornado.web
from urllib.parse import urlparse
import zlib
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado.escape import utf8, native_str, to_unicode
from tornado import gen, httpclient, httputil
from tornado.ioloop import IOLoop, PeriodicCallback
from tornado.iostream import StreamClosedError, IOStream
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.queues import Queue
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask
from typing import (
TYPE_CHECKING,
cast,
Any,
Optional,
Dict,
Union,
List,
Awaitable,
Callable,
Tuple,
Type,
)
from types import TracebackType
if TYPE_CHECKING:
from typing_extensions import Protocol
# The zlib compressor types aren't actually exposed anywhere
# publicly, so declare protocols for the portions we use.
class _Compressor(Protocol):
def compress(self, data: bytes) -> bytes:
pass
def flush(self, mode: int) -> bytes:
pass
class _Decompressor(Protocol):
unconsumed_tail = b"" # type: bytes
def decompress(self, data: bytes, max_length: int) -> bytes:
pass
class _WebSocketDelegate(Protocol):
# The common base interface implemented by WebSocketHandler on
# the server side and WebSocketClientConnection on the client
# side.
def on_ws_connection_close(
self, close_code: Optional[int] = None, close_reason: Optional[str] = None
) -> None:
pass
def on_message(self, message: Union[str, bytes]) -> Optional["Awaitable[None]"]:
pass
def on_ping(self, data: bytes) -> None:
pass
def on_pong(self, data: bytes) -> None:
pass
def log_exception(
self,
typ: Optional[Type[BaseException]],
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
pass
_default_max_message_size = 10 * 1024 * 1024
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class _DecompressTooLargeError(Exception):
pass
class _WebSocketParams(object):
def __init__(
self,
ping_interval: Optional[float] = None,
ping_timeout: Optional[float] = None,
max_message_size: int = _default_max_message_size,
compression_options: Optional[Dict[str, Any]] = None,
) -> None:
self.ping_interval = ping_interval
self.ping_timeout = ping_timeout
self.max_message_size = max_message_size
self.compression_options = compression_options
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
Custom upgrade response headers can be sent by overriding
`~tornado.web.RequestHandler.set_default_headers` or
`~tornado.web.RequestHandler.prepare`.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
Here is an example WebSocket handler that echoes all received messages
back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement the
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
If the application setting ``websocket_ping_interval`` has a non-zero
value, a ping will be sent periodically, and the connection will be
closed if a response is not received before the ``websocket_ping_timeout``.
Messages larger than the ``websocket_max_message_size`` application setting
(default 10MiB) will not be accepted.
.. versionchanged:: 4.5
Added ``websocket_ping_interval``, ``websocket_ping_timeout``, and
``websocket_max_message_size``.
"""
def __init__(
self,
application: tornado.web.Application,
request: httputil.HTTPServerRequest,
**kwargs: Any
) -> None:
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None # type: Optional[WebSocketProtocol]
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.stream = None # type: Optional[IOStream]
self._on_close_called = False
async def get(self, *args: Any, **kwargs: Any) -> None:
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != "websocket":
self.set_status(400)
log_msg = 'Can "Upgrade" only to "WebSocket".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(
lambda s: s.strip().lower(), headers.get("Connection", "").split(",")
)
if "upgrade" not in connection:
self.set_status(400)
log_msg = '"Connection" must be "Upgrade".'
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
await self.ws_connection.accept_connection(self)
else:
self.set_status(426, "Upgrade Required")
self.set_header("Sec-WebSocket-Version", "7, 8, 13")
@property
def ping_interval(self) -> Optional[float]:
"""The interval for websocket keep-alive pings.
Set websocket_ping_interval = 0 to disable pings.
"""
return self.settings.get("websocket_ping_interval", None)
@property
def ping_timeout(self) -> Optional[float]:
"""If no ping is received in this many seconds,
close the websocket connection (VPNs, etc. can fail to cleanly close ws connections).
Default is the larger of 3 ping intervals or 30 seconds.
"""
return self.settings.get("websocket_ping_timeout", None)
@property
def max_message_size(self) -> int:
"""Maximum allowed message size.
If the remote peer sends a message larger than this, the connection
will be closed.
Default is 10MiB.
"""
return self.settings.get(
"websocket_max_message_size", _default_max_message_size
)
def write_message(
self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Consistently raises `WebSocketClosedError`. Previously could
sometimes raise `.StreamClosedError`.
"""
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
def select_subprotocol(self, subprotocols: List[str]) -> Optional[str]:
"""Override to implement subprotocol negotiation.
``subprotocols`` is a list of strings identifying the
subprotocols proposed by the client. This method may be
overridden to return one of those strings to select it, or
``None`` to not select a subprotocol.
Failure to select a subprotocol does not automatically abort
the connection, although clients may close the connection if
none of their proposed subprotocols was selected.
The list may be empty, in which case this method must return
None. This method is always called exactly once even if no
subprotocols were proposed so that the handler can be advised
of this fact.
.. versionchanged:: 5.1
Previously, this method was called with a list containing
an empty string instead of an empty list if no subprotocols
were proposed by the client.
"""
return None
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol returned by `select_subprotocol`.
.. versionadded:: 5.1
"""
assert self.ws_connection is not None
return self.ws_connection.selected_subprotocol
def get_compression_options(self) -> Optional[Dict[str, Any]]:
"""Override to return compression options for the connection.
If this method returns None (the default), compression will
be disabled. If it returns a dict (even an empty one), it
will be enabled. The contents of the dict may be used to
control the following compression options:
``compression_level`` specifies the compression level.
``mem_level`` specifies the amount of memory used for the internal compression state.
These parameters are documented in detail here:
https://docs.python.org/3.6/library/zlib.html#zlib.compressobj
.. versionadded:: 4.1
.. versionchanged:: 4.5
Added ``compression_level`` and ``mem_level``.
"""
# TODO: Add wbits option.
return None
def open(self, *args: str, **kwargs: str) -> Optional[Awaitable[None]]:
"""Invoked when a new WebSocket is opened.
The arguments to `open` are extracted from the `tornado.web.URLSpec`
regular expression, just like the arguments to
`tornado.web.RequestHandler.get`.
`open` may be a coroutine. `on_message` will not be called until
`open` has returned.
.. versionchanged:: 5.1
``open`` may be a coroutine.
"""
pass
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
"""Handle incoming messages on the WebSocket
This method must be overridden.
.. versionchanged:: 4.5
``on_message`` can be a coroutine.
"""
raise NotImplementedError
def ping(self, data: Union[str, bytes] = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``websocket_ping_interval`` application
setting instead of sending pings manually.
.. versionchanged:: 5.1
The data argument is now optional.
"""
data = utf8(data)
if self.ws_connection is None or self.ws_connection.is_closing():
raise WebSocketClosedError()
self.ws_connection.write_ping(data)
def on_pong(self, data: bytes) -> None:
"""Invoked when the response to a ping frame is received."""
pass
def on_ping(self, data: bytes) -> None:
"""Invoked when the a ping frame is received."""
pass
def on_close(self) -> None:
"""Invoked when the WebSocket is closed.
If the connection was closed cleanly and a status code or reason
phrase was supplied, these values will be available as the attributes
``self.close_code`` and ``self.close_reason``.
.. versionchanged:: 4.0
Added ``close_code`` and ``close_reason`` attributes.
"""
pass
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
"""Closes this Web Socket.
Once the close handshake is successful the socket will be closed.
``code`` may be a numeric status code, taken from the values
defined in `RFC 6455 section 7.4.1
<https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
``reason`` may be a textual message about why the connection is
closing. These values are made available to the client, but are
not otherwise interpreted by the websocket protocol.
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.ws_connection:
self.ws_connection.close(code, reason)
self.ws_connection = None
def check_origin(self, origin: str) -> bool:
"""Override to enable support for allowing alternate origins.
The ``origin`` argument is the value of the ``Origin`` HTTP
header, the url responsible for initiating this request. This
method is not called for clients that do not send this header;
such requests are always allowed (because all browsers that
implement WebSockets support this header, and non-browser
clients do not have the same cross-site security concerns).
Should return ``True`` to accept the request or ``False`` to
reject it. By default, rejects all requests with an origin on
a host other than this one.
This is a security protection against cross site scripting attacks on
browsers, since WebSockets are allowed to bypass the usual same-origin
policies and don't use CORS headers.
.. warning::
This is an important security measure; don't disable it
without understanding the security implications. In
particular, if your authentication is cookie-based, you
must either restrict the origins allowed by
``check_origin()`` or implement your own XSRF-like
protection for websocket connections. See `these
<https://www.christian-schneider.net/CrossSiteWebSocketHijacking.html>`_
`articles
<https://devcenter.heroku.com/articles/websocket-security>`_
for more.
To accept all cross-origin traffic (which was the default prior to
Tornado 4.0), simply override this method to always return ``True``::
def check_origin(self, origin):
return True
To allow connections from any subdomain of your site, you might
do something like::
def check_origin(self, origin):
parsed_origin = urllib.parse.urlparse(origin)
return parsed_origin.netloc.endswith(".mydomain.com")
.. versionadded:: 4.0
"""
parsed_origin = urlparse(origin)
origin = parsed_origin.netloc
origin = origin.lower()
host = self.request.headers.get("Host")
# Check to see that origin matches host directly, including ports
return origin == host
def set_nodelay(self, value: bool) -> None:
"""Set the no-delay flag for this stream.
By default, small messages may be delayed and/or combined to minimize
the number of packets sent. This can sometimes cause 200-500ms delays
due to the interaction between Nagle's algorithm and TCP delayed
ACKs. To reduce this delay (at the expense of possibly increasing
bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
connection is established.
See `.BaseIOStream.set_nodelay` for additional details.
.. versionadded:: 3.1
"""
assert self.ws_connection is not None
self.ws_connection.set_nodelay(value)
def on_connection_close(self) -> None:
if self.ws_connection:
self.ws_connection.on_connection_close()
self.ws_connection = None
if not self._on_close_called:
self._on_close_called = True
self.on_close()
self._break_cycles()
def on_ws_connection_close(
self, close_code: Optional[int] = None, close_reason: Optional[str] = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _break_cycles(self) -> None:
# WebSocketHandlers call finish() early, but we don't want to
# break up reference cycles (which makes it impossible to call
# self.render_string) until after we've really closed the
# connection (if it was established in the first place,
# indicated by status code 101).
if self.get_status() != 101 or self._on_close_called:
super(WebSocketHandler, self)._break_cycles()
def send_error(self, *args: Any, **kwargs: Any) -> None:
if self.stream is None:
super(WebSocketHandler, self).send_error(*args, **kwargs)
else:
# If we get an uncaught exception during the handshake,
# we have no choice but to abruptly close the connection.
# TODO: for uncaught exceptions after the handshake,
# we can close the connection more gracefully.
self.stream.close()
def get_websocket_protocol(self) -> Optional["WebSocketProtocol"]:
websocket_version = self.request.headers.get("Sec-WebSocket-Version")
if websocket_version in ("7", "8", "13"):
params = _WebSocketParams(
ping_interval=self.ping_interval,
ping_timeout=self.ping_timeout,
max_message_size=self.max_message_size,
compression_options=self.get_compression_options(),
)
return WebSocketProtocol13(self, False, params)
return None
def _detach_stream(self) -> IOStream:
# disable non-WS methods
for method in [
"write",
"redirect",
"set_header",
"set_cookie",
"set_status",
"flush",
"finish",
]:
setattr(self, method, _raise_not_supported_for_websockets)
return self.detach()
def _raise_not_supported_for_websockets(*args: Any, **kwargs: Any) -> None:
raise RuntimeError("Method not supported for Web Sockets")
class WebSocketProtocol(abc.ABC):
"""Base class for WebSocket protocol versions.
"""
def __init__(self, handler: "_WebSocketDelegate") -> None:
self.handler = handler
self.stream = None # type: Optional[IOStream]
self.client_terminated = False
self.server_terminated = False
def _run_callback(
self, callback: Callable, *args: Any, **kwargs: Any
) -> "Optional[Future[Any]]":
"""Runs the given callback with exception handling.
If the callback is a coroutine, returns its Future. On error, aborts the
websocket connection and returns None.
"""
try:
result = callback(*args, **kwargs)
except Exception:
self.handler.log_exception(*sys.exc_info())
self._abort()
return None
else:
if result is not None:
result = gen.convert_yielded(result)
assert self.stream is not None
self.stream.io_loop.add_future(result, lambda f: f.result())
return result
def on_connection_close(self) -> None:
self._abort()
def _abort(self) -> None:
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
if self.stream is not None:
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
@abc.abstractmethod
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
raise NotImplementedError()
@abc.abstractmethod
def is_closing(self) -> bool:
raise NotImplementedError()
@abc.abstractmethod
async def accept_connection(self, handler: WebSocketHandler) -> None:
raise NotImplementedError()
@abc.abstractmethod
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
raise NotImplementedError()
@property
@abc.abstractmethod
def selected_subprotocol(self) -> Optional[str]:
raise NotImplementedError()
@abc.abstractmethod
def write_ping(self, data: bytes) -> None:
raise NotImplementedError()
# The entry points below are used by WebSocketClientConnection,
# which was introduced after we only supported a single version of
# WebSocketProtocol. The WebSocketProtocol/WebSocketProtocol13
# boundary is currently pretty ad-hoc.
@abc.abstractmethod
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def start_pinging(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
async def _receive_frame_loop(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def set_nodelay(self, x: bool) -> None:
raise NotImplementedError()
class _PerMessageDeflateCompressor(object):
def __init__(
self,
persistent: bool,
max_wbits: Optional[int],
compression_options: Optional[Dict[str, Any]] = None,
) -> None:
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
# There is no symbolic constant for the minimum wbits value.
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError(
"Invalid max_wbits value %r; allowed range 8-%d",
max_wbits,
zlib.MAX_WBITS,
)
self._max_wbits = max_wbits
if (
compression_options is None
or "compression_level" not in compression_options
):
self._compression_level = tornado.web.GZipContentEncoding.GZIP_LEVEL
else:
self._compression_level = compression_options["compression_level"]
if compression_options is None or "mem_level" not in compression_options:
self._mem_level = 8
else:
self._mem_level = compression_options["mem_level"]
if persistent:
self._compressor = self._create_compressor() # type: Optional[_Compressor]
else:
self._compressor = None
def _create_compressor(self) -> "_Compressor":
return zlib.compressobj(
self._compression_level, zlib.DEFLATED, -self._max_wbits, self._mem_level
)
def compress(self, data: bytes) -> bytes:
compressor = self._compressor or self._create_compressor()
data = compressor.compress(data) + compressor.flush(zlib.Z_SYNC_FLUSH)
assert data.endswith(b"\x00\x00\xff\xff")
return data[:-4]
class _PerMessageDeflateDecompressor(object):
def __init__(
self,
persistent: bool,
max_wbits: Optional[int],
max_message_size: int,
compression_options: Optional[Dict[str, Any]] = None,
) -> None:
self._max_message_size = max_message_size
if max_wbits is None:
max_wbits = zlib.MAX_WBITS
if not (8 <= max_wbits <= zlib.MAX_WBITS):
raise ValueError(
"Invalid max_wbits value %r; allowed range 8-%d",
max_wbits,
zlib.MAX_WBITS,
)
self._max_wbits = max_wbits
if persistent:
self._decompressor = (
self._create_decompressor()
) # type: Optional[_Decompressor]
else:
self._decompressor = None
def _create_decompressor(self) -> "_Decompressor":
return zlib.decompressobj(-self._max_wbits)
def decompress(self, data: bytes) -> bytes:
decompressor = self._decompressor or self._create_decompressor()
result = decompressor.decompress(
data + b"\x00\x00\xff\xff", self._max_message_size
)
if decompressor.unconsumed_tail:
raise _DecompressTooLargeError()
return result
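# --- Illustrative sketch (not part of the original module) ---------------------
# What the two classes above do at the zlib level: a message is deflated with
# Z_SYNC_FLUSH and the trailing 0x00 0x00 0xff 0xff block marker is stripped
# before it goes on the wire; the receiver re-appends the marker before
# inflating (RFC 7692 permessage-deflate).  A self-contained round trip:
def _permessage_deflate_roundtrip(payload: bytes = b"hello websocket") -> bytes:
    compressor = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
    wire = compressor.compress(payload) + compressor.flush(zlib.Z_SYNC_FLUSH)
    assert wire.endswith(b"\x00\x00\xff\xff")
    wire = wire[:-4]  # the bytes actually sent in the frame payload
    decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
    return decompressor.decompress(wire + b"\x00\x00\xff\xff")  # == payload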
class WebSocketProtocol13(WebSocketProtocol):
"""Implementation of the WebSocket protocol from RFC 6455.
This class supports versions 7 and 8 of the protocol in addition to the
final version 13.
"""
# Bit masks for the first byte of a frame.
FIN = 0x80
RSV1 = 0x40
RSV2 = 0x20
RSV3 = 0x10
RSV_MASK = RSV1 | RSV2 | RSV3
OPCODE_MASK = 0x0F
stream = None # type: IOStream
def __init__(
self,
handler: "_WebSocketDelegate",
mask_outgoing: bool,
params: _WebSocketParams,
) -> None:
WebSocketProtocol.__init__(self, handler)
self.mask_outgoing = mask_outgoing
self.params = params
self._final_frame = False
self._frame_opcode = None
self._masked_frame = None
self._frame_mask = None # type: Optional[bytes]
self._frame_length = None
self._fragmented_message_buffer = None # type: Optional[bytes]
self._fragmented_message_opcode = None
self._waiting = None # type: object
self._compression_options = params.compression_options
self._decompressor = None # type: Optional[_PerMessageDeflateDecompressor]
self._compressor = None # type: Optional[_PerMessageDeflateCompressor]
self._frame_compressed = None # type: Optional[bool]
# The total uncompressed size of all messages received or sent.
# Unicode messages are encoded to utf8.
# Only for testing; subject to change.
self._message_bytes_in = 0
self._message_bytes_out = 0
# The total size of all packets received or sent. Includes
# the effect of compression, frame overhead, and control frames.
self._wire_bytes_in = 0
self._wire_bytes_out = 0
self.ping_callback = None # type: Optional[PeriodicCallback]
self.last_ping = 0.0
self.last_pong = 0.0
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
# Use a property for this to satisfy the abc.
@property
def selected_subprotocol(self) -> Optional[str]:
return self._selected_subprotocol
@selected_subprotocol.setter
def selected_subprotocol(self, value: Optional[str]) -> None:
self._selected_subprotocol = value
async def accept_connection(self, handler: WebSocketHandler) -> None:
try:
self._handle_websocket_headers(handler)
except ValueError:
handler.set_status(400)
log_msg = "Missing/Invalid WebSocket headers"
handler.finish(log_msg)
gen_log.debug(log_msg)
return
try:
await self._accept_connection(handler)
except asyncio.CancelledError:
self._abort()
return
except ValueError:
gen_log.debug("Malformed WebSocket request received", exc_info=True)
self._abort()
return
def _handle_websocket_headers(self, handler: WebSocketHandler) -> None:
"""Verifies all invariant- and required headers
If a header is missing or have an incorrect value ValueError will be
raised
"""
fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
if not all(map(lambda f: handler.request.headers.get(f), fields)):
raise ValueError("Missing/Invalid WebSocket headers")
@staticmethod
def compute_accept_value(key: Union[str, bytes]) -> str:
"""Computes the value for the Sec-WebSocket-Accept header,
given the value for Sec-WebSocket-Key.
"""
sha1 = hashlib.sha1()
sha1.update(utf8(key))
sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11") # Magic value
return native_str(base64.b64encode(sha1.digest()))
def _challenge_response(self, handler: WebSocketHandler) -> str:
return WebSocketProtocol13.compute_accept_value(
cast(str, handler.request.headers.get("Sec-Websocket-Key"))
)
async def _accept_connection(self, handler: WebSocketHandler) -> None:
subprotocol_header = handler.request.headers.get("Sec-WebSocket-Protocol")
if subprotocol_header:
subprotocols = [s.strip() for s in subprotocol_header.split(",")]
else:
subprotocols = []
self.selected_subprotocol = handler.select_subprotocol(subprotocols)
if self.selected_subprotocol:
assert self.selected_subprotocol in subprotocols
handler.set_header("Sec-WebSocket-Protocol", self.selected_subprotocol)
extensions = self._parse_extensions_header(handler.request.headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
# TODO: negotiate parameters if compression_options
# specifies limits.
self._create_compressors("server", ext[1], self._compression_options)
if (
"client_max_window_bits" in ext[1]
and ext[1]["client_max_window_bits"] is None
):
# Don't echo an offered client_max_window_bits
# parameter with no value.
del ext[1]["client_max_window_bits"]
handler.set_header(
"Sec-WebSocket-Extensions",
httputil._encode_header("permessage-deflate", ext[1]),
)
break
handler.clear_header("Content-Type")
handler.set_status(101)
handler.set_header("Upgrade", "websocket")
handler.set_header("Connection", "Upgrade")
handler.set_header("Sec-WebSocket-Accept", self._challenge_response(handler))
handler.finish()
self.stream = handler._detach_stream()
self.start_pinging()
try:
open_result = handler.open(*handler.open_args, **handler.open_kwargs)
if open_result is not None:
await open_result
except Exception:
handler.log_exception(*sys.exc_info())
self._abort()
return
await self._receive_frame_loop()
def _parse_extensions_header(
self, headers: httputil.HTTPHeaders
) -> List[Tuple[str, Dict[str, str]]]:
extensions = headers.get("Sec-WebSocket-Extensions", "")
if extensions:
return [httputil._parse_header(e.strip()) for e in extensions.split(",")]
return []
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
"""Process the headers sent by the server to this client connection.
'key' is the websocket handshake challenge/response key.
"""
assert headers["Upgrade"].lower() == "websocket"
assert headers["Connection"].lower() == "upgrade"
accept = self.compute_accept_value(key)
assert headers["Sec-Websocket-Accept"] == accept
extensions = self._parse_extensions_header(headers)
for ext in extensions:
if ext[0] == "permessage-deflate" and self._compression_options is not None:
self._create_compressors("client", ext[1])
else:
raise ValueError("unsupported extension %r", ext)
self.selected_subprotocol = headers.get("Sec-WebSocket-Protocol", None)
def _get_compressor_options(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Converts a websocket agreed_parameters set to keyword arguments
for our compressor objects.
"""
options = dict(
persistent=(side + "_no_context_takeover") not in agreed_parameters
) # type: Dict[str, Any]
wbits_header = agreed_parameters.get(side + "_max_window_bits", None)
if wbits_header is None:
options["max_wbits"] = zlib.MAX_WBITS
else:
options["max_wbits"] = int(wbits_header)
options["compression_options"] = compression_options
return options
def _create_compressors(
self,
side: str,
agreed_parameters: Dict[str, Any],
compression_options: Optional[Dict[str, Any]] = None,
) -> None:
# TODO: handle invalid parameters gracefully
allowed_keys = set(
[
"server_no_context_takeover",
"client_no_context_takeover",
"server_max_window_bits",
"client_max_window_bits",
]
)
for key in agreed_parameters:
if key not in allowed_keys:
raise ValueError("unsupported compression parameter %r" % key)
other_side = "client" if (side == "server") else "server"
self._compressor = _PerMessageDeflateCompressor(
**self._get_compressor_options(side, agreed_parameters, compression_options)
)
self._decompressor = _PerMessageDeflateDecompressor(
max_message_size=self.params.max_message_size,
**self._get_compressor_options(
other_side, agreed_parameters, compression_options
)
)
def _write_frame(
self, fin: bool, opcode: int, data: bytes, flags: int = 0
) -> "Future[None]":
data_len = len(data)
if opcode & 0x8:
# All control frames MUST have a payload length of 125
# bytes or less and MUST NOT be fragmented.
if not fin:
raise ValueError("control frames may not be fragmented")
if data_len > 125:
raise ValueError("control frame payloads may not exceed 125 bytes")
if fin:
finbit = self.FIN
else:
finbit = 0
frame = struct.pack("B", finbit | opcode | flags)
if self.mask_outgoing:
mask_bit = 0x80
else:
mask_bit = 0
if data_len < 126:
frame += struct.pack("B", data_len | mask_bit)
elif data_len <= 0xFFFF:
frame += struct.pack("!BH", 126 | mask_bit, data_len)
else:
frame += struct.pack("!BQ", 127 | mask_bit, data_len)
if self.mask_outgoing:
mask = os.urandom(4)
data = mask + _websocket_mask(mask, data)
frame += data
self._wire_bytes_out += len(frame)
return self.stream.write(frame)
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
"""Sends the given message to the client of this Web Socket."""
if binary:
opcode = 0x2
else:
opcode = 0x1
message = tornado.escape.utf8(message)
assert isinstance(message, bytes)
self._message_bytes_out += len(message)
flags = 0
if self._compressor:
message = self._compressor.compress(message)
flags |= self.RSV1
# For historical reasons, write methods in Tornado operate in a semi-synchronous
# mode in which awaiting the Future they return is optional (But errors can
# still be raised). This requires us to go through an awkward dance here
# to transform the errors that may be returned while presenting the same
# semi-synchronous interface.
try:
fut = self._write_frame(True, opcode, message, flags=flags)
except StreamClosedError:
raise WebSocketClosedError()
async def wrapper() -> None:
try:
await fut
except StreamClosedError:
raise WebSocketClosedError()
return asyncio.ensure_future(wrapper())
def write_ping(self, data: bytes) -> None:
"""Send ping frame."""
assert isinstance(data, bytes)
self._write_frame(True, 0x9, data)
async def _receive_frame_loop(self) -> None:
try:
while not self.client_terminated:
await self._receive_frame()
except StreamClosedError:
self._abort()
self.handler.on_ws_connection_close(self.close_code, self.close_reason)
async def _read_bytes(self, n: int) -> bytes:
data = await self.stream.read_bytes(n)
self._wire_bytes_in += n
return data
async def _receive_frame(self) -> None:
# Read the frame header.
data = await self._read_bytes(2)
header, mask_payloadlen = struct.unpack("BB", data)
is_final_frame = header & self.FIN
reserved_bits = header & self.RSV_MASK
opcode = header & self.OPCODE_MASK
opcode_is_control = opcode & 0x8
if self._decompressor is not None and opcode != 0:
# Compression flag is present in the first frame's header,
# but we can't decompress until we have all the frames of
# the message.
self._frame_compressed = bool(reserved_bits & self.RSV1)
reserved_bits &= ~self.RSV1
if reserved_bits:
# client is using as-yet-undefined extensions; abort
self._abort()
return
is_masked = bool(mask_payloadlen & 0x80)
payloadlen = mask_payloadlen & 0x7F
# Parse and validate the length.
if opcode_is_control and payloadlen >= 126:
# control frames must have payload < 126
self._abort()
return
if payloadlen < 126:
self._frame_length = payloadlen
elif payloadlen == 126:
data = await self._read_bytes(2)
payloadlen = struct.unpack("!H", data)[0]
elif payloadlen == 127:
data = await self._read_bytes(8)
payloadlen = struct.unpack("!Q", data)[0]
new_len = payloadlen
if self._fragmented_message_buffer is not None:
new_len += len(self._fragmented_message_buffer)
if new_len > self.params.max_message_size:
self.close(1009, "message too big")
self._abort()
return
# Read the payload, unmasking if necessary.
if is_masked:
self._frame_mask = await self._read_bytes(4)
data = await self._read_bytes(payloadlen)
if is_masked:
assert self._frame_mask is not None
data = _websocket_mask(self._frame_mask, data)
# Decide what to do with this frame.
if opcode_is_control:
# control frames may be interleaved with a series of fragmented
# data frames, so control frames must not interact with
# self._fragmented_*
if not is_final_frame:
# control frames must not be fragmented
self._abort()
return
elif opcode == 0: # continuation frame
if self._fragmented_message_buffer is None:
# nothing to continue
self._abort()
return
self._fragmented_message_buffer += data
if is_final_frame:
opcode = self._fragmented_message_opcode
data = self._fragmented_message_buffer
self._fragmented_message_buffer = None
else: # start of new data message
if self._fragmented_message_buffer is not None:
# can't start new message until the old one is finished
self._abort()
return
if not is_final_frame:
self._fragmented_message_opcode = opcode
self._fragmented_message_buffer = data
if is_final_frame:
handled_future = self._handle_message(opcode, data)
if handled_future is not None:
await handled_future
def _handle_message(self, opcode: int, data: bytes) -> "Optional[Future[None]]":
"""Execute on_message, returning its Future if it is a coroutine."""
if self.client_terminated:
return None
if self._frame_compressed:
assert self._decompressor is not None
try:
data = self._decompressor.decompress(data)
except _DecompressTooLargeError:
self.close(1009, "message too big after decompression")
self._abort()
return None
if opcode == 0x1:
# UTF-8 data
self._message_bytes_in += len(data)
try:
decoded = data.decode("utf-8")
except UnicodeDecodeError:
self._abort()
return None
return self._run_callback(self.handler.on_message, decoded)
elif opcode == 0x2:
# Binary data
self._message_bytes_in += len(data)
return self._run_callback(self.handler.on_message, data)
elif opcode == 0x8:
# Close
self.client_terminated = True
if len(data) >= 2:
self.close_code = struct.unpack(">H", data[:2])[0]
if len(data) > 2:
self.close_reason = to_unicode(data[2:])
# Echo the received close code, if any (RFC 6455 section 5.5.1).
self.close(self.close_code)
elif opcode == 0x9:
# Ping
try:
self._write_frame(True, 0xA, data)
except StreamClosedError:
self._abort()
self._run_callback(self.handler.on_ping, data)
elif opcode == 0xA:
# Pong
self.last_pong = IOLoop.current().time()
return self._run_callback(self.handler.on_pong, data)
else:
self._abort()
return None
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
"""Closes the WebSocket connection."""
if not self.server_terminated:
if not self.stream.closed():
if code is None and reason is not None:
code = 1000 # "normal closure" status code
if code is None:
close_data = b""
else:
close_data = struct.pack(">H", code)
if reason is not None:
close_data += utf8(reason)
try:
self._write_frame(True, 0x8, close_data)
except StreamClosedError:
self._abort()
self.server_terminated = True
if self.client_terminated:
if self._waiting is not None:
self.stream.io_loop.remove_timeout(self._waiting)
self._waiting = None
self.stream.close()
elif self._waiting is None:
# Give the client a few seconds to complete a clean shutdown,
# otherwise just close the connection.
self._waiting = self.stream.io_loop.add_timeout(
self.stream.io_loop.time() + 5, self._abort
)
def is_closing(self) -> bool:
"""Return ``True`` if this connection is closing.
The connection is considered closing if either side has
initiated its closing handshake or if the stream has been
shut down uncleanly.
"""
return self.stream.closed() or self.client_terminated or self.server_terminated
@property
def ping_interval(self) -> Optional[float]:
interval = self.params.ping_interval
if interval is not None:
return interval
return 0
@property
def ping_timeout(self) -> Optional[float]:
timeout = self.params.ping_timeout
if timeout is not None:
return timeout
assert self.ping_interval is not None
return max(3 * self.ping_interval, 30)
def start_pinging(self) -> None:
"""Start sending periodic pings to keep the connection alive"""
assert self.ping_interval is not None
if self.ping_interval > 0:
self.last_ping = self.last_pong = IOLoop.current().time()
self.ping_callback = PeriodicCallback(
self.periodic_ping, self.ping_interval * 1000
)
self.ping_callback.start()
def periodic_ping(self) -> None:
"""Send a ping to keep the websocket alive
Called periodically if the websocket_ping_interval is set and non-zero.
"""
if self.is_closing() and self.ping_callback is not None:
self.ping_callback.stop()
return
# Check for timeout on pong. Make sure that we really have
# sent a recent ping in case the machine with both server and
# client has been suspended since the last ping.
now = IOLoop.current().time()
since_last_pong = now - self.last_pong
since_last_ping = now - self.last_ping
assert self.ping_interval is not None
assert self.ping_timeout is not None
if (
since_last_ping < 2 * self.ping_interval
and since_last_pong > self.ping_timeout
):
self.close()
return
self.write_ping(b"")
self.last_ping = now
def set_nodelay(self, x: bool) -> None:
self.stream.set_nodelay(x)
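# Illustrative sketch, not part of Tornado: the close-frame payload parsed in
# _handle_message() (opcode 0x8) and built in close() above is a big-endian
# 16-bit status code optionally followed by a UTF-8 reason. The helper name is
# hypothetical; it relies only on the module-level ``struct`` import already
# used elsewhere in this file.
def _example_close_payload(code: int = 1000, reason: str = "bye") -> bytes:
    payload = struct.pack(">H", code) + reason.encode("utf-8")
    # Parsing mirrors the opcode 0x8 branch of _handle_message().
    parsed_code = struct.unpack(">H", payload[:2])[0]
    parsed_reason = payload[2:].decode("utf-8")
    assert (parsed_code, parsed_reason) == (code, reason)
    return payload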
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
protocol = None # type: WebSocketProtocol
def __init__(
self,
request: httpclient.HTTPRequest,
on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None,
compression_options: Optional[Dict[str, Any]] = None,
ping_interval: Optional[float] = None,
ping_timeout: Optional[float] = None,
max_message_size: int = _default_max_message_size,
subprotocols: Optional[List[str]] = [],
) -> None:
self.connect_future = Future() # type: Future[WebSocketClientConnection]
self.read_queue = Queue(1) # type: Queue[Union[None, str, bytes]]
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.params = _WebSocketParams(
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
compression_options=compression_options,
)
scheme, sep, rest = request.url.partition(":")
scheme = {"ws": "http", "wss": "https"}[scheme]
request.url = scheme + sep + rest
request.headers.update(
{
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-WebSocket-Key": self.key,
"Sec-WebSocket-Version": "13",
}
)
if subprotocols is not None:
request.headers["Sec-WebSocket-Protocol"] = ",".join(subprotocols)
if compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers[
"Sec-WebSocket-Extensions"
] = "permessage-deflate; client_max_window_bits"
self.tcp_client = TCPClient()
super(WebSocketClientConnection, self).__init__(
None,
request,
lambda: None,
self._on_http_response,
104857600,
self.tcp_client,
65536,
104857600,
)
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None # type: ignore
def on_connection_close(self) -> None:
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self._on_message(None)
self.tcp_client.close()
super(WebSocketClientConnection, self).on_connection_close()
def on_ws_connection_close(
self, close_code: Optional[int] = None, close_reason: Optional[str] = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _on_http_response(self, response: httpclient.HTTPResponse) -> None:
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(
WebSocketError("Non-websocket response")
)
async def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> None:
assert isinstance(start_line, httputil.ResponseStartLine)
if start_line.code != 101:
await super(WebSocketClientConnection, self).headers_received(
start_line, headers
)
return
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol.stream = self.connection.detach()
IOLoop.current().add_callback(self.protocol._receive_frame_loop)
self.protocol.start_pinging()
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None # type: ignore
future_set_result_unless_cancelled(self.connect_future, self)
def write_message(
self, message: Union[str, bytes], binary: bool = False
) -> "Future[None]":
"""Sends a message to the WebSocket server.
If the stream is closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Exception raised on a closed stream changed from `.StreamClosedError`
to `WebSocketClosedError`.
"""
return self.protocol.write_message(message, binary=binary)
def read_message(
self,
callback: Optional[Callable[["Future[Union[None, str, bytes]]"], None]] = None,
) -> Awaitable[Union[None, str, bytes]]:
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
initialization, this function will never return messages
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
awaitable = self.read_queue.get()
if callback is not None:
self.io_loop.add_future(asyncio.ensure_future(awaitable), callback)
return awaitable
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
return self._on_message(message)
def _on_message(
self, message: Union[None, str, bytes]
) -> Optional[Awaitable[None]]:
if self._on_message_callback:
self._on_message_callback(message)
return None
else:
return self.read_queue.put(message)
def ping(self, data: bytes = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``ping_interval`` argument to
`websocket_connect` instead of sending pings manually.
.. versionadded:: 5.1
"""
data = utf8(data)
if self.protocol is None:
raise WebSocketClosedError()
self.protocol.write_ping(data)
def on_pong(self, data: bytes) -> None:
pass
def on_ping(self, data: bytes) -> None:
pass
def get_websocket_protocol(self) -> WebSocketProtocol:
return WebSocketProtocol13(self, mask_outgoing=True, params=self.params)
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol selected by the server.
.. versionadded:: 5.1
"""
return self.protocol.selected_subprotocol
def log_exception(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
assert typ is not None
assert value is not None
app_log.error("Uncaught exception %s", value, exc_info=(typ, value, tb))
def websocket_connect(
url: Union[str, httpclient.HTTPRequest],
callback: Optional[Callable[["Future[WebSocketClientConnection]"], None]] = None,
connect_timeout: Optional[float] = None,
on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None,
compression_options: Optional[Dict[str, Any]] = None,
ping_interval: Optional[float] = None,
ping_timeout: Optional[float] = None,
max_message_size: int = _default_max_message_size,
subprotocols: Optional[List[str]] = None,
) -> "Awaitable[WebSocketClientConnection]":
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
``subprotocols`` may be a list of strings specifying proposed
subprotocols. The selected protocol may be found on the
``selected_subprotocol`` attribute of the connection object
when the connection is complete.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.1
Added the ``subprotocols`` argument.
"""
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = cast(
httpclient.HTTPRequest,
httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS),
)
conn = WebSocketClientConnection(
request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
subprotocols=subprotocols,
)
if callback is not None:
IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future
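# Illustrative usage sketch, not part of Tornado: the coroutine style described
# in the docstring above, written with native coroutines. The URL is a
# placeholder and error handling is omitted.
async def _example_client(url: str = "ws://localhost:8888/ws") -> None:
    conn = await websocket_connect(url, ping_interval=10)
    await conn.write_message("hello")
    while True:
        msg = await conn.read_message()
        if msg is None:  # None means the connection was closed
            break
        # handle msg here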
|
allenl203/tornado
|
tornado/websocket.py
|
Python
|
apache-2.0
| 61,445
|
[
"VisIt"
] |
8620f036fc9ec4eb584b2a6f170f323db8ea4b2421324690d008891f33a249b6
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !!#
# !! You should edit it to your liking. !!#
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is there for backwards compatibility and when you don't
# ! want that setting translated.
# ! Option (b) should be used for settings that are different in
# ! different languages.
# Data about this site
BLOG_AUTHOR = "Your Name" # (translatable)
BLOG_TITLE = "Demo Site" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "https://example.com/"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "https://example.com/"
BLOG_EMAIL = "joe@demo.site"
BLOG_DESCRIPTION = "This is a demo site for Nikola." # (translatable)
# Nikola is multilingual!
#
# Currently supported languages are:
# bg Bulgarian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# de German
# el Greek [NOT gr!]
# en English
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# it Italian
# ja Japanese [NOT jp!]
# nb Norwegian Bokmål
# nl Dutch
# pt_br Portuguese (Brasil)
# pl Polish
# ru Russian
# sl Slovenian [NOT sl_si!]
# tr Turkish (Turkey) [NOT tr_tr!]
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
"en": "",
"pl": "./pl",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar.
# You should provide a key-value pair for each used language.
# (the same way you would do with a (translatable) setting.)
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/archive.html', 'Archives'),
('/categories/index.html', 'Tags'),
('/rss.xml', 'RSS'),
),
}
# Below this point, everything is optional
# While nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, for example
# "en_US.utf8" in unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.rst", "stories", "story.tmpl"),
("stories/*.txt", "stories", "story.tmpl"),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of "source" "relative destination".
# Default is:
# FILES_FOLDERS = {'files': '' }
# Which means copy 'files' into 'output'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create posts in one file format by default?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = True
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
# CREATE_SINGLE_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
# FEED_LENGTH = 10
# Slug the tag URL so it is easier for users to type; special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# An HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. Notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = []
# Commands to execute to deploy. Can be anything, for example,
# you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola install_plugin ping`).
# To do manual deployment, set it to []
# DEPLOY_COMMANDS = []
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
# OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, there are no filters.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <https://getnikola.com/handbook.html#post-processing-filters>
# FILTERS = {
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return an "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# Galleries are folders in galleries/
# Final location of galleries will be output / GALLERY_PATH / gallery_name
# GALLERY_PATH = "galleries"
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to 'old posts, page %d' or 'page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
# INDEXES_TITLE = "" # If this is empty, defaults to BLOG_TITLE
# INDEXES_PAGES = "" # If this is empty, defaults to '[old posts,] page %d' (see above)
# INDEXES_PAGES_MAIN = False # If True, INDEXES_PAGES is also displayed on
# # the main (the newest) index page (index.html)
# Name of the theme to use.
THEME = "bootstrap3"
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon links like this:
# <link rel="name" href="file" sizes="size"/>
# For creating favicons, take a look at:
# http://www.netmagazine.com/features/create-perfect-favicon
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# An HTML fragment with the Read more... link.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# An HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
# LICENSE = """
# <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/">
# <img alt="Creative Commons License BY-NC-SA"
# style="border-width:0; margin-bottom:12px;"
# src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# (translatable)
CONTENT_FOOTER = 'Contents © {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="https://getnikola.com/" rel="nofollow">Nikola</a> {license}'
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello',), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
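# For illustration only (not a setting): Nikola produces the footer by applying
# these arguments to CONTENT_FOOTER with str.format, roughly:
# CONTENT_FOOTER.format(*CONTENT_FOOTER_FORMATS[DEFAULT_LANG][0],
#                       **CONTENT_FOOTER_FORMATS[DEFAULT_LANG][1])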
# To use comments, you can choose between different third party comment
# systems, one of "disqus", "livefyre", "intensedebate", "moot",
# "googleplus", "facebook" or "isso"
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "nikolademo"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead turn /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files?
# Defaults to True
# If this is False
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html)... add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, apply the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# If True, schedules post to today if possible, even if scheduled hour is over
# SCHEDULE_FORCE_TODAY = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What MarkDown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# (translatable)
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
# SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
# COPY_SOURCES = True
# Modify the number of Post per Index Page
# Defaults to 10
# INDEX_DISPLAY_POST_COUNT = 10
# RSS_LINK is an HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Defaults to True
# RSS_TEASERS = True
# Strip HTML in the RSS feed? Defaults to False
# RSS_PLAIN = False
# A search form to search this site, for the sidebar. You can use a google
# custom search (http://www.google.com/cse/)
# Or a duckduckgo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- Custom search -->
# <form method="get" id="search" action="//duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s"/>
# <input type="hidden" name="k8" value="#444444"/>
# <input type="hidden" name="k9" value="#D51920"/>
# <input type="hidden" name="kt" value="h"/>
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a google search form, here's an example that should just work:
# SEARCH_FORM = """
# <!-- Custom search with google-->
# <form id="search" action="//www.google.com/search" method="get" class="navbar-form pull-left">
# <input type="hidden" name="q" value="site:%s" />
# <input type="text" name="q" maxlength="255" results="0" placeholder="Search"/>
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
# Also, there is a local search plugin you can use, based on Tipue, but it requires setting several
# options:
# SEARCH_FORM = """
# <span class="navbar-form pull-left">
# <input type="text" id="tipue_search_input">
# </span>"""
#
# BODY_END = """
# <script src="/assets/js/tipuesearch_set.js"></script>
# <script src="/assets/js/tipuesearch.js"></script>
# <script>
# $(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
# });
# </script>
# """
# EXTRA_HEAD_DATA = """
# <link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
# <div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div>
# """
# ENABLED_EXTRAS = ['local_search']
#
# Use content distribution networks for jquery and twitter-bootstrap css and js
# If this is True, jquery and html5shiv are served from the Google CDN and
# Bootstrap is served from BootstrapCDN (provided by MaxCDN)
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
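# For illustration only (hypothetical filename, not a Nikola API): applying the
# example expression above with Python's ``re`` module shows how the named
# groups become metadata fields:
#
# import re
# m = re.match(r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md',
#              '2013-05-01-my-post-My Post.md')
# m.groupdict()  # {'date': '2013-05-01', 'slug': 'my-post', 'title': 'My Post'}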
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Twitter Card summaries / Open Graph.
# Twitter cards make it possible for you to attach media to Tweets
# that link to your content.
#
# IMPORTANT:
# Please note that you need to opt in to use Twitter Cards!
# To do this please visit
# https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify the following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID
# # instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# Posts' dates are considered in UTC by default; if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (eg. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use W3C-DTF Format (ex. 2012-03-30T23:00:00+02:00)
#
# TIMEZONE = 'UTC'
# If webassets is installed, bundle JS and CSS to make site loading faster
# USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# Experimental plugins - use at your own risk.
# They probably need some manual adjustments - please see their respective
# readme.
# ENABLED_EXTRAS = [
# 'planetoid',
# 'ipynb',
# 'local_search',
# 'render_mustache',
# ]
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# You can configure the logging handlers installed as plugins or change the
# log level of the default stdout handler.
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'WARNING', 'bubble': True},
# 'smtp': {
# 'from_addr': 'test-errors@example.com',
    # 'recipients': ('test@example.com',),
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
# }
}
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
|
s2hc-johan/nikola
|
tests/data/translated_titles/conf.py
|
Python
|
mit
| 27,281
|
[
"VisIt"
] |
d3dad1cc443c4522cd1d0e2e894f4a4918dbe2b23e037a9aa8ad402bb0339a72
|
#!/usr/bin/env python
# encoding: utf-8
"""
Get HDF5 patch results on VOSpace and combine them into a single HDF5 file.
Run as fetch_patch_results.py phat_field_starfish.hdf5 --vodir phat/fields
2015-06-16 - Created by Jonathan Sick
"""
import os
import argparse
import vos
import h5py
import numpy as np
from astropy.table import Table
from starfisher.sfh import estimate_mean_age, marginalize_sfh_metallicity
from androcmd.phatpatchfit import compute_patch_gal_coords
def main():
args = parse_args()
dataset = h5py.File(args.output_hdf5, 'a')
dataset_patches = dataset.require_group('patches')
vosdir = os.path.join('vos:jonathansick', args.vodir)
c = vos.Client('vos:jonathansick')
file_names = c.listdir(vosdir)
for name in file_names:
print name
if not name.endswith('.hdf5'):
continue
patch_name = os.path.splitext(os.path.basename(name))[0]
if patch_name in dataset_patches:
print "{0} already in dataset".format(patch_name)
continue
try:
c.copy(os.path.join(vosdir, name), name)
except:
print "Could not download {0}".format(name)
continue
# open the patch's HDF5 file
try:
patch_data = h5py.File(name, 'r')
except IOError:
continue
patch_serial = patch_data.attrs['patch']
patch_data.copy(patch_data,
dataset_patches,
name=patch_serial)
patch_data.close()
os.remove(name)
validate_coords(dataset_patches)
reduce_sfh_table(dataset, dataset_patches)
dataset.flush()
dataset.close()
def parse_args():
parser = argparse.ArgumentParser(
description="fetch_patch_results.py phat_field_starfish.hdf5 "
"--vodir phat/fields")
parser.add_argument('output_hdf5')
parser.add_argument('--vodir', default='phat/patches')
return parser.parse_args()
def validate_coords(patch_data):
for patch_name, group in patch_data.items():
print patch_name, group
if 'r_kpc' not in group.attrs.keys():
print "Adding galaxy coords for {0}".format(patch_name)
r_kpc, phi = compute_patch_gal_coords(group.attrs['ra0'],
group.attrs['dec0'])
group.attrs['r_kpc'] = r_kpc
group.attrs['phi'] = phi
def reduce_sfh_table(dataset, patches, fit_keys=None):
patch_names = []
ra = []
dec = []
r_kpc = []
phi = []
mean_ages = []
mean_age_errs = []
ages_25 = []
ages_75 = []
chisqs = []
for patch_name, patch_group in patches.items():
patch_names.append(patch_name)
r_kpc.append(patch_group.attrs['r_kpc'])
phi.append(patch_group.attrs['phi'])
ra.append(patch_group.attrs['ra0'])
dec.append(patch_group.attrs['dec0'])
if fit_keys is None:
fit_keys = patch_group['sfh'].keys()
# redo the mean age estimation for each SFH; the SFH may have been
# persisted with an incorrect mean age estimation algo
for fit_key in fit_keys:
sfh_table = patch_group['sfh'][fit_key]
            bin_age = 10. ** sfh_table['log(age)'] / 1e9  # convert log10(age/yr) to Gyr
mass = sfh_table['mass']
mass_positive_sigma = sfh_table['mass_pos_err']
mass_negative_sigma = sfh_table['mass_neg_err']
mean_age = estimate_mean_age(
bin_age, mass,
mass_positive_sigma=mass_positive_sigma,
mass_negative_sigma=mass_negative_sigma)
sfh_table.attrs['mean_age'] = mean_age
mean_ages.append([patch_group['sfh'][fit_key].attrs['mean_age'][0]
for fit_key in fit_keys])
mean_age_errs.append([patch_group['sfh'][fit_key].attrs['mean_age'][1]
for fit_key in fit_keys])
chisqs.append([patch_group['chi_hess'][fit_key].attrs['chi_red']
for fit_key in fit_keys])
# compute a marginalized SFH and persist it to the dataset
marginal_sfh_group = patch_group.create_group('sfh_marginal')
for fit_key in fit_keys:
sfh_table = Table(np.array(patch_group['sfh'][fit_key]))
marginalized_sfh_table = marginalize_sfh_metallicity(sfh_table)
marginal_sfh_group.create_dataset(
fit_key,
data=np.array(marginalized_sfh_table))
        # Compute the 25th and 75th percentiles of cumulative SF.
_25 = []
_75 = []
for fit_key in fit_keys:
t = patch_group['sfh_marginal'][fit_key]
            age_gyr = 10. ** t['log(age)'] / 1e9  # convert log10(age/yr) to Gyr
srt = np.argsort(age_gyr)
age_gyr = age_gyr[srt]
mass = t['mass'][srt]
fractional_mass = np.cumsum(mass) / mass.sum() * 100.
result = np.interp([25., 75.],
fractional_mass,
age_gyr)
q25 = result[0]
q75 = result[1]
_25.append(q25)
_75.append(q75)
ages_25.append(_25)
ages_75.append(_75)
# Build a record array
age_fmt = 'mean_age_{0}'
age_err_fmt = 'mean_age_err_{0}'
age_25_fmt = 'age_25_{0}'
age_75_fmt = 'age_75_{0}'
chi_fmt = 'chi_red_{0}'
dtype = [('name', 'S40'), ('r_kpc', float), ('phi', float)] \
+ [('ra', float), ('dec', float)] \
+ [(age_fmt.format(n), float) for n in fit_keys] \
+ [(age_err_fmt.format(n), float) for n in fit_keys] \
+ [(age_25_fmt.format(n), float) for n in fit_keys] \
+ [(age_75_fmt.format(n), float) for n in fit_keys] \
+ [(chi_fmt.format(n), float) for n in fit_keys]
n = len(patch_names)
sfh_table = np.empty(n, dtype=np.dtype(dtype))
sfh_table['name'][:] = patch_names
sfh_table['r_kpc'][:] = r_kpc
sfh_table['phi'][:] = phi
sfh_table['ra'][:] = ra
sfh_table['dec'][:] = dec
for i, fit_key in enumerate(fit_keys):
sfh_table[age_fmt.format(fit_key)][:] = [v[i] for v in mean_ages]
sfh_table[age_err_fmt.format(fit_key)][:] = [v[i]
for v in mean_age_errs]
sfh_table[age_25_fmt.format(fit_key)][:] = [v[i] for v in ages_25]
sfh_table[age_75_fmt.format(fit_key)][:] = [v[i] for v in ages_75]
sfh_table[chi_fmt.format(fit_key)][:] = [v[i] for v in chisqs]
if 'sfh_table' in dataset.keys():
del dataset['sfh_table']
dataset.create_dataset('sfh_table', data=sfh_table)
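# Illustrative sketch with synthetic numbers, not part of the pipeline above:
# the 25th/75th percentile ages in reduce_sfh_table() come from interpolating
# the cumulative mass fraction onto the (sorted) bin ages with np.interp.
def _example_cumulative_sf_percentiles():
    age_gyr = np.array([0.1, 0.5, 1.0, 5.0, 10.0])
    mass = np.array([1.0, 2.0, 3.0, 2.0, 2.0])
    fractional_mass = np.cumsum(mass) / mass.sum() * 100.
    # Ages (Gyr) by which 25% and 75% of the stellar mass had formed
    return np.interp([25., 75.], fractional_mass, age_gyr)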
if __name__ == '__main__':
main()
|
jonathansick/androcmd
|
scripts/fetch_patch_results.py
|
Python
|
mit
| 6,784
|
[
"Galaxy"
] |
40df406b16e6a73f6e8ba5e1faf3bc6d74769a8ba5f3b8d1518d63f7d1eb571a
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Neighbor Search wrapper for MDAnalysis --- :mod:`MDAnalysis.lib.NeighborSearch`
===============================================================================
This module contains classes that allow neighbor searches directly with
`AtomGroup` objects from `MDAnalysis`.
"""
import numpy as np
from Bio.KDTree import KDTree
from MDAnalysis.core.groups import AtomGroup, Atom
class AtomNeighborSearch(object):
"""This class can be used to find all atoms/residues/segements within the
radius of a given query position.
This class is using the BioPython KDTree for the neighborsearch. This class
also does not apply PBC to the distance calculattions. So you have to ensure
yourself that the trajectory has been corrected for PBC artifacts.
"""
def __init__(self, atom_group, bucket_size=10):
"""
Parameters
----------
        atom_group : AtomGroup
list of atoms
bucket_size : int
Number of entries in leafs of the KDTree. If you suffer poor
performance you can play around with this number. Increasing the
`bucket_size` will speed up the construction of the KDTree but
slow down the search.
"""
self.atom_group = atom_group
self._u = atom_group.universe
self.kdtree = KDTree(dim=3, bucket_size=bucket_size)
self.kdtree.set_coords(atom_group.positions)
def search(self, atoms, radius, level='A'):
"""
Return all atoms/residues/segments that are within *radius* of the
atoms in *atoms*.
Parameters
----------
atoms : AtomGroup or Atom
list of atoms
radius : float
Radius for search in Angstrom.
level : str
char (A, R, S). Return atoms(A), residues(R) or segments(S) within
*radius* of *atoms*.
"""
if isinstance(atoms, Atom):
positions = atoms.position.reshape(1, 3)
else:
positions = atoms.positions
indices = []
for pos in positions:
self.kdtree.search(pos, radius)
indices.append(self.kdtree.get_indices())
unique_idx = np.unique([i for l in indices for i in l]).astype(np.int64)
return self._index2level(unique_idx, level)
def _index2level(self, indices, level):
"""Convert list of atom_indices in a AtomGroup to either the
Atoms or segments/residues containing these atoms.
Parameters
----------
indices
list of atom indices
level : str
char (A, R, S). Return atoms(A), residues(R) or segments(S) within
*radius* of *atoms*.
"""
n_atom_list = self.atom_group[indices]
if level == 'A':
if not n_atom_list:
return []
else:
return n_atom_list
elif level == 'R':
return list({a.residue for a in n_atom_list})
elif level == 'S':
return list(set([a.segment for a in n_atom_list]))
else:
raise NotImplementedError('{0}: level not implemented'.format(level))
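# Illustrative usage sketch; the topology/trajectory file names are placeholders:
#
# import MDAnalysis as mda
# u = mda.Universe('topology.psf', 'trajectory.dcd')
# ns = AtomNeighborSearch(u.select_atoms('protein'))
# nearby = ns.search(u.select_atoms('resname LIG'), 5.0, level='R')  # residues within 5 A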
|
alejob/mdanalysis
|
package/MDAnalysis/lib/NeighborSearch.py
|
Python
|
gpl-2.0
| 4,172
|
[
"Biopython",
"MDAnalysis"
] |
0c1faac07b226cb656afb1d836fe4f223f1a21ee47878b2be5ff3afee887b2f8
|
'''
*** SHED SKIN Python-to-C++ Compiler ***
Copyright 2005-2013 Mark Dufour; License GNU GPL version 3 (See LICENSE)
cpp.py: output C++ code
output equivalent C++ code, using templates and virtuals to support data and OO polymorphism.
class GenerateVisitor: inherits visitor pattern from compiler.visitor.ASTVisitor, to recursively
generate C++ code for each syntactical Python construct. The constraint graph, with inferred
types, is first 'merged' back to program dimensions (gx.merged_inh).
'''
import os
import string
import struct
import textwrap
import jinja2
from compiler import walk
from compiler.ast import Const, AssTuple, AssList, From, Add, Stmt, AssAttr, \
Keyword, AssName, CallFunc, Slice, Getattr, Dict, Subscript, \
Function as FunctionNode, Return, Class as ClassNode, Name, List, Discard, Sliceobj, Tuple
from compiler.visitor import ASTVisitor
from error import error
from extmod import convert_methods, convert_methods2, do_extmod, pyinit_func
from infer import analyze_callfunc, callfunc_targets, connect_actual_formal, \
called, inode, var_types
from makefile import generate_makefile
from python import assign_rec, aug_msg, Class, def_class, \
is_enum, is_fastfor, is_literal, is_zip2, \
lookup_class, lookup_class_module, lookup_var, lookup_module, \
Function, Module, Variable, StaticClass, smart_lookup_var
from typestr import incompatible_assignment_rec, lowest_common_parents, \
nodetypestr, polymorphic_t, singletype, unboxable, typestr
from virtual import virtuals
class CPPNamer(object):
def __init__(self, gx, mv):
self.gx = gx
self.class_names = [cl.ident for cl in self.gx.allclasses]
self.cpp_keywords = self.gx.cpp_keywords
self.ss_prefix = self.gx.ss_prefix
self.name_by_type = {
str: self.name_str,
Class: self.name_class,
Function: self.name_function,
Variable: self.name_variable,
}
self.mv = mv
def nokeywords(self, name):
if name in self.cpp_keywords:
return self.ss_prefix + name
return name
def namespace_class(self, cl, add_cl=''):
module = cl.mv.module
if module.ident != 'builtin' and module != self.mv.module and module.name_list:
return module.full_path() + '::' + add_cl + self.name(cl)
else:
return add_cl + self.name(cl)
def name(self, obj):
get_name = self.name_by_type[type(obj)]
name = get_name(obj)
return self.nokeywords(name)
def name_variable(self, var):
if var.masks_global():
return '_' + var.name
return self.name_str(var.name)
def name_function(self, func):
return self.name_str(func.ident)
def name_class(self, obj):
return obj.ident
def name_str(self, name):
if [x for x in ('init', 'add') if name == x + self.gx.main_module.ident] or \
name in self.class_names or name + '_' in self.class_names:
name = '_' + name
return name
# --- code generation visitor; use type information
class GenerateVisitor(ASTVisitor):
def __init__(self, gx, module):
self.gx = gx
self.output_base = module.filename[:-3]
self.out = file(self.output_base + '.cpp', 'w')
self.indentation = ''
self.consts = {}
self.mergeinh = self.gx.merged_inh
self.module = module
self.mv = module.mv
self.name = module.ident
self.filling_consts = False
self.with_count = 0
self.bool_wrapper = {}
self.namer = CPPNamer(self.gx, self)
self.jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(
self.gx.sysdir + '/templates/cpp/'),
trim_blocks=True,
lstrip_blocks=True,
)
self.jinja_env.filters['depointer'] = (
lambda ts: ts[:-1] if ts.endswith('*') else ts)
def cpp_name(self, obj):
return self.namer.name(obj)
def insert_consts(self, declare): # XXX ugly
if not self.consts:
return
self.filling_consts = True
if declare:
suffix = '.hpp'
else:
suffix = '.cpp'
lines = file(self.output_base + suffix, 'r').readlines()
newlines = []
j = -1
for (i, line) in enumerate(lines):
if line.startswith('namespace ') and not 'XXX' in line: # XXX
j = i + 1
newlines.append(line)
if i == j:
pairs = []
done = set()
for (node, name) in self.consts.items():
if not name in done and node in self.mergeinh and self.mergeinh[node]: # XXX
ts = nodetypestr(self.gx, node, inode(self.gx, node).parent, mv=self.mv)
if declare:
ts = 'extern ' + ts
pairs.append((ts, name))
done.add(name)
newlines.extend(self.group_declarations(pairs))
newlines.append('\n')
newlines2 = []
j = -1
for (i, line) in enumerate(newlines):
if line.startswith('void __init() {'):
j = i
newlines2.append(line)
if i == j:
todo = {}
for (node, name) in self.consts.items():
if not name in todo:
todo[int(name[6:])] = node
todolist = todo.keys()
todolist.sort()
for number in todolist:
if self.mergeinh[todo[number]]: # XXX
name = 'const_' + str(number)
self.start(' ' + name + ' = ')
if isinstance(todo[number], Const) and isinstance(todo[number].value, str) and len(todo[number].value) == 1:
self.append("__char_cache[%d];" % ord(todo[number].value))
else:
self.visit(todo[number], inode(self.gx, todo[number]).parent)
newlines2.append(self.line + ';\n')
newlines2.append('\n')
file(self.output_base + suffix, 'w').writelines(newlines2)
self.filling_consts = False
def insert_extras(self, suffix):
lines = file(self.output_base + suffix, 'r').readlines()
newlines = []
for line in lines:
newlines.append(line)
if suffix == '.cpp' and line.startswith('#include'):
newlines.extend(self.include_files())
elif suffix == '.hpp' and line.startswith('using namespace'):
newlines.extend(self.fwd_class_refs())
file(self.output_base + suffix, 'w').writelines(newlines)
def fwd_class_refs(self):
lines = []
for _module in self.module.prop_includes:
if _module.builtin:
continue
for name in _module.name_list:
lines.append('namespace __%s__ { /* XXX */\n' % name)
for cl in _module.mv.classes.values():
lines.append('class %s;\n' % self.cpp_name(cl))
for name in _module.name_list:
lines.append('}\n')
if lines:
lines.insert(0, '\n')
return lines
def include_files(self):
# find all (indirect) dependencies
includes = set()
includes.add(self.module)
changed = True
while changed:
size = len(includes)
for module in list(includes):
includes.update(module.prop_includes)
includes.update(module.mv.imports.values())
includes.update(module.mv.fake_imports.values())
changed = (size != len(includes))
includes = set(i for i in includes if i.ident != 'builtin')
# order by cross-file inheritance dependencies
for include in includes:
include.deps = set()
for include in includes:
for cl in include.mv.classes.values():
if cl.bases:
module = cl.bases[0].mv.module
if module.ident != 'builtin' and module != include:
include.deps.add(module)
includes1 = [i for i in includes if i.builtin]
includes2 = [i for i in includes if not i.builtin]
includes = includes1 + self.includes_rec(set(includes2))
return ['#include "%s"\n' % module.include_path() for module in includes]
def includes_rec(self, includes): # XXX should be recursive!? ugh
todo = includes.copy()
result = []
while todo:
for include in todo:
if not include.deps - set(result):
todo.remove(include)
result.append(include)
break
else: # XXX circular dependency warning?
result.append(todo.pop())
return result
# --- group pairs of (type, name) declarations, while paying attention to '*'
def group_declarations(self, pairs):
group = {}
for (type, name) in pairs:
group.setdefault(type, []).append(name)
result = []
for (type, names) in group.items():
names.sort()
if type.endswith('*'):
result.append(type + (', *'.join(names)) + ';\n')
else:
result.append(type + (', '.join(names)) + ';\n')
return result
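    # Illustrative example, not called anywhere:
    #   group_declarations([('int ', 'i'), ('int ', 'j'), ('str *', 's')])
    # returns lines like 'int i, j;\n' and 'str *s;\n', with each pointer name
    # keeping its own '*'.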
def header_file(self):
self.out = file(self.output_base + '.hpp', 'w')
self.visit(self.module.ast, True)
self.out.close()
def output(self, text):
print >>self.out, self.indentation + text
def start(self, text=None):
self.line = self.indentation
if text:
self.line += text
def append(self, text):
self.line += text
def eol(self, text=None):
if text:
self.append(text)
if self.line.strip():
print >>self.out, self.line + ';'
def indent(self):
self.indentation += 4 * ' '
def deindent(self):
self.indentation = self.indentation[:-4]
def visitm(self, *args):
func = None
if args and isinstance(args[-1], (Function, Class)):
func = args[-1]
for arg in args[:-1]:
if isinstance(arg, str):
self.append(arg)
else:
self.visit(arg, func)
def connector(self, node, func):
if singletype(self.gx, node, Module):
return '::'
elif unboxable(self.gx, self.mergeinh[node]):
return '.'
else:
return '->'
def gen_declare_defs(self, vars):
for name, var in vars:
if (singletype(self.gx, var, Module) or var.invisible
or var.name in {'__exception', '__exception2'}):
continue
ts = nodetypestr(self.gx, var, var.parent, mv=self.mv)
yield ts, self.cpp_name(var)
def declare_defs(self, vars, declare):
pairs = []
for ts, name in self.gen_declare_defs(vars):
if declare:
if 'for_in_loop' in ts: # XXX
continue
ts = 'extern ' + ts
pairs.append((ts, name))
return ''.join(self.group_declarations(pairs))
def get_constant(self, node):
parent = inode(self.gx, node).parent
while isinstance(parent, Function) and parent.listcomp: # XXX
parent = parent.parent
if isinstance(parent, Function) and (parent.inherited or not self.inhcpa(parent)): # XXX
return
for other in self.consts: # XXX use mapping
if node.value == other.value:
return self.consts[other]
self.consts[node] = 'const_' + str(len(self.consts))
return self.consts[node]
def module_hpp(self, node):
define = '_'.join(self.module.name_list).upper() + '_HPP'
print >>self.out, '#ifndef __' + define
print >>self.out, '#define __' + define + '\n'
# --- namespaces
print >>self.out, 'using namespace __shedskin__;'
for n in self.module.name_list:
print >>self.out, 'namespace __' + n + '__ {'
print >>self.out
# class declarations
for child in node.node.getChildNodes():
if isinstance(child, ClassNode):
cl = def_class(self.gx, child.name, mv=self.mv)
print >>self.out, 'class ' + self.cpp_name(cl) + ';'
print >>self.out
# --- lambda typedefs
self.func_pointers()
# globals
defs = self.declare_defs(list(self.mv.globals.items()), declare=True)
if defs:
self.output(defs)
print >>self.out
# --- class definitions
for child in node.node.getChildNodes():
if isinstance(child, ClassNode):
self.class_hpp(child)
# --- defaults
for type, number in self.gen_defaults():
print >>self.out, 'extern %s default_%d;' % (type, number)
# function declarations
if self.module != self.gx.main_module:
print >>self.out, 'void __init();'
for child in node.node.getChildNodes():
if isinstance(child, FunctionNode):
func = self.mv.funcs[child.name]
if self.inhcpa(func):
self.visitFunction(func.node, declare=True)
print >>self.out
if self.gx.extension_module:
print >>self.out, 'extern "C" {'
pyinit_func(self)
print >>self.out, '}'
for n in self.module.name_list:
print >>self.out, '} // module namespace'
self.rich_comparison()
if self.gx.extension_module:
convert_methods2(self.gx, self)
print >>self.out, '#endif'
def gen_defaults(self):
for default, (nr, func, func_def_nr) in self.module.mv.defaults.items():
formal = func.formals[len(func.formals) - len(func.defaults) + func_def_nr]
var = func.vars[formal]
yield typestr(self.gx, self.mergeinh[var], func, mv=self.mv), nr # + ' ' + ('default_%d;' % nr)
def init_defaults(self, func):
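        # assign each default-argument expression to its module-level
        # 'default_N' variable (declared extern in the header via gen_defaults)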
for default in func.defaults:
if default in self.mv.defaults:
nr, func, func_def_nr = self.mv.defaults[default]
formal = func.formals[len(func.formals) - len(func.defaults) + func_def_nr]
var = func.vars[formal]
if self.mergeinh[var]:
ts = [t for t in self.mergeinh[default] if isinstance(t[0], Function)]
if not ts or [t for t in ts if called(t[0])]:
self.start('default_%d = ' % nr)
self.visit_conv(default, self.mergeinh[var], None)
self.eol()
def rich_comparison(self):
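        # for classes that implement only part of the comparison protocol,
        # emit template specializations in namespace __shedskin__ so that
        # e.g. __lt(a, b) can fall back to b->__gt__(a), and __cmp is built
        # from __eq__/__lt__/__gt__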
cmp_cls, lt_cls, gt_cls, le_cls, ge_cls = [], [], [], [], []
for cl in self.mv.classes.values():
            if '__cmp__' not in cl.funcs and [f for f in ('__eq__', '__lt__', '__gt__') if f in cl.funcs]:
                cmp_cls.append(cl)
            if '__lt__' not in cl.funcs and '__gt__' in cl.funcs:
                lt_cls.append(cl)
            if '__gt__' not in cl.funcs and '__lt__' in cl.funcs:
                gt_cls.append(cl)
            if '__le__' not in cl.funcs and '__ge__' in cl.funcs:
                le_cls.append(cl)
            if '__ge__' not in cl.funcs and '__le__' in cl.funcs:
                ge_cls.append(cl)
if cmp_cls or lt_cls or gt_cls or le_cls or ge_cls:
print >>self.out, 'namespace __shedskin__ { /* XXX */'
for cl in cmp_cls:
t = '__%s__::%s *' % (self.mv.module.ident, self.cpp_name(cl))
print >>self.out, 'template<> inline __ss_int __cmp(%sa, %sb) {' % (t, t)
print >>self.out, ' if (!a) return -1;'
if '__eq__' in cl.funcs:
print >>self.out, ' if(a->__eq__(b)) return 0;'
if '__lt__' in cl.funcs:
print >>self.out, ' return (a->__lt__(b))?-1:1;'
elif '__gt__' in cl.funcs:
print >>self.out, ' return (a->__gt__(b))?1:-1;'
else:
print >>self.out, ' return __cmp<void *>(a, b);'
print >>self.out, '}'
self.rich_compare(lt_cls, 'lt', 'gt')
self.rich_compare(gt_cls, 'gt', 'lt')
self.rich_compare(le_cls, 'le', 'ge')
self.rich_compare(ge_cls, 'ge', 'le')
print >>self.out, '}'
def rich_compare(self, cls, msg, fallback_msg):
for cl in cls:
t = '__%s__::%s *' % (self.mv.module.ident, self.cpp_name(cl))
print >>self.out, 'template<> inline __ss_bool __%s(%sa, %sb) {' % (msg, t, t)
# print >>self.out, ' if (!a) return -1;' # XXX check
print >>self.out, ' return b->__%s__(a);' % fallback_msg
print >>self.out, '}'
def module_cpp(self, node):
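        # emit the module's .cpp: rendered module template, list comprehension
        # and lambda declarations/definitions, class and function bodies, and
        # the module __init() that sets __name__, initializes defaults and
        # classes, and executes module-level statements and from-imports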
file_top = self.jinja_env.get_template('module.cpp.tpl').render(
node=node,
module=self.module,
imports=[
(child, self.gx.from_module[child])
for child in node.node.getChildNodes()
if isinstance(child, From) and child.modname != '__future__'],
globals=self.gen_declare_defs(self.mv.globals.items()),
nodetypestr=lambda var: nodetypestr(self.gx, var, var.parent, mv=self.mv),
defaults=self.gen_defaults(),
listcomps=self.mv.listcomps,
cpp_name=self.cpp_name,
namer=self.namer,
dedent=textwrap.dedent
)
print >>self.out, file_top
# --- declarations
self.listcomps = {}
for (listcomp, lcfunc, func) in self.mv.listcomps:
self.listcomps[listcomp] = (lcfunc, func)
self.do_listcomps(True)
self.do_lambdas(True)
print >>self.out
# --- definitions
self.do_listcomps(False)
self.do_lambdas(False)
for child in node.node.getChildNodes():
if isinstance(child, ClassNode):
self.class_cpp(child)
elif isinstance(child, FunctionNode):
self.do_comments(child)
self.visit(child)
# --- __init
self.output('void __init() {')
self.indent()
if self.module == self.gx.main_module and not self.gx.extension_module:
self.output('__name__ = new str("__main__");\n')
else:
self.output('__name__ = new str("%s");\n' % self.module.ident)
for child in node.node.getChildNodes():
if isinstance(child, FunctionNode):
self.init_defaults(child)
elif isinstance(child, ClassNode):
for child2 in child.code.getChildNodes():
if isinstance(child2, FunctionNode):
self.init_defaults(child2)
if child.name in self.mv.classes:
cl = self.mv.classes[child.name]
self.output('cl_' + cl.ident + ' = new class_("%s");' % (cl.ident))
if cl.parent.static_nodes:
self.output('%s::__static__();' % self.cpp_name(cl))
elif isinstance(child, Discard):
self.visit_discard(child)
elif isinstance(child, From) and child.modname != '__future__':
module = self.gx.from_module[child]
for (name, pseudonym) in child.names:
pseudonym = pseudonym or name
if name == '*':
for var in module.mv.globals.values():
if not var.invisible and not var.imported and not var.name.startswith('__') and var_types(self.gx, var):
self.start(self.namer.nokeywords(var.name) + ' = ' + module.full_path() + '::' + self.namer.nokeywords(var.name))
self.eol()
elif pseudonym in self.module.mv.globals and not [t for t in var_types(self.gx, self.module.mv.globals[pseudonym]) if isinstance(t[0], Module)]:
self.start(self.namer.nokeywords(pseudonym) + ' = ' + module.full_path() + '::' + self.namer.nokeywords(name))
self.eol()
elif not isinstance(child, (ClassNode, FunctionNode)):
self.do_comments(child)
self.visit(child)
self.deindent()
self.output('}\n')
# --- close namespace
for n in self.module.name_list:
print >>self.out, '} // module namespace'
print >>self.out
# --- c++ main/extension module setup
if self.gx.extension_module:
do_extmod(self.gx, self)
if self.module == self.gx.main_module:
self.do_main()
def visit_discard(self, node, func=None):
if isinstance(node.expr, Const) and node.expr.value is None: # XXX merge with visitStmt
pass
elif isinstance(node.expr, Const) and type(node.expr.value) == str:
self.do_comment(node.expr.value)
else:
self.start('')
self.visit(node, func)
self.eol()
def visitModule(self, node, declare=False):
if declare:
self.module_hpp(node)
else:
self.module_cpp(node)
def do_main(self):
modules = self.gx.modules.values()
if any(module.builtin and module.ident == 'sys' for module in modules):
print >>self.out, 'int main(int __ss_argc, char **__ss_argv) {'
else:
print >>self.out, 'int main(int, char **) {'
self.do_init_modules()
print >>self.out, ' __shedskin__::__start(__%s__::__init);' % self.module.ident
print >>self.out, '}'
def do_init_modules(self):
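        # call __init() of every module except the main module and the builtin
        # module, in import order; 'sys' receives argc/argv (or 0, 0 when
        # building an extension module)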
print >>self.out, ' __shedskin__::__init();'
for module in sorted(self.gx.modules.values(), key=lambda x: x.import_order):
if module != self.gx.main_module and module.ident != 'builtin':
if module.ident == 'sys':
if self.gx.extension_module:
print >>self.out, ' __sys__::__init(0, 0);'
else:
print >>self.out, ' __sys__::__init(__ss_argc, __ss_argv);'
else:
print >>self.out, ' ' + module.full_path() + '::__init();'
def do_comment(self, s):
if not s:
return
doc = s.replace('/*', '//').replace('*/', '//').split('\n')
self.output('/**')
if doc[0].strip():
self.output(doc[0])
rest = textwrap.dedent('\n'.join(doc[1:])).splitlines()
for l in rest:
self.output(l)
self.output('*/')
def do_comments(self, child):
if child in self.gx.comments:
for n in self.gx.comments[child]:
self.do_comment(n)
def visitContinue(self, node, func=None):
self.output('continue;')
def visitWith(self, node, func=None):
self.start()
if node.vars:
self.visitm('WITH_VAR(', node.expr, ',', node.vars, func)
else:
self.visitm('WITH(', node.expr, func)
self.append(',%d)' % self.with_count)
self.with_count += 1
print >>self.out, self.line
self.indent()
self.mv.current_with_vars.append(node.vars)
self.visit(node.body, func)
self.mv.current_with_vars.pop()
self.deindent()
self.output('END_WITH')
def visitWhile(self, node, func=None):
print >>self.out
if node.else_:
self.output('%s = 0;' % self.mv.tempcount[node.else_])
self.start('while (')
self.bool_test(node.test, func)
self.append(') {')
print >>self.out, self.line
self.indent()
self.gx.loopstack.append(node)
self.visit(node.body, func)
self.gx.loopstack.pop()
self.deindent()
self.output('}')
if node.else_:
self.output('if (!%s) {' % self.mv.tempcount[node.else_])
self.indent()
self.visit(node.else_, func)
self.deindent()
self.output('}')
def copy_method(self, cl, name, declare):
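        # generate __copy__/__deepcopy__: allocate a fresh instance and copy
        # each used instance variable; __deepcopy__ also registers 'this' in
        # the memo dict and deep-copies the members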
class_name = self.cpp_name(cl)
header = class_name + ' *'
if not declare:
header += class_name + '::'
header += name + '('
self.start(header)
if name == '__deepcopy__':
self.append('dict<void *, pyobj *> *memo')
self.append(')')
if not declare:
print >>self.out, self.line + ' {'
self.indent()
self.output(class_name + ' *c = new ' + class_name + '();')
if name == '__deepcopy__':
self.output('memo->__setitem__(this, c);')
for var in cl.vars.values():
if not var.invisible and var in self.gx.merged_inh and self.gx.merged_inh[var]:
varname = self.cpp_name(var)
if name == '__deepcopy__':
self.output('c->%s = __deepcopy(%s);' % (varname, varname))
else:
self.output('c->%s = %s;' % (varname, varname))
self.output('return c;')
self.deindent()
self.output('}\n')
else:
self.eol()
def copy_methods(self, cl, declare):
if cl.has_copy:
self.copy_method(cl, '__copy__', declare)
if cl.has_deepcopy:
self.copy_method(cl, '__deepcopy__', declare)
def class_hpp(self, node):
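        # emit the class declaration: base classes (pyobj by default, or
        # pyiter<T>/pycallN<..> when __iter__/__call__ are defined), member
        # variables, default and __init__-forwarding constructors, an optional
        # destructor calling __del__, a static init hook and method prototypes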
cl = self.mv.classes[node.name]
self.output('extern class_ *cl_' + cl.ident + ';')
# --- header
clnames = [self.namer.namespace_class(b) for b in cl.bases]
if not clnames:
clnames = ['pyobj']
if '__iter__' in cl.funcs: # XXX get return type of 'next'
ts = nodetypestr(self.gx, cl.funcs['__iter__'].retnode.thing, mv=self.mv)
if ts.startswith('__iter<'):
ts = ts[ts.find('<') + 1:ts.find('>')]
clnames = ['pyiter<%s>' % ts] # XXX use iterable interface
if '__call__' in cl.funcs:
callfunc = cl.funcs['__call__']
r_typestr = nodetypestr(self.gx, callfunc.retnode.thing, mv=self.mv).strip()
nargs = len(callfunc.formals) - 1
argtypes = [nodetypestr(self.gx, callfunc.vars[callfunc.formals[i + 1]], mv=self.mv).strip() for i in range(nargs)]
clnames = ['pycall%d<%s,%s>' % (nargs, r_typestr, ','.join(argtypes))]
self.output('class ' + self.cpp_name(cl) + ' : ' + ', '.join(['public ' + clname for clname in clnames]) + ' {')
self.do_comment(node.doc)
self.output('public:')
self.indent()
self.class_variables(cl)
# --- constructor
need_init = False
if '__init__' in cl.funcs:
initfunc = cl.funcs['__init__']
if self.inhcpa(initfunc):
need_init = True
# --- default constructor
if need_init:
self.output(self.cpp_name(cl) + '() {}')
else:
self.output(self.cpp_name(cl) + '() { this->__class__ = cl_' + cl.ident + '; }')
# --- init constructor
if need_init:
self.func_header(initfunc, declare=True, is_init=True)
self.indent()
self.output('this->__class__ = cl_' + cl.ident + ';')
self.output('__init__(' + ', '.join(self.cpp_name(initfunc.vars[f]) for f in initfunc.formals[1:]) + ');')
self.deindent()
self.output('}')
# --- destructor call
if '__del__' in cl.funcs and self.inhcpa(cl.funcs['__del__']):
self.output('~%s() { this->__del__(); }' % self.cpp_name(cl))
# --- static code
if cl.parent.static_nodes:
self.output('static void __static__();')
# --- methods
virtuals(self, cl, True)
for func in cl.funcs.values():
if func.node and not (func.ident == '__init__' and func.inherited):
self.visitFunction(func.node, cl, True)
self.copy_methods(cl, True)
if self.gx.extension_module:
convert_methods(self.gx, self, cl, True)
self.deindent()
self.output('};\n')
def class_cpp(self, node):
cl = self.mv.classes[node.name]
if node in self.gx.comments:
self.do_comments(node)
else:
self.output('/**\nclass %s\n*/\n' % cl.ident)
self.output('class_ *cl_' + cl.ident + ';\n')
# --- methods
virtuals(self, cl, False)
for func in cl.funcs.values():
if func.node and not (func.ident == '__init__' and func.inherited):
self.visitFunction(func.node, cl, False)
self.copy_methods(cl, False)
# --- class variable declarations
if cl.parent.vars: # XXX merge with visitModule
for var in cl.parent.vars.values():
if var in self.gx.merged_inh and self.gx.merged_inh[var]:
self.start(nodetypestr(self.gx, var, cl.parent, mv=self.mv) + cl.ident + '::' + self.cpp_name(var))
self.eol()
print >>self.out
# --- static init
if cl.parent.static_nodes:
self.output('void %s::__static__() {' % self.cpp_name(cl))
self.indent()
for node in cl.parent.static_nodes:
self.visit(node, cl.parent)
self.deindent()
self.output('}')
print >>self.out
def class_variables(self, cl):
# --- class variables
if cl.parent.vars:
for var in cl.parent.vars.values():
if var in self.gx.merged_inh and self.gx.merged_inh[var]:
self.output('static ' + nodetypestr(self.gx, var, cl.parent, mv=self.mv) + self.cpp_name(var) + ';')
print >>self.out
# --- instance variables
for var in cl.vars.values():
if var.invisible:
continue # var.name in cl.virtualvars: continue
# var is masked by ancestor var
vars = set()
for ancestor in cl.ancestors():
vars.update(ancestor.vars)
if var.name in vars:
continue
if var in self.gx.merged_inh and self.gx.merged_inh[var]:
self.output(nodetypestr(self.gx, var, cl, mv=self.mv) + self.cpp_name(var) + ';')
if [v for v in cl.vars if not v.startswith('__')]:
print >>self.out
def nothing(self, types):
if def_class(self.gx, 'complex') in (t[0] for t in types):
return 'mcomplex(0.0, 0.0)'
elif def_class(self.gx, 'bool_') in (t[0] for t in types):
return 'False'
else:
return '0'
def inhcpa(self, func):
return called(func) or (func in self.gx.inheritance_relations and [1 for f in self.gx.inheritance_relations[func] if called(f)])
def visitSlice(self, node, func=None):
if node.flags == 'OP_DELETE':
self.start()
self.visit(inode(self.gx, node.expr).fakefunc, func)
self.eol()
else:
self.visit(inode(self.gx, node.expr).fakefunc, func)
def visitLambda(self, node, parent=None):
self.append(self.mv.lambdaname[node])
def subtypes(self, types, varname):
subtypes = set()
for t in types:
if isinstance(t[0], Class):
var = t[0].vars.get(varname)
if var and (var, t[1], 0) in self.gx.cnode: # XXX yeah?
subtypes.update(self.gx.cnode[var, t[1], 0].types())
return subtypes
def bin_tuple(self, types):
for t in types:
if isinstance(t[0], Class) and t[0].ident == 'tuple2':
var1 = t[0].vars.get('first')
var2 = t[0].vars.get('second')
if var1 and var2:
if (var1, t[1], 0) in self.gx.cnode and (var2, t[1], 0) in self.gx.cnode:
if self.gx.cnode[var1, t[1], 0].types() != self.gx.cnode[var2, t[1], 0].types():
return True
return False
def instance_new(self, node, argtypes):
if argtypes is None:
argtypes = self.gx.merged_inh[node]
ts = typestr(self.gx, argtypes, mv=self.mv)
if ts.startswith('pyseq') or ts.startswith('pyiter'): # XXX
argtypes = self.gx.merged_inh[node]
ts = typestr(self.gx, argtypes, mv=self.mv)
self.append('(new ' + ts[:-2] + '(')
return argtypes
def visitDict(self, node, func=None, argtypes=None):
argtypes = self.instance_new(node, argtypes)
if node.items:
self.append(str(len(node.items)) + ', ')
ts_key = typestr(self.gx, self.subtypes(argtypes, 'unit'), mv=self.mv)
ts_value = typestr(self.gx, self.subtypes(argtypes, 'value'), mv=self.mv)
for (key, value) in node.items:
self.visitm('(new tuple2<%s, %s>(2,' % (ts_key, ts_value), func)
type_child = self.subtypes(argtypes, 'unit')
self.visit_conv(key, type_child, func)
self.append(',')
type_child = self.subtypes(argtypes, 'value')
self.visit_conv(value, type_child, func)
self.append('))')
if (key, value) != node.items[-1]:
self.append(',')
self.append('))')
def visit_tuple_list(self, node, func=None, argtypes=None):
if isinstance(func, Class): # XXX
func = None
argtypes = self.instance_new(node, argtypes)
children = node.getChildNodes()
if children:
self.append(str(len(children)) + ',')
if len(children) >= 2 and self.bin_tuple(argtypes): # XXX >=2?
type_child = self.subtypes(argtypes, 'first')
self.visit_conv(children[0], type_child, func)
self.append(',')
type_child = self.subtypes(argtypes, 'second')
self.visit_conv(children[1], type_child, func)
else:
for child in children:
type_child = self.subtypes(argtypes, 'unit')
self.visit_conv(child, type_child, func)
if child != children[-1]:
self.append(',')
self.append('))')
def visitTuple(self, node, func=None, argtypes=None):
if len(node.nodes) > 2:
types = set()
for child in node.nodes:
types.update(self.mergeinh[child])
typestr(self.gx, types, node=child, tuple_check=True, mv=self.mv)
self.visit_tuple_list(node, func, argtypes)
def visitList(self, node, func=None, argtypes=None):
self.visit_tuple_list(node, func, argtypes)
def visitAssert(self, node, func=None):
self.start('ASSERT(')
self.visitm(node.test, ', ', func)
if len(node.getChildNodes()) > 1:
self.visit(node.getChildNodes()[1], func)
else:
self.append('0')
self.eol(')')
def visitRaise(self, node, func=None):
cl = None # XXX sep func
t = [t[0] for t in self.mergeinh[node.expr1]]
if len(t) == 1:
cl = t[0]
self.start('throw (')
# --- raise class [, constructor args]
if isinstance(node.expr1, Name) and not lookup_var(node.expr1.name, func, mv=self.mv): # XXX lookup_class
self.append('new %s(' % node.expr1.name)
if node.expr2:
if isinstance(node.expr2, Tuple) and node.expr2.nodes:
for n in node.expr2.nodes:
self.visit(n, func)
if n != node.expr2.nodes[-1]:
self.append(', ') # XXX visitcomma(nodes)
else:
self.visit(node.expr2, func)
self.append(')')
# --- raise instance
elif isinstance(cl, Class) and cl.mv.module.ident == 'builtin' and not [a for a in cl.ancestors_upto(None) if a.ident == 'BaseException']:
self.append('new Exception()')
else:
self.visit(node.expr1, func)
self.eol(')')
def visitTryExcept(self, node, func=None):
# try
self.start('try {')
print >>self.out, self.line
self.indent()
if node.else_:
self.output('%s = 0;' % self.mv.tempcount[node.else_])
self.visit(node.body, func)
if node.else_:
self.output('%s = 1;' % self.mv.tempcount[node.else_])
self.deindent()
self.start('}')
# except
for handler in node.handlers:
if isinstance(handler[0], Tuple):
pairs = [(n, handler[1], handler[2]) for n in handler[0].nodes]
else:
pairs = [(handler[0], handler[1], handler[2])]
for (h0, h1, h2) in pairs:
if isinstance(h0, Name) and h0.name in ['int', 'float', 'str', 'class']:
continue # XXX lookup_class
elif h0:
cl = lookup_class(h0, self.mv)
if cl.mv.module.builtin and cl.ident in ['KeyboardInterrupt', 'FloatingPointError', 'OverflowError', 'ZeroDivisionError', 'SystemExit']:
error("system '%s' is not caught" % cl.ident, self.gx, h0, warning=True, mv=self.mv)
arg = self.namer.namespace_class(cl) + ' *'
else:
arg = 'Exception *'
if h1:
arg += h1.name
self.append(' catch (%s) {' % arg)
print >>self.out, self.line
self.indent()
self.visit(h2, func)
self.deindent()
self.start('}')
print >>self.out, self.line
# else
if node.else_:
self.output('if(%s) { // else' % self.mv.tempcount[node.else_])
self.indent()
self.visit(node.else_, func)
self.deindent()
self.output('}')
def do_fastfor(self, node, qual, quals, iter, func, genexpr):
if len(qual.list.args) == 3 and not is_literal(qual.list.args[2]):
for arg in qual.list.args: # XXX simplify
if arg in self.mv.tempcount:
self.start()
self.visitm(self.mv.tempcount[arg], ' = ', arg, func)
self.eol()
self.fastfor(qual, iter, func)
self.forbody(node, quals, iter, func, False, genexpr)
def visit_temp(self, node, func): # XXX generalize?
if node in self.mv.tempcount:
self.append(self.mv.tempcount[node])
else:
self.visit(node, func)
def fastfor(self, node, assname, func=None):
# --- for i in range(..) -> for( i=l, u=expr; i < u; i++ ) ..
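        # one-, two- and three-argument range() calls are handled below; the
        # generated code is roughly FAST_FOR(i, start, stop, step, t1, t2)
        # with t1/t2 the temporaries reserved for this loop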
ivar, evar = self.mv.tempcount[node.assign], self.mv.tempcount[node.list]
self.start('FAST_FOR(%s,' % assname)
if len(node.list.args) == 1:
self.append('0,')
self.visit_temp(node.list.args[0], func)
self.append(',')
else:
self.visit_temp(node.list.args[0], func)
self.append(',')
self.visit_temp(node.list.args[1], func)
self.append(',')
if len(node.list.args) != 3:
self.append('1')
else:
self.visit_temp(node.list.args[2], func)
self.append(',%s,%s)' % (ivar[2:], evar[2:]))
print >>self.out, self.line
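    # the next three predicates detect loop forms that get a specialized
    # macro: enumerate(), zip() over two sequences, and dict.iteritems(),
    # but only when the operands are known to be builtin containers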
def fastenum(self, node):
return is_enum(node) and self.only_classes(node.list.args[0], ('tuple', 'list'))
def fastzip2(self, node):
names = ('tuple', 'list')
return is_zip2(node) and self.only_classes(node.list.args[0], names) and self.only_classes(node.list.args[1], names)
def fastdictiter(self, node):
return isinstance(node.list, CallFunc) and isinstance(node.assign, (AssList, AssTuple)) and self.only_classes(node.list.node, ('dict',)) and isinstance(node.list.node, Getattr) and node.list.node.attrname == 'iteritems'
def only_classes(self, node, names):
if node not in self.mergeinh:
return False
classes = [def_class(self.gx, name, mv=self.mv) for name in names] + [def_class(self.gx, 'none')]
return not [t for t in self.mergeinh[node] if t[0] not in classes]
def visitFor(self, node, func=None):
if isinstance(node.assign, AssName):
assname = node.assign.name
elif isinstance(node.assign, AssAttr):
self.start('')
self.visitAssAttr(node.assign, func)
assname = self.line.strip() # XXX yuck
else:
assname = self.mv.tempcount[node.assign]
assname = self.cpp_name(assname)
print >>self.out
if node.else_:
self.output('%s = 0;' % self.mv.tempcount[node.else_])
if is_fastfor(node):
self.do_fastfor(node, node, None, assname, func, False)
elif self.fastenum(node):
self.do_fastenum(node, func, False)
self.forbody(node, None, assname, func, True, False)
elif self.fastzip2(node):
self.do_fastzip2(node, func, False)
self.forbody(node, None, assname, func, True, False)
elif self.fastdictiter(node):
self.do_fastdictiter(node, func, False)
self.forbody(node, None, assname, func, True, False)
else:
pref, tail = self.forin_preftail(node)
self.start('FOR_IN%s(%s,' % (pref, assname))
self.visit(node.list, func)
print >>self.out, self.line + ',' + tail + ')'
self.forbody(node, None, assname, func, False, False)
print >>self.out
def do_fastzip2(self, node, func, genexpr):
self.start('FOR_IN_ZIP(')
left, right = node.assign.nodes
self.do_fastzip2_one(left, func)
self.do_fastzip2_one(right, func)
self.visitm(node.list.args[0], ',', node.list.args[1], ',', func)
tail1 = self.mv.tempcount[(node, 2)][2:] + ',' + self.mv.tempcount[(node, 3)][2:] + ','
tail2 = self.mv.tempcount[(node.list)][2:] + ',' + self.mv.tempcount[(node, 4)][2:]
print >>self.out, self.line + tail1 + tail2 + ')'
self.indent()
if isinstance(left, (AssTuple, AssList)):
self.tuple_assign(left, self.mv.tempcount[left], func)
if isinstance(right, (AssTuple, AssList)):
self.tuple_assign(right, self.mv.tempcount[right], func)
def do_fastzip2_one(self, node, func):
if isinstance(node, (AssTuple, AssList)):
self.append(self.mv.tempcount[node])
else:
self.visit(node, func)
self.append(',')
def do_fastenum(self, node, func, genexpr):
self.start('FOR_IN_ENUM(')
left, right = node.assign.nodes
self.do_fastzip2_one(right, func)
self.visit(node.list.args[0], func)
tail = self.mv.tempcount[(node, 2)][2:] + ',' + self.mv.tempcount[node.list][2:]
print >>self.out, self.line + ',' + tail + ')'
self.indent()
self.start()
self.visitm(left, ' = ' + self.mv.tempcount[node.list], func)
self.eol()
if isinstance(right, (AssTuple, AssList)):
self.tuple_assign(right, self.mv.tempcount[right], func)
def do_fastdictiter(self, node, func, genexpr):
self.start('FOR_IN_DICT(')
left, right = node.assign.nodes
tail = self.mv.tempcount[node, 7][2:] + ',' + self.mv.tempcount[node, 6][2:] + ',' + self.mv.tempcount[node.list][2:]
self.visit(node.list.node.expr, func)
print >>self.out, self.line + ',' + tail + ')'
self.indent()
self.start()
if left in self.mv.tempcount: # XXX not for zip, enum..?
self.visitm('%s = %s->key' % (self.mv.tempcount[left], self.mv.tempcount[node, 6]), func)
else:
self.visitm(left, ' = %s->key' % self.mv.tempcount[node, 6], func)
self.eol()
self.start()
if right in self.mv.tempcount:
self.visitm('%s = %s->value' % (self.mv.tempcount[right], self.mv.tempcount[node, 6]), func)
else:
self.visitm(right, ' = %s->value' % self.mv.tempcount[node, 6], func)
self.eol()
if isinstance(left, (AssTuple, AssList)):
self.tuple_assign(left, self.mv.tempcount[left], func)
if isinstance(right, (AssTuple, AssList)):
self.tuple_assign(right, self.mv.tempcount[right], func)
def forin_preftail(self, node):
tail = self.mv.tempcount[node][2:] + ',' + self.mv.tempcount[node.list][2:]
tail += ',' + self.mv.tempcount[(node, 5)][2:]
return '', tail
def forbody(self, node, quals, iter, func, skip, genexpr):
if quals is not None:
self.listcompfor_body(node, quals, iter, func, False, genexpr)
return
if not skip:
self.indent()
if isinstance(node.assign, (AssTuple, AssList)):
self.tuple_assign(node.assign, self.mv.tempcount[node.assign], func)
self.gx.loopstack.append(node)
self.visit(node.body, func)
self.gx.loopstack.pop()
self.deindent()
self.output('END_FOR')
if node.else_:
self.output('if (!%s) {' % self.mv.tempcount[node.else_])
self.indent()
self.visit(node.else_, func)
self.deindent()
self.output('}')
def func_pointers(self):
for func in self.mv.lambdas.values():
argtypes = [nodetypestr(self.gx, func.vars[formal], func, mv=self.mv).rstrip() for formal in func.formals]
if func.largs is not None:
argtypes = argtypes[:func.largs]
rettype = nodetypestr(self.gx, func.retnode.thing, func, mv=self.mv)
print >>self.out, 'typedef %s(*lambda%d)(' % (rettype, func.lambdanr) + ', '.join(argtypes) + ');'
print >>self.out
# --- function/method header
def func_header(self, func, declare, is_init=False):
method = isinstance(func.parent, Class)
if method:
formals = [f for f in func.formals if f != 'self']
else:
formals = [f for f in func.formals]
if func.largs is not None:
formals = formals[:func.largs]
if is_init:
ident = self.cpp_name(func.parent)
else:
ident = self.cpp_name(func)
self.start()
# --- return expression
header = ''
if is_init:
pass
elif func.ident in ['__hash__']:
header += 'long ' # XXX __ss_int leads to problem with virtual parent
elif func.returnexpr:
header += nodetypestr(self.gx, func.retnode.thing, func, mv=self.mv) # XXX mult
else:
header += 'void '
ftypes = [nodetypestr(self.gx, func.vars[f], func, mv=self.mv) for f in formals]
# if arguments type too precise (e.g. virtually called) cast them back
oldftypes = ftypes
if func.ftypes:
ftypes = func.ftypes[1:]
# --- method header
if method and not declare:
header += self.cpp_name(func.parent) + '::'
header += ident
# --- cast arguments if necessary (explained above)
casts = []
casters = set()
if func.ftypes:
for i in range(min(len(oldftypes), len(ftypes))): # XXX this is 'cast on specialize'.. how about generalization?
if oldftypes[i] != ftypes[i]:
casts.append(oldftypes[i] + formals[i] + ' = (' + oldftypes[i] + ')__' + formals[i] + ';')
if not declare:
casters.add(i)
formals2 = formals[:]
for (i, f) in enumerate(formals2): # XXX
formals2[i] = self.cpp_name(func.vars[f])
if i in casters:
formals2[i] = '__' + formals2[i]
formaldecs = [o + f for (o, f) in zip(ftypes, formals2)]
if declare and isinstance(func.parent, Class) and func.ident in func.parent.staticmethods:
header = 'static ' + header
if is_init and not formaldecs:
formaldecs = ['int __ss_init']
if func.ident.startswith('__lambda'): # XXX
header = 'static inline ' + header
# --- output
self.append(header + '(' + ', '.join(formaldecs) + ')')
if is_init:
print >>self.out, self.line + ' {'
elif declare:
self.eol()
else:
print >>self.out, self.line + ' {'
self.indent()
if not declare and func.doc:
self.do_comment(func.doc)
for cast in casts:
self.output(cast)
self.deindent()
def visitFunction(self, node, parent=None, declare=False):
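        # emit a function or method; invisible, inherited and never-called
        # functions are skipped (with a warning where that looks unintended),
        # and generators are routed through generator_class/generator_body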
# locate right func instance
if parent and isinstance(parent, Class):
func = parent.funcs[node.name]
elif node.name in self.mv.funcs:
func = self.mv.funcs[node.name]
else:
func = self.mv.lambdas[node.name]
if func.invisible or (func.inherited and not func.ident == '__init__'):
return
if declare and func.declared: # XXX
return
# check whether function is called at all (possibly via inheritance)
if not self.inhcpa(func):
if func.ident in ['__iadd__', '__isub__', '__imul__']:
return
if func.lambdanr is None and not repr(node.code).startswith("Stmt([Raise(CallFunc(Name('NotImplementedError')"):
error(repr(func) + ' not called!', self.gx, node, warning=True, mv=self.mv)
if not (declare and func.parent and func.ident in func.parent.virtuals):
return
if func.isGenerator and not declare:
self.generator_class(func)
self.func_header(func, declare)
if declare:
return
self.indent()
if func.isGenerator:
self.generator_body(func)
return
# --- local declarations
self.local_defs(func)
# --- function body
for fake_unpack in func.expand_args.values():
self.visit(fake_unpack, func)
self.visit(node.code, func)
if func.fakeret:
self.visit(func.fakeret, func)
# --- add Return(None) (sort of) if function doesn't already end with a Return
if node.getChildNodes():
lastnode = node.getChildNodes()[-1]
if not func.ident == '__init__' and not func.fakeret and not isinstance(lastnode, Return) and not (isinstance(lastnode, Stmt) and isinstance(lastnode.nodes[-1], Return)): # XXX use Stmt in moduleVisitor
self.output('return %s;' % self.nothing(self.mergeinh[func.retnode.thing]))
self.deindent()
self.output('}\n')
def generator_ident(self, func): # XXX merge?
if func.parent:
return func.parent.ident + '_' + func.ident
return func.ident
def generator_class(self, func):
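        # a generator becomes a C++ class __gen_<name> holding its locals plus
        # a __last_yield counter; __get_next() switches on __last_yield to
        # jump back to the statement following the last yield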
ident = self.generator_ident(func)
self.output('class __gen_%s : public %s {' % (ident, nodetypestr(self.gx, func.retnode.thing, func, mv=self.mv)[:-2]))
self.output('public:')
self.indent()
pairs = [(nodetypestr(self.gx, func.vars[f], func, mv=self.mv), self.cpp_name(func.vars[f])) for f in func.vars]
self.output(self.indentation.join(self.group_declarations(pairs)))
self.output('int __last_yield;\n')
args = []
for f in func.formals:
args.append(nodetypestr(self.gx, func.vars[f], func, mv=self.mv) + self.cpp_name(func.vars[f]))
self.output(('__gen_%s(' % ident) + ','.join(args) + ') {')
self.indent()
for f in func.formals:
self.output('this->%s = %s;' % (self.cpp_name(func.vars[f]), self.cpp_name(func.vars[f])))
for fake_unpack in func.expand_args.values():
self.visit(fake_unpack, func)
self.output('__last_yield = -1;')
self.deindent()
self.output('}\n')
func2 = nodetypestr(self.gx, func.retnode.thing, func, mv=self.mv)[7:-3]
self.output('%s __get_next() {' % func2)
self.indent()
self.output('switch(__last_yield) {')
self.indent()
for (i, n) in enumerate(func.yieldNodes):
self.output('case %d: goto __after_yield_%d;' % (i, i))
self.output('default: break;')
self.deindent()
self.output('}')
for child in func.node.code.getChildNodes():
if isinstance(child, Discard):
self.visit_discard(child, func)
else:
self.visit(child, func)
self.output('__stop_iteration = true;')
self.output('return __zero<%s>();' % func2)
self.deindent()
self.output('}\n')
self.deindent()
self.output('};\n')
def generator_body(self, func):
ident = self.generator_ident(func)
if not (func.isGenerator and func.parent):
formals = [self.cpp_name(func.vars[f]) for f in func.formals]
else:
formals = ['this'] + [self.cpp_name(func.vars[f]) for f in func.formals if f != 'self']
self.output('return new __gen_%s(%s);\n' % (ident, ','.join(formals)))
self.deindent()
self.output('}\n')
def visitYield(self, node, func):
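        # record which yield we are at, return the converted value, and emit
        # the __after_yield_N label that __get_next() jumps to on resumption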
self.output('__last_yield = %d;' % func.yieldNodes.index(node))
self.start('__result = ')
self.visit_conv(node.value, self.mergeinh[func.yieldnode.thing], func)
self.eol()
self.output('return __result;')
self.output('__after_yield_%d:;' % func.yieldNodes.index(node))
self.start()
def visitNot(self, node, func=None):
self.append('__NOT(')
self.bool_test(node.expr, func)
self.append(')')
def visitBackquote(self, node, func=None):
self.visitm('repr(', inode(self.gx, node.expr).fakefunc.node.expr, ')', func)
def visitIf(self, node, func=None):
for test in node.tests:
self.start()
if test != node.tests[0]:
self.append('else ')
self.append('if (')
self.bool_test(test[0], func)
print >>self.out, self.line + ') {'
self.indent()
self.visit(test[1], func)
self.deindent()
self.output('}')
if node.else_:
self.output('else {')
self.indent()
self.visit(node.else_, func)
self.deindent()
self.output('}')
def visitIfExp(self, node, func=None):
types = self.mergeinh[node]
self.append('((')
self.bool_test(node.test, func)
self.append(')?(')
self.visit_conv(node.then, types, func)
self.append('):(')
self.visit_conv(node.else_, types, func)
self.append('))')
def visit_conv(self, node, argtypes, func, check_temp=True):
# convert/cast node to type it is assigned to
actualtypes = self.mergeinh[node]
if check_temp and node in self.mv.tempcount: # XXX
self.append(self.mv.tempcount[node])
elif isinstance(node, Dict):
self.visitDict(node, func, argtypes=argtypes)
elif isinstance(node, Tuple):
self.visitTuple(node, func, argtypes=argtypes)
elif isinstance(node, List):
self.visitList(node, func, argtypes=argtypes)
elif isinstance(node, CallFunc) and isinstance(node.node, Name) and node.node.name in ('list', 'tuple', 'dict', 'set'):
self.visitCallFunc(node, func, argtypes=argtypes)
elif isinstance(node, Name) and node.name == 'None':
self.visit(node, func)
else: # XXX messy
cast = ''
if actualtypes and argtypes and typestr(self.gx, actualtypes, mv=self.mv) != typestr(self.gx, argtypes, mv=self.mv) and typestr(self.gx, actualtypes, mv=self.mv) != 'str *': # XXX
if incompatible_assignment_rec(self.gx, actualtypes, argtypes):
error("incompatible types", self.gx, node, warning=True, mv=self.mv)
else:
cast = '(' + typestr(self.gx, argtypes, mv=self.mv).strip() + ')'
if cast == '(complex)':
cast = 'mcomplex'
if cast:
self.append('(' + cast + '(')
self.visit(node, func)
if cast:
self.append('))')
def visitBreak(self, node, func=None):
if self.gx.loopstack[-1].else_ in self.mv.tempcount:
self.output('%s = 1;' % self.mv.tempcount[self.gx.loopstack[-1].else_])
self.output('break;')
def visitStmt(self, node, func=None):
for b in node.nodes:
if isinstance(b, Discard):
self.visit_discard(b, func)
else:
self.visit(b, func)
def visitOr(self, node, func=None):
self.visit_and_or(node, node.nodes, '__OR', 'or', func)
def visitAnd(self, node, func=None):
self.visit_and_or(node, node.nodes, '__AND', 'and', func)
def visit_and_or(self, node, nodes, op, mix, func=None):
if node in self.gx.bool_test_only:
self.append('(')
for n in nodes:
self.bool_test(n, func)
if n != node.nodes[-1]:
self.append(' ' + mix + ' ')
self.append(')')
else:
child = nodes[0]
if len(nodes) > 1:
self.append(op + '(')
self.visit_conv(child, self.mergeinh[node], func, check_temp=False)
if len(nodes) > 1:
self.append(', ')
self.visit_and_or(node, nodes[1:], op, mix, func)
self.append(', ' + self.mv.tempcount[child][2:] + ')')
def visitCompare(self, node, func=None, wrapper=True):
        if node not in self.bool_wrapper:
self.append('___bool(')
self.done = set()
mapping = {
'>': ('__gt__', '>', None),
'<': ('__lt__', '<', None),
'!=': ('__ne__', '!=', None),
'==': ('__eq__', '==', None),
'<=': ('__le__', '<=', None),
'>=': ('__ge__', '>=', None),
'is': (None, '==', None),
'is not': (None, '!=', None),
'in': ('__contains__', None, None),
'not in': ('__contains__', None, '!'),
}
left = node.expr
for op, right in node.ops:
msg, short, pre = mapping[op]
if msg == '__contains__':
self.do_compare(right, left, msg, short, func, pre)
else:
self.do_compare(left, right, msg, short, func, pre)
if right != node.ops[-1][1]:
self.append('&&')
left = right
        if node not in self.bool_wrapper:
self.append(')')
def visitAugAssign(self, node, func=None):
if isinstance(node.node, Subscript):
self.start()
if set([t[0].ident for t in self.mergeinh[node.node.expr] if isinstance(t[0], Class)]) in [set(['dict']), set(['defaultdict'])] and node.op == '+=':
self.visitm(node.node.expr, '->__addtoitem__(', inode(self.gx, node).subs, ', ', node.expr, ')', func)
self.eol()
return
self.visitm(inode(self.gx, node).temp1 + ' = ', node.node.expr, func)
self.eol()
self.start()
self.visitm(inode(self.gx, node).temp2 + ' = ', inode(self.gx, node).subs, func)
self.eol()
self.visit(inode(self.gx, node).assignhop, func)
def visitAdd(self, node, func=None):
str_nodes = self.rec_string_addition(node)
if str_nodes and len(str_nodes) > 2:
self.append('__add_strs(%d, ' % len(str_nodes))
for (i, node) in enumerate(str_nodes):
self.visit(node, func)
if i < len(str_nodes) - 1:
self.append(', ')
self.append(')')
else:
self.visitBinary(node.left, node.right, aug_msg(node, 'add'), '+', func)
def rec_string_addition(self, node):
if isinstance(node, Add):
l, r = self.rec_string_addition(node.left), self.rec_string_addition(node.right)
if l and r:
return l + r
elif self.mergeinh[node] == set([(def_class(self.gx, 'str_'), 0)]):
return [node]
def visitBitand(self, node, func=None):
self.visit_bitop(node, aug_msg(node, 'and'), '&', func)
def visitBitor(self, node, func=None):
self.visit_bitop(node, aug_msg(node, 'or'), '|', func)
def visitBitxor(self, node, func=None):
self.visit_bitop(node, aug_msg(node, 'xor'), '^', func)
def visit_bitop(self, node, msg, inline, func=None):
ltypes = self.mergeinh[node.nodes[0]]
ul = unboxable(self.gx, ltypes)
self.append('(')
for child in node.nodes:
self.append('(')
self.visit(child, func)
self.append(')')
if child is not node.nodes[-1]:
if ul:
self.append(inline)
else:
self.append('->' + msg)
self.append(')')
def visitRightShift(self, node, func=None):
self.visitBinary(node.left, node.right, aug_msg(node, 'rshift'), '>>', func)
def visitLeftShift(self, node, func=None):
self.visitBinary(node.left, node.right, aug_msg(node, 'lshift'), '<<', func)
def visitMul(self, node, func=None):
self.visitBinary(node.left, node.right, aug_msg(node, 'mul'), '*', func)
def visitDiv(self, node, func=None):
self.visitBinary(node.left, node.right, aug_msg(node, 'div'), '/', func)
def visitInvert(self, node, func=None): # XXX visitUnarySub merge, template function __invert?
if unboxable(self.gx, self.mergeinh[node.expr]):
self.visitm('~', node.expr, func)
else:
self.visitCallFunc(inode(self.gx, node.expr).fakefunc, func)
def visitFloorDiv(self, node, func=None):
self.visitBinary(node.left, node.right, aug_msg(node, 'floordiv'), '//', func)
def visitPower(self, node, func=None):
self.power(node.left, node.right, None, func)
def power(self, left, right, mod, func=None):
inttype = set([(def_class(self.gx, 'int_'), 0)]) # XXX merge
if self.mergeinh[left] == inttype and self.mergeinh[right] == inttype:
if not isinstance(right, Const):
error("pow(int, int) returns int after compilation", self.gx, left, warning=True, mv=self.mv)
if mod:
self.visitm('__power(', left, ', ', right, ', ', mod, ')', func)
else:
self.visitm('__power(', left, ', ', right, ')', func)
def visitSub(self, node, func=None):
self.visitBinary(node.left, node.right, aug_msg(node, 'sub'), '-', func)
def visitBinary(self, left, right, middle, inline, func=None): # XXX cleanup please
ltypes = self.mergeinh[left]
rtypes = self.mergeinh[right]
ul, ur = unboxable(self.gx, ltypes), unboxable(self.gx, rtypes)
inttype = set([(def_class(self.gx, 'int_'), 0)]) # XXX new type?
floattype = set([(def_class(self.gx, 'float_'), 0)]) # XXX new type?
# --- inline mod/div
# XXX C++ knows %, /, so we can overload?
if (floattype.intersection(ltypes) or inttype.intersection(ltypes)):
if inline in ['%'] or (inline in ['/'] and not (floattype.intersection(ltypes) or floattype.intersection(rtypes))):
                if def_class(self.gx, 'complex') not in (t[0] for t in rtypes):  # XXX
self.append({'%': '__mods', '/': '__divs'}[inline] + '(')
self.visit(left, func)
self.append(', ')
self.visit(right, func)
self.append(')')
return
# --- inline floordiv
if (inline and ul and ur) and inline in ['//']:
self.append({'//': '__floordiv'}[inline] + '(')
self.visit(left, func)
self.append(',')
self.visit(right, func)
self.append(')')
return
# --- beauty fix for '1 +- nj' notation
if inline in ['+', '-'] and isinstance(right, Const) and isinstance(right.value, complex):
if floattype.intersection(ltypes) or inttype.intersection(ltypes):
self.append('mcomplex(')
self.visit(left, func)
self.append(', ' + {'+': '', '-': '-'}[inline] + str(right.value.imag) + ')')
return
# --- inline other
if inline and ((ul and ur) or not middle or (isinstance(left, Name) and left.name == 'None') or (isinstance(right, Name) and right.name == 'None')): # XXX not middle, cleanup?
self.append('(')
self.visit(left, func)
self.append(inline)
self.visit(right, func)
self.append(')')
return
# --- 'a.__mul__(b)': use template to call to b.__mul__(a), while maintaining evaluation order
if inline in ['+', '*', '-', '/'] and ul and not ur:
self.append('__' + {'+': 'add', '*': 'mul', '-': 'sub', '/': 'div'}[inline] + '2(')
self.visit(left, func)
self.append(', ')
self.visit(right, func)
self.append(')')
return
# --- default: left, connector, middle, right
argtypes = ltypes | rtypes
self.append('(')
if middle == '__add__':
self.visit_conv(left, argtypes, func)
else:
self.visit(left, func)
self.append(')')
self.append(self.connector(left, func) + middle + '(')
if middle == '__add__':
self.visit_conv(right, argtypes, func)
else:
self.visit(right, func)
self.append(')')
def do_compare(self, left, right, middle, inline, func=None, prefix=''):
ltypes = self.mergeinh[left]
rtypes = self.mergeinh[right]
argtypes = ltypes | rtypes
ul, ur = unboxable(self.gx, ltypes), unboxable(self.gx, rtypes)
# --- inline other
if inline and ((ul and ur) or not middle or (isinstance(left, Name) and left.name == 'None') or (isinstance(right, Name) and right.name == 'None')): # XXX not middle, cleanup?
self.append('(')
self.visit2(left, argtypes, middle, func)
self.append(inline)
self.visit2(right, argtypes, middle, func)
self.append(')')
return
# --- prefix '!'
postfix = ''
if prefix:
self.append('(' + prefix)
postfix = ')'
# --- comparison
if middle in ['__eq__', '__ne__', '__gt__', '__ge__', '__lt__', '__le__']:
self.append(middle[:-2] + '(')
self.visit2(left, argtypes, middle, func)
self.append(', ')
self.visit2(right, argtypes, middle, func)
self.append(')' + postfix)
return
# --- default: left, connector, middle, right
self.append('(')
self.visit2(left, argtypes, middle, func)
self.append(')')
if middle == '==':
self.append('==(')
else:
self.append(self.connector(left, func) + middle + '(')
self.visit2(right, argtypes, middle, func)
self.append(')' + postfix)
def visit2(self, node, argtypes, middle, func): # XXX use temp vars in comparisons, e.g. (t1=fun())
if node in self.mv.tempcount:
if node in self.done:
self.append(self.mv.tempcount[node])
else:
self.visitm('(' + self.mv.tempcount[node] + '=', node, ')', func)
self.done.add(node)
elif middle == '__contains__':
self.visit(node, func)
else:
self.visit_conv(node, argtypes, func)
def visitUnarySub(self, node, func=None):
self.visitm('(', func)
if unboxable(self.gx, self.mergeinh[node.expr]):
self.visitm('-', node.expr, func)
else:
self.visitCallFunc(inode(self.gx, node.expr).fakefunc, func)
self.visitm(')', func)
def visitUnaryAdd(self, node, func=None):
self.visitm('(', func)
if unboxable(self.gx, self.mergeinh[node.expr]):
self.visitm('+', node.expr, func)
else:
self.visitCallFunc(inode(self.gx, node.expr).fakefunc, func)
self.visitm(')', func)
def library_func(self, funcs, modname, clname, funcname):
for func in funcs:
if not func.mv.module.builtin or func.mv.module.ident != modname:
continue
if clname is not None:
if not func.parent or func.parent.ident != clname:
continue
return func.ident == funcname
def add_args_arg(self, node, funcs):
''' append argument that describes which formals are actually filled in '''
if self.library_func(funcs, 'datetime', 'time', 'replace') or \
self.library_func(funcs, 'datetime', 'datetime', 'replace'):
formals = funcs[0].formals[1:] # skip self
formal_pos = dict((v, k) for k, v in enumerate(formals))
positions = []
for i, arg in enumerate(node.args):
if isinstance(arg, Keyword):
positions.append(formal_pos[arg.name])
else:
positions.append(i)
if positions:
self.append(str(reduce(lambda a, b: a | b, ((1 << x) for x in positions))) + ', ')
else:
self.append('0, ')
def visitCallFunc(self, node, func=None, argtypes=None):
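        # central call emitter: warns about unsupported library usage, then
        # dispatches on the call kind (anonymous function, constructor, parent
        # constructor, direct call to a builtin or global, or method call)
        # before emitting the argument list via visit_callfunc_args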
objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analyze_callfunc(self.gx, node, merge=self.gx.merged_inh)
funcs = callfunc_targets(self.gx, node, self.gx.merged_inh)
if self.library_func(funcs, 're', None, 'findall') or \
self.library_func(funcs, 're', 're_object', 'findall'):
error("'findall' does not work with groups (use 'finditer' instead)", self.gx, node, warning=True, mv=self.mv)
if self.library_func(funcs, 'socket', 'socket', 'settimeout') or \
self.library_func(funcs, 'socket', 'socket', 'gettimeout'):
error("socket.set/gettimeout do not accept/return None", self.gx, node, warning=True, mv=self.mv)
if self.library_func(funcs, 'builtin', None, 'map') and len(node.args) > 2:
error("default fillvalue for 'map' becomes 0 for integers", self.gx, node, warning=True, mv=self.mv)
if self.library_func(funcs, 'itertools', None, 'izip_longest'):
error("default fillvalue for 'izip_longest' becomes 0 for integers", self.gx, node, warning=True, mv=self.mv)
if self.library_func(funcs, 'struct', None, 'unpack'):
error("struct.unpack should be used as follows: 'a, .. = struct.unpack(..)'", self.gx, node, warning=True, mv=self.mv)
if self.library_func(funcs, 'array', 'array', '__init__'):
if not node.args or not isinstance(node.args[0], Const) or node.args[0].value not in 'cbBhHiIlLfd':
error("non-constant or unsupported type code", self.gx, node, warning=True, mv=self.mv)
if self.library_func(funcs, 'builtin', None, 'id'):
if struct.calcsize("P") == 8 and struct.calcsize('i') == 4 and not self.gx.longlong:
error("return value of 'id' does not fit in 32-bit integer (try shedskin -l)", self.gx, node, warning=True, mv=self.mv)
nrargs = len(node.args)
if isinstance(func, Function) and func.largs:
nrargs = func.largs
# --- target expression
if node.node in self.mergeinh and [t for t in self.mergeinh[node.node] if isinstance(t[0], Function)]: # anonymous function
self.visitm(node.node, '(', func)
elif constructor:
ts = self.namer.nokeywords(nodetypestr(self.gx, node, func, mv=self.mv))
if ts == 'complex ':
self.append('mcomplex(')
constructor = False # XXX
else:
if argtypes is not None: # XXX merge instance_new
ts = typestr(self.gx, argtypes, mv=self.mv)
if ts.startswith('pyseq') or ts.startswith('pyiter'): # XXX
argtypes = self.gx.merged_inh[node]
ts = typestr(self.gx, argtypes, mv=self.mv)
self.append('(new ' + ts[:-2] + '(')
if funcs and len(funcs[0].formals) == 1 and not funcs[0].mv.module.builtin:
self.append('1') # don't call default constructor
elif parent_constr:
cl = lookup_class(node.node.expr, self.mv)
self.append(self.namer.namespace_class(cl) + '::' + node.node.attrname + '(')
elif direct_call: # XXX no namespace (e.g., math.pow), check nr of args
if ident == 'float' and node.args and self.mergeinh[node.args[0]] == set([(def_class(self.gx, 'float_'), 0)]):
self.visit(node.args[0], func)
return
if ident in ['abs', 'int', 'float', 'str', 'dict', 'tuple', 'list', 'type', 'cmp', 'sum', 'zip']:
self.append('__' + ident + '(')
elif ident in ['min', 'max', 'iter', 'round']:
self.append('___' + ident + '(')
elif ident == 'bool':
self.bool_test(node.args[0], func, always_wrap=True)
return
elif ident == 'pow' and direct_call.mv.module.ident == 'builtin':
if nrargs == 3:
third = node.args[2]
else:
third = None
self.power(node.args[0], node.args[1], third, func)
return
elif ident == 'hash':
self.append('hasher(') # XXX cleanup
elif ident == '__print': # XXX
self.append('print(')
elif ident == 'isinstance' and node.args[0] not in self.gx.filters:
self.append('True')
return
else:
if isinstance(node.node, Name):
if isinstance(func, Function) and isinstance(func.parent, Class) and ident in func.parent.funcs: # masked by method
self.append(funcs[0].mv.module.full_path() + '::')
self.append(self.cpp_name(funcs[0]))
else:
self.visit(node.node)
self.append('(')
elif method_call:
for cl, _ in self.mergeinh[objexpr]:
if isinstance(cl, Class) and cl.ident != 'none' and ident not in cl.funcs:
conv = {'int_': 'int', 'float_': 'float', 'str_': 'str', 'class_': 'class', 'none': 'none'}
clname = conv.get(cl.ident, cl.ident)
error("class '%s' has no method '%s'" % (clname, ident), self.gx, node, warning=True, mv=self.mv)
if isinstance(cl, Class) and ident in cl.staticmethods:
error("staticmethod '%s' called without using class name" % ident, self.gx, node, warning=True, mv=self.mv)
return
            # tuple2.__getitem__ -> __getfirst__/__getsecond__
if ident == '__getitem__' and isinstance(node.args[0], Const) and node.args[0].value in (0, 1) and self.only_classes(objexpr, ('tuple2',)):
self.visit(node.node.expr, func)
self.append('->%s()' % ['__getfirst__', '__getsecond__'][node.args[0].value])
return
if ident == '__call__':
self.visitm(node.node, '->__call__(', func)
elif ident == 'is_integer' and (def_class(self.gx, 'float_'), 0) in self.mergeinh[node.node.expr]:
self.visitm('__ss_is_integer(', node.node.expr, ')', func)
return
else:
self.visitm(node.node, '(', func)
else:
if ident:
error("unresolved call to '" + ident + "'", self.gx, node, mv=self.mv, warning=True)
else:
error("unresolved call (possibly caused by method passing, which is currently not allowed)", self.gx, node, mv=self.mv, warning=True)
return
if not funcs:
if constructor:
self.append(')')
self.append(')')
return
self.visit_callfunc_args(funcs, node, func)
self.append(')')
if constructor:
self.append(')')
def bool_test(self, node, func, always_wrap=False):
wrapper = always_wrap or not self.only_classes(node, ('int_', 'bool_'))
if node in self.gx.bool_test_only:
self.visit(node, func)
elif wrapper:
self.append('___bool(')
self.visit(node, func)
is_func = bool([1 for t in self.mergeinh[node] if isinstance(t[0], Function)])
self.append(('', '!=NULL')[is_func] + ')') # XXX
else:
self.bool_wrapper[node] = True
self.visit(node, func)
def visit_callfunc_args(self, funcs, node, func):
objexpr, ident, direct_call, method_call, constructor, parent_constr, anon_func = analyze_callfunc(self.gx, node, merge=self.gx.merged_inh)
target = funcs[0] # XXX
print_function = self.library_func(funcs, 'builtin', None, '__print')
castnull = False # XXX
if (self.library_func(funcs, 'random', None, 'seed') or
self.library_func(funcs, 'random', None, 'triangular') or
self.library_func(funcs, 'random', 'Random', 'seed') or
self.library_func(funcs, 'random', 'Random', 'triangular')):
castnull = True
for itertools_func in ['islice', 'izip_longest', 'permutations']:
if self.library_func(funcs, 'itertools', None, itertools_func):
castnull = True
break
for f in funcs:
if len(f.formals) != len(target.formals):
error('calling functions with different numbers of arguments', self.gx, node, warning=True, mv=self.mv)
self.append(')')
return
if target.inherited_from:
target = target.inherited_from
pairs, rest, err = connect_actual_formal(self.gx, node, target, parent_constr, merge=self.mergeinh)
if err and not target.mv.module.builtin: # XXX
error('call with incorrect number of arguments', self.gx, node, warning=True, mv=self.mv)
if isinstance(func, Function) and func.lambdawrapper:
rest = func.largs
if target.node.varargs:
self.append('%d' % rest)
if rest or pairs:
self.append(', ')
double = False
if ident in ['min', 'max']:
for arg in node.args:
if arg in self.mergeinh and (def_class(self.gx, 'float_'), 0) in self.mergeinh[arg]:
double = True
self.add_args_arg(node, funcs)
if isinstance(func, Function) and func.largs is not None:
kw = [p for p in pairs if p[1].name.startswith('__kw_')]
nonkw = [p for p in pairs if not p[1].name.startswith('__kw_')]
pairs = kw + nonkw[:func.largs]
for (arg, formal) in pairs:
cast = False
builtin_types = self.cast_to_builtin(arg, func, formal, target, method_call, objexpr)
formal_types = builtin_types or self.mergeinh[formal]
if double and self.mergeinh[arg] == set([(def_class(self.gx, 'int_'), 0)]):
cast = True
self.append('((double)(')
elif castnull and isinstance(arg, Name) and arg.name == 'None':
cast = True
self.append('((void *)(')
if (print_function or self.library_func(funcs, 'struct', None, 'pack')) and not formal.name.startswith('__kw_'):
types = [t[0].ident for t in self.mergeinh[arg]]
if 'float_' in types or 'int_' in types or 'bool_' in types or 'complex' in types:
cast = True
self.append('___box((')
if arg in target.mv.defaults:
if self.mergeinh[arg] == set([(def_class(self.gx, 'none'), 0)]):
self.append('NULL')
elif target.mv.module == self.mv.module:
self.append('default_%d' % (target.mv.defaults[arg][0]))
else:
self.append('%s::default_%d' % (target.mv.module.full_path(), target.mv.defaults[arg][0]))
elif arg in self.consts:
self.append(self.consts[arg])
else:
if constructor and ident in ['set', 'frozenset'] and nodetypestr(self.gx, arg, func, mv=self.mv) in ['list<void *> *', 'tuple<void *> *', 'pyiter<void *> *', 'pyseq<void *> *', 'pyset<void *>']: # XXX
pass
elif not builtin_types and target.mv.module.builtin:
self.visit(arg, func)
else:
self.visit_conv(arg, formal_types, func)
if cast:
self.append('))')
if (arg, formal) != pairs[-1]:
self.append(', ')
if constructor and ident == 'frozenset':
if pairs:
self.append(',')
self.append('1')
def cast_to_builtin(self, arg, func, formal, target, method_call, objexpr):
# type inference cannot deduce all necessary casts to builtin formals
vars = {'u': 'unit', 'v': 'value', 'o': None}
if target.mv.module.builtin and method_call and formal.name in vars and target.parent.ident in ('list', 'dict', 'set'):
subtypes = self.subtypes(self.mergeinh[objexpr], vars[formal.name])
if nodetypestr(self.gx, arg, func, mv=self.mv) != typestr(self.gx, subtypes, mv=self.mv):
return subtypes
def cast_to_builtin2(self, arg, func, objexpr, msg, formal_nr):
# shortcut for outside of visitCallFunc XXX merge with visitCallFunc?
cls = [t[0] for t in self.mergeinh[objexpr] if isinstance(t[0], Class)]
if cls:
cl = cls.pop()
if msg in cl.funcs:
target = cl.funcs[msg]
if formal_nr < len(target.formals):
formal = target.vars[target.formals[formal_nr]]
builtin_types = self.cast_to_builtin(arg, func, formal, target, True, objexpr)
if builtin_types:
return typestr(self.gx, builtin_types, mv=self.mv)
def visitReturn(self, node, func=None):
if func.isGenerator:
self.output('__stop_iteration = true;')
func2 = nodetypestr(self.gx, func.retnode.thing, mv=self.mv)[7:-3] # XXX meugh
self.output('return __zero<%s>();' % func2)
return
self.start('return ')
self.visit_conv(node.value, self.mergeinh[func.retnode.thing], func)
self.eol()
def tuple_assign(self, lvalue, rvalue, func):
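        # unpacking assignment: store the right-hand side in a temporary and
        # read elements back via __getfirst__/__getsecond__ (tuple2),
        # __getfast__ (list/tuple/str) or generic __getitem__(i)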
temp = self.mv.tempcount[lvalue]
if isinstance(lvalue, tuple):
nodes = lvalue
else:
nodes = lvalue.nodes
# --- nested unpacking assignment: a, (b,c) = d, e
if [item for item in nodes if not isinstance(item, AssName)]:
self.start(temp + ' = ')
if isinstance(rvalue, str):
self.append(rvalue)
else:
self.visit(rvalue, func)
self.eol()
for i, item in enumerate(nodes):
selector = self.get_selector(temp, item, i)
if isinstance(item, AssName):
self.output('%s = %s;' % (item.name, selector))
elif isinstance(item, (AssTuple, AssList)): # recursion
self.tuple_assign(item, selector, func)
elif isinstance(item, Subscript):
self.assign_pair(item, selector, func)
elif isinstance(item, AssAttr):
self.assign_pair(item, selector, func)
self.eol(' = ' + selector)
# --- non-nested unpacking assignment: a,b,c = d
else:
self.start()
self.visitm(temp, ' = ', rvalue, func)
self.eol()
for i, item in enumerate(lvalue.nodes):
self.start()
self.visitm(item, ' = ', self.get_selector(temp, item, i), func)
self.eol()
def one_class(self, node, names):
for clname in names:
if self.only_classes(node, (clname,)):
return True
return False
def get_selector(self, temp, item, i):
rvalue_node = self.gx.item_rvalue[item]
sel = '__getitem__(%d)' % i
if i < 2 and self.only_classes(rvalue_node, ('tuple2',)):
sel = ['__getfirst__()', '__getsecond__()'][i]
elif self.one_class(rvalue_node, ('list', 'str_', 'tuple')):
sel = '__getfast__(%d)' % i
return '%s->%s' % (temp, sel)
def subs_assign(self, lvalue, func):
if len(lvalue.subs) > 1:
subs = inode(self.gx, lvalue.expr).faketuple
else:
subs = lvalue.subs[0]
self.visitm(lvalue.expr, self.connector(lvalue.expr, func), '__setitem__(', subs, ', ', func)
def struct_unpack_cpp(self, node, func):
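        # 'a, b = struct.unpack(fmt, s)' is lowered to a sequence of
        # __struct__::unpack_<type>() calls that advance a position variable
        # through the buffer; returns True if the assignment was handled here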
struct_unpack = self.gx.struct_unpack.get(node)
if struct_unpack:
sinfo, tvar, tvar_pos = struct_unpack
self.start()
self.visitm(tvar, ' = ', node.expr.args[1], func)
self.eol()
self.output('%s = 0;' % tvar_pos)
hop = 0
for (o, c, t, d) in sinfo:
self.start()
expr = "__struct__::unpack_%s('%c', '%c', %d, %s, &%s)" % (t, o, c, d, tvar, tvar_pos)
if c == 'x' or (d == 0 and c != 's'):
self.visitm(expr, func)
else:
n = list(node.nodes[0])[hop]
hop += 1
if isinstance(n, Subscript): # XXX merge
self.subs_assign(n, func)
self.visitm(expr, ')', func)
elif isinstance(n, AssName):
self.visitm(n, ' = ', expr, func)
elif isinstance(n, AssAttr):
self.visitAssAttr(n, func)
self.visitm(' = ', expr, func)
self.eol()
return True
return False
def visitAssign(self, node, func=None):
if self.struct_unpack_cpp(node, func):
return
# temp vars
if len(node.nodes) > 1 or isinstance(node.expr, Tuple):
if isinstance(node.expr, Tuple):
if [n for n in node.nodes if isinstance(n, AssTuple)]: # XXX a,b=d[i,j]=..?
for child in node.expr.nodes:
if not (child, 0, 0) in self.gx.cnode: # (a,b) = (1,2): (1,2) never visited
continue
if not isinstance(child, Const) and not (isinstance(child, Name) and child.name == 'None'):
self.start(self.mv.tempcount[child] + ' = ')
self.visit(child, func)
self.eol()
elif not isinstance(node.expr, Const) and not (isinstance(node.expr, Name) and node.expr.name == 'None'):
self.start(self.mv.tempcount[node.expr] + ' = ')
self.visit(node.expr, func)
self.eol()
# a = (b,c) = .. = expr
for left in node.nodes:
pairs = assign_rec(left, node.expr)
for (lvalue, rvalue) in pairs:
self.start('') # XXX remove?
# expr[expr] = expr
if isinstance(lvalue, Subscript) and not isinstance(lvalue.subs[0], Sliceobj):
self.assign_pair(lvalue, rvalue, func)
# expr.attr = expr
elif isinstance(lvalue, AssAttr):
lcp = lowest_common_parents(polymorphic_t(self.gx, self.mergeinh[lvalue.expr]))
# property
if len(lcp) == 1 and isinstance(lcp[0], Class) and lvalue.attrname in lcp[0].properties:
self.visitm(lvalue.expr, '->' + self.cpp_name(lcp[0].properties[lvalue.attrname][1]) + '(', rvalue, ')', func)
elif lcp and isinstance(lcp[0], Class):
var = lookup_var(lvalue.attrname, lcp[0], mv=self.mv)
vartypes = set()
if var:
vartypes = self.mergeinh[var]
self.visit(lvalue, func)
self.append(' = ')
self.visit_conv(rvalue, vartypes, func)
else:
self.visitm(lvalue, ' = ', rvalue, func)
self.eol()
# name = expr
elif isinstance(lvalue, AssName):
vartypes = self.mergeinh[lookup_var(lvalue.name, func, mv=self.mv)]
self.visit(lvalue, func)
self.append(' = ')
self.visit_conv(rvalue, vartypes, func)
self.eol()
# (a,(b,c), ..) = expr
elif isinstance(lvalue, (AssTuple, AssList)):
self.tuple_assign(lvalue, rvalue, func)
# expr[a:b] = expr
elif isinstance(lvalue, Slice):
if isinstance(rvalue, Slice) and lvalue.upper == rvalue.upper == None and lvalue.lower == rvalue.lower == None:
self.visitm(lvalue.expr, self.connector(lvalue.expr, func), 'units = ', rvalue.expr, self.connector(rvalue.expr, func), 'units', func)
else: # XXX let visitCallFunc(fakefunc) use cast_to_builtin
fakefunc = inode(self.gx, lvalue.expr).fakefunc
self.visitm('(', fakefunc.node.expr, ')->__setslice__(', fakefunc.args[0], ',', fakefunc.args[1], ',', fakefunc.args[2], ',', fakefunc.args[3], ',', func)
self.visit_conv(fakefunc.args[4], self.mergeinh[lvalue.expr], func)
self.append(')')
self.eol()
# expr[a:b:c] = expr
elif isinstance(lvalue, Subscript) and isinstance(lvalue.subs[0], Sliceobj): # XXX see comment above
fakefunc = inode(self.gx, lvalue.expr).fakefunc
self.visitm('(', fakefunc.node.expr, ')->__setslice__(', fakefunc.args[0], ',', fakefunc.args[1], ',', fakefunc.args[2], ',', fakefunc.args[3], ',', func)
self.visit_conv(fakefunc.args[4], self.mergeinh[lvalue.expr], func)
self.append(')')
self.eol()
def assign_pair(self, lvalue, rvalue, func):
self.start('')
# expr[expr] = expr
if isinstance(lvalue, Subscript) and not isinstance(lvalue.subs[0], Sliceobj):
self.subs_assign(lvalue, func)
if isinstance(rvalue, str):
self.append(rvalue)
elif rvalue in self.mv.tempcount:
self.append(self.mv.tempcount[rvalue])
else:
cast = self.cast_to_builtin2(rvalue, func, lvalue.expr, '__setitem__', 2)
if cast:
self.append('((%s)' % cast)
self.visit(rvalue, func)
if cast:
self.append(')')
self.append(')')
self.eol()
# expr.x = expr
elif isinstance(lvalue, AssAttr):
self.visitAssAttr(lvalue, func)
def do_lambdas(self, declare):
for l in self.mv.lambdas.values():
if l.ident not in self.mv.funcs:
self.visitFunction(l.node, declare=declare)
def do_listcomps(self, declare):
for (listcomp, lcfunc, func) in self.mv.listcomps: # XXX cleanup
if lcfunc.mv.module.builtin:
continue
parent = func
while isinstance(parent, Function) and parent.listcomp:
parent = parent.parent
if isinstance(parent, Function):
if not self.inhcpa(parent) or parent.inherited:
continue
genexpr = listcomp in self.gx.genexp_to_lc.values()
if declare:
self.listcomp_head(listcomp, True, genexpr)
elif genexpr:
self.genexpr_class(listcomp, declare)
else:
self.listcomp_func(listcomp)
def listcomp_head(self, node, declare, genexpr):
lcfunc, func = self.listcomps[node]
args = [a + b for a, b in self.lc_args(lcfunc, func)]
ts = nodetypestr(self.gx, node, lcfunc, mv=self.mv)
if not ts.endswith('*'):
ts += ' '
if genexpr:
self.genexpr_class(node, declare)
else:
self.output('static inline ' + ts + lcfunc.ident + '(' + ', '.join(args) + ')' + [' {', ';'][declare])
def lc_args(self, lcfunc, func):
args = []
for name in lcfunc.misses:
if lookup_var(name, func, mv=self.mv).parent:
args.append((nodetypestr(self.gx, lookup_var(name, lcfunc, mv=self.mv), lcfunc, mv=self.mv), self.cpp_name(name)))
return args
def listcomp_func(self, node):
lcfunc, func = self.listcomps[node]
self.listcomp_head(node, False, False)
self.indent()
self.local_defs(lcfunc)
self.output(nodetypestr(self.gx, node, lcfunc, mv=self.mv) + '__ss_result = new ' + nodetypestr(self.gx, node, lcfunc, mv=self.mv)[:-2] + '();\n')
self.listcomp_rec(node, node.quals, lcfunc, False)
self.output('return __ss_result;')
self.deindent()
self.output('}\n')
def genexpr_class(self, node, declare):
lcfunc, func = self.listcomps[node]
args = self.lc_args(lcfunc, func)
func1 = lcfunc.ident + '(' + ', '.join(a + b for a, b in args) + ')'
func2 = nodetypestr(self.gx, node, lcfunc, mv=self.mv)[7:-3]
if declare:
ts = nodetypestr(self.gx, node, lcfunc, mv=self.mv)
if not ts.endswith('*'):
ts += ' '
self.output('class ' + lcfunc.ident + ' : public ' + ts[:-2] + ' {')
self.output('public:')
self.indent()
self.local_defs(lcfunc)
for a, b in args:
self.output(a + b + ';')
self.output('int __last_yield;\n')
self.output(func1 + ';')
self.output(func2 + ' __get_next();')
self.deindent()
self.output('};\n')
else:
self.output(lcfunc.ident + '::' + func1 + ' {')
for a, b in args:
self.output(' this->%s = %s;' % (b, b))
self.output(' __last_yield = -1;')
self.output('}\n')
self.output(func2 + ' ' + lcfunc.ident + '::__get_next() {')
self.indent()
self.output('if(!__last_yield) goto __after_yield_0;')
self.output('__last_yield = 0;\n')
self.listcomp_rec(node, node.quals, lcfunc, True)
self.output('__stop_iteration = true;')
self.output('return __zero<%s>();' % func2)
self.deindent()
self.output('}\n')
def local_defs(self, func):
pairs = []
for (name, var) in func.vars.items():
if not var.invisible and (not hasattr(func, 'formals') or name not in func.formals): # XXX
pairs.append((nodetypestr(self.gx, var, func, mv=self.mv), self.cpp_name(var)))
self.output(self.indentation.join(self.group_declarations(pairs)))
# --- nested for loops: loop headers, if statements
def listcomp_rec(self, node, quals, lcfunc, genexpr):
if not quals:
if genexpr:
self.start('__result = ')
self.visit(node.expr, lcfunc)
self.eol()
self.output('return __result;')
self.start('__after_yield_0:')
elif len(node.quals) == 1 and not is_fastfor(node.quals[0]) and not self.fastenum(node.quals[0]) and not self.fastzip2(node.quals[0]) and not node.quals[0].ifs and self.one_class(node.quals[0].list, ('tuple', 'list', 'str_', 'dict', 'set')):
self.start('__ss_result->units[' + self.mv.tempcount[node.quals[0].list] + '] = ')
self.visit(node.expr, lcfunc)
else:
self.start('__ss_result->append(')
self.visit(node.expr, lcfunc)
self.append(')')
self.eol()
return
qual = quals[0]
# iter var
if isinstance(qual.assign, AssName):
var = lookup_var(qual.assign.name, lcfunc, mv=self.mv)
else:
var = lookup_var(self.mv.tempcount[qual.assign], lcfunc, mv=self.mv)
iter = self.cpp_name(var)
if is_fastfor(qual):
self.do_fastfor(node, qual, quals, iter, lcfunc, genexpr)
elif self.fastenum(qual):
self.do_fastenum(qual, lcfunc, genexpr)
self.listcompfor_body(node, quals, iter, lcfunc, True, genexpr)
elif self.fastzip2(qual):
self.do_fastzip2(qual, lcfunc, genexpr)
self.listcompfor_body(node, quals, iter, lcfunc, True, genexpr)
elif self.fastdictiter(qual):
self.do_fastdictiter(qual, lcfunc, genexpr)
self.listcompfor_body(node, quals, iter, lcfunc, True, genexpr)
else:
if not isinstance(qual.list, Name):
itervar = self.mv.tempcount[qual]
self.start('')
self.visitm(itervar, ' = ', qual.list, lcfunc)
self.eol()
else:
itervar = self.cpp_name(qual.list.name)
pref, tail = self.forin_preftail(qual)
if len(node.quals) == 1 and not qual.ifs and not genexpr:
if self.one_class(qual.list, ('list', 'tuple', 'str_', 'dict', 'set')):
self.output('__ss_result->resize(len(' + itervar + '));')
self.start('FOR_IN' + pref + '(' + iter + ',' + itervar + ',' + tail)
print >>self.out, self.line + ')'
self.listcompfor_body(node, quals, iter, lcfunc, False, genexpr)
def listcompfor_body(self, node, quals, iter, lcfunc, skip, genexpr):
qual = quals[0]
if not skip:
self.indent()
if isinstance(qual.assign, (AssTuple, AssList)):
self.tuple_assign(qual.assign, iter, lcfunc)
# if statements
if qual.ifs:
self.start('if (')
self.indent()
for cond in qual.ifs:
self.bool_test(cond.test, lcfunc)
if cond != qual.ifs[-1]:
self.append(' && ')
self.append(') {')
print >>self.out, self.line
# recurse
self.listcomp_rec(node, quals[1:], lcfunc, genexpr)
# --- nested for loops: loop tails
if qual.ifs:
self.deindent()
self.output('}')
self.deindent()
self.output('END_FOR\n')
def visitGenExpr(self, node, func=None):
self.visit(self.gx.genexp_to_lc[node], func)
def visitListComp(self, node, func=None):
lcfunc, _ = self.listcomps[node]
args = []
temp = self.line
for name in lcfunc.misses:
var = lookup_var(name, func, mv=self.mv)
if var.parent:
if name == 'self' and not func.listcomp: # XXX parent?
args.append('this')
else:
args.append(self.cpp_name(var))
self.line = temp
if node in self.gx.genexp_to_lc.values():
self.append('new ')
self.append(lcfunc.ident + '(' + ', '.join(args) + ')')
def visitSubscript(self, node, func=None):
if node.flags == 'OP_DELETE':
self.start()
if isinstance(node.subs[0], Sliceobj):
self.visitCallFunc(inode(self.gx, node.expr).fakefunc, func)
else:
self.visitCallFunc(inode(self.gx, node.expr).fakefunc, func)
self.eol()
else:
self.visitCallFunc(inode(self.gx, node.expr).fakefunc, func)
def visitMod(self, node, func=None):
# --- non-str % ..
if [t for t in self.gx.merged_inh[node.left] if t[0].ident != 'str_']:
self.visitBinary(node.left, node.right, '__mod__', '%', func)
return
# --- str % non-constant dict/tuple
if not isinstance(node.right, (Tuple, Dict)) and node.right in self.gx.merged_inh: # XXX
if [t for t in self.gx.merged_inh[node.right] if t[0].ident == 'dict']:
self.visitm('__moddict(', node.left, ', ', node.right, ')', func)
return
elif [t for t in self.gx.merged_inh[node.right] if t[0].ident in ['tuple', 'tuple2']]:
self.visitm('__modtuple(', node.left, ', ', node.right, ')', func)
return
# --- str % constant-dict:
        if isinstance(node.right, Dict): # XXX no str keys
self.visitm('__modcd(', node.left, ', ', 'new list<str *>(%d, ' % len(node.right.items), func)
self.append(', '.join(('new str("%s")' % key.value) for key, value in node.right.items))
self.append(')')
nodes = [value for (key, value) in node.right.items]
else:
self.visitm('__modct(', node.left, func)
# --- str % constant-tuple
if isinstance(node.right, Tuple):
nodes = node.right.nodes
# --- str % non-tuple/non-dict
else:
nodes = [node.right]
self.append(', %d' % len(nodes))
# --- visit nodes, boxing scalars
for n in nodes:
if [clname for clname in ('float_', 'int_', 'bool_', 'complex') if def_class(self.gx, clname) in [t[0] for t in self.mergeinh[n]]]:
self.visitm(', ___box(', n, ')', func)
else:
self.visitm(', ', n, func)
self.append(')')
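    # Illustrative mapping (emitted C++ names as used above, example values
    # hypothetical): a constant-tuple format such as '%s=%d' % (k, v) is compiled
    # to __modct(new str("%s=%d"), 2, k, ___box(v)) with scalars boxed, while
    # formatting with a dict argument goes through __moddict(...) and a
    # non-constant tuple through __modtuple(...).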
def visitPrintnl(self, node, func=None):
self.visitPrint(node, func, print_space=False)
def visitPrint(self, node, func=None, print_space=True):
self.start('print2(')
if node.dest:
self.visitm(node.dest, ', ', func)
else:
self.append('NULL,')
if print_space:
self.append('1,')
else:
self.append('0,')
self.append(str(len(node.nodes)))
for n in node.nodes:
types = [t[0].ident for t in self.mergeinh[n]]
if 'float_' in types or 'int_' in types or 'bool_' in types or 'complex' in types:
self.visitm(', ___box(', n, ')', func)
else:
self.visitm(', ', n, func)
self.eol(')')
def visitGetattr(self, node, func=None):
cl, module = lookup_class_module(node.expr, inode(self.gx, node).mv, func)
# module.attr
if module:
self.append(module.full_path() + '::')
# class.attr: staticmethod
elif cl and node.attrname in cl.staticmethods:
ident = cl.ident
if cl.ident in ['dict', 'defaultdict']: # own namespace because of template vars
self.append('__' + cl.ident + '__::')
elif isinstance(node.expr, Getattr):
submodule = lookup_module(node.expr.expr, inode(self.gx, node).mv)
self.append(submodule.full_path() + '::' + ident + '::')
else:
self.append(ident + '::')
# class.attr
elif cl: # XXX merge above?
ident = cl.ident
if isinstance(node.expr, Getattr):
submodule = lookup_module(node.expr.expr, inode(self.gx, node).mv)
self.append(submodule.full_path() + '::' + cl.ident + '::')
else:
self.append(ident + '::')
# obj.attr
else:
checkcls = [] # XXX better to just inherit vars?
for t in self.mergeinh[node.expr]:
if isinstance(t[0], Class):
checkcls.extend(t[0].ancestors(True))
for cl in checkcls:
if not node.attrname in t[0].funcs and node.attrname in cl.parent.vars: # XXX
error("class attribute '" + node.attrname + "' accessed without using class name", self.gx, node, warning=True, mv=self.mv)
break
else:
if not self.mergeinh[node.expr] and not node.attrname.startswith('__'): # XXX
error('expression has no type', self.gx, node, warning=True, mv=self.mv)
elif not self.mergeinh[node] and not [cl for cl in checkcls if node.attrname in cl.funcs] and not node.attrname.startswith('__'): # XXX
error('expression has no type', self.gx, node, warning=True, mv=self.mv)
if not isinstance(node.expr, Name):
self.append('(')
if isinstance(node.expr, Name) and not lookup_var(node.expr.name, func, mv=self.mv): # XXX XXX
self.append(node.expr.name)
else:
self.visit(node.expr, func)
if not isinstance(node.expr, (Name)):
self.append(')')
self.append(self.connector(node.expr, func))
ident = node.attrname
# property
lcp = lowest_common_parents(polymorphic_t(self.gx, self.mergeinh[node.expr]))
if len(lcp) == 1 and node.attrname in lcp[0].properties:
self.append(self.cpp_name(lcp[0].properties[node.attrname][0]) + '()')
return
# getfast
if ident == '__getitem__' and self.one_class(node.expr, ('list', 'str_', 'tuple')):
ident = '__getfast__'
elif ident == '__getitem__' and len(lcp) == 1 and lcp[0].ident == 'array': # XXX merge into above
ident = '__getfast__'
self.append(self.attr_var_ref(node, ident))
def attr_var_ref(self, node, ident): # XXX blegh
lcp = lowest_common_parents(polymorphic_t(self.gx, self.mergeinh[node.expr]))
if len(lcp) == 1 and isinstance(lcp[0], Class) and node.attrname in lcp[0].vars and not node.attrname in lcp[0].funcs:
return self.cpp_name(lcp[0].vars[node.attrname])
return self.cpp_name(ident)
def visitAssAttr(self, node, func=None): # XXX merge with visitGetattr
if node.flags == 'OP_DELETE':
error("'del' has no effect without refcounting", self.gx, node, warning=True, mv=self.mv)
return
cl, module = lookup_class_module(node.expr, inode(self.gx, node).mv, func)
# module.attr
if module:
self.append(module.full_path() + '::')
# class.attr
elif cl:
if isinstance(node.expr, Getattr):
submodule = lookup_module(node.expr.expr, inode(self.gx, node).mv)
self.append(submodule.full_path() + '::' + cl.ident + '::')
else:
self.append(cl.ident + '::')
# obj.attr
else:
if isinstance(node.expr, Name) and not lookup_var(node.expr.name, func, mv=self.mv): # XXX
self.append(node.expr.name)
else:
self.visit(node.expr, func)
self.append(self.connector(node.expr, func)) # XXX '->'
self.append(self.attr_var_ref(node, node.attrname))
def visitAssName(self, node, func=None):
if node.flags == 'OP_DELETE':
error("'del' has no effect without refcounting", self.gx, node, warning=True, mv=self.mv)
return
self.append(self.cpp_name(node.name))
def visitName(self, node, func=None, add_cl=True):
map = {'True': 'True', 'False': 'False'}
if node in self.mv.lwrapper:
self.append(self.mv.lwrapper[node])
elif node.name == 'None':
self.append('NULL')
elif node.name == 'self':
lcp = lowest_common_parents(polymorphic_t(self.gx, self.mergeinh[node]))
if ((not func or func.listcomp or not isinstance(func.parent, Class)) or
(func and func.parent and func.isGenerator)): # XXX lookup_var?
self.append('self')
elif len(lcp) == 1 and not (lcp[0] is func.parent or lcp[0] in func.parent.ancestors()): # see test 160
self.mv.module.prop_includes.add(lcp[0].module) # XXX generalize
self.append('((' + self.namer.namespace_class(lcp[0]) + ' *)this)')
else:
self.append('this')
elif node.name in map:
self.append(map[node.name])
else: # XXX clean up
if not self.mergeinh[node] and not inode(self.gx, node).parent in self.gx.inheritance_relations:
error("variable '" + node.name + "' has no type", self.gx, node, warning=True, mv=self.mv)
self.append(node.name)
elif singletype(self.gx, node, Module):
self.append('__' + singletype(self.gx, node, Module).ident + '__')
else:
if ((def_class(self.gx, 'class_'), 0) in self.mergeinh[node] or
(add_cl and [t for t in self.mergeinh[node] if isinstance(t[0], StaticClass)])):
cl = lookup_class(node, self.mv)
if cl:
self.append(self.namer.namespace_class(cl, add_cl='cl_'))
else:
self.append(self.cpp_name(node.name))
else:
if isinstance(func, Class) and node.name in func.parent.vars: # XXX
self.append(func.ident + '::')
var = smart_lookup_var(node.name, func, mv=self.mv)
if var:
if node in self.gx.filters:
self.append('((%s *)' % self.gx.filters[node].ident)
if var.is_global:
self.append(self.module.full_path() + '::')
self.append(self.cpp_name(var.var))
if node in self.gx.filters:
self.append(')')
else:
self.append(node.name) # XXX
def expand_special_chars(self, value):
value = list(value)
replace = dict(['\\\\', '\nn', '\tt', '\rr', '\ff', '\bb', '\vv', '""'])
for i in range(len(value)):
if value[i] in replace:
value[i] = '\\' + replace[value[i]]
elif value[i] not in string.printable:
value[i] = '\\' + oct(ord(value[i])).zfill(4)[1:]
return ''.join(value)
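    # For instance (illustrative): expand_special_chars('a\tb"') yields the text
    # a\tb\" (with literal backslashes), and a non-printable byte such as chr(1)
    # becomes the octal escape \001, so visitConst below can embed the result in
    # a C "..." string literal.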
def visitConst(self, node, func=None):
if not self.filling_consts and isinstance(node.value, str):
self.append(self.get_constant(node))
return
if node.value is None:
self.append('NULL')
return
t = list(inode(self.gx, node).types())[0]
if t[0].ident == 'int_':
self.append('__ss_int(')
self.append(str(node.value))
if self.gx.longlong:
self.append('LL')
self.append(')')
elif t[0].ident == 'float_':
if str(node.value) in ['inf', '1.#INF', 'Infinity']:
self.append('INFINITY')
            elif str(node.value) in ['-inf', '-1.#INF', '-Infinity']:
self.append('-INFINITY')
else:
self.append(str(node.value))
elif t[0].ident == 'str_':
self.append('new str("%s"' % self.expand_special_chars(node.value))
if '\0' in node.value: # '\0' delimiter in C
self.append(', %d' % len(node.value))
self.append(')')
elif t[0].ident == 'complex':
self.append('mcomplex(%s, %s)' % (node.value.real, node.value.imag))
else:
self.append('new %s(%s)' % (t[0].ident, node.value))
def generate_code(gx):
for module in gx.modules.values():
if not module.builtin:
gv = GenerateVisitor(gx, module)
walk(module.ast, gv)
gv.out.close()
gv.header_file()
gv.out.close()
gv.insert_consts(declare=False)
gv.insert_consts(declare=True)
gv.insert_extras('.hpp')
gv.insert_extras('.cpp')
generate_makefile(gx)
|
shedskin/shedskin
|
shedskin/cpp.py
|
Python
|
gpl-3.0
| 114,119
|
[
"VisIt"
] |
fdddbea94458e2f9c10171e699a996c1ca22bef8f2a5d692c222ef21a8aae21e
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtCore, QtGui
from openlp.core.lib import build_icon, translate
from openlp.core.lib.ui import create_button_box
class Ui_SongUsageDetailDialog(object):
def setupUi(self, songUsageDetailDialog):
songUsageDetailDialog.setObjectName(u'songUsageDetailDialog')
songUsageDetailDialog.resize(609, 413)
self.verticalLayout = QtGui.QVBoxLayout(songUsageDetailDialog)
self.verticalLayout.setSpacing(8)
self.verticalLayout.setContentsMargins(8, 8, 8, 8)
self.verticalLayout.setObjectName(u'verticalLayout')
self.dateRangeGroupBox = QtGui.QGroupBox(songUsageDetailDialog)
self.dateRangeGroupBox.setObjectName(u'dateRangeGroupBox')
self.dateHorizontalLayout = QtGui.QHBoxLayout(self.dateRangeGroupBox)
self.dateHorizontalLayout.setSpacing(8)
self.dateHorizontalLayout.setContentsMargins(8, 8, 8, 8)
self.dateHorizontalLayout.setObjectName(u'dateHorizontalLayout')
self.fromDate = QtGui.QCalendarWidget(self.dateRangeGroupBox)
self.fromDate.setObjectName(u'fromDate')
self.dateHorizontalLayout.addWidget(self.fromDate)
self.toLabel = QtGui.QLabel(self.dateRangeGroupBox)
self.toLabel.setScaledContents(False)
self.toLabel.setAlignment(QtCore.Qt.AlignCenter)
self.toLabel.setObjectName(u'toLabel')
self.dateHorizontalLayout.addWidget(self.toLabel)
self.toDate = QtGui.QCalendarWidget(self.dateRangeGroupBox)
self.toDate.setObjectName(u'toDate')
self.dateHorizontalLayout.addWidget(self.toDate)
self.verticalLayout.addWidget(self.dateRangeGroupBox)
self.fileGroupBox = QtGui.QGroupBox(self.dateRangeGroupBox)
self.fileGroupBox.setObjectName(u'fileGroupBox')
self.fileHorizontalLayout = QtGui.QHBoxLayout(self.fileGroupBox)
self.fileHorizontalLayout.setSpacing(8)
self.fileHorizontalLayout.setContentsMargins(8, 8, 8, 8)
self.fileHorizontalLayout.setObjectName(u'fileHorizontalLayout')
self.fileLineEdit = QtGui.QLineEdit(self.fileGroupBox)
self.fileLineEdit.setObjectName(u'fileLineEdit')
self.fileLineEdit.setReadOnly(True)
self.fileHorizontalLayout.addWidget(self.fileLineEdit)
self.saveFilePushButton = QtGui.QPushButton(self.fileGroupBox)
self.saveFilePushButton.setMaximumWidth(self.saveFilePushButton.size().height())
self.saveFilePushButton.setIcon(build_icon(u':/general/general_open.png'))
self.saveFilePushButton.setObjectName(u'saveFilePushButton')
self.fileHorizontalLayout.addWidget(self.saveFilePushButton)
self.verticalLayout.addWidget(self.fileGroupBox)
self.button_box = create_button_box(songUsageDetailDialog, u'button_box', [u'cancel', u'ok'])
self.verticalLayout.addWidget(self.button_box)
self.retranslateUi(songUsageDetailDialog)
QtCore.QObject.connect(self.saveFilePushButton, QtCore.SIGNAL(u'clicked()'),
songUsageDetailDialog.defineOutputLocation)
def retranslateUi(self, songUsageDetailDialog):
songUsageDetailDialog.setWindowTitle(translate('SongUsagePlugin.SongUsageDetailForm', 'Song Usage Extraction'))
self.dateRangeGroupBox.setTitle(translate('SongUsagePlugin.SongUsageDetailForm', 'Select Date Range'))
self.toLabel.setText(translate('SongUsagePlugin.SongUsageDetailForm', 'to'))
self.fileGroupBox.setTitle(translate('SongUsagePlugin.SongUsageDetailForm', 'Report Location'))
|
marmyshev/transitions
|
openlp/plugins/songusage/forms/songusagedetaildialog.py
|
Python
|
gpl-2.0
| 5,627
|
[
"Brian"
] |
403623af4c5de9f336d653ef45d691ab257d4bd8f12c23f649c0d46de7bd791f
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
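# Usage sketch (not part of the library API documentation): as implemented by
# _c_step below, the returned value is the 5-tuple
# (location, covariance, log-determinant, support, squared Mahalanobis distances),
# e.g. a hypothetical call:
# >>> loc, cov, logdet, support, dist = c_step(X, n_support=60, random_state=0)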
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
dist = np.inf
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
# If the data already has singular covariance, calculate the precision,
# as the loop below will not be entered.
if np.isinf(det):
precision = linalg.pinvh(covariance)
previous_det = np.inf
while (det < previous_det and remaining_iterations > 0
and not np.isinf(det)):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Check if best fit already found (det => 0, logdet => -inf)
if np.isinf(det):
results = location, covariance, det, support, dist
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[RV]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [RV] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
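# Illustrative call (values hypothetical), mirroring how fast_mcd() below uses this
# helper on data subsets: keep the 10 best of 30 random-start c_step runs, each
# limited to 2 iterations.
# >>> locs, covs, supports, dists = select_candidates(
# ...     X, n_support=60, n_trials=30, select=10, n_iter=2, random_state=0)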
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        `[n_samples + n_features + 1] / 2`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start] +
X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal).
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
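# Minimal sketch of calling the raw estimator directly (data hypothetical); most
# callers should use the MinCovDet class below, which adds correction and
# re-weighting on top of these raw estimates.
# >>> X = np.random.RandomState(0).randn(200, 4)
# >>> location, covariance, support, dist = fast_mcd(X, random_state=0)
# >>> location.shape, covariance.shape, support.shape, dist.shape
# ((4,), (4, 4), (200,), (200,))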
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : bool
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
        Useful when working with data whose mean is approximately, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        [n_samples + n_features + 1] / 2
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rousseeuw] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [ButlerDavies] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
y
not used, present for API consistence purpose.
Returns
-------
self : object
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [RVD]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVD] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
# Check that the covariance of the support data is not equal to 0.
# Otherwise self.dist_ = 0 and thus correction = 0.
n_samples = len(self.dist_)
n_support = np.sum(self.support_)
if n_support < n_samples and np.allclose(self.raw_covariance_, 0):
raise ValueError('The covariance matrix of the support data '
'is equal to 0, try to increase support_fraction')
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates) described
in [RVDriessen]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVDriessen] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
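# End-to-end usage sketch (data hypothetical): fit the robust estimator and read
# back the re-weighted location/covariance and the per-sample squared Mahalanobis
# distances computed on the training set.
# >>> X = np.random.RandomState(42).randn(500, 5)
# >>> mcd = MinCovDet(random_state=42).fit(X)
# >>> mcd.location_.shape, mcd.covariance_.shape, mcd.dist_.shape
# ((5,), (5, 5), (500,))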
|
BiaDarkia/scikit-learn
|
sklearn/covariance/robust_covariance.py
|
Python
|
bsd-3-clause
| 31,108
|
[
"Gaussian"
] |
124a451691f249fca0b762bb6d986617d40d6a9e2209b2a9a19ab1bf19ce7395
|
# Line too long - pylint: disable=C0301
# Copyright (c) Greenplum Inc 2011. All Rights Reserved.
from contextlib import closing
import os
import platform
import shutil
import sys
import tarfile
try:
from gppylib import gplog
from gppylib.commands import gp
from gppylib.commands.base import Command, REMOTE, WorkerPool, ExecutionError
from gppylib.commands.unix import Scp
from gppylib.gpversion import GpVersion
from gppylib.mainUtils import ExceptionNoStackTraceNeeded
from gppylib.operations import Operation
from gppylib.operations.utils import RemoteOperation, ParallelOperation
from gppylib.operations.unix import CheckFile, CheckDir, MakeDir, RemoveFile, RemoveRemoteTree, RemoveRemoteFile, CheckRemoteDir, MakeRemoteDir, CheckRemoteFile, ListRemoteFilesByPattern, ListFiles, ListFilesByPattern
from gppylib.utils import TableLogger
import yaml
from yaml.scanner import ScannerError
except ImportError, ex:
sys.exit('Operation: Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(ex))
logger = gplog.get_default_logger()
def dereference_symlink(path):
"""
MPP-15429: rpm is funky with symlinks...
During an rpm -e invocation, rpm mucks with the /usr/local/greenplum-db symlink.
From strace output, it appears that rpm tries to rmdir any directories it may have created during
package installation. And, in the case of our GPHOME symlink, rpm will actually try to unlink it.
To avoid this scenario, we perform all rpm actions against the "symlink dereferenced" $GPHOME.
"""
path = os.path.normpath(path)
if not os.path.islink(path):
return path
link = os.path.normpath(os.readlink(path))
if os.path.isabs(link):
return link
return os.path.join(os.path.dirname(path), link)
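# Illustrative behaviour (paths hypothetical): if /usr/local/greenplum-db is a
# symlink pointing at greenplum-db-4.2.1.0 in the same directory, then
# dereference_symlink('/usr/local/greenplum-db') returns
# '/usr/local/greenplum-db-4.2.1.0', so the rpm operations below never touch the
# symlink itself.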
GPHOME = dereference_symlink(gp.get_gphome())
GPPKG_EXTENSION = '.gppkg'
SPECFILE_NAME = 'gppkg_spec.yml'
SPECFILE_REQUIRED_TAGS = ['pkgname', 'version', 'architecture', 'os', 'description', 'gpdbversion']
SPECFILE_OPTIONAL_TAGS = ['preinstall', 'postinstall', 'preuninstall', 'postuninstall', 'postupdate']
# TODO: AK: Our interactions with the internal RPM database could benefit from an abstraction layer
# that hides the underlying commands used for installation, uninstallation, queries, etc.
RPM_DATABASE_PATH = 'share/packages/database'
RPM_DATABASE = os.path.join(GPHOME, RPM_DATABASE_PATH)
RPM_INSTALLATION_PATH = GPHOME
# TODO: AK: Our interactions with the archive could benefit from an abstraction layer
# that hides the implementations of archival, unarchival, queries, etc.
# That is, consider the query "is this package already archived?" Currently, this is implemented
# with a CheckFile. Rather, it should be a call to Archive.contains(package), where package
# is instanceof Gppkg.
ARCHIVE_PATH = 'share/packages/archive'
GPPKG_ARCHIVE_PATH = os.path.join(GPHOME, ARCHIVE_PATH)
# TODO: AK: Shouldn't this be "$GPHOME/.tmp"?
# i.e. what if remote host has its $GPHOME elsewhere?
TEMP_EXTRACTION_PATH = GPHOME + '/.tmp'
DEPS_DIR = 'deps'
class GpdbVersionError(Exception):
'''
Exception to notify that the gpdb version
does not match
'''
pass
class AlreadyInstalledError(Exception):
def __init__(self, package_name):
Exception.__init__(self, '%s is already installed.' % package_name)
class NotInstalledError(Exception):
def __init__(self, package_name):
Exception.__init__(self, '%s is not installed.' % package_name)
class BuildPkgError(Exception):
'''
Exception to notify that there was an error during
the building of a gppkg
'''
pass
class MissingDependencyError(Exception):
'''
Exception to catch missing dependency
'''
def __init__(self, value):
Exception.__init__(self, 'Dependency %s is missing' % value )
class OSCompatibilityError(Exception):
'''
Exception to notify that OS does not meet the
requirement
'''
def __init__(self, requiredos, foundos):
Exception.__init__(self, '%s OS required. %s OS found' % (requiredos, foundos))
class ArchCompatibilityError(Exception):
'''
Exception to notify that architecture does not meet
the requirement
'''
def __init__(self, requiredarch, foundarch):
Exception.__init__(self, '%s Arch required. %s Arch found' % (requiredarch, foundarch))
class RequiredDependencyError(Exception):
'''
Exception to notify that the package being uninstalled
is a dependency for another package
'''
pass
class Gppkg:
'''
This class stores all the information about a gppkg
'''
def __init__(self, pkg, pkgname, main_rpm, version, architecture, os, gpdbversion, description, abspath, preinstall, postinstall, preuninstall, postuninstall, postupdate, dependencies, file_list):
'''
The constructor takes the following arguments
pkg The complete package name e.g pgcrypto-1.0-Darwin-i386.gppkg TODO: AK: This is an awful variable name. Change to "package_filename".
pkgname The name of the package as specified in the spec file
main_rpm The name of the main rpm. e.g PL/R, PostGIS etc
version The version of the gppkg
architecture The architecture for which the package is built
os The operating system for which the package is built
gpdbversion The Greenplum Database version for which package is built
description A short description for the package
abspath This is the absolute path where the package sits on the host
preinstall The cluster level preinstallation hooks
postinstall The cluster level postinstallation hooks
preuninstall The cluster level preuninstallation hooks
postuninstall The cluster level postuninstallation hooks
postupdate The cluster level postupdate hooks
dependencies The dependencies of the package. e.g Geos, Proj in case of PostGIS
file_list The list of files present in the package
'''
logger.debug('Gppkg Constructor')
self.pkg = pkg
self.pkgname = pkgname
self.main_rpm = main_rpm
self.version = version
self.architecture = architecture
self.os = os
self.gpdbversion = gpdbversion
self.description = description
self.abspath = abspath
self.preinstall = preinstall
self.postinstall = postinstall
self.preuninstall = preuninstall
self.postuninstall = postuninstall
self.postupdate = postupdate
self.dependencies = dependencies
self.file_list = file_list
@staticmethod
def from_package_path(pkg_path):
'''
This method takes a package as the argument and
obtains all the information about the package
Details include name, arch, OS, version, description, dependencies,
list of files present in the package and returns a gppkg object
'''
logger.debug('from_package_path')
if not os.path.exists(pkg_path):
logger.error('Cannot find package %s' % pkg_path)
raise IOError
#We check for a directory first because
#is_tarfile does not accept directories as path names
if os.path.isdir(pkg_path):
            logger.error('%s is a directory!' % pkg_path)
raise IOError
if not tarfile.is_tarfile(pkg_path) or not pkg_path.endswith(GPPKG_EXTENSION):
            logger.error('%s is not a valid package' % pkg_path)
raise IOError
if os.path.getsize(pkg_path) == 0:
logger.error('Package is empty')
raise IOError
pkg = {}
        # XXX: AK: It's purely a coincidence that the optional tags are lists.
for tag in SPECFILE_REQUIRED_TAGS:
pkg[tag] = ''
for tag in SPECFILE_OPTIONAL_TAGS:
pkg[tag] = []
pkg['file_list'] = []
pkg['dependencies'] = []
with closing(tarfile.open(pkg_path, 'r:gz')) as tarinfo:
#store the list of all files present in the archive
archive_list = tarinfo.getnames()
pkg["file_list"] = archive_list
#The spec file has to be called gppkg_spec
#so there will only be one such file,
            #so we don't need to worry about the loop
#overwriting the 'specfile' variable with different values
for cur_file in archive_list:
if cur_file.endswith(SPECFILE_NAME):
specfile = tarinfo.extractfile(cur_file)
yamlfile = yaml.load(specfile)
keys = yamlfile.keys()
#store all the tags
for key in keys:
pkg[key.lower()] = yamlfile[key]
#update the pkgpath
pkg['pkg'] = os.path.split(pkg_path)[-1]
#make the version as string
pkg['version'] = str(pkg['version'])
#Convert the required version to a GpVersion
pkg['gpdbversion'] = GpVersion(str(pkg['gpdbversion']))
#update the absolute path
pkg['abspath'] = pkg_path
#store all the dependencies of the gppkg
for cur_file in archive_list:
if cur_file.find('deps/') != -1 and cur_file.endswith('.rpm'):
pkg['dependencies'].append(cur_file[cur_file.rfind('/') + 1:])
#store the main rpm
for cur_file in archive_list:
if cur_file.find('deps/') == -1 and cur_file.endswith('.rpm'):
pkg['main_rpm'] = cur_file
gppkg = Gppkg(**pkg)
return gppkg
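# Illustrative sketch (not part of the original module): how from_package_path is
# typically used. The package path below is hypothetical.
def _example_load_gppkg():
    # Parses the spec file, dependencies and file list out of the .gppkg archive
    # and returns a fully populated Gppkg instance.
    gppkg = Gppkg.from_package_path('/home/gpadmin/pgcrypto-1.0-rhel5-x86_64.gppkg')
    return gppkg.pkgname, gppkg.version, gppkg.dependencies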
class LocalCommand(Operation):
'''
DEPRECATED
TODO: AK: Eliminate this. Replace invocations with Command(...).run(validateAfter = True)
'''
def __init__(self, cmd_str, echo = False):
self.cmd_str = cmd_str
self.echo = echo
def execute(self):
logger.debug(self.cmd_str)
cmd = Command(name = 'LocalCommand', cmdStr = self.cmd_str)
cmd.run(validateAfter = True)
if self.echo:
echo_str = cmd.get_results().stdout.strip()
if echo_str:
logger.info(echo_str)
return cmd.get_results()
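# Illustrative sketch (not part of the original module): the replacement that the
# TODO on LocalCommand suggests -- invoking Command directly, as the class body
# above already does internally.
def _example_local_command(cmd_str):
    cmd = Command(name='LocalCommand', cmdStr=cmd_str)
    cmd.run(validateAfter=True)
    return cmd.get_results()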
class RemoteCommand(Operation):
"""
DEPRECATED
TODO: AK: Rename as GpSsh, like GpScp below.
"""
def __init__(self, cmd_str, host_list):
self.cmd_str = cmd_str
self.host_list = host_list
self.pool = None
def execute(self):
logger.debug(self.cmd_str)
# Create Worker pool
# and add commands to it
self.pool = WorkerPool()
for host in self.host_list:
cmd = Command(name = 'Remote Command', cmdStr = self.cmd_str, ctxt = REMOTE, remoteHost = host)
self.pool.addCommand(cmd)
self.pool.join()
#This will raise ExecutionError exception if even a single command fails
self.pool.check_results()
class ListPackages(Operation):
'''
Lists all the packages present in
$GPHOME/share/packages/archive
'''
def __init__(self):
pass
def execute(self):
# Ensure archive path exists
# TODO: AK: In hindsight, this should've been named MakeDirP,
# to reflect that it won't blow up if the path already exists.
MakeDir(GPPKG_ARCHIVE_PATH).run()
package_list = ListFilesByPattern(GPPKG_ARCHIVE_PATH, '*' + GPPKG_EXTENSION).run()
package_name_list = []
for pkg in package_list:
pkg_name = pkg.split('/')[-1]
package_name_list.append(pkg_name[:pkg_name.index('-', pkg_name.index('-') + 1)])
return package_name_list
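# Illustrative worked example (not part of the original module): ListPackages keeps
# everything up to the second '-' of the archive filename, so a hypothetical entry
# 'pgcrypto-1.0-rhel5-x86_64.gppkg' is reported as 'pgcrypto-1.0'.
def _example_package_name():
    pkg_name = 'pgcrypto-1.0-rhel5-x86_64.gppkg'
    return pkg_name[:pkg_name.index('-', pkg_name.index('-') + 1)]  # -> 'pgcrypto-1.0'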
class CleanupDir(Operation):
'''
    Cleans up the given dir.
    Returns True if the dir is already removed
    or if we were able to remove it successfully;
    False for other errors.
'''
def __init__(self, dir_path):
self.dir_path = dir_path
def execute(self):
dir_path = self.dir_path
logger.debug('Cleaning up %s' % dir_path)
#If file does not exist, nothing to remove
#So we return true
if not os.path.exists(dir_path):
return True
if os.path.isdir(dir_path):
shutil.rmtree(dir_path)
else:
return False
return True
class IsVersionCompatible(Operation):
'''
Returns True if the gppkg is compatible
with the gpdb version that has been installed
'''
def __init__(self, gppkg):
self.gppkg = gppkg
def execute(self):
gppkg = self.gppkg
gpdb_version = self._get_gpdb_version()
required_gpdb_version = gppkg.gpdbversion
logger.debug('Greenplum Database Version = %s' % gpdb_version)
logger.debug('Required Greenplum Database version = %s' % required_gpdb_version)
if gpdb_version is None:
logger.error('Could not determine Greenplum Database version')
return False
if not required_gpdb_version.isVersionRelease(gpdb_version):
logger.error('%s requires Greenplum Database version %s' % (gppkg.pkgname, required_gpdb_version))
return False
# last bumped version (4.3.5.0)
orca_compatible_minor_version = 40305
gpdb_magic_num = self._convert_to_magic_number_version(gpdb_version)
if 'orca' not in gppkg.version and \
gpdb_magic_num >= orca_compatible_minor_version:
logger.error('Greenplum Database requires orca version of %s' % (gppkg.pkg))
return False
return True
def _get_gpdb_version(self):
'''
Get the version of the current GPDB
Returns a string consisting of the major
release version
'''
logger.debug('_get_gpdb_version')
self.gphome = gp.get_gphome()
version = gp.GpVersion.local('local GP software version check', self.gphome)
gpdb_version = GpVersion(version.strip())
return gpdb_version
def _convert_to_magic_number_version(self, gpversion_obj):
'''
Converts GPDB version to the GPDB magic number
Returns an int consisting of the major and minor release version
'''
logger.debug('_convert_to_magic_number_version')
ver_list = gpversion_obj.version
# The generation of the magic version number (GP_VERSION_NUM) is
# retrieved from our configure.in file
magic_num = "%d%02d%02d" % (ver_list[0], ver_list[1],
ver_list[2] if len(ver_list) > 2 else 0)
return int(magic_num)
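# Illustrative worked example (not part of the original module): the magic number
# for GPDB version 4.3.5 (i.e. ver_list == [4, 3, 5]) is "%d%02d%02d" % (4, 3, 5)
# -> '40305' -> 40305, which matches orca_compatible_minor_version above.
def _example_magic_number():
    ver_list = [4, 3, 5]
    return int("%d%02d%02d" % (ver_list[0], ver_list[1],
                               ver_list[2] if len(ver_list) > 2 else 0))  # -> 40305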
class ValidateInstallPackage(Operation):
"""
Ensure that the given rpms can be installed safely. This is accomplished mainly
through use of rpm --test, which will have one of a few outcomes:
1) A return code of 0, indicating the installation should proceed smoothly
2) A non-zero return code, and stderr indicating some of the rpms are already installed.
We simply omit such rpms from the returned list of rpms, indicating to the caller
that to be successful, installation should only be attempted on the filtered list of rpms.
3) A non-zero return code, and stderr indicating that a failed dependency issue will arise.
This scenario must result in a MissingDependencyError.
    Note: install and update share this code, because there is extensive commonality in regard
    to the version, OS, and arch checking, in addition to the 3 code paths enumerated just above.
    Lastly, for an edge case, if we determine that all of the relevant rpms are currently installed
    *and* the archive package already exists, we declare the package already installed.
TODO: This is depending on ExtractPackage having put the dependencies in this same directory.
TODO: Use regexes for more reliable string matching. CR-2865#c20112
"""
def __init__(self, gppkg, is_update = False):
self.gppkg = gppkg
self.is_update = is_update
def execute(self):
#Check the GPDB requirements
if not IsVersionCompatible(self.gppkg).run():
raise GpdbVersionError
# TODO: AK: I've changed our use of the OS tag from 'Linux' to 'rhel5' or 'suse10'.
# So, the two lines below will not work properly.
#if self.gppkg.os.lower() != platform.system().lower():
# raise OSCompatibilityError(self.gppkg.os, platform.system().lower())
#architecture compatibility
if self.gppkg.architecture.lower() != platform.machine().lower():
raise ArchCompatibilityError(self.gppkg.architecture, platform.machine().lower())
rpm_set = set([self.gppkg.main_rpm] + self.gppkg.dependencies)
rpm_install_string = ' '.join([os.path.join(TEMP_EXTRACTION_PATH, rpm) for rpm in rpm_set])
if self.is_update:
rpm_install_command = 'rpm --test -U --force %s --dbpath %s --prefix %s' % (rpm_install_string, RPM_DATABASE, RPM_INSTALLATION_PATH)
else:
rpm_install_command = 'rpm --test -i %s --dbpath %s --prefix %s' % (rpm_install_string, RPM_DATABASE, RPM_INSTALLATION_PATH)
cmd = Command('Validating rpm installation', rpm_install_command)
logger.info(cmd) # TODO: AK: This should be debug(), but RMI cannot propagate a log level.
try:
cmd.run(validateAfter = True)
except ExecutionError, e:
lines = e.cmd.get_results().stderr.splitlines()
# Forking between code paths 2 and 3 depends on some meaningful stderr
# Without such stderr, we must bubble up the ExecutionError.
if len(lines) == 0:
raise
if 'failed dependencies' in lines[0].lower():
# Code path 3 (see docstring)
# example stderr:
# error: Failed dependencies:
# geos-3.2.2-1.x86_64.rpm is needed by postgis-1.0-1.x86_64
# TODO: AK: Dependencies should be parsed out here and used to initialize
# this MissingDependencyError. However, this exception does not support
# multiple missing dependencies. Some refactoring work is needed in both places.
logger.error(e.cmd.get_results().stderr)
raise MissingDependencyError('')
# Code path 2, possibly (see docstring)
# example stderr:
# package geos-3.2.2-1.x86_64 is already installed
# package proj-4.7.0-1.x86_64 is already installed
# package postgis-1.0-1.x86_64 is already installed
for line in lines:
if 'already installed' in line.lower():
package_name = line.split()[1]
rpm_name = "%s.rpm" % package_name
rpm_set.remove(rpm_name)
else:
# This is unexpected, so bubble up the ExecutionError.
raise
# MPP-14359 - installation and uninstallation prechecks must also consider
# the archive. That is, if a partial installation had added all rpms
# but failed to add the archive package, then for our purposes, we consider
# the package not yet installed and still in need of InstallPackageLocally.
archive_package_exists = CheckFile(os.path.join(GPPKG_ARCHIVE_PATH, self.gppkg.pkg)).run()
package_already_installed = (not rpm_set) and archive_package_exists
if package_already_installed:
raise AlreadyInstalledError(self.gppkg.pkg)
# Code path 1 (See docstring)
return rpm_set
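# Illustrative sketch (not part of the original module): how code path 2 above filters
# out rpms that 'rpm --test' reports as already installed. The rpm names and stderr
# line are hypothetical, following the format quoted in the docstring.
def _example_filter_already_installed():
    rpm_set = set(['geos-3.2.2-1.x86_64.rpm', 'postgis-1.0-1.x86_64.rpm'])
    stderr_lines = ['package geos-3.2.2-1.x86_64 is already installed']
    for line in stderr_lines:
        if 'already installed' in line.lower():
            # second whitespace-separated token is the installed package name
            rpm_set.remove("%s.rpm" % line.split()[1])
    return rpm_set  # only the postgis rpm remains to be installed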
class ValidateUninstallPackage(Operation):
"""
Ensure that the given rpms can be uninstalled safely. This is accomplished mainly
through use of rpm --test, which will have one of a few outcomes:
1) A return code of 0, indicating the uninstallation should proceed smoothly
2) A non-zero return code, and stderr indicating some of the rpms are already uninstalled.
We simply omit such rpms from the returned list of rpms, indicating to the caller
that to be successful, uninstallation should only be attempted on the filtered list of rpms.
3) A non-zero return code, and stderr indicating that dependencies remain.
Lastly, for an edge case, if we determine that none of the relevant rpms are currently installed
*and* the archive package does not exist, we declare the package is not installed.
TODO: Use regexes for more reliable string matching.
"""
def __init__(self, gppkg):
self.gppkg = gppkg
def execute(self):
rpm_list = [self.gppkg.main_rpm] + self.gppkg.dependencies
def strip_extension_and_arch(filename):
# expecting filename of form %{name}-%{version}-%{release}.%{arch}.rpm
rest, ext = os.path.splitext(filename)
rest, arch = os.path.splitext(rest)
return rest
rpm_set = set([strip_extension_and_arch(rpm) for rpm in rpm_list])
rpm_uninstall_string = ' '.join(rpm_set)
rpm_uninstall_command = 'rpm --test -e %s --dbpath %s' % (rpm_uninstall_string, RPM_DATABASE)
cmd = Command('Validating rpm uninstallation', rpm_uninstall_command)
logger.info(cmd) # TODO: AK: This should be debug(), but RMI cannot propagate a log level.
try:
cmd.run(validateAfter = True)
except ExecutionError, e:
lines = e.cmd.get_results().stderr.splitlines()
# Forking between code paths 2 and 3 depends on some meaningful stderr
# Without such stderr, we must bubble up the ExecutionError.
if len(lines) == 0:
raise
if 'failed dependencies' in lines[0].lower():
# Code path 3 (see docstring)
# example stderr:
# error: Failed dependencies:
# jre = 1.6.0_26 is needed by (installed) gphdfs-1.1-1.x86_64
self.resolve_shared_dependencies(rpm_set, lines[1:])
else:
# Code path 2, possibly (see docstring)
# example stderr:
# error: package postgis-1.0-1.x86_64 is not installed
# error: package proj-4.7.0-1.x86_64 is not installed
# error: package geos-3.2.2-1.x86_64 is not installed
for line in lines:
if 'not installed' in line.lower():
package_name = line.split()[2]
rpm_set.remove(package_name)
else:
# This is unexpected, so bubble up the ExecutionError.
raise
# MPP-14359 - installation and uninstallation prechecks must also consider
# the archive. That is, if a partial uninstallation had removed all rpms
# but failed to remove the archive package, then for our purposes, we consider
# the package installed and still in need of UninstallPackageLocally.
archive_package_exists = CheckFile(os.path.join(GPPKG_ARCHIVE_PATH, self.gppkg.pkg)).run()
package_not_installed = (not rpm_set) and (not archive_package_exists)
if package_not_installed:
raise NotInstalledError(self.gppkg.pkg)
# Code path 1 (See docstring)
return rpm_set
def resolve_shared_dependencies(self, rpm_set, dependency_lines):
"""
This is a very naive resolution to shared dependencies. (See code path #3 in ValidateUninstallPackage.execute)
Among the rpms we attempt to remove from the system, a subset cannot be
removed during this particular gppkg uninstallation, because their removal would violate
the dependency constraints of other rpms that remain in the system; we simply leave these culprit rpm(s) behind.
More specifically, the preceding rpm --test -e command has given us the violated *capabilities*. For each *capability*,
we query the rpm database with --whatprovides to discern the culprit rpm(s).
In simpler terms, consider this example:
pljava depends on jre, which its gppkg contains
gphdfs depends on jre, which its gppkg contains
install the gppkgs for both pljava and gphdfs
uninstall pljava gppkg
we internally attempt to "rpm -e" the jre rpm, hitting the gphdfs dependency error here involving "jre = 1.6"
we determine that the jre rpm is responsible for *providing* "jre = 1.6"
so, we ultimately omit the jre rpm from our "rpm -e" and move on
TODO: AK: A more robust version of this function would ensure that the remaining
rpms are, in fact, bound by a remaining gppkg. We defer this responsibility for now because gppkgs
should not have external dependencies. That is, no package should have requirements on rpms
not contained in its own gppkg distro. So, it's safe to assume that if foo is a culprit rpm, there exists
some gppkg bar that internally contains foo. (I realize that, with time, this will not be a scalable requirement
for gppkgs... hence the TODO.)
@type rpm_set: set
@param rpm_set: rpms being uninstalled, among which there exists an rpm
whose removal violates the dependencies of remaining rpms
@type dependency_lines: list
@param dependency_lines: lines produced from the stderr in
code path #3 in ValidateUninstallPackage.execute
ex: [" jre >= 1.6.0_26 is needed by (installed) gphdfs-1.1-1.x86_64"]
"""
for dependency_line in dependency_lines:
violated_capability = dependency_line.split()[0] # e.g. "jre"
cmd = Command('Discerning culprit rpms for %s' % violated_capability,
'rpm -q --whatprovides %s --dbpath %s' % (violated_capability, RPM_DATABASE))
cmd.run(validateAfter = True)
culprit_rpms = set(cmd.get_results().stdout.splitlines())
rpm_set -= culprit_rpms
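# Illustrative sketch (not part of the original module): the shape of the
# '--whatprovides' resolution above, mirroring the jre/gphdfs example in the
# docstring. The capability and culprit rpm names are hypothetical.
def _example_resolve_shared_dependency(rpm_set):
    dependency_line = ' jre >= 1.6.0_26 is needed by (installed) gphdfs-1.1-1.x86_64'
    violated_capability = dependency_line.split()[0]  # 'jre'
    # 'rpm -q --whatprovides jre' would report the rpm providing that capability;
    # such culprit rpms are simply left out of the uninstallation.
    culprit_rpms = set(['jre-1.6.0_26-fcs'])
    return rpm_set - culprit_rpms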
class ExtractPackage(Operation):
"""
Extract the contents of the package into the temp folder
TODO: AK: Extraction should be implemented as a context manager.
"""
def __init__(self, gppkg):
self.gppkg = gppkg
def execute(self):
#clean up tmp extraction folder
if os.path.exists(TEMP_EXTRACTION_PATH) and not CleanupDir(TEMP_EXTRACTION_PATH).run():
logger.error('Could not clean temp folder')
raise IOError
#untar the package into tmp folder
with closing(tarfile.open(self.gppkg.abspath)) as tarinfo:
tarinfo.extractall(TEMP_EXTRACTION_PATH)
#move all the deps into same folder as the main rpm
path = os.path.join(TEMP_EXTRACTION_PATH, DEPS_DIR)
if os.path.exists(path):
for cur_file in os.listdir(path):
shutil.move(os.path.join(TEMP_EXTRACTION_PATH, DEPS_DIR, cur_file), TEMP_EXTRACTION_PATH)
class InstallPackageLocally(Operation):
"""
Installs a package on the local host
This operation must take a slew of starting conditions and drive the state
of the local machine towards the ending state, in which the given package is successfully
installed, the rpm database is sane, and the package resides in the designated archive.
To that end, we indiscriminately squash AlreadyInstalledErrors arising from ValidateInstallPackage,
because in this context, it's not an exception, but rather an indication of our desired ending
conditions.
We must consider the following scenarios and more: package was deleted from archive,
the main comprising rpm was uninstalled, dependent rpms were removed, the rpm database was
corrupted, etc.
    Again, much like ValidateInstallPackage, we make cheap reuse of this code for the purposes
    of an --update, as there is considerable commonality.
"""
def __init__(self, package_path, is_update = False):
self.package_path = package_path
self.is_update = is_update
def execute(self):
current_package_location = self.package_path
package_name = os.path.basename(current_package_location)
logger.info('Installing %s locally' % package_name)
final_package_location = os.path.join(GPPKG_ARCHIVE_PATH, package_name)
gppkg = Gppkg.from_package_path(current_package_location)
ExtractPackage(gppkg).run()
# squash AlreadyInstalledError here: the caller doesn't ever need to
# know that we didn't have to do anything here
try:
rpm_set = ValidateInstallPackage(gppkg, is_update = self.is_update).run()
except AlreadyInstalledError, e:
logger.info(e)
return
if rpm_set:
if self.is_update:
rpm_install_command = 'rpm -U --force %s --dbpath %s --prefix=%s'
else:
rpm_install_command = 'rpm -i %s --dbpath %s --prefix=%s'
rpm_install_command = rpm_install_command % \
(" ".join([os.path.join(TEMP_EXTRACTION_PATH, rpm) for rpm in rpm_set]),
RPM_DATABASE,
RPM_INSTALLATION_PATH)
cmd = Command('Installing rpms', rpm_install_command)
logger.info(cmd)
cmd.run(validateAfter = True)
# TODO: AK: MPP-15568
# TODO: AK: abstraction layer for archive interactions... to hide use of shutil.copy, RemoveFile, etc.
MakeDir(GPPKG_ARCHIVE_PATH).run()
shutil.copy(current_package_location, final_package_location)
logger.info("Completed local installation of %s." % package_name)
class UninstallPackageLocally(Operation):
"""
Uninstalls a package on the local host
This operation must take a slew of starting conditions and drive the state
of the local machine towards the ending state, in which the given package is successfully
uninstalled, the rpm database is sane, and the package is removed from the archive.
To that end, we indiscriminately squash NotInstalledErrors arising from ValidateUninstallPackage,
because in this context, it's not an exception, but rather an indication of our desired ending
conditions.
We must consider the following scenarios and more: package was deleted from archive,
the main comprising rpm was uninstalled, dependent rpms were removed, the rpm database was
corrupted, etc.
"""
def __init__(self, package_name):
self.package_name = package_name
def execute(self):
# TODO: AK: MPP-15737 - we're entirely dependent on the package residing in the archive
current_package_location = os.path.join(GPPKG_ARCHIVE_PATH, self.package_name)
gppkg = Gppkg.from_package_path(current_package_location)
# squash NotInstalledError here: the caller doesn't ever need to
# know that we didn't have to do anything here
try:
rpm_set = ValidateUninstallPackage(gppkg).run()
except NotInstalledError, e:
logger.info(e)
return
if rpm_set:
rpm_uninstall_command = 'rpm -e %s --dbpath %s' % (" ".join(rpm_set), RPM_DATABASE)
cmd = Command('Uninstalling rpms', rpm_uninstall_command)
logger.info(cmd)
cmd.run(validateAfter = True)
# TODO: AK: abstraction layer for archive interactions... to hide use of shutil.copy, RemoveFile, etc.
MakeDir(GPPKG_ARCHIVE_PATH).run()
RemoveFile(current_package_location).run()
logger.info("Completed local uninstallation of %s." % self.package_name)
class SyncPackages(Operation):
"""
Synchronizes packages from master to a remote host
TODO: AK: MPP-15568
"""
def __init__(self, host):
self.host = host
def execute(self):
if not CheckDir(GPPKG_ARCHIVE_PATH).run():
MakeDir(GPPKG_ARCHIVE_PATH).run()
if not CheckRemoteDir(GPPKG_ARCHIVE_PATH, self.host).run():
MakeRemoteDir(GPPKG_ARCHIVE_PATH, self.host).run()
# set of packages on the master
master_package_set = set(ListFilesByPattern(GPPKG_ARCHIVE_PATH, '*' + GPPKG_EXTENSION).run())
# set of packages on the remote host
remote_package_set = set(ListRemoteFilesByPattern(GPPKG_ARCHIVE_PATH, '*' + GPPKG_EXTENSION, self.host).run())
# packages to be uninstalled on the remote host
uninstall_package_set = remote_package_set - master_package_set
# packages to be installed on the remote host
install_package_set = master_package_set - remote_package_set
if not install_package_set and not uninstall_package_set:
logger.info('The packages on %s are consistent.' % self.host)
return
if install_package_set:
logger.info('The following packages will be installed on %s: %s' % (self.host, ', '.join(install_package_set)))
for package in install_package_set:
logger.debug('copying %s to %s' % (package, self.host))
dstFile = os.path.join(GPHOME, package)
Scp(name = 'copying %s to %s' % (package, self.host),
srcFile = os.path.join(GPPKG_ARCHIVE_PATH, package),
dstFile = dstFile,
dstHost = self.host).run(validateAfter = True)
RemoteOperation(InstallPackageLocally(dstFile), self.host).run()
RemoveRemoteFile(dstFile, self.host).run()
if uninstall_package_set:
logger.info('The following packages will be uninstalled on %s: %s' % (self.host, ', '.join(uninstall_package_set)))
for package in uninstall_package_set:
RemoteOperation(UninstallPackageLocally(package), self.host).run()
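# Illustrative worked example (not part of the original module): the set arithmetic
# SyncPackages uses to reconcile the master and remote archives. The package
# filenames are hypothetical.
def _example_sync_sets():
    master_package_set = set(['pgcrypto-1.0-rhel5-x86_64.gppkg', 'plr-1.0-rhel5-x86_64.gppkg'])
    remote_package_set = set(['pgcrypto-1.0-rhel5-x86_64.gppkg', 'postgis-1.0-rhel5-x86_64.gppkg'])
    uninstall_package_set = remote_package_set - master_package_set  # only on the remote host
    install_package_set = master_package_set - remote_package_set    # missing on the remote host
    return install_package_set, uninstall_package_set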
class InstallPackage(Operation):
def __init__(self, gppkg, master_host, standby_host, segment_host_list):
self.gppkg = gppkg
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
logger.info('Installing package %s' % self.gppkg.pkg)
# TODO: AK: MPP-15736 - precheck package state on master
ExtractPackage(self.gppkg).run()
ValidateInstallPackage(self.gppkg).run()
# perform any pre-installation steps
PerformHooks(hooks = self.gppkg.preinstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
# distribute package to segments
srcFile = self.gppkg.abspath
dstFile = os.path.join(GPHOME, self.gppkg.pkg)
GpScp(srcFile, dstFile, self.segment_host_list).run()
# install package on segments
HostOperation(InstallPackageLocally(dstFile), self.segment_host_list).run()
# install package on standby
if self.standby_host:
Scp(name = 'copying %s to %s' % (srcFile, self.standby_host),
srcFile = srcFile,
dstFile = dstFile,
dstHost = self.standby_host).run(validateAfter = True)
RemoteOperation(InstallPackageLocally(dstFile), self.standby_host).run()
# install package on master
InstallPackageLocally(srcFile).run()
# perform any post-installation steps
PerformHooks(hooks = self.gppkg.postinstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
logger.info('%s successfully installed.' % (self.gppkg.pkg))
class PerformHooks(Operation):
def __init__(self, hooks, master_host, standby_host, segment_host_list):
"""
Performs steps that have been specified in the yaml file for a particular
stage of gppkg execution
TODO: AK: A packager may have added commands to their hooks, with the
assumption that the current working directory would be that which contains
the spec file, rpms, and other artifacts (external scripts, perhaps.) To support
this, these commands should be prefixed with a "cd".
TODO: AK: I'm adding master_host for consistency.
But, why would we ever need master_host? We're on the master host!
"""
self.hooks = hooks
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
if self.hooks is None:
return
for hook in self.hooks:
            key = hook.keys()
            # dict.keys() never returns None; check for an empty hook entry instead,
            # and skip it rather than silently aborting the remaining hooks
            if not key:
                continue
            key_str = key[0]
if key_str.lower() == 'master':
if self.standby_host:
RemoteCommand(hook[key_str], [self.standby_host]).run()
LocalCommand(hook[key_str], True).run()
elif key_str.lower() == 'segment':
RemoteCommand(hook[key_str], self.segment_host_list).run()
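# Illustrative sketch (not part of the original module): the hook structure PerformHooks
# appears to expect, as implied by the key handling above -- a list of single-key dicts
# whose key is 'Master' or 'Segment' (matched case-insensitively) and whose value is the
# shell command to run. The commands below are hypothetical.
_example_hooks = [
    {'Master': 'echo runs on the master (and standby, if configured)'},
    {'Segment': 'echo runs on every segment host'},
]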
class UninstallPackage(Operation):
def __init__(self, gppkg, master_host, standby_host, segment_host_list):
self.gppkg = gppkg
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
logger.info('Uninstalling package %s' % self.gppkg.pkg)
# TODO: AK: MPP-15736 - precheck package state on master
ExtractPackage(self.gppkg).run()
ValidateUninstallPackage(self.gppkg).run()
# perform any pre-uninstallation steps
PerformHooks(hooks = self.gppkg.preuninstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
# uninstall on segments
HostOperation(UninstallPackageLocally(self.gppkg.pkg), self.segment_host_list).run()
if self.standby_host:
RemoteOperation(UninstallPackageLocally(self.gppkg.pkg), self.standby_host).run()
UninstallPackageLocally(self.gppkg.pkg).run()
        # perform any post-uninstallation steps
PerformHooks(hooks = self.gppkg.postuninstall,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
logger.info('%s successfully uninstalled.' % self.gppkg.pkg)
class QueryPackage(Operation):
INFO, LIST, ALL = range(3)
def __init__(self, query_type, package_path):
self.query_type = query_type
self.package_path = package_path
def execute(self):
if self.query_type == QueryPackage.INFO:
def package_details(p):
yield 'Name', p.pkgname
yield 'Version', p.version
yield 'Architecture', p.architecture
yield 'OS', p.os
yield 'GPDBVersion', str(p.gpdbversion)
yield 'Description', p.description
def print_package_info(package):
tabLog = TableLogger()
for name, value in package_details( package ):
tabLog.info([name, value])
tabLog.outputTable()
package = Gppkg.from_package_path(self.package_path)
print_package_info( package )
elif self.query_type == QueryPackage.LIST:
package = Gppkg.from_package_path(self.package_path)
for file in package.file_list:
print file
elif self.query_type == QueryPackage.ALL:
package_name_list = ListPackages().run()
for package_name in package_name_list:
print package_name
else:
package = Gppkg.from_package_path(self.package_path)
try:
ExtractPackage(package).run()
ValidateInstallPackage(package).run()
except AlreadyInstalledError:
print '%s is installed.' % package.pkgname
else:
print '%s is not installed.' % package.pkgname
class BuildGppkg(Operation):
'''
Builds a gppkg given a directory containing
the spec file, rpms and any pre/post installation scripts
'''
def __init__(self, directory):
self.directory = directory
def execute(self):
directory = self.directory
logger.info('Building gppkg')
#Check if the directory is valid
if not os.path.exists(directory) or not os.path.isdir(directory):
            logger.error('%s is an invalid directory' % directory)
raise BuildPkgError
filelist = os.listdir(directory)
#Check for the spec file
specfile = directory + '/' + SPECFILE_NAME
if not os.path.exists(specfile):
logger.error(' Spec file does not exist')
raise BuildPkgError
#parse the spec file and get the name, version and arch
#this is used to name the gppkg
pkg_path_details = self._get_package_name_details(specfile)
if pkg_path_details is None:
raise BuildPkgError
#The file already exists. Rewrite the original with the new one
pkg = pkg_path_details['pkgname'] + '-' + str(pkg_path_details['version']) + '-' + pkg_path_details['os'] + '-' + pkg_path_details['architecture'] + GPPKG_EXTENSION
if os.path.exists(pkg):
os.remove(pkg)
#Verify the spec file
if not self._verify_specfile(specfile, directory):
raise BuildPkgError
#tar and gzip the directory
#rename the file with .gppkg extension
with closing(tarfile.open(pkg, 'w:gz')) as tarinfo:
for cur_file in filelist:
tarinfo.add(name = os.path.join(directory, cur_file),
arcname = cur_file)
logger.info('Completed building gppkg')
def _get_package_name_details(self, specfile):
'''
Get details about the name, version, operating system, architecture
of the package. The final gppkg which will be created
will be named as <name>-<version>-<os>-<arch>.gppkg
'''
logger.debug('_get_package_name_details')
cur_file = None
with open(specfile) as cur_file:
yamlfile = yaml.load(cur_file)
tags = yamlfile.keys()
pkg_path_details = {}
#return all the required tags as a dict
for tag in tags:
if tag.lower() in SPECFILE_REQUIRED_TAGS:
pkg_path_details[tag.lower()] = yamlfile[tag]
return pkg_path_details
def _verify_specfile(self, specfile, directory):
'''
Reads the spec file and makes sure that the tags are correct.
'''
logger.debug('_verify_specfile')
cur_file = None
try:
with open(specfile) as cur_file:
yamlfile = yaml.load(cur_file)
if not self._verify_tags(yamlfile):
return False
return True
except ScannerError, ex:
return False
def _verify_tags(self, yamlfile):
'''
Verify that the tags are valid.
Returns true if all tags are valid
False otherwise
'''
logger.debug('_verify_tags')
tags = yamlfile.keys()
tags = [tag.lower() for tag in tags]
#check required tags
for required_tag in SPECFILE_REQUIRED_TAGS:
if required_tag not in tags:
logger.error(' Required tag %s missing in Spec file' % required_tag)
return False
#check for invalid tags
for tag in tags:
if tag not in SPECFILE_OPTIONAL_TAGS and tag not in SPECFILE_REQUIRED_TAGS:
logger.error(' Invalid tag %s in Spec file' % tag)
return False
return True
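# Illustrative worked example (not part of the original module): the gppkg filename
# BuildGppkg derives from the required spec-file tags. The tag values are hypothetical,
# and GPPKG_EXTENSION is assumed to be '.gppkg'.
def _example_gppkg_filename():
    pkg_path_details = {'pkgname': 'pgcrypto', 'version': '1.0',
                        'os': 'rhel5', 'architecture': 'x86_64'}
    return (pkg_path_details['pkgname'] + '-' + str(pkg_path_details['version']) + '-' +
            pkg_path_details['os'] + '-' + pkg_path_details['architecture'] +
            GPPKG_EXTENSION)  # e.g. 'pgcrypto-1.0-rhel5-x86_64.gppkg'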
class UpdatePackage(Operation):
""" TODO: AK: Enforce gppkg version is higher than currently installed version """
def __init__(self, gppkg, master_host, standby_host, segment_host_list):
self.gppkg = gppkg
self.master_host = master_host
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
logger.info('Updating package %s' % self.gppkg.pkg)
ExtractPackage(self.gppkg).run()
ValidateInstallPackage(self.gppkg, is_update = True).run()
# distribute package to segments
srcFile = self.gppkg.abspath
dstFile = os.path.join(GPHOME, self.gppkg.pkg)
GpScp(srcFile, dstFile, self.segment_host_list).run()
# update package on segments
HostOperation(UpdatePackageLocally(dstFile), self.segment_host_list).run()
# update package on standby
if self.standby_host:
Scp(name = 'copying %s to %s' % (srcFile, self.standby_host),
srcFile = srcFile,
dstFile = dstFile,
dstHost = self.standby_host).run(validateAfter = True)
RemoteOperation(UpdatePackageLocally(dstFile), self.standby_host).run()
# update package on master
UpdatePackageLocally(srcFile).run()
# perform any post-update steps
PerformHooks(hooks = self.gppkg.postupdate,
master_host = self.master_host,
standby_host = self.standby_host,
segment_host_list = self.segment_host_list).run()
logger.info('%s successfully updated.' % (self.gppkg.pkg))
class UpdatePackageLocally(Operation):
"""
Updates a package on the local host
We make cheap reuse of InstallPackageLocally with the propagation of is_update = True, which
effectively changes the rpm --test command to use -U instead of -i. Beyond the invocation of
InstallPackageLocally, here, we also clean up the archive directory to remove other (ideally, older)
versions of the updated package.
"""
def __init__(self, package_path):
self.package_path = package_path
def execute(self):
InstallPackageLocally(self.package_path, is_update = True).run()
# Remove other versions of the package from archive.
# Note: Do not rely on filename format to discern such packages.
# Rather, interrogate a package only through the Gppkg class interface.
current_package = Gppkg.from_package_path(self.package_path)
MakeDir(GPPKG_ARCHIVE_PATH).run()
archived_package_paths = ListFiles(GPPKG_ARCHIVE_PATH).run()
for archived_package_path in archived_package_paths:
temp_package = Gppkg.from_package_path(os.path.join(GPPKG_ARCHIVE_PATH, archived_package_path))
if temp_package.pkgname == current_package.pkgname and temp_package.version != current_package.version:
RemoveFile(os.path.join(GPPKG_ARCHIVE_PATH, archived_package_path)).run()
class CleanGppkg(Operation):
'''
Cleans up the Gppkg from the cluster in case of partial
installation or removal. This might not be required if
we can make the install and uninstall options idempotent.
    This operation is exactly the same as remove but we don't
check on each host to see if the rpm is installed or not.
'''
def __init__(self, standby_host, segment_host_list):
self.standby_host = standby_host
self.segment_host_list = segment_host_list
def execute(self):
operations = [SyncPackages(host) for host in self.segment_host_list]
if self.standby_host:
operations.append(SyncPackages(self.standby_host))
ParallelOperation(operations).run()
for operation in operations:
try:
operation.get_ret()
except Exception, e:
raise ExceptionNoStackTraceNeeded('SyncPackages failed' + str(e))
logger.info('Successfully cleaned the cluster')
class MigratePackages(Operation):
"""
Migrates packages from another $GPHOME to this one
This functionality is meant to facilitate minor version upgrade, whereby old packages
need to be brought over from the older $GPHOME to the newer $GPHOME.
Presumably, this could also be used to migrate packages across arbitrary choices
of $GPHOMEs. However, the migration will only succeed if the packages being migrated
are actually compatible with the target GPDB.
"""
def __init__(self, from_gphome, to_gphome):
self.from_gphome, self.to_gphome = from_gphome, to_gphome
def execute(self):
if not os.path.samefile(self.to_gphome, GPHOME):
raise ExceptionNoStackTraceNeeded('The target GPHOME, %s, must match the current $GPHOME used to launch gppkg.' % self.to_gphome)
if os.path.samefile(self.to_gphome, self.from_gphome):
raise ExceptionNoStackTraceNeeded('The source and target GPHOMEs, %s => %s, must differ for packages to be migrated.' % (self.from_gphome, self.to_gphome))
# TODO: AK: Given an invalid from_gphome, we'll end up creating a 'share/packages' subdirectory within it.
old_archive_path = os.path.join(self.from_gphome, ARCHIVE_PATH)
MakeDir(old_archive_path).run()
packages = ListFilesByPattern(old_archive_path, '*' + GPPKG_EXTENSION).run()
if not packages:
logger.info('There are no packages to migrate from %s.' % self.from_gphome)
return
logger.info('The following packages will be migrated: %s' % ', '.join(packages))
for package in packages:
package_path = os.path.join(old_archive_path, package)
try:
InstallPackageLocally(package_path).run()
except AlreadyInstalledError:
logger.info("%s is already installed." % package)
except Exception:
logger.exception("Failed to migrate %s from %s" % (old_archive_path, package))
logger.info('The package migration has completed.')
class GpScp(Operation):
"""
TODO: AK: This obviously does not belong here. My preference would be that it remain here until
the following problem is solved.
MPP-15270 - Improve performance of file transfer across large clusters
I suggest:
We consume an extra parameter 'fanout'. We partition the host_list into a number of buckets
given by 'fanout'. For each bucket, we scp the artifact to the first host in the bucket, and then
we recursively invoke GpScp on that machine for the remaining hosts in its bucket.
GpScp := ParallelOperation([ A(i) for i in range(0, n) ])
A := SerialOperation(B, C)
B := scp source_path target_path @ host_i
where host_i := the first host in the ith bucket
C := RemoteOperation(GpScp(target_path, target_path, host_list_i))
where host_list_i := the remaining hosts in the ith bucket
"""
def __init__(self, source_path, target_path, host_list):
self.source_path = source_path
self.target_path = target_path
self.host_list = host_list
self.pool = None
def execute(self):
self.pool = WorkerPool()
for host in self.host_list:
self.pool.addCommand(Scp(name = 'copying %s to %s' % (self.source_path, host),
srcFile = self.source_path,
dstFile = self.target_path,
dstHost = host))
self.pool.join()
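# Illustrative sketch (not part of the original module): one way the fanout idea in
# the GpScp TODO could partition a host list into buckets. This is only the bucketing
# step, not the recursive scp itself; 'fanout' and the host names are hypothetical.
def _example_fanout_buckets(host_list, fanout=4):
    buckets = [host_list[i::fanout] for i in range(fanout)]
    return [bucket for bucket in buckets if bucket]

# _example_fanout_buckets(['sdw1', 'sdw2', 'sdw3', 'sdw4', 'sdw5'], fanout=2)
# -> [['sdw1', 'sdw3', 'sdw5'], ['sdw2', 'sdw4']]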
class HostOperation(Operation):
"""
TODO: AK: This obviously does not belong here. My preference would be to move it to gppylib.operations.utils
when another consumer becomes clear.
TODO: AK: For generality, the underlying operation should inherit/implement NestedHostOperation so that
it may be initialized with information about the host to which it's been bound. This is fortunately not necessary
for our purposes here, so it's deferrable.
TODO: AK: Build a SegHostOperation that wraps this and is driven by GpArray content.
TODO: AK: Implement something similar for a SegmentOperation + NestedSegmentOperation.
TODO: AK: This (as well as ParallelOperation) would benefit from an appropriate choice of return value. The likely
choice would be: [op.get_ret() for op in self.operations]
"""
def __init__(self, operation, host_list):
self.operation = operation
self.host_list = host_list
def execute(self):
operations = []
for host in self.host_list:
operations.append(RemoteOperation(self.operation, host))
ParallelOperation(operations).run()
for operation in operations:
operation.get_ret()
|
lintzc/gpdb
|
gpMgmt/bin/gppylib/operations/package.py
|
Python
|
apache-2.0
| 53,482
|
[
"ORCA"
] |
a66bbcdbce75c7dcc38000e376de29b45d08286c5f95c8a903b4791b52b558d0
|
# Models implement the base Local Inspector Value-Entry Specification (LIVES)
# v1.0 - http://www.yelp.com/healthscores
from django.contrib.gis.db import models
from django.utils.translation import ugettext_lazy
class Establishment(models.Model):
"""Business or restaurant property"""
STATUS_CHOICES = (('deleted', ugettext_lazy('Deleted')), ('active', ugettext_lazy('Active')))
TYPE_CHOICES = (
(0, ugettext_lazy('Unknown')),
(1, ugettext_lazy('Restaurant')),
(2, ugettext_lazy('Food Stand')),
(3, ugettext_lazy('Mobile Food')),
(4, ugettext_lazy('Push Cart')),
(5, ugettext_lazy('Private School\'s Cafeteria')),
(6, ugettext_lazy('Educational Food Service')),
(9, ugettext_lazy('Elderly Nutrition')),
(11, ugettext_lazy('Public School\'s Cafeteria')),
(12, ugettext_lazy('Elderly Nutrition')),
(14, ugettext_lazy('Limited Food')),
(15, ugettext_lazy('Commissary (Pushcarts/Mobile Food),')),
(16, ugettext_lazy('Institutional Food Service')),
(20, ugettext_lazy('Lodging')),
(21, ugettext_lazy('Bed & Breakfast Home')),
(22, ugettext_lazy('Summer Camp')),
(23, ugettext_lazy('Bed & Breakfast Inn')),
(25, ugettext_lazy('Primitive Experience Camp')),
(26, ugettext_lazy('Resident Camp')),
(30, ugettext_lazy('Meat Market')),
(40, ugettext_lazy('Rest/Nursing Home')),
(41, ugettext_lazy('Hospital')),
(42, ugettext_lazy('Child Care')),
(43, ugettext_lazy('Residential Care')),
(44, ugettext_lazy('School Building')),
(45, ugettext_lazy('Local Confinement')),
(46, ugettext_lazy('Private Boarding School/College')),
(47, ugettext_lazy('Orphanage, Children\'s Home')),
(48, ugettext_lazy('Adult Day Care')),
(49, ugettext_lazy('Adult Day Service')),
(50, ugettext_lazy('Seasonal Swimming Pool')),
(51, ugettext_lazy('Seasonal Wading Pool')),
(52, ugettext_lazy('Seasonal Spa')),
(53, ugettext_lazy('Year-Round Swimming Pool')),
(54, ugettext_lazy('Year-Round Wading Pool')),
(55, ugettext_lazy('Year-Round Spa')),
(61, ugettext_lazy('Tattoo Artist')),
(72, ugettext_lazy('Summer Feeding Program')),
(73, ugettext_lazy('Temporary Food Establishment')),
)
external_id = models.CharField(ugettext_lazy("External ID"), max_length=128)
state_id = models.BigIntegerField(ugettext_lazy("State ID"))
property_id = models.CharField(ugettext_lazy("Property ID"), max_length=128, blank=True)
image_url = models.URLField(max_length=255, blank=True)
name = models.CharField(ugettext_lazy("Name"), max_length=255)
pretty_name = models.CharField(ugettext_lazy("Pretty Name"), max_length=255, blank=True)
type = models.PositiveIntegerField(ugettext_lazy("Type"), default=0, choices=TYPE_CHOICES)
address = models.CharField(ugettext_lazy("Address"), max_length=255)
city = models.CharField(ugettext_lazy("City"), max_length=64)
county = models.CharField(ugettext_lazy("County"), max_length=64, db_index=True)
state = models.CharField(ugettext_lazy("State"), max_length=64)
postal_code = models.CharField(ugettext_lazy("Postal Code"), max_length=16)
phone_number = models.CharField(ugettext_lazy("Phone Number"), max_length=64, blank=True)
opening_date = models.DateTimeField(ugettext_lazy("Opening Date"))
update_date = models.DateTimeField(ugettext_lazy("Update Date"), null=True, blank=True, db_index=True)
status = models.CharField(ugettext_lazy("Status"), choices=STATUS_CHOICES, max_length=32,
default='active')
location = models.PointField(ugettext_lazy("location"), null=True, blank=True)
hygeine_count = models.SmallIntegerField(ugettext_lazy("Hygeine Count"), default=-1, blank=True)
cook_temp_count = models.SmallIntegerField(ugettext_lazy("Cooking Temperature Count"), default=-1, blank=True)
source_count = models.SmallIntegerField(ugettext_lazy("Unsafe Source Count"), default=-1, blank=True)
hold_temp_count = models.SmallIntegerField(ugettext_lazy("Holding Temperature Count"), default=-1, blank=True)
contamination_count = models.SmallIntegerField(ugettext_lazy("Contamination Count"), default=-1, blank=True)
objects = models.GeoManager()
@property
def has_risk_data(self):
return any([self.hygeine_count > -1,
self.cook_temp_count > -1,
self.source_count > -1,
self.hold_temp_count > -1,
self.contamination_count > -1])
class Meta(object):
unique_together = ('external_id', 'county')
def __str__(self):
return self.name
class Inspection(models.Model):
"""Information about inspectors' visits to establishments"""
TYPE_CHOICES = (
(0, ugettext_lazy('Unknown')),
(1, ugettext_lazy('Routine Inspection')),
(2, ugettext_lazy('Re-inspection')),
(5, ugettext_lazy('Permit')),
(6, ugettext_lazy('Visit')),
(8, ugettext_lazy('Name Change')),
(9, ugettext_lazy('Verification')),
(10, ugettext_lazy('Other')),
(12, ugettext_lazy('Status Change')),
(13, ugettext_lazy('Pre-opening Visit')),
(31, ugettext_lazy('Critical Violation Visit')),
(32, ugettext_lazy('Critical Violation Followup')),
)
establishment = models.ForeignKey(Establishment,
verbose_name=ugettext_lazy("Establishment"),
related_name='inspections')
external_id = models.CharField(ugettext_lazy("External ID"), max_length=128)
date = models.DateTimeField(ugettext_lazy("Date"), db_index=True)
score = models.FloatField(ugettext_lazy("Score"), null=True, blank=True)
description = models.TextField(ugettext_lazy("Description"), blank=True)
type = models.PositiveIntegerField(ugettext_lazy("Type"), default=0,
choices=TYPE_CHOICES)
update_date = models.DateTimeField(ugettext_lazy("Update Date"), null=True, blank=True,
db_index=True)
hygeine_count = models.SmallIntegerField(ugettext_lazy("Hygeine Count"), default=-1, blank=True)
cook_temp_count = models.SmallIntegerField(ugettext_lazy("Cooking Temperature Count"), default=-1, blank=True)
source_count = models.SmallIntegerField(ugettext_lazy("Unsafe Source Count"), default=-1, blank=True)
hold_temp_count = models.SmallIntegerField(ugettext_lazy("Holding Temperature Count"), default=-1, blank=True)
contamination_count = models.SmallIntegerField(ugettext_lazy("Contamination Count"), default=-1, blank=True)
def __str__(self):
return "Inspection #{}".format(self.pk)
@property
def has_risk_data(self):
return any([self.hygeine_count > -1,
self.cook_temp_count > -1,
self.source_count > -1,
self.hold_temp_count > -1,
self.contamination_count > -1])
class Violation(models.Model):
"""Information about specific inspection violations"""
RISK_FACTOR_CHOICES = (
(0, ugettext_lazy('Unknown')),
(1, ugettext_lazy('Improper Holding Temperature')),
(2, ugettext_lazy('Improper Cooking Temperature')),
(3, ugettext_lazy('Contaminated Equipment')),
(4, ugettext_lazy('Poor Hygiene')),
(5, ugettext_lazy('Food From Unsafe Sources')),
(6, ugettext_lazy('None')),
)
establishment = models.ForeignKey(Establishment,
verbose_name=ugettext_lazy("Establishment"),
related_name='violations')
inspection = models.ForeignKey(Inspection, related_name='violations',
verbose_name=ugettext_lazy("Inspection"), null=True,
blank=True)
external_id = models.CharField(ugettext_lazy("External ID"), max_length=128)
date = models.DateTimeField(ugettext_lazy("Date"), db_index=True)
code = models.CharField(ugettext_lazy("Code"), max_length=32)
description = models.TextField(ugettext_lazy("Description"), blank=True)
update_date = models.DateTimeField(ugettext_lazy("Update Date"), null=True, blank=True,
db_index=True)
risk_factor = models.PositiveIntegerField(ugettext_lazy("Risk Factor"), default=0,
choices=RISK_FACTOR_CHOICES)
deduction_value = models.DecimalField(ugettext_lazy("Deduction Value"), default=0,
max_digits=4, decimal_places=2)
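# Illustrative sketch (not part of the original project): a typical query over these
# models -- active establishments in a county that actually carry risk-factor data.
# The county name is hypothetical.
def _example_establishments_with_risk_data(county='Durham'):
    queryset = Establishment.objects.filter(county=county, status='active')
    return [establishment for establishment in queryset if establishment.has_risk_data]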
|
codefordurham/Durham-Restaurants
|
inspections/models.py
|
Python
|
bsd-3-clause
| 8,811
|
[
"VisIt"
] |
c2dcb0f3bcb652a32ad4c4c2ffbd413bc204350e6c0c1bcea88bdf69fe93047f
|
__author__ = 'tonycastronova'
import unittest
from models.ueb import ueb
from utilities.gui import parse_config
import wrappers
from coordinator.engine import Coordinator
import time
from transform import space
class testUEB(unittest.TestCase):
    def setUp(self):
pass
def test_run(self):
        # initialize ueb
mdl = '../ueb.mdl'
config_params = parse_config(mdl)
UEB = ueb.ueb(config_params)
# run
UEB.run(None)
# finish
UEB.save()
print 'done'
def test_geometry_creation_and_ordering(self):
        # initialize ueb
mdl = '../ueb.mdl'
config_params = parse_config(mdl)
UEB = ueb.ueb(config_params)
# get the active cells (these are looped through in run)
cells = UEB.activeCells
# build geometries
xdim = UEB.C_dimlen1.value
ydim = UEB.C_dimlen2.value
geoms = UEB.build_geometries(range(xdim), range(ydim))
# create point tuples from geoms
pts = [ (int(g.GetX()), int(g.GetY())) for g in geoms]
# make sure the ordering is correct
self.assertTrue(cells == pts)
def test_netcdf_input(self):
engine = Coordinator()
args = dict(ncpath = './TWDEF_distributed/prcp.nc',
tdim = 'time',
xdim = 'x',
ydim = 'y',
tunit = 'hours',
starttime = '10-26-2015 00:00:00',
type = wrappers.Types.NETCDF)
# add the WaterOneFlow component to the engine
engine.add_model(id=1234, attrib=args)
# load ueb component
mdl = '../ueb.mdl'
args = dict(mdl = mdl)
# config_params = parse_config(mdl)
engine.add_model(id=1235, attrib=args)
# assert that the models have been added correctly
models = engine.get_all_models()
self.assertTrue(len(models) == 2)
spatial_interpolation = space.spatial_nearest_neighbor()
# add a link from NetCDF to UEB
netcdf = engine.get_output_exchange_items_summary(id=1234)
ueb = engine.get_input_exchange_items_summary(id=1235)
engine.add_link(from_id=1234, from_item_id = netcdf[0]['name'],
to_id=1235, to_item_id = ueb[0]['name'],
spatial_interp=spatial_interpolation,
temporal_interp=None,
uid=None)
links = engine.get_all_links()
self.assertTrue(len(links) == 1)
# run the simulation
engine.run_simulation()
print 'done'
|
Castronova/EMIT
|
models/ueb/test/test_ueb.py
|
Python
|
gpl-2.0
| 2,649
|
[
"NetCDF"
] |
64407e3fe03ca0b4fb8c6bee76bc1577209e87ddd41116905aae9f5176448f31
|
"""
The B{0install} command-line interface.
"""
# Copyright (C) 2011, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
from zeroinstall import _, logger
import os, sys
from optparse import OptionParser
import logging
from zeroinstall import SafeException, DryRun
valid_commands = ['add', 'whatchanged',
'config', 'import', 'list', 'search', 'add-feed', 'remove-feed', 'list-feeds',
'digest', 'slave']
class UsageError(Exception): pass
def _ensure_standard_fds():
"""Ensure stdin, stdout and stderr FDs exist, to avoid confusion."""
for std in (0, 1, 2):
try:
os.fstat(std)
except OSError:
fd = os.open(os.devnull, os.O_RDONLY)
if fd != std:
os.dup2(fd, std)
os.close(fd)
class NoCommand(object):
"""Handle --help and --version"""
def add_options(self, parser):
parser.add_option("-V", "--version", help=_("display version information"), action='store_true')
def handle(self, config, options, args):
if options.version:
import zeroinstall
print("0install (zero-install) " + zeroinstall.version)
print("Copyright (C) 2013 Thomas Leonard")
print(_("This program comes with ABSOLUTELY NO WARRANTY,"
"\nto the extent permitted by law."
"\nYou may redistribute copies of this program"
"\nunder the terms of the GNU Lesser General Public License."
"\nFor more information about these matters, see the file named COPYING."))
sys.exit(0)
raise UsageError()
def main(command_args, config = None):
"""Act as if 0install was run with the given arguments.
@type command_args: [str]
@type config: L{zeroinstall.injector.config.Config} | None
@arg command_args: array of arguments (e.g. C{sys.argv[1:]})"""
_ensure_standard_fds()
if config is None:
from zeroinstall.injector.config import load_config
config = load_config()
# The first non-option argument is the command name (or "help" if none is found).
command = None
for i, arg in enumerate(command_args):
if not arg.startswith('-'):
command = arg
command_args = command_args[:i] + command_args[i + 1:]
break
elif arg == '--':
break
verbose = False
try:
# Configure a parser for the given command
my_name = os.path.basename(sys.argv[0])
if my_name == '0install-python-fallback': my_name = '0install' # Hack for python-fallback
if command:
if command not in valid_commands:
raise SafeException(_("Unknown sub-command '%s': try --help") % command)
module_name = command.replace('-', '_')
cmd = __import__('zeroinstall.cmd.' + module_name, globals(), locals(), [module_name], 0)
parser = OptionParser(usage=_("usage: %s %s [OPTIONS] %s") % (my_name, command, cmd.syntax))
else:
cmd = NoCommand()
parser = OptionParser(usage=_("usage: %s COMMAND\n\nTry --help with one of these:%s") %
(my_name, "\n\n0install " + '\n0install '.join(valid_commands)))
parser.add_option("-c", "--console", help=_("never use GUI"), action='store_false', dest='gui')
parser.add_option("", "--dry-run", help=_("just print what would be executed"), action='store_true')
parser.add_option("-g", "--gui", help=_("show graphical policy editor"), action='store_true')
parser.add_option("-v", "--verbose", help=_("more verbose output"), action='count')
parser.add_option("", "--with-store", help=_("add an implementation cache"), action='append', metavar='DIR')
cmd.add_options(parser)
(options, args) = parser.parse_args(command_args)
verbose = options.verbose
if options.verbose:
if options.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
import zeroinstall
logger.info(_("Running 0install %(version)s %(args)s; Python %(python_version)s"), {'version': zeroinstall.version, 'args': repr(command_args), 'python_version': sys.version})
if options.with_store:
from zeroinstall import zerostore
for x in options.with_store:
config.stores.stores.append(zerostore.Store(os.path.abspath(x)))
logger.info(_("Stores search path is now %s"), config.stores.stores)
config.handler.dry_run = bool(options.dry_run)
if config.handler.dry_run:
if options.gui is True:
raise SafeException(_("Can't use --gui with --dry-run"))
options.gui = False
cmd.handle(config, options, args)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
sys.exit(1)
except UsageError:
parser.print_help()
sys.exit(1)
except DryRun as ex:
print(_("[dry-run]"), ex)
except SafeException as ex:
if verbose: raise
try:
from zeroinstall.support import unicode
print(unicode(ex), file=sys.stderr)
except:
print(repr(ex), file=sys.stderr)
sys.exit(1)
return
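# Illustrative sketch (not part of the original module): main() acts as if 0install
# were run with the given argument vector, so a hypothetical command-line wrapper
# could drive it like this (sys is already imported at the top of this module).
def _example_entry_point():
    main(sys.argv[1:])  # e.g. main(['list']) or main(['--version'])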
|
linuxmidhun/0install
|
zeroinstall/cmd/__init__.py
|
Python
|
lgpl-2.1
| 4,696
|
[
"VisIt"
] |
6d77aa0836ca0a9e7361a8c4a4b105a8d50fa6b04ce9da2481b86da36b62a5ec
|
#
# Copyright (C) 2013,2014,2015,2016 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd._system as es
import espressomd
from espressomd import thermostat
from espressomd import code_info
from espressomd import integrate
from espressomd import electrostatics
from espressomd import electrostatic_extensions
import numpy
print("""
=======================================================
= p3m.py =
=======================================================
Program Information:""")
print(code_info.features())
dev = "cpu"
# System parameters
#############################################################
# 10 000 Particles
box_l = 10.7437
density = 0.7
# Interaction parameters (repulsive Lennard Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 1.12246
lj_cap = 20
# Integration parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.skin = 0.4
#es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0')
thermostat.Thermostat().set_langevin(1.0, 1.0)
# warmup integration (with capped LJ potential)
warm_steps = 100
warm_n_times = 30
# do the warmup until the particles have at least the distance min_dist
min_dist = 0.9
# integration
int_steps = 1000
int_n_times = 10
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig,
cutoff=lj_cut, shift="auto")
system.non_bonded_inter.set_force_cap(lj_cap)
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())
# Particle setup
#############################################################
volume = box_l * box_l * box_l
n_part = int(volume * density)
for i in range(n_part):
system.part.add(id=i, pos=numpy.random.random(3) * system.box_l)
system.analysis.distto(0)
print("Simulate {} particles in a cubic simulation box {} at density {}."
.format(n_part, box_l, density).strip())
print("Interactions:\n")
act_min_dist = system.analysis.mindist()
print("Start with minimal distance {}".format(act_min_dist))
system.max_num_cells = 2744
# Assign charge to particles
for i in range(n_part / 2 - 1):
system.part[2 * i].q = -1.0
system.part[2 * i + 1].q = 1.0
# P3M setup after charge assigned
#############################################################
print("\nSCRIPT--->Create p3m\n")
p3m = electrostatics.P3M(bjerrum_length=2.0, accuracy=1e-2)
print("\nSCRIPT--->Add actor\n")
system.actors.add(p3m)
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in p3m_params.keys():
print("{} = {}".format(key, p3m_params[key]))
print("\nSCRIPT--->Explicit tune call\n")
p3m._tune()
print("\nSCRIPT--->P3M parameter:\n")
p3m_params = p3m.get_params()
for key in p3m_params.keys():
print("{} = {}".format(key, p3m_params[key]))
# elc=electrostatic_extensions.ELC(maxPWerror=1.0,gap_size=1.0)
# system.actors.add(elc)
print(system.actors)
#############################################################
# Warmup Integration #
#############################################################
# open Observable file
obs_file = open("pylj_liquid.obs", "w")
obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n")
print("""
Start warmup integration:
At maximum {} times {} steps
Stop if minimal distance is larger than {}
""".strip().format(warm_n_times, warm_steps, min_dist))
# set LJ cap
lj_cap = 20
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# Warmup Integration Loop
i = 0
while (i < warm_n_times and act_min_dist < min_dist):
integrate.integrate(warm_steps)
# Warmup criterion
act_min_dist = system.analysis.mindist()
i += 1
# Increase LJ cap
lj_cap = lj_cap + 10
system.non_bonded_inter.set_force_cap(lj_cap)
# Just to see what else we may get from the c code
print("""
ro variables:
cell_grid {0.cell_grid}
cell_size {0.cell_size}
local_box_l {0.local_box_l}
max_cut {0.max_cut}
max_part {0.max_part}
max_range {0.max_range}
max_skin {0.max_skin}
n_nodes {0.n_nodes}
n_part {0.n_part}
n_part_types {0.n_part_types}
periodicity {0.periodicity}
transfer_rate {0.transfer_rate}
verlet_reuse {0.verlet_reuse}
""".format(system))
# write parameter file
set_file = open("pylj_liquid.set", "w")
set_file.write("box_l %s\ntime_step %s\nskin %s\n" %
(box_l, system.time_step, system.skin))
#############################################################
# Integration #
#############################################################
print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps))
# remove force capping
lj_cap = 0
system.non_bonded_inter.set_force_cap(lj_cap)
print(system.non_bonded_inter[0, 0].lennard_jones)
# print initial energies
energies = system.analysis.energy()
print(energies)
j = 0
for i in range(0, int_n_times):
print("run %d at time=%f " % (i, system.time))
integrate.integrate(int_steps)
energies = system.analysis.energy()
print(energies)
obs_file.write('{ time %s } %s\n' % (system.time, energies))
# write end configuration
end_file = open("pylj_liquid.end", "w")
end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l))
end_file.write("{ particles {id pos type} }")
for i in range(n_part):
end_file.write("%s\n" % system.part[i].pos)
obs_file.close()
set_file.close()
end_file.close()
# terminate program
print("\nFinished.")
|
tbereau/espresso
|
samples/python/p3m.py
|
Python
|
gpl-3.0
| 6,612
|
[
"ESPResSo"
] |
7aaa1e3affa5d283d5d715a908d23d97bbfca4f25b59fa24618fd5ce25e7f89c
|
import re
positive_words = set([
"addicting", "addictingly", "admirable", "admirably", "admire", "admires", "admiring", "adorable",
"adorably", "adore", "adored", "adoring", "amaze", "amazed", "amazes", "amazing",
"angelic", "appeal", "appealed", "appealing", "appealingly", "appeals", "attentive", "attracted",
"attractive", "awesome", "awesomely", "beautiful", "beautifully", "best", "bliss", "bold",
"boldly", "boss", "bravo", "breath-taking", "breathtaking", "calm", "cared", "cares",
"caring", "celebrate", "celebrated", "celebrating", "charm", "charmed", "charming", "charmingly",
"cheer", "cheered", "cheerful", "cheerfully", "classic", "colorful", "colorfully", "colourful",
"colourfully", "comfort", "comfortably", "comforting", "comfortingly", "comfy", "competent", "competently",
"congrats", "congratulations", "considerate", "considerately", "cool", "coolest", "courteous", "courteously",
"creative", "creatively", "cute", "dapper", "dazzled", "dazzling", "dazzlingly", "delicious",
"deliciously", "delight", "delighted", "delightful", "delightfully", "dope", "dynamic", "ecstatic",
"efficient", "efficiently", "elegant", "elegantly", "eloquent", "embrace", "embraced", "embracing",
"energetic", "energetically", "engaging", "engagingly", "enjoy", "enjoyed", "enjoying", "enticing",
"enticingly", "essential", "excellent", "excellently", "exceptional", "excitement", "exciting", "excitingly",
"exquisite", "exquisitely", "fantastic", "fascinating", "fashionable", "fashionably", "fast", "favorite",
"favorites", "favourite", "favourites", "fetching", "fine", "flattering", "fond", "fondly",
"friendly", "fulfilling", "fun", "generous", "generously", "genius", "genuine", "glamor",
"glamorous", "glamorously", "glamour", "glamourous", "glamourously", "glorious", "good", "good-looking",
"goodlooking", "gorgeous", "gorgeously", "grace", "graceful", "gracefully", "great", "handsome",
"happiness", "happy", "healthy", "heartwarming", "heavenly", "helpful", "hip", "imaginative",
"incredible", "ingenious", "innovative", "inspirational", "inspired", "inspiring", "intelligent", "interesting",
"invigorating", "irresistible", "irresistibly", "joy", "kawaii", "keen", "knowledgeable", "liked",
"lively", "love", "loved", "lovely", "loving", "lucky", "luscious", "lusciously",
"magical", "magnificent", "marvelous", "marvelously", "masterful", "masterfully", "memorable", "mmm",
"mmmm", "mmmmm", "natural", "neat", "neatly", "nice", "nicely", "nifty",
"optimistic", "outstanding", "outstandingly", "overjoyed", "pampered", "peace", "peaceful", "phenomenal",
"pleasant", "pleasantly", "pleasurable", "pleasurably", "plentiful", "polished", "popular", "positive",
"powerful", "powerfully", "precious", "prettily", "pretty", "profound", "proud", "proudly",
"quick", "quickly", "rad", "radiant", "rejoice", "rejoiced", "rejoicing", "remarkable",
"respectable", "respectably", "respectful", "satisfied", "serenity", "sexily", "sexy", "shiny",
"skilled", "skillful", "slick", "smooth", "spectacular", "spicy", "splendid", "straightforward",
"stunning", "stylish", "stylishly", "sublime", "succulent", "super", "superb", "swell",
"tastily", "tasty", "terrific", "thorough", "thrilled", "thrilling", "tranquil", "tranquility",
"treat", "unreal", "vivacious", "vivid", "warm", "welcoming", "well-spoken", "win",
"wonderful", "wonderfully", "wow", "wowed", "wowing", "wows", "yummy"
])
negative_words = set([
"a-hole", "a-holes", "abandoned", "abandoning", "abuse", "abused", "abysmal", "aggressive",
"agonizing", "agonizingly", "agony", "ahole", "aholes", "alarming", "anger", "angering",
"angry", "appalled", "appalling", "appalls", "argue", "argued", "arguing", "ashamed",
"asinine", "asshole", "assholes", "atrocious", "awful", "awkward", "bad", "badgered",
"badgering", "banal", "bankrupt", "barbaric", "bastard", "bastards", "belittled", "belligerent",
"berated", "bigot", "bigoted", "bigots", "bitch", "bland", "bonkers", "boring",
"bossed-around", "bothered", "bothering", "bothers", "broke", "broken", "broken-hearted", "brokenhearted",
"brutal", "buggy", "bummed", "calamitous", "callous", "cheated", "cheating", "claustrophobic",
"clumsy", "colorless", "colourless", "conceited", "condescending", "confused", "confuses", "confusing",
"contentious", "corrupt", "coward", "cowardly", "cowards", "creeper", "crestfallen", "cringe-worthy",
"cringeworthy", "cruel", "cunt", "cunts", "cursed", "cynical", "d-bag", "d-bags",
"dbag", "dbags", "deal-breaker", "deal-breaking", "degrading", "dehumanized", "dehumanizing", "delay",
"delayed", "deplorable", "depressed", "despicable", "destroyed", "destroying", "destroys", "detestable",
"dick", "dicks", "died", "dirty", "disappointed", "disappointing", "disappoints", "disaster",
"disastrous", "disastrously", "disgruntled", "disgusted", "disgusting", "disgustingly", "dismal", "disorganized",
"disrespectful", "douche", "douchebag", "douchebags", "dour", "dreadful", "dull", "dumb",
"egocentric", "egotistical", "embarrassing", "enraging", "erred", "erring", "error", "excruciating",
"fail", "failed", "failing", "fails", "failure", "fake", "falsehood", "flaw",
"flawed", "flaws", "folly", "fool", "foolish", "fools", "forgettable", "fought",
"freaked", "freaking", "frustrated", "frustrating", "fubar", "fuck", "fuckers", "fugly",
"furious", "gaudy", "ghastly", "gloomy", "greed", "greedy", "grief", "grieve",
"grieved", "grieving", "grouchy", "hassle", "hate", "hated", "hating", "heart-breaking",
"heart-broken", "heartbreaking", "heartbroken", "hellish", "hellishly", "helpless", "horrendous", "horrible",
"horribly", "horrific", "horrifically", "humiliated", "humiliating", "hurt", "hurts", "icky",
"idiot", "idiotic", "ignorant", "ignored", "ill", "immature", "inane", "inattentive",
"incompetent", "incompetently", "incomplete", "inconsiderate", "incorrect", "indoctrinated", "inelegant", "infuriating",
"infuriatingly", "insecure", "insignificant", "insufficient", "insult", "insulted", "insulting", "interrupted",
"jaded", "kill", "lame", "loathsome", "lonely", "lose", "loser", "lost",
"mad", "mean", "mediocre", "melodramatic", "miserable", "miserably", "misery", "missing",
"mistake", "mistreated", "moron", "moronic", "mother-fucker", "mother-fuckers", "motherfucker", "motherfuckers",
"mourn", "mourned", "mugged", "nagging", "nasty", "nazi", "nazis", "negative",
"neurotic", "nonsense", "noo", "nooo", "nooooo", "nut-job", "nut-jobs", "nutjob",
"nutjobs", "objectification", "objectified", "objectifying", "obscene", "odious", "offended", "oppressive",
"over-sensitive", "pain", "painfully", "panic", "panicked", "panicking", "paranoid", "pathetic",
"pessimistic", "pestered", "pestering", "petty", "pissed", "poor", "poorly", "powerless",
"prejudiced", "pretentious", "psychopath", "psychopathic", "psychopaths", "psychotic", "quarrelling", "quarrelsome",
"racist", "rage", "repugnant", "repulsive", "resent", "resentful", "resenting", "retarded",
"revolting", "ridicule", "ridiculed", "ridicules", "robbed", "rude", "sad", "sadistic",
"sadness", "scared", "screwed", "self-centered", "selfcentered", "selfish", "shambolic", "shameful",
"shamefully", "shattered", "shit", "shitty", "shoddy", "sickening", "sloppily", "sloppy",
"slow", "slowly", "smothered", "snafu", "spiteful", "square", "squares", "stereotyped",
"stifled", "stressed", "stressful", "stressing", "stuck", "stuffy", "stupid", "sub-par",
"subpar", "substandard", "suck", "sucks", "suffer", "suffering", "suicide", "superficial",
"terrible", "terribly", "train-wreck", "trainwreck", "ugly", "unappealing", "unattractive", "uncomfortable",
"uncomfy", "unengaging", "unengagingly", "unenticing", "unenticingly", "unexceptionable", "unfair", "unfashionable",
"unfashionably", "unfriendly", "ungraceful", "ungrateful", "unhelpful", "unimpressive", "uninspired", "unjust",
"unlucky", "unnotable", "unpleasant", "unpleasantly", "unsatisfactory", "unsatisfied", "unseemly", "unwelcoming",
"upset", "vicious", "vindictive", "weak", "wreck", "wrecked", "wrecking", "wrecks",
"wtf", "yucky"
])
intensifier_words = set([
"absolutely", "amazingly", "exceptionally", "fantastically", "fucking", "incredibly", "obscenely", "phenomenally",
"profoundly", "really", "remarkably", "ridiculously", "so", "spectacularly", "stunningly", "such",
"totally", "unquestionably", "very"
])
negation_words = set([
"didn't", "don't", "lack", "lacked", "no-one", "nobody", "noone", "not", "wasn't",
])
# Decorator to help udf's handle null input like Pig does (just ignore it and return null)
def null_if_input_null(fn):
def wrapped(*args, **kwargs):
for arg in args:
if arg is None:
return None
for k, v in kwargs.items():
if v is None:
return None
return fn(*args, **kwargs)
wrapped.__name__ = fn.__name__
wrapped.__doc__ = fn.__doc__
wrapped.__dict__.update(fn.__dict__)
return wrapped
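# Example behaviour: any UDF wrapped below that is called with a None argument
# (or None keyword value) returns None instead of raising, mirroring Pig's
# null handling.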
# Returns whether a word is in the positive_words / negative_words sets defined in this library
# Pig 0.9.2 does not have a boolean datatype (this is implemented in Pig 0.10+), so we use 1 = true, 0 = false.
@outputSchema("in_word_set: int")
@null_if_input_null
def in_word_set(word, set_name):
if set_name == 'positive':
        return 1 if word in positive_words else 0
elif set_name == 'negative':
        return 1 if word in negative_words else 0
else:
raise ValueError('Invalid set name. Should be "positive" or "negative".')
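# Worked example (illustrative only, using the word sets defined above):
#   in_word_set("awesome", "positive")  -> 1
#   in_word_set("awesome", "negative")  -> 0
#   in_word_set("awesome", "other")     -> raises ValueError
# Note this cannot be run outside Pig as written, because the outputSchema
# decorator is supplied by Pig's Jython runtime.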
# Estimates whether an ordered bag of words expresses a positive (> 0) or negative (< 0) sentiment.
# Accounts for intensifier words (ex. "very") and negations (ex. "not"), but only if they
# directly precede a word expressing positive/negative sentiment
# (chains, ex. intensifier -> negation -> positive-word are handled)
@outputSchema("sentiment: double")
@null_if_input_null
def sentiment(words_bag):
if len(words_bag) == 0:
return 0.0
score = 0.0
words = [t[0] for t in words_bag if len(t) > 0]
positive = [i for i, word in enumerate(words) if word in positive_words]
negative = [i for i, word in enumerate(words) if word in negative_words]
for idx in positive:
word_score = 1.0
num_negations = 0
i = idx - 1
while i >= 0:
if words[i] in intensifier_words:
word_score += 1
elif words[i] in negation_words:
num_negations += 1
else:
break
i -= 1
score += word_score * ((-0.5 ** num_negations) if num_negations > 0 else 1)
for idx in negative:
word_score = -1.0
num_negations = 0
i = idx - 1
while i >= 0:
if words[i] in intensifier_words:
word_score += 1
elif words[i] in negation_words:
num_negations += 1
else:
break
i -= 1
score += word_score * ((-0.5 ** num_negations) if num_negations > 0 else 1)
return score
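# Worked example (illustrative only): for the ordered bag
# [("not",), ("very",), ("good",)], "good" is a positive word, the preceding
# intensifier "very" raises its weight to 2.0, and the single negation "not"
# flips the sign and halves it, so the contribution is 2.0 * -0.5 = -1.0 and
# sentiment(...) returns -1.0.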
|
mortardata/mortar-examples
|
udfs/jython/twitter_sentiment.py
|
Python
|
apache-2.0
| 11,076
|
[
"exciting"
] |
15ef53d41c12faf022cf83311ff7cf3e69cdd6b622f8c88c92dbccae27d497a4
|
#!/usr/bin/python
# BayesMRI_numpy.py: Calculate the Bayes Factor associated with a given group-level fMRI t-stat map
# This version makes use of the NumPy and NiBabel packages to perform image calculations without FSL
# Written by Tom Johnstone (2016) itjohnstone@gmail.com
# Based on code provided by Zoltan_Dienes (http://www.lifesci.sussex.ac.uk/home/Zoltan_Dienes/inference/Bayes.htm)
import os,re,getopt,sys,math,subprocess,numpy,time
import nibabel as nib
from scipy.stats import norm
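# Example invocation (hypothetical file names, for illustration only):
#   python BayesMRI_numpy.py -t tstat1 -c cope1 -d tdof_t1 -o bayes -n 0.5,0.2
# i.e. a normal prior with mean 0.5 and standard error 0.2; outputs are written
# with the prefix "bayes_".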
def nifti_check_file(filename):
filename,theext = os.path.splitext(filename)
if theext == ".gz":
filename,theext = os.path.splitext(filename)
theext = theext + ".gz"
if theext == ".nii":
filename = filename+".nii"
return filename,os.access(filename,os.R_OK)
elif theext == ".nii.gz":
filename = filename+".nii.gz"
return filename,os.access(filename,os.R_OK)
elif theext == "":
filename = filename+".nii"
if os.access(filename,os.R_OK):
return filename,os.access(filename,os.R_OK)
else:
filename = filename+".nii.gz"
return filename,os.access(filename,os.R_OK)
else:
return filename,False
def main():
start_time = ticks = time.clock()
out_file = ""
iterations = 1000
    opts,args = getopt.getopt(sys.argv[1:], 'ht:c:d:o:i:u:p:n:')
for opt,param in opts:
if opt == '-h':
print ('\nbayesMRI_numpy.py\n')
print ('This program calculates the Bayes Factor associated with a given group level fMRI t-test. You can specify ')
print ('a global uniform prior, a global normal prior, or a spatially varying normal prior based on a previous analysis. ')
print ('For the normal priors, probabilities will be integrated between +/- 5 standard errors of the mean\n')
print ('The program outputs the Bayes Factor and log Bayes Factor for the model compared to the null (as well as the inverse).')
print ('Likelihoods for the model and null are also output.\n')
print ('This program requires Python with NumPy and NiBabel packages installed\n')
print ('Usage:\n\t')
print ('BayesMRI_numpy.py -t <tstat_file> -c <contrast_file> -d <dof_file> [options]\n')
print ('\ttstat_file: tstat file from group analysis')
print ('\tcontrast_file: contrast (cope) file from group analysis')
            print ('\tdof_file: file containing degrees of freedom for the group analysis\n')
print ('Options:')
            print ('\t-o <output_file_stem> prefix for output file names (default = "")\n')
print ('\t-i <iterations> the number of bins used to integrate probabilities (larger = more precise; default = 1000)\n')
print ('\t-u <lower_limit, upper_limit> Uniform prior: a uniform distribution between lower_limit and upper_limit\n')
print ('\t-n <mean,stderr> Normal (Gaussian) prior: a normal distribution with given mean and standard error\n')
print ('\t-p <contrast_file,tstat_file> Data-dependent prior: a spatially varying Gaussian prior with voxelwise mean')
print ('\t and standard error based on a previous analysis\n')
print ('\t-h this information\n\n')
sys.exit()
elif param != '':
if opt == '-t':
tstat_file, file_ok = nifti_check_file(param)
if not file_ok:
print ("Error: Couldn't read t-stat file: "+tstat_file)
sys.exit(1)
tstatfile = nib.load(tstat_file)
            elif opt == '-c':
cope_file, file_ok = nifti_check_file(param)
if not file_ok:
print ("Error: Couldn't read contrast file: "+cope_file)
sys.exit(1)
copefile = nib.load(cope_file)
            elif opt == '-d':
dof_file, file_ok = nifti_check_file(param)
if not file_ok:
print ("Error: Couldn't read DOF file: "+dof_file)
sys.exit(1)
doffile = nib.load(dof_file)
elif opt == '-o':
out_file = param+"_"
elif opt == '-i':
iterations = int(param)
elif opt == '-u':
prior = "uniform"
[lowerstr,upperstr] = param.split(',')
priorlower = float(lowerstr)
priorupper = float(upperstr)
elif opt == '-n':
prior = "normal"
[meanstr,sdstr] = param.split(',')
priormean = float(meanstr)
priorsd = float(sdstr)
priorvar = numpy.square(priorsd)
elif opt == '-p':
prior = "dataprior"
print (prior)
[priorcopefile,priortstatfile] = param.split(',')
if not (os.access(priorcopefile+".nii.gz",os.R_OK) and os.access(priortstatfile+".nii.gz",os.R_OK)):
print ("Error: Couldn't read prior cope or tstat files: "+priorcopefile,priortstatfile)
sys.exit(1)
prior_copefile = nib.load(priorcopefile+".nii.gz")
prior_tstatfile = nib.load(priortstatfile+".nii.gz")
priormean = prior_copefile.get_data()
priortstat = prior_tstatfile.get_data()
priorvar = numpy.square(priormean / priortstat)
else:
print ('Error: Option '+opt+' not recognised. Use -h option for usage')
sys.exit()
cope = copefile.get_data()
tstat = tstatfile.get_data()
stderr = cope / tstat
dof = doffile.get_data()
sd2 = numpy.square(stderr * (1 + 20 / numpy.square(dof)))
likelyhoodTheory = stderr * 0
denom = numpy.sqrt(sd2 * 2 * numpy.pi)
if prior == "uniform":
disttheta = 1.0/(priorupper-priorlower)
elif prior == "normal" or prior == "dataprior":
priorlower = priormean - numpy.sqrt(priorvar) * 5
priorupper = priormean + numpy.sqrt(priorvar) * 5
distdenom = numpy.sqrt(2 * numpy.pi * priorvar)
else:
print ('Error: You must specify a permitted prior distribution. Use -h option for usage')
sys.exit()
incr = (priorupper-priorlower)/iterations
for A in range(0,iterations):
if A/10.0 == round(A/10.0):
print ("iteration: " + str(A))
theta = priorlower + A*incr
if prior == "normal" or prior == "dataprior":
disttheta = norm_pdf(priormean,priorvar,theta,distdenom)
#disttheta = norm(priormean,priorvar).pdf(theta) #need to change the variance to sd if using this function
likelyhoodTheory = likelyhoodTheory + norm_pdf(theta, sd2, cope, denom) * disttheta * incr
#likelyhoodTheory = likelyhoodTheory + norm(theta, sd2).pdf(cope) * disttheta * incr #need to change the variance to sd if using this function
likelyhoodNull = norm_pdf(0, sd2, cope, denom)
#likelyhoodNull = norm(0, sd2).pdf(cope) #need to change the variance to sd if using this function
bayesFactorModel = likelyhoodTheory / likelyhoodNull
bayeslogFactorModel = numpy.log10(bayesFactorModel)
bayesFactorNull = 1 / bayesFactorModel
bayeslogFactorNull = numpy.log10(bayesFactorNull)
imgs = nib.Nifti1Image(likelyhoodTheory, copefile.affine, copefile.header)
nib.save(imgs, out_file+'likelyhoodTheory.nii.gz')
imgs = nib.Nifti1Image(likelyhoodNull, copefile.affine, copefile.header)
nib.save(imgs, out_file+'likelyhoodNull.nii.gz')
imgs = nib.Nifti1Image(bayesFactorModel, copefile.affine, copefile.header)
nib.save(imgs, out_file+'bayesFactorModel.nii.gz')
imgs = nib.Nifti1Image(bayeslogFactorModel, copefile.affine, copefile.header)
nib.save(imgs, out_file+'bayeslogFactorModel.nii.gz')
imgs = nib.Nifti1Image(bayesFactorNull, copefile.affine, copefile.header)
nib.save(imgs, out_file+'bayesFactorNull.nii.gz')
    imgs = nib.Nifti1Image(bayeslogFactorNull, copefile.affine, copefile.header)
nib.save(imgs, out_file+'bayeslogFactorNull.nii.gz')
print ("The likelihood of your data given your theory is in the file: "+out_file+"likelihoodTheory")
print ("The likelihood of your data given the null is in the file: "+out_file+"likelihoodNull")
print ("The Bayes Factor for the model versus the null is in the file: "+out_file+"bayesFactorModel")
print ("The Bayes Factor for the null versus the model is in the file: "+out_file+"bayesFactorNull")
print ("The log (base 10) Bayes Factor for the model versus the null is in the file: "+out_file+"bayeslogFactorModel")
print ("The log (base 10) Bayes Factor for the null versus the model is in the file: "+out_file+"bayeslogFactorNull")
end_time = ticks = time.clock()
time_taken = end_time - start_time
print ("Total time taken: "+str(time_taken)+" seconds")
# Define own version of normal pdf so that the constant denominator doesn't need to be recalculated for each iteration (for speed)
# The script runs about 5 times faster when using this function in place of the built in function
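# For reference, this is just the standard Gaussian density
#   N(x; mean, variance) = exp(-(x - mean)**2 / (2 * variance)) / sqrt(2 * pi * variance)
# with the constant denominator sqrt(2 * pi * variance) precomputed once by the
# caller (the `denom` and `distdenom` values in main() above).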
def norm_pdf(mean, variance, x, thedenom):
# for the normal distribution with mean mn and variance given in varianceFile, return the Y value (probability) corresponding to value X
return numpy.exp(-numpy.square(x - mean) / (2 * variance)) / thedenom
if __name__ == "__main__":
main()
|
TomEmotion/BayesMRI
|
BayesMRI_numpy.py
|
Python
|
gpl-3.0
| 9,348
|
[
"Gaussian"
] |
2439aaddcb8f574670d2bf6d87d5afdbd11f6c8d78a9d0f3aefd0af554fb22d7
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
r"""Determine information about text files.
This module efficiently determines the encoding of text files (see
_classify_encoding for details), accurately identifies binary files, and
provides detailed meta information of text files.
>>> import textinfo
>>> path = __file__
>>> if path.endswith(".pyc"): path = path[:-1]
>>> ti = textinfo.textinfo_from_path(path)
>>> ti.__class__
<class 'textinfo.TextInfo'>
>>> ti.encoding
'utf-8'
>>> ti.file_type_name
'regular file'
>>> ti.is_text
True
>>> ti.lang
'Python'
>>> ti.langinfo
<Python LangInfo>
...plus a number of other useful information gleaned from the file. To see
a list of all useful attributes see
>> list(ti.as_dict().keys())
['encoding', 'file_type', ...]
Note: This module requires at least Python 2.5 to use
`codecs.lookup(<encname>).name`.
"""
_cmdln_doc = """Determine information about text files.
"""
# TODO:
# - [high prio] prefs integration
# - aggregate "is there an explicit encoding decl in this file" from XML, HTML,
# lang-specific, emacs and vi vars decls (as discussed with Shane)
# - fix ti with unicode paths Windows (check on Linux too)
# - '-L|--dereference' option a la `file` and `ls`
# - See: http://webblaze.cs.berkeley.edu/2009/content-sniffing/
# - Shift-JIS encoding is not detected for
# http://public.activestate.com/pub/apc/perl-current/lib/Pod/Simple/t/corpus/s2763_sjis.txt
# [Jan wrote]
# > While the document isn't identified by filename extension as POD,
# > it does contain POD and a corresponding =encoding directive.
# Could potentially have a content heuristic check for POD.
#
# ----------------
# Current Komodo (4.2) Encoding Determination Notes (used for reference,
# but not absolutely followed):
#
# Working through koDocumentBase._detectEncoding:
# encoding_name = pref:encodingDefault (on first start is set
# to encoding from locale.getdefaultlocale() typically,
# fallback to iso8859-1; default locale typically ends up being:
# Windows: cp1252
# Mac OS X: mac-roman
# (modern) Linux: UTF-8)
# encoding = the python name for this
# tryencoding = pref:encoding (no default, explicitly set
# encoding) -- i.e. if there are doc prefs for this
# path, then give this encoding a try. If not given,
# then utf-8 for XML/XSLT/VisualBasic and
# pref:encodingDefault for others (though this is
# all prefable via the 'languages' pref struct).
# tryxmldecl
# trymeta (HTML meta)
# trymodeline
# autodetect (whether to try at all)
#
# if autodetect or tryencoding:
# koUnicodeEncoding.autoDetectEncoding()
# else:
# if encoding.startswith('utf'): # note this is pref:encodingDefault
# check bom
# presume encoding is right (give up if conversion fails)
# else:
# presume encoding is right (given up if fails)
#
# Working through koUnicodeEncoding.autoDetectEncoding:
# if tryxmldecl: ...
# if tryhtmlmeta: ...
# if trymodeline: ...
# use bom: ...
# ----------------
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
from os.path import join, dirname, abspath, basename, exists
import sys
import re
from pprint import pprint
import traceback
import warnings
import logging
import optparse
import codecs
import locale
import langinfo
#---- exceptions and warnings
class TextInfoError(Exception):
pass
class TextInfoConfigError(TextInfoError):
pass
class ChardetImportWarning(ImportWarning):
pass
warnings.simplefilter("once", ChardetImportWarning)
#---- globals
log = logging.getLogger("textinfo")
# For debugging:
DEBUG_CHARDET_INFO = False # gather chardet info
def _to_str(s):
s = '%s' % s
if s.startswith("b'") and s.endswith("'") or s.startswith('b"') and s.endswith('"'):
return s[2:-1]
return s
#---- module API
def textinfo_from_filename(path):
"""Determine test info for the given path **using the filename only**.
No attempt is made to stat or read the file.
"""
return TextInfo.init_from_filename(path)
def textinfo_from_path(path, encoding=None, follow_symlinks=False,
quick_determine_lang=False):
"""Determine text info for the given path.
    This raises EnvironmentError if the path does not exist or could
not be read.
"""
return TextInfo.init_from_path(path, encoding=encoding,
follow_symlinks=follow_symlinks,
quick_determine_lang=quick_determine_lang)
#---- main TextInfo class
class TextInfo(object):
path = None
file_type_name = None # e.g. "regular file", "directory", ...
file_type = None # stat.S_IFMT(os.stat(path).st_mode)
file_mode = None # stat.S_IMODE(os.stat(path).st_mode)
is_text = None
encoding = None
has_bom = None # whether the text has a BOM (Byte Order Marker)
encoding_bozo = False
encoding_bozo_reasons = None
lang = None # e.g. "Python", "Perl", ...
langinfo = None # langinfo.LangInfo instance or None
# Enable chardet-based heuristic guessing of encoding as a last
# resort for file types known to not be binary.
CHARDET_ENABLED = True
CHARDET_THRESHHOLD = 0.9 # >=90% confidence to avoid false positives.
@classmethod
def init_from_filename(cls, path, lidb=None):
"""Create an instance using only the filename to initialize."""
if lidb is None:
lidb = langinfo.get_default_database()
self = cls()
self.path = path
self._classify_from_filename(lidb)
return self
@classmethod
def init_from_path(cls, path, encoding=None, lidb=None,
follow_symlinks=False,
quick_determine_lang=False,
env=None):
"""Create an instance using the filename and stat/read info
from the given path to initialize.
@param follow_symlinks {boolean} can be set to True to have
the textinfo returned for a symlink be for linked-to file. By
default the textinfo is for the symlink itself.
@param quick_determine_lang {boolean} can be set to True to have
processing stop as soon as the language has been determined.
Note that this means some fields will not be populated.
@param env {runtime environment} A "runtime environment" class
whose behaviour is used to influence processing. Currently
it is just used to provide a hook for lang determination
by filename (for Komodo).
"""
if lidb is None:
lidb = langinfo.get_default_database()
self = cls()
self.path = path
self._accessor = PathAccessor(path, follow_symlinks=follow_symlinks)
try:
# TODO: pref: Is a preference specified for this path?
self._classify_from_stat(lidb)
if self.file_type_name != "regular file":
# Don't continue if not a regular file.
return self
# TODO: add 'pref:treat_as_text' a la TextMate (or
# perhaps that is handled in _classify_from_filename())
self._classify_from_filename(lidb, env)
if self.is_text is False:
return self
if self.lang and quick_determine_lang:
return self
if not self.lang:
self._classify_from_magic(lidb)
if self.is_text is False:
return self
if self.lang and quick_determine_lang:
return self
self._classify_encoding(lidb, suggested_encoding=encoding)
if self.is_text is None and self.encoding:
self.is_text = True
if self.is_text is False:
return self
self.text = self._accessor.text
if self.text: # No `self.text' with current UTF-32 hack.
self._classify_from_content(lidb)
return self
finally:
# Free the memory used by the accessor.
del self._accessor
def __repr__(self):
if self.path:
return "<TextInfo %r>" % self.path
else:
return "<TextInfo %r>"\
% _one_line_summary_from_text(self.content, 30)
def as_dict(self):
return dict((k, v) for k, v in list(self.__dict__.items())
if not k.startswith('_'))
def as_summary(self):
"""One-liner string summary of text info."""
d = self.as_dict()
info = []
if self.file_type_name and self.file_type_name != "regular file":
info.append(self.file_type_name)
else:
info.append(self.lang or "???")
if not self.is_text:
info.append("binary")
elif self.encoding:
enc = self.encoding
if self.has_bom:
enc += " (bom)"
info.append(enc)
if DEBUG_CHARDET_INFO and hasattr(self, "chardet_info") \
and self.chardet_info["encoding"]:
info.append("chardet:%s/%.1f%%"
% (self.chardet_info["encoding"],
self.chardet_info["confidence"] * 100.0))
return "%s: %s" % (self.path, ', '.join(info))
def _classify_from_content(self, lidb):
# TODO: Plan:
# - eol_* attrs (test cases for this!)
head = self.text[:self._accessor.HEAD_SIZE]
tail = self.text[-self._accessor.TAIL_SIZE:]
# If lang is unknown, attempt to guess from XML prolog or
# shebang now that we've successfully decoded the buffer.
if self.langinfo is None:
(self.has_xml_prolog, xml_version,
xml_encoding) = self._get_xml_prolog_info_s(head)
if self.has_xml_prolog:
self.xml_version = xml_version
self.xml_encoding = xml_encoding
self.langinfo = lidb.langinfo_from_lang("XML")
self.lang = self.langinfo.name
elif self.text.startswith("#!"):
li = lidb.langinfo_from_magic(self.text, shebang_only=True)
if li:
self.langinfo = li
self.lang = li.name
# Extract Emacs local vars and Vi(m) modeline info and, if the
# lang is still unknown, attempt to use them to determine it.
self.emacs_vars = self._get_emacs_head_vars_s(head)
self.emacs_vars.update(self._get_emacs_tail_vars_s(tail))
self.vi_vars = self._get_vi_vars_s(head)
if not self.vi_vars:
self.vi_vars = self._get_vi_vars_s(tail)
if self.langinfo is None and "mode" in self.emacs_vars:
li = lidb.langinfo_from_emacs_mode(self.emacs_vars["mode"])
if li:
self.langinfo = li
self.lang = li.name
        if self.langinfo is None and ("filetype" in self.vi_vars
                                      or "ft" in self.vi_vars):
vi_filetype = self.vi_vars.get(
"filetype") or self.vi_vars.get("ft")
li = lidb.langinfo_from_vi_filetype(vi_filetype)
if li:
self.langinfo = li
self.lang = li.name
if self.langinfo is not None:
if self.langinfo.conforms_to("XML"):
if not hasattr(self, "has_xml_prolog"):
(self.has_xml_prolog, self.xml_version,
self.xml_encoding) = self._get_xml_prolog_info_s(head)
(self.has_doctype_decl, self.doctype_decl,
self.doctype_name, self.doctype_public_id,
self.doctype_system_id) = self._get_doctype_decl_info_s(head)
# If this is just plain XML, we try to use the doctype
# decl to choose a more specific XML lang.
if self.lang == "XML" and self.has_doctype_decl:
li = lidb.langinfo_from_doctype(
public_id=self.doctype_public_id,
system_id=self.doctype_system_id)
if li and li.name != "XML":
self.langinfo = li
self.lang = li.name
elif self.langinfo.conforms_to("HTML"):
(self.has_doctype_decl, self.doctype_decl,
self.doctype_name, self.doctype_public_id,
self.doctype_system_id) = self._get_doctype_decl_info_s(head)
# Allow promotion to XHTML (or other HTML flavours) based
# on doctype.
if self.lang == "HTML" and self.has_doctype_decl:
li = lidb.langinfo_from_doctype(
public_id=self.doctype_public_id,
system_id=self.doctype_system_id)
if li and li.name != "HTML":
self.langinfo = li
self.lang = li.name
# Look for XML prolog and promote HTML -> XHTML if it
# exists. Note that this wins over a plain HTML doctype.
(self.has_xml_prolog, xml_version,
xml_encoding) = self._get_xml_prolog_info_s(head)
if self.has_xml_prolog:
self.xml_version = xml_version
self.xml_encoding = xml_encoding
if self.lang == "HTML":
li = lidb.langinfo_from_lang("XHTML")
self.langinfo = li
self.lang = li.name
# Attempt to specialize the lang.
if self.langinfo is not None:
li = lidb.specialized_langinfo_from_content(
self.langinfo, self.text)
if li:
self.langinfo = li
self.lang = li.name
def _classify_from_magic(self, lidb):
"""Attempt to classify from the file's magic number/shebang
line, doctype, etc.
Note that this is done before determining the encoding, so we are
working with the *bytes*, not chars.
"""
self.has_bom, bom, bom_encoding = self._get_bom_info()
if self.has_bom:
# If this file has a BOM then, unless something funny is
# happening, this will be a text file encoded with
# `bom_encoding`. We leave that to `_classify_encoding()`.
return
# Without a BOM we assume this is an 8-bit encoding, for the
# purposes of looking at, e.g. a shebang line.
#
# UTF-16 and UTF-32 without a BOM is rare; we won't pick up on,
# e.g. Python encoded as UCS-2 or UCS-4 here (but
# `_classify_encoding()` should catch most of those cases).
head_bytes = self._accessor.head_bytes
li = lidb.langinfo_from_magic(head_bytes)
if li:
log.debug("lang from magic: %s", li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
(has_doctype_decl, doctype_decl, doctype_name, doctype_public_id,
doctype_system_id) = self._get_doctype_decl_info_b(head_bytes)
if has_doctype_decl:
li = lidb.langinfo_from_doctype(public_id=doctype_public_id,
system_id=doctype_system_id)
if li:
log.debug("lang from doctype: %s", li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
def _classify_encoding(self, lidb, suggested_encoding=None):
"""To classify from the content we need to separate text from
binary, and figure out the encoding. This is an imperfect task.
The algorithm here is to go through the following heroics to attempt
to determine an encoding that works to decode the content. If all
such attempts fail, we presume it is binary.
1. Use the BOM, if it has one.
2. Try the given suggested encoding (if any).
3. Check for EBCDIC encoding.
4. Lang-specific (if we know the lang already):
* if this is Python, look for coding: decl and try that
* if this is Perl, look for use encoding decl and try that
* ...
5. XML: According to the XML spec the rule is the XML prolog
specifies the encoding, or it is UTF-8.
6. HTML: Attempt to use Content-Type meta tag. Try the given
charset, if any.
7. Emacs-style "coding" local var.
8. Vi[m]-style "fileencoding" local var.
9. Heuristic checks for UTF-16 without BOM.
10. Give UTF-8 a try, it is a pretty common fallback.
We must do this before a possible 8-bit
`locale.getpreferredencoding()` because any UTF-8 encoded
document will decode with an 8-bit encoding (i.e. will decode,
just with bogus characters).
11. Lang-specific fallback. E.g., UTF-8 for XML, ascii for Python.
12. chardet (http://chardet.feedparser.org/), if CHARDET_ENABLED == True
13. locale.getpreferredencoding()
14. iso8859-1 (in case `locale.getpreferredencoding()` is UTF-8
we must have an 8-bit encoding attempt).
        TODO: Is there a worry about a lot of false positives for
        binary files?
Notes:
- A la Universal Feed Parser, if some
supposed-to-be-authoritative encoding indicator is wrong (e.g.
the BOM, the Python 'coding:' decl for Python),
`self.encoding_bozo` is set True and a reason is appended to
the `self.encoding_bozo_reasons` list.
"""
# 1. Try the BOM.
if self.has_bom is not False: # Was set in `_classify_from_magic()`.
self.has_bom, bom, bom_encoding = self._get_bom_info()
if self.has_bom:
self._accessor.strip_bom(bom)
# Python doesn't currently include a UTF-32 codec. For now
# we'll *presume* that a UTF-32 BOM is correct. The
# limitation is that `self.text' will NOT get set
# because we cannot decode it.
if bom_encoding in ("utf-32-le", "utf-32-be") \
or self._accessor.decode(bom_encoding):
log.debug("encoding: encoding from BOM: %r", bom_encoding)
self.encoding = bom_encoding
return
else:
log.debug("encoding: BOM encoding (%r) was *wrong*",
bom_encoding)
self._encoding_bozo(
"BOM encoding (%s) could not decode %s"
% (bom_encoding, self._accessor))
head_bytes = self._accessor.head_bytes
if DEBUG_CHARDET_INFO:
sys.path.insert(0, os.path.expanduser(
"~/tm/check/contrib/chardet"))
import chardet
del sys.path[0]
self.chardet_info = chardet.detect(head_bytes)
# 2. Try the suggested encoding.
if suggested_encoding is not None:
norm_suggested_encoding = _norm_encoding(suggested_encoding)
if self._accessor.decode(suggested_encoding):
self.encoding = norm_suggested_encoding
return
else:
log.debug("encoding: suggested %r encoding didn't work for %s",
suggested_encoding, self._accessor)
# 3. Check for EBCDIC.
# TODO: Not sure this should be included, chardet may be better
# at this given different kinds of EBCDIC.
        EBCDIC_MAGIC = b'\x4c\x6f\xa7\x94'
if self._accessor.head_4_bytes == EBCDIC_MAGIC:
# This is EBCDIC, but I don't know if there are multiple kinds
# of EBCDIC. Python has a 'ebcdic-cp-us' codec. We'll use
# that for now.
norm_ebcdic_encoding = _norm_encoding("ebcdic-cp-us")
if self._accessor.decode(norm_ebcdic_encoding):
log.debug("EBCDIC encoding: %r", norm_ebcdic_encoding)
self.encoding = norm_ebcdic_encoding
return
else:
log.debug("EBCDIC encoding didn't work for %s",
self._accessor)
# 4. Lang-specific (if we know the lang already).
if self.langinfo and self.langinfo.conformant_attr("encoding_decl_pattern"):
m = self.langinfo.conformant_attr("encoding_decl_pattern") \
.search(head_bytes)
if m:
lang_encoding = m.group("encoding")
norm_lang_encoding = _norm_encoding(lang_encoding.decode('ascii'))
if self._accessor.decode(norm_lang_encoding):
log.debug("encoding: encoding from lang-spec: %r",
norm_lang_encoding)
self.encoding = norm_lang_encoding
return
else:
log.debug("encoding: lang-spec encoding (%r) was *wrong*",
norm_lang_encoding)
self._encoding_bozo(
"lang-spec encoding (%s) could not decode %s"
% (norm_lang_encoding, self._accessor))
# 5. XML prolog
if self.langinfo and self.langinfo.conforms_to("XML"):
has_xml_prolog, xml_version, xml_encoding \
= self._get_xml_prolog_info_b(head_bytes)
if xml_encoding is not None:
norm_xml_encoding = _norm_encoding(xml_encoding.decode('ascii'))
if self._accessor.decode(norm_xml_encoding):
log.debug("encoding: encoding from XML prolog: %r",
norm_xml_encoding)
self.encoding = norm_xml_encoding
return
else:
log.debug("encoding: XML prolog encoding (%r) was *wrong*",
norm_xml_encoding)
self._encoding_bozo(
"XML prolog encoding (%s) could not decode %s"
% (norm_xml_encoding, self._accessor))
# 6. HTML: Attempt to use Content-Type meta tag.
if self.langinfo and self.langinfo.conforms_to("HTML"):
has_http_content_type_info, http_content_type, http_encoding \
= self._get_http_content_type_info_b(head_bytes)
if has_http_content_type_info and http_encoding:
norm_http_encoding = _norm_encoding(http_encoding.decode('ascii'))
if self._accessor.decode(norm_http_encoding):
log.debug("encoding: encoding from HTTP content-type: %r",
norm_http_encoding)
self.encoding = norm_http_encoding
return
else:
log.debug(
"encoding: HTTP content-type encoding (%r) was *wrong*",
norm_http_encoding)
self._encoding_bozo(
"HTML content-type encoding (%s) could not decode %s"
% (norm_http_encoding, self._accessor))
# 7. Emacs-style local vars.
emacs_head_vars = self._get_emacs_head_vars_b(head_bytes)
emacs_encoding = emacs_head_vars.get(b"coding")
if not emacs_encoding:
tail_bytes = self._accessor.tail_bytes
emacs_tail_vars = self._get_emacs_tail_vars_b(tail_bytes)
emacs_encoding = emacs_tail_vars.get(b"coding")
if emacs_encoding:
norm_emacs_encoding = _norm_encoding(emacs_encoding.decode('ascii'))
if self._accessor.decode(norm_emacs_encoding):
log.debug("encoding: encoding from Emacs coding var: %r",
norm_emacs_encoding)
self.encoding = norm_emacs_encoding
return
else:
log.debug("encoding: Emacs coding var (%r) was *wrong*",
norm_emacs_encoding)
self._encoding_bozo(
"Emacs coding var (%s) could not decode %s"
% (norm_emacs_encoding, self._accessor))
# 8. Vi[m]-style local vars.
vi_vars = self._get_vi_vars_b(head_bytes)
vi_encoding = vi_vars.get(b"fileencoding") or vi_vars.get(b"fenc")
if not vi_encoding:
vi_vars = self._get_vi_vars_b(self._accessor.tail_bytes)
vi_encoding = vi_vars.get(b"fileencoding") or vi_vars.get(b"fenc")
if vi_encoding:
norm_vi_encoding = _norm_encoding(vi_encoding.decode('ascii'))
if self._accessor.decode(norm_vi_encoding):
log.debug("encoding: encoding from Vi[m] coding var: %r",
norm_vi_encoding)
self.encoding = norm_vi_encoding
return
else:
log.debug("encoding: Vi[m] coding var (%r) was *wrong*",
norm_vi_encoding)
self._encoding_bozo(
"Vi[m] coding var (%s) could not decode %s"
% (norm_vi_encoding, self._accessor))
# 9. Heuristic checks for UTF-16 without BOM.
utf16_encoding = None
head_odd_bytes = head_bytes[0::2]
head_even_bytes = head_bytes[1::2]
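        # Rationale: in UTF-16-LE an ASCII marker such as b'<?xml' or b'#!'
        # lands entirely in one of the two interleaved byte streams (offsets
        # 0, 2, 4, ...) with NUL bytes in the other stream, and vice versa
        # for UTF-16-BE; hence the startswith/NUL checks below.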
head_markers = [b'<?xml', b'#!']
for head_marker in head_markers:
length = len(head_marker)
if head_odd_bytes.startswith(head_marker) \
and head_even_bytes[0:length] == b'\x00' * length:
utf16_encoding = "utf-16-le"
break
elif head_even_bytes.startswith(head_marker) \
and head_odd_bytes[0:length] == b'\x00' * length:
utf16_encoding = "utf-16-be"
break
internal_markers = [b'coding']
for internal_marker in internal_markers:
length = len(internal_marker)
try:
idx = head_odd_bytes.index(internal_marker)
except ValueError:
pass
else:
if head_even_bytes[idx:idx+length] == b'\x00' * length:
utf16_encoding = "utf-16-le"
try:
idx = head_even_bytes.index(internal_marker)
except ValueError:
pass
else:
if head_odd_bytes[idx:idx+length] == b'\x00' * length:
utf16_encoding = "utf-16-be"
if utf16_encoding:
if self._accessor.decode(utf16_encoding):
log.debug("encoding: guessed encoding: %r", utf16_encoding)
self.encoding = utf16_encoding
return
# 10. Give UTF-8 a try.
norm_utf8_encoding = _norm_encoding("utf-8")
if self._accessor.decode(norm_utf8_encoding):
log.debug("UTF-8 encoding: %r", norm_utf8_encoding)
self.encoding = norm_utf8_encoding
return
# 11. Lang-specific fallback (e.g. XML -> utf-8, Python -> ascii, ...).
# Note: A potential problem here is that a fallback encoding here that
# is a pre-Unicode Single-Byte encoding (like iso8859-1) always "works"
# so the subsequent heuristics never get tried.
fallback_encoding = None
fallback_lang = None
if self.langinfo:
fallback_lang = self.langinfo.name
fallback_encoding = self.langinfo.conformant_attr(
"default_encoding")
if fallback_encoding:
if self._accessor.decode(fallback_encoding):
log.debug("encoding: fallback encoding for %s: %r",
fallback_lang, fallback_encoding)
self.encoding = fallback_encoding
return
else:
log.debug("encoding: %s fallback encoding (%r) was *wrong*",
fallback_lang, fallback_encoding)
self._encoding_bozo(
"%s fallback encoding (%s) could not decode %s"
% (fallback_lang, fallback_encoding, self._accessor))
# 12. chardet (http://chardet.feedparser.org/)
        # Note: I'm leery of using this b/c (a) it's a sizeable perf
# hit and (b) false positives -- for example, the first 8kB of
# /usr/bin/php on Mac OS X 10.4.10 is ISO-8859-2 with 44%
# confidence. :)
# Solution: (a) Only allow for content we know is not binary
# (from langinfo association); and (b) can be disabled via
# CHARDET_ENABLED class attribute.
if self.CHARDET_ENABLED and self.langinfo and self.langinfo.is_text:
try:
import chardet
except ImportError:
warnings.warn("no chardet module to aid in guessing encoding",
ChardetImportWarning)
else:
chardet_info = chardet.detect(head_bytes)
if chardet_info["encoding"] \
and chardet_info["confidence"] > self.CHARDET_THRESHHOLD:
chardet_encoding = chardet_info["encoding"]
norm_chardet_encoding = _norm_encoding(chardet_encoding)
if self._accessor.decode(norm_chardet_encoding):
log.debug("chardet encoding: %r", chardet_encoding)
self.encoding = norm_chardet_encoding
return
# 13. locale.getpreferredencoding()
# Typical values for this:
# Windows: cp1252 (aka windows-1252)
# Mac OS X: mac-roman
# Linux: UTF-8 (modern Linux anyway)
# Solaris 8: 464 (aka ASCII)
locale_encoding = locale.getpreferredencoding()
if locale_encoding:
norm_locale_encoding = _norm_encoding(locale_encoding)
if self._accessor.decode(norm_locale_encoding):
log.debug("encoding: locale preferred encoding: %r",
locale_encoding)
self.encoding = norm_locale_encoding
return
# 14. iso8859-1
norm_fallback8bit_encoding = _norm_encoding("iso8859-1")
if self._accessor.decode(norm_fallback8bit_encoding):
log.debug(
"fallback 8-bit encoding: %r", norm_fallback8bit_encoding)
self.encoding = norm_fallback8bit_encoding
return
# We couldn't find an encoding that works. Give up and presume
# this is binary content.
self.is_text = False
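    # A minimal usage sketch of the classification above (hypothetical path,
    # illustrative only):
    #   ti = textinfo_from_path("some_file.py")
    #   print(ti.encoding, ti.is_text, ti.encoding_bozo_reasons)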
def _encoding_bozo(self, reason):
self.encoding_bozo = True
if self.encoding_bozo_reasons is None:
self.encoding_bozo_reasons = []
self.encoding_bozo_reasons.append(reason)
# c.f. http://www.xml.com/axml/target.html#NT-prolog
_xml_prolog_pat_s = re.compile(
r'''<\?xml
( # strict ordering is reqd but we'll be liberal here
\s+version=['"](?P<ver>.*?)['"]
| \s+encoding=['"](?P<enc>.*?)['"]
)+
.*? # other possible junk
\s*\?>
''',
re.VERBOSE | re.DOTALL
)
_xml_prolog_pat_b = re.compile(
br'''<\?xml
( # strict ordering is reqd but we'll be liberal here
\s+version=['"](?P<ver>.*?)['"]
| \s+encoding=['"](?P<enc>.*?)['"]
)+
.*? # other possible junk
\s*\?>
''',
re.VERBOSE | re.DOTALL
)
def _get_xml_prolog_info(self, head_bytes,
_xml_prolog_pat,
_start,
):
"""Parse out info from the '<?xml version=...' prolog, if any.
Returns (<has-xml-prolog>, <xml-version>, <xml-encoding>). Examples:
(False, None, None)
(True, "1.0", None)
(True, "1.0", "UTF-16")
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check or via
# the subsequent heuristic check for UTF-16 without a BOM.
if not head_bytes.startswith(_start):
return (False, None, None)
# Try to extract more info from the prolog.
match = _xml_prolog_pat.match(head_bytes)
if not match:
if log.isEnabledFor(logging.DEBUG):
log.debug("`%s': could not match XML prolog: '%s'", self.path,
_one_line_summary_from_text(_to_str(head_bytes), 40))
return (False, None, None)
xml_version = match.group("ver")
xml_encoding = match.group("enc")
return (True, xml_version, xml_encoding)
    def _get_xml_prolog_info_s(self, head):
        return self._get_xml_prolog_info(head,
                                         self._xml_prolog_pat_s,
                                         '<?xml',
                                         )
    def _get_xml_prolog_info_b(self, head_bytes):
        return self._get_xml_prolog_info(head_bytes,
                                         self._xml_prolog_pat_b,
                                         b'<?xml',
                                         )
_html_meta_tag_pat_s = re.compile(r"""
(<meta
(?:\s+[\w-]+\s*=\s*(?:".*?"|'.*?'))+ # attributes
\s*/?>)
""",
re.IGNORECASE | re.VERBOSE
)
_html_meta_tag_pat_b = re.compile(br"""
(<meta
(?:\s+[\w-]+\s*=\s*(?:".*?"|'.*?'))+ # attributes
\s*/?>)
""",
re.IGNORECASE | re.VERBOSE
)
_html_attr_pat_s = re.compile(
# Currently requiring XML attrs (i.e. quoted value).
r'''(?:\s+([\w-]+)\s*=\s*(".*?"|'.*?'))'''
)
_html_attr_pat_b = re.compile(
# Currently requiring XML attrs (i.e. quoted value).
br'''(?:\s+([\w-]+)\s*=\s*(".*?"|'.*?'))'''
)
_http_content_type_splitter_s = re.compile(r";\s*")
_http_content_type_splitter_b = re.compile(br";\s*")
def _get_http_content_type_info(self, head_bytes,
_html_meta_tag_pat,
_html_attr_pat,
_http_content_type_splitter,
_http_equiv,
_content,
_content_type,
_charset,
_empty,
_str1,
_str2,
):
"""Returns info extracted from an HTML content-type meta tag if any.
Returns (<has-http-content-type-info>, <content-type>, <charset>).
For example:
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
yields:
(True, "text/html", "utf-8")
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check.
# Otherwise we rely on `chardet` to cover us.
# Parse out '<meta ...>' tags, then the attributes in them.
for meta_tag in _html_meta_tag_pat.findall(head_bytes):
meta = dict((k.lower(), v[1:-1])
for k, v in _html_attr_pat.findall(meta_tag))
if _http_equiv in meta \
and meta[_http_equiv].lower() == _content_type:
content = meta.get(_content, _empty)
break
else:
return (False, None, None)
# We found a http-equiv="Content-Type" tag, parse its content
# attribute value.
parts = [
p.strip() for p in _http_content_type_splitter.split(content)]
if not parts:
return (False, None, None)
content_type = parts[0] or None
for p in parts[1:]:
if p.lower().startswith(_charset):
charset = p[len(_charset):]
if charset and charset[0] in (_str1, _str2):
charset = charset[1:]
if charset and charset[-1] in (_str1, _str2):
charset = charset[:-1]
break
else:
charset = None
return (True, content_type, charset)
def _get_http_content_type_info_s(self, head_bytes):
return self._get_http_content_type_info(head_bytes,
self._html_meta_tag_pat_s,
self._html_attr_pat_s,
self._http_content_type_splitter_s,
"http-equiv",
"content",
"content-type",
"charset=",
"",
"'",
'"',
)
def _get_http_content_type_info_b(self, head_bytes):
return self._get_http_content_type_info(head_bytes,
self._html_meta_tag_pat_b,
self._html_attr_pat_b,
self._http_content_type_splitter_b,
b"http-equiv",
b"content",
b"content-type",
b"charset=",
b"",
b"'",
b'"',
)
# TODO: Note that this isn't going to catch the current HTML 5
# doctype: '<!DOCTYPE html>'
_doctype_decl_re_s = re.compile(r'''
<!DOCTYPE
\s+(?P<name>[a-zA-Z_:][\w:.-]*)
\s+(?:
SYSTEM\s+(["'])(?P<system_id_a>.*?)\2
|
PUBLIC
\s+(["'])(?P<public_id_b>.*?)\4
# HTML 3.2 and 2.0 doctypes don't include a system-id.
(?:\s+(["'])(?P<system_id_b>.*?)\6)?
)
(\s*\[.*?\])?
\s*>
''', re.IGNORECASE | re.DOTALL | re.UNICODE | re.VERBOSE)
_doctype_decl_re_b = re.compile(br'''
<!DOCTYPE
\s+(?P<name>[a-zA-Z_:][\w:.-]*)
\s+(?:
SYSTEM\s+(["'])(?P<system_id_a>.*?)\2
|
PUBLIC
\s+(["'])(?P<public_id_b>.*?)\4
# HTML 3.2 and 2.0 doctypes don't include a system-id.
(?:\s+(["'])(?P<system_id_b>.*?)\6)?
)
(\s*\[.*?\])?
\s*>
''', re.IGNORECASE | re.DOTALL | re.VERBOSE)
def _get_doctype_decl_info(self, head,
_doctype_decl_re,
_doctype,
_spaces,
_space,
):
"""Parse out DOCTYPE info from the given XML or HTML content.
Returns a tuple of the form:
(<has-doctype-decl>, <doctype-decl>,
<name>, <public-id>, <system-id>)
The <public-id> is normalized as per this comment in the XML 1.0
spec:
Before a match is attempted, all strings of white space in the
public identifier must be normalized to single space
characters (#x20), and leading and trailing white space must
be removed.
Examples:
(False, None, None, None, None)
(True, '<!DOCTYPE greeting SYSTEM "hello.dtd">',
'greeting', None, 'hello.dtd'),
(True,
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
'html',
'-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd')
Here is the spec for DOCTYPE decls in XML:
http://www.xml.com/axml/target.html#NT-doctypedecl
We loosely follow this to allow for some decls in HTML that isn't
proper XML. As well, we are only parsing out decls that reference
an external ID, as opposed to those that define entities locally.
"""
if _doctype not in head: # quick out
return (False, None, None, None, None)
m = _doctype_decl_re.search(head)
if not m:
return (False, None, None, None, None)
d = m.groupdict()
name = d.get("name")
system_id = d.get("system_id_a") or d.get("system_id_b")
public_id = d.get("public_id_b")
if public_id:
public_id = re.sub(_spaces, _space, public_id.strip()) # normalize
return (True, m.group(0), name, public_id, system_id)
def _get_doctype_decl_info_s(self, head):
return self._get_doctype_decl_info(head,
self._doctype_decl_re_s,
"<!DOCTYPE",
"\s+",
' ',
)
def _get_doctype_decl_info_b(self, head):
return self._get_doctype_decl_info(head,
self._doctype_decl_re_b,
b"<!DOCTYPE",
b"\s+",
b' ',
)
_emacs_vars_head_pat_s = re.compile(r'-\*-\s*(.*?)\s*-\*-')
_emacs_vars_head_pat_b = re.compile(br'-\*-\s*(.*?)\s*-\*-')
_emacs_head_vars_cache_s = None
_emacs_head_vars_cache_b = None
def _get_emacs_head_vars(self, head_bytes,
_emacs_vars_head_pat,
_one_liner,
_new_line,
_colon,
_semi_colon,
_str1,
_str2,
_mode,
):
"""Return a dictionary of emacs-style local variables in the head.
"Head" emacs vars on the ones in the '-*- ... -*-' one-liner.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check.
# Otherwise we rely on `chardet` to cover us.
# Search the head for a '-*-'-style one-liner of variables.
emacs_vars = {}
if _one_liner in head_bytes:
match = _emacs_vars_head_pat.search(head_bytes)
if match:
emacs_vars_str = match.group(1)
if _new_line in emacs_vars_str:
raise ValueError("local variables error: -*- not "
"terminated before end of line")
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(_semi_colon)
if s.strip()]
if len(emacs_var_strs) == 1 and _colon not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars[_mode] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(
_colon, 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith(_str1) and val.endswith(_str1)
or val.startswith(_str2) and val.endswith(_str2)):
emacs_vars[var] = val[1:-1]
return emacs_vars
def _get_emacs_head_vars_s(self, head_bytes):
if self._emacs_head_vars_cache_s is None:
self._emacs_head_vars_cache_s = self._get_emacs_head_vars(head_bytes,
self._emacs_vars_head_pat_s,
'-*-',
'\n',
':',
';',
'"',
"'",
'mode',
)
return self._emacs_head_vars_cache_s
def _get_emacs_head_vars_b(self, head_bytes):
if self._emacs_head_vars_cache_b is None:
self._emacs_head_vars_cache_b = self._get_emacs_head_vars(head_bytes,
self._emacs_vars_head_pat_b,
b'-*-',
b'\n',
b':',
b';',
b'"',
b"'",
b'mode',
)
return self._emacs_head_vars_cache_b
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_vars_tail_pat_s = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
_emacs_vars_tail_pat_b = re.compile(br"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
_emacs_tail_vars_cache = None
def _get_emacs_tail_vars(self, tail_bytes,
_emacs_vars_tail_pat,
_local_variables,
_continued_for,
_colon,
_space,
_str1,
_str2,
):
r"""Return a dictionary of emacs-style local variables in the tail.
"Tail" emacs vars on the ones in the multi-line "Local
Variables:" block.
>>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# foo: bar\n# End:')
{'foo': 'bar'}
>>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# foo: bar\\\n# baz\n# End:')
{'foo': 'bar baz'}
>>> TextInfo()._get_emacs_tail_vars('# Local Variables:\n# quoted: "bar "\n# End:')
{'quoted': 'bar '}
Parsing is done according to this spec (and according to some
in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_chapter/emacs_33.html#SEC485
"""
# Presuming an 8-bit encoding. If it is UTF-16 or UTF-32, then
# that should have been picked up by an earlier BOM check.
# Otherwise we rely on `chardet` to cover us.
if self._emacs_tail_vars_cache is not None:
return self._emacs_tail_vars_cache
emacs_vars = {}
if _local_variables not in tail_bytes:
self._emacs_tail_vars_cache = emacs_vars
return emacs_vars
match = _emacs_vars_tail_pat.search(tail_bytes)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
# print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix:
line = line[len(prefix):] # strip prefix
if suffix:
line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith(_continued_for):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += _space + line
else:
try:
variable, value = line.split(_colon, 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this
# block.
value = value.strip()
if value.endswith(_continued_for):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
if len(val) > 1 and (val.startswith(_str1) and val.endswith(_str1)
or val.startswith(_str2) and val.endswith(_str2)):
emacs_vars[var] = val[1:-1]
self._emacs_tail_vars_cache = emacs_vars
return emacs_vars
def _get_emacs_tail_vars_s(self, tail_bytes):
return self._get_emacs_tail_vars(tail_bytes,
self._emacs_vars_tail_pat_s,
"Local Variables",
'\\',
':',
' ',
'"',
"'",
)
def _get_emacs_tail_vars_b(self, tail_bytes):
return self._get_emacs_tail_vars(tail_bytes,
self._emacs_vars_tail_pat_b,
b"Local Variables",
b'\\',
b':',
b' ',
b'"',
b"'",
)
    # Note: It might be nice if the parser also indicated which of 'vi',
    # 'vim', 'ex' matched, and the range in the accessor.
_vi_vars_pats_and_splitters_s = [
(re.compile(r'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(r'[ \t]+')),
(re.compile(r'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*(?P<rhs>.*?)$', re.M),
re.compile(r'[ \t:]+')),
(re.compile(r'^(vi|vim([<>=]?\d{3})?):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(r'[ \t]+')),
]
_vi_vars_pats_and_splitters_b = [
(re.compile(br'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(br'[ \t]+')),
(re.compile(br'[ \t]+(vi|vim([<>=]?\d{3})?|ex):\s*(?P<rhs>.*?)$', re.M),
re.compile(br'[ \t:]+')),
(re.compile(br'^(vi|vim([<>=]?\d{3})?):\s*set? (?P<rhs>.*?)(?<!\\):', re.M),
re.compile(br'[ \t]+')),
]
_vi_vars_cache_b = None
_vi_vars_cache_s = None
def _get_vi_vars(self, bytes,
_vi_vars_pats_and_splitters,
_types,
_eq,
_colon,
_ecolon,
):
r"""Return a dict of Vi[m] modeline vars.
See ":help modeline" in Vim for a spec.
>>> TextInfo()._get_vi_vars("/* vim: set ai tw=75: */")
{'ai': None, 'tw': 75}
>>> TextInfo()._get_vi_vars("vim: set ai tw=75: bar")
{'ai': None, 'tw': 75}
>>> TextInfo()._get_vi_vars("vi: set foo:bar")
{'foo': None}
>>> TextInfo()._get_vi_vars(" vi: se foo:bar")
{'foo': None}
>>> TextInfo()._get_vi_vars(" ex: se foo:bar")
{'foo': None}
>>> TextInfo()._get_vi_vars(" vi:noai:sw=3 tw=75")
{'tw': 75, 'sw': 3, 'noai': None}
>>> TextInfo()._get_vi_vars(" vi:noai:sw=3 tw=75")
{'tw': 75, 'sw': 3, 'noai': None}
>>> TextInfo()._get_vi_vars("ex: se foo:bar")
{}
Some edge cases:
>>> TextInfo()._get_vi_vars(r"/* vi:set dir=c\:\tmp: */")
{'dir': 'c:\\tmp'}
"""
# Presume 8-bit encoding... yada yada.
vi_vars = {}
# TODO: Consider reducing support to just "vi:" for speed. This
# function takes way too much time.
if not any(t in bytes for t in _types):
return vi_vars
for pat, splitter in _vi_vars_pats_and_splitters:
match = pat.search(bytes)
if match:
for var_str in splitter.split(match.group("rhs")):
if _eq in var_str:
name, value = var_str.split(_eq, 1)
try:
vi_vars[name] = int(value)
except ValueError:
vi_vars[name] = value.replace(_ecolon, _colon)
else:
vi_vars[var_str] = None
break
return vi_vars
def _get_vi_vars_s(self, bytes):
if self._vi_vars_cache_s is None:
self._vi_vars_cache_s = self._get_vi_vars(bytes,
self._vi_vars_pats_and_splitters_s,
['vi:', 'ex:', 'vim:'],
'=',
':',
'\\:',
)
return self._vi_vars_cache_s
def _get_vi_vars_b(self, bytes):
if self._vi_vars_cache_b is None:
self._vi_vars_cache_b = self._get_vi_vars(bytes,
self._vi_vars_pats_and_splitters_b,
[b'vi:', b'ex:', b'vim:'],
b'=',
b':',
b'\\:',
)
return self._vi_vars_cache_b
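    # Illustrative sketch of the bytes variant above (hedged: with byte
    # input the returned keys are bytes, mirroring the str examples in the
    # docstring):
    #
    #     >>> TextInfo()._get_vi_vars_b(b"/* vim: set ai tw=75: */")
    #     {b'ai': None, b'tw': 75}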
def _get_bom_info(self):
r"""Returns (<has-bom>, <bom>, <bom-encoding>). Examples:
(True, '\xef\xbb\xbf', "utf-8")
(True, '\xff\xfe', "utf-16-le")
(False, None, None)
"""
boms_and_encodings = [ # in order from longest to shortest
(codecs.BOM_UTF32_LE, "utf-32-le"),
(codecs.BOM_UTF32_BE, "utf-32-be"),
(codecs.BOM_UTF8, "utf-8"),
(codecs.BOM_UTF16_LE, "utf-16-le"),
(codecs.BOM_UTF16_BE, "utf-16-be"),
]
head_4 = self._accessor.head_4_bytes
for bom, encoding in boms_and_encodings:
if head_4.startswith(bom):
return (True, bom, encoding)
else:
return (False, None, None)
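    # Why the longest-first ordering above matters (illustrative): the
    # UTF-16-LE BOM b'\xff\xfe' is a prefix of the UTF-32-LE BOM
    # b'\xff\xfe\x00\x00', so testing the 4-byte BOMs first avoids
    # misreporting a UTF-32-LE file as UTF-16-LE.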
def _classify_from_filename(self, lidb, env):
"""Classify from the path *filename* only.
        Sets `lang' and `langinfo', if they can be determined.
"""
filename = basename(self.path)
if env is not None:
li = env.langinfo_from_filename(filename)
if li:
log.debug("lang from env: `%s' -> `%s'", filename, li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
# ...from the ext
idx = 0
while True:
idx = filename.find('.', idx)
if idx == -1:
break
ext = filename[idx:]
li = lidb.langinfo_from_ext(ext)
if li:
log.debug("lang from ext: `%s' -> `%s'", ext, li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
idx += 1
# ...from file basename
li = lidb.langinfo_from_filename(filename)
if li:
log.debug("lang from filename: `%s' -> `%s'", filename, li.name)
self.langinfo = li
self.lang = li.name
self.is_text = li.is_text
return
def _classify_from_stat(self, lidb):
"""Set some `file_*' attributes from stat mode."""
from stat import S_ISREG, S_ISDIR, S_ISLNK, S_ISFIFO, S_ISSOCK, \
S_ISBLK, S_ISCHR, S_IMODE, S_IFMT
stat = self._accessor.stat
st_mode = stat.st_mode
self.file_type = S_IFMT(st_mode)
self.file_mode = S_IMODE(st_mode)
self.file_stat = stat
if S_ISREG(st_mode):
self.file_type_name = "regular file"
elif S_ISDIR(st_mode):
self.file_type_name = "directory"
elif S_ISLNK(st_mode):
self.file_type_name = "symbolic link"
elif S_ISFIFO(st_mode):
self.file_type_name = "fifo"
elif S_ISSOCK(st_mode):
self.file_type_name = "socket"
elif S_ISBLK(st_mode):
self.file_type_name = "block special"
elif S_ISCHR(st_mode):
self.file_type_name = "character special"
def _norm_encoding(encoding):
"""Normalize the encoding name -- where "normalized" is what
Python's codec's module calls it.
Interesting link:
The IANA-registered set of character sets.
http://www.iana.org/assignments/character-sets
"""
try:
# This requires Python >=2.5.
return codecs.lookup(encoding).name
except LookupError:
return encoding
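# Illustrative normalizations via the stdlib codec registry (hedged: the
# exact canonical names come from Python's `codecs` module):
#     codecs.lookup("UTF8").name     -> 'utf-8'
#     codecs.lookup("latin-1").name  -> 'iso8859-1'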
#---- accessor API
# The idea here is to abstract accessing the text file content being
# classified to allow, e.g. classifying content without a file, from
# a Komodo buffer, etc.
class Accessor(object):
"""Virtual base class defining Accessor API for accessing
text content.
"""
# API:
# prop head_bytes -> head 8k bytes
# prop head_4_bytes -> head 4 bytes (useful for BOM detection)
# prop tail_bytes -> tail 8k bytes
# def bytes_range(start, end) -> bytes in that range
HEAD_SIZE = pow(2, 13) # 8k
TAIL_SIZE = pow(2, 13) # 8k
encoding = None
text = None
_unsuccessful_encodings = None
def decode(self, encoding):
"""Decodes bytes with the given encoding and, if successful,
sets `self.text` with the decoded result and returns True.
Otherwise, returns False.
Side-effects: On success, sets `self.text` and `self.encoding`.
Optimization: First an attempt is made to decode
`self.head_bytes` instead of all of `self.bytes`. This allows
for the normal usage in `TextInfo._classify_encoding()` to *not*
bother fully reading binary files that could not be decoded.
Optimization: Decoding attempts are cached to not bother
attempting a failed decode twice.
"""
if self._unsuccessful_encodings is None:
self._unsuccessful_encodings = set()
if encoding in self._unsuccessful_encodings:
return False
elif encoding == self.encoding:
return True
head_bytes = self.head_bytes
try:
head_bytes.decode(encoding, 'strict')
except LookupError as ex:
log.debug("encoding lookup error: %r", encoding)
self._unsuccessful_encodings.add(encoding)
return False
except UnicodeError as ex:
# If the decode failed in the last few bytes, it might be
# because a multi-surrogate was cutoff by the head. Ignore
# the error here, if it is truly not of this encoding, the
# full file decode will fail.
if ex.start >= self.HEAD_SIZE - 5:
                # '5' because the max number of bytes to encode a single
                # char in any encoding here is 6 (e.g. UTF-8), so at most
                # 5 of those bytes can have been cut off by the head.
pass
else:
self._unsuccessful_encodings.add(encoding)
return False
try:
self.text = self.bytes.decode(encoding, 'strict')
except UnicodeError as ex:
self._unsuccessful_encodings.add(encoding)
return False
self.encoding = encoding
return True
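# A minimal usage sketch of the decode() protocol above (hedged: the path
# and encodings are illustrative; PathAccessor below is the concrete
# implementation normally used):
#
#     acc = PathAccessor("/some/file.txt")
#     if acc.decode("utf-8"):
#         text, enc = acc.text, acc.encoding    # enc == "utf-8"
#     elif acc.decode("latin-1"):
#         text, enc = acc.text, acc.encoding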
class PathAccessor(Accessor):
"""Accessor API for a path."""
(READ_NONE, # _file==None, file not opened yet
READ_HEAD, # _bytes==<head bytes>
READ_TAIL, # _bytes==<head>, _bytes_tail==<tail>
READ_ALL) = list(range(4)) # _bytes==<all>, _bytes_tail==None, _file closed
_read_state = READ_NONE # one of the READ_* states
_file = None
_bytes = None
_bytes_tail = None
def __init__(self, path, follow_symlinks=False):
self.path = path
self.follow_symlinks = follow_symlinks
def __str__(self):
return "path `%s'" % self.path
_stat_cache = None
@property
def stat(self):
if self._stat_cache is None:
if self.follow_symlinks:
self._stat_cache = os.stat(self.path)
else:
self._stat_cache = os.lstat(self.path)
return self._stat_cache
@property
def size(self):
return self.stat.st_size
def __del__(self):
self.close()
def close(self):
if self._file and not self._file.closed:
self._file.close()
def _read(self, state):
"""Read up to at least `state`."""
# TODO: If `follow_symlinks` is False and this is a symlink we
# must use os.readlink() here.
# It is the job of the caller to only call _read() if necessary.
assert self._read_state < state
try:
if self._read_state == self.READ_NONE:
assert self._file is None and self._bytes is None
self._file = open(self.path, 'rb')
if state == self.READ_HEAD:
self._bytes = self._file.read(self.HEAD_SIZE)
self._read_state = (self.size <= self.HEAD_SIZE
and self.READ_ALL or self.READ_HEAD)
elif state == self.READ_TAIL:
if self.size <= self.HEAD_SIZE + self.TAIL_SIZE:
self._bytes = self._file.read()
self._read_state = self.READ_ALL
else:
self._bytes = self._file.read(self.HEAD_SIZE)
self._file.seek(
-self.TAIL_SIZE, 2) # 2 == relative to end
self._bytes_tail = self._file.read(self.TAIL_SIZE)
self._read_state = self.READ_TAIL
elif state == self.READ_ALL:
self._bytes = self._file.read()
self._read_state = self.READ_ALL
elif self._read_state == self.READ_HEAD:
if state == self.READ_TAIL:
if self.size <= self.HEAD_SIZE + self.TAIL_SIZE:
self._bytes += self._file.read()
self._read_state = self.READ_ALL
else:
self._file.seek(
-self.TAIL_SIZE, 2) # 2 == relative to end
self._bytes_tail = self._file.read(self.TAIL_SIZE)
self._read_state = self.READ_TAIL
elif state == self.READ_ALL:
self._bytes += self._file.read()
self._read_state = self.READ_ALL
elif self._read_state == self.READ_TAIL:
assert state == self.READ_ALL
self._file.seek(self.HEAD_SIZE, 0) # 0 == relative to start
remaining_size = self.size - self.HEAD_SIZE - self.TAIL_SIZE
                assert remaining_size > 0, \
                    "negative remaining bytes to read from '%s': %d" \
                    % (self.path, remaining_size)
self._bytes += self._file.read(remaining_size)
self._bytes += self._bytes_tail
self._bytes_tail = None
self._read_state = self.READ_ALL
if self._read_state == self.READ_ALL:
self.close()
except Exception as ex:
log.warn("Could not read file: %r due to: %r", self.path, ex)
raise
def strip_bom(self, bom):
"""This should be called by the user of this class to strip a
detected BOM from the bytes for subsequent decoding and
analysis.
"""
assert self._bytes[:len(bom)] == bom
self._bytes = self._bytes[len(bom):]
@property
def head_bytes(self):
"""The first 8k raw bytes of the document."""
if self._read_state < self.READ_HEAD:
self._read(self.READ_HEAD)
return self._bytes[:self.HEAD_SIZE]
@property
def head_4_bytes(self):
if self._read_state < self.READ_HEAD:
self._read(self.READ_HEAD)
return self._bytes[:4]
@property
def tail_bytes(self):
if self._read_state < self.READ_TAIL:
self._read(self.READ_TAIL)
if self._read_state == self.READ_ALL:
return self._bytes[-self.TAIL_SIZE:]
else:
return self._bytes_tail
def bytes_range(self, start, end):
if self._read_state < self.READ_ALL:
self._read(self.READ_ALL)
return self._bytes[start:end]
@property
def bytes(self):
if self._read_state < self.READ_ALL:
self._read(self.READ_ALL)
return self._bytes
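# Sketch of PathAccessor's lazy read-state progression (illustrative):
#     acc = PathAccessor(path)   # READ_NONE: file not opened yet
#     acc.head_bytes             # READ_HEAD (or READ_ALL for small files)
#     acc.tail_bytes             # READ_TAIL: head + tail read, middle skipped
#     acc.bytes                  # READ_ALL: whole file read, handle closed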
#---- internal support stuff
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
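# Illustrative translations (per the mapping above):
#     _regex_from_encoded_pattern("foo")     ~ re.compile(re.escape("foo"))
#     _regex_from_encoded_pattern("/fo+o/i") ~ re.compile("fo+o", re.IGNORECASE)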
# Recipe: text_escape (0.2)
def _escaped_text_from_text(text, escapes="eol"):
r"""Return escaped version of text.
"escapes" is either a mapping of chars in the source text to
replacement text for each such char or one of a set of
strings identifying a particular escape style:
eol
replace EOL chars with '\r' and '\n', maintain the actual
EOLs though too
whitespace
replace EOL chars as above, tabs with '\t' and spaces
with periods ('.')
eol-one-line
replace EOL chars with '\r' and '\n'
whitespace-one-line
replace EOL chars as above, tabs with '\t' and spaces
with periods ('.')
"""
# TODO:
# - Add 'c-string' style.
# - Add _escaped_html_from_text() with a similar call sig.
import re
if isinstance(escapes, str):
if escapes == "eol":
escapes = {'\r\n': "\\r\\n\r\n", '\n': "\\n\n", '\r': "\\r\r"}
elif escapes == "whitespace":
escapes = {'\r\n': "\\r\\n\r\n", '\n': "\\n\n", '\r': "\\r\r",
'\t': "\\t", ' ': "."}
elif escapes == "eol-one-line":
escapes = {'\n': "\\n", '\r': "\\r"}
elif escapes == "whitespace-one-line":
escapes = {'\n': "\\n", '\r': "\\r", '\t': "\\t", ' ': '.'}
else:
raise ValueError("unknown text escape style: %r" % escapes)
# Sort longer replacements first to allow, e.g. '\r\n' to beat '\r' and
# '\n'.
escapes_keys = list(escapes.keys())
try:
escapes_keys.sort(key=lambda a: len(a), reverse=True)
except TypeError:
# Python 2.3 support: sort() takes no keyword arguments
escapes_keys.sort(lambda a, b: cmp(len(a), len(b)))
escapes_keys.reverse()
def repl(match):
val = escapes[match.group(0)]
return val
escaped = re.sub("(%s)" % '|'.join([re.escape(k) for k in escapes_keys]),
repl,
text)
return escaped
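# Illustrative escapes (per the style table above):
#     _escaped_text_from_text("a\tb\n", "whitespace-one-line") -> "a\\tb\\n"
#     _escaped_text_from_text("a b",    "whitespace-one-line") -> "a.b"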
def _one_line_summary_from_text(text, length=78,
escapes={'\n': "\\n", '\r': "\\r", '\t': "\\t"}):
r"""Summarize the given text with one line of the given length.
"text" is the text to summarize
"length" (default 78) is the max length for the summary
"escapes" is a mapping of chars in the source text to
replacement text for each such char. By default '\r', '\n'
and '\t' are escaped with their '\'-escaped repr.
"""
if len(text) > length:
head = text[:length-3]
else:
head = text
escaped = _escaped_text_from_text(head, escapes)
if len(text) > length:
summary = escaped[:length-3] + "..."
else:
summary = escaped
return summary
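# e.g. (with the default escapes): _one_line_summary_from_text("a\tb\nc")
# returns "a\\tb\\nc"; longer text is truncated to `length` chars with "..."
# appended.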
# Recipe: paths_from_path_patterns (0.5)
def _should_include_path(path, includes, excludes):
"""Return True iff the given path should be included."""
from os.path import basename
from fnmatch import fnmatch
base = basename(path)
if includes:
for include in includes:
if fnmatch(base, include):
try:
log.debug("include `%s' (matches `%s')", path, include)
except (NameError, AttributeError):
pass
break
else:
try:
log.debug("exclude `%s' (matches no includes)", path)
except (NameError, AttributeError):
pass
return False
for exclude in excludes:
if fnmatch(base, exclude):
try:
log.debug("exclude `%s' (matches `%s')", path, exclude)
except (NameError, AttributeError):
pass
return False
return True
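# Illustrative (matching is fnmatch on the basename only):
#     _should_include_path("src/a.py",  ["*.py"], [])      -> True
#     _should_include_path("src/a.pyc", ["*.py"], [])      -> False (no include matches)
#     _should_include_path("src/a.py",  [],       ["a.*"]) -> False (excluded)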
def _walk(top, topdown=True, onerror=None, follow_symlinks=False):
"""A version of `os.walk()` with a couple differences regarding symlinks.
1. follow_symlinks=False (the default): A symlink to a dir is
returned as a *non*-dir. In `os.walk()`, a symlink to a dir is
returned in the *dirs* list, but it is not recursed into.
2. follow_symlinks=True: A symlink to a dir is returned in the
*dirs* list (as with `os.walk()`) but it *is conditionally*
recursed into (unlike `os.walk()`).
A symlinked dir is only recursed into if it is to a deeper dir
within the same tree. This is my understanding of how `find -L
DIR` works.
TODO: put as a separate recipe
"""
from os.path import join, isdir, islink, abspath
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
names = os.listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
if follow_symlinks:
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
else:
for name in names:
path = join(top, name)
if islink(path):
nondirs.append(name)
elif isdir(path):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if follow_symlinks and islink(path):
# Only walk this path if it links deeper in the same tree.
top_abs = abspath(top)
link_abs = abspath(join(top, os.readlink(path)))
if not link_abs.startswith(top_abs + os.sep):
continue
for x in _walk(path, topdown, onerror, follow_symlinks=follow_symlinks):
yield x
if not topdown:
yield top, dirs, nondirs
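# Usage mirrors os.walk (illustrative; only the symlink handling differs,
# as described in the docstring above):
#     for dirpath, dirnames, filenames in _walk("/some/tree", follow_symlinks=True):
#         pass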
_NOT_SPECIFIED = ("NOT", "SPECIFIED")
def _paths_from_path_patterns(path_patterns, files=True, dirs="never",
recursive=True, includes=[], excludes=[],
skip_dupe_dirs=False,
follow_symlinks=False,
on_error=_NOT_SPECIFIED):
"""_paths_from_path_patterns([<path-patterns>, ...]) -> file paths
Generate a list of paths (files and/or dirs) represented by the given path
patterns.
"path_patterns" is a list of paths optionally using the '*', '?' and
'[seq]' glob patterns.
"files" is boolean (default True) indicating if file paths
should be yielded
"dirs" is string indicating under what conditions dirs are
yielded. It must be one of:
never (default) never yield dirs
always yield all dirs matching given patterns
if-not-recursive only yield dirs for invocations when
recursive=False
See use cases below for more details.
"recursive" is boolean (default True) indicating if paths should
be recursively yielded under given dirs.
"includes" is a list of file patterns to include in recursive
searches.
"excludes" is a list of file and dir patterns to exclude.
(Note: This is slightly different than GNU grep's --exclude
option which only excludes *files*. I.e. you cannot exclude
a ".svn" dir.)
"skip_dupe_dirs" can be set True to watch for and skip
descending into a dir that has already been yielded. Note
that this currently does not dereference symlinks.
"follow_symlinks" is a boolean indicating whether to follow
symlinks (default False). To guard against infinite loops
        with circular dir symlinks, only dir symlinks pointing to *deeper*
        directories are followed.
"on_error" is an error callback called when a given path pattern
matches nothing:
on_error(PATH_PATTERN)
If not specified, the default is look for a "log" global and
call:
log.error("`%s': No such file or directory")
Specify None to do nothing.
Typically this is useful for a command-line tool that takes a list
of paths as arguments. (For Unix-heads: the shell on Windows does
NOT expand glob chars, that is left to the app.)
Use case #1: like `grep -r`
{files=True, dirs='never', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield nothing
script PATH* # yield all files matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files (not dirs) recursively under DIR
script -r PATH* # yield files matching PATH* and files recursively
# under dirs matching PATH*; if none, call
# on_error(PATH*) callback
Use case #2: like `file -r` (if it had a recursive option)
{files=True, dirs='if-not-recursive', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield DIR, else call on_error(DIR)
script PATH* # yield all files and dirs matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files (not dirs) recursively under DIR
script -r PATH* # yield files matching PATH* and files recursively
# under dirs matching PATH*; if none, call
# on_error(PATH*) callback
Use case #3: kind of like `find .`
{files=True, dirs='always', recursive=(if '-r' in opts)}
script FILE # yield FILE, else call on_error(FILE)
script DIR # yield DIR, else call on_error(DIR)
script PATH* # yield all files and dirs matching PATH*; if none,
# call on_error(PATH*) callback
script -r DIR # yield files and dirs recursively under DIR
# (including DIR)
script -r PATH* # yield files and dirs matching PATH* and recursively
# under dirs; if none, call on_error(PATH*)
# callback
TODO: perf improvements (profile, stat just once)
"""
from os.path import basename, exists, isdir, join, normpath, abspath, \
lexists, islink, realpath
from glob import glob
assert not isinstance(path_patterns, str), \
"'path_patterns' must be a sequence, not a string: %r" % path_patterns
GLOB_CHARS = '*?['
if skip_dupe_dirs:
searched_dirs = set()
for path_pattern in path_patterns:
# Determine the set of paths matching this path_pattern.
for glob_char in GLOB_CHARS:
if glob_char in path_pattern:
paths = glob(path_pattern)
break
else:
if follow_symlinks:
paths = exists(path_pattern) and [path_pattern] or []
else:
paths = lexists(path_pattern) and [path_pattern] or []
if not paths:
if on_error is None:
pass
elif on_error is _NOT_SPECIFIED:
try:
log.error("`%s': No such file or directory", path_pattern)
except (NameError, AttributeError):
pass
else:
on_error(path_pattern)
for path in paths:
if (follow_symlinks or not islink(path)) and isdir(path):
if skip_dupe_dirs:
canon_path = normpath(abspath(path))
if follow_symlinks:
canon_path = realpath(canon_path)
if canon_path in searched_dirs:
continue
else:
searched_dirs.add(canon_path)
# 'includes' SHOULD affect whether a dir is yielded.
if (dirs == "always"
or (dirs == "if-not-recursive" and not recursive)
) and _should_include_path(path, includes, excludes):
yield path
# However, if recursive, 'includes' should NOT affect
# whether a dir is recursed into. Otherwise you could
# not:
# script -r --include="*.py" DIR
if recursive and _should_include_path(path, [], excludes):
for dirpath, dirnames, filenames in _walk(path,
follow_symlinks=follow_symlinks):
dir_indeces_to_remove = []
for i, dirname in enumerate(dirnames):
d = join(dirpath, dirname)
if skip_dupe_dirs:
canon_d = normpath(abspath(d))
if follow_symlinks:
canon_d = realpath(canon_d)
if canon_d in searched_dirs:
dir_indeces_to_remove.append(i)
continue
else:
searched_dirs.add(canon_d)
if dirs == "always" \
and _should_include_path(d, includes, excludes):
yield d
if not _should_include_path(d, [], excludes):
dir_indeces_to_remove.append(i)
for i in reversed(dir_indeces_to_remove):
del dirnames[i]
if files:
for filename in sorted(filenames):
f = join(dirpath, filename)
if _should_include_path(f, includes, excludes):
yield f
elif files and _should_include_path(path, includes, excludes):
yield path
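# Illustrative call, mirroring how main() below drives it (the pattern and
# excludes here are examples only):
#     for path in _paths_from_path_patterns(["src/*.py"], recursive=True,
#                                           excludes=[".svn", ".git"]):
#         print(path)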
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
# Recipe: pretty_logging (0.1) in C:\trentm\tm\recipes\cookbook
class _PerLevelFormatter(logging.Formatter):
"""Allow multiple format string -- depending on the log level.
A "fmtFromLevel" optional arg is added to the constructor. It can be
a dictionary mapping a log record level to a format string. The
usual "fmt" argument acts as the default.
"""
def __init__(self, fmt=None, datefmt=None, fmtFromLevel=None):
logging.Formatter.__init__(self, fmt, datefmt)
if fmtFromLevel is None:
self.fmtFromLevel = {}
else:
self.fmtFromLevel = fmtFromLevel
def format(self, record):
record.lowerlevelname = record.levelname.lower()
if record.levelno in self.fmtFromLevel:
# XXX This is a non-threadsafe HACK. Really the base Formatter
# class should provide a hook accessor for the _fmt
# attribute. *Could* add a lock guard here (overkill?).
_saved_fmt = self._fmt
self._fmt = self.fmtFromLevel[record.levelno]
try:
return logging.Formatter.format(self, record)
finally:
self._fmt = _saved_fmt
else:
return logging.Formatter.format(self, record)
def _setup_logging(stream=None):
"""Do logging setup:
We want a prettier default format:
do: level: ...
Spacing. Lower case. Skip " level:" if INFO-level.
"""
hdlr = logging.StreamHandler(stream)
defaultFmt = "%(name)s: %(levelname)s: %(message)s"
infoFmt = "%(name)s: %(message)s"
fmtr = _PerLevelFormatter(fmt=defaultFmt,
fmtFromLevel={logging.INFO: infoFmt})
hdlr.setFormatter(fmtr)
logging.root.addHandler(hdlr)
log.setLevel(logging.INFO)
#---- mainline
def main(argv):
usage = "usage: %prog PATHS..."
version = "%prog "+__version__
parser = optparse.OptionParser(usage=usage,
version=version, description=_cmdln_doc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("-q", "--quiet", dest="log_level",
action="store_const", const=logging.WARNING,
help="quieter output")
parser.add_option("-r", "--recursive", action="store_true",
help="recursively descend into given paths")
parser.add_option("-L", "--dereference", dest="follow_symlinks",
action="store_true",
help="follow symlinks, i.e. show info about linked-to "
"files and descend into linked dirs when recursive")
parser.add_option("-Q", "--quick-determine-lang", action="store_true",
help="Skip some processing to attempt to determine "
"language. Things like specialization, emacs/vi "
"local vars, full decoding, are skipped.")
parser.add_option("--encoding", help="suggested encoding for input files")
parser.add_option("-f", "--format",
help="format of output: summary (default), dict")
parser.add_option("-x", "--exclude", dest="excludes", action="append",
metavar="PATTERN",
help="path pattern to exclude for recursive search (by default SCC "
"control dirs are skipped)")
parser.set_defaults(log_level=logging.INFO, encoding=None, recursive=False,
follow_symlinks=False, format="summary",
excludes=[".svn", "CVS", ".hg", ".git", ".bzr"],
quick_determine_lang=False)
opts, args = parser.parse_args()
log.setLevel(opts.log_level)
if opts.log_level > logging.INFO:
warnings.simplefilter("ignore", ChardetImportWarning)
if args:
path_patterns = args
elif sys.stdin.isatty():
parser.print_help()
return 0
else:
def args_from_stdin():
for line in sys.stdin:
yield line.rstrip("\r\n")
path_patterns = args_from_stdin()
for path in _paths_from_path_patterns(
path_patterns, excludes=opts.excludes,
recursive=opts.recursive,
dirs="if-not-recursive",
follow_symlinks=opts.follow_symlinks):
try:
ti = textinfo_from_path(path, encoding=opts.encoding,
follow_symlinks=opts.follow_symlinks,
quick_determine_lang=opts.quick_determine_lang)
except OSError as ex:
log.error("%s: %s", path, ex)
continue
if opts.format == "summary":
print(ti.as_summary())
elif opts.format == "dict":
d = ti.as_dict()
if "text" in d:
del d["text"]
pprint(d)
else:
raise TextInfoError("unknown output format: %r" % opts.format)
if __name__ == "__main__":
_setup_logging()
try:
if "--self-test" in sys.argv:
import doctest
retval = doctest.testmod()[0]
else:
retval = main(sys.argv)
except SystemExit:
pass
except KeyboardInterrupt:
sys.exit(1)
except:
exc_info = sys.exc_info()
if log.isEnabledFor(logging.DEBUG):
import traceback
print()
traceback.print_exception(*exc_info)
else:
if hasattr(exc_info[0], "__name__"):
# log.error("%s: %s", exc_info[0].__name__, exc_info[1])
log.error(exc_info[1])
else: # string exception
log.error(exc_info[0])
sys.exit(1)
else:
sys.exit(retval)
|
archifix/settings
|
sublime/Packages/SublimeCodeIntel/libs/textinfo.py
|
Python
|
mit
| 88,877
|
[
"VisIt"
] |
e35f466bd338e6936e8aead5f3782e7a3a07c07e81bace0fa728f6ca97a36188
|
# coding: utf-8
"""Test mdn.kumascript."""
from __future__ import unicode_literals
from django.utils.six import text_type
from mdn.html import HTMLText
from mdn.kumascript import (
Bug, CSSBox, CSSxRef, CompatAndroid, CompatChrome, CompatGeckoDesktop,
CompatGeckoFxOS, CompatGeckoMobile, CompatIE, CompatNightly, CompatNo,
CompatOpera, CompatOperaMobile, CompatSafari, CompatUnknown,
CompatVersionUnknown, CompatibilityTable, DOMEventXRef, DOMException,
DOMxRef, DeprecatedInline, EmbedCompatTable, Event,
ExperimentalInline, GeckoRelease, HTMLAttrXRef, JSxRef,
KumaHTMLElement, KnownKumaScript, KumaScript, KumaVisitor,
NonStandardInline, NotStandardInline, PropertyPrefix, Spec2,
SpecName, UnknownKumaScript, WebkitBug, WhyNoSpecBlock,
XrefCSSLength, kumascript_grammar)
from .base import TestCase
from .test_html import TestGrammar as TestHTMLGrammar
from .test_html import TestVisitor as TestHTMLVisitor
class TestUnknownKumascript(TestCase):
def test_known(self):
ks = UnknownKumaScript(raw='{{CompatNo}}', name='CompatNo')
self.assertFalse(ks.known)
class TestKnownKumaScript(TestCase):
def test_known(self):
raw = '{{CompatNo}}'
ks = KnownKumaScript(raw=raw, scope='compatibility support')
self.assertTrue(ks.known)
class TestBug(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:Bug
def test_plain(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Node/isSupported
raw = '{{bug("801425")}}'
ks = Bug(raw=raw, args=['801425'], scope='footnote')
expected = (
'<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=801425">'
'bug 801425</a>')
self.assertEqual(ks.to_html(), expected)
self.assertEqual(ks.issues, [])
class TestCompatAndroid(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatAndroid
def test_standard(self):
raw = '{{CompatAndroid(1.0)}}'
ks = CompatAndroid(
raw=raw, args=['1.0'], scope='compatibility support')
self.assertEqual(ks.version, '1.0')
self.assertEqual(ks.to_html(), '1.0')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), '{{CompatAndroid("1.0")}}')
class TestCompatGeckoDesktop(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoDesktop
def assert_value(self, gecko_version, version, issues=[]):
raw = '{{CompatGeckoDesktop("' + gecko_version + '")}}'
ks = CompatGeckoDesktop(
raw=raw, args=[gecko_version], scope='compatibility support')
self.assertEqual(ks.gecko_version, gecko_version)
self.assertEqual(ks.version, version)
self.assertEqual(ks.to_html(), version)
self.assertEqual(ks.issues, issues or [])
self.assertEqual(text_type(ks), raw)
def test_v1(self):
self.assert_value('1', '1.0')
def test_v8(self):
self.assert_value('8.0', '8.0')
def test_bad_text(self):
self.assert_value(
'Yep', None,
[('compatgeckodesktop_unknown', 0, 29, {'version': 'Yep'})])
def test_bad_num(self):
self.assert_value(
'1.1', None,
[('compatgeckodesktop_unknown', 0, 29, {'version': '1.1'})])
class TestCompatGeckoFxOS(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoFxOS
def assert_value(self, gecko_version, version, issues=[]):
raw = '{{CompatGeckoFxOS("' + gecko_version + '")}}'
ks = CompatGeckoFxOS(
raw=raw, args=[gecko_version], scope='compatibility support')
self.assertEqual(ks.gecko_version, gecko_version)
self.assertEqual(ks.version, version)
self.assertEqual(ks.to_html(), version)
self.assertEqual(ks.issues, issues or [])
self.assertEqual(text_type(ks), raw)
def test_7(self):
self.assert_value('7', '1.0')
def test_range(self):
versions = {'10': '1.0',
'24': '1.2',
'28': '1.3',
'29': '1.4',
'32': '2.0',
'34': '2.1',
'35': '2.2'
}
for gversion, oversion in versions.items():
self.assert_value(gversion, oversion)
def test_bad_gecko(self):
self.assert_value(
'999999', None,
issues=[('compatgeckofxos_unknown', 0, 29, {'version': '999999'})])
def test_bad_text(self):
self.assert_value(
'Yep', None,
issues=[('compatgeckofxos_unknown', 0, 26, {'version': 'Yep'})])
def assert_value_with_override(
self, gecko_version, override, version, issues=[]):
raw = (
'{{CompatGeckoFxOS("' + gecko_version + '", "' + override + '")}}')
ks = CompatGeckoFxOS(
raw=raw, args=[gecko_version, override],
scope='compatibility support')
self.assertEqual(ks.gecko_version, gecko_version)
self.assertEqual(ks.version, override)
self.assertEqual(ks.to_html(), override)
self.assertEqual(ks.issues, issues or [])
self.assertEqual(text_type(ks), raw)
def test_7_override_1_0_1(self):
self.assert_value_with_override('7', '1.0.1', '1.0.1')
def test_7_override_1_1(self):
self.assert_value_with_override('7', '1.1', '1.1')
def test_bad_override(self):
self.assert_value_with_override(
'18', '5.0', None,
issues=[('compatgeckofxos_override', 0, 32,
{'override': '5.0', 'version': '18'})])
class TestCompatGeckoMobile(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatGeckoMobile
def assert_value(self, gecko_version, version, issues=[]):
raw = '{{CompatGeckoMobile("' + gecko_version + '")}}'
ks = CompatGeckoMobile(
raw=raw, args=[gecko_version], scope='compatibility support')
self.assertEqual(ks.gecko_version, gecko_version)
self.assertEqual(ks.version, version)
self.assertEqual(ks.to_html(), version)
self.assertEqual(ks.issues, issues or [])
self.assertEqual(text_type(ks), raw)
def test_v1(self):
self.assert_value('1', '1.0')
def test_v1_11(self):
self.assert_value('1.11', '1.0')
def test_v2(self):
self.assert_value('2', '4.0')
class TestCompatNightly(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatNightly
def test_standard(self):
raw = '{{CompatNightly}}'
ks = CompatNightly(raw=raw, scope='compatibility support')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_with_arg(self):
raw = '{{CompatNightly("firefox")}}'
ks = CompatNightly(
raw=raw, args=['firefox'], scope='compatibility support')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestCompatNo(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatNo
scope = 'compatibility support'
def test_standard(self):
raw = '{{CompatNo}}'
ks = CompatNo(raw=raw, scope=self.scope)
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_passing_args_fails(self):
raw = '{{CompatNo("14.0")}}'
ks = CompatNo(raw=raw, args=['14.0'], scope=self.scope)
issue = ks._make_issue(
'kumascript_wrong_args', min=0, max=0, count=1, arg_names=[],
arg_spec='no arguments', arg_count='1 argument')
self.assertEqual(ks.issues, [issue])
self.assertEqual(text_type(ks), raw)
class TestCompatUnknown(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatUnknown
def test_standard(self):
raw = '{{CompatUnknown}}'
ks = CompatUnknown(raw=raw, scope='compatibility support')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestCompatVersionUnknown(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatVersionUnknown
def test_standard(self):
raw = '{{CompatVersionUnknown}}'
ks = CompatVersionUnknown(raw=raw, scope='compatibility support')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestCompatibilityTable(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:CompatibilityTable
def test_standard(self):
raw = '{{CompatibilityTable}}'
ks = CompatibilityTable(raw=raw, scope='footnote')
self.assertEqual(ks.to_html(), '')
self.assertEqual(len(ks.issues), 1)
self.assertEqual(ks.issues[0][0], 'unexpected_kumascript')
self.assertEqual(text_type(ks), raw)
class TestKumaHTMLElement(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:HTMLElement
def assert_value(self, name, html):
raw = '{{HTMLElement("' + name + '")}}'
ks = KumaHTMLElement(raw=raw, args=[name], scope='footnote')
self.assertEqual(ks.to_html(), html)
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_standard(self):
        self.assert_value('isindex', '<code>&lt;isindex&gt;</code>')
def test_spaces(self):
self.assert_value('is index', '<code>is index</code>')
class TestSpec2(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:Spec2
scope = 'specification maturity'
def test_standard(self):
spec = self.get_instance('Specification', 'css3_backgrounds')
raw = '{{Spec2("CSS3 Backgrounds")}}'
ks = Spec2(raw=raw, args=['CSS3 Backgrounds'], scope=self.scope)
self.assertEqual(ks.mdn_key, 'CSS3 Backgrounds')
self.assertEqual(ks.spec, spec)
self.assertFalse(ks.issues)
self.assertEqual(
ks.to_html(),
'specification CSS Backgrounds and Borders Module Level 3')
self.assertEqual(text_type(ks), raw)
def test_unknown_mdn_key(self):
raw = "{{Spec2('CSS3 Backgrounds')}}"
ks = Spec2(raw=raw, args=['CSS3 Backgrounds'], scope=self.scope)
self.assertEqual(ks.mdn_key, 'CSS3 Backgrounds')
self.assertIsNone(ks.spec)
issues = [('unknown_spec', 0, 29, {'key': ks.mdn_key})]
self.assertEqual(ks.issues, issues)
self.assertEqual(ks.to_html(), 'specification CSS3 Backgrounds')
def test_empty_key(self):
raw = '{{Spec2()}}'
ks = Spec2(raw=raw, scope=self.scope)
self.assertIsNone(ks.mdn_key)
self.assertIsNone(ks.spec)
expected = ks._make_issue(
'kumascript_wrong_args', min=1, max=1, arg_names=['SpecKey'],
count=0, arg_count='0 arguments',
arg_spec='exactly 1 argument (SpecKey)')
self.assertEqual(ks.issues, [expected])
self.assertEqual(ks.to_html(), 'specification (None)')
def test_str_double_quote(self):
raw = "{{Spec2('The \"Foo\" Spec')}}"
ks = Spec2(raw=raw, args=['The "Foo" Spec'], scope=self.scope)
self.assertEqual(ks.mdn_key, 'The "Foo" Spec')
self.assertEqual(text_type(ks), raw)
class TestSpecName(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:SpecName
scope = 'specification name'
def test_3args(self):
self.get_instance('Specification', 'css3_backgrounds')
raw = ('{{SpecName("CSS3 Backgrounds", "#the-background-size",'
' "background-size")}}')
args = ['CSS3 Backgrounds', '#the-background-size', 'background-size']
ks = SpecName(raw=raw, args=args, scope=self.scope)
self.assertEqual(ks.mdn_key, 'CSS3 Backgrounds')
self.assertEqual(ks.subpath, '#the-background-size')
self.assertEqual(ks.section_name, 'background-size')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_1arg(self):
self.get_instance('Specification', 'css3_backgrounds')
raw = "{{SpecName('CSS3 Backgrounds')}}"
ks = SpecName(raw=raw, args=['CSS3 Backgrounds'], scope=self.scope)
self.assertEqual(ks.mdn_key, 'CSS3 Backgrounds')
self.assertEqual(ks.subpath, None)
self.assertEqual(ks.section_name, None)
self.assertFalse(ks.issues)
def test_unknown_spec(self):
raw = "{{SpecName('CSS3 Backgrounds')}}"
ks = SpecName(raw=raw, args=['CSS3 Backgrounds'], scope=self.scope)
self.assertEqual(ks.mdn_key, 'CSS3 Backgrounds')
self.assertEqual(ks.subpath, None)
self.assertEqual(ks.section_name, None)
expected = [('unknown_spec', 0, 32, {'key': u'CSS3 Backgrounds'})]
self.assertEqual(ks.issues, expected)
def test_blank_mdn_key(self):
# https://developer.mozilla.org/en-US/docs/Web/API/MIDIConnectionEvent
raw = "{{SpecName('', '#midiconnection')}}"
ks = SpecName(raw=raw, args=['', '#midiconnection'], scope=self.scope)
self.assertEqual(ks.mdn_key, '')
self.assertEqual(ks.subpath, '#midiconnection')
self.assertIsNone(ks.section_name, None)
issue = ks._make_issue('specname_blank_key')
self.assertEqual(ks.issues, [issue])
def test_no_args(self):
raw = '{{SpecName}}'
ks = SpecName(raw=raw, scope=self.scope)
issue = ks._make_issue(
'kumascript_wrong_args', min=1, max=3, count=0,
arg_names=['SpecKey', 'Anchor', 'AnchorName'],
arg_count='0 arguments',
arg_spec=(
'between 1 and 3 arguments (SpecKey, Anchor, [AnchorName])'))
self.assertEqual(ks.issues, [issue])
class TestCSSBox(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:cssbox
def test_standard(self):
raw = '{{cssbox("background-clip")}}'
ks = CSSBox(raw=raw, args=['background-clip'], scope='footnote')
self.assertEqual(ks.to_html(), '')
self.assertEqual(len(ks.issues), 1)
self.assertEqual(ks.issues[0][0], 'unexpected_kumascript')
self.assertEqual(text_type(ks), raw)
class TestCSSxRef(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:cssxref
scope = 'footnote'
def test_standard(self):
raw = '{{cssxref("z-index")}}'
ks = CSSxRef(raw=raw, args=['z-index'], scope=self.scope)
self.assertEqual(ks.api_name, 'z-index')
self.assertIsNone(ks.display_name)
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/CSS/'
'z-index"><code>z-index</code></a>'))
self.assertEqual(ks.issues, [])
self.assertEqual(text_type(ks), raw)
def test_display_override(self):
raw = '{{cssxref("the-foo", "foo")}}'
ks = CSSxRef(raw=raw, args=['the-foo', 'foo'], scope=self.scope)
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/CSS/'
'the-foo"><code>foo</code></a>'))
def test_feature_name(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/attr
raw = '{{cssxref("content")}}'
ks = CSSxRef(raw=raw, args=['content'], scope='compatibility feature')
self.assertEqual(ks.to_html(), '<code>content</code>')
class TestDeprecatedInline(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:deprecated_inline
def test_standard(self):
raw = '{{deprecated_inline}}'
ks = DeprecatedInline(raw=raw, scope='compatibility feature')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestDOMEventXRef(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:domeventxref
def test_footnote(self):
# https://developer.mozilla.org/en-US/docs/Web/Events/compositionupdate
raw = '{{domeventxref("compositionstart")}}'
ks = DOMEventXRef(raw=raw, args=['compositionstart'], scope='footnote')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/DOM/'
'DOM_event_reference/compositionstart"><code>compositionstart'
'</code></a>'))
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestDOMException(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:exception
def test_footnote(self):
# https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder
raw = '{{exception("TypeError")}}'
ks = DOMException(raw=raw, args=['TypeError'], scope='footnote')
self.assertEqual(
ks.to_html(),
'<a href="https://developer.mozilla.org/en-US/docs/Web/API/'
'DOMException#TypeError"><code>TypeError</code></a>')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestDOMxRef(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:domxref
scope = 'footnote'
def test_standard(self):
# https://developer.mozilla.org/en-US/docs/Web/API/CharacterData
raw = '{{domxref("ChildNode")}}'
ks = DOMxRef(
raw=raw, args=['ChildNode'], scope='compatibility feature')
self.assertEqual(ks.to_html(), '<code>ChildNode</code>')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_with_override(self):
# https://developer.mozilla.org/en-US/docs/Web/API/CustomEvent/initCustomEvent
raw = '{{domxref("CustomEvent.CustomEvent", "CustomEvent()")}}'
args = ['CustomEvent.CustomEvent', 'CustomEvent()']
ks = DOMxRef(raw=raw, args=args, scope=self.scope)
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/API/'
'CustomEvent/CustomEvent"><code>CustomEvent()</code></a>'))
def test_space(self):
# No current pages, but in macro definition
raw = '{{domxref("Notifications API")}}'
ks = DOMxRef(raw=raw, args=['Notifications API'], scope=self.scope)
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/API/'
'Notifications_API"><code>Notifications API</code></a>'))
def test_parens_dot_caps(self):
# https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsershowmodalprompt
raw = '{{domxref("window.alert()")}}'
ks = DOMxRef(raw=raw, args=['window.alert()'], scope=self.scope)
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/API/'
'Window/alert"><code>window.alert()</code></a>'))
class TestEmbedCompatTable(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:EmbedCompatTable
scope = 'footnote'
def test_standard(self):
raw = '{{EmbedCompatTable("web-css-display")}}'
ks = EmbedCompatTable(
raw=raw, args=['web-css-display'], scope=self.scope)
self.assertFalse(ks.issues)
self.assertEqual(ks.to_html(), '')
class TestEvent(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:event
def test_feature_name(self):
# No current compat pages
raw = '{{event("close")}}'
ks = Event(raw=raw, args=['close'], scope='compatibility feature')
self.assertEqual(ks.to_html(), '<code>close</code>')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_footnote(self):
# https://developer.mozilla.org/en-US/docs/Web/API/DeviceLightEvent/value
raw = '{{event("devicelight")}}'
ks = Event(raw=raw, args=['devicelight'], scope='footnote')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/Events/'
'devicelight"><code>devicelight</code></a>'))
class TestExperimentalInline(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:experimental_inline
def test_standard(self):
raw = '{{experimental_inline}}'
ks = ExperimentalInline(raw=raw, scope='compatibility feature')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestGeckoRelease(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:geckoRelease
def test_early(self):
raw = '{{geckoRelease("1.9.2")}}'
ks = GeckoRelease(raw=raw, args=['1.9.2'], scope='footnote')
expected = '(Firefox 3.6 / Thunderbird 3.1 / Fennec 1.0)'
self.assertEqual(ks.to_html(), expected)
def test_recent(self):
raw = '{{geckoRelease("19.0")}}'
ks = GeckoRelease(raw=raw, args=['19.0'], scope='footnote')
expected = '(Firefox 19.0 / Thunderbird 19.0 / SeaMonkey 2.16)'
self.assertEqual(ks.to_html(), expected)
def test_fxos(self):
raw = '{{geckoRelease("18.0")}}'
ks = GeckoRelease(raw=raw, args=['18.0'], scope='footnote')
expected = (
'(Firefox 18.0 / Thunderbird 18.0 / SeaMonkey 2.15 /'
' Firefox OS 1.0.1 / Firefox OS 1.1)')
self.assertEqual(ks.to_html(), expected)
def test_with_plus(self):
raw = '{{geckoRelease("33.0+")}}'
ks = GeckoRelease(raw=raw, args=['33.0+'], scope='footnote')
expected = '(Firefox 33.0+ / Thunderbird 33.0+ / SeaMonkey 2.30+)'
self.assertEqual(ks.to_html(), expected)
class TestJSxRef(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:jsxref
def test_standard(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp/global
raw = '{{jsxref("RegExp")}}'
ks = JSxRef(
raw=raw, args=['RegExp'], scope='specification description')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript'
'/Reference/Global_Objects/RegExp"><code>RegExp</code></a>'))
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_display_name(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Function/arguments
raw = '{{jsxref("Functions/arguments", "arguments")}}'
ks = JSxRef(
raw=raw, args=['Functions/arguments', 'arguments'],
scope='specification description')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript'
'/Reference/Global_Objects/Functions/arguments"><code>arguments'
'</code></a>'))
def test_feature_name(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/SIMD
raw = '{{jsxref("Float32x4", "SIMD.Float32x4")}}'
ks = JSxRef(
raw=raw, args=['Float32x4', 'SIMD.Float32x4'],
scope='compatibility feature')
self.assertEqual(ks.to_html(), '<code>SIMD.Float32x4</code>')
def test_footnote(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Blob
raw = '{{jsxref("Array/slice", "Array.slice()")}}'
ks = JSxRef(
raw=raw, args=['Array/slice', 'Array.slice()'], scope='footnote')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript'
'/Reference/Global_Objects/Array/slice"><code>Array.slice()'
'</code></a>'))
def test_prototype(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array
raw = '{{jsxref("Array.prototype.lastIndexOf", "lastIndexOf")}}'
ks = JSxRef(
raw=raw, args=['Array.prototype.lastIndexOf', 'lastIndexOf'],
scope='specification description')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript'
'/Reference/Global_Objects/Array/lastIndexOf"><code>lastIndexOf'
'</code></a>'))
def test_dotted_function(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math
raw = '{{jsxref("Math.log10()", "log10()")}}'
ks = JSxRef(
raw=raw, args=['Math.log10()', 'log10()'],
scope='specification description')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript'
'/Reference/Global_Objects/Math/log10"><code>log10()'
'</code></a>'))
def test_global_object(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/toString
raw = '{{jsxref("Global_Objects/null", "null")}}'
ks = JSxRef(
raw=raw, args=['Global_Objects/null', 'null'],
scope='specification description')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/JavaScript'
'/Reference/Global_Objects/null"><code>null</code></a>'))
class TestHTMLAttrXRef(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:htmlattrxref
def test_feature_name(self):
# No current compat pages
raw = '{{htmlattrxref("style")}}'
ks = HTMLAttrXRef(
raw=raw, args=['style'], scope='compatibility feature')
self.assertEqual(ks.to_html(), '<code>style</code>')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
def test_spec_desc_without_element(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Element/classList
raw = '{{htmlattrxref("class")}}'
ks = HTMLAttrXRef(
raw=raw, args=['class'], scope='specification description')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/HTML/'
'Global_attributes#attr-class"><code>class</code></a>'))
def test_footnote_with_element(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Element
raw = '{{htmlattrxref("sandbox", "iframe")}}'
ks = HTMLAttrXRef(
raw=raw, args=['sandbox', 'iframe'], scope='footnote')
self.assertEqual(
ks.to_html(),
('<a href="https://developer.mozilla.org/en-US/docs/Web/HTML/'
'Element/iframe#attr-sandbox"><code>sandbox</code></a>'))
class TestNonStandardInline(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:non-standard_inline
def test_standard(self):
raw = '{{non-standard_inline}}'
ks = NonStandardInline(raw=raw, scope='compatibility feature')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestNotStandardInline(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:not_standard_inline
def test_standard(self):
raw = '{{not_standard_inline}}'
ks = NotStandardInline(raw=raw, scope='compatibility feature')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestPropertyPrefix(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:property_prefix
def test_standard(self):
raw = '{{property_prefix("-webkit")}}'
ks = PropertyPrefix(
raw=raw, args=['-webkit'], scope='compatibility support')
self.assertEqual(ks.to_html(), '')
self.assertFalse(ks.issues)
self.assertEqual(text_type(ks), raw)
class TestWebkitBug(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:WebkitBug
def test_standard(self):
# https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toBlob
raw = '{{WebKitBug("71270")}}'
ks = WebkitBug(raw=raw, args=['71270'], scope='footnote')
expected = (
'<a href="https://bugs.webkit.org/show_bug.cgi?id=71270">'
'WebKit bug 71270</a>')
self.assertEqual(ks.to_html(), expected)
self.assertEqual(ks.issues, [])
class TestWhyNoSpecBlock(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:WhyNoSpecStart
# https://developer.mozilla.org/en-US/docs/Template:WhyNoSpecEnd
def test_standard(self):
raw = """{{WhyNoSpecStart}}There is no spec.{{WhyNoSpecEnd}}"""
block = WhyNoSpecBlock(raw=raw, scope='footnote')
self.assertEqual(block.to_html(), '')
self.assertEqual(text_type(block), raw)
class TestXrefCSSLength(TestCase):
# https://developer.mozilla.org/en-US/docs/Template:xref_csslength
def test_feature_name(self):
raw = '{{xref_csslength()}}'
ks = XrefCSSLength(raw=raw, scope='compatibility feature')
        self.assertEqual('<code>&lt;length&gt;</code>', ks.to_html())
self.assertEqual([], ks.issues)
self.assertEqual(text_type(ks), '{{xref_csslength}}')
def test_footnote(self):
raw = '{{xref_csslength()}}'
ks = XrefCSSLength(raw=raw, scope='footnote')
self.assertEqual(
'<a href="https://developer.mozilla.org/en-US/docs/Web/CSS/'
'length"><code><length></code></a>',
ks.to_html())
class TestGrammar(TestHTMLGrammar):
def test_no_arg_kumascript(self):
text = '<p>{{CompatNo}}</p>'
parsed = kumascript_grammar['html'].parse(text)
assert parsed
def assert_whynospec(self, text):
parsed = kumascript_grammar['html'].parse(text)
assert parsed # Hard to do more than this
def test_whynospec_plain(self):
text = '{{WhyNoSpecStart}}There is no spec{{WhyNoSpecEnd}}'
self.assert_whynospec(text)
def test_whynospec_spaces(self):
text = """\
{{ WhyNoSpecStart }} There is no spec {{ WhyNoSpecEnd }}
"""
self.assert_whynospec(text)
def test_whynospec_inner_kuma(self):
text = """\
{{WhyNoSpecStart}}
Not part of any current spec, but it was in early drafts of
{{SpecName("CSS3 Animations")}}.
{{WhyNoSpecEnd}}
"""
self.assert_whynospec(text)
def test_single_curly(self):
text = 'Here is some sample text: { ... }.'
parsed = kumascript_grammar['text_block'].parse(text)
self.assertEqual(text, parsed.text)
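# A minimal sketch of the grammar/visitor round trip these tests exercise
# (hedged: the HTML string and scope are illustrative; the names come from
# the imports above):
#
#     parsed = kumascript_grammar['html'].parse('<p>{{CompatNo}}</p>')
#     visitor = KumaVisitor()
#     visitor.scope = 'compatibility support'
#     elements = visitor.visit(parsed)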
class TestVisitor(TestHTMLVisitor):
def setUp(self):
self.visitor = KumaVisitor()
def assert_kumascript(
self, text, name, args, scope, known=True, issues=None):
parsed = kumascript_grammar['kumascript'].parse(text)
self.visitor.scope = scope
ks = self.visitor.visit(parsed)
self.assertIsInstance(ks, KumaScript)
self.assertEqual(ks.name, name)
self.assertEqual(ks.args, args)
self.assertEqual(ks.known, known)
self.assertEqual(ks.issues, issues or [])
def test_kumascript_no_args(self):
self.assert_kumascript(
'{{CompatNo}}', 'CompatNo', [], 'compatibility support')
def test_kumascript_no_parens_and_spaces(self):
self.assert_kumascript(
'{{ CompatNo }}', 'CompatNo', [], 'compatibility support')
def test_kumascript_empty_parens(self):
self.assert_kumascript(
'{{CompatNo()}}', 'CompatNo', [], 'compatibility support')
def test_kumascript_one_arg(self):
self.assert_kumascript(
'{{cssxref("-moz-border-image")}}', 'cssxref',
['-moz-border-image'], 'footnote')
def test_kumascript_one_arg_no_quotes(self):
self.assert_kumascript(
'{{CompatGeckoDesktop(27)}}', 'CompatGeckoDesktop', ['27'],
'compatibility support')
def test_kumascript_three_args(self):
self.get_instance('Specification', 'css3_backgrounds')
self.assert_kumascript(
("{{SpecName('CSS3 Backgrounds', '#the-background-size',"
" 'background-size')}}"),
'SpecName',
['CSS3 Backgrounds', '#the-background-size',
'background-size'], 'specification name')
def test_kumascript_empty_string(self):
# https://developer.mozilla.org/en-US/docs/Web/API/MIDIConnectionEvent
raw = "{{SpecName('', '#midiconnection')}}"
name = 'SpecName'
args = ['', '#midiconnection']
issue = (
'specname_blank_key', 0, 35,
{'name': name, 'args': args, 'scope': 'specification name',
'kumascript': '{{SpecName("", "#midiconnection")}}'})
self.assert_kumascript(
raw, name, args, 'specification name', issues=[issue])
def test_kumascript_unknown(self):
issue = (
'unknown_kumascript', 0, 10,
{'name': 'CSSRef', 'args': [], 'scope': 'footnote',
'kumascript': '{{CSSRef}}'})
self.assert_kumascript(
'{{CSSRef}}', 'CSSRef', [], scope='footnote', known=False,
issues=[issue])
def test_kumascript_in_html(self):
html = """\
<tr>
<td>{{SpecName('CSS3 Display', '#display', 'display')}}</td>
<td>{{Spec2('CSS3 Display')}}</td>
<td>Added the <code>run-in</code> and <code>contents</code> values.</td>
</tr>"""
parsed = kumascript_grammar['html'].parse(html)
out = self.visitor.visit(parsed)
self.assertEqual(len(out), 1)
tr = out[0]
self.assertEqual(tr.tag, 'tr')
texts = [None] * 4
texts[0], td1, texts[1], td2, texts[2], td3, texts[3] = tr.children
for text in texts:
self.assertIsInstance(text, HTMLText)
self.assertFalse(text.cleaned)
self.assertEqual(td1.tag, 'td')
self.assertEqual(len(td1.children), 1)
self.assertIsInstance(td1.children[0], SpecName)
self.assertEqual(td2.tag, 'td')
self.assertEqual(len(td2.children), 1)
self.assertIsInstance(td2.children[0], Spec2)
self.assertEqual(td3.tag, 'td')
text1, code1, text2, code2, text3 = td3.children
self.assertEqual(str(text1), 'Added the')
self.assertEqual(str(code1), '<code>run-in</code>')
self.assertEqual(str(text2), 'and')
self.assertEqual(str(code2), '<code>contents</code>')
self.assertEqual(str(text3), 'values.')
def test_kumascript_and_text_and_HTML(self):
html = """\
<td>
Add the {{ xref_csslength() }} value and allows it to be applied to
element with a {{ cssxref("display") }} type of <code>table-cell</code>.
</td>"""
parsed = kumascript_grammar['html'].parse(html)
out = self.visitor.visit(parsed)
self.assertEqual(len(out), 1)
tr = out[0]
self.assertEqual(tr.tag, 'td')
txts = [None] * 4
ks = [None] * 2
txts[0], ks[0], txts[1], ks[1], txts[2], code, txts[3] = tr.children
for text in txts:
self.assertIsInstance(text, HTMLText)
self.assertTrue(text.cleaned)
self.assertEqual('Add the', str(txts[0]))
self.assertEqual(
'value and allows it to be applied to element with a',
str(txts[1]))
self.assertEqual('type of', str(txts[2]))
self.assertEqual('.', str(txts[3]))
self.assertEqual('{{xref_csslength}}', str(ks[0]))
self.assertEqual('{{cssxref("display")}}', str(ks[1]))
self.assertEqual('<code>table-cell</code>', str(code))
def assert_compat_version(self, html, cls, version):
"""Check that Compat* KumaScript is parsed correctly."""
parsed = kumascript_grammar['html'].parse(html)
out = self.visitor.visit(parsed)
self.assertEqual(len(out), 1)
ks = out[0]
self.assertIsInstance(ks, cls)
self.assertEqual(version, ks.version)
def test_compatchrome(self):
self.assert_compat_version(
'{{CompatChrome("10.0")}}', CompatChrome, '10.0')
def test_compatie(self):
self.assert_compat_version(
'{{CompatIE("9")}}', CompatIE, '9.0')
def test_compatopera(self):
self.assert_compat_version(
'{{CompatOpera("9")}}', CompatOpera, '9.0')
def test_compatoperamobile(self):
self.assert_compat_version(
'{{CompatOperaMobile("11.5")}}', CompatOperaMobile, '11.5')
def test_compatsafari(self):
self.assert_compat_version(
'{{CompatSafari("2")}}', CompatSafari, '2.0')
def assert_a(self, html, converted, issues=None):
parsed = kumascript_grammar['html'].parse(html)
out = self.visitor.visit(parsed)
self.assertEqual(len(out), 1)
a = out[0]
self.assertEqual('a', a.tag)
self.assertEqual(converted, a.to_html())
self.assertEqual(issues or [], self.visitor.issues)
def test_a_missing(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/flex
issues = [
('unexpected_attribute', 3, 13,
{'node_type': 'a', 'ident': 'name', 'value': 'bc1',
'expected': 'the attribute href'}),
('missing_attribute', 0, 14, {'node_type': 'a', 'ident': 'href'})]
self.assert_a(
'<a name="bc1">[1]</a>', '<a>[1]</a>', issues=issues)
def test_a_MDN_relative(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/image
self.assert_a(
'<a href="/en-US/docs/Web/CSS/CSS3">CSS3</a>',
('<a href="https://developer.mozilla.org/en-US/docs/Web/CSS/CSS3">'
'CSS3</a>'))
def test_a_external(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Web_Speech_API
self.assert_a(
('<a href="https://dvcs.w3.org/hg/speech-api/raw-file/tip/'
'speechapi.html" class="external external-icon">Web Speech API'
'</a>'),
('<a href="https://dvcs.w3.org/hg/speech-api/raw-file/tip/'
'speechapi.html">Web Speech API</a>'))
def test_a_bad_class(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Element/getElementsByTagNameNS
self.assert_a(
('<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=542185#c5"'
' class="link-https"'
' title="https://bugzilla.mozilla.org/show_bug.cgi?id=542185#c5">'
'comment from Henri Sivonen about the change</a>'),
('<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=542185#c5"'
'>comment from Henri Sivonen about the change</a>'),
[('unexpected_attribute', 65, 83,
{'node_type': 'a', 'ident': 'class', 'value': 'link-https',
'expected': 'the attribute href'})])
|
mdn/browsercompat
|
mdn/tests/test_kumascript.py
|
Python
|
mpl-2.0
| 39,220
|
[
"VisIt"
] |
ee2d782e5fcce79bb452ff7c167834f904ab1dbe21fc35489448e729c8d58247
|
print("How old are you?", end=' ')
age = input()
print("How tall are you?", end=' ')
height = input()
print("How much do you weigh?", end=' ')
weight = input()
print(f"So, you're {age} old, {height} tall and {weight} heavy.")
name = input("What's your surname? ")
print(f"""Nice to meet you Mr. {name}, we've been expecting you.
It's been a long time since your last visit.""")
print("When were you born?", end=' ')
born_in = input()
year = 2017
years_old = year - int(born_in)
print(f"So you are approximately {years_old} years-old")
|
jpilorget/lpthw
|
ex11.py
|
Python
|
gpl-3.0
| 549
|
[
"VisIt"
] |
6187a55151a359401013f0d69d85deaf4d84431fc4215e06ed25018c2d115a2f
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sources.galaxyfinder Contains the GalaxyFinder class.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import astronomical modules
import numpy as np
from astropy.units import Unit
from astropy.coordinates import Angle
import pdb
import numbers
# Import the relevant PTS classes and modules
from ..basics.mask import Mask
from ..basics.region import Region
from ..basics.skyregion import SkyRegion
from ..basics.vector import Extent
from ..basics.geometry import Coordinate, Ellipse
from ..basics.skygeometry import SkyCoordinate
from ..core.frame import Frame
from ..object.galaxy import Galaxy
from ..basics.skygeometry import SkyEllipse
from ...core.basics.configurable import OldConfigurable
from ...core.tools import tables
from ...core.tools import filesystem as fs
from ...core.tools.logging import log
# -----------------------------------------------------------------
class GalaxyFinder(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
"""
The constructor ...
"""
# Call the constructor of the base class
super(GalaxyFinder, self).__init__(config, "magic")
# -- Attributes --
# Initialize an empty list for the galaxies
self.galaxies = []
# The image frame
self.frame = None
# The mask covering objects that require special attention (visual feedback)
self.special_mask = None
# The mask covering pixels that should be ignored
self.ignore_mask = None
# The mask of bad pixels
self.bad_mask = None
# The galactic catalog
self.catalog = None
# The galactic statistics
self.statistics = None
# The galaxy region
self.region = None
# The segmentation map
self.segments = None
# -----------------------------------------------------------------
def run(self, frame, catalog, special=None, ignore=None, bad=None):
"""
This function ...
:param frame:
:param catalog:
:param special:
:param ignore:
:param bad:
"""
# 1. Call the setup function
self.setup(frame, catalog, special, ignore, bad)
# 2. Find the galaxies
self.find_galaxies()
# 3. Set the statistics
self.set_statistics()
# 4. Create the region
self.create_region()
# 5. Create the segmentation map
self.create_segments()
# -----------------------------------------------------------------
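# Hedged usage sketch (illustrative only; 'config', 'frame' and 'catalog' are
# assumed to be prepared by the caller and are not defined in this module):
#   finder = GalaxyFinder(config)
#   finder.run(frame, catalog)
#   galaxy_region = finder.region       # region built by create_region()
#   segmentation_map = finder.segments  # map built by create_segments()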
def setup(self, frame, catalog, special_mask=None, ignore_mask=None, bad_mask=None):
"""
This function ...
:param frame:
:param catalog:
:param special_mask:
:param ignore_mask:
:param bad_mask:
"""
# Call the setup function of the base class
super(GalaxyFinder, self).setup()
# Inform the user
log.info("Setting up the galaxy extractor ...")
# Make a local reference to the image frame
self.frame = frame
self.catalog = catalog
# Masks
self.special_mask = special_mask
self.ignore_mask = ignore_mask
self.bad_mask = bad_mask
# Create an empty frame for the segments
self.segments = Frame.zeros_like(self.frame)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing the galaxy extractor ...")
# Clear the list of galaxies
self.galaxies = []
# Clear the frame
self.frame = None
# -----------------------------------------------------------------
def find_galaxies(self):
"""
This function ...
:return:
"""
# Load the galaxies from the galactic catalog
self.load_galaxies()
# Find the sources
self.find_sources()
# Find apertures
if self.config.find_apertures: self.find_contours()
# -----------------------------------------------------------------
@property
def positions(self):
"""
This function ...
:return:
"""
# Initialize a list to contain the object positions
positions = []
# Loop over the galaxies
for galaxy in self.galaxies:
# Calculate the pixel coordinate in the frame and add it to the list
positions.append(galaxy.pixel_position(self.frame.wcs))
# Return the list
return positions
# -----------------------------------------------------------------
@property
def principal(self):
"""
This function ...
:return:
"""
# Loop over the list of galaxies
for galaxy in self.galaxies:
# Check if it is the principal galaxy; if so, return it
if galaxy.principal: return galaxy
# If the principal galaxy is not determined, return None
return None
# -----------------------------------------------------------------
@property
def companions(self):
"""
This function ...
:return:
"""
# Initialize a list to contain the companion galaxies
companions = []
# Loop over the list of galaxies
for galaxy in self.galaxies:
# Check if it is a companion galaxy; if so, add it to the list
if galaxy.companion: companions.append(galaxy)
# Return the list of companion galaxies
return companions
# -----------------------------------------------------------------
def find_sources(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Looking for sources near the galaxy positions ...")
# Loop over all galaxies in the list
for galaxy in self.galaxies:
# If this sky object should be ignored, skip it
if galaxy.ignore: continue
# If the galaxy is the principal galaxy and a principal region file is given
if galaxy.principal and self.config.principal_region is not None:
# Load the principal galaxy region file
region = SkyRegion.from_file(self.config.principal_region)
shape = region[0].to_pixel(self.frame.wcs)
# Create a source for the galaxy from the shape in the region file
outer_factor = self.config.detection.background_outer_factor
galaxy.source_from_shape(self.frame, shape, outer_factor)
else:
# If requested, use the galaxy extents obtained from the catalog to create the source
if self.config.detection.use_d25 and galaxy.has_extent:
outer_factor = self.config.detection.background_outer_factor
expansion_factor = self.config.detection.d25_expansion_factor
galaxy.source_from_parameters(self.frame, outer_factor, expansion_factor)
else:
# Find a source
try: galaxy.find_source(self.frame, self.config.detection)
except Exception as e:
#import traceback
log.error("Error when finding source")
print(type(e))
print(e)
#traceback.print_exc()
if self.config.plot_track_record_if_exception:
if galaxy.has_track_record: galaxy.track_record.plot()
else: log.warning("Track record is not enabled")
# If a source was not found for the principal or companion galaxies, force it
outer_factor = self.config.detection.background_outer_factor
if galaxy.principal and not galaxy.has_source: galaxy.source_from_parameters(self.frame, outer_factor)
elif galaxy.companion and not galaxy.has_source and galaxy.has_extent: galaxy.source_from_parameters(self.frame, outer_factor)
# Inform the user
log.info("Found a source for {0} out of {1} objects ({2:.2f}%)".format(self.have_source, len(self.galaxies), self.have_source/len(self.galaxies)*100.0))
# -----------------------------------------------------------------
def load_galaxies(self):
"""
This function creates the galaxy list from the galaxy catalog.
:return:
"""
# Inform the user
log.info("Loading the galaxies from the catalog ...")
# Set to be strings columns that should be strings
for colname in self.catalog.colnames:
if colname in ["Name","Type","Alternative names","Companion galaxies","Parent galaxy"]:
self.catalog[colname] = self.catalog[colname].astype(str)
if colname in ["Principal"]:
self.catalog[colname] = self.catalog[colname].astype(bool)
# Create the list of galaxies
for i in range(len(self.catalog)):
# Get the galaxy properties
name = self.catalog["Name"][i]
redshift = self.catalog["Redshift"][i] if isinstance(self.catalog["Redshift"][i], numbers.Number) else None
galaxy_type = self.catalog["Type"][i] if bool(len(self.catalog["Type"][i])) else None
distance = self.catalog["Distance"][i] * Unit("Mpc") if isinstance(self.catalog["Distance"][i], numbers.Number) else None
inclination = Angle(self.catalog["Inclination"][i], Unit("deg")) if isinstance(self.catalog["Inclination"][i], numbers.Number) else None
d25 = self.catalog["D25"][i] * Unit("arcmin") if isinstance(self.catalog["D25"][i], numbers.Number) else None
major = self.catalog["Major axis length"][i] * Unit("arcmin") if isinstance(self.catalog["Major axis length"][i], numbers.Number) else None
minor = self.catalog["Minor axis length"][i] * Unit("arcmin") if isinstance(self.catalog["Minor axis length"][i], numbers.Number) else None
position_angle = Angle(self.catalog["Position angle"][i], Unit("deg")) if isinstance(self.catalog["Position angle"][i], numbers.Number) else None
ra = Angle(str(self.catalog["Right ascension"][i])+' hours').deg
dec = self.catalog["Declination"][i]
names = self.catalog["Alternative names"][i].split(", ") if bool(len(self.catalog["Alternative names"][i])) else []
principal = self.catalog["Principal"][i]
companions = self.catalog["Companion galaxies"][i].split(", ") if bool(len((self.catalog["Companion galaxies"][i].split(", "))[0])) else []
parent = self.catalog["Parent galaxy"][i] if bool(len(self.catalog["Parent galaxy"][i])) else None
# Create a SkyCoordinate for the galaxy center position
position = SkyCoordinate(ra=ra, dec=dec, unit="deg", frame="fk5")
# If the galaxy falls outside of the frame, skip it
if len(self.catalog) > 1:
if not self.frame.contains(position): continue
# Create a new Galaxy instance
galaxy = Galaxy(i, name, position, redshift, galaxy_type, names, distance, inclination, d25, major, minor, position_angle)
# Calculate the pixel position of the galaxy in the frame
pixel_position = galaxy.pixel_position(self.frame.wcs)
# Set other attributes
galaxy.principal = principal
galaxy.companion = parent is not None
galaxy.companions = companions
galaxy.parent = parent
# Enable track record if requested
if self.config.track_record: galaxy.enable_track_record()
# Set attributes based on masks (special and ignore)
if self.special_mask is not None: galaxy.special = self.special_mask.masks(pixel_position)
if self.ignore_mask is not None: galaxy.ignore = self.ignore_mask.masks(pixel_position)
# If the bad pixel mask masks this galaxy's position and it is not the principal galaxy, skip it (don't add it to the list of galaxies)
if len(self.catalog) > 1:
if self.bad_mask is not None and self.bad_mask.masks(pixel_position) and not galaxy.principal: continue
# Add the new galaxy to the list
self.galaxies.append(galaxy)
# If no principal galaxy has been set, set it explicitly here
if not hasattr(self, "principal"):
if len(self.catalog) == 1:
self.principal = self.galaxies[0]
"""index = np.where(self.catalog["Major axis length"] == max(self.catalog["Major axis length"]))[0][0]
self.principal = Galaxy(index,
self.catalog[index]["Name"],
SkyCoordinate(ra=Angle(str(self.catalog["Right ascension"][index])+' hours').deg, dec=self.catalog["Declination"][index], unit="deg", frame="fk5"),
self.catalog["Redshift"][index] if isinstance(self.catalog["Redshift"][index], numbers.Number) else None,
self.catalog["Type"][index] if bool(len(self.catalog["Type"][i])) else None,
self.catalog["Alternative names"][index].split(", ") if bool(len(self.catalog["Type"][index])) else [],
self.catalog["Distance"][index] * Unit("Mpc") if isinstance(self.catalog["Distance"][index], str) else None,
Angle(self.catalog["Inclination"][index], Unit("deg")) if isinstance(self.catalog["Inclination"][index], numbers.Number) else None,
self.catalog["D25"][index] * Unit("arcmin") if isinstance(self.catalog["D25"][index], numbers.Number) else None,
self.catalog["Major axis length"][index] * Unit("arcmin") if isinstance(self.catalog["Major axis length"][index], numbers.Number) else None,
self.catalog["Minor axis length"][index] * Unit("arcmin") if isinstance(self.catalog["Minor axis length"][index], numbers.Number) else None,
Angle(self.catalog["Position angle"][index], Unit("deg")) if isinstance(self.catalog["Position angle"][index], numbers.Number) else None)"""
# Debug messages
log.debug(self.principal.name + " is the principal galaxy in the frame")
log.debug("The following galaxies are its companions: " + str(self.principal.companions))
# -----------------------------------------------------------------
def find_contours(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Constructing elliptical contours to encompass the detected galaxies ...")
# Loop over all galaxies
for galaxy in self.galaxies:
# If this galaxy should be ignored, skip it
if galaxy.ignore: continue
# Try to find a contour for the galaxy if it has a source; skip it if this fails
try:
if galaxy.has_source: galaxy.find_contour(self.frame, self.config.apertures)
except:
continue
# -----------------------------------------------------------------
@property
def principal_shape(self):
"""
This function ...
:return:
"""
return self.principal.shape
# -----------------------------------------------------------------
@property
def principal_ellipse(self):
"""
This function ...
:return:
"""
# Get the center in pixel coordinates
center = self.principal.pixel_position(self.frame.wcs)
# Get the angle
angle = self.principal.pa_for_wcs(self.frame.wcs)
x_radius = 0.5 * self.principal.major.to("arcsec").value / self.frame.average_pixelscale.to("arcsec/pix").value
y_radius = 0.5 * self.principal.minor.to("arcsec").value / self.frame.average_pixelscale.to("arcsec/pix").value
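# Worked numeric sketch of the conversion above (assumed values, for
# illustration only): a major axis of 3 arcmin = 180 arcsec on a frame with an
# average pixel scale of 1.5 arcsec/pix gives x_radius = 0.5 * 180 / 1.5 = 60 pixels.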
radius = Extent(x_radius, y_radius)
# Create and return an ellipse
return Ellipse(center, radius, angle)
# -----------------------------------------------------------------
@property
def principal_sky_ellipse(self):
"""
This function ...
:return:
"""
# Get the ellipse in image coordinates
ellipse = self.principal_ellipse
# Create a SkyEllipse
sky_ellipse = SkyEllipse.from_pixel(ellipse, self.frame.wcs)
# Return the sky ellipse
return sky_ellipse
# -----------------------------------------------------------------
@property
def principal_mask(self):
"""
This function ...
:return:
"""
# Create a new mask with the dimensions of the frame
mask = Mask.empty_like(self.frame)
# Add the principal galaxy's mask to the total mask
mask[self.principal.source.cutout.y_slice, self.principal.source.cutout.x_slice] = self.principal.source.mask
# Return the mask
return mask
# -----------------------------------------------------------------
@property
def companion_mask(self):
"""
This function ...
:return:
"""
# Create a new mask with the dimension of the frame
mask = Mask.empty_like(self.frame)
# Loop over all companion galaxies
for galaxy in self.companions:
# Check if the galaxy has a source and add its mask to the total mask
if galaxy.has_source: mask[galaxy.source.cutout.y_slice, galaxy.source.cutout.x_slice] = galaxy.source.mask
# Return the mask
return mask
# -----------------------------------------------------------------
def set_statistics(self):
"""
This function ...
:return:
"""
index_column = []
have_source_column = []
# Loop over all galaxies
for galaxy in self.galaxies:
index_column.append(galaxy.index)
have_source_column.append(galaxy.has_source)
# Create data structure and set column names
data = [index_column, have_source_column]
names = ["Galaxy index", "Detected"]
# Create the statistics table
self.statistics = tables.new(data, names)
# -----------------------------------------------------------------
def create_region(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating galaxy region ...")
# Initialize the region
self.region = Region()
# Loop over all galaxies
for galaxy in self.galaxies:
# Get the center in pixel coordinates
center = galaxy.pixel_position(self.frame.wcs)
# Set the angle
angle = galaxy.pa_for_wcs(self.frame.wcs).to("deg") if galaxy.pa is not None else 0.0
if galaxy.major is None:
color = "red"
x_radius = self.config.region.default_radius
y_radius = self.config.region.default_radius
elif galaxy.minor is None or galaxy.pa is None:
color = "green"
x_radius = 0.5 * galaxy.major.to("arcsec").value / self.frame.average_pixelscale.to("arcsec/pix").value
y_radius = x_radius
else:
color = "green"
x_radius = 0.5 * galaxy.major.to("arcsec").value / self.frame.average_pixelscale.to("arcsec/pix").value
y_radius = 0.5 * galaxy.minor.to("arcsec").value / self.frame.average_pixelscale.to("arcsec/pix").value
radius = Extent(x_radius, y_radius)
# Create a coordinate for the center and add it to the region
meta = {"point": "x"}
self.region.append(Coordinate(center.x, center.y, meta=meta))
text = galaxy.name
if galaxy.principal: text += " (principal)"
# If hand-drawn principal region
if galaxy.principal and self.config.principal_region is not None: shape = galaxy.shape
# Create an ellipse for the galaxy
else: shape = Ellipse(center, radius, angle, meta=meta)
# Set meta information
meta = {"text": text, "color": color}
shape.meta = meta
# Add the shape to the region
self.region.append(shape)
# -----------------------------------------------------------------
def create_segments(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the segmentation map of galaxies ...")
# Loop over all galaxies
for galaxy in self.galaxies:
# Skip galaxies without source
if not galaxy.has_source: continue
# Determine the label for the galaxy
if galaxy.principal: label = 1
elif galaxy.companion: label = 2
else: label = 3
# Add the galaxy mask to the segmentation map
self.segments[galaxy.source.y_slice, galaxy.source.x_slice][galaxy.source.mask] = label
# -----------------------------------------------------------------
def write_cutouts(self):
"""
This function ...
:return:
"""
# Determine the full path to the cutouts directory
directory_path = self.full_output_path(self.config.writing.cutouts_path)
# Inform the user
log.info("Writing cutout boxes to " + directory_path + " ...")
# Keep track of the number of stars encountered
principals = 0
companions = 0
with_source = 0
# Loop over all galaxies
for galaxy in self.galaxies:
# Check if this is the principal galaxy
if galaxy.principal:
# Save the cutout as a FITS file
path = fs.join(directory_path, "galaxy_principal_" + str(principals) + ".fits")
galaxy.source.save(path, origin=self.name)
# Increment the counter of the number of principal galaxies (there should only be one, really...)
principals += 1
# Check if this is a companion galaxy
elif galaxy.companion:
# Save the cutout as a FITS file
path = fs.join(directory_path, "galaxy_companion_" + str(companions) + ".fits")
galaxy.source.save(path, origin=self.name)
# Increment the counter of the number of companion galaxies
companions += 1
# Check if this galaxy has a source
elif galaxy.has_source:
# Save the cutout as a FITS file
path = fs.join(directory_path, "galaxy_source_" + str(with_source) + ".fits")
galaxy.source.save(path, origin=self.name)
# Increment the counter of the number of galaxies with a source
with_source += 1
# -----------------------------------------------------------------
@property
def have_source(self):
"""
This function ...
:return:
"""
count = 0
for galaxy in self.galaxies: count += galaxy.has_source
return count
# -----------------------------------------------------------------
|
Stargrazer82301/CAAPR
|
CAAPR/CAAPR_AstroMagic/PTS/pts/magic/sources/galaxyfinder.py
|
Python
|
mit
| 24,200
|
[
"Galaxy"
] |
980691a7bdfb9ac12faacbeb24fc10b783471996b6d5c1ddc55d748bcfb8ab93
|
# -*- coding: utf-8 -*-
#
# pulsepacket.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# This script compares the average and individual membrane potential excursions
# in response to a single pulse packet with an analytically acquired voltage trace.
# A pulse packet is a transient spike volley with a Gaussian rate profile.
#
#
# The user can specify the neural parameters, the parameters of the
# pulse-packet and the number of trials.
# Sven Schrader, Nov 2008
import nest
import numpy
import pylab
import array
from numpy import exp
a = 100 # number of spikes in one pulse packet
sdev = 50. # ms width of pulse packet
weight = 0.1 # mV psp amplitude
n_neurons = 10 # number of trials
pulsetime = 500. # ms occurrence time (center) of pulse-packet
simtime = 1000. # ms total simulation duration
Cm = 200. # pF, capacitance
tau_s = 0.5 # ms, synaptic time constant
tau_m = 20. # ms, membrane time constant
V0 = 0.0 # mV, resting potential
Vth = 9999.0 # mV, firing threshold. Keep high when looking at passive properties.
simulation_resolution = 0.1 # ms
sampling_resolution = 1. # ms, for voltmeter
convolution_resolution = 1. # ms, for the analytics
#####################################################################
# analytical section. Here we need to convert to SI units
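# Added outline (summary of the code below): the analytical trace is the
# pulse-packet rate profile (a Gaussian carrying 'a' spikes of amplitude
# 'weight') convolved with the unit postsynaptic potential, i.e.
# P(t) = a * weight * (gauss * psp)(t), which is later plotted on top of the
# simulated membrane potentials.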
e = exp(1)
pF = 1e-12
ms = 1e-3 # helper variables as units
# make gauss-kernel
sigma = (sdev*ms)**2
mu = 0.0
x = numpy.arange(-4*sdev , 4*sdev , convolution_resolution * ms )
gauss = 1 / (sigma * numpy.sqrt(2) * numpy.pi) * exp( -(x-mu)**2 / (sigma*numpy.sqrt(2) ) )
gauss /= pylab.sum(gauss) # normalize to unit area
# make unit psp
#
# step 1: make time axis. We use the tenfold sum of tau_s and tau_m as width. This should suffice.
t_psp = numpy.arange(0, 10 * (tau_m * ms + tau_s * ms) , convolution_resolution * ms )
# step 2 : calculate psp. Its maximum is used below as a fudge-factor for the psp amplitude
psp = ( Cm * pF ) / ( tau_s * ms ) * (1/( Cm * pF )) * (e/( tau_s * ms ) ) * \
( ((-t_psp * exp(-t_psp/(tau_s * ms) )) / (1/( tau_s * ms )-1 / ( tau_m * ms ) )) +\
(exp(-t_psp/( tau_m * ms )) - exp(-t_psp/( tau_s * ms ))) / ((1/( tau_s * ms ) - 1/( tau_m * ms ))**2) )
# step 3: normalize to amplitude 1, thereby obtaining the maximum
fudge = 1/numpy.max(psp) # fudge is also used below
psp *= fudge
# step 4: pad with zeros on the left side. Avoids offsets when using convolution
tmp = numpy.zeros(2*len(psp))
tmp[len(psp)-1:-1] += psp
psp = tmp
del tmp
P = a * weight * pylab.convolve(gauss, psp)
l = len(P)
t_P = convolution_resolution * numpy.linspace( -l/2., l/2., l) + pulsetime + 1. # one ms delay
#########################################################################
# simulation section
nest.ResetKernel()
nest.SetStatus([0],[{'resolution':simulation_resolution}])
J = Cm*weight/tau_s*fudge
nest.SetDefaults('static_synapse', {'weight':J} )
n = nest.Create('iaf_psc_alpha',n_neurons,
{'V_th':Vth, 'tau_m':tau_m, 'tau_syn_ex':tau_s, 'C_m':Cm, 'E_L':V0,'V_reset':V0,'V_m':V0})
pp = nest.Create('pulsepacket_generator',n_neurons, {'pulse_times':[pulsetime], 'activity':a, 'sdev':sdev})
vm = nest.Create('voltmeter', 1, {'record_to':['memory'], 'withtime':True, 'withgid':True, 'interval':sampling_resolution})
nest.Connect(pp,n)
nest.DivergentConnect(vm,n)
nest.Simulate(simtime)
V = nest.GetStatus(vm,'events')[0]['V_m']
t_V = nest.GetStatus(vm,'events')[0]['times']
senders = nest.GetStatus(vm,'events')[0]['senders']
#########################################################################
# plotting...
v={}
t={}
for s in range(senders.size):
currentsender=senders[s]
if not v.has_key(currentsender) :
v[currentsender] = array.array('f')
v[currentsender].append(float(V[s]))
if not t.has_key(currentsender) :
t[currentsender] = array.array('f')
t[currentsender].append(float(t_V[s]))
if n_neurons >1:
average_V = numpy.zeros(len(v[n[0]]))
for neuron in n:
average_V += v[neuron]
average_V /= n_neurons
pylab.hold(True)
p2 = pylab.plot(t_P,P+V0,color='red',linewidth=3)
p2[0].set_zorder(n_neurons+1)
if n_neurons > 1:
p3 = pylab.plot(t[n[0]], average_V ,color='blue',linewidth=2)
p3[0].set_zorder(n_neurons+2)
for neuron in n:
pylab.plot(t[neuron],v[neuron],color='gray')
if n_neurons > 1:
pylab.legend( ( 'analytical solution', 'averaged potential','membrane potential') )
else:
pylab.legend( ( 'analytical solution','membrane potential') )
pylab.xlabel('ms')
pylab.ylabel('mV')
pylab.xlim((-10*(tau_m+tau_s) + pulsetime , 10*(tau_m+tau_s) + pulsetime))
|
QJonny/CyNest
|
pynest/examples/pulsepacket.py
|
Python
|
gpl-2.0
| 5,355
|
[
"Gaussian",
"NEURON"
] |
3b64956c86b47996a57b0a07b6c4a1fb90570432e0959cab418c39f482c91494
|
## \file
## \ingroup tutorial_roofit
## \notebook -nodraw
## Organization and simultaneous fits: creating and writing a workspace
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Create model and dataset
# -----------------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
# Create two Gaussian PDFs g1(x,mean1,sigma) anf g2(x,mean2,sigma) and
# their parameters
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5, 0, 10)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Build Chebychev polynomial pdf
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg = ROOT.RooChebychev("bkg", "Background", x, ROOT.RooArgList(a0, a1))
# Sum the signal components into a composite signal pdf
sig1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
"sig", "Signal", ROOT.RooArgList(sig1, sig2), ROOT.RooArgList(sig1frac))
# Sum the composite signal and background
bkgfrac = ROOT.RooRealVar("bkgfrac", "fraction of background", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model", "g1+g2+a", ROOT.RooArgList(bkg, sig), ROOT.RooArgList(bkgfrac))
# Generate a data sample of 1000 events in x from model
data = model.generate(ROOT.RooArgSet(x), 1000)
# Create workspace, import data and model
# -----------------------------------------------------------------------------
# Create a empty workspace
w = ROOT.RooWorkspace("w", "workspace")
# Import model and all its components into the workspace
w.Import(model)
# Import data into the workspace
w.Import(data)
# Print workspace contents
w.Print()
# Save workspace in file
# -------------------------------------------
# Save the workspace into a ROOT file
w.writeToFile("rf502_workspace.root")
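# Read-back sketch (an assumption for illustration, not part of this tutorial;
# it would be run in a separate session after the file has been written):
# f = ROOT.TFile.Open("rf502_workspace.root")
# w2 = f.Get("w")
# w2.Print()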
|
root-mirror/root
|
tutorials/roofit/rf502_wspacewrite.py
|
Python
|
lgpl-2.1
| 2,078
|
[
"Gaussian"
] |
9168faf5d5b1af774b2c806056f1cbbd2a0f21ab4f41c0dcc76d0c461373bbfe
|
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
Outlier detection operations
Functions
=========
"""
import fnmatch
import xarray as xr
import numpy as np
from cate.core.op import op, op_input, op_return
from cate.core.types import VarNamesLike, DatasetLike
from cate.util.monitor import Monitor
from cate import __version__
@op(tags=['filter'], version='1.0')
@op_input('ds', data_type=DatasetLike)
@op_input('var', value_set_source='ds', data_type=VarNamesLike)
@op_return(add_history=True)
def detect_outliers(ds: xr.Dataset,
var: VarNamesLike.TYPE,
threshold_low: float = 0.05,
threshold_high: float = 0.95,
quantiles: bool = True,
mask: bool = False,
monitor: Monitor = Monitor.NONE) -> xr.Dataset:
"""
Detect outliers in the given Dataset.
When mask=True the input dataset should not contain nan values, otherwise
all existing nan values will be marked as 'outliers' in the mask data array
added to the output dataset.
:param ds: The dataset or dataframe for which to do outlier detection
:param var: Variable or variables in the dataset to which to do outlier
detection. Note that when multiple variables are selected, absolute
threshold values might not make much sense. Wild cards can be used to
select multiple variables matching a pattern.
:param threshold_low: Values less than or equal to this will be removed/masked
:param threshold_high: Values greater than or equal to this will be removed/masked
:param quantiles: If True, threshold values are treated as quantiles,
otherwise as absolute values.
:param mask: If True, an ancillary variable containing flag values for
outliers will be added to the dataset. Otherwise, outliers will be replaced
with nan directly in the data variables.
:param monitor: A progress monitor.
:return: The dataset with outliers masked or replaced with nan
"""
ds = DatasetLike.convert(ds)
# Create a list of variable names on which to perform outlier detection
# based on the input comma separated list that can contain wildcards
var_patterns = VarNamesLike.convert(var)
all_vars = list(ds.data_vars.keys())
variables = list()
for pattern in var_patterns:
leave = fnmatch.filter(all_vars, pattern)
variables = variables + leave
# For each array in the dataset for which we should detect outliers, detect
# outliers
ret_ds = ds.copy()
with monitor.starting("detect_outliers", total_work=len(variables) * 3):
for var_name in variables:
if quantiles:
# Get threshold values
with monitor.child(1).observing("quantile low"):
threshold_low = ret_ds[var_name].quantile(threshold_low)
with monitor.child(1).observing("quantile high"):
threshold_high = ret_ds[var_name].quantile(threshold_high)
else:
monitor.progress(2)
# If not mask, put nans in the data arrays for min/max outliers
if not mask:
arr = ret_ds[var_name]
attrs = arr.attrs
ret_ds[var_name] = arr.where((arr > threshold_low) & (arr < threshold_high))
ret_ds[var_name].attrs = attrs
else:
# Create and add a data variable containing the mask for this data
# variable
_mask_outliers(ret_ds, var_name, threshold_low, threshold_high)
monitor.progress(1)
return ret_ds
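# Minimal usage sketch (illustrative only; the synthetic dataset and the
# variable name 'tas' below are hypothetical, not part of Cate's test data):
# import numpy as np
# import xarray as xr
# demo = xr.Dataset({'tas': ('time', np.r_[np.random.rand(98), 50.0, -50.0])})
# cleaned = detect_outliers(demo, var='tas', quantiles=True, mask=False)
# # values at or below the 5th percentile or at or above the 95th percentile
# # of 'tas' are now replaced with NaN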
def _mask_outliers(ds: xr.Dataset, var_name: str, threshold_low: float,
threshold_high: float):
"""
Create a mask data array for the given variable of the dataset and given
absolute threshold values. Add the mask data array as an ancillary data
array to the original array as per CF conventions.
For explanation about the relevant attributes, see::
http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#flags
http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#ancillary-data
:param ds: The dataset (will be mutated)
:param var_name: variable name
:param threshold_low: absolute threshold bottom value
:param threshold_high: absolute threshold top value
"""
arr = ds[var_name]
# Create a boolean mask where True denotes an outlier, convert it to 8-bit
# integer dtype, as to_netcdf will complain about a boolean dtype
mask = arr.where((arr > threshold_low) & (arr < threshold_high))
mask = mask.isnull()
mask = mask.astype('i1')
# According to CF conventions, the actual variable name in the netCDF can
# be whatever, but appending things after an underscore is a reasonable
# convention
mask_name = var_name + '_outlier_mask'
# Set the flag data array attributes as per CF conventions
try:
mask.attrs['long_name'] = arr.attrs['long_name'] + ' outlier mask'
except KeyError:
# The dataset is not CF compliant, add the attribute anyway
mask.attrs['long_name'] = 'Outlier mask'
try:
mask.attrs['standard_name'] = arr.attrs['standard_name'] + ' status_flag'
except KeyError:
# The dataset is not CF compliant, add the attribute anyway
mask.attrs['standard_name'] = 'status_flag'
mask.attrs['_FillValue'] = 0
mask.attrs['valid_range'] = np.array([1.0, 1.0], dtype='i1')
mask.attrs['flag_values'] = np.array([1], dtype='i1')
mask.attrs['flag_meanings'] = "is_outlier"
mask.attrs['source'] = "Cate v" + __version__
# Add the mask array to the dataset
ds[mask_name] = mask
# Create an ancillary variable link between the parent data array and the
# mask array
try:
anc_var = ds[var_name].attrs['ancillary_variables']
except KeyError:
# No ancillary variables associated with this variable yet
anc_var = ''
ds[var_name].attrs['ancillary_variables'] = anc_var + ' ' + mask_name
|
CCI-Tools/cate-core
|
cate/ops/outliers.py
|
Python
|
mit
| 7,222
|
[
"NetCDF"
] |
936a104a34785f504481362069c674d7d1182b48592f3f42d58e55b387738a01
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("djangocali-portal.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'^blog/', include('blog.urls')),
url(r'^forum/', include('forum.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
fullprogramaciondev/portal
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,324
|
[
"VisIt"
] |
04d6f7890a7d2a0a49805480741a470b964cb3b9b2ee7673b24f54faa6f2872c
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for preconditioned_hmc."""
import collections
# Dependency imports
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import distribute_lib
from tensorflow_probability.python.internal import distribute_test_lib
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.internal import unnest
JAX_MODE = False
tfb = tfp.bijectors
tfd = tfp.distributions
tfde = tfp.experimental.distributions
# Allowed type of preconditioning schemes to use.
# See code for details.
PRECONDITION_SCHEMES = {
'direct', 'precision_factor', 'sqrtm', 'scale',
# `None` ==> No preconditioner. This is different than a "bad"
# preconditioner. We will be able to check asymptotics with "None".
'no_preconditioner',
}
RunHMCResults = collections.namedtuple('RunHMCResults', [
'draws',
'step_size',
'final_step_size',
'asymptotic_step_size',
'accept_prob',
'mean_accept_prob',
'min_ess',
'sample_mean',
'sample_cov',
'sample_var',
'mean_atol',
'cov_atol',
'var_rtol',
])
def _make_composite_tensor(dist):
"""Wrapper to make distributions of linear operators composite."""
if JAX_MODE:
return dist
if dist is None:
return dist
composite_dist = tfp.experimental.auto_composite_tensor(dist.__class__,
omit_kwargs='name')
p = dist.parameters
for k in p:
if isinstance(p[k], tfp.distributions.Distribution):
p[k] = _make_composite_tensor(p[k])
elif isinstance(p[k], tf.linalg.LinearOperator):
composite_linop = tfp.experimental.auto_composite_tensor(p[k].__class__)
p[k] = composite_linop(**p[k].parameters)
ac_dist = composite_dist(**p)
return ac_dist
def as_composite(obj):
if JAX_MODE:
return obj
return tfp.experimental.as_composite(obj)
@test_util.test_graph_and_eager_modes
class PreconditionedHMCCorrectnessTest(test_util.TestCase):
"""More careful tests that sampling/preconditioning is actually working."""
def _calculate_asymptotic_step_size(self, scales, prob_accept):
"""Calculate the (asymptotic) expected step size for given scales/P[accept].
The distribution should be a multivariate Gaussian, and the approximation is
appropriate in high dimensions when the spectrum is polynomially decreasing.
For details, see [1], equations (3.1, 3.2).
Args:
scales: Tensor with the square roots of the eigenvalues of the
covariance matrix.
prob_accept: Average acceptance probability.
Returns:
step_size: Float of approximate step size to achieve the target acceptance
rate.
#### References
[1]: Langmore, Ian, Michael Dikovsky, Scott Geraedts, Peter Norgaard, and
Rob Von Behren. 2019. “A Condition Number for Hamiltonian Monte Carlo."
http://arxiv.org/abs/1905.09813.
"""
inv_nu = tf.reduce_sum((1. / scales) ** 4, axis=-1) ** -0.25
step_size = (inv_nu *
(2**1.75) *
tf.sqrt(tfd.Normal(0., 1.).quantile(1 - prob_accept / 2.)))
return step_size
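# Worked sketch of the formula above (assumed numbers, for illustration only):
# with scales = tf.ones(200) and prob_accept = 0.75,
#   inv_nu = (200 * 1.) ** -0.25 ~= 0.266
#   Normal(0, 1).quantile(1 - 0.75 / 2) = quantile(0.625) ~= 0.319
#   step_size ~= 0.266 * 2 ** 1.75 * sqrt(0.319) ~= 0.50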
def _run_hmc_with_step_size(
self,
target_mvn,
precondition_scheme,
target_accept=0.75,
num_results=2000,
num_adaptation_steps=20,
):
"""Run HMC with step_size adaptation, and return RunHMCResults."""
assert precondition_scheme in PRECONDITION_SCHEMES
dims = target_mvn.event_shape[0]
target_cov = target_mvn.covariance()
cov_linop = tf.linalg.LinearOperatorFullMatrix(
target_cov,
is_self_adjoint=True,
is_positive_definite=True)
if precondition_scheme == 'no_preconditioner':
momentum_distribution = None
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.sqrt(tf.linalg.eigvalsh(target_cov))
elif precondition_scheme == 'direct':
momentum_distribution = tfd.MultivariateNormalLinearOperator(
# The covariance of momentum is inv(covariance of position), and we
# parameterize distributions by a square root of the covariance.
scale=cov_linop.inverse().cholesky(),
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
elif precondition_scheme == 'precision_factor':
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# The precision of momentum is the covariance of position.
# The "factor" is the cholesky factor.
precision_factor=cov_linop.cholesky(),
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
elif precondition_scheme == 'sqrtm':
if JAX_MODE:
self.skipTest('`sqrtm` is not yet implemented in JAX.')
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# The symmetric square root is a perfectly valid "factor".
precision_factor=tf.linalg.LinearOperatorFullMatrix(
tf.linalg.sqrtm(target_cov)),
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
elif precondition_scheme == 'scale':
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# Nothing wrong with using "scale", since the scale should be the
# same as cov_linop.cholesky().
precision_factor=target_mvn.scale,
)
# Internal to the sampler, these scales are being used (implicitly).
internal_scales = tf.ones(dims)
else:
raise RuntimeError(
'Unhandled precondition_scheme: {}'.format(precondition_scheme))
momentum_distribution = _make_composite_tensor(momentum_distribution)
# Asymptotic step size, assuming P[accept] = target_accept.
expected_step = self._calculate_asymptotic_step_size(
scales=internal_scales,
prob_accept=target_accept,
)
# Initialize step size to something close to the expected required step
# size. This helps reduce the need for a long burn-in. Don't use the
# expected step size exactly, since that would be cheating.
initial_step_size = expected_step / 2.345
# Set num_leapfrog_steps so that we get decent ESS.
max_internal_scale = tf.reduce_max(internal_scales)
num_leapfrog_steps = tf.minimum(
tf.cast(
tf.math.ceil(1.5 * max_internal_scale / expected_step),
dtype=tf.int32), 30)
hmc_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=target_mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=initial_step_size,
num_leapfrog_steps=num_leapfrog_steps),
num_adaptation_steps=num_adaptation_steps,
target_accept_prob=target_accept)
def trace_fn(_, pkr):
results = pkr.inner_results
return {
'accept_prob':
tf.exp(tf.minimum(0., results.log_accept_ratio)),
'step_size':
results.accepted_results.step_size,
}
@tf.function
def do_run_run_run():
"""Do a run, return RunHMCResults."""
states, trace = tfp.mcmc.sample_chain(
num_results,
current_state=tf.identity(target_mvn.sample(
seed=test_util.test_seed())),
kernel=hmc_kernel,
num_burnin_steps=num_adaptation_steps,
seed=test_util.test_seed(),
trace_fn=trace_fn)
# If we had some number of chain dimensions, we would change sample_axis.
sample_axis = 0
sample_cov = tfp.stats.covariance(states, sample_axis=sample_axis)
max_variance = tf.reduce_max(tf.linalg.diag_part(sample_cov))
max_stddev = tf.sqrt(max_variance)
min_ess = tf.reduce_min(tfp.mcmc.effective_sample_size(states))
mean_accept_prob = tf.reduce_mean(trace['accept_prob'])
# Asymptotic step size given that P[accept] = mean_accept_prob.
asymptotic_step_size = self._calculate_asymptotic_step_size(
scales=internal_scales,
prob_accept=mean_accept_prob,
)
return RunHMCResults(
draws=states,
step_size=trace['step_size'],
final_step_size=trace['step_size'][-1],
asymptotic_step_size=asymptotic_step_size,
accept_prob=trace['accept_prob'],
mean_accept_prob=mean_accept_prob,
min_ess=tf.reduce_min(tfp.mcmc.effective_sample_size(states)),
sample_mean=tf.reduce_mean(states, axis=sample_axis),
sample_cov=sample_cov,
sample_var=tf.linalg.diag_part(sample_cov),
# Standard error in variance estimation is related to standard
# deviation of variance estimates. For a Normal, this is just Sqrt(2)
# times variance divided by sqrt sample size (or so my old notes say).
# So a relative tolerance is useful.
# Add in a factor of 5 as a buffer.
var_rtol=5 * tf.sqrt(2.) / tf.sqrt(min_ess),
# For covariance matrix estimates, there can be terms that have
# expectation = 0 (e.g. off diagonal entries). So the above doesn't
# hold. So use an atol.
cov_atol=5 * max_variance / tf.sqrt(min_ess),
# Standard error in mean estimation is stddev divided by sqrt
# sample size. This is an absolute tolerance.
# Add in a factor of 5 as a buffer.
mean_atol=5 * max_stddev / tf.sqrt(min_ess),
)
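# (Added note on the tolerances above, from standard results for a Normal
# sample of size n: Var(sample variance) ~= 2 * sigma**4 / n, so the relative
# standard error of the variance is ~ sqrt(2) / sqrt(n), and the standard
# error of the mean is sigma / sqrt(n); the factor of 5 is a safety buffer.)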
# Evaluate now, to ensure that states/accept_prob/etc... all match up with
# the same graph evaluation. This is a gotcha about TFP MCMC in graph mode.
return self.evaluate(do_run_run_run())
def _check_correctness_of_moments_and_preconditioning(
self,
target_mvn,
num_results,
precondition_scheme,
check_step_size_asymptotics=True,
asymptotic_step_size_rtol=0.2,
):
"""Test that step size adaptation finds the theoretical optimal step size.
See _calculate_asymptotic_step_size for formula details, but roughly, for a
high dimensional Gaussian posterior, we can calculate the approximate step
size to achieve a given target accept rate. For such a posterior,
`PreconditionedHMC` mimics the dynamics of sampling from an isotropic
standard normal distribution, and so should adapt to the step size where
the scales are all ones.
In the example below, `expected_step` is around 0.00002, so there is
significantly different behavior when conditioning.
Args:
target_mvn: Multivariate normal instance to sample from.
num_results: Number of samples to collect (post burn-in).
precondition_scheme: String telling how to do preconditioning.
Should be in PRECONDITION_SCHEMES.
check_step_size_asymptotics: Boolean telling whether to check that the
step size and P[accept] match up with expected values. This checks
that the "internal/implicit" sampling distribution is as expected. E.g.
when preconditioning, we expect the internal distribution to be a
standard Normal. When not preconditioning we expect it to be the target.
asymptotic_step_size_rtol: rtol for the asymptotic step size test.
The "nastier" spectra (with a small number of tiny eigenvalues) often
require larger tolerance. About 10% rtol is what we can expect.
20% is the default for safety. When a "bad preconditioner" is used,
these two are off by 100% or more (but no guarantee, since luck may
prevail).
Returns:
RunHMCResults
"""
results = self._run_hmc_with_step_size(
target_mvn, precondition_scheme=precondition_scheme)
if check_step_size_asymptotics:
self.assertAllClose(
results.final_step_size,
results.asymptotic_step_size,
rtol=asymptotic_step_size_rtol)
self.assertAllClose(
results.sample_mean, target_mvn.mean(), atol=results.mean_atol)
self.assertAllClose(
results.sample_var, target_mvn.variance(), rtol=results.var_rtol)
self.assertAllClose(
results.sample_cov, target_mvn.covariance(), atol=results.cov_atol)
return results
@parameterized.named_parameters(
dict(testcase_name='_' + str(scheme), precondition_scheme=scheme)
for scheme in PRECONDITION_SCHEMES)
def test_correctness_with_2d_mvn_tril(self, precondition_scheme):
# Low dimensional test to help people who want to step through and debug.
target_mvn = tfd.MultivariateNormalTriL(
loc=tf.constant([0., 0.]),
scale_tril=[[1., 0.], [0.5, 2.]],
)
self._check_correctness_of_moments_and_preconditioning(
target_mvn,
# Lots of results, to test tight tolerance.
# We're using a small dims here, so this isn't a big deal.
num_results=5000,
precondition_scheme=precondition_scheme,
# We're in such low dimensions that we don't expect asymptotics to work.
check_step_size_asymptotics=False)
@parameterized.named_parameters(
dict(testcase_name='_' + str(scheme), precondition_scheme=scheme)
for scheme in PRECONDITION_SCHEMES)
def test_correctness_with_200d_mvn_tril(self, precondition_scheme):
# This is an almost complete check of the Gaussian case.
dims = 200
scale_wishart = tfd.WishartLinearOperator(
# Important that df is just slightly bigger than dims. This makes the
# scale_wishart ill condtioned. The result is that tests fail if we do
# not handle transposes correctly.
df=1.1 * dims,
scale=tf.linalg.LinearOperatorIdentity(dims),
input_output_cholesky=True,
name='wishart_for_samples',
)
# evaluate right here to avoid working with a random target_mvn in graph
# mode....that would cause issues, since we read off expected statistics
# from looking at the mvn properties, so it would be bad if these properties
# changed with every graph eval.
scale_tril = self.evaluate(scale_wishart.sample(seed=test_util.test_seed()))
target_mvn = tfd.MultivariateNormalTriL(
# Non-trivial "loc" ensures we do not rely on being centered at 0.
loc=tf.range(0., dims),
scale_tril=scale_tril,
)
self._check_correctness_of_moments_and_preconditioning(
target_mvn,
# Lots of results, to test tight tolerance.
num_results=3000,
precondition_scheme=precondition_scheme,
asymptotic_step_size_rtol=(
0.5 if precondition_scheme == 'no_preconditioner' else 0.25),
)
def test_sets_kinetic_energy(self):
dist = tfd.MultivariateNormalDiag(scale_diag=tf.constant([0.1, 10.]))
step_size = 0.1
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=dist.log_prob,
step_size=step_size,
num_leapfrog_steps=1,
store_parameters_in_results=True)
init_state = tf.constant([0.1, 0.1])
kr = kernel.bootstrap_results(init_state)
# Manually set the momentum distribution.
kr = unnest.replace_innermost(kr, momentum_distribution=dist)
# Take one leapfrog step using the kernel.
_, nkr = kernel.one_step(init_state, kr, seed=test_util.test_seed())
# Need to evaluate here for consistency in graph mode.
(momentum_parts,
target_grad_parts,
proposed_state,
final_momentum,
target_log_prob,
grads_target_log_prob) = self.evaluate([
nkr.proposed_results.initial_momentum,
nkr.accepted_results.grads_target_log_prob,
nkr.proposed_state,
nkr.proposed_results.final_momentum,
nkr.proposed_results.target_log_prob,
nkr.proposed_results.grads_target_log_prob])
# Take one leapfrog step manually.
leapfrog = tfp.mcmc.internal.leapfrog_integrator.SimpleLeapfrogIntegrator(
target_fn=dist.log_prob,
step_sizes=[step_size],
num_steps=1)
# Again, need to evaluate here for graph mode consistency.
(next_momentum,
next_state,
next_target_log_prob,
grads_next_target_log_prob) = self.evaluate(leapfrog(
momentum_parts=momentum_parts,
state_parts=[init_state],
target=dist.log_prob(init_state),
target_grad_parts=target_grad_parts,
kinetic_energy_fn=lambda x: -dist.log_prob(x)))
# Verify resulting states are the same
self.assertAllClose(proposed_state,
next_state[0])
self.assertAllClose(final_momentum,
next_momentum)
self.assertAllClose(target_log_prob,
next_target_log_prob)
self.assertAllClose(grads_target_log_prob,
grads_next_target_log_prob)
class _PreconditionedHMCTest(test_util.TestCase):
@test_util.test_graph_and_eager_modes()
def test_f64(self):
if self.use_default_momentum_distribution:
momentum_distribution = None
else:
momentum_distribution = as_composite(
tfd.Normal(0., tf.constant(.5, dtype=tf.float64)))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
lambda x: -x**2, step_size=.5, num_leapfrog_steps=2,
momentum_distribution=momentum_distribution)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
self.evaluate(tfp.mcmc.sample_chain(
1, kernel=kernel, current_state=tf.ones([], tf.float64),
num_burnin_steps=5, trace_fn=None, seed=test_util.test_seed()))
@test_util.test_graph_and_eager_modes()
def test_f64_multichain(self):
if self.use_default_momentum_distribution:
momentum_distribution = None
else:
momentum_distribution = as_composite(
tfd.Normal(0., tf.constant(.5, dtype=tf.float64)))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
lambda x: -x**2, step_size=.5, num_leapfrog_steps=2,
momentum_distribution=momentum_distribution)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
nchains = 7
self.evaluate(tfp.mcmc.sample_chain(
1, kernel=kernel, current_state=tf.ones([nchains], tf.float64),
num_burnin_steps=5, trace_fn=None, seed=test_util.test_seed()))
@test_util.test_graph_and_eager_modes()
def test_f64_multichain_multipart(self):
if self.use_default_momentum_distribution:
momentum_distribution = None
else:
momentum_distribution = _make_composite_tensor(
tfd.JointDistributionSequential([
tfd.Normal(0., tf.constant(.5, dtype=tf.float64)),
tfd.Normal(0., tf.constant(.25, dtype=tf.float64))]))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
lambda x, y: -x**2 - y**2, step_size=.5, num_leapfrog_steps=2,
momentum_distribution=momentum_distribution)
kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
nchains = 7
self.evaluate(tfp.mcmc.sample_chain(
1, kernel=kernel,
current_state=(tf.ones([nchains], tf.float64),
tf.ones([nchains], tf.float64)),
num_burnin_steps=5, trace_fn=None, seed=test_util.test_seed()))
@test_util.test_graph_mode_only() # Long chains are very slow in eager mode.
def test_diag(self):
"""Test that a diagonal multivariate normal can be effectively sampled from.
Note that the effective sample size is expected to be exactly 100: this is
because the step size is tuned well enough that a single HMC step takes
a point to nearly the antipodal point, which causes a negative lag 1
autocorrelation, and the effective sample size calculation cuts off when
the autocorrelation drops below zero.
"""
mvn = tfd.MultivariateNormalDiag(
loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])
if self.use_default_momentum_distribution:
momentum_distribution = None
step_size = 0.1
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=mvn.scale,
)
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
110,
tf.zeros(3),
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[-100:],
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not self.use_default_momentum_distribution:
self.assertAllClose(ess, tf.fill([3], 100.))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
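# A rough standalone sketch of the cutoff rule mentioned in the docstring of
# test_diag above. Assumption: the simplified estimator ESS ~= N / (1 + 2 *
# sum of autocorrelations, truncated at the first negative lag); this is NOT
# TFP's exact implementation, only an illustration of why a strongly negative
# lag-1 autocorrelation caps the estimate at the number of draws.
#
#   import numpy as np
#
#   def simple_ess(chain):
#     n = len(chain)
#     x = chain - chain.mean()
#     acf = np.correlate(x, x, mode='full')[n - 1:] / (x.var() * n)
#     rho_sum = 0.
#     for rho in acf[1:]:
#       if rho < 0:  # cut off once the autocorrelation drops below zero
#         break
#       rho_sum += rho
#     return n / (1. + 2. * rho_sum)
#
#   rng = np.random.RandomState(0)
#   anti = rng.randn(100)
#   anti[1::2] = -anti[::2]   # alternating signs -> negative lag-1 autocorrelation
#   print(simple_ess(anti))   # ~100., i.e. capped at the number of draws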
@test_util.test_graph_mode_only() # Long chains are very slow in eager mode.
@test_util.jax_disable_test_missing_functionality('dynamic shapes')
def test_tril(self):
cov = 0.9 * tf.ones([3, 3]) + 0.1 * tf.eye(3)
scale = tf.linalg.cholesky(cov)
mv_tril = tfd.MultivariateNormalTriL(loc=[1., 2., 3.],
scale_tril=scale)
if self.use_default_momentum_distribution:
momentum_distribution = None
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
# TODO(b/170015229) Don't use the covariance as inverse scale,
# it is the wrong preconditioner.
precision_factor=tf.linalg.LinearOperatorFullMatrix(cov),
)
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mv_tril.log_prob,
momentum_distribution=momentum_distribution,
step_size=0.2,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
120,
tf.zeros(3),
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[-100:],
filter_threshold=0,
filter_beyond_positive_pairs=False)
# TODO(b/170015229): These and other tests like it, which assert ess is
# greater than some number, were all passing, even though the preconditioner
# was the wrong one. Why is that? A guess is that since there are *many*
# ways to have larger ess, these tests don't really test correctness.
# Perhaps remove all tests like these.
if not self.use_default_momentum_distribution:
self.assertAllClose(ess, tf.fill([3], 100.))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
@test_util.test_graph_mode_only() # Long chains are very slow in eager mode.
def test_transform(self):
mvn = tfd.MultivariateNormalDiag(loc=[1., 2., 3.], scale_diag=[1., 1., 1.])
diag_variance = tf.constant([0.1, 1., 10.])
if self.use_default_momentum_distribution:
momentum_distribution = None
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.math.sqrt(diag_variance)))
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=0.3,
num_leapfrog_steps=10)
transformed_kernel = tfp.mcmc.TransformedTransitionKernel(
hmc_kernel, bijector=tfb.Scale(tf.math.rsqrt(diag_variance)))
draws = tfp.mcmc.sample_chain(
110,
tf.zeros(3),
kernel=transformed_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[-100:],
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not self.use_default_momentum_distribution:
self.assertAllClose(ess, tf.fill([3], 100.))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
@test_util.test_graph_mode_only() # Long chains are very slow in eager mode.
def test_multi_state_part(self):
mvn = tfd.JointDistributionSequential([
tfd.Normal(1., 0.1),
tfd.Normal(2., 1.),
tfd.Independent(tfd.Normal(3 * tf.ones([2, 3, 4]), 10.), 3)
])
if self.use_default_momentum_distribution:
momentum_distribution = None
step_size = 0.1
else:
reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
reshape_to_234 = tfp.bijectors.Reshape(event_shape_out=[2, 3, 4])
momentum_distribution = tfd.JointDistributionSequential([
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag([0.1]))),
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag([1.]))),
reshape_to_234(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([24], 10.))))
])
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
100, [0., 0., tf.zeros((2, 3, 4))],
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws,
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not self.use_default_momentum_distribution:
self.assertAllCloseNested(
self.evaluate(ess),
[tf.constant(100.),
tf.constant(100.), 100. * tf.ones((2, 3, 4))])
else:
self.assertLess(
self.evaluate(
tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
50.)
@test_util.test_graph_mode_only() # Long chains are very slow in eager mode.
def test_batched_state(self):
mvn = tfd.MultivariateNormalDiag(
loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])
batch_shape = [2, 4]
if self.use_default_momentum_distribution:
momentum_distribution = None
step_size = 0.1
else:
momentum_distribution = tfde.MultivariateNormalPrecisionFactorLinearOperator(
tf.zeros((2, 4, 3)), precision_factor=mvn.scale)
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
110,
tf.zeros(batch_shape + [3]),
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(draws[10:], cross_chain_dims=[1, 2],
filter_threshold=0,
filter_beyond_positive_pairs=False)
if not self.use_default_momentum_distribution:
self.assertAllClose(self.evaluate(ess), 100 * 2. * 4. * tf.ones(3))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
@test_util.test_graph_mode_only() # Long chains are very slow in eager mode.
def test_batches(self):
mvn = tfd.JointDistributionSequential(
[tfd.Normal(1., 0.1),
tfd.Normal(2., 1.),
tfd.Normal(3., 10.)])
n_chains = 10
if self.use_default_momentum_distribution:
momentum_distribution = None
step_size = 0.1
else:
reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
momentum_distribution = tfd.JointDistributionSequential([
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([n_chains, 1], 0.1)))),
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([n_chains, 1], 1.)))),
reshape_to_scalar(
tfde.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(
tf.fill([n_chains, 1], 10.)))),
])
step_size = 0.3
hmc_kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob_fn=mvn.log_prob,
momentum_distribution=momentum_distribution,
step_size=step_size,
num_leapfrog_steps=10)
draws = tfp.mcmc.sample_chain(
100, [tf.zeros([n_chains]) for _ in range(3)],
kernel=hmc_kernel,
seed=test_util.test_seed(),
trace_fn=None)
ess = tfp.mcmc.effective_sample_size(
draws, cross_chain_dims=[1 for _ in draws],
filter_threshold=0, filter_beyond_positive_pairs=False)
if not self.use_default_momentum_distribution:
self.assertAllClose(self.evaluate(ess), 100 * n_chains * tf.ones(3))
else:
self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
class PreconditionedHMCTestDefaultMomentum(_PreconditionedHMCTest):
use_default_momentum_distribution = True
class PreconditionedHMCTestExplicitMomentum(_PreconditionedHMCTest):
use_default_momentum_distribution = False
del _PreconditionedHMCTest # Don't try to run base class tests.
@test_util.test_all_tf_execution_regimes
class DistributedPHMCTest(distribute_test_lib.DistributedTest):
def test_hmc_kernel_tracks_axis_names(self):
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
tfd.Normal(0, 1).log_prob,
step_size=1.9,
num_leapfrog_steps=2)
self.assertIsNone(kernel.experimental_shard_axis_names)
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
tfd.Normal(0, 1).log_prob,
step_size=1.9,
num_leapfrog_steps=2,
experimental_shard_axis_names=['a'])
self.assertListEqual(kernel.experimental_shard_axis_names, ['a'])
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
tfd.Normal(0, 1).log_prob,
step_size=1.9,
num_leapfrog_steps=2).experimental_with_shard_axes(['a'])
self.assertListEqual(kernel.experimental_shard_axis_names, ['a'])
def test_phmc_kernel_samples_correct_momenta_for_sharded_state(self):
if not JAX_MODE:
self.skipTest('Test in TF runs into `merge_call` error: see b/178944108')
def target_log_prob(a, b):
dist = tfd.Normal(0., 1.)
return dist.log_prob(a) + dist.log_prob(b)
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob,
step_size=1.9,
num_leapfrog_steps=2)
sharded_kernel = kernel.experimental_with_shard_axes([None, ['foo']])
def run(seed):
state = [0., 0.]
kr = sharded_kernel.bootstrap_results(state)
_, kr = sharded_kernel.one_step(state, kr, seed=seed)
return kr.proposed_results.initial_momentum
momentum = self.evaluate(self.per_replica_to_tensor(
self.strategy_run(run, args=(samplers.zeros_seed(),),
in_axes=None, axis_name='foo'), 0))
# Unsharded state momenta should all be equal
for i in range(distribute_test_lib.NUM_DEVICES):
self.assertAllClose(momentum[0][i], momentum[0][0])
# Sharded state momenta should be different
for i in range(distribute_test_lib.NUM_DEVICES):
for j in range(distribute_test_lib.NUM_DEVICES):
if i == j:
continue
self.assertNotAllClose(momentum[1][i], momentum[1][j])
def test_computes_same_log_acceptance_correction_with_sharded_state(self):
if not JAX_MODE:
self.skipTest('Test in TF runs into `merge_call` error: see b/178944108')
def target_log_prob(a, b):
return (
tfd.Normal(0., 1.).log_prob(a)
+ distribute_lib.psum(tfd.Normal(
distribute_lib.pbroadcast(a, 'foo'), 1.).log_prob(b), 'foo'))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob,
step_size=1.9,
num_leapfrog_steps=2)
sharded_kernel = kernel.experimental_with_shard_axes([None, ['foo']])
def run(seed):
state = [0., 0.]
kr = sharded_kernel.bootstrap_results(state)
_, kr = sharded_kernel.one_step(state, kr, seed=seed)
return kr.proposed_results.log_acceptance_correction
log_acceptance_correction = self.evaluate(self.per_replica_to_tensor(
self.strategy_run(run, args=(samplers.zeros_seed(),),
in_axes=None, axis_name='foo'), 0))
for i in range(distribute_test_lib.NUM_DEVICES):
self.assertAllClose(log_acceptance_correction[i],
log_acceptance_correction[0])
def test_unsharded_state_remains_synchronized_across_devices(self):
if not JAX_MODE:
self.skipTest('Test in TF runs into `merge_call` error: see b/178944108')
def target_log_prob(a, b):
return (
tfd.Normal(0., 1.).log_prob(a)
+ distribute_lib.psum(tfd.Normal(
distribute_lib.pbroadcast(a, 'foo'), 1.).log_prob(b), 'foo'))
kernel = tfp.experimental.mcmc.PreconditionedHamiltonianMonteCarlo(
target_log_prob,
step_size=1e-1,
num_leapfrog_steps=2)
sharded_kernel = kernel.experimental_with_shard_axes([None, ['foo']])
def run(seed):
state = [-10., -10.]
kr = sharded_kernel.bootstrap_results(state)
state, _ = sharded_kernel.one_step(state, kr, seed=seed)
return state
state = self.evaluate(self.per_replica_to_tensor(
self.strategy_run(run, args=(samplers.zeros_seed(),),
in_axes=None, axis_name='foo'), 0))
for i in range(distribute_test_lib.NUM_DEVICES):
self.assertAllClose(state[0][i],
state[0][0])
if __name__ == '__main__':
test_util.main()
|
tensorflow/probability
|
tensorflow_probability/python/experimental/mcmc/preconditioned_hmc_test.py
|
Python
|
apache-2.0
| 35,324
|
[
"Gaussian"
] |
55881753f3ba21f812c518782abfe9ee4bbebb157510907a1f8dee98f1ff2f0b
|
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
def sigma_from_waist(w):
return w/np.sqrt(2)
class Params:
'''
w : Gaussian beam waist
l_scale_factor : scale factor used to set the shift length eps
eps_scale_factor : scale factor used to set the post-amplification step length xi
'''
def __init__(self,w,l_scale_factor,eps_scale_factor):
## Parameter for the tuning of the step length after amplification
self.xi = 1/eps_scale_factor
##Gaussian Beam's waist
self.waist = w
## Standard Deviation for the initial gaussian state
self.sigma = np.vectorize(sigma_from_waist)(self.waist)
## eps: TBD parameter for the shift length
self.eps = w/(np.sqrt(2)*l_scale_factor)
## Angle parameter for the HWP which enables judicious postselection
self.theta = np.arctan(1+self.eps)/2
## Converting theta to degrees.
self.theta_deg = self.theta*180/np.pi
## Delta length after judicious postselection (l*Sw)
self.d = self.eps*(np.sin(2*self.theta)+np.cos(2*self.theta))/(np.sin(2*self.theta)-np.cos(2*self.theta))
## Additional kick for state distinguishability
self.d0 = (12*self.sigma-self.d)
params = Params(0.08,100,100)
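# Quick look at the derived quantities for the parameters above; nothing is
# asserted here, the printed values are simply whatever the formulas give.
print("sigma = %.4g, eps = %.4g, theta = %.4g deg" % (params.sigma, params.eps, params.theta_deg))
print("d = %.4g, d0 = %.4g" % (params.d, params.d0))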
def gauss_state(x,x0,sigma):
return np.power((2*np.pi*sigma**2),-1/4)*np.exp(-1*np.power(x-x0,2)/(4*sigma**2))
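# Sanity check (a minimal sketch, assuming Python 3 division so the exponents
# in gauss_state are floats): the squared amplitude of gauss_state is a
# normalized Gaussian density, so it should integrate to ~1 over a range much
# wider than sigma.
_x_check = np.linspace(-1., 1., 20001)
_psi = gauss_state(_x_check, 0., params.sigma)
print("integral of |psi|^2 = %.6f" % np.trapz(_psi**2, _x_check))  # expected ~1.0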
def walk(x,x0,sigma,N,dtot):
start_prob = 0
print(dtot)
for n in range(N):
start_prob += gauss_state(x,x0+n*dtot,sigma)
return start_prob
N = 6
xspace = np.linspace(-10*params.sigma,6*10*params.sigma,1000)
walk_vals = walk(xspace,0,params.sigma,N,params.d+params.d0)
yspace = np.linspace(-5*params.sigma,5*params.sigma,1000)
yvals = gauss_state(yspace,0,params.sigma)
z = np.outer(yvals,walk_vals)
plt.imshow(z,cmap=mpl.cm.hot)
plt.show()
|
deot95/Tesis
|
Monografía Física/Workspace/Code/Python/adz.py
|
Python
|
mit
| 1,843
|
[
"Gaussian"
] |
2582523733e93b35cbf34d92e818071e55b7d202feead79eeb693df5c18f23b1
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import license
import gl_XML, glX_XML
import sys, getopt
class PrintGenericStubs(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "gl_x86_asm.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2001 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004, 2005""", "BRIAN PAUL, IBM")
return
def get_stack_size(self, f):
size = 0
for p in f.parameterIterator():
if p.is_padding:
continue
size += p.get_stack_size()
return size
def printRealHeader(self):
print '#include "x86/assyntax.h"'
print ''
print '#if defined(STDCALL_API)'
print '# if defined(USE_MGL_NAMESPACE)'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n2))'
print '# else'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n2))'
print '# endif'
print '#else'
print '# if defined(USE_MGL_NAMESPACE)'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(mgl,n))'
print '# define _glapi_Dispatch _mglapi_Dispatch'
print '# else'
print '# define GL_PREFIX(n,n2) GLNAME(CONCAT(gl,n))'
print '# endif'
print '#endif'
print ''
print '#define GL_OFFSET(x) CODEPTR(REGOFF(4 * x, EAX))'
print ''
print '#if defined(GNU_ASSEMBLER) && !defined(__DJGPP__) && !defined(__MINGW32__) && !defined(__APPLE__)'
print '#define GLOBL_FN(x) GLOBL x ; .type x, @function'
print '#else'
print '#define GLOBL_FN(x) GLOBL x'
print '#endif'
print ''
print '#if defined(HAVE_PTHREAD) || defined(_WIN32)'
print '# define THREADS'
print '#endif'
print ''
print '#ifdef GLX_USE_TLS'
print ''
print '#ifdef GLX_X86_READONLY_TEXT'
print '# define CTX_INSNS MOV_L(GS:(EAX), EAX)'
print '#else'
print '# define CTX_INSNS NOP /* Pad for init_glapi_relocs() */'
print '#endif'
print ''
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tCALL(_x86_get_dispatch) ;\t\t\t\\'
print '\tCTX_INSNS ; \\'
print '\tJMP(GL_OFFSET(off))'
print ''
print '#elif defined(HAVE_PTHREAD)'
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
print '\tJE(1f) ;\t\t\t\t\t\\'
print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
print '1:\tCALL(_x86_get_dispatch) ;\t\t\t\\'
print '\tJMP(GL_OFFSET(off))'
print '#elif defined(THREADS)'
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
print '\tTEST_L(EAX, EAX) ;\t\t\t\t\\'
print '\tJE(1f) ;\t\t\t\t\t\\'
print '\tJMP(GL_OFFSET(off)) ;\t\t\t\t\\'
print '1:\tCALL(_glapi_get_dispatch) ;\t\t\t\\'
print '\tJMP(GL_OFFSET(off))'
print '#else /* Non-threaded version. */'
print '# define GL_STUB(fn,off,fn_alt)\t\t\t\\'
print 'ALIGNTEXT16;\t\t\t\t\t\t\\'
print 'GLOBL_FN(GL_PREFIX(fn, fn_alt));\t\t\t\\'
print 'GL_PREFIX(fn, fn_alt):\t\t\t\t\t\\'
print '\tMOV_L(CONTENT(GLNAME(_glapi_Dispatch)), EAX) ;\t\\'
print '\tJMP(GL_OFFSET(off))'
print '#endif'
print ''
print '#ifdef HAVE_ALIAS'
print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
print '\t.globl\tGL_PREFIX(fn, fn_alt) ;\t\t\t\\'
print '\t.set\tGL_PREFIX(fn, fn_alt), GL_PREFIX(alias, alias_alt)'
print '#else'
print '# define GL_STUB_ALIAS(fn,off,fn_alt,alias,alias_alt)\t\\'
print ' GL_STUB(fn, off, fn_alt)'
print '#endif'
print ''
print 'SEG_TEXT'
print ''
print '#ifdef GLX_USE_TLS'
print ''
print '\tGLOBL\tGLNAME(_x86_get_dispatch)'
print '\tHIDDEN(GLNAME(_x86_get_dispatch))'
print 'ALIGNTEXT16'
print 'GLNAME(_x86_get_dispatch):'
print '\tcall 1f'
print '1:\tpopl %eax'
print '\taddl $_GLOBAL_OFFSET_TABLE_+[.-1b], %eax'
print '\tmovl _glapi_tls_Dispatch@GOTNTPOFF(%eax), %eax'
print '\tret'
print ''
print '#elif defined(HAVE_PTHREAD)'
print 'EXTERN GLNAME(_glapi_Dispatch)'
print 'EXTERN GLNAME(_gl_DispatchTSD)'
print 'EXTERN GLNAME(pthread_getspecific)'
print ''
print 'ALIGNTEXT16'
print 'GLNAME(_x86_get_dispatch):'
print '\tSUB_L(CONST(24), ESP)'
print '\tPUSH_L(GLNAME(_gl_DispatchTSD))'
print '\tCALL(GLNAME(pthread_getspecific))'
print '\tADD_L(CONST(28), ESP)'
print '\tRET'
print '#elif defined(THREADS)'
print 'EXTERN GLNAME(_glapi_get_dispatch)'
print '#endif'
print ''
print '#if defined( GLX_USE_TLS ) && !defined( GLX_X86_READONLY_TEXT )'
print '\t\t.section\twtext, "awx", @progbits'
print '#endif /* defined( GLX_USE_TLS ) */'
print ''
print '\t\tALIGNTEXT16'
print '\t\tGLOBL GLNAME(gl_dispatch_functions_start)'
print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_start))'
print 'GLNAME(gl_dispatch_functions_start):'
print ''
return
def printRealFooter(self):
print ''
print '\t\tGLOBL\tGLNAME(gl_dispatch_functions_end)'
print '\t\tHIDDEN(GLNAME(gl_dispatch_functions_end))'
print '\t\tALIGNTEXT16'
print 'GLNAME(gl_dispatch_functions_end):'
print ''
print '#if defined (__ELF__) && defined (__linux__)'
print ' .section .note.GNU-stack,"",%progbits'
print '#endif'
return
def printBody(self, api):
for f in api.functionIterateByOffset():
name = f.dispatch_name()
stack = self.get_stack_size(f)
alt = "%s@%u" % (name, stack)
print '\tGL_STUB(%s, %d, %s)' % (name, f.offset, alt)
if not f.is_static_entry_point(f.name):
print '\tHIDDEN(GL_PREFIX(%s, %s))' % (name, alt)
for f in api.functionIterateByOffset():
name = f.dispatch_name()
stack = self.get_stack_size(f)
alt = "%s@%u" % (name, stack)
for n in f.entry_points:
if f.is_static_entry_point(n):
if n != f.name:
alt2 = "%s@%u" % (n, stack)
text = '\tGL_STUB_ALIAS(%s, %d, %s, %s, %s)' % (n, f.offset, alt2, name, alt)
if f.has_different_protocol(n):
print '#ifndef GLX_INDIRECT_RENDERING'
print text
print '#endif'
else:
print text
return
def show_usage():
print "Usage: %s [-f input_file_name] [-m output_mode]" % sys.argv[0]
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
mode = "generic"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "m:f:")
except Exception,e:
show_usage()
for (arg,val) in args:
if arg == '-m':
mode = val
elif arg == "-f":
file_name = val
if mode == "generic":
printer = PrintGenericStubs()
else:
print "ERROR: Invalid mode \"%s\" specified." % mode
show_usage()
api = gl_XML.parse_GL_API(file_name, glX_XML.glx_item_factory())
printer.Print(api)
|
execunix/vinos
|
xsrc/external/mit/MesaLib/dist/src/mapi/glapi/gen/gl_x86_asm.py
|
Python
|
apache-2.0
| 9,401
|
[
"Brian"
] |
f359cfcd462f9e7a9abec490d4db32a8980fba4cf72ff6677bbd08c6889ff3f5
|
#!/usr/bin/python
"""
Pull RGID information (from CCCB NextSeq naming scheme) from FASTQ.
"""
import argparse
import gzip
__author__ = 'dkdeconti'
__copyright__ = "Copyright 2017"
__credits__ = ["Derrick DeConti"]
__license__ = "MIT"
__maintainer__ = "Derrick DeConti"
__email__ = "deconti@jimmy.harvard.edu"
__status__ = "Production"
def get_rg_values(filename, sample_name):
"""
Parses gzipped fastq for RG values and returns RG str for BWA.
"""
with gzip.open(filename, 'rU') as handle:
line = handle.readline()
arow = line.strip('\n').split()
info = arow[0].split(':')[1:]
instrument_id = info[0]
run_id = info[1]
flowcell_id = info[2]
flowcell_lane = info[3]
index_seq = arow[1].split(':')[3]
rgid = '.'.join([sample_name, flowcell_id, flowcell_lane])
rglb = '.'.join([sample_name, run_id])
rgpu = '.'.join([instrument_id,
flowcell_lane,
index_seq])
rgsm = sample_name
rgcn = "DFCI-CCCB"
rgpl = "ILLUMINA"
rg_vals = "@RG\\tID:" + rgid + "\\tPL:" + rgpl + "\\tLB:" + \
rglb + "\\tSM:" + rgsm + "\\tCN:" + rgcn + "\\tPU:" + rgpu
return rg_vals
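# Hypothetical usage sketch: the file name and header line below are made up,
# not taken from real CCCB data; the header is only shaped the way this parser
# expects (the first colon-delimited token of the read name is skipped, then
# instrument:run:flowcell:lane follow).
#
# with gzip.open('example.fastq.gz', 'wb') as out:
#     out.write('@X:NS500100:42:HXXXXXBGXX:1:11101:1000 1:N:0:ACGTACGT\n'
#               'ACGT\n+\n!!!!\n')
# print(get_rg_values('example.fastq.gz', 'sampleA'))
# # ID becomes sampleA.HXXXXXBGXX.1, LB becomes sampleA.42,
# # PU becomes NS500100.1.ACGTACGT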
def main():
"""
Arg parsing and central dispatch.
"""
# Arg parsing
desc = "Pull RGID info from CCCB NextSeq produced FASTQ"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("samplename", metavar="SAMPLENAME", help="sample name")
parser.add_argument("fastq", metavar="FASTQ", help="input fastq file")
args = parser.parse_args()
# Central dispatch
print get_rg_values(args.fastq, args.samplename)
if __name__ == "__main__":
main()
|
dkdeconti/HLA_epitope_prediction_from_WES
|
get_RGID_from_FASTQ.py
|
Python
|
mit
| 1,730
|
[
"BWA"
] |
f9f76bc25bd5bd0dac06ba781111fb4fb2573c492557d3f0c3073c14a6c82ce0
|
from django.conf.urls import patterns, url
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic import TemplateView
# For adding explicit grouping resource endpoints in API documentation.
from rest_framework_swagger.urlparser import UrlParser
from catmaid.views import CatmaidView, ExportWidgetView
from catmaid.control.client import ClientDataList, \
ClientDatastoreDetail, ClientDatastoreList
from catmaid.control.suppressed_virtual_treenode import SuppressedVirtualTreenodeDetail, SuppressedVirtualTreenodeList
# A regular expression matching floating point and integer numbers
num = r'[-+]?[0-9]*\.?[0-9]+'
integer = r'[-+]?[0-9]+'
# A regular expression matching lists of integers with comma as delimiter
intlist = r'[0-9]+(,[0-9]+)*'
# A list of words, not containing commas
wordlist= r'\w+(,\w+)*'
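# For illustration only (hypothetical values, as checked in a Python shell;
# the patterns above are used unanchored inside the URL regexes below):
# import re
# re.match('^%s$' % num, '-3.14')           # matches
# re.match('^%s$' % intlist, '1,2,30')      # matches
# re.match('^%s$' % wordlist, 'foo,bar_baz')# matches
# re.match('^%s$' % integer, '1.5')         # does not match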
# Add the main index.html page at the root:
urlpatterns = patterns('',
url(r'^$', ensure_csrf_cookie(CatmaidView.as_view(template_name='catmaid/index.html')),
name="home"),
url(r'^version$', 'catmaid.control.common.get_catmaid_version')
)
# Authentication and permissions
urlpatterns += patterns('catmaid.control.authentication',
(r'^accounts/login$', 'login_user'),
(r'^accounts/logout$', 'logout_user'),
(r'^accounts/(?P<project_id>\d+)/all-usernames$', 'all_usernames'),
(r'^permissions$', 'user_project_permissions'),
(r'^classinstance/(?P<ci_id>\d+)/permissions$',
'get_object_permissions'),
(r'^register$', 'register'),
)
# Users
urlpatterns += patterns('catmaid.control.user',
(r'^user-list$', 'user_list'),
(r'^user-table-list$', 'user_list_datatable'),
(r'^user-profile/update$', 'update_user_profile'),
)
# Django related user URLs
urlpatterns += patterns('django.contrib.auth.views',
url(r'^user/password_change/$', 'password_change', {'post_change_redirect': '/'}),
)
# Log
urlpatterns += patterns('catmaid.control.log',
(r'^(?P<project_id>\d+)/logs/list$', 'list_logs'),
(r'^log/(?P<level>(info|error|debug))$', 'log_frontent_event'),
)
# Messages
urlpatterns += patterns('catmaid.control.message',
(r'^messages/list$', 'list_messages'),
(r'^messages/mark_read$', 'read_message'),
(r'^messages/latestunreaddate', 'get_latest_unread_date'),
)
# CATMAID client datastore and data access
urlpatterns += patterns('catmaid.control.client',
(r'^client/datastores/$', ClientDatastoreList.as_view()),
(r'^client/datastores/(?P<name>[\w-]+)$', ClientDatastoreDetail.as_view()),
(r'^client/datastores/(?P<name>[\w-]+)/$', ClientDataList.as_view()),
)
# General project model access
urlpatterns += patterns('catmaid.control.project',
(r'^projects/$', 'projects'),
)
# General stack model access
urlpatterns += patterns('catmaid.control.stack',
(r'^(?P<project_id>\d+)/stacks$', 'stacks'),
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/info$', 'stack_info'),
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/models$', 'stack_models'),
)
# General stack group access
urlpatterns += patterns('catmaid.control.stackgroup',
(r'^(?P<project_id>\d+)/stackgroup/(?P<stackgroup_id>\d+)/info$', 'get_stackgroup_info'),
)
# Tile access
urlpatterns += patterns('catmaid.control.tile',
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tile$', 'get_tile'),
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/put_tile$', 'put_tile'),
)
# Tracing general
urlpatterns += patterns('catmaid.control.tracing',
(r'^(?P<project_id>\d+)/tracing/setup/rebuild$', 'rebuild_tracing_setup_view'),
(r'^(?P<project_id>\d+)/tracing/setup/test$', 'check_tracing_setup_view'),
)
# Statistics
urlpatterns += patterns('catmaid.control.stats',
(r'^(?P<project_id>\d+)/stats$',
TemplateView.as_view(template_name="catmaid/projectstatistics.html")),
(r'^(?P<project_id>\d+)/stats/nodecount$', 'stats_nodecount'),
(r'^(?P<project_id>\d+)/stats/editor$', 'stats_editor'),
(r'^(?P<project_id>\d+)/stats/summary$', 'stats_summary'),
(r'^(?P<project_id>\d+)/stats/history$', 'stats_history'),
(r'^(?P<project_id>\d+)/stats/user-history$', 'stats_user_history'),
(r'^(?P<project_id>\d+)/stats/user-activity$', 'stats_user_activity'),
)
# Annotations
urlpatterns += patterns('catmaid.control.neuron_annotations',
(r'^(?P<project_id>\d+)/annotations/$', 'list_annotations'),
(r'^(?P<project_id>\d+)/annotations/query$', 'annotations_for_entities'),
(r'^(?P<project_id>\d+)/annotations/forskeletons$', 'annotations_for_skeletons'),
(r'^(?P<project_id>\d+)/annotations/table-list$', 'list_annotations_datatable'),
(r'^(?P<project_id>\d+)/annotations/add$', 'annotate_entities'),
(r'^(?P<project_id>\d+)/annotations/remove$', 'remove_annotations'),
(r'^(?P<project_id>\d+)/annotations/(?P<annotation_id>\d+)/remove$',
'remove_annotation'),
(r'^(?P<project_id>\d+)/annotations/query-targets$', 'query_annotated_classinstances'),
)
# Text labels
urlpatterns += patterns('catmaid.control.textlabel',
(r'^(?P<project_id>\d+)/textlabel/create$', 'create_textlabel'),
(r'^(?P<project_id>\d+)/textlabel/delete$', 'delete_textlabel'),
(r'^(?P<project_id>\d+)/textlabel/update$', 'update_textlabel'),
(r'^(?P<project_id>\d+)/textlabel/all', 'textlabels'),
)
# Treenode labels
urlpatterns += patterns('catmaid.control.label',
(r'^(?P<project_id>\d+)/labels/$', 'labels_all'),
(r'^(?P<project_id>\d+)/labels-for-nodes$', 'labels_for_nodes'),
(r'^(?P<project_id>\d+)/labels-for-node/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)$', 'labels_for_node'),
(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/update$', 'label_update'),
(r'^(?P<project_id>\d+)/label/(?P<ntype>(treenode|location|connector))/(?P<location_id>\d+)/remove$', 'remove_label_link'),
(r'^(?P<project_id>\d+)/label/remove$', 'label_remove'),
)
# Links
urlpatterns += patterns('catmaid.control.link',
(r'^(?P<project_id>\d+)/link/create$', 'create_link'),
(r'^(?P<project_id>\d+)/link/delete$', 'delete_link'),
)
# Connector access
urlpatterns += patterns('catmaid.control.connector',
(r'^(?P<project_id>\d+)/connector/create$', 'create_connector'),
(r'^(?P<project_id>\d+)/connector/delete$', 'delete_connector'),
(r'^(?P<project_id>\d+)/connector/table/list$', 'list_connector'),
(r'^(?P<project_id>\d+)/connector/list/graphedge$', 'graphedge_list'),
(r'^(?P<project_id>\d+)/connector/list/one_to_many$', 'one_to_many_synapses'),
(r'^(?P<project_id>\d+)/connector/list/many_to_many$', 'many_to_many_synapses'),
(r'^(?P<project_id>\d+)/connector/list/completed$', 'list_completed'),
(r'^(?P<project_id>\d+)/connector/skeletons$', 'connector_skeletons'),
(r'^(?P<project_id>\d+)/connector/edgetimes$', 'connector_associated_edgetimes'),
(r'^(?P<project_id>\d+)/connector/pre-post-info$', 'connectors_info'),
(r'^(?P<project_id>\d+)/connector/user-info$', 'connector_user_info'),
)
# Neuron access
urlpatterns += patterns('catmaid.control.neuron',
(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/get-all-skeletons$', 'get_all_skeletons_of_neuron'),
(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/give-to-user$', 'give_neuron_to_other_user'),
(r'^(?P<project_id>\d+)/neuron/(?P<neuron_id>\d+)/delete$', 'delete_neuron'),
(r'^(?P<project_id>\d+)/neurons/(?P<neuron_id>\d+)/rename$', 'rename_neuron'),
)
# Node access
UrlParser.explicit_root_paths |= set(['{project_id}/nodes'])
urlpatterns += patterns('catmaid.control.node',
(r'^(?P<project_id>\d+)/node/(?P<node_id>\d+)/reviewed$', 'update_location_reviewer'),
(r'^(?P<project_id>\d+)/node/most_recent$', 'most_recent_treenode'),
(r'^(?P<project_id>\d+)/node/nearest$', 'node_nearest'),
(r'^(?P<project_id>\d+)/node/update$', 'node_update'),
(r'^(?P<project_id>\d+)/node/list$', 'node_list_tuples'),
(r'^(?P<project_id>\d+)/node/get_location$', 'get_location'),
(r'^(?P<project_id>\d+)/node/user-info$', 'user_info'),
(r'^(?P<project_id>\d+)/nodes/find-labels$', 'find_labels'),
)
# Treenode access
UrlParser.explicit_root_paths |= set(['{project_id}/treenodes'])
urlpatterns += patterns('catmaid.control.treenode',
(r'^(?P<project_id>\d+)/treenode/create$', 'create_treenode'),
(r'^(?P<project_id>\d+)/treenode/insert$', 'insert_treenode'),
(r'^(?P<project_id>\d+)/treenode/delete$', 'delete_treenode'),
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/info$', 'treenode_info'),
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/children$', 'find_children'),
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/confidence$', 'update_confidence'),
(r'^(?P<project_id>\d+)/treenode/(?P<treenode_id>\d+)/parent$', 'update_parent'),
(r'^(?P<project_id>\d+)/treenode/(?P<treenode_id>\d+)/radius$', 'update_radius'),
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/previous-branch-or-root$', 'find_previous_branchnode_or_root'),
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/next-branch-or-end$', 'find_next_branchnode_or_end'),
)
# Suppressed virtual treenode access
urlpatterns += patterns('catmaid.control.suppressed_virtual_treenode',
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/$',
SuppressedVirtualTreenodeList.as_view()),
(r'^(?P<project_id>\d+)/treenodes/(?P<treenode_id>\d+)/suppressed-virtual/(?P<suppressed_id>\d+)$',
SuppressedVirtualTreenodeDetail.as_view()),
)
# General skeleton access
urlpatterns += patterns('catmaid.control.skeleton',
(r'^(?P<project_id>\d+)/skeletons/$', 'list_skeletons'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/node_count$', 'node_count'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuronname$', 'neuronname'),
(r'^(?P<project_id>\d+)/skeleton/neuronnames$', 'neuronnames'),
(r'^(?P<project_id>\d+)/skeleton/node/(?P<treenode_id>\d+)/node_count$', 'node_count'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/review/reset-own$', 'reset_own_reviewer_ids'),
(r'^(?P<project_id>\d+)/skeletons/connectivity$', 'skeleton_info_raw'),
(r'^(?P<project_id>\d+)/skeleton/connectivity_matrix$', 'connectivity_matrix'),
(r'^(?P<project_id>\d+)/skeletons/review-status$', 'review_status'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/statistics$', 'skeleton_statistics'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/contributor_statistics$', 'contributor_statistics'),
(r'^(?P<project_id>\d+)/skeleton/contributor_statistics_multiple$', 'contributor_statistics_multiple'),
(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/find-labels$', 'find_labels'),
(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/open-leaves$', 'open_leaves'),
(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/root$', 'root_for_skeleton'),
(r'^(?P<project_id>\d+)/skeleton/split$', 'split_skeleton'),
(r'^(?P<project_id>\d+)/skeleton/ancestry$', 'skeleton_ancestry'),
(r'^(?P<project_id>\d+)/skeleton/join$', 'join_skeleton'),
(r'^(?P<project_id>\d+)/skeleton/reroot$', 'reroot_skeleton'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/permissions$',
'get_skeleton_permissions'),
(r'^(?P<project_id>\d+)/skeleton/annotationlist$', 'annotation_list'),
(r'^(?P<project_id>\d+)/skeletongroup/adjacency_matrix$', 'adjacency_matrix'),
(r'^(?P<project_id>\d+)/skeletongroup/skeletonlist_subgraph', 'skeletonlist_subgraph'),
(r'^(?P<project_id>\d+)/skeletongroup/all_shared_connectors', 'all_shared_connectors'),
)
# Skeleton export
urlpatterns += patterns('catmaid.control.skeletonexport',
(r'^(?P<project_id>\d+)/neuroml/neuroml_level3_v181$', 'export_neuroml_level3_v181'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/swc$', 'skeleton_swc'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/neuroml$', 'skeletons_neuroml'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/json$', 'skeleton_with_metadata'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/compact-json$', 'skeleton_for_3d_viewer'),
(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-skeleton$', 'compact_skeleton'),
(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor$', 'compact_arbor'),
(r'^(?P<project_id>\d+)/(?P<skeleton_id>\d+)/(?P<with_nodes>\d)/(?P<with_connectors>\d)/(?P<with_tags>\d)/compact-arbor-with-minutes$', 'compact_arbor_with_minutes'),
(r'^(?P<project_id>\d+)/skeletons/(?P<skeleton_id>\d+)/review$', 'export_review_skeleton'),
(r'^(?P<project_id>\d+)/skeleton/(?P<skeleton_id>\d+)/reviewed-nodes$', 'export_skeleton_reviews'),
(r'^(?P<project_id>\d+)/skeletons/measure$', 'measure_skeletons'),
(r'^(?P<project_id>\d+)/skeleton/connectors-by-partner$', 'skeleton_connectors_by_partner'),
(r'^(?P<project_id>\d+)/skeletons/within-spatial-distance$', 'within_spatial_distance'),
(r'^(?P<project_id>\d+)/skeletons/partners-by-connector$', 'partners_by_connector'),
)
# Treenode and Connector image stack archive export
urlpatterns += patterns('catmaid.control.treenodeexport',
(r'^(?P<project_id>\d+)/connectorarchive/export$', 'export_connectors'),
(r'^(?P<project_id>\d+)/treenodearchive/export$', 'export_treenodes'),
)
# Cropping
urlpatterns += patterns('catmaid.control.cropping',
(r'^(?P<project_id>\d+)/stack/(?P<stack_ids>%s)/crop/(?P<x_min>%s),(?P<x_max>%s)/(?P<y_min>%s),(?P<y_max>%s)/(?P<z_min>%s),(?P<z_max>%s)/(?P<zoom_level>\d+)/(?P<single_channel>[0|1])/$' % (intlist, num, num, num, num, num, num), 'crop'),
(r'^crop/download/(?P<file_path>.*)/$', 'download_crop')
)
# Tagging
urlpatterns += patterns('catmaid.control.project',
(r'^(?P<project_id>\d+)/tags/list$', 'list_project_tags'),
(r'^(?P<project_id>\d+)/tags/clear$', 'update_project_tags'),
(r'^(?P<project_id>\d+)/tags/(?P<tags>.*)/update$', 'update_project_tags'),
)
urlpatterns += patterns('catmaid.control.stack',
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/list$', 'list_stack_tags'),
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/clear$', 'update_stack_tags'),
(r'^(?P<project_id>\d+)/stack/(?P<stack_id>\d+)/tags/(?P<tags>.*)/update$', 'update_stack_tags'),
)
# Data views
urlpatterns += patterns('catmaid.control.data_view',
(r'^dataviews/list$', 'get_available_data_views'),
(r'^dataviews/default$', 'get_default_properties'),
(r'^dataviews/show/(?P<data_view_id>\d+)$', 'get_data_view'),
(r'^dataviews/show/default$', 'get_default_data_view'),
(r'^dataviews/type/comment$', 'get_data_view_type_comment'),
(r'^dataviews/type/(?P<data_view_id>\d+)$', 'get_data_view_type'),
)
# Ontologies
urlpatterns += patterns('catmaid.control.ontology',
(r'^ontology/knownroots$', 'get_known_ontology_roots'),
(r'^(?P<project_id>%s)/ontology/list$' % (integer),
'list_ontology'),
(r'^(?P<project_id>%s)/ontology/relations$' % (integer),
'get_available_relations'),
(r'^(?P<project_id>%s)/ontology/relations/add$' % (integer),
'add_relation_to_ontology'),
(r'^(?P<project_id>%s)/ontology/relations/rename$' % (integer),
'rename_relation'),
(r'^(?P<project_id>%s)/ontology/relations/remove$' % (integer),
'remove_relation_from_ontology'),
(r'^(?P<project_id>%s)/ontology/relations/removeall$' % (integer),
'remove_all_relations_from_ontology'),
(r'^(?P<project_id>%s)/ontology/relations/list$' % (integer),
'list_available_relations'),
(r'^(?P<project_id>%s)/ontology/classes$' % (integer),
'get_available_classes'),
(r'^(?P<project_id>%s)/ontology/classes/add$' % (integer),
'add_class_to_ontology'),
(r'^(?P<project_id>%s)/ontology/classes/rename$' % (integer),
'rename_class'),
(r'^(?P<project_id>%s)/ontology/classes/remove$' % (integer),
'remove_class_from_ontology'),
(r'^(?P<project_id>%s)/ontology/classes/removeall$' % (integer),
'remove_all_classes_from_ontology'),
(r'^(?P<project_id>%s)/ontology/classes/list$' % (integer),
'list_available_classes'),
(r'^(?P<project_id>%s)/ontology/links/add$' % (integer),
'add_link_to_ontology'),
(r'^(?P<project_id>%s)/ontology/links/remove$' % (integer),
'remove_link_from_ontology'),
(r'^(?P<project_id>%s)/ontology/links/removeselected$' % (integer),
'remove_selected_links_from_ontology'),
(r'^(?P<project_id>%s)/ontology/links/removeall$' % (integer),
'remove_all_links_from_ontology'),
(r'^(?P<project_id>%s)/ontology/restrictions/add$' % (integer),
'add_restriction'),
(r'^(?P<project_id>%s)/ontology/restrictions/remove$' % (integer),
'remove_restriction'),
(r'^(?P<project_id>%s)/ontology/restrictions/(?P<restriction>[^/]*)/types$' % (integer),
'get_restriction_types'),
)
# Classification
urlpatterns += patterns('catmaid.control.classification',
(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/number$'.format(integer),
'get_classification_number'),
(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/show$'.format(integer),
'show_classification_editor'),
(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/show/(?P<link_id>\d+)$'.format(integer),
'show_classification_editor'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/select$'.format(integer),
'select_classification_graph', name='select_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/setup/test$'.format(integer),
'check_classification_setup_view', name='test_classification_setup'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/setup/rebuild$'.format(integer),
'rebuild_classification_setup_view', name='rebuild_classification_setup'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/new$'.format(integer),
'add_classification_graph', name='add_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/list$'.format(integer),
'list_classification_graph', name='list_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/list/(?P<link_id>\d+)$'.format(integer),
'list_classification_graph', name='list_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/(?P<link_id>\d+)/remove$'.format(integer),
'remove_classification_graph', name='remove_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/instance-operation$'.format(integer),
'classification_instance_operation',
name='classification_instance_operation'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/(?P<link_id>\d+)/autofill$'.format(integer),
'autofill_classification_graph', name='autofill_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/link$'.format(integer),
'link_classification_graph', name='link_classification_graph'),
url(r'^(?P<project_id>{0})/classification/(?P<workspace_pid>{0})/stack/(?P<stack_id>{0})/linkroi/(?P<ci_id>{0})/$'.format(integer),
'link_roi_to_classification', name='link_roi_to_classification'),
url(r'^classification/(?P<workspace_pid>{0})/export$'.format(integer),
'export', name='export_classification'),
url(r'^classification/(?P<workspace_pid>{0})/export/excludetags/(?P<exclusion_tags>{1})/$'.format(integer, wordlist),
'export', name='export_classification'),
url(r'^classification/(?P<workspace_pid>{0})/search$'.format(integer),
'search', name='search_classifications'),
url(r'^classification/(?P<workspace_pid>{0})/export_ontology$'.format(integer),
'export_ontology', name='export_ontology'),
)
# Notifications
urlpatterns += patterns('catmaid.control.notifications',
(r'^(?P<project_id>\d+)/notifications/list$', 'list_notifications'),
(r'^(?P<project_id>\d+)/changerequest/approve$', 'approve_change_request'),
(r'^(?P<project_id>\d+)/changerequest/reject$', 'reject_change_request'),
)
# Regions of interest
urlpatterns += patterns('catmaid.control.roi',
url(r'^(?P<project_id>{0})/roi/(?P<roi_id>{0})/info$'.format(integer),
'get_roi_info', name='get_roi_info'),
url(r'^(?P<project_id>{0})/roi/link/(?P<relation_id>{0})/stack/(?P<stack_id>{0})/ci/(?P<ci_id>{0})/$'.format(integer),
'link_roi_to_class_instance', name='link_roi_to_class_instance'),
url(r'^(?P<project_id>{0})/roi/(?P<roi_id>{0})/remove$'.format(integer),
'remove_roi_link', name='remove_roi_link'),
url(r'^(?P<project_id>{0})/roi/(?P<roi_id>{0})/image$'.format(integer),
'get_roi_image', name='get_roi_image'),
url(r'^(?P<project_id>{0})/roi/add$'.format(integer),
'add_roi', name='add_roi'),
)
# Clustering
urlpatterns += patterns('catmaid.control.clustering',
url(r'^clustering/(?P<workspace_pid>{0})/setup$'.format(integer),
'setup_clustering', name="clustering_setup"),
url(r'^clustering/(?P<workspace_pid>{0})/show$'.format(integer),
TemplateView.as_view(template_name="catmaid/clustering/display.html"),
name="clustering_display"),
)
# Volumes
urlpatterns += patterns('catmaid.control.volume',
(r'^(?P<project_id>\d+)/volumes/$', 'volume_collection'),
(r'^(?P<project_id>\d+)/volumes/add$', 'add_volume'),
(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/$', 'volume_detail'),
(r'^(?P<project_id>\d+)/volumes/(?P<volume_id>\d+)/intersect$', 'intersects'),
)
# Front-end tests
urlpatterns += patterns('',
url(r'^tests$', login_required(CatmaidView.as_view(template_name="catmaid/tests.html")), name="frontend_tests"),
)
# Collection of various parts of the CATMAID API. These methods are usually
# one- or two-liners and having them in a separate statement would not improve
# readability. Therefore, they are all declared in this general statement.
urlpatterns += patterns('catmaid.control',
# User analytics and proficiency
(r'^useranalytics$', 'useranalytics.plot_useranalytics'),
(r'^(?P<project_id>\d+)/userproficiency$', 'user_evaluation.evaluate_user'),
(r'^(?P<project_id>\d+)/exportwidget$', ExportWidgetView.as_view() ),
(r'^(?P<project_id>\d+)/graphexport/json$', 'graphexport.export_jsongraph' ),
# Graphs
(r'^(?P<project_id>\d+)/skeletons/confidence-compartment-subgraph', 'graph2.skeleton_graph'),
# Circles
(r'^(?P<project_id>\d+)/graph/circlesofhell', 'circles.circles_of_hell'),
(r'^(?P<project_id>\d+)/graph/directedpaths', 'circles.find_directed_paths'),
# Analytics
(r'^(?P<project_id>\d+)/skeleton/analytics$', 'analytics.analyze_skeletons'),
# Review
(r'^(?P<project_id>\d+)/user/reviewer-whitelist$', 'review.reviewer_whitelist'),
# Search
(r'^(?P<project_id>\d+)/search$', 'search.search'),
# Wiring diagram export
(r'^(?P<project_id>\d+)/wiringdiagram/json$', 'wiringdiagram.export_wiring_diagram'),
(r'^(?P<project_id>\d+)/wiringdiagram/nx_json$', 'wiringdiagram.export_wiring_diagram_nx'),
# Annotation graph export
(r'^(?P<project_id>\d+)/annotationdiagram/nx_json$', 'object.convert_annotations_to_networkx'),
# Treenode table
(r'^(?P<project_id>\d+)/treenode/table/(?P<skid>\d+)/content$', 'treenodetable.treenode_table_content'),
)
# Patterns for FlyTEM access
urlpatterns += patterns('catmaid.control.flytem',
(r'^flytem/projects/$', 'project.projects'),
(r'^(?P<project_id>.+)/user/reviewer-whitelist$', 'review.reviewer_whitelist'),
(r'^flytem/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', 'stack.stack_info'),
(r'^flytem/(?P<project_id>.+)/stacks$', 'stack.stacks'),
)
# Patterns for DVID access
urlpatterns += patterns('catmaid.control.dvid',
(r'^dvid/projects/$', 'project.projects'),
(r'^(?P<project_id>.+)/user/reviewer-whitelist$', 'review.reviewer_whitelist'),
(r'^dvid/(?P<project_id>.+)/stack/(?P<stack_id>.+)/info$', 'stack.stack_info'),
(r'^dvid/(?P<project_id>.+)/stacks$', 'stack.stacks'),
)
|
aschampion/CATMAID
|
django/applications/catmaid/urls.py
|
Python
|
gpl-3.0
| 24,476
|
[
"NEURON"
] |
37d1b409b9d591dd4f92fc507b1db9ca5535aa92cc2a3ab12b5944bb7a49b54a
|
# coding: utf-8
#############################
#### Various experiments ####
#############################
import numpy as np
import pickle
import time
from os import listdir
from learning import build_translation, extract_name, extract_features
from skimage.io import imread
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
image_vectors = pickle.load(open("vectors.p", "rb"))
image_targets = pickle.load(open("target.p", "rb"))
learning_dir = "../database"
images = listdir(learning_dir)
images.sort()
transl = build_translation(images)
test = [("Gaussian", GaussianNB()),
("Linear", LinearRegression()),
("DTree", DecisionTreeClassifier()),
('Knn 5', KNeighborsClassifier(5)),
('Knn 10', KNeighborsClassifier(10)),
('Knn 15', KNeighborsClassifier(15)),
('Knn 20', KNeighborsClassifier(20)),
('Logistic Regression', LogisticRegression()),
('Linear SVM', SVC(kernel='linear', probability=True)),
('Poly SVM', SVC(kernel='poly', degree=2, probability=True)),
('RBF SVM', SVC(kernel='rbf', gamma=2, C=1, probability=True)),
('Random Forest', RandomForestClassifier())]
def compute_score(model):
# Warning : score computation over 1 class, and not ten most likely ?
score_dir = "../database"
images = listdir(score_dir)
images.sort()
score = 0
for x in images:
i = imread(score_dir + "/" + x)
if(extract_name(x) == transl[int(model.predict([extract_features(i)]))]):
score += 1
return score/len(images)
def isin_top10(pos, proba_array):
# Is proba_array[pos] one of the 10 largest values of proba_array?
return pos in np.argpartition(proba_array, -10)[-10:]
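# Tiny self-check of the top-10 membership test above (toy scores, not model
# output): np.argpartition(a, -10)[-10:] returns the indices of the ten
# largest entries, in no particular order.
_toy_scores = np.linspace(0., 1., 25)
print(isin_top10(24, _toy_scores))  # True: index 24 holds the largest score
print(isin_top10(0, _toy_scores))   # False: index 0 holds the smallest score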
def compute_score10(model):
# Score computed over the ten most likely classes (top-10 membership).
score_dir = "../database"
images = listdir(score_dir)
images.sort()
score = 0
for x in images:
i = imread(score_dir + "/" + x)
if(isin_top10(transl[extract_name(x)], model.predict_proba([extract_features(i)])[0])):
score += 1
return score/len(images)
t = time.time()
for x in test:
model = x[1].fit(image_vectors, image_targets)
try:
print("Score of " + x[0] + " : " + str(compute_score10(model)))
except AttributeError:
print("Class %s has no predict_proba" % x[0])
print("Computed in : " + str(time.time()-t) + "s")
t = time.time()
for x in range(5,20):
model = KNeighborsClassifier(x).fit(image_vectors, image_targets)
try:
print("Score of KNN(" + str(x) + ") : " + str(compute_score10(model)))
except AttributeError:
print("Class %s has no predict_proba" % x[0])
print("Computed in " + str(time.time()-t) + "s")
|
rmonat/princess-or-frog
|
src/exps.py
|
Python
|
gpl-3.0
| 3,123
|
[
"Gaussian"
] |
8a176d0b09b1300d4483e4e17cb37896d5f3b53185b9bf450c5830735d0bd57e
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
The Refextract test suite.
"""
import unittest
from invenio.testutils import make_test_suite, run_test_suite
## Import the minimal necessary methods and variables needed to run Refextract
from invenio.refextract import CFG_REFEXTRACT_KB_JOURNAL_TITLES, \
CFG_REFEXTRACT_KB_REPORT_NUMBERS, \
create_marc_xml_reference_section, \
build_titles_knowledge_base, \
build_reportnum_knowledge_base, \
display_xml_record, \
compress_subfields, \
restrict_m_subfields, \
cli_opts
# Initially, build the titles knowledge base
(title_search_kb, \
title_search_standardised_titles, \
title_search_keys) = build_titles_knowledge_base(CFG_REFEXTRACT_KB_JOURNAL_TITLES)
# Initially, build the report numbers knowledge base
(preprint_reportnum_sre, \
standardised_preprint_reportnum_categs) = build_reportnum_knowledge_base(CFG_REFEXTRACT_KB_REPORT_NUMBERS)
class RefextractTest(unittest.TestCase):
""" bibrecord - testing output of refextract """
def setUp(self):
"""Initialize the example reference section, and the expected output"""
# Set the record id to be solely used inside the '001' controlfield
self.rec_id = "1234"
# Set the output journal title format to match that of INVENIO's
cli_opts['inspire'] = 0
def extract_references(self, reference_lines):
""" Given a list of raw reference lines, output the MARC-XML content extracted version"""
# Identify journal titles, report numbers, URLs, DOIs, and authors...
# Generate marc xml using the example reference lines
(processed_references, count_misc, \
count_title, count_reportnum, \
count_url, count_doi, count_auth_group, record_titles_count) = \
create_marc_xml_reference_section(map(lambda x: unicode(x, 'utf-8'), reference_lines), \
preprint_reportnum_sre, \
standardised_preprint_reportnum_categs, \
title_search_kb, \
title_search_standardised_titles, \
title_search_keys)
# Generate the xml string to be outputted
tmp_out = display_xml_record(0, \
count_reportnum, \
count_title, \
count_url, \
count_doi, \
count_misc, \
count_auth_group, \
self.rec_id, \
processed_references)
# Remove redundant misc subfields
(m_restricted, ref_lines) = restrict_m_subfields(tmp_out.split('\n'))
# Build the final xml string of the output of Refextract
out = ''
for rec in ref_lines:
rec = rec.rstrip()
if rec:
out += rec + '\n'
# Compress multiple 'm' and 'h' subfields in a datafield
out = compress_subfields(out, 'm')
out = compress_subfields(out, 'h')
# Remove the ending statistical datafield from the final extracted references
out = out[:out.find('<datafield tag="999" ind1="C" ind2="6">')].rstrip()
return out
def test_author_recognition(self):
""" refextract - test author example """
ex_author_lines = ["""[1] M. I. Trofimov, N. De Filippis and E. A. Smolenskii. Application of the electronegativity indices of organic molecules to tasks of chemical informatics.""",
"""[2] M. Gell-Mann, P. Ramon ans R. Slansky, in Supergravity, P. van Niewenhuizen and D. Freedman (North-Holland 1979); T. Yanagida, in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, ed. O. Sawaga and A. Sugamoto (Tsukuba 1979); R.N. Mohapatra and G. Senjanovic, some more misc text. Smith W.H., L. Altec et al some personal communication.""",
"""[3] S. Hawking, C. Hunter and M. Taylor-Robinson.""",
"""[4] E. Schrodinger, Sitzungsber. Preuss. Akad. Wiss. Phys. Math. Kl. 24, 418(1930); K. Huang, Am. J. Phys. 20, 479(1952); H. Jehle, Phys, Rev. D3, 306(1971); G. A. Perkins, Found. Phys. 6, 237(1976); J. A. Lock, Am. J. Phys. 47, 797(1979); A. O. Barut et al, Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); Phys. Rev. Lett. 52, 2009(1984).""",
"""[5] Hawking S., P. van Niewenhuizen, L.S. Durkin, D. Freeman, some title of some journal""",
"""[6] Hawking S., D. Freeman, some title of some journal""",
"""[7] Hawking S. and D. Freeman, another random title of some random journal""",
"""[8] L.S. Durkin and P. Langacker, Phys. Lett B166, 436 (1986); Amaldi et al., Phys. Rev. D36, 1385 (1987); Hayward and Yellow et al., eds. Phys. Lett B245, 669 (1990); Nucl. Phys. B342, 15 (1990);
""",
"""[9] M. I. Moli_ero, and J. C. Oller, Performance test of the CMS link alignment system
""",
"""[10] Hush, D.R., R.Leighton, and B.G. Horne, 1993. "Progress in supervised Neural Netw. Whats new since Lippmann?" IEEE Signal Process. Magazine 10, 8-39
""",
"""[11] T.G. Rizzo, Phys. Rev. D40, 3035 (1989); Proceedings of the 1990 Summer Study on High Energy Physics. ed E. Berger, June 25-July 13, 1990, Snowmass Colorado (World Scientific, Singapore, 1992) p. 233; V. Barger, J.L. Hewett and T.G. Rizzo, Phys. Rev. D42, 152 (1990); J.L. Hewett, Phys. Lett. B238, 98 (1990);
"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">M. I. Trofimov, N. De Filippis and E. A. Smolenskii</subfield>
<subfield code="m">Application of the electronegativity indices of organic molecules to tasks of chemical informatics</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">M. Gell-Mann, P. Ramon</subfield>
<subfield code="m">ans R. Slansky in Supergravity</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">P. van Niewenhuizen and D. Freedman</subfield>
<subfield code="m">(North-Holland 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">T. Yanagida (O. Sawaga and A. Sugamoto (eds.))</subfield>
<subfield code="m">in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, (Tsukuba 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">R.N. Mohapatra and G. Senjanovic</subfield>
<subfield code="m">some more misc text</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">Smith W.H., L. Altec et al</subfield>
<subfield code="m">some personal communication</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="h">S. Hawking, C. Hunter and M. Taylor-Robinson</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">E. Schrodinger</subfield>
<subfield code="m">Sitzungsber. Sitzungsber. K\xf6nigl. Preuss. Akad. Wiss. Phys. Math. Kl. : 24 (1930) 418;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">K. Huang</subfield>
<subfield code="s">Am. J. Phys. 20 (1952) 479</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">H. Jehle</subfield>
<subfield code="s">Phys. Rev D 3 (1971) 306</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">G. A. Perkins</subfield>
<subfield code="s">Found. Phys. 6 (1976) 237</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">J. A. Lock</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">A. O. Barut et al</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="s">Phys. Rev. Lett. 52 (1984) 2009</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">Hawking S., P. van Niewenhuizen, L.S. Durkin, D. Freeman</subfield>
<subfield code="m">some title of some journal</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">Hawking S., D. Freeman</subfield>
<subfield code="m">some title of some journal</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="h">Hawking S. and D. Freeman</subfield>
<subfield code="m">another random title of some random journal</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="h">L.S. Durkin and P. Langacker</subfield>
<subfield code="s">Phys. Lett B 166 (1986) 436</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="h">Amaldi et al</subfield>
<subfield code="s">Phys. Rev D 36 (1987) 1385</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="h">(Hayward and Yellow et al (ed.))</subfield>
<subfield code="s">Phys. Lett B 245 (1990) 669</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="s">Nucl. Phys B 342 (1990) 15</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">M. I. Moli_ero, and J. C. Oller, Performance test of the CMS link alignment system</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">Hush, D.R., 1993. "Progress in supervised Neural Netw. Whats new since Lippmann?" IEEE Signal Process. Magazine 10, 8-39</subfield>
<subfield code="h">R.Leighton, and B.G. Horne</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="h">T.G. Rizzo</subfield>
<subfield code="s">Phys. Rev D 40 (1989) 3035</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">Proceedings of the 1990 Summer Study on High Energy Physics June 25-July 13, 1990, Snowmass Colorado (World Scientific, Singapore, 1992) p. 233;</subfield>
<subfield code="h">(E. Berger (ed.))</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="h">V. Barger, J.L. Hewett and T.G. Rizzo</subfield>
<subfield code="s">Phys. Rev D 42 (1990) 152</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="h">J.L. Hewett</subfield>
<subfield code="s">Phys. Lett B 238 (1990) 98</subfield>
</datafield>"""
out = self.extract_references(ex_author_lines)
        # Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_doi_recognition(self):
""" refextract - test doi example """
ex_doi_lines = ["""[1] Some example misc text, for this doi: http://dx.doi.org/10.1007/s11172-006-0105-6""",
"""[2] 10.1007/s11172-006-0105-6."""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">Some example misc text, for this doi:</subfield>
<subfield code="a">10.1007/s11172-006-0105-6</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="a">10.1007/s11172-006-0105-6</subfield>
</datafield>"""
out = self.extract_references(ex_doi_lines)
        # Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_url_recognition(self):
""" refextract - test url example """
ex_url_lines = ["""[1] <a href="http://cdsweb.cern.ch/">CERN Document Server</a>; http://cdsweb.cern.ch/ then http://www.itp.ucsb.edu/online/susyc99/discussion/; hello world <a href="http://uk.yahoo.com/">Yahoo!</a>""",
"""[2] CERN Document Server <a href="http://cdsweb.cern.ch/">CERN Document Server</a>""",
"""[3] A list of authors, and a title. http://cdsweb.cern.ch/"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<subfield code="u">http://uk.yahoo.com/</subfield>
<subfield code="z">Yahoo!</subfield>
</datafield>
<subfield code="u">http://cdsweb.cern.ch/</subfield>
<subfield code="z">CERN Document Server</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">A list of authors, and a title</subfield>
<subfield code="u">http://cdsweb.cern.ch/</subfield>
</datafield>"""
out = self.extract_references(ex_url_lines)
        # Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_report_number_recognition(self):
""" refextract - test report number example """
ex_repno_lines = ["""[1] hep-th/9806087""",
"""[2] arXiv:0708.3457""",
"""[3] some misc lkjslkdjlksjflksj [hep-th/9804058] arXiv:0708.3457, hep-th/1212321, some more misc,"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">hep-th/9806087</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">arXiv 0708.3457</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">some misc lkjslkdjlksjflksj</subfield>
<subfield code="r">hep-th/9804058</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="r">arXiv:0708.3457</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="r">hep-th/1212321</subfield>
<subfield code="m">some more misc</subfield>
</datafield>"""
out = self.extract_references(ex_repno_lines)
        # Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_journal_title_recognition(self):
""" refextract - test journal title example """
ex_journal_title_lines = ["""[1] Phys. Rev. D52 (1995) 5681.""",
"""[2] Phys. Rev. D59 (1999) 064005;""",
"""[3] Am. J. Phys. 47, 797(1979);""",
"""[4] R. Soc. London, Ser. A155, 447(1936); ibid, D24, 3333(1981).""",
"""[5] Commun. Math. Phys. 208 (1999) 413;""",
"""[6] Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); More text, followed by an IBID A 546 (1999) 96""",
"""[7] Phys. Math. Kl. 24, 418(1930); Am. J. Phys. 20, 479(1952); Phys, Rev. D3, 306(1971); Phys. 6, 237(1976); Am. J. Phys. 47, 797(1979); Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); Phys. Rev. Lett. 52, 2009(1984)."""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev D 52 (1995) 5681</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="s">Phys. Rev D 59 (1999) 064005</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">R. Soc</subfield>
<subfield code="m">London, Ser. A : 155 (1936) 447; ibid, D : 24 (1981) 3333</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="s">Commun. Math. Phys. 208 (1999) 413</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">More text, followed by an</subfield>
<subfield code="s">Phys. Rev A 546 (1999) 96</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">Phys. Math. Kl. : 24 (1930) 418;</subfield>
<subfield code="s">Am. J. Phys. 20 (1952) 479</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 3 (1971) 306</subfield>
<subfield code="m">Phys. : 6 (1976) 237;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="s">Phys. Rev. Lett. 52 (1984) 2009</subfield>
</datafield>"""
out = self.extract_references(ex_journal_title_lines)
        # Compare the received output with the expected references
self.assertEqual(out, references_expected)
def test_mixed(self):
""" refextract - test mixed content example """
ex_mixed_lines = ["""[1] E. Schrodinger, Sitzungsber. Preuss. Akad. Wiss. Phys. Math. Kl. 24, 418(1930); ibid, 3, 1(1931); K. Huang, Am. J. Phys. 20, 479(1952); H. Jehle, Phys, Rev. D3, 306(1971); G. A. Perkins, Found. Phys. 6, 237(1976); J. A. Lock, Am. J. Phys. 47, 797(1979); A. O. Barut et al, Phys. Rev. D23, 2454(1981); ibid, D24, 3333(1981); ibid, D31, 1386(1985); Phys. Rev. Lett. 52, 2009(1984).""",
"""[2] P. A. M. Dirac, Proc. R. Soc. London, Ser. A155, 447(1936); ibid, D24, 3333(1981).""",
"""[3] O.O. Vaneeva, R.O. Popovych and C. Sophocleous, Enhanced Group Analysis and Exact Solutions of Vari-able Coefficient Semilinear Diffusion Equations with a Power Source, Acta Appl. Math., doi:10.1007/s10440-008-9280-9, 46 p., arXiv:0708.3457.""",
"""[4] M. I. Trofimov, N. De Filippis and E. A. Smolenskii. Application of the electronegativity indices of organic molecules to tasks of chemical informatics. Russ. Chem. Bull., 54:2235-2246, 2005. http://dx.doi.org/10.1007/s11172-006-0105-6.""",
"""[5] M. Gell-Mann, P. Ramon and R. Slansky, in Supergravity, P. van Niewenhuizen and D. Freedman (North-Holland 1979); T. Yanagida, in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, ed. O. Sawaga and A. Sugamoto (Tsukuba 1979); R.N. Mohapatra and G. Senjanovic, Phys. Rev. Lett. 44, 912, (1980).
""",
"""[6] L.S. Durkin and P. Langacker, Phys. Lett B166, 436 (1986); Amaldi et al., Phys. Rev. D36, 1385 (1987); Hayward and Yellow et al., eds. Phys. Lett B245, 669 (1990); Nucl. Phys. B342, 15 (1990);
""",
"""[7] Wallet et al, Some preceedings on Higgs Phys. Rev. Lett. 44, 912, (1980) 10.1007/s11172-006-0105-6; Pod I., C. Jennings, et al, Blah blah blah blah blah blah blah blah blah blah, Nucl. Phys. B342, 15 (1990)"""]
references_expected = u"""<record>
<controlfield tag="001">1234</controlfield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">E. Schrodinger</subfield>
<subfield code="m">Sitzungsber. Sitzungsber. K\xf6nigl. Preuss. Akad. Wiss. Phys. Math. Kl. : 24 (1930) 418;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Sitzungsber. K\xf6nigl. Preuss. Akad. Wiss. 3 (1931) 1</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">K. Huang</subfield>
<subfield code="s">Am. J. Phys. 20 (1952) 479</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">H. Jehle</subfield>
<subfield code="s">Phys. Rev D 3 (1971) 306</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">G. A. Perkins</subfield>
<subfield code="s">Found. Phys. 6 (1976) 237</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">J. A. Lock</subfield>
<subfield code="s">Am. J. Phys. 47 (1979) 797</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="h">A. O. Barut et al</subfield>
<subfield code="s">Phys. Rev D 23 (1981) 2454</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev D 24 (1981) 3333</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev D 31 (1985) 1386</subfield>
<subfield code="h">A. O. Barut et al</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="s">Phys. Rev. Lett. 52 (1984) 2009</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="h">P. A. M. Dirac</subfield>
<subfield code="s">Proc. R. Soc. Lond., A 155 (1936) 447</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="s">Proc. R. Soc. Lond., D 24 (1981) 3333</subfield>
<subfield code="h">P. A. M. Dirac</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="h">O.O. Vaneeva, R.O. Popovych and C. Sophocleous</subfield>
<subfield code="m">Enhanced Group Analysis and Exact Solutions of Vari-able Coefficient Semilinear Diffusion Equations with a Power Source, Acta Appl. Math., , 46 p</subfield>
<subfield code="a">10.1007/s10440-008-9280-9</subfield>
<subfield code="r">arXiv:0708.3457</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="h">M. I. Trofimov, N. De Filippis and E. A. Smolenskii</subfield>
<subfield code="m">Application of the electronegativity indices of organic molecules to tasks of chemical informatics. Russ. Chem. Bull.: 54 (2005) 2235</subfield>
<subfield code="a">10.1007/s11172-006-0105-6</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">M. Gell-Mann, P. Ramon and R. Slansky</subfield>
<subfield code="m">in Supergravity</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">P. van Niewenhuizen and D. Freedman</subfield>
<subfield code="m">(North-Holland 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">T. Yanagida (O. Sawaga and A. Sugamoto (eds.))</subfield>
<subfield code="m">in Proceedings of the Workshop on the Unified Thoery and the Baryon Number in teh Universe, (Tsukuba 1979);</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="h">R.N. Mohapatra and G. Senjanovic</subfield>
<subfield code="s">Phys. Rev. Lett. 44 (1980) 912</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">L.S. Durkin and P. Langacker</subfield>
<subfield code="s">Phys. Lett B 166 (1986) 436</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">Amaldi et al</subfield>
<subfield code="s">Phys. Rev D 36 (1987) 1385</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="h">(Hayward and Yellow et al (ed.))</subfield>
<subfield code="s">Phys. Lett B 245 (1990) 669</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="s">Nucl. Phys B 342 (1990) 15</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="h">Wallet et al</subfield>
<subfield code="m">Some preceedings on Higgs</subfield>
<subfield code="s">Phys. Rev. Lett. 44 (1980) 912</subfield>
<subfield code="a">10.1007/s11172-006-0105-6;</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="h">Pod I., C. Jennings, et al</subfield>
<subfield code="m">Blah blah blah blah blah blah blah blah blah blah</subfield>
<subfield code="s">Nucl. Phys B 342 (1990) 15</subfield>
</datafield>"""
out = self.extract_references(ex_mixed_lines)
        # Compare the received output with the expected references
self.assertEqual(out, references_expected)
TEST_SUITE = make_test_suite(RefextractTest)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
|
kaplun/Invenio-OpenAIRE
|
modules/bibedit/lib/refextract_tests.py
|
Python
|
gpl-2.0
| 29,629
|
[
"DIRAC"
] |
2aaaa3c9d70b503d2edb9fa9b826183df03f756e4ac818c881fe0b5621601663
|
#!/usr/bin/env python
#
# $File: backwardMigrate.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
sim.turnOnDebug('DBG_MIGRATOR')
pop = sim.Population(size=[10000, 5000, 8000], infoFields=['migrate_to', 'migrate_from'])
def originOfInds(pop):
print('Observed backward migration matrix at generation {}'.format(pop.dvars().gen))
for sp in range(pop.numSubPop()):
        # get the source subpopulation of every individual currently in subpopulation sp
origins = pop.indInfo('migrate_from', sp)
spSize = pop.subPopSize(sp)
B_sp = [origins.count(j) * 1.0 /spSize for j in range(pop.numSubPop())]
print(' ' + ', '.join(['{:.3f}'.format(x) for x in B_sp]))
return True
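# Note added for clarity (not part of the original example): the matrix printed
# above is the observed backward migration matrix, i.e. row sp gives the fraction
# of individuals currently in subpopulation sp that originated from each source
# subpopulation. Its off-diagonal entries should roughly match the rate matrix
# passed to BackwardMigrator below.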
pop.evolve(
initOps=sim.InitSex(),
preOps=
# mark the source subpopulation of each individual
[sim.InitInfo(i, subPops=i, infoFields='migrate_from') for i in range(3)] + [
# perform migration
sim.BackwardMigrator(rate=[
[0, 0.04, 0.02],
[0.05, 0, 0.02],
[0.02, 0.01, 0]
]),
# calculate and print observed backward migration matrix
sim.PyOperator(func=originOfInds),
# calculate population size
sim.Stat(popSize=True),
# and print it
sim.PyEval(r'"Pop size after migration: {}\n".format(", ".join([str(x) for x in subPopSize]))'),
],
matingScheme=sim.RandomMating(),
gen = 5
)
|
BoPeng/simuPOP
|
docs/backwardMigrate.py
|
Python
|
gpl-2.0
| 2,439
|
[
"VisIt"
] |
2e3d71f0946aeb9f3cccfe6c20f059c8271a65f64247b05759e1b494e72260be
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
from string import ascii_letters, digits
from ansible.errors import AnsibleOptionsError
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text
from ansible.parsing.quoting import unquote
from ansible.utils.path import makedirs_safe
BOOL_TRUE = frozenset( [ "true", "t", "y", "1", "yes", "on" ] )
def mk_boolean(value):
ret = value
if not isinstance(value, bool):
if value is None:
ret = False
ret = (str(value).lower() in BOOL_TRUE)
return ret
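# Illustrative behaviour of mk_boolean (comment only, not upstream code):
#   mk_boolean("Yes") -> True    ("yes" is in BOOL_TRUE)
#   mk_boolean("0")   -> False   ("0" is not in BOOL_TRUE)
#   mk_boolean(None)  -> False
#   mk_boolean(True)  -> True    (booleans are returned unchanged)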
def shell_expand(path, expand_relative_paths=False):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
if expand_relative_paths and not path.startswith('/'):
# paths are always 'relative' to the config?
if 'CONFIG_FILE' in globals():
CFGDIR = os.path.dirname(CONFIG_FILE)
path = os.path.join(CFGDIR, path)
path = os.path.abspath(path)
return path
def get_config(p, section, key, env_var, default, value_type=None, expand_relative_paths=False):
''' return a configuration variable with casting
:arg p: A ConfigParser object to look for the configuration in
    :arg section: The section of the ini config that should be examined for this key.
:arg key: The config key to get this config from
:arg env_var: An Environment variable to check for the config var. If
this is set to None then no environment variable will be used.
:arg default: A default value to assign to the config var if nothing else sets it.
:kwarg value_type: The type of the value. This can be any of the following strings:
:boolean: sets the value to a True or False value
        :integer: Sets the value to an integer or raises a ValueError
        :float: Sets the value to a float or raises a ValueError
:list: Treats the value as a comma separated list. Split the value
and return it as a python list.
:none: Sets the value to None
        :path: Expands any environment variables and tildes in the value.
        :tmppath: Create a unique temporary directory inside of the directory
specified by value and return its path.
:pathlist: Treat the value as a typical PATH string. (On POSIX, this
means colon separated strings.) Split the value and then expand
each part for environment variables and tildes.
:kwarg expand_relative_paths: for pathlist and path types, if this is set
to True then also change any relative paths into absolute paths. The
default is False.
'''
value = _get_config(p, section, key, env_var, default)
if value_type == 'boolean':
value = mk_boolean(value)
elif value:
if value_type == 'integer':
value = int(value)
elif value_type == 'float':
value = float(value)
elif value_type == 'list':
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif value_type == 'none':
if value == "None":
value = None
elif value_type == 'path':
value = shell_expand(value, expand_relative_paths=expand_relative_paths)
elif value_type == 'tmppath':
value = shell_expand(value)
if not os.path.exists(value):
makedirs_safe(value, 0o700)
prefix = 'ansible-local-%s' % os.getpid()
value = tempfile.mkdtemp(prefix=prefix, dir=value)
elif value_type == 'pathlist':
if isinstance(value, string_types):
value = [shell_expand(x, expand_relative_paths=expand_relative_paths) for x in value.split(os.pathsep)]
elif isinstance(value, string_types):
value = unquote(value)
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
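# Usage sketch for get_config (illustrative only; the keys below are hypothetical
# and not real Ansible settings). Assumes a loaded ConfigParser object `p`:
#
#   EXAMPLE_TIMEOUT = get_config(p, 'defaults', 'example_timeout',
#                                'ANSIBLE_EXAMPLE_TIMEOUT', 30, value_type='integer')
#   # -> 30 as an int unless overridden in ansible.cfg or the environment
#   EXAMPLE_DIRS = get_config(p, 'defaults', 'example_dirs', None,
#                             '~/a:~/b', value_type='pathlist')
#   # -> list of expanded absolute paths, e.g. ['/home/user/a', '/home/user/b']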
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
value = default
if p is not None:
try:
value = p.get(section, key, raw=True)
except:
pass
if env_var is not None:
env_value = os.environ.get(env_var, None)
if env_value is not None:
value = env_value
return to_text(value, errors='surrogate_or_strict', nonstring='passthru')
def load_config_file():
    ''' Load config file. Order (first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
try:
path1 = os.getcwd() + "/ansible.cfg"
except OSError:
path1 = None
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
# non configurable but used as defaults
BLACKLIST_EXTS = ('.pyc', '.pyo', '.swp', '.bak', '~', '.rpm', '.md', '.txt')
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = [ 'bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www', ]
# sections in config file
DEFAULTS='defaults'
#### DEPRECATED VARS ####
# FIXME: add deprecation warning when these get set
#none left now
#### DEPRECATED FEATURE TOGGLES: these will eventually be removed as it becomes the standard ####
# If --tags or --skip-tags is given multiple times on the CLI and this is True, merge the lists of tags together.
# If False, let the last argument overwrite any previous ones.
# Behaviour is to overwrite through 2.2; 2.3 overwrites but prints a deprecation warning; from 2.4 the default is to merge.
MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'ANSIBLE_MERGE_MULTIPLE_CLI_TAGS', True, value_type='boolean')
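# Illustrative example of the merge behaviour described above (not a config value):
# given `ansible-playbook site.yml --tags foo --tags bar`, merging runs the tasks
# tagged foo and bar, whereas overwriting (the pre-2.4 default) keeps only bar.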
# Controls which 'precedence path' to take; remove when we decide on which!
SOURCE_OVER_GROUPS = get_config(p, 'vars', 'source_over_groups', 'ANSIBLE_SOURCE_OVER_GROUPS', True, value_type='boolean')
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH',
'~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles', value_type='pathlist', expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FACT_PATH = get_config(p, DEFAULTS, 'fact_path', 'ANSIBLE_FACT_PATH', None, value_type='path')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, value_type='integer')
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_MODULE_SET_LOCALE = get_config(p, DEFAULTS, 'module_set_locale', 'ANSIBLE_MODULE_SET_LOCALE', False, value_type='boolean')
DEFAULT_MODULE_COMPRESSION = get_config(p, DEFAULTS, 'module_compression', None, 'ZIP_DEFLATED')
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, value_type='integer')
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, value_type='integer')
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, value_type='boolean')
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, value_type='path')
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, value_type='integer')
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, value_type='boolean')
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, value_type='path')
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', 'smart')
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, value_type='boolean')
DEFAULT_SSH_TRANSFER_METHOD = get_config(p, 'ssh_connection', 'transfer_method', 'ANSIBLE_SSH_TRANSFER_METHOD', None)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, value_type='boolean')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, value_type='boolean')
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GATHER_SUBSET', 'all').lower()
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
DEFAULT_ALLOW_UNSAFE_LOOKUPS = get_config(p, DEFAULTS, 'allow_unsafe_lookups', None, False, value_type='boolean')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
SHOW_CUSTOM_STATS = get_config(p, DEFAULTS, 'show_custom_stats', 'ANSIBLE_SHOW_CUSTOM_STATS', False, value_type='boolean')
NAMESPACE_FACTS = get_config(p, DEFAULTS, 'restrict_facts_namespace', 'ANSIBLE_RESTRICT_FACTS', False, value_type='boolean')
# Inventory
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts', value_type='path', expand_relative_paths=True)
INVENTORY_ENABLED = get_config(p, DEFAULTS,'inventory_enabled', 'ANSIBLE_INVENTORY_ENABLED',
[ 'host_list', 'script', 'ini', 'yaml' ], value_type='list')
INVENTORY_IGNORE_EXTS = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE',
BLACKLIST_EXTS + (".orig", ".ini", ".cfg", ".retry"), value_type='list')
INVENTORY_IGNORE_PATTERNS = get_config(p, DEFAULTS, 'inventory_ignore_patterns', 'ANSIBLE_INVENTORY_IGNORE_REGEX', [], value_type='list')
VARIABLE_PRECEDENCE = get_config(p, DEFAULTS, 'precedence', 'ANSIBLE_PRECEDENCE',
['all_inventory', 'groups_inventory', 'all_plugins_inventory', 'all_plugins_play',
'groups_plugins_inventory', 'groups_plugins_play'],
value_type='list')
# Static includes
DEFAULT_TASK_INCLUDES_STATIC = get_config(p, DEFAULTS, 'task_includes_static', 'ANSIBLE_TASK_INCLUDES_STATIC', False, value_type='boolean')
DEFAULT_HANDLER_INCLUDES_STATIC = get_config(p, DEFAULTS, 'handler_includes_static', 'ANSIBLE_HANDLER_INCLUDES_STATIC', False, value_type='boolean')
# Disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, value_type='boolean')
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, value_type='boolean')
ALLOW_WORLD_READABLE_TMPFILES = get_config(p, DEFAULTS, 'allow_world_readable_tmpfiles', None, False, value_type='boolean')
# Selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs, 9p', value_type='list')
DEFAULT_LIBVIRT_LXC_NOSECLABEL = get_config(p, 'selinux', 'libvirt_lxc_noseclabel', 'LIBVIRT_LXC_NOSECLABEL', False, value_type='boolean')
# PRIVILEGE ESCALATION
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, value_type='boolean')
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, value_type='boolean')
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, value_type='boolean')
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')
# Become
BECOME_ERROR_STRINGS = {
'sudo': 'Sorry, try again.',
'su': 'Authentication failure',
'pbrun': '',
'pfexec': '',
'doas': 'Permission denied',
'dzdo': '',
'ksu': 'Password incorrect',
'pmrun': 'You are not permitted to run this command'
} # FIXME: deal with i18n
BECOME_MISSING_STRINGS = {
'sudo': 'sorry, a password is required to run sudo',
'su': '',
'pbrun': '',
'pfexec': '',
'doas': 'Authorization required',
'dzdo': '',
'ksu': 'No password given',
'pmrun': ''
} # FIXME: deal with i18n
BECOME_METHODS = ['sudo', 'su', 'pbrun', 'pfexec', 'doas', 'dzdo', 'ksu', 'runas', 'pmrun']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD',
'sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo').lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME', False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, value_type='boolean')
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# these modules must (1) take a "name" or "pkg" parameter that is a list. If
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS',
"apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS',
'~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS',
'~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS',
'~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS',
'~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS',
'~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY',
'~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules', value_type='pathlist')
DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS',
'~/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils', value_type='pathlist')
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS',
'~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS',
'~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS',
'~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS',
'~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS',
'~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules', 'NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'ce',
'vyos', 'sros', 'dellos9', 'dellos10', 'dellos6'],
value_type='list')
DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, value_type='integer')
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, value_type='boolean')
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, value_type='boolean')
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, value_type='boolean')
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, value_type='list')
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, value_type='boolean')
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, value_type='boolean')
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, value_type='boolean')
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, value_type='boolean')
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, value_type='boolean')
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], value_type='list')
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, value_type='boolean')
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, value_type='boolean')
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], value_type='list')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, value_type='boolean')
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', None, value_type='path')
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, value_type='none')
DISPLAY_ARGS_TO_STDOUT = get_config(p, DEFAULTS, 'display_args_to_stdout', 'ANSIBLE_DISPLAY_ARGS_TO_STDOUT', False, value_type='boolean')
MAX_FILE_SIZE_FOR_DIFF = get_config(p, DEFAULTS, 'max_diff_size', 'ANSIBLE_MAX_DIFF_SIZE', 1024 * 1024, value_type='integer')
# CONNECTION RELATED
USE_PERSISTENT_CONNECTIONS = get_config(p, DEFAULTS, 'use_persistent_connections', 'ANSIBLE_USE_PERSISTENT_CONNECTIONS', False, value_type='boolean')
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-C -o ControlMaster=auto -o ControlPersist=60s')
# WARNING: Someone might be tempted to switch this from percent-formatting
# to .format() in the future. be sure to read this:
# http://lucumr.pocoo.org/2016/12/29/careful-with-str-format/ and understand
# that it may be a security risk to do so.
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', None)
ANSIBLE_SSH_CONTROL_PATH_DIR = get_config(p, 'ssh_connection', 'control_path_dir', 'ANSIBLE_SSH_CONTROL_PATH_DIR', u'~/.ansible/cp')
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, value_type='boolean')
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, value_type='integer')
ANSIBLE_SSH_EXECUTABLE = get_config(p, 'ssh_connection', 'ssh_executable', 'ANSIBLE_SSH_EXECUTABLE', 'ssh')
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, value_type='boolean')
PARAMIKO_HOST_KEY_AUTO_ADD = get_config(p, 'paramiko_connection', 'host_key_auto_add', 'ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD', False, value_type='boolean')
PARAMIKO_PROXY_COMMAND = get_config(p, 'paramiko_connection', 'proxy_command', 'ANSIBLE_PARAMIKO_PROXY_COMMAND', None)
PARAMIKO_LOOK_FOR_KEYS = get_config(p, 'paramiko_connection', 'look_for_keys', 'ANSIBLE_PARAMIKO_LOOK_FOR_KEYS', True, value_type='boolean')
PERSISTENT_CONNECT_TIMEOUT = get_config(p, 'persistent_connection', 'connect_timeout', 'ANSIBLE_PERSISTENT_CONNECT_TIMEOUT', 30, value_type='integer')
PERSISTENT_CONNECT_RETRIES = get_config(p, 'persistent_connection', 'connect_retries', 'ANSIBLE_PERSISTENT_CONNECT_RETRIES', 30, value_type='integer')
PERSISTENT_CONNECT_INTERVAL = get_config(p, 'persistent_connection', 'connect_interval', 'ANSIBLE_PERSISTENT_CONNECT_INTERVAL', 1, value_type='integer')
# obsolete -- will be formally removed
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, value_type='integer')
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, value_type='integer')
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, value_type='float')
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, value_type='integer')
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, value_type='boolean')
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, value_type='boolean')
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, value_type='boolean')
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'],
value_type='list')
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS',
['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list')
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
COLOR_DIFF_ADD = get_config(p, 'colors', 'diff_add', 'ANSIBLE_COLOR_DIFF_ADD', 'green')
COLOR_DIFF_REMOVE = get_config(p, 'colors', 'diff_remove', 'ANSIBLE_COLOR_DIFF_REMOVE', 'red')
COLOR_DIFF_LINES = get_config(p, 'colors', 'diff_lines', 'ANSIBLE_COLOR_DIFF_LINES', 'cyan')
# diff
DIFF_CONTEXT = get_config(p, 'diff', 'context', 'ANSIBLE_DIFF_CONTEXT', 3, value_type='integer')
DIFF_ALWAYS = get_config(p, 'diff', 'always', 'ANSIBLE_DIFF_ALWAYS', False, value_type='boolean')
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'win_command', 'shell', 'win_shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'win_command', 'shell', 'win_shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_PASSWORD_CHARS = to_text(ascii_letters + digits + ".,:-_", errors='strict') # characters included in auto-generated passwords
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
# module search
IGNORE_FILES = ["COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION", "GUIDELINES"]
INTERNAL_RESULT_KEYS = ['add_host', 'add_group']
RESTRICTED_RESULT_KEYS = ['ansible_rsync_path', 'ansible_playbook_python']
# check all of these extensions when looking for 'variable' files which should be YAML or JSON.
YAML_FILENAME_EXTENSIONS = [ ".yml", ".yaml", ".json" ]
|
HuaweiSwitch/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 30,728
|
[
"Galaxy",
"MOOSE"
] |
cd8f5e11a47df981f9831d65c296ad83ce06e2b151b682c0d993b80b6caf1ba8
|
# -----------------------------------------------------------------------------
# Download data:
# - Browser:
# http://midas3.kitware.com/midas/folder/10409 => VisibleMale/vm_head_frozenct.mha
# - Terminal
# curl "http://midas3.kitware.com/midas/download?folders=&items=235235" -o vm_head_frozenct.mha
# -----------------------------------------------------------------------------
from vtk import *
from vtk.web.query_data_model import *
from vtk.web.dataset_builder import *
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
dataset_destination_path = '/Users/seb/Desktop/vm_head_frozenct_vi_%s_%s_%s'
file_path = '/Users/seb/Downloads/vm_head_frozenct.mha'
field = 'MetaImage'
fieldRange = [0.0, 4095.0]
nbSteps = 4
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
def updatePieceWise(pwf, dataRange, center, halfSpread):
    # Rebuild the piecewise opacity function passed in as 'pwf' so that opacity
    # peaks (1.0) at 'center' and falls to 0.0 at 'center +/- halfSpread',
    # clamped to the data range.
    pwf.RemoveAllPoints()
    if (center - halfSpread) <= dataRange[0]:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center, 1.0)
    else:
        pwf.AddPoint(dataRange[0], 0.0)
        pwf.AddPoint(center - halfSpread, 0.0)
        pwf.AddPoint(center, 1.0)
    if (center + halfSpread) >= dataRange[1]:
        pwf.AddPoint(dataRange[1], 0.0)
    else:
        pwf.AddPoint(center + halfSpread, 0.0)
        pwf.AddPoint(dataRange[1], 0.0)
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
reader = vtkMetaImageReader()
reader.SetFileName(file_path)
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(reader.GetOutputPort())
mapper.RenderToImageOn()
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(fieldRange[0], 1.0, 1.0, 1.0)
colorFunction.AddRGBPoint(fieldRange[1], 1.0, 1.0, 1.0)
halfSpread = (fieldRange[1] - fieldRange[0]) / float(2*nbSteps)
centers = [ fieldRange[0] + halfSpread*float(2*i+1) for i in range(nbSteps)]
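# For the settings above (fieldRange [0, 4095], nbSteps 4) this gives
# halfSpread = 511.875 and centers = [511.875, 1535.625, 2559.375, 3583.125];
# each pass below builds a tent-shaped opacity peak of width 2*halfSpread
# around one of these centers, isolating one intensity band per output layer.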
scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)
volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)
window = vtkRenderWindow()
window.SetSize(499, 400)
renderer = vtkRenderer()
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()
# Camera setting
camera = {
'position': [-0.264, -890.168, -135.0],
'focalPoint': [-0.264, -30.264, -135.0],
'viewUp': [0,0,1]
}
update_camera(renderer, camera)
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
vcdsb = SortedCompositeDataSetBuilder(dataset_destination_path % (nbSteps, halfSpread, window.GetSize()[0]), {'type': 'spherical', 'phi': [0], 'theta': [0]})
idx = 0
vcdsb.start(window, renderer)
for center in centers:
idx += 1
updatePieceWise(scalarOpacity, fieldRange, center, halfSpread)
# Capture layer
vcdsb.activateLayer(field, center)
# Write data
vcdsb.writeData(mapper)
vcdsb.stop()
|
Kitware/arctic-viewer
|
scripts/examples/vtk/medical/head-ct-volume.py
|
Python
|
bsd-3-clause
| 3,622
|
[
"VTK"
] |
b13581c5f1bfa2a5ee64bff47a1c81906bb7cd0115aa1603dc25fcce5e0dcbf3
|
# The code is rewritten based on source code from tensorflow tutorial for Recurrent Neural Network.
# https://www.tensorflow.org/versions/0.6.0/tutorials/recurrent/index.html
# You can get source code for the tutorial from
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/rnn/ptb/ptb_word_lm.py
#
# There is dropout on each hidden layer to prevent the model from overfitting
#
# Here is a useful practical guide for training dropout networks
# https://www.cs.toronto.edu/~hinton/absps/JMLRdropout.pdf
# You can find the practical guide in Appendix A
import numpy as np
import tensorflow as tf
import time
import csv
from random import shuffle
import random
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn import metrics
from math import sqrt
# flags
tf.flags.DEFINE_float("epsilon", 0.1, "Epsilon value for Adam Optimizer.")
tf.flags.DEFINE_float("l2_lambda", 0.3, "Lambda for l2 loss.")
tf.flags.DEFINE_float("learning_rate", 0.1, "Learning rate")
tf.flags.DEFINE_float("max_grad_norm", 20.0, "Clip gradients to this norm.")
tf.flags.DEFINE_float("keep_prob", 0.6, "Keep probability for dropout")
tf.flags.DEFINE_integer("hidden_layer_num", 1, "The number of hidden layers (Integer)")
tf.flags.DEFINE_integer("hidden_size", 200, "The number of hidden nodes (Integer)")
tf.flags.DEFINE_integer("evaluation_interval", 5, "Evaluate and print results every x epochs")
tf.flags.DEFINE_integer("batch_size", 32, "Batch size for training.")
tf.flags.DEFINE_integer("epochs", 150, "Number of epochs to train for.")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("train_data_path", 'data/0910_b_train.csv', "Path to the training dataset")
tf.flags.DEFINE_string("test_data_path", 'data/0910_b_test.csv', "Path to the testing dataset")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
def add_gradient_noise(t, stddev=1e-3, name=None):
"""
Adds gradient noise as described in http://arxiv.org/abs/1511.06807 [2].
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks [2].
"""
with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
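# Hedged usage sketch (illustration only; the training loop further below applies
# this to the real gradients): given (gradient, variable) pairs from
# optimizer.compute_gradients(loss), noise is added per gradient, e.g.
#   noisy_gvs = [(add_gradient_noise(g), v) for g, v in grads_and_vars if g is not None]
# The default stddev of 1e-3 is the fixed value recommended by the cited paper.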
class StudentModel(object):
def __init__(self, is_training, config):
self._batch_size = batch_size = FLAGS.batch_size
self.num_skills = num_skills = config.num_skills
self.hidden_size = size = FLAGS.hidden_size
self.num_steps = num_steps = config.num_steps
input_size = num_skills*2
inputs = self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
self._target_id = target_id = tf.placeholder(tf.int32, [None])
self._target_correctness = target_correctness = tf.placeholder(tf.float32, [None])
final_hidden_size = size
hidden_layers = []
for i in range(FLAGS.hidden_layer_num):
final_hidden_size = size/(i+1)
hidden1 = tf.nn.rnn_cell.LSTMCell(final_hidden_size, state_is_tuple=True)
if is_training and config.keep_prob < 1:
hidden1 = tf.nn.rnn_cell.DropoutWrapper(hidden1, output_keep_prob=FLAGS.keep_prob)
hidden_layers.append(hidden1)
cell = tf.nn.rnn_cell.MultiRNNCell(hidden_layers, state_is_tuple=True)
input_data = tf.reshape(self._input_data, [-1])
#one-hot encoding
with tf.device("/cpu:0"):
labels = tf.expand_dims(input_data, 1)
indices = tf.expand_dims(tf.range(0, batch_size*num_steps, 1), 1)
concated = tf.concat(1, [indices, labels])
inputs = tf.sparse_to_dense(concated, tf.pack([batch_size*num_steps, input_size]), 1.0, 0.0)
inputs.set_shape([batch_size*num_steps, input_size])
# [batch_size, num_steps, input_size]
inputs = tf.reshape(inputs, [-1, num_steps, input_size])
x = tf.transpose(inputs, [1, 0, 2])
# Reshape to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, input_size])
# Split to get a list of 'n_steps'
# tensors of shape (doc_num, n_input)
x = tf.split(0, num_steps, x)
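        # Shape walk-through (hedged note, with B = batch_size, T = num_steps,
        # D = input_size = 2*num_skills, as inferred from the ops above):
        #   one-hot inputs:   [B*T, D] -> reshaped to [B, T, D]
        #   after transpose:  [T, B, D] -> flattened to [T*B, D]
        #   after tf.split:   a Python list of T tensors, each of shape [B, D],
        # which is the sequence format expected by tf.nn.rnn below.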
#inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, num_steps, inputs)]
#outputs, state = tf.nn.rnn(hidden1, x, dtype=tf.float32)
outputs, state = tf.nn.rnn(cell, x, dtype=tf.float32)
print('num_skills: ', num_skills)
print('batch_size: ', batch_size)
print('len(outputs): ', len(outputs))
print('outputs[0]: ', outputs[0])
output = tf.concat(1, outputs)
print('output: ', output)
output = tf.reshape(output, [-1, int(final_hidden_size)])
print('output after reshaping: ', output)
# calculate the logits from last hidden layer to output layer
sigmoid_w = tf.get_variable("sigmoid_w", [final_hidden_size, num_skills])
sigmoid_b = tf.get_variable("sigmoid_b", [num_skills])
logits = tf.matmul(output, sigmoid_w) + sigmoid_b
# from output nodes to pick up the right one we want
logits = tf.reshape(logits, [-1])
selected_logits = tf.gather(logits, self.target_id)
#make prediction
self._pred = self._pred_values = pred_values = tf.sigmoid(selected_logits)
# loss function
loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(selected_logits, target_correctness))
#self._cost = cost = tf.reduce_mean(loss)
self._cost = cost = loss
@property
def batch_size(self):
return self._batch_size
@property
def input_data(self):
return self._input_data
@property
def auc(self):
return self._auc
@property
def pred(self):
return self._pred
@property
def target_id(self):
return self._target_id
@property
def target_correctness(self):
return self._target_correctness
@property
def initial_state(self):
return self._initial_state
@property
def pred_values(self):
return self._pred_values
@property
def cost(self):
return self._cost
@property
def final_state(self):
return self._final_state
class HyperParamsConfig(object):
"""Small config."""
init_scale = 0.05
num_steps = 0
max_grad_norm = FLAGS.max_grad_norm
max_max_epoch = FLAGS.epochs
keep_prob = FLAGS.keep_prob
num_skills = 0
def run_epoch(session, m, students, eval_op, verbose=False):
"""Runs the model on the given data."""
start_time = time.time()
index = 0
pred_labels = []
actual_labels = []
while(index+m.batch_size < len(students)):
x = np.zeros((m.batch_size, m.num_steps))
target_id = []
target_correctness = []
count = 0
for i in range(m.batch_size):
student = students[index+i]
problem_ids = student[1]
correctness = student[2]
for j in range(len(problem_ids)-1):
problem_id = int(problem_ids[j])
label_index = 0
if(int(correctness[j]) == 0):
label_index = problem_id
else:
label_index = problem_id + m.num_skills
x[i, j] = label_index
target_id.append(i*m.num_steps*m.num_skills+j*m.num_skills+int(problem_ids[j+1]))
target_correctness.append(int(correctness[j+1]))
actual_labels.append(int(correctness[j+1]))
index += m.batch_size
pred, _ = session.run([m.pred, eval_op], feed_dict={
m.input_data: x, m.target_id: target_id,
m.target_correctness: target_correctness})
for p in pred:
pred_labels.append(p)
#print pred_labels
rmse = sqrt(mean_squared_error(actual_labels, pred_labels))
fpr, tpr, thresholds = metrics.roc_curve(actual_labels, pred_labels, pos_label=1)
auc = metrics.auc(fpr, tpr)
#calculate r^2
r2 = r2_score(actual_labels, pred_labels)
return rmse, auc, r2
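# Hedged note on the CSV layout expected by read_data_from_csv_file (inferred from
# the parsing below, not documented in the original): each student spans three
# consecutive rows --
#   row 1: the number of answered problems
#   row 2: the problem/skill IDs, one per column
#   row 3: the matching correctness flags (0 or 1), aligned with row 2
# Students with two or fewer records are skipped.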
def read_data_from_csv_file(fileName):
config = HyperParamsConfig()
inputs = []
targets = []
rows = []
max_skill_num = 0
max_num_problems = 0
with open(fileName, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
rows.append(row)
index = 0
i = 0
print ("the number of rows is " + str(len(rows)))
tuple_rows = []
#turn list to tuple
while(index < len(rows)-1):
problems_num = int(rows[index][0])
tmp_max_skill = max(map(int, rows[index+1]))
if(tmp_max_skill > max_skill_num):
max_skill_num = tmp_max_skill
if(problems_num <= 2):
index += 3
else:
if problems_num > max_num_problems:
max_num_problems = problems_num
tup = (rows[index], rows[index+1], rows[index+2])
tuple_rows.append(tup)
index += 3
#shuffle the tuple
random.shuffle(tuple_rows)
print ("The number of students is " + str(len(tuple_rows)))
print ("Finish reading data")
return tuple_rows, max_num_problems, max_skill_num+1
def main(unused_args):
config = HyperParamsConfig()
eval_config = HyperParamsConfig()
timestamp = str(time.time())
train_data_path = FLAGS.train_data_path
#path to your test data set
test_data_path = FLAGS.test_data_path
#the file to store your test results
result_file_path = "run_logs_{}".format(timestamp)
#your model name
model_name = "DKT"
train_students, train_max_num_problems, train_max_skill_num = read_data_from_csv_file(train_data_path)
config.num_steps = train_max_num_problems
config.num_skills = train_max_skill_num
test_students, test_max_num_problems, test_max_skill_num = read_data_from_csv_file(test_data_path)
eval_config.num_steps = test_max_num_problems
eval_config.num_skills = test_max_skill_num
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
global_step = tf.Variable(0, name="global_step", trainable=False)
# decay learning rate
starter_learning_rate = FLAGS.learning_rate
learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step, 3000, 0.96, staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=FLAGS.epsilon)
with tf.Session(config=session_conf) as session:
initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
# training model
with tf.variable_scope("model", reuse=None, initializer=initializer):
m = StudentModel(is_training=True, config=config)
# testing model
with tf.variable_scope("model", reuse=True, initializer=initializer):
mtest = StudentModel(is_training=False, config=eval_config)
grads_and_vars = optimizer.compute_gradients(m.cost)
grads_and_vars = [(tf.clip_by_norm(g, FLAGS.max_grad_norm), v)
for g, v in grads_and_vars if g is not None]
grads_and_vars = [(add_gradient_noise(g), v) for g, v in grads_and_vars]
train_op = optimizer.apply_gradients(grads_and_vars, name="train_op", global_step=global_step)
session.run(tf.initialize_all_variables())
# log hyperparameters to results file
with open(result_file_path, "a+") as f:
print("Writing hyperparameters into file")
f.write("Hidden layer size: %d \n" % (FLAGS.hidden_size))
f.write("Dropout rate: %.3f \n" % (FLAGS.keep_prob))
f.write("Batch size: %d \n" % (FLAGS.batch_size))
f.write("Max grad norm: %d \n" % (FLAGS.max_grad_norm))
saver = tf.train.Saver(tf.all_variables())
for i in range(config.max_max_epoch):
rmse, auc, r2 = run_epoch(session, m, train_students, train_op, verbose=True)
print("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (i + 1, rmse, auc, r2))
if((i+1) % FLAGS.evaluation_interval == 0):
print ("Save variables to disk")
save_path = saver.save(session, model_name)
print("*"*10)
print("Start to test model....")
rmse, auc, r2 = run_epoch(session, mtest, test_students, tf.no_op())
print("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % (i+1, rmse, auc, r2))
with open(result_file_path, "a+") as f:
f.write("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % ((i+1)/2, rmse, auc, r2))
f.write("\n")
print("*"*10)
if __name__ == "__main__":
tf.app.run()
|
JSLBen/KnowledgeTracing
|
reference_py/student_model.py
|
Python
|
mit
| 13,536
|
[
"Gaussian"
] |
7a22f6619f87ed49636dec6d65de0b1bf422e5d7e31f297fd3cccdddf891d6f9
|
#!/usr/bin/env python3
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import chigger
reader = chigger.exodus.ExodusReader('../input/simple_diffusion_out.e')
result = chigger.exodus.ExodusResult(reader, variable='aux', viewport=[0,0,0.5,1], opacity=0.1,
range=[-1, 1])
cbar = chigger.exodus.ExodusColorBar(result)
result.update()
sample = chigger.exodus.ExodusResultLineSampler(result, point1=[0,0,0.5], resolution=100)
sample.update()
x = sample[0].getDistance()
y = sample[0].getSample('aux')
line = chigger.graphs.Line(x, y, width=4, label='probe')
graph = chigger.graphs.Graph(line, viewport=[0.5,0,1,1])
graph.setOptions('xaxis', lim=[0, 1.4])
graph.setOptions('yaxis', lim=[0, 1.])
window = chigger.RenderWindow(result, cbar, sample, graph, size=[800, 400], test=True)
window.write('line_sample_elem.png')
window.start()
|
nuclear-wizard/moose
|
python/chigger/tests/line_sample/line_sample_elem.py
|
Python
|
lgpl-2.1
| 1,166
|
[
"MOOSE"
] |
ed5efa658ab7d57981e22f9cafabf9ba42f65d77d2d99e6655eeae5da126a19a
|
#!/usr/bin/env python
"""This is the setup.py file for the GRR client.
This is just a meta-package which pulls in the minimal requirements to create a
full grr server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import os
import shutil
import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.sdist import sdist
GRR_NO_MAKE_UI_FILES_VAR = "GRR_NO_MAKE_UI_FILES"
# TODO: Fix this import once support for Python 2 is dropped.
# pylint: disable=g-import-not-at-top
if sys.version_info.major == 2:
import ConfigParser as configparser
else:
import configparser
# pylint: enable=g-import-not-at-top
def find_data_files(source, ignore_dirs=None):
ignore_dirs = ignore_dirs or []
result = []
for directory, dirnames, files in os.walk(source):
dirnames[:] = [d for d in dirnames if d not in ignore_dirs]
files = [os.path.join(directory, x) for x in files]
result.append((directory, files))
return result
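# Hedged illustration (the paths are hypothetical, not taken from this package):
# for a tree such as grr_response_server/gui/templates/base.html and
# grr_response_server/gui/templates/mail/alert.html,
# find_data_files("grr_response_server/gui/templates") would return a
# setuptools-style list like
#   [("grr_response_server/gui/templates",
#     ["grr_response_server/gui/templates/base.html"]),
#    ("grr_response_server/gui/templates/mail",
#     ["grr_response_server/gui/templates/mail/alert.html"])]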
def make_ui_files():
"""Builds necessary assets from sources."""
# Install node_modules, but keep package(-lock).json frozen.
# Using shell=True, otherwise npm is not found in a nodeenv-built
# virtualenv on Windows.
subprocess.check_call(
"npm ci", shell=True, cwd="grr_response_server/gui/static")
subprocess.check_call(
"npm run gulp compile", shell=True, cwd="grr_response_server/gui/static")
def get_config():
"""Get INI parser with version.ini data."""
ini_path = os.path.join(THIS_DIRECTORY, "version.ini")
if not os.path.exists(ini_path):
ini_path = os.path.join(THIS_DIRECTORY, "../../version.ini")
if not os.path.exists(ini_path):
raise RuntimeError("Couldn't find version.ini")
config = configparser.SafeConfigParser()
config.read(ini_path)
return config
IGNORE_GUI_DIRS = ["node_modules", "tmp"]
THIS_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# If you run setup.py from the root GRR dir you get very different results since
# setuptools uses the MANIFEST.in from the root dir. Make sure we are in the
# package dir.
os.chdir(THIS_DIRECTORY)
VERSION = get_config()
class Develop(develop):
"""Build developer version (pip install -e)."""
user_options = develop.user_options + [
# TODO: This has to be `bytes` on Python 2. Remove this `str`
# call once support for Python 2 is dropped.
(str("no-make-ui-files"), None, "Don't build UI JS/CSS bundles."),
]
def initialize_options(self):
self.no_make_ui_files = None
develop.initialize_options(self)
def run(self):
# pip install -e . --install-option="--no-make-ui-files" passes the
# --no-make-ui-files flag to all GRR dependencies, which doesn't make
# much sense. Checking an environment variable to have an easy way
# to set the flag for grr-response-server package only.
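    # Hedged usage sketch (not part of the original comments): either of the
    # following would skip the UI bundle build during a development install, e.g.
    #   GRR_NO_MAKE_UI_FILES=1 pip install -e .
    # or
    #   pip install -e . --install-option="--no-make-ui-files"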
if (not self.no_make_ui_files and
not os.environ.get(GRR_NO_MAKE_UI_FILES_VAR)):
make_ui_files()
develop.run(self)
class Sdist(sdist):
"""Build sdist."""
user_options = sdist.user_options + [
# TODO: This has to be `bytes` on Python 2. Remove this `str`
# call once support for Python 2 is dropped.
(str("no-make-ui-files"), None, "Don't build UI JS/CSS bundles."),
]
def initialize_options(self):
self.no_make_ui_files = None
sdist.initialize_options(self)
def run(self):
    # For consistency, respect the GRR_NO_MAKE_UI_FILES variable just like
    # the Develop command does.
if (not self.no_make_ui_files and
not os.environ.get(GRR_NO_MAKE_UI_FILES_VAR)):
make_ui_files()
sdist.run(self)
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
sdist_version_ini = os.path.join(base_dir, "version.ini")
if os.path.exists(sdist_version_ini):
os.unlink(sdist_version_ini)
shutil.copy(
os.path.join(THIS_DIRECTORY, "../../version.ini"), sdist_version_ini)
data_files = list(
itertools.chain(
find_data_files("grr_response_server/checks"),
find_data_files("grr_response_server/databases/mysql_migrations"),
find_data_files("grr_response_server/gui/templates"),
find_data_files(
"grr_response_server/gui/static", ignore_dirs=IGNORE_GUI_DIRS),
find_data_files(
"grr_response_server/gui/local/static",
ignore_dirs=IGNORE_GUI_DIRS),
# TODO: This has to be `bytes` on Python 2. Remove this
# `str` call once support for Python 2 is dropped.
[str("version.ini")],
))
setup_args = dict(
name="grr-response-server",
version=VERSION.get("Version", "packageversion"),
description="The GRR Rapid Response Server.",
license="Apache License, Version 2.0",
maintainer="GRR Development Team",
maintainer_email="grr-dev@googlegroups.com",
url="https://github.com/google/grr",
cmdclass={
"sdist": Sdist,
"develop": Develop
},
packages=find_packages(),
entry_points={
"console_scripts": [
"grr_console = "
"grr_response_server.distro_entry:Console",
"grr_api_shell_raw_access = "
"grr_response_server.distro_entry:ApiShellRawAccess",
"grr_config_updater = "
"grr_response_server.distro_entry:ConfigUpdater",
"grr_frontend = "
"grr_response_server.distro_entry:GrrFrontend",
"grr_server = "
"grr_response_server.distro_entry:GrrServer",
"grr_worker = "
"grr_response_server.distro_entry:Worker",
"grr_admin_ui = "
"grr_response_server.distro_entry:AdminUI",
]
},
install_requires=[
"google-api-python-client==1.7.11",
"google-auth==1.6.3",
"google-cloud-bigquery==1.20.0",
"grr-api-client==%s" % VERSION.get("Version", "packagedepends"),
"grr-response-client-builder==%s" %
VERSION.get("Version", "packagedepends"),
"grr-response-core==%s" % VERSION.get("Version", "packagedepends"),
"Jinja2==2.10.3",
"pexpect==4.7.0",
"portpicker==1.3.1",
"prometheus_client==0.7.1",
"pyjwt==1.7.1",
"pyopenssl==19.0.0", # https://github.com/google/grr/issues/704
"python-crontab==2.3.9",
"python-debian==0.1.36",
"Werkzeug==0.16.0",
],
extras_require={
# This is an optional component. Install to get MySQL data
# store support: pip install grr-response[mysqldatastore]
# When installing from .deb, the python-mysqldb package is used as
# dependency instead of this pip dependency. This is because we run into
# incompatibilities between the system mysqlclient/mariadbclient and the
# Python library otherwise. Thus, this version has to be equal to the
# python-mysqldb version of the system we support. This is currently
# Ubuntu Xenial, see https://packages.ubuntu.com/xenial/python-mysqldb
#
# NOTE: the Xenial-provided 1.3.7 version is not properly Python 3
# compatible. Versions 1.3.13 or later are API-compatible with 1.3.7
# when running on Python 2 and work correctly on Python 3. However,
# they don't have Python 2 wheels released, which makes GRR packaging
# for Python 2 much harder if one of these versions is used.
#
# TODO(user): Find a way to use the latest mysqlclient version
# in GRR server DEB.
"mysqldatastore": ["mysqlclient==1.3.10"],
},
data_files=data_files)
setup(**setup_args)
|
dunkhong/grr
|
grr/server/setup.py
|
Python
|
apache-2.0
| 7,769
|
[
"GULP"
] |
de368e7d4df4383106f1842132681a7131073458a5187f461ca5ee4c8abc5eb5
|
# calculate CO dimer
from ase import *
from hotbit import Hotbit
for SCC in [False,True]:
calc=Hotbit(SCC=SCC,width=0.05,txt='test.cal')
atoms=Atoms('CO',positions=[(0,0,0),(1.13,0,0)],cell=(10,10,10),pbc=False)
atoms.center(vacuum=10) #not necessary
atoms.set_calculator(calc)
print(atoms.get_potential_energy())
|
pekkosk/hotbit
|
hotbit/doc/examples/CO.py
|
Python
|
gpl-2.0
| 336
|
[
"ASE"
] |
647305caddf234d2bc4e08a3e6d197f187e899b9bbf4e7df11071c02bdd74879
|
"""
ALFAFA "source" .sav file
"""
import idlsave
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import pyspeckit
import numpy as np
def read_alfalfa_file(filename):
"""
Read the contents of a whole ALFALFA source file
"""
savfile = idlsave.read(filename)
source_dict = dict([(name,read_alfalfa_source(savfile,ii)) for ii,name in
enumerate(savfile.src.SRCNAME)])
return source_dict
def read_alfalfa_source(savfile, sourcenumber=0):
"""
Create an Observation Block class for a single source in an ALFALFA
'source' IDL save file
"""
if type(savfile) is str and ".src" in savfile:
savfile = idlsave.read(savfile)
src = savfile.src[sourcenumber]
header = pyfits.Header()
splist = []
for spectra in src.spectra:
for par in spectra.dtype.names:
try:
len(spectra[par])
except TypeError:
header[par[:8]] = spectra[par]
# assume ALFALFA spectra in Kelvin
header['BUNIT'] = 'K'
xarr = pyspeckit.spectrum.units.SpectroscopicAxis(spectra.velarr,
refX=header['RESTFRQ'], refX_unit='MHz', unit='km/s')
data = np.ma.masked_where(np.isnan(spectra.spec), spectra.spec)
sp = pyspeckit.Spectrum(xarr=xarr, data=data, header=header)
# the Source has a baseline presubtracted (I think)
sp.baseline.baselinepars = spectra.baseline[::-1]
sp.baseline.subtracted = True
sp.baseline.order = len(spectra.baseline)
sp.baseline.basespec = np.poly1d(sp.baseline.baselinepars)(np.arange(xarr.shape[0]))
        # There are multiple components in each Spectrum, but I think they are not independent
sp.specfit.fittype = 'gaussian'
sp.specfit.fitter = sp.specfit.Registry.multifitters['gaussian']
modelpars = zip(spectra['STON'],spectra['VCEN'],spectra['WIDTH'])
modelerrs = zip(spectra['STON'],spectra['VCENERR_STAT'],spectra['WIDTHERR'])
sp.specfit.modelpars = modelpars[0] # only use the first fit
sp.specfit.fitter.mpp = modelpars[0]
sp.specfit.modelerrs = modelerrs[0]
sp.specfit.fitter.mpperr = modelerrs[0]
sp.specfit._full_model()
splist.append(sp)
return pyspeckit.ObsBlock(splist)
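# Hedged usage sketch (the file and source names below are placeholders, not
# shipped with pyspeckit):
#   sources = read_alfalfa_file('field.src.sav')   # dict keyed by SRCNAME
#   obsblock = sources['HI123456+123456']          # a pyspeckit.ObsBlock
# Each ObsBlock bundles one Spectrum per 'spectra' entry of the IDL source, with
# the presubtracted baseline and the first Gaussian fit already attached.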
|
bsipocz/pyspeckit
|
pyspeckit/spectrum/readers/alfalfa.py
|
Python
|
mit
| 2,331
|
[
"Gaussian"
] |
ce81ff803a09435b0b1f9f9c205a7169c9fa4c270d28a99ea006f2eaf45368a1
|
# -*- coding:utf-8; mode:python -*-
"""
Each time Sunny and Johnny take a trip to the Ice Cream Parlor, they pool together m dollars for ice cream. On any given day, the parlor offers a line of n flavors. Each flavor, i, is numbered sequentially with a unique ID number from 1 to n and has a cost, cost[i], associated with it.
Given the value of m and the cost of each flavor for t trips to the Ice Cream Parlor, help Sunny and Johnny choose two distinct flavors such that they spend their entire pool of money (m) during each visit. For each trip to the parlor, print the ID numbers for the two types of ice cream that Sunny and Johnny purchase as two space-separated integers on a new line. You must print the smaller ID first and the larger ID second.
Note: Two ice creams having unique IDs i and j may have the same cost (i.e., cost[i] == cost[j]).
Input Format
The first line contains an integer, t, denoting the number of trips to the ice cream parlor. The subsequent lines describe all of Sunny and Johnny's trips to the parlor; each trip is described as follows:
The first line contains m.
The second line contains n.
The third line contains n space-separated integers denoting the cost of each respective flavor. The i-th integer corresponds to the cost, cost[i], for the ice cream with ID number i (where 1 <= i <= n).
Constraints
It is guaranteed that there will always be a unique solution.
Output Format
Print two space-separated integers denoting the respective ID numbers for the two distinct flavors they choose to purchase, where the smaller ID is printed first and the larger ID is printed second. Recall that each ice cream flavor has a unique ID number in the inclusive range from 1 to n.
Sample Input
2
4
5
1 4 5 3 2
4
4
2 2 4 3
Sample Output
1 4
1 2
Explanation
Sunny and Johnny make the following two trips to the parlor:
The first time, they pool together m = 4 dollars. There are five flavors available that day and flavors 1 and 4 have a total cost of 1 + 3 = 4. Thus, we print 1 4 on a new line.
The second time, they pool together m = 4 dollars. There are four flavors available that day and flavors 1 and 2 have a total cost of 2 + 2 = 4. Thus, we print 1 2 on a new line.
"""
from io import StringIO
from pythonpractice.binarysearchnonrecursive import binary_search
def print_choices(stream, money, flavor_prices):
flavor_prices.sort()
for index1, price in enumerate(flavor_prices):
complement = money - price
index2 = binary_search(flavor_prices, complement)
if index2 != -1:
if index1 > index2:
stream.write("%s %s\n" % (index2 + 2, index1 + 1))
else:
stream.write("%s %s\n" % (index1 + 1, index2 + 2))
break
def parse_input(input_stream, output_stream):
t = int(input_stream.readline().strip())
for a0 in range(t):
m = int(input_stream.readline().strip())
n = int(input_stream.readline().strip())
a = list(map(int, input_stream.readline().strip().split(' ')))
print_choices(output_stream, m, a)
def main():
input_stream = StringIO(test_input)
output_stream = StringIO()
parse_input(input_stream, output_stream)
try:
        assert(output_stream.getvalue() == test_output)
print("Passed")
except AssertionError:
print("Failed")
print("Expected: ")
print(test_output)
print()
print("Actual: ")
print(output_stream.getvalue())
test_input = """10
100
3
5 75 25
200
7
150 24 79 50 88 345 3
8
8
2 1 9 4 4 56 90 3
542
100
230 863 916 585 981 404 316 785 88 12 70 435 384 778 887 755 740 337 86 92 325 422 815 650 920 125 277 336 221 847 168 23 677 61 400 136 874 363 394 199 863 997 794 587 124 321 212 957 764 173 314 422 927 783 930 282 306 506 44 926 691 568 68 730 933 737 531 180 414 751 28 546 60 371 493 370 527 387 43 541 13 457 328 227 652 365 430 803 59 858 538 427 583 368 375 173 809 896 370 789
789
65
591 955 829 805 312 83 764 841 12 744 104 773 627 306 731 539 349 811 662 341 465 300 491 423 569 405 508 802 500 747 689 506 129 325 918 606 918 370 623 905 321 670 879 607 140 543 997 530 356 446 444 184 787 199 614 685 778 929 819 612 737 344 471 645 726
101
5
722 600 905 54 47
35
51
210 582 622 337 626 580 994 299 386 274 591 921 733 851 770 300 380 225 223 861 851 525 206 714 985 82 641 270 5 777 899 820 995 397 43 973 191 885 156 9 568 256 659 673 85 26 631 293 151 143 423
890
62
286 461 830 216 539 44 989 749 340 51 505 178 50 305 341 292 415 40 239 950 404 965 29 972 536 922 700 501 730 430 630 293 557 542 598 795 28 344 128 461 368 683 903 744 430 648 290 135 437 336 152 698 570 3 827 901 796 682 391 693 161 145
163
90
22 391 140 874 75 339 439 638 158 519 570 484 607 538 459 758 608 784 26 792 389 418 682 206 232 432 537 492 232 219 3 517 460 271 946 418 741 31 874 840 700 58 686 952 293 848 55 82 623 850 619 380 359 479 48 863 813 797 463 683 22 285 522 60 472 948 234 971 517 494 218 857 261 115 238 290 158 326 795 978 364 116 730 581 174 405 575 315 101 99
295
17
678 227 764 37 956 982 118 212 177 597 519 968 866 121 771 343 561
"""
test_output = """2 3
1 4
4 5
29 46
11 56
4 5
40 46
16 35
55 74
7 9
"""
if __name__ == "__main__":
    main()
|
wkmanire/StructuresAndAlgorithms
|
pythonpractice/hackerrank/icecreamparlor.py
|
Python
|
gpl-3.0
| 5,208
|
[
"VisIt"
] |
252d45072953f962ba2b275dbc8bead13a6b77a940e30e342215c9006249ff8b
|
#!/usr/bin/python
"""Test of column header output."""
from macaroon.playback import *
import utils
sequence = MacroSequence()
sequence.append(KeyComboAction("End"))
sequence.append(KeyComboAction("<Shift>Right"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Down"))
sequence.append(KeyComboAction("Return"))
sequence.append(PauseAction(3000))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("<Shift>ISO_Left_Tab"))
sequence.append(utils.AssertPresentationAction(
"1. Bug number column header",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Bug number table column header'",
" VISIBLE: 'Bug number table column header', cursor=1",
"SPEECH OUTPUT: 'Bug number table column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"2. Severity column header",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Severity table column header'",
" VISIBLE: 'Severity table column header', cursor=1",
"SPEECH OUTPUT: 'Severity table column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Right"))
sequence.append(utils.AssertPresentationAction(
"3. Description column header",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Description table column header'",
" VISIBLE: 'Description table column header', cursor=1",
"SPEECH OUTPUT: 'Description table column header'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"4. Enter table",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Fixed? column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: '< > Fixed? 60482 Normal scrollab', cursor=1",
"SPEECH OUTPUT: 'Fixed? check box not checked 60482 Normal scrollable notebooks and hidden tabs image'"]))
# GtkTreeView swallows this keypress (for all users; not just Orca users).
sequence.append(KeyComboAction("Left"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"5. Normal cell",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"SPEECH OUTPUT: 'Severity column header Normal'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"6. Normal cell basic Where Am I",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"SPEECH OUTPUT: 'table Severity table cell Normal column 3 of 6 row 1 of 14'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(KeyComboAction("KP_Enter"))
sequence.append(utils.AssertPresentationAction(
"7. Normal cell detailed Where Am I",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Severity column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: 'Normal scrollable notebooks and ', cursor=1",
"SPEECH OUTPUT: 'table Severity table cell Normal column 3 of 6 row 1 of 14'",
"SPEECH OUTPUT: 'table Severity table cell Normal column 3 of 6 row 1 of 14 Fixed? check box not checked 60482 Normal scrollable notebooks and hidden tabs'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Left"))
sequence.append(utils.AssertPresentationAction(
"8. 60482 cell",
["BRAILLE LINE: 'gtk3-demo application GtkListStore demo frame table Bug number column header < > Fixed? 60482 Normal scrollable notebooks and hidden tabs '",
" VISIBLE: '60482 Normal scrollable notebook', cursor=1",
"SPEECH OUTPUT: 'Bug number column header 60482'"]))
sequence.append(KeyComboAction("<Alt>F4"))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
|
pvagner/orca
|
test/keystrokes/gtk3-demo/role_column_header.py
|
Python
|
lgpl-2.1
| 4,640
|
[
"ORCA"
] |
e02f2246ae026622142ce033d0e781631bd72ea16cf716675729b65c4e51b0ca
|
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from cdo import Cdo
from pycmbs.data import Data
import tempfile as tempfile
import copy
import glob
import os
import sys
import numpy as np
from pycmbs.benchmarking import preprocessor
from pycmbs.benchmarking.utils import get_T63_landseamask, get_temporary_directory
from pycmbs.benchmarking.models.model_basic import *
class JSBACH_BOT(Model):
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, **kwargs):
super(JSBACH_BOT, self).__init__(filename, dic_variables, name=name, **kwargs)
self.experiment = experiment
self.shift_lon = shift_lon
self.type = 'JSBACH_BOT'
self._unique_name = self._get_unique_name()
def _get_unique_name(self):
"""
get unique name from model and experiment
@return: string with unique combination of models and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_albedo_data(self, interval='season'):
"""
get albedo data for JSBACH
returns Data object
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
v = 'var176'
filename = self.data_dir + 'data/model1/' + self.experiment + '_echam6_BOT_mm_1979-2006_albedo_yseasmean.nc'
ls_mask = get_T63_landseamask(self.shift_lon)
albedo = Data(filename, v, read=True,
label='MPI-ESM albedo ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data)
return albedo
def get_tree_fraction(self, interval='season'):
"""
todo implement this for data from a real run !!!
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
ls_mask = get_T63_landseamask(self.shift_lon)
filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_forest_shrub.nc'
v = 'var12'
tree = Data(filename, v, read=True,
label='MPI-ESM tree fraction ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data, start_time=pl.num2date(pl.datestr2num('2001-01-01')), stop_time=pl.num2date(pl.datestr2num('2001-12-31')))
return tree
def get_grass_fraction(self, interval='season'):
"""
todo implement this for data from a real run !!!
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
ls_mask = get_T63_landseamask(self.shift_lon)
filename = '/home/m300028/shared/dev/svn/trstools-0.0.1/lib/python/pyCMBS/framework/external/vegetation_benchmarking/VEGETATION_COVER_BENCHMARKING/example/historical_r1i1p1-LR_1850-2005_grass_crop_pasture_2001.nc'
v = 'var12'
grass = Data(filename, v, read=True,
label='MPI-ESM tree fraction ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
#shift_lon=shift_lon,
mask=ls_mask.data.data, start_time=pl.num2date(pl.datestr2num('2001-01-01')), stop_time=pl.num2date(pl.datestr2num('2001-12-31')), squeeze=True)
return grass
def get_surface_shortwave_radiation_down(self, interval='season'):
"""
get surface shortwave incoming radiation data for JSBACH
returns Data object
"""
if interval != 'season':
raise ValueError('Other temporal sampling than SEASON not supported yet for JSBACH BOT files, sorry')
v = 'var176'
y1 = '1979-01-01'
y2 = '2006-12-31'
rawfilename = self.data_dir + 'data/model/' + self.experiment + '_echam6_BOT_mm_1979-2006_srads.nc'
if not os.path.exists(rawfilename):
return None
#--- read data
cdo = pyCDO(rawfilename, y1, y2)
if interval == 'season':
seasfile = cdo.seasmean()
del cdo
print 'seasfile: ', seasfile
cdo = pyCDO(seasfile, y1, y2)
filename = cdo.yseasmean()
else:
raise ValueError('Invalid interval option %s ' % interval)
#--- read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon)
#--- read SIS data
sis = Data(filename, v, read=True,
label='MPI-ESM SIS ' + self.experiment, unit='-', lat_name='lat', lon_name='lon',
#shift_lon=shift_lon,
mask=ls_mask.data.data)
return sis
def get_rainfall_data(self, interval='season'):
"""
get rainfall data for JSBACH
returns Data object
"""
if interval == 'season':
pass
else:
raise ValueError('Invalid value for interval: %s' % interval)
#/// PREPROCESSING: seasonal means ///
s_start_time = str(self.start_time)[0:10]
s_stop_time = str(self.stop_time)[0:10]
filename1 = self.data_dir + self.experiment + '_echam6_BOT_mm_1980_sel.nc'
tmp = pyCDO(filename1, s_start_time, s_stop_time).seldate()
tmp1 = pyCDO(tmp, s_start_time, s_stop_time).seasmean()
filename = pyCDO(tmp1, s_start_time, s_stop_time).yseasmean()
#/// READ DATA ///
#1) land / sea mask
ls_mask = get_T63_landseamask(self.shift_lon)
#2) precipitation data
try:
v = 'var4'
rain = Data(filename, v, read=True, scale_factor=86400.,
label='MPI-ESM ' + self.experiment, unit='mm/day', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data)
except:
v = 'var142'
rain = Data(filename, v, read=True, scale_factor=86400.,
label='MPI-ESM ' + self.experiment, unit='mm/day', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data)
return rain
class JSBACH_RAW2(Model):
"""
Class for RAW JSBACH model output
works on the real raw output
"""
#def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, model_dict=None, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, input_format='grb', raw_outdata='outdata/jsbach/', **kwargs):
"""
The assignment of certain variables to different input streams is done in the routine
get_jsbach_data_generic()
Parameters
----------
input_format : str
specifies file format of input data
['nc','grb']
"""
super(JSBACH_RAW2, self).__init__(filename, dic_variables, name=name, **kwargs)
self.experiment = experiment
self.shift_lon = shift_lon
#self.get_data()
self.type = 'JSBACH_RAW2'
self.input_format = input_format
assert self.input_format in ['nc', 'grb']
self.raw_outdata = raw_outdata
self._unique_name = self._get_unique_name()
# do preprocessing of streams (only needed once!) ---
self.files = {}
self._preproc_streams()
#~ self.model_dict = copy.deepcopy(model_dict)
self.model = 'JSBACH'
def _get_filenames_jsbach_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_main_mm_*.' + self.input_format
def _get_filenames_veg_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_veg_mm_*.' + self.input_format
def _get_filenames_land_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_land_mm_*.' + self.input_format
def _get_filenames_surf_stream(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_surf_mm_*.' + self.input_format
def _get_filenames_albedo_VIS(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_VIS_albedo.' + self.input_format
def _get_filenames_albedo_NIR(self):
return self.data_dir + self.raw_outdata + self.experiment + '_jsbach_mm_*_NIR_albedo.' + self.input_format
def _get_filenames_echam_BOT(self):
return self.data_dir + self.raw_outdata + '../echam6/' + self.experiment + '_echam6_BOT_mm_*.sz'
def _preproc_streams(self):
"""
It is assumed that the standard JSBACH postprocessing scripts have been applied.
Thus monthly mean data is available for each stream and code tables still need to be applied.
This routine does the following:
1) merge all times from individual (monthly mean) output files
2) assign codetables to work with proper variable names
3) aggregate data from tiles to gridbox values
"""
print 'Preprocessing JSBACH raw data streams (may take a while) ...'
cdo = Cdo()
# jsbach stream
print ' JSBACH stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_', dir=get_temporary_directory()) # temporary file
#~ print self.data_dir
#~ print self.raw_outdata
#~ print 'Files: ', self._get_filenames_jsbach_stream()
#~ stop
if len(glob.glob(self._get_filenames_jsbach_stream())) > 0: # check if input files existing at all
                print 'Merging the following files:', self._get_filenames_jsbach_stream()
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_jsbach_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
print 'Outfile: ', outfile
#~ os.remove(tmp)
print 'Temporary name: ', tmp
self.files.update({'jsbach': outfile})
# veg stream
print ' VEG stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_veg_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_veg.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_veg_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_veg_stream())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_veg_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'veg': outfile})
# veg land
print ' LAND stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_land_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_land.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_land_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_land_stream())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_land_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'land': outfile})
# surf stream
print ' SURF stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_surf_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_jsbach_surf.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_jsbach_surf_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_surf_stream())) > 0: # check if input files existing at all
print glob.glob(self._get_filenames_surf_stream())
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_surf_stream())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'surf': outfile})
# ECHAM BOT stream
print ' BOT stream ...'
outfile = get_temporary_directory() + self.experiment + '_echam6_echam_mm_full.nc'
if os.path.exists(outfile):
pass
else:
codetable = self.data_dir + 'log/' + self.experiment + '_echam6_echam.codes'
tmp = tempfile.mktemp(suffix='.nc', prefix=self.experiment + '_echam6_echam_', dir=get_temporary_directory()) # temporary file
if len(glob.glob(self._get_filenames_echam_BOT())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=tmp, input=self._get_filenames_echam_BOT())
if os.path.exists(codetable):
cdo.monmean(options='-f nc', output=outfile, input='-setpartab,' + codetable + ' ' + tmp) # monmean needed here, as otherwise interface does not work
else:
cdo.monmean(options='-f nc', output=outfile, input=tmp) # monmean needed here, as otherwise interface does not work
os.remove(tmp)
self.files.update({'echam': outfile})
# ALBEDO file
# albedo files as preprocessed by a script of Thomas
print ' ALBEDO VIS stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_VIS_albedo_mm_full.nc'
if os.path.exists(outfile):
pass
else:
if len(glob.glob(self._get_filenames_albedo_VIS())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_VIS())
self.files.update({'albedo_vis': outfile})
print ' ALBEDO NIR stream ...'
outfile = get_temporary_directory() + self.experiment + '_jsbach_NIR_albedo_mm_full.nc'
if os.path.exists(outfile):
pass
else:
if len(glob.glob(self._get_filenames_albedo_NIR())) > 0: # check if input files existing at all
cdo.mergetime(options='-f nc', output=outfile, input=self._get_filenames_albedo_NIR())
self.files.update({'albedo_nir': outfile})
def _get_unique_name(self):
"""
get unique name from model and experiment
@return: string with unique combination of models and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_albedo_data(self, interval='season'):
"""
calculate albedo as ratio of upward and downwelling fluxes
first the monthly mean fluxes are used to calculate the albedo,
This routine uses the definitions of the routines how to
read upward and downward fluxes
"""
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
#~ tmpdict = copy.deepcopy(kwargs)
#~ print self.dic_vars
routine_up = self.dic_vars['surface_upward_flux']
routine_down = self.dic_vars['sis']
#sw_down = self.get_surface_shortwave_radiation_down(interval=interval, **kwargs)
cmd = 'sw_down = self.' + routine_down
exec(cmd)
#sw_up = self.get_surface_shortwave_radiation_up(interval=interval, **kwargs)
cmd = 'sw_up = self.' + routine_up
exec(cmd)
# climatological mean
alb = sw_up[0].div(sw_down[0])
alb.label = self.experiment + ' albedo'
alb.unit = '-'
# original data
alb_org = sw_up[1][2].div(sw_down[1][2])
alb_org.label = self.experiment + ' albedo'
alb_org.unit = '-'
retval = (alb_org.time, alb_org.fldmean(), alb_org)
return alb, retval
def get_albedo_data_vis(self, interval='season', **kwargs):
"""
This routine retrieves the JSBACH albedo information for VIS
it requires a preprocessing with a script that aggregates from tile
to box values!
Parameters
----------
interval : str
['season','monthly']
"""
#~ tmpdict = copy.deepcopy(self.model_dict['albedo_vis'])
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_albedo_data_nir(self, interval='season', **kwargs):
"""
This routine retrieves the JSBACH albedo information for VIS
it requires a preprocessing with a script that aggregates from tile
to box values!
Parameters
----------
interval : str
['season','monthly']
"""
#~ tmpdict = copy.deepcopy(self.model_dict['albedo_nir'])
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_up(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_surface_shortwave_radiation_down(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_rainfall_data(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_temperature_2m(self, interval='season', **kwargs):
return self.get_jsbach_data_generic(interval=interval, **kwargs)
def get_jsbach_data_generic(self, interval='season', **kwargs):
"""
unique parameters are:
filename - file basename
variable - name of the variable as the short_name in the netcdf file
kwargs is a dictionary with keys for each model. Then a dictionary with properties follows
"""
if not self.type in kwargs.keys():
print 'WARNING: it is not possible to get data using generic function, as method missing: ', self.type, kwargs.keys()
return None
print self.type
print kwargs
locdict = kwargs[self.type]
# read settings and details from the keyword arguments
# no defaults; everything should be explicitely specified in either the config file or the dictionaries
varname = locdict.pop('variable')
units = locdict.pop('unit', 'Unit not specified')
lat_name = locdict.pop('lat_name', 'lat')
lon_name = locdict.pop('lon_name', 'lon')
#model_suffix = locdict.pop('model_suffix')
#model_prefix = locdict.pop('model_prefix')
file_format = locdict.pop('file_format')
scf = locdict.pop('scale_factor')
valid_mask = locdict.pop('valid_mask')
custom_path = locdict.pop('custom_path', None)
thelevel = locdict.pop('level', None)
target_grid = self._actplot_options['targetgrid']
interpolation = self._actplot_options['interpolation']
if self.type != 'JSBACH_RAW2':
print self.type
raise ValueError('Invalid data format here!')
# define from which stream of JSBACH data needs to be taken for specific variables
if varname in ['swdown_acc', 'swdown_reflect_acc']:
filename1 = self.files['jsbach']
elif varname in ['precip_acc']:
filename1 = self.files['land']
elif varname in ['temp2']:
filename1 = self.files['echam']
elif varname in ['var14']: # albedo vis
filename1 = self.files['albedo_vis']
elif varname in ['var15']: # albedo NIR
filename1 = self.files['albedo_nir']
else:
print varname
raise ValueError('Unknown variable type for JSBACH_RAW2 processing!')
force_calc = False
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
#/// PREPROCESSING ///
cdo = Cdo()
s_start_time = str(self.start_time)[0:10]
s_stop_time = str(self.stop_time)[0:10]
#1) select timeperiod and generate monthly mean file
if target_grid == 't63grid':
gridtok = 'T63'
else:
gridtok = 'SPECIAL_GRID'
file_monthly = filename1[:-3] + '_' + s_start_time + '_' + s_stop_time + '_' + gridtok + '_monmean.nc' # target filename
file_monthly = get_temporary_directory() + os.path.basename(file_monthly)
sys.stdout.write('\n *** Model file monthly: %s\n' % file_monthly)
if not os.path.exists(filename1):
print 'WARNING: File not existing: ' + filename1
return None
cdo.monmean(options='-f nc', output=file_monthly, input='-' + interpolation + ',' + target_grid + ' -seldate,' + s_start_time + ',' + s_stop_time + ' ' + filename1, force=force_calc)
sys.stdout.write('\n *** Reading model data... \n')
sys.stdout.write(' Interval: ' + interval + '\n')
#2) calculate monthly or seasonal climatology
if interval == 'monthly':
mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
elif interval == 'season':
mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
else:
raise ValueError('Unknown temporal interval. Can not perform preprocessing! ')
if not os.path.exists(mdata_clim_file):
return None
#3) read data
if interval == 'monthly':
thetime_cylce = 12
elif interval == 'season':
thetime_cylce = 4
else:
print interval
raise ValueError('Unsupported interval!')
mdata = Data(mdata_clim_file, varname, read=True, label=self.model, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel, time_cycle=thetime_cylce)
mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self.model + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, level=thelevel, time_cycle=thetime_cylce)
mdata.std = mdata_std.data.copy()
del mdata_std
mdata_N = Data(mdata_N_file, varname, read=True, label=self.model + ' std', unit='-', lat_name=lat_name, lon_name=lon_name, shift_lon=False, scale_factor=scf, level=thelevel)
mdata.n = mdata_N.data.copy()
del mdata_N
        # ensure that climatology always starts with January, therefore set date and then sort
mdata.adjust_time(year=1700, day=15) # set arbitrary time for climatology
mdata.timsort()
#4) read monthly data
mdata_all = Data(file_monthly, varname, read=True, label=self.model, unit=units, lat_name=lat_name, lon_name=lon_name, shift_lon=False, time_cycle=12, scale_factor=scf, level=thelevel)
mdata_all.adjust_time(day=15)
if target_grid == 't63grid':
mdata._apply_mask(get_T63_landseamask(False, area=valid_mask))
mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask))
else:
tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid)
mdata._apply_mask(tmpmsk)
mdata_all._apply_mask(tmpmsk)
del tmpmsk
mdata_mean = mdata_all.fldmean()
# return data as a tuple list
retval = (mdata_all.time, mdata_mean, mdata_all)
del mdata_all
return mdata, retval
class JSBACH_SPECIAL(JSBACH_RAW2):
"""
special class for more flexible reading of JSBACH input data
it allows to specify the input format and the directory of the input data
in case that you use a different setup, it is probably easiest to
just copy this class and make the required adaptations.
"""
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, model_dict=None, input_format='nc', raw_outdata='', **kwargs):
super(JSBACH_SPECIAL, self).__init__(filename, dic_variables, experiment, name=name, shift_lon=shift_lon, model_dict=model_dict, input_format=input_format, raw_outdata=raw_outdata, **kwargs)
class xxxxxxxxJSBACH_RAW(Model):
"""
Class for RAW JSBACH model output
works on manually preprocessed already concatenated data
"""
def __init__(self, filename, dic_variables, experiment, name='', shift_lon=False, intervals='monthly', **kwargs):
super(JSBACH_RAW, self).__init__(filename, dic_variables, name=name, intervals=intervals, **kwargs)
        print('WARNING: This model class should be deprecated as it contains a lot of hardcoded dependencies and is only intermediate')
        #TODO: deprecate this class
stop
self.experiment = experiment
self.shift_lon = shift_lon
self.type = 'JSBACH_RAW'
self._unique_name = self._get_unique_name()
def _get_unique_name(self):
"""
get unique name from model and experiment
"""
return self.name.replace(' ', '') + '-' + self.experiment.replace(' ', '')
def get_temperature_2m(self, interval='monthly', **kwargs):
"""
get surface temperature (2m) from JSBACH model results
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
variable = 'temp2'
rawfile = self.data_dir + self.experiment + '_echam6_echam_' + variable + '_ALL.nc'
files = glob.glob(rawfile)
if len(files) != 1:
print 'Inputfiles: ', files
raise ValueError('Something went wrong: Invalid number of input files!')
else:
rawfile = files[0]
mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
def get_albedo_data(self, interval='monthly', **kwargs):
"""
calculate albedo as ratio of upward and downwelling fluxes
first the monthly mean fluxes are used to calculate the albedo,
"""
# read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon) # TODO make this more flexible
if self.start_time is None:
raise ValueError('Start time needs to be specified')
if self.stop_time is None:
raise ValueError('Stop time needs to be specified')
Fd = self.get_surface_shortwave_radiation_down(**kwargs)
Fu = self.get_surface_shortwave_radiation_up(**kwargs)
if Fu is None:
print 'File not existing for UPWARD flux!: ', self.name
return None
else:
Fu_i = Fu[0]
if Fu_i is None:
return None
if Fd is None:
print 'File not existing for DOWNWARD flux!: ', self.name
return None
else:
Fd_i = Fd[0]
if Fd_i is None:
return None
lab = Fu_i.label
        # albedo for the chosen interval is calculated as the ratio of the mean fluxes in that interval (e.g. season, months)
Fu_i.div(Fd_i, copy=False)
del Fd_i # Fu contains now the albedo
Fu_i._apply_mask(ls_mask.data)
#albedo for monthly data (needed for global mean plots )
Fu_m = Fu[1][2]
del Fu
Fd_m = Fd[1][2]
del Fd
Fu_m.div(Fd_m, copy=False)
del Fd_m
Fu_m._apply_mask(ls_mask.data)
Fu_m._set_valid_range(0., 1.)
Fu_m.label = lab + ' albedo'
Fu_i.label = lab + ' albedo'
Fu_m.unit = '-'
Fu_i.unit = '-'
# center dates of months
Fu_m.adjust_time(day=15)
Fu_i.adjust_time(day=15)
# return data as a tuple list
retval = (Fu_m.time, Fu_m.fldmean(), Fu_m)
return Fu_i, retval
#-----------------------------------------------------------------------
def _do_preprocessing(self, rawfile, varname, s_start_time, s_stop_time, interval='monthly', force_calc=False, valid_mask='global', target_grid='t63grid'):
"""
perform preprocessing
* selection of variable
* temporal subsetting
"""
cdo = Cdo()
if not os.path.exists(rawfile):
            print('File does not exist! %s' % rawfile)
return None, None
# calculate monthly means
file_monthly = get_temporary_directory() + os.sep + os.path.basename(rawfile[:-3]) + '_' + varname + '_' + s_start_time + '_' + s_stop_time + '_mm.nc'
if (force_calc) or (not os.path.exists(file_monthly)):
cdo.monmean(options='-f nc', output=file_monthly, input='-seldate,' + s_start_time + ',' + s_stop_time + ' ' + '-selvar,' + varname + ' ' + rawfile, force=force_calc)
else:
pass
if not os.path.exists(file_monthly):
raise ValueError('Monthly preprocessing did not work! %s ' % file_monthly)
# calculate monthly or seasonal climatology
if interval == 'monthly':
mdata_clim_file = file_monthly[:-3] + '_ymonmean.nc'
mdata_sum_file = file_monthly[:-3] + '_ymonsum.nc'
mdata_N_file = file_monthly[:-3] + '_ymonN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_ymonstd.nc'
cdo.ymonmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.ymonsum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.ymonstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
elif interval == 'season':
mdata_clim_file = file_monthly[:-3] + '_yseasmean.nc'
mdata_sum_file = file_monthly[:-3] + '_yseassum.nc'
mdata_N_file = file_monthly[:-3] + '_yseasN.nc'
mdata_clim_std_file = file_monthly[:-3] + '_yseasstd.nc'
cdo.yseasmean(options='-f nc -b 32', output=mdata_clim_file, input=file_monthly, force=force_calc)
cdo.yseassum(options='-f nc -b 32', output=mdata_sum_file, input=file_monthly, force=force_calc)
cdo.yseasstd(options='-f nc -b 32', output=mdata_clim_std_file, input=file_monthly, force=force_calc)
cdo.div(options='-f nc -b 32', output=mdata_N_file, input=mdata_sum_file + ' ' + mdata_clim_file, force=force_calc) # number of samples
else:
raise ValueError('Unknown temporal interval. Can not perform preprocessing!')
if not os.path.exists(mdata_clim_file):
return None
# read data
        if interval == 'monthly':
            time_cycle = 12
        elif interval == 'season':
            time_cycle = 4
        else:
            print(interval)
            raise ValueError('Unsupported interval!')
        mdata = Data(mdata_clim_file, varname, read=True, label=self.name, shift_lon=False, time_cycle=time_cycle, lat_name='lat', lon_name='lon')
        mdata_std = Data(mdata_clim_std_file, varname, read=True, label=self.name + ' std', unit='-', shift_lon=False, time_cycle=time_cycle, lat_name='lat', lon_name='lon')
mdata.std = mdata_std.data.copy()
del mdata_std
        mdata_N = Data(mdata_N_file, varname, read=True, label=self.name + ' N', shift_lon=False, lat_name='lat', lon_name='lon')
mdata.n = mdata_N.data.copy()
del mdata_N
# ensure that climatology always starts with January, therefore set date and then sort
mdata.adjust_time(year=1700, day=15) # set arbitrary time for climatology
mdata.timsort()
#4) read monthly data
mdata_all = Data(file_monthly, varname, read=True, label=self.name, shift_lon=False, time_cycle=12, lat_name='lat', lon_name='lon')
mdata_all.adjust_time(day=15)
        # mask_antarctica masks everything below 60 degrees S.
        # here we only mask Antarctica if only LAND points shall be used
if valid_mask == 'land':
mask_antarctica = True
elif valid_mask == 'ocean':
mask_antarctica = False
else:
mask_antarctica = False
if target_grid == 't63grid':
mdata._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
mdata_all._apply_mask(get_T63_landseamask(False, area=valid_mask, mask_antarctica=mask_antarctica))
else:
tmpmsk = get_generic_landseamask(False, area=valid_mask, target_grid=target_grid, mask_antarctica=mask_antarctica)
mdata._apply_mask(tmpmsk)
mdata_all._apply_mask(tmpmsk)
del tmpmsk
mdata_mean = mdata_all.fldmean()
# return data as a tuple list
retval = (mdata_all.time, mdata_mean, mdata_all)
del mdata_all
return mdata, retval
def get_surface_shortwave_radiation_down(self, interval='monthly', **kwargs):
"""
get surface shortwave incoming radiation data for JSBACH
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
rawfile = self.data_dir + self.experiment + '_jsbach_' + y1[0: 4] + '_' + y2[0: 4] + '.nc'
mdata, retval = self._do_preprocessing(rawfile, 'swdown_acc', y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
#-----------------------------------------------------------------------
def get_surface_shortwave_radiation_up(self, interval='monthly', **kwargs):
"""
get surface shortwave upward radiation data for JSBACH
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO: move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
rawfile = self.data_dir + self.experiment + '_jsbach_' + y1[0: 4] + '_' + y2[0: 4] + '.nc'
mdata, retval = self._do_preprocessing(rawfile, 'swdown_reflect_acc', y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
#-----------------------------------------------------------------------
def get_model_data_generic(self, interval='monthly', **kwargs):
"""
This is only a wrapper to redirect to individual functions
for the JSBACH_RAW class
Currently only the usage for rainfall is supported!
"""
        # HACK: only a wrapper, should be deprecated
        raise ValueError('Rainfall analysis not working yet!')
        # unreachable until the exception above is removed:
        # self.get_rainfall_data(interval=interval, **kwargs)
def get_rainfall_data(self, interval='monthly', **kwargs):
"""
get surface rainfall data for JSBACH
uses already preprocessed data where the convective and
advective rainfall has been merged
Parameters
----------
interval : str
specifies the aggregation interval. Possible options: ['season','monthly']
"""
locdict = kwargs[self.type]
y1 = '1980-01-01' # TODO : move this to the JSON dictionary or some parameter file
y2 = '2010-12-31'
variable = 'aprc'
rawfile = self.data_dir + self.experiment + '_echam6_echam_*_precipitation.nc'
files = glob.glob(rawfile)
if len(files) != 1:
            print('Inputfiles: %s' % files)
raise ValueError('Something went wrong: Invalid number of input files!')
else:
rawfile = files[0]
mdata, retval = self._do_preprocessing(rawfile, variable, y1, y2, interval=interval, valid_mask=locdict['valid_mask'])
return mdata, retval
#-----------------------------------------------------------------------
def get_gpp_data(self, interval='season'):
"""
get surface GPP data for JSBACH
todo temporal aggregation of data --> or leave it to the user!
"""
cdo = Cdo()
v = 'var167'
y1 = str(self.start_time)[0:10]
y2 = str(self.stop_time)[0:10]
rawfilename = self.data_dir + 'data/model/' + self.experiment + '_' + y1[0:4] + '-' + y2[0:4] + '.nc'
times_in_file = int(''.join(cdo.ntime(input=rawfilename)))
if interval == 'season':
if times_in_file != 4:
tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
cdo.yseasmean(options='-f nc -b 32 -r ', input='-selvar,' + v + ' ' + rawfilename, output=tmp_file[:-3] + '_yseasmean.nc')
rawfilename = tmp_file[:-3] + '_yseasmean.nc'
if interval == 'monthly':
if times_in_file != 12:
tmp_file = get_temporary_directory() + os.path.basename(rawfilename)
cdo.ymonmean(options='-f nc -b 32 -r ', input='-selvar,' + v + ' ' + rawfilename, output=tmp_file[:-3] + '_ymonmean.nc')
rawfilename = tmp_file[:-3] + '_ymonmean.nc'
if not os.path.exists(rawfilename):
return None
filename = rawfilename
#--- read land-sea mask
ls_mask = get_T63_landseamask(self.shift_lon)
        #--- read GPP data
gpp = Data4D(filename, v, read=True,
label=self.experiment + ' ' + v, unit='gC m-2 a-1', lat_name='lat', lon_name='lon',
shift_lon=self.shift_lon,
mask=ls_mask.data.data, scale_factor=3600. * 24. * 30. / 0.083
)
return gpp.sum_data4D()
#-----------------------------------------------------------------------
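# --- Hedged example (not part of the original module; added for illustration). ---
# `_do_preprocessing` above builds a monthly climatology by chaining CDO
# operators (selvar/seldate -> monmean -> ymonmean). The sketch below shows the
# same pipeline as a minimal standalone helper using the cdo bindings already
# used in this file; file names and arguments are illustrative assumptions.
def _example_monthly_climatology_sketch(rawfile, varname, t1, t2, outfile):
    """Select a variable, subset it in time and derive a monthly climatology."""
    cdo = Cdo()
    monthly = outfile[:-3] + '_mm.nc'
    # monthly means of the selected variable within [t1, t2]
    cdo.monmean(options='-f nc',
                input='-seldate,%s,%s -selvar,%s %s' % (t1, t2, varname, rawfile),
                output=monthly)
    # multi-year monthly climatology (12 time steps)
    cdo.ymonmean(options='-f nc -b 32', input=monthly, output=outfile)
    return outfile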
|
pygeo/pycmbs
|
pycmbs/benchmarking/models/mpi_esm.py
|
Python
|
mit
| 41,720
|
[
"NetCDF"
] |
b4b2489ef2ae5f1756aa475d6e9f1c9c9f4b634a58ea1b7fb1d287dcfd89eba3
|
"""Traits-based GUI for head-MRI coregistration"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import warnings
import numpy as np
from scipy.spatial.distance import cdist
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, warning, OK, YES, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo,
Directory, Enum, Float, HasTraits,
HasPrivateTraits, Instance, Int, on_trait_change,
Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
EnumEditor, Handler, Label, TextEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
NoButtons = SceneEditor = trait_wraith
from ..coreg import bem_fname, trans_fname
from ..forward import prepare_bem_model
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles, Transform)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_point_cloud_error)
from ..utils import get_subjects_dir, logger
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import (set_mne_root, trans_wildcard, InstSource,
SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
_testing_mode)
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(InstSource, ())
# parameters
grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(1, label="Right (X)")
scale_y = Float(1, label="Anterior (Y)")
scale_z = Float(1, label="Superior (Z)")
rot_x = Float(0, label="Right (X)")
rot_y = Float(0, label="Anterior (Y)")
rot_z = Float(0, label="Superior (Z)")
trans_x = Float(0, label="Right (X)")
trans_y = Float(0, label="Anterior (Y)")
trans_z = Float(0, label="Superior (Z)")
prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
"after scaling the MRI")
# secondary to parameters
scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
'scale_z'])
has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
desc="Required fiducials data is present.")
has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])
# MRI dependent
mri_origin = Property(depends_on=['mri.nasion', 'scale'],
desc="Coordinates of the scaled MRI's nasion.")
# target transforms
mri_scale_trans = Property(depends_on=['scale'])
head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
'rot_z', 'trans_x', 'trans_y',
'trans_z', 'mri_origin'],
desc="Transformaiton of the head shape to "
"match the scaled MRI.")
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
'subject_has_bem'])
can_save = Property(Bool, depends_on=['head_mri_trans'])
raw_subject = Property(depends_on='hsp.inst_fname', desc="Subject guess "
"based on the raw file name.")
# transformed geometry
processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
transformed_mri_points = Property(depends_on=['processed_mri_points',
'mri_scale_trans'])
transformed_hsp_points = Property(depends_on=['hsp.points',
'head_mri_trans'])
transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
transformed_mri_nasion = Property(depends_on=['mri.nasion',
'mri_scale_trans'])
transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
'head_mri_trans'])
transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])
# fit properties
lpa_distance = Property(depends_on=['transformed_mri_lpa',
'transformed_hsp_lpa'])
nasion_distance = Property(depends_on=['transformed_mri_nasion',
'transformed_hsp_nasion'])
rpa_distance = Property(depends_on=['transformed_mri_rpa',
'transformed_hsp_rpa'])
point_distance = Property(depends_on=['transformed_mri_points',
'transformed_hsp_points'])
# fit property info strings
fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
'rpa_distance'])
points_eval_str = Property(depends_on='point_distance')
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.head_mri_trans != np.eye(4))
@cached_property
def _get_has_pts_data(self):
has = (np.any(self.mri.points) and np.any(self.hsp.points))
return has
@cached_property
def _get_has_fid_data(self):
has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
return has
@cached_property
def _get_scale(self):
if self.n_scale_params == 0:
return np.array(1)
elif self.n_scale_params == 1:
return np.array(self.scale_x)
else:
return np.array([self.scale_x, self.scale_y, self.scale_z])
@cached_property
def _get_mri_scale_trans(self):
if np.isscalar(self.scale) or self.scale.ndim == 0:
if self.scale == 1:
return np.eye(4)
else:
s = self.scale
return scaling(s, s, s)
else:
return scaling(*self.scale)
@cached_property
def _get_mri_origin(self):
if np.isscalar(self.scale) and self.scale == 1:
return self.mri.nasion
else:
return self.mri.nasion * self.scale
@cached_property
def _get_head_mri_trans(self):
if not self.has_fid_data:
return np.eye(4)
# move hsp so that its nasion becomes the origin
x, y, z = -self.hsp.nasion[0]
trans = translation(x, y, z)
# rotate hsp by rotation parameters
rot = rotation(self.rot_x, self.rot_y, self.rot_z)
trans = np.dot(rot, trans)
# move hsp by translation parameters
transl = translation(self.trans_x, self.trans_y, self.trans_z)
trans = np.dot(transl, trans)
# move the hsp origin(/nasion) to the MRI's nasion
x, y, z = self.mri_origin[0]
tgt_mri_trans = translation(x, y, z)
trans = np.dot(tgt_mri_trans, trans)
return trans
@cached_property
def _get_processed_mri_points(self):
if self.grow_hair:
if len(self.mri.norms):
if self.n_scale_params == 0:
scaled_hair_dist = self.grow_hair / 1000
else:
scaled_hair_dist = self.grow_hair / self.scale / 1000
points = self.mri.points.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += self.mri.norms[hair] * scaled_hair_dist
return points
else:
error(None, "Norms missing form bem, can't grow hair")
self.grow_hair = 0
return self.mri.points
@cached_property
def _get_transformed_mri_points(self):
points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_scale_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_scale_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_scale_trans, self.mri.rpa)
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.head_mri_trans, self.hsp.points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.head_mri_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.head_mri_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.head_mri_trans, self.hsp.rpa)
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_point_distance(self):
if (len(self.transformed_hsp_points) == 0 or
len(self.transformed_mri_points) == 0):
return
dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
'euclidean')
dists = np.min(dists, 1)
return dists
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
return txt
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
av_dist = np.mean(self.point_distance)
return "Average Points Error: %.1f mm" % (av_dist * 1000)
def _get_raw_subject(self):
# subject name guessed based on the inst file name
if '_' in self.hsp.inst_fname:
subject, _ = self.hsp.inst_fname.split('_', 1)
if not subject:
subject = None
else:
subject = None
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance=0, reset=False):
"""Exclude head shape points that are far away from the MRI head
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance. Previously excluded points are still excluded unless
reset=True is specified. A value of distance <= 0 excludes nothing.
reset : bool
Reset the filter before calculating new omission (default is
False).
"""
distance = float(distance)
if reset:
logger.info("Coregistration: Reset excluded head shape points")
with warnings.catch_warnings(record=True): # Traits None comp
self.hsp.points_filter = None
if distance <= 0:
return
# find the new filter
hsp_pts = self.transformed_hsp_points
mri_pts = self.transformed_mri_points
point_distance = _point_cloud_error(hsp_pts, mri_pts)
new_sub_filter = point_distance <= distance
n_excluded = np.sum(new_sub_filter == False) # noqa
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# combine the new filter with the previous filter
old_filter = self.hsp.points_filter
if old_filter is None:
new_filter = new_sub_filter
else:
new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
new_filter[old_filter] = new_sub_filter
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = new_filter
def fit_auricular_points(self):
"Find rotation to fit LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
translate=False, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = rot
def fit_fiducials(self):
"Find rotation and translation to fit all 3 fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z)
est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:]
def fit_hsp_points(self):
"Find rotation to fit head shapes"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
tgt_pts *= self.scale
tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
x0=x0)
self.rot_x, self.rot_y, self.rot_z = rot
def fit_scale_auricular_points(self):
"Find rotation and MRI scaling based on LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
scale=1, x0=x0, out='params')
self.scale_x = 1. / x[3]
self.rot_x, self.rot_y, self.rot_z = x[:3]
def fit_scale_fiducials(self):
"Find translation, rotation and scaling based on the three fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z, 1. / self.scale_x,)
est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
scale=1, x0=x0, out='params')
self.scale_x = 1. / est[6]
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:6]
def fit_scale_hsp_points(self):
"Find MRI scaling and rotation to match head shape points"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
if self.n_scale_params == 1:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=1, x0=x0)
self.scale_x = 1. / est[3]
else:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
1. / self.scale_y, 1. / self.scale_z)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=3, x0=x0)
self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
self.rot_x, self.rot_y, self.rot_z = est[:3]
def get_scaling_job(self, subject_to):
desc = 'Scaling %s' % subject_to
func = scale_mri
args = (self.mri.subject, subject_to, self.scale)
kwargs = dict(overwrite=True, subjects_dir=self.mri.subjects_dir)
return (desc, func, args, kwargs)
def get_prepare_bem_model_job(self, subject_to):
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_name = 'inner_skull-bem'
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name=bem_name)
if not os.path.exists(bem_file):
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name='(.+-bem)')
bem_dir, bem_file = os.path.split(pattern)
m = None
bem_file_pattern = re.compile(bem_file)
for name in os.listdir(bem_dir):
m = bem_file_pattern.match(name)
if m is not None:
break
if m is None:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name='*-bem')
err = ("No bem file found; looking for files matching "
"%s" % pattern)
                error(None, err)
                return
bem_name = m.group(1)
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name=bem_name)
# job
desc = 'mne_prepare_bem_model for %s' % subject_to
func = prepare_bem_model
args = (bem_file,)
kwargs = {}
return (desc, func, args, kwargs)
def load_trans(self, fname):
"""Load the head-mri transform from a fif file
Parameters
----------
fname : str
File path.
"""
info = read_trans(fname)
head_mri_trans = info['trans']
self.set_trans(head_mri_trans)
def reset(self):
"""Reset all the parameters affecting the coregistration"""
        self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
'trans_x', 'trans_y', 'trans_z'))
def set_trans(self, head_mri_trans):
"""Set rotation and translation parameters from a transformation matrix
Parameters
----------
head_mri_trans : array, shape (4, 4)
Transformation matrix from head to MRI space.
"""
x, y, z = -self.mri_origin[0]
mri_tgt_trans = translation(x, y, z)
head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
x, y, z = self.hsp.nasion[0]
src_hsp_trans = translation(x, y, z)
src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
x, y, z = src_tgt_trans[:3, 3]
self.rot_x = rot_x
self.rot_y = rot_y
self.rot_z = rot_z
self.trans_x = x
self.trans_y = y
self.trans_z = z
def save_trans(self, fname):
"""Save the head-mri transform as a fif file
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
write_trans(fname, Transform('head', 'mri', self.head_mri_trans))
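# --- Hedged example (not part of the original module; added for illustration). ---
# The CoregModel docstring describes the head -> MRI transform as a chain of
# elementary transforms. This standalone helper mirrors `_get_head_mri_trans`
# for explicit inputs; the argument names are assumptions used for illustration.
def _example_head_mri_trans_sketch(hsp_nasion, mri_nasion, rot_xyz, trans_xyz):
    """Compose a 4x4 head -> MRI transform from nasions and fit parameters."""
    x, y, z = -np.asarray(hsp_nasion)
    trans = translation(x, y, z)                    # move head nasion to origin
    trans = np.dot(rotation(*rot_xyz), trans)       # user rotation about the nasion
    trans = np.dot(translation(*trans_xyz), trans)  # user translation
    x, y, z = np.asarray(mri_nasion)
    return np.dot(translation(x, y, z), trans)      # move origin to the MRI nasion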
class CoregFrameHandler(Handler):
"""Handler that checks for unfinished processes before closing its window
"""
def close(self, info, is_ok):
if info.object.queue.unfinished_tasks:
information(None, "Can not close the window while saving is still "
"in progress. Please wait until all MRIs are "
"processed.", "Saving Still in Progress")
return False
else:
return True
class CoregPanel(HasPrivateTraits):
model = Instance(CoregModel)
# parameters
reset_params = Button(label='Reset')
grow_hair = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
scale_step = Float(1.01)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(0.01)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(0.001)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_fid_data = DelegatesTo('model')
has_pts_data = DelegatesTo('model')
# fitting with scaling
fits_hsp_points = Button(label='Fit Head Shape')
fits_fid = Button(label='Fit Fiducials')
fits_ap = Button(label='Fit LPA/RPA')
# fitting without scaling
fit_hsp_points = Button(label='Fit Head Shape')
fit_fid = Button(label='Fit Fiducials')
fit_ap = Button(label='Fit LPA/RPA')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save As...")
load_trans = Button
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
error = Str('')
view = View(VGroup(Item('grow_hair', show_label=True),
Item('n_scale_params', label='MRI Scaling',
style='custom', show_label=True,
editor=EnumEditor(values={0: '1:No Scaling',
1: '2:1 Param',
3: '3:3 Params'},
cols=3)),
VGrid(Item('scale_x', editor=laggy_float_editor,
show_label=True, tooltip="Scale along "
"right-left axis",
enabled_when='n_scale_params > 0'),
Item('scale_x_dec',
enabled_when='n_scale_params > 0'),
Item('scale_x_inc',
enabled_when='n_scale_params > 0'),
Item('scale_step', tooltip="Scaling step",
enabled_when='n_scale_params > 0'),
Item('scale_y', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_y_dec',
enabled_when='n_scale_params > 1'),
Item('scale_y_inc',
enabled_when='n_scale_params > 1'),
Label('(Step)'),
Item('scale_z', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_z_dec',
enabled_when='n_scale_params > 1'),
Item('scale_z_inc',
enabled_when='n_scale_params > 1'),
show_labels=False, columns=4),
HGroup(Item('fits_hsp_points',
enabled_when='n_scale_params',
tooltip="Rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance from each digitizer point to the "
"closest MRI point"),
Item('fits_ap',
enabled_when='n_scale_params == 1',
tooltip="While leaving the nasion in "
"place, rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance of the two auricular points"),
Item('fits_fid',
enabled_when='n_scale_params == 1',
tooltip="Move and rotate the digitizer "
"head shape, and scale the MRI so as to "
"minimize the distance of the three "
"fiducials."),
show_labels=False),
'_',
Label("Translation:"),
VGrid(Item('trans_x', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"right-left axis"),
'trans_x_dec', 'trans_x_inc',
Item('trans_step', tooltip="Movement step"),
Item('trans_y', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_y_dec', 'trans_y_inc',
Label('(Step)'),
Item('trans_z', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_z_dec', 'trans_z_inc',
show_labels=False, columns=4),
Label("Rotation:"),
VGrid(Item('rot_x', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"right-left axis"),
'rot_x_dec', 'rot_x_inc',
Item('rot_step', tooltip="Rotation step"),
Item('rot_y', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_y_dec', 'rot_y_inc',
Label('(Step)'),
Item('rot_z', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_z_dec', 'rot_z_inc',
show_labels=False, columns=4),
# buttons
HGroup(Item('fit_hsp_points',
enabled_when='has_pts_data',
tooltip="Rotate the head shape (around the "
"nasion) so as to minimize the distance "
"from each head shape point to its closest "
"MRI point"),
Item('fit_ap', enabled_when='has_fid_data',
tooltip="Try to match the LPA and the RPA, "
"leaving the Nasion in place"),
Item('fit_fid', enabled_when='has_fid_data',
tooltip="Move and rotate the head shape so "
"as to minimize the distance between the "
"MRI and head shape fiducials"),
Item('load_trans', enabled_when='has_fid_data'),
show_labels=False),
'_',
Item('fid_eval_str', style='readonly'),
Item('points_eval_str', style='readonly'),
'_',
HGroup(Item('prepare_bem_model'),
Label("Run mne_prepare_bem_model"),
show_labels=False,
enabled_when='can_prepare_bem_model'),
HGroup(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if "
"scaling is enabled) the scaled MRI"),
Item('reset_params', tooltip="Reset all "
"coregistration parameters"),
show_labels=False),
Item('queue_feedback', style='readonly'),
Item('queue_current', style='readonly'),
Item('queue_len_str', style='readonly'),
show_labels=False),
kind='panel', buttons=[UndoButton])
def __init__(self, *args, **kwargs):
super(CoregPanel, self).__init__(*args, **kwargs)
# setup save worker
def worker():
while True:
desc, cmd, args, kwargs = self.queue.get()
self.queue_len -= 1
self.queue_current = 'Processing: %s' % desc
# task
try:
cmd(*args, **kwargs)
except Exception as err:
self.error = str(err)
res = "Error in %s"
else:
res = "Done: %s"
# finalize
self.queue_current = ''
self.queue_feedback = res % desc
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_src_pts(self):
return self.hsp_pts - self.hsp_fid[0]
@cached_property
def _get_src_fid(self):
return self.hsp_fid - self.hsp_fid[0]
@cached_property
def _get_tgt_origin(self):
return self.mri_fid[0] * self.scale
@cached_property
def _get_tgt_pts(self):
pts = self.mri_pts * self.scale
pts -= self.tgt_origin
return pts
@cached_property
def _get_tgt_fid(self):
fid = self.mri_fid * self.scale
fid -= self.tgt_origin
return fid
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _fit_ap_fired(self):
GUI.set_busy()
self.model.fit_auricular_points()
GUI.set_busy(False)
def _fit_fid_fired(self):
GUI.set_busy()
self.model.fit_fiducials()
GUI.set_busy(False)
def _fit_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_hsp_points()
GUI.set_busy(False)
def _fits_ap_fired(self):
GUI.set_busy()
self.model.fit_scale_auricular_points()
GUI.set_busy(False)
def _fits_fid_fired(self):
GUI.set_busy()
self.model.fit_scale_fiducials()
GUI.set_busy(False)
def _fits_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_scale_hsp_points()
GUI.set_busy(False)
def _n_scale_params_changed(self, new):
if not new:
return
# Make sure that MNE_ROOT environment variable is set
if not set_mne_root(True):
err = ("MNE_ROOT environment variable could not be set. "
"You will be able to scale MRIs, but the "
"mne_prepare_bem_model tool will fail. Please install "
"MNE.")
warning(None, err, "MNE_ROOT Not Set")
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _load_trans_fired(self):
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
subject = self.model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
self.model.load_trans(trans_file)
def _save_fired(self):
if self.n_scale_params:
subjects_dir = self.model.mri.subjects_dir
subject_from = self.model.mri.subject
subject_to = self.model.raw_subject or self.model.mri.subject
else:
subject_to = self.model.mri.subject
# ask for target subject
if self.n_scale_params:
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal')
if ui.result != True: # noqa
return
subject_to = mridlg.subject_to
# find bem file to run mne_prepare_bem_model
if self.can_prepare_bem_model and self.prepare_bem_model:
bem_job = self.model.get_prepare_bem_model_job(subject_to)
else:
bem_job = None
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file = trans_file + '.fif'
if os.path.exists(trans_file):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % trans_file, "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
self.model.save_trans(trans_file)
except Exception as e:
error(None, str(e), "Error Saving Trans File")
return
# save the scaled MRI
if self.n_scale_params:
job = self.model.get_scaling_job(subject_to)
self.queue.put(job)
self.queue_len += 1
if bem_job is not None:
self.queue.put(bem_job)
self.queue_len += 1
def _scale_x_dec_fired(self):
step = 1. / self.scale_step
self.scale_x *= step
def _scale_x_inc_fired(self):
self.scale_x *= self.scale_step
def _scale_x_changed(self, old, new):
if self.n_scale_params == 1:
self.scale_y = new
self.scale_z = new
def _scale_y_dec_fired(self):
step = 1. / self.scale_step
self.scale_y *= step
def _scale_y_inc_fired(self):
self.scale_y *= self.scale_step
def _scale_z_dec_fired(self):
step = 1. / self.scale_step
self.scale_z *= step
def _scale_z_inc_fired(self):
self.scale_z *= self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
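# --- Hedged example (not part of the original module; added for illustration). ---
# CoregPanel.__init__ above saves scaled MRIs on a daemon worker thread fed by
# a queue of (description, callable, args, kwargs) jobs. This is the same
# pattern reduced to its essentials; the function name is an assumption.
def _example_job_worker_sketch(job_queue):
    """Consume (desc, func, args, kwargs) jobs from a queue.Queue forever."""
    while True:
        desc, func, args, kwargs = job_queue.get()
        try:
            func(*args, **kwargs)
        finally:
            # mark the job as done even if it raised, so queue.join() can return
            job_queue.task_done()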
class NewMriDialog(HasPrivateTraits):
# Dialog to determine target subject name for a scaled MRI
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
width=500,
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, scene_width=-1):
"""Create a view for the CoregFrame
Parameters
----------
tabbed : bool
Combine the data source panel and the coregistration panel into a
single panel with tabs.
split : bool
Split the main panels with a movable splitter (good for QT4 but
unnecessary for wx backend).
scene_width : int
Specify a minimum width for the 3d scene (in pixels).
    Returns
    -------
view : traits View
View object for the CoregFrame.
"""
view_options = VGroup(Item('headview', style='custom'), 'view_options',
show_border=True, show_labels=False, label='View')
scene = VGroup(Item('scene', show_label=False,
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', width=500),
view_options)
data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
label="MRI Subject", show_border=True,
show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2,
values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup('hsp_always_visible',
Label("Always Show Head Shape Points"),
show_labels=False),
Item('fid_panel', style='custom'),
label="MRI Fiducials", show_border=True,
show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup(Item('distance', show_label=True),
'omit_points', 'reset_omit_points',
show_labels=False),
Item('omitted_info', style='readonly',
show_label=False),
label='Head Shape Source (Raw/Epochs/Evoked)',
show_border=True, show_labels=False),
show_labels=False, label="Data Source")
coreg_panel = VGroup(Item('coreg_panel', style='custom'),
label="Coregistration", show_border=True,
show_labels=False,
enabled_when="fid_panel.locked")
if split:
main_layout = 'split'
else:
main_layout = 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons)
return view
class ViewOptionsPanel(HasTraits):
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
view = View(VGroup(Item('mri_obj', style='custom', # show_border=True,
label="MRI Head Surface"),
Item('hsp_obj', style='custom', # show_border=True,
label="Head Shape Points")),
title="View Options")
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration
"""
model = Instance(CoregModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
coreg_panel = Instance(CoregPanel)
raw_src = DelegatesTo('model', 'hsp')
# Omit Points
distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
"head shape points from MRI in mm")
omit_points = Button(label='Omit Points', desc="Omit head shape points "
"for the purpose of the automatic coregistration "
"procedure.")
reset_omit_points = Button(label='Reset Omission', desc="Reset the "
"omission of head shape points to include all.")
omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
hsp_always_visible = Bool(False, label="Always Show Head Shape")
# visualization
hsp_obj = Instance(PointObject)
mri_obj = Instance(SurfaceObject)
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
view_options = Button(label="View Options")
picker = Instance(object)
view_options_panel = Instance(ViewOptionsPanel)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
return panel
def _coreg_panel_default(self):
panel = CoregPanel(model=self.model)
return panel
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def __init__(self, raw=None, subject=None, subjects_dir=None):
super(CoregFrame, self).__init__()
subjects_dir = get_subjects_dir(subjects_dir)
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if subject is not None:
self.model.mri.subject = subject
if raw is not None:
self.model.hsp.file = raw
@on_trait_change('scene.activated')
def _init_plot(self):
self.scene.disable_render = True
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
color = defaults['mri_color']
self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
color=color, tri=self.model.mri.tris,
scene=self.scene)
# on_trait_change was unreliable, so link it another way:
self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
mutual=False)
self.fid_panel.hsp_obj = self.mri_obj
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
point_scale=point_scale)
self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
point_scale=point_scale)
self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
point_scale=point_scale)
self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
# Digitizer Head Shape
color = defaults['hsp_point_color']
point_scale = defaults['hsp_points_scale']
p = PointObject(view='cloud', scene=self.scene, color=color,
point_scale=point_scale, resolution=5)
self.hsp_obj = p
self.model.hsp.sync_trait('points', p, mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
# Digitizer Fiducials
point_scale = defaults['hsp_fid_scale']
opacity = defaults['hsp_fid_opacity']
p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
point_scale=point_scale)
self.hsp_lpa_obj = p
self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
point_scale=point_scale)
self.hsp_nasion_obj = p
self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
point_scale=point_scale)
self.hsp_rpa_obj = p
self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
if not _testing_mode():
self.picker = on_pick(self.fid_panel._on_pick, type='cell')
self.headview.left = True
self.scene.disable_render = False
self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
hsp_obj=self.hsp_obj)
@cached_property
def _get_hsp_visible(self):
return self.hsp_always_visible or self.lock_fiducials
@cached_property
def _get_omitted_info(self):
if self.model.hsp.n_omitted == 0:
return "No points omitted"
elif self.model.hsp.n_omitted == 1:
return "1 point omitted"
else:
return "%i points omitted" % self.model.hsp.n_omitted
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(0, True)
@on_trait_change('model.mri.tris')
def _on_mri_src_change(self):
if self.mri_obj is None:
return
if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.mri.points
self.mri_obj.tri = self.model.mri.tris
self.mri_obj.plot()
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model.mri.fid_file')
def _on_fid_file_loaded(self):
if self.model.mri.fid_file:
self.fid_panel.locked = True
else:
self.fid_panel.locked = False
def _view_options_fired(self):
self.view_options_panel.edit_traits()
|
cmoutard/mne-python
|
mne/gui/_coreg_gui.py
|
Python
|
bsd-3-clause
| 53,969
|
[
"Mayavi"
] |
d959f1e6603fe24c25fffd01610ae4f3d845f4c5c930ec60b6d7f11ffcf262fa
|
from ase import Atoms
a = Atoms('H2', positions=[(0, 0, 0), (0, 0, 1.1)])
a.pbc[0] = 1
assert a.pbc.any()
assert not a.pbc.all()
a.pbc = 1
assert a.pbc.all()
a.cell = (1, 2, 3)
a.cell *= 2
a.cell[0, 0] = 3
assert not (a.cell.diagonal() - (3, 4, 6)).any()
|
grhawk/ASE
|
tools/ase/test/properties.py
|
Python
|
gpl-2.0
| 257
|
[
"ASE"
] |
57c93f1b712ece72971761e5123382df76b53c48b7a7f204e3f67b6141381d36
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Common test support for pymatgen test scripts.
This single module should provide all the common functionality for pymatgen
tests in a single location, so that test scripts can just import it and work
right away.
"""
import json
import tempfile
import unittest
from io import open
from pathlib import Path
import numpy.testing as nptu
from monty.dev import requires
from monty.json import MontyDecoder, MSONable
from monty.serialization import loadfn
from pymatgen.core import SETTINGS
from pymatgen.ext.matproj import MPRester
class PymatgenTest(unittest.TestCase):
"""
Extends unittest.TestCase with functions (taken from numpy.testing.utils)
that support the comparison of arrays.
"""
_multiprocess_shared_ = True
MODULE_DIR = Path(__file__).absolute().parent
STRUCTURES_DIR = MODULE_DIR / "structures"
try:
TEST_FILES_DIR = Path(SETTINGS["PMG_TEST_FILES_DIR"])
except KeyError:
import warnings
warnings.warn(
"It is recommended that you set the PMG_TEST_FILES_DIR environment variable explicity. "
"Now using a fallback location based on relative path from this module."
)
TEST_FILES_DIR = MODULE_DIR / ".." / ".." / "test_files"
"""
Dict for test structures to aid testing.
"""
TEST_STRUCTURES = {}
for fn in STRUCTURES_DIR.iterdir():
TEST_STRUCTURES[fn.name.rsplit(".", 1)[0]] = loadfn(str(fn))
@classmethod
def get_structure(cls, name):
"""
Get a structure from the template directories.
:param name: Name of a structure.
:return: Structure
"""
return cls.TEST_STRUCTURES[name].copy()
@classmethod
@requires(SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY needs to be set.")
def get_mp_structure(cls, mpid):
"""
Get a structure from MP.
:param mpid: Materials Project id.
:return: Structure
"""
m = MPRester()
return m.get_structure_by_material_id(mpid)
@staticmethod
def assertArrayAlmostEqual(actual, desired, decimal=7, err_msg="", verbose=True):
"""
Tests if two arrays are almost equal to a tolerance. The CamelCase
naming is so that it is consistent with standard unittest methods.
"""
return nptu.assert_almost_equal(actual, desired, decimal, err_msg, verbose)
@staticmethod
def assertDictsAlmostEqual(actual, desired, decimal=7, err_msg="", verbose=True):
"""
        Tests if two dicts are almost equal to a tolerance. The CamelCase
naming is so that it is consistent with standard unittest methods.
"""
for k, v in actual.items():
if k not in desired:
return False
v2 = desired[k]
if isinstance(v, dict):
pass_test = PymatgenTest.assertDictsAlmostEqual(
v, v2, decimal=decimal, err_msg=err_msg, verbose=verbose
)
if not pass_test:
return False
            elif isinstance(v, (list, tuple)):
                # assert_almost_equal returns None and raises on mismatch, so
                # translate an AssertionError into the boolean contract used here
                try:
                    nptu.assert_almost_equal(v, v2, decimal, err_msg, verbose)
                except AssertionError:
                    return False
elif isinstance(v, (int, float)):
PymatgenTest.assertAlmostEqual(v, v2) # pylint: disable=E1120
else:
assert v == v2
return True
@staticmethod
def assertArrayEqual(actual, desired, err_msg="", verbose=True):
"""
Tests if two arrays are equal. The CamelCase naming is so that it is
consistent with standard unittest methods.
"""
return nptu.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
@staticmethod
def assertStrContentEqual(actual, desired, err_msg="", verbose=True):
"""
Tests if two strings are equal, ignoring things like trailing spaces,
etc.
"""
lines1 = actual.split("\n")
lines2 = desired.split("\n")
if len(lines1) != len(lines2):
return False
failed = []
for l1, l2 in zip(lines1, lines2):
if l1.strip() != l2.strip():
failed.append("%s != %s" % (l1, l2))
return len(failed) == 0
def serialize_with_pickle(self, objects, protocols=None, test_eq=True):
"""
Test whether the object(s) can be serialized and deserialized with
pickle. This method tries to serialize the objects with pickle and the
protocols specified in input. Then it deserializes the pickle format
and compares the two objects with the __eq__ operator if
test_eq == True.
Args:
objects: Object or list of objects.
protocols: List of pickle protocols to test. If protocols is None,
HIGHEST_PROTOCOL is tested.
Returns:
Nested list with the objects deserialized with the specified
protocols.
"""
# Use the python version so that we get the traceback in case of errors
import pickle
from pymatgen.util.serialization import pmg_pickle_dump, pmg_pickle_load
# Build a list even when we receive a single object.
got_single_object = False
if not isinstance(objects, (list, tuple)):
got_single_object = True
objects = [objects]
if protocols is None:
# protocols = set([0, 1, 2] + [pickle.HIGHEST_PROTOCOL])
protocols = [pickle.HIGHEST_PROTOCOL]
        # This list will contain the objects deserialized with the different
        # protocols.
objects_by_protocol, errors = [], []
for protocol in protocols:
# Serialize and deserialize the object.
mode = "wb"
fd, tmpfile = tempfile.mkstemp(text="b" not in mode)
try:
with open(tmpfile, mode) as fh:
pmg_pickle_dump(objects, fh, protocol=protocol)
except Exception as exc:
errors.append("pickle.dump with protocol %s raised:\n%s" % (protocol, str(exc)))
continue
try:
with open(tmpfile, "rb") as fh:
new_objects = pmg_pickle_load(fh)
except Exception as exc:
errors.append("pickle.load with protocol %s raised:\n%s" % (protocol, str(exc)))
continue
# Test for equality
if test_eq:
for old_obj, new_obj in zip(objects, new_objects):
self.assertEqual(old_obj, new_obj)
# Save the deserialized objects and test for equality.
objects_by_protocol.append(new_objects)
if errors:
raise ValueError("\n".join(errors))
# Return nested list so that client code can perform additional tests.
if got_single_object:
return [o[0] for o in objects_by_protocol]
return objects_by_protocol
def assertMSONable(self, obj, test_if_subclass=True):
"""
Tests if obj is MSONable and tries to verify whether the contract is
fulfilled.
By default, the method tests whether obj is an instance of MSONable.
This check can be deactivated by setting test_if_subclass to False.
"""
if test_if_subclass:
self.assertIsInstance(obj, MSONable)
self.assertDictEqual(obj.as_dict(), obj.__class__.from_dict(obj.as_dict()).as_dict())
json.loads(obj.to_json(), cls=MontyDecoder)
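# --- Hedged example (not part of the original module; added for illustration). ---
# A typical test case subclasses PymatgenTest and combines the helpers above;
# the structure name "Si" is an assumption about what STRUCTURES_DIR provides.
class _ExampleStructureTestSketch(PymatgenTest):
    def test_round_trips(self):
        structure = self.get_structure("Si")
        # as_dict()/from_dict() round trip plus JSON decoding via MontyDecoder
        self.assertMSONable(structure)
        # pickle round trip with HIGHEST_PROTOCOL and __eq__ comparison
        self.serialize_with_pickle(structure)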
|
gmatteo/pymatgen
|
pymatgen/util/testing.py
|
Python
|
mit
| 7,709
|
[
"pymatgen"
] |
d7c35b26d923b75a8c37ea62ad16b90f6e3366bdf77f5c5846707cf4deb86067
|
""" Accounting class to stores network metrics gathered by perfSONARs.
Filled by "Accounting/Network" agent
"""
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
class Network(BaseAccountingType):
"""
    Accounting type to store network metrics gathered by perfSONARs.
"""
def __init__(self):
super(Network, self).__init__()
# IPv6 address has up to 45 chars
self.definitionKeyFields = [
("SourceIP", "VARCHAR(50)"),
("DestinationIP", "VARCHAR(50)"),
("SourceHostName", "VARCHAR(50)"),
("DestinationHostName", "VARCHAR(50)"),
("Source", "VARCHAR(50)"),
("Destination", "VARCHAR(50)"),
]
self.definitionAccountingFields = [
("Jitter", "FLOAT"),
("OneWayDelay", "FLOAT"),
("PacketLossRate", "TINYINT UNSIGNED"),
]
self.bucketsLength = [
(86400 * 3, 900), # <3d = 15m
(86400 * 8, 3600), # <1w+1d = 1h
(15552000, 86400), # >1w+1d <6m = 1d
(31104000, 604800), # >6m = 1w
]
self.checkType()
|
DIRACGrid/DIRAC
|
src/DIRAC/AccountingSystem/Client/Types/Network.py
|
Python
|
gpl-3.0
| 1,178
|
[
"DIRAC"
] |
d0e8376f23123d2b4af09ce823b4c574edaf2f76d82b1fd834378d4f0527b467
|
from datetime import timedelta
from django import forms
from edc_constants.constants import YES, NO, UNKNOWN, OTHER, NOT_APPLICABLE
from .constants import ONGOING
from .models import Enrollment, EntryToCare
from ba_namotswe.models.subject_visit import SubjectVisit
from dateutil.relativedelta import relativedelta
comparison_phrase = {
'gt': 'must be greater than',
'gte': 'must be greater than or equal to',
'lt': 'must be less than',
'lte': 'must be less than or equal to',
'ne': 'may not equal', }
class SimpleApplicableByAgeValidatorMixin:
def validate_applicable_by_age(self, field, op, age, subject_identifier=None, errmsg=None):
subject_identifier = subject_identifier or self.cleaned_data.get('subject_visit').subject_identifier
dob = Enrollment.objects.get(subject_identifier=subject_identifier).dob
age_delta = relativedelta(self.cleaned_data.get('subject_visit').previous_visit.visit_date, dob)
applicable = True
if self.cleaned_data.get(field):
applicable = False
if op == 'gt':
if age_delta.years > age:
applicable = True
elif op == 'gte':
if age_delta.years >= age:
applicable = True
elif op == 'lt':
if age_delta.years < age:
applicable = True
elif op == 'lte':
if age_delta.years <= age:
applicable = True
elif op == 'ne':
if age_delta.years != age:
applicable = True
elif op == 'eq':
if age_delta.years == age:
applicable = True
if not applicable and self.cleaned_data.get(field) != NOT_APPLICABLE:
raise forms.ValidationError({
field: [errmsg or ('Not applicable. Age {phrase} {age}y at previous visit. '
'Got {subject_age}y').format(
phrase=comparison_phrase.get(op),
age=age, subject_age=age_delta.years)]})
if applicable and self.cleaned_data.get(field) == NOT_APPLICABLE:
raise forms.ValidationError({
                field: [errmsg or ('Applicable. Age {phrase} {age}y at previous visit, so '
                                   '"not applicable" is not a valid answer. Got {subject_age}y').format(
phrase=comparison_phrase.get(op),
age=age, subject_age=age_delta.years)]})
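# Editor's sketch (hedged; 'pregnancy_risk' is an invented field name): a
# ModelForm mixing in the validator above could gate an adolescent-only
# question so it must be NOT_APPLICABLE unless the subject was at least 12
# years old at the previous visit:
#
#     class MyCrfForm(SimpleApplicableByAgeValidatorMixin, forms.ModelForm):
#         def clean(self):
#             cleaned_data = super().clean()
#             self.validate_applicable_by_age('pregnancy_risk', 'gte', 12)
#             return cleaned_data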
class SimpleYesNoValidationMixin:
def require_if_yes(self, yesno_field, required_field, required_msg=None, not_required_msg=None):
if self.cleaned_data.get(yesno_field) in [NO, UNKNOWN] and self.cleaned_data.get(required_field):
raise forms.ValidationError({
required_field: [not_required_msg or 'This field is not required based on previous answer.']})
elif self.cleaned_data.get(yesno_field) == YES and not self.cleaned_data.get(required_field):
raise forms.ValidationError({
required_field: [required_msg or 'This field is required based on previous answer.']})
class SimpleOtherSpecifyValidationMixin:
def require_if_other(self, other_field, specify_field, required_msg=None, not_required_msg=None):
if self.cleaned_data.get(other_field) != OTHER and self.cleaned_data.get(specify_field):
raise forms.ValidationError({
specify_field: [not_required_msg or 'This field is not required.']})
elif self.cleaned_data.get(other_field) == OTHER and not self.cleaned_data.get(specify_field):
raise forms.ValidationError({
specify_field: [required_msg or 'Specify answer for OTHER.']})
class SimpleStartStopDateValidationMixin:
def validate_start_stop_dates(self):
cleaned_data = self.cleaned_data
if not cleaned_data.get('started'):
raise forms.ValidationError({'started': 'Required.'})
if cleaned_data.get('status') != ONGOING and not cleaned_data.get('stopped'):
raise forms.ValidationError({'stopped': 'Required.'})
if cleaned_data.get('status') == ONGOING and cleaned_data.get('stopped'):
raise forms.ValidationError({'stopped': 'Leave blank.'})
        if cleaned_data.get('stopped'):
            if cleaned_data.get('started') - cleaned_data.get('stopped') == timedelta(days=0):
                raise forms.ValidationError({'stopped': 'Cannot be equal to started.'})
            if cleaned_data.get('started') - cleaned_data.get('stopped') > timedelta(days=0):
                raise forms.ValidationError({'stopped': 'Cannot be less than started.'})
class SimpleDateFieldValidatorMixin:
def validate_date_with_dob(self, field1, op, verbose_name=None, subject_identifier=None):
"""Validate that date is greater than subject's DoB."""
subject_identifier = subject_identifier or self.cleaned_data.get('subject_visit').subject_identifier
value2 = Enrollment.objects.get(subject_identifier=subject_identifier).dob
self.validate_dates(field1, op, value2=value2, verbose_name1=verbose_name, verbose_name2='DoB')
def validate_date_with_art_init(self, field1, op, verbose_name=None, subject_identifier=None):
"""Validate that date is greater than subject's ART initiation date."""
subject_identifier = subject_identifier or self.cleaned_data.get('subject_visit').subject_identifier
value2 = EntryToCare.objects.get(
subject_visit__subject_identifier=subject_identifier).art_init_date
self.validate_dates(field1, op, value2=value2, verbose_name1=verbose_name, verbose_name2='ART initiation date')
def validate_date_with_hiv_dx(self, field1, op, verbose_name=None, subject_identifier=None):
"""Validate that date is greater than subject's HIV Dx date."""
subject_identifier = subject_identifier or self.cleaned_data.get('subject_visit').subject_identifier
value2 = EntryToCare.objects.get(
subject_visit__subject_identifier=subject_identifier).hiv_dx_date
self.validate_dates(field1, op, value2=value2, verbose_name1=verbose_name, verbose_name2='HIV Dx date')
def entry_to_care(self, subject_identifier):
try:
entry_to_care = EntryToCare.objects.get(
subject_visit__subject_identifier=subject_identifier)
except EntryToCare.DoesNotExist:
raise forms.ValidationError('The date the patient was entered into care is required to validate this form. '
'Please go back and complete the \'Entry-To-Care\' form before proceeding.')
return entry_to_care
def validate_date_with_entry_to_care_date(self, field1, op, verbose_name=None, subject_identifier=None):
"""Validate that date is greater than subject's Entry-to-care date."""
subject_identifier = subject_identifier or self.cleaned_data.get('subject_visit').subject_identifier
value2 = self.entry_to_care(subject_identifier).entry_date
self.validate_dates(field1, op, value2=value2, verbose_name1=verbose_name, verbose_name2='Entry-to-care date')
def validate_date_with_previous_visit(self, field1, op, verbose_name=None, subject_identifier=None):
"""Validate that date is greater than subject's previous visit date."""
subject_identifier = subject_identifier or self.cleaned_data.get('subject_visit').subject_identifier
previous_visit = self.cleaned_data.get('subject_visit').previous_visit
self.validate_dates(field1, op, value2=previous_visit.visit_date,
verbose_name1=verbose_name,
verbose_name2='previous visit {} on {}'.format(
previous_visit.visit_code,
previous_visit.visit_date.strftime('%Y-%m-%d')))
def validate_dates(self, field1=None, op=None, field2=None, errmsg=None,
verbose_name1=None, verbose_name2=None, value1=None, value2=None):
"""Validate that date1 is greater than date2."""
date1 = self.cleaned_data.get(field1, value1)
date2 = self.cleaned_data.get(field2, value2)
if not self.compare_dates(date1, op, date2):
raise forms.ValidationError({
field1: [errmsg or '{field1} {phrase} {field2}.'.format(
field1=verbose_name1 or field1 or date1,
phrase=comparison_phrase.get(op),
field2=verbose_name2 or field2 or date2)]})
def compare_dates(self, date1, op, date2):
ret = True
if date1 and date2:
ret = False
if op == 'gt':
if date1 > date2:
ret = True
elif op == 'gte':
if date1 >= date2:
ret = True
elif op == 'lt':
if date1 < date2:
ret = True
elif op == 'lte':
if date1 <= date2:
ret = True
elif op == 'ne':
if date1 != date2:
ret = True
elif op == 'eq':
if date1 == date2:
ret = True
return ret
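# Editor's sketch (hedged; field names invented): the date helpers above are
# meant to be chained from a form's clean(), for example to require that a
# diagnosis date falls on or after the date of birth and before the ART
# initiation date:
#
#     self.validate_date_with_dob('dx_date', 'gte', verbose_name='Diagnosis date')
#     self.validate_dates('dx_date', 'lt', 'art_init_date',
#                         verbose_name1='Diagnosis date',
#                         verbose_name2='ART initiation date')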
|
botswana-harvard/ba-namotswe
|
ba_namotswe/validators.py
|
Python
|
gpl-3.0
| 9,291
|
[
"VisIt"
] |
339cc26b47d169f59392bedef6757d90f95b4ad532753c96efd10b867a0ab5e7
|
#!/usr/bin/python2.7
import logging
import gzip
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
import pysam
from optparse import OptionParser
import os
import re
import sys
from progressbar import Bar, Timer, Percentage, ProgressBar
def main():
    parser = OptionParser(description = "Split aligned reads from BAM file "\
"according to the annotation file.",
usage = "%prog -b <aln_file.bam> -a <annotation_file.csv> "\
"-f <fasta_dir> [-r <chr:start-stop> | -g <gene_name>]")
parser.add_option("-b",
metavar = "<aln_file.bam>",
help = "Alignment file in BAM format.")
parser.add_option("-a",
metavar = "<annotation_file.csv>",
help = "Gene annotation file in CSV format.")
parser.add_option("-f",
metavar = "<fasta_dir>",
help = "Directory containing FASTA (.gz) files of the chromosomes.")
parser.add_option("-r",
metavar = "<chr:start-stop>",
help = "Region to be examined in the form: chr:start-stop. Ex. 1:100-200.",
default = "")
parser.add_option("-g",
metavar = "<gene_name>",
help = "Gene name.",
default = "")
parser.add_option("-o",
metavar = "<output-dir>",
help = "Output (root) directory.",
default = ".")
parser.add_option('-v',
help='increase output verbosity',
action='count', default=0)
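    # Example invocation (editor's note; paths and gene name are hypothetical):
    #   ./splitBAM.py -b sample.bam -a genes.csv -f /data/GRCh38/chromosomes -g TP53 -o out_dir
    # At most one of -r (region) and -g (gene name) may be given; omitting both
    # processes every gene in the annotation file.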
(options, args) = parser.parse_args()
in_bam_file = options.b
in_region = options.r
in_fasta_dir = options.f
in_fasta_prefix_name = "Homo_sapiens.GRCh38.dna.chromosome."
in_gene = options.g
in_annot_file = options.a
out_root_dir = options.o
if options.v == 0:
log_level = logging.INFO
elif options.v == 1:
log_level = logging.DEBUG
else:
log_level = logging.DEBUG
logging.basicConfig(level=log_level,
format='%(levelname)-8s [%(asctime)s] %(message)s',
datefmt="%y%m%d %H%M%S")
if not (in_bam_file and in_annot_file and in_fasta_dir):
logging.error("Missing input argument(s).")
sys.exit(1)
if (in_region != "" and in_gene != ""):
logging.error("Specify only one option between region (-r) and gene name (-g).")
sys.exit(1)
if not (os.path.exists(out_root_dir)):
logging.error("Output dir not found.")
sys.exit(1)
regexp_reg = re.compile(r'^(\w+):(\d+)-(\d+)')
regexp_annot = re.compile(r'^([\dXY]+)\t(\d+)\t(\d+)\t([+\-])' \
'\tgene_id=\"(\w+)\"\tgene_version=\"(\d+)\"\tgene_name=\"([\w\-\.]+)\"')
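    # The annotation regexp above expects tab-separated lines of the form
    # (editor's illustration; the values are made up):
    #   1<TAB>11869<TAB>14409<TAB>+<TAB>gene_id="ENSG00000223972"<TAB>gene_version="5"<TAB>gene_name="DDX11L1"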
if not (os.path.exists(in_fasta_dir)):
logging.error("Directory " + in_fasta_dir + " not found.")
sys.exit(1)
logging.info("splitBAM: Program Started")
in_annot = open(in_annot_file, "r")
in_sam = pysam.AlignmentFile(in_bam_file, "rb")
fetch_aln = in_sam.fetch()
tot_fetch_aln = 0
count = 0
match_elem = {}
if(in_region != ""):
region = re.search(regexp_reg, in_region)
if (region):
in_chr = region.group(1)
in_start = int(region.group(2))
in_stop = int(region.group(3))
else:
logging.error("Wrong input region.")
sys.exit(1)
if (in_start > in_stop):
logging.error("Wrong input region.")
sys.exit(1)
for a in in_annot:
ga = re.search(regexp_annot, a)
if (ga):
count = count + 1
chr = ga.group(1)
start = int(ga.group(2))
stop = int(ga.group(3))
strand = str(ga.group(4))
gene_id = str(ga.group(5))
gene_version = int(ga.group(6))
gene_name = str(ga.group(7))
insert = False
el = {'chr' : chr,
'start' : start,
'stop' : stop,
'strand' : strand,
'gene_id' : gene_id,
'gene_version' : gene_version,
'gene_name' : gene_name}
if(in_gene != ""):
if(in_gene == gene_name or in_gene == gene_id):
insert = True
elif(in_region != ""):
if(in_chr == chr and in_start <= start and in_stop >= stop):
insert = True
else:
insert = True
if(insert):
if not (match_elem.has_key(gene_name)):
match_elem[gene_name] = el
elif (match_elem[gene_name]['gene_version'] < gene_version):
match_elem[gene_name] = el
else:
logging.warn("Ducplicated gene name")
logging.debug(a)
else:
logging.error("Error in parsing annotation file.")
logging.error(a)
sys.exit(1)
logging.info("Parsed " + str(count) + " annotated genes.")
logging.info("Num. of retrieved genes: " + str(len(match_elem)))
for k in match_elem.keys():
logging.info("")
logging.info("Creating gene " + k)
r_chr = match_elem[k]['chr']
r_start = match_elem[k]['start'] - 1000
r_stop = match_elem[k]['stop'] + 1000
r_strand = match_elem[k]['strand']
fetch_aln = in_sam.fetch(r_chr, r_start, r_stop)
tot_fetch_aln = in_sam.count(r_chr, r_start, r_stop)
        if(tot_fetch_aln < 10):
            logging.warn("Too few valid alignments found (" + str(tot_fetch_aln) + " < 10), skipping gene.")
continue
out_dir = out_root_dir + "/" + k
if not (os.path.exists(out_dir)):
os.mkdir(out_dir)
#Compute reads
widgets = ['Processing: ', Percentage(),
' ', Bar(marker='=', left='[', right=']'),
' ', Timer()]
bar = ProgressBar(widgets=widgets, maxval=tot_fetch_aln).start()
num_proc_seq = 0
num_valid_seq = 0
num_disc_seq = 0
valid_id = set()
valid = []
discarded = []
for read in fetch_aln:
num_proc_seq = num_proc_seq + 1
bar.update(num_proc_seq)
ref_name = in_sam.getrname(read.reference_id)
read_name = read.query_name
if read.is_paired:
read_name += ("_R1" if read.is_read1 else "_R2")
fasta_hdr = "/gb=" + read_name
fasta_hdr += " /clone_end=3'" + " /reversed="
fasta_hdr += ("yes" if read.is_reverse else "no")
fasta_hdr += " /ref_start=" + ref_name
fasta_hdr += ":" + str(read.reference_start)
fasta_hdr += " /ref_end=" + ref_name
fasta_hdr += ":" + str(read.reference_end)
record = SeqRecord(Seq(read.query_sequence, generic_dna))
record.id = fasta_hdr
record.description = ""
logging.debug(fasta_hdr)
logging.debug(read.query_sequence)
if not (read.is_paired):
if read_name not in valid_id:
num_valid_seq = num_valid_seq + 1
valid.append(record)
valid_id.add(read_name)
else:
if not (read.mate_is_unmapped):
if (read.reference_id == read.next_reference_id and
read.next_reference_start >= r_start and
read.next_reference_start <= r_stop):
if read_name not in valid_id:
num_valid_seq = num_valid_seq + 1
valid.append(record)
valid_id.add(read_name)
else:
num_disc_seq = num_disc_seq + 1
discarded.append(record)
else:
num_disc_seq = num_disc_seq + 1
discarded.append(record)
bar.finish()
out_fasta = open(out_dir + "/" + k + ".fa", "w")
SeqIO.write(valid, out_fasta, "fasta")
out_fasta.close()
out_dis = open(out_dir + "/" + "discarded.fa", "w")
SeqIO.write(discarded, out_dis, "fasta")
out_dis.close()
logging.info("Num. Processed Sequences: " + str(num_proc_seq))
logging.info("Num. Valid Sequences: " + str(num_valid_seq))
logging.info("Num. Discarded Sequences: " + str(num_disc_seq))
#Compute genomics
logging.info("Cutting genomic sequence.")
        found = False
sequences = []
seq_name = ""
if not(r_chr[:3] == "chr"):
seq_name += "chr"
fasta_seq_name = in_fasta_dir + "/" + in_fasta_prefix_name + r_chr + ".fa.gz"
if not os.path.isfile(fasta_seq_name):
logging.error("File " + fasta_seq_name + " not found.")
sys.exit(1)
seq_handle = gzip.open(fasta_seq_name, "r")
for sequence in SeqIO.parse(seq_handle, "fasta"):
if (sequence.id == r_chr):
found = True
sub_s = Seq(str(sequence.seq[r_start:r_stop]), generic_dna)
seq_id = "{0}{1}:{2}:{3}:".format(seq_name,r_chr,r_start,r_stop)
if(r_strand == "-"):
sub_s = sub_s.reverse_complement()
seq_id += "-1"
#descr += " ReverseComplemented"
else:
seq_id += "+1"
seqrec = SeqRecord(sub_s)
seqrec.id = str(seq_id)
#descr += " Length={0}bp.".format(len(sub_s))
seqrec.description = ""
sequences.append(seqrec)
logging.debug(seqrec.seq)
logging.info("Cut sequence of " + str(len(sub_s)) + "bp.")
if not (found):
logging.warn("No sequence " + chr + " found in " + in_fasta_file)
else:
out_genomic = open(out_dir + "/" + "genomic.txt", "w")
SeqIO.write(sequences, out_genomic, "fasta")
out_genomic.close()
in_sam.close()
logging.info("splitBAM: Program Completed")
if __name__ == '__main__':
main()
|
AlgoLab/PIntron-scripts
|
Preprocessing/splitBAM.py
|
Python
|
agpl-3.0
| 11,950
|
[
"pysam"
] |
b33114b46fd11f7fb149be55f4489c08037fdb9739982a88e17411f32a084d13
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013-2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
""" Classes for inventory details """
import decimal
from kiwi.currency import currency
from kiwi.ui.objectlist import Column
import pango
from stoqlib.domain.inventory import Inventory, InventoryItemsView
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.editors.noteeditor import NoteEditor
from stoqlib.gui.utils.printing import print_report
from stoqlib.lib.formatters import format_sellable_description
from stoqlib.lib.translation import stoqlib_gettext
from stoqlib.reporting.inventory import InventoryReport
_ = stoqlib_gettext
class InventoryDetailsDialog(BaseEditor):
"""This class is for Inventory Details Dialog. This dialog display
general informations about the selected inventory item on InventoryApp
and about items related on the inventory.
This dialog have seven widgets. They will display the informations of the
inventory selected. The |status_str| show dialog status string,
the |identifier| show identifier, |branch_name| show the branch of the
inventory, |open_date| is the open date of the inventory, |close_date|
is the close date of the inventory if it was closed,
|invoice_number| show the invoice number of the current inventory, and
|responsible_name| show the username who opened the inventory.
"""
gladefile = "InventoryDetailsDialog"
model_type = Inventory
title = _(u"Inventory Details")
size = (800, 460)
hide_footer = True
proxy_widgets = ('status_str',
'identifier',
'branch_name',
'open_date',
'close_date',
'invoice_number',
'responsible_name')
def _setup_widgets(self):
self.info_button.set_sensitive(False)
self.items_list.set_columns(self._get_items_columns())
# Create a list to avoid the query being executed twice (object list
        # does an "if objects" check somewhere)
items = list(InventoryItemsView.find_by_inventory(self.store, self.model))
self.items_list.add_list(items)
self.print_button.set_sensitive(any(self._get_report_items()))
def _get_report_items(self):
for i in self.items_list:
item = i.inventory_item
if (item.recorded_quantity != item.counted_quantity or
item.actual_quantity is not None):
yield item
def _get_items_columns(self):
return [Column('code', _("Code"), sorted=True, data_type=str),
Column('description', _("Description"), data_type=str,
expand=True, format_func=self._format_description,
format_func_data=True),
Column('reason', _('Reason'), data_type=str,
ellipsize=pango.ELLIPSIZE_END),
Column('recorded_quantity', _("Recorded"), data_type=decimal.Decimal),
Column('counted_quantity', _("Counted"), data_type=decimal.Decimal),
Column('actual_quantity', _("Actual"), data_type=decimal.Decimal),
Column('is_adjusted', _("Adjusted"), data_type=bool),
Column('product_cost', _("Cost"), data_type=currency, visible=False)]
    def _format_description(self, item, data):  # pragma: no cover
return format_sellable_description(item.sellable, item.batch)
#
# BaseEditor hooks
#
def setup_proxies(self):
self._setup_widgets()
self.add_proxy(self.model, InventoryDetailsDialog.proxy_widgets)
#
# Callbacks
#
def on_print_button__clicked(self, button):
items = list(self._get_report_items())
assert items
print_report(InventoryReport, self.items_list, items)
def on_info_button__clicked(self, button):
item = self.items_list.get_selected()
run_dialog(NoteEditor, self, self.store, item, 'reason',
title=_('Reason'), label_text=_('Adjust reason'),
visual_mode=True)
def on_items_list__selection_changed(self, objectlist, item):
self.info_button.set_sensitive(bool(item and item.reason))
def on_items_list__double_click(self, objectlist, item):
if not item.reason:
return
run_dialog(NoteEditor, self, self.store, item, 'reason',
title=_('Reason'), label_text=_('Adjust reason'),
visual_mode=True)
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/inventorydetails.py
|
Python
|
gpl-2.0
| 5,391
|
[
"VisIt"
] |
305dd3f9c7ba8ec4197f7debea0a8bc76d55476999a61dc503116d2751a4eac3
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import numpy as np
from numpy.testing import (
assert_,
assert_array_equal,
assert_equal,
assert_raises,
)
import mock
from MDAnalysisTests.datafiles import PSF, GRO, XTC
from MDAnalysisTests import make_Universe
import MDAnalysis
import MDAnalysis as mda
class TestUpdatingSelection(object):
def setUp(self):
self.u = mda.Universe(GRO, XTC)
self.ag = self.u.select_atoms(
"prop x < 5 and prop y < 5 and prop z < 5")
self.ag_updating = self.u.select_atoms(
"prop x < 5 and prop y < 5 and prop z < 5", updating=True)
self.ag_updating_compounded = self.u.select_atoms("around 2 group sele",
sele=self.ag, updating=True)
self.ag_updating_chained = self.u.select_atoms("around 2 group sele",
sele=self.ag_updating, updating=True)
self.ag_updating_chained2 = self.ag_updating.select_atoms("all",
updating=True)
def test_update(self):
assert_array_equal(self.ag_updating.indices, self.ag.indices)
target_idxs = np.array([ 4469, 4470, 4472, 6289, 6290, 6291,
6292, 31313, 31314, 31315, 31316, 34661,
34663, 34664])
self.u.trajectory.next()
assert_equal(self.ag_updating._lastupdate, 0)
assert_(not self.ag_updating.is_uptodate)
assert_array_equal(self.ag_updating.indices, target_idxs)
assert_(self.ag_updating.is_uptodate)
self.ag_updating.is_uptodate = False
assert_(self.ag_updating._lastupdate is None)
def test_compounded_update(self):
target_idxs0 = np.array([ 3650, 7406, 22703, 31426, 40357,
40360, 41414])
target_idxs1 = np.array([ 3650, 8146, 23469, 23472, 31426,
31689, 31692, 34326, 41414])
assert_array_equal(self.ag_updating_compounded.indices,
target_idxs0)
self.u.trajectory.next()
assert_array_equal(self.ag_updating_compounded.indices,
target_idxs1)
def test_chained_update(self):
target_idxs = np.array([ 4471, 7406, 11973, 11975, 34662, 44042])
assert_array_equal(self.ag_updating_chained.indices,
self.ag_updating_compounded.indices)
self.u.trajectory.next()
assert_array_equal(self.ag_updating_chained.indices, target_idxs)
def test_chained_update2(self):
assert_array_equal(self.ag_updating_chained2.indices,
self.ag_updating.indices)
self.u.trajectory.next()
assert_array_equal(self.ag_updating_chained2.indices,
self.ag_updating.indices)
def test_slice_is_static(self):
ag_static1 = self.ag_updating[:]
ag_static2 = self.ag_updating.select_atoms("all")
assert_array_equal(ag_static1.indices, self.ag.indices)
assert_array_equal(ag_static2.indices, self.ag.indices)
self.u.trajectory.next()
assert_array_equal(ag_static1.indices, self.ag.indices)
assert_array_equal(ag_static2.indices, self.ag.indices)
def test_kwarg_check(self):
assert_raises(TypeError, self.u.select_atoms, "group updating",
{"updating":True})
class TestUpdatingSelectionNotraj(object):
def setUp(self):
self.u = mda.Universe(PSF)
self.ag = self.u.select_atoms("name N*")
self.ag_updating = self.u.select_atoms("name N*", updating=True)
def test_update(self):
assert_(self.ag_updating.is_uptodate)
assert_array_equal(self.ag_updating.indices, self.ag.indices)
assert_equal(self.ag_updating._lastupdate, -1)
self.ag_updating.is_uptodate = False
assert_(self.ag_updating._lastupdate is None)
class UAGReader(mda.coordinates.base.ReaderBase):
"""
Positions in this reader are defined as:
(atom number + frame number, 0, 0)
Eg::
Frame 1:
(0, 0, 0),
(1, 0, 0),
etc
Frame 2:
(1, 0, 0),
(2, 0, 0),
etc
    Whilst quite possibly not the best data for molecular simulation,
it does make for easy to write tests.
"""
def __init__(self, n_atoms):
super(UAGReader, self).__init__('UAGReader')
self._auxs = {}
self.n_frames = 10
self.n_atoms = n_atoms
self.ts = self._Timestep(self.n_atoms)
self._read_next_timestep()
def _reopen(self):
self.ts.frame = -1
def _read_next_timestep(self):
ts = self.ts
ts.frame += 1
if ts.frame >= self.n_frames:
raise EOFError
pos = np.zeros((self.n_atoms, 3))
pos[:, 0] = np.arange(self.n_atoms) + ts.frame
ts.positions = pos
return ts
def _read_frame(self, frame):
self.ts.frame = frame - 1 # gets +1'd next
        return self._read_next_timestep()
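    # Editor's note: the position formula in _read_next_timestep() above gives,
    # for n_atoms == 3:
    #   frame 0 -> [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
    #   frame 1 -> [[1, 0, 0], [2, 0, 0], [3, 0, 0]]
    # which is the pattern the UAG tests below rely on.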
class TestUAGCallCount(object):
# make sure updates are only called when required!
#
# these tests check that potentially expensive selection operations are only
# done when necessary
def setUp(self):
self.u = u = make_Universe(('names',))
u.trajectory = UAGReader(125)
def tearDown(self):
del self.u
@mock.patch.object(MDAnalysis.core.groups.UpdatingAtomGroup, 'update_selection',
autospec=True, # required to make it get self when called
)
def test_updated_when_creating(self, mock_update_selection):
uag = self.u.select_atoms('name XYZ', updating=True)
assert_(mock_update_selection.call_count == 1)
def test_updated_when_next(self):
uag = self.u.select_atoms('name XYZ', updating=True)
# Use mock.patch.object to start inspecting the uag update selection method
# wraps= keyword makes it still function as normal, just we're spying on it now
with mock.patch.object(uag, 'update_selection',
wraps=uag.update_selection) as mock_update:
self.u.trajectory.next()
assert_(mock_update.call_count == 0)
# Access many attributes..
pos = uag.positions
names = uag.names
# But check we only got updated once
assert_(mock_update.call_count == 1)
class TestDynamicUAG(object):
def setUp(self):
self.u = u = make_Universe(('names',))
u.trajectory = UAGReader(125)
def tearDown(self):
del self.u
def test_nested_uags(self):
bg = self.u.atoms[[3, 4]]
uag1 = self.u.select_atoms('around 1.5 group bg', bg=bg, updating=True)
uag2 = self.u.select_atoms('around 1.5 group uag', uag=uag1, updating=True)
for ts in self.u.trajectory:
assert_equal(len(bg), 2)
assert_equal(len(uag1), 2) # around doesn't include bg, so 2
assert_equal(len(uag2), 4) # doesn't include uag1
def test_driveby(self):
uag = self.u.select_atoms('prop x < 5.5', updating=True)
n_init = 6
for i, ts in enumerate(self.u.trajectory):
# should initially be 6 atoms with x < 5.5
n_expected = max(n_init - i, 0) # floors at 0
assert_equal(len(uag), n_expected)
def test_representations():
u = make_Universe()
ag_updating = u.select_atoms("bynum 0", updating=True)
rep = repr(ag_updating)
assert "0 atoms," in rep
assert "selection " in rep
assert "bynum 0" in rep
assert "entire Universe" in rep
ag_updating = u.select_atoms("bynum 1", updating=True)
rep = repr(ag_updating)
assert "1 atom," in rep
ag_updating = u.atoms[:-1].select_atoms("bynum 1", "bynum 2",
updating=True)
rep = repr(ag_updating)
assert "2 atoms," in rep
assert "selections 'bynum 1' + 'bynum 2'" in rep
assert "another AtomGroup" in rep
def test_empty_UAG():
u = make_Universe()
# technically possible to make a UAG without any selections..
uag = mda.core.groups.UpdatingAtomGroup(u.atoms, (), '')
assert_(isinstance(uag, mda.core.groups.UpdatingAtomGroup))
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/core/test_updating_atomgroup.py
|
Python
|
gpl-2.0
| 9,338
|
[
"MDAnalysis"
] |
762b5b8e8b6464a8a24198ed3a2212cd2c2bc82a2a2231ff51bb6396af386301
|
import copy
import gc
import pickle
import sys
import unittest
import weakref
import inspect
from test import support
try:
import _testcapi
except ImportError:
_testcapi = None
# This tests to make sure that if a SIGINT arrives just before we send into a
# yield from chain, the KeyboardInterrupt is raised in the innermost
# generator (see bpo-30039).
@unittest.skipUnless(_testcapi is not None and
hasattr(_testcapi, "raise_SIGINT_then_send_None"),
"needs _testcapi.raise_SIGINT_then_send_None")
class SignalAndYieldFromTest(unittest.TestCase):
def generator1(self):
return (yield from self.generator2())
def generator2(self):
try:
yield
except KeyboardInterrupt:
return "PASSED"
else:
return "FAILED"
def test_raise_and_yield_from(self):
gen = self.generator1()
gen.send(None)
try:
_testcapi.raise_SIGINT_then_send_None(gen)
except BaseException as _exc:
exc = _exc
self.assertIs(type(exc), StopIteration)
self.assertEqual(exc.value, "PASSED")
class FinalizationTest(unittest.TestCase):
def test_frame_resurrect(self):
# A generator frame can be resurrected by a generator's finalization.
def gen():
nonlocal frame
try:
yield
finally:
frame = sys._getframe()
g = gen()
wr = weakref.ref(g)
next(g)
del g
support.gc_collect()
self.assertIs(wr(), None)
self.assertTrue(frame)
del frame
support.gc_collect()
def test_refcycle(self):
# A generator caught in a refcycle gets finalized anyway.
old_garbage = gc.garbage[:]
finalized = False
def gen():
nonlocal finalized
try:
g = yield
yield 1
finally:
finalized = True
g = gen()
next(g)
g.send(g)
self.assertGreater(sys.getrefcount(g), 2)
self.assertFalse(finalized)
del g
support.gc_collect()
self.assertTrue(finalized)
self.assertEqual(gc.garbage, old_garbage)
def test_lambda_generator(self):
# Issue #23192: Test that a lambda returning a generator behaves
# like the equivalent function
f = lambda: (yield 1)
def g(): return (yield 1)
# test 'yield from'
f2 = lambda: (yield from g())
def g2(): return (yield from g())
f3 = lambda: (yield from f())
def g3(): return (yield from f())
for gen_fun in (f, g, f2, g2, f3, g3):
gen = gen_fun()
self.assertEqual(next(gen), 1)
with self.assertRaises(StopIteration) as cm:
gen.send(2)
self.assertEqual(cm.exception.value, 2)
class GeneratorTest(unittest.TestCase):
def test_name(self):
def func():
yield 1
# check generator names
gen = func()
self.assertEqual(gen.__name__, "func")
self.assertEqual(gen.__qualname__,
"GeneratorTest.test_name.<locals>.func")
# modify generator names
gen.__name__ = "name"
gen.__qualname__ = "qualname"
self.assertEqual(gen.__name__, "name")
self.assertEqual(gen.__qualname__, "qualname")
# generator names must be a string and cannot be deleted
self.assertRaises(TypeError, setattr, gen, '__name__', 123)
self.assertRaises(TypeError, setattr, gen, '__qualname__', 123)
self.assertRaises(TypeError, delattr, gen, '__name__')
self.assertRaises(TypeError, delattr, gen, '__qualname__')
# modify names of the function creating the generator
func.__qualname__ = "func_qualname"
func.__name__ = "func_name"
gen = func()
self.assertEqual(gen.__name__, "func_name")
self.assertEqual(gen.__qualname__, "func_qualname")
# unnamed generator
gen = (x for x in range(10))
self.assertEqual(gen.__name__,
"<genexpr>")
self.assertEqual(gen.__qualname__,
"GeneratorTest.test_name.<locals>.<genexpr>")
def test_copy(self):
def f():
yield 1
g = f()
with self.assertRaises(TypeError):
copy.copy(g)
def test_pickle(self):
def f():
yield 1
g = f()
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises((TypeError, pickle.PicklingError)):
pickle.dumps(g, proto)
class ExceptionTest(unittest.TestCase):
# Tests for the issue #23353: check that the currently handled exception
# is correctly saved/restored in PyEval_EvalFrameEx().
def test_except_throw(self):
def store_raise_exc_generator():
try:
self.assertEqual(sys.exc_info()[0], None)
yield
except Exception as exc:
# exception raised by gen.throw(exc)
self.assertEqual(sys.exc_info()[0], ValueError)
self.assertIsNone(exc.__context__)
yield
# ensure that the exception is not lost
self.assertEqual(sys.exc_info()[0], ValueError)
yield
# we should be able to raise back the ValueError
raise
make = store_raise_exc_generator()
next(make)
try:
raise ValueError()
except Exception as exc:
try:
make.throw(exc)
except Exception:
pass
next(make)
with self.assertRaises(ValueError) as cm:
next(make)
self.assertIsNone(cm.exception.__context__)
self.assertEqual(sys.exc_info(), (None, None, None))
def test_except_next(self):
def gen():
self.assertEqual(sys.exc_info()[0], ValueError)
yield "done"
g = gen()
try:
raise ValueError
except Exception:
self.assertEqual(next(g), "done")
self.assertEqual(sys.exc_info(), (None, None, None))
def test_except_gen_except(self):
def gen():
try:
self.assertEqual(sys.exc_info()[0], None)
yield
# we are called from "except ValueError:", TypeError must
# inherit ValueError in its context
raise TypeError()
except TypeError as exc:
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(type(exc.__context__), ValueError)
# here we are still called from the "except ValueError:"
self.assertEqual(sys.exc_info()[0], ValueError)
yield
self.assertIsNone(sys.exc_info()[0])
yield "done"
g = gen()
next(g)
try:
raise ValueError
except Exception:
next(g)
self.assertEqual(next(g), "done")
self.assertEqual(sys.exc_info(), (None, None, None))
def test_except_throw_exception_context(self):
def gen():
try:
try:
self.assertEqual(sys.exc_info()[0], None)
yield
except ValueError:
# we are called from "except ValueError:"
self.assertEqual(sys.exc_info()[0], ValueError)
raise TypeError()
except Exception as exc:
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(type(exc.__context__), ValueError)
# we are still called from "except ValueError:"
self.assertEqual(sys.exc_info()[0], ValueError)
yield
self.assertIsNone(sys.exc_info()[0])
yield "done"
g = gen()
next(g)
try:
raise ValueError
except Exception as exc:
g.throw(exc)
self.assertEqual(next(g), "done")
self.assertEqual(sys.exc_info(), (None, None, None))
def test_stopiteration_error(self):
# See also PEP 479.
def gen():
raise StopIteration
yield
with self.assertRaisesRegex(RuntimeError, 'raised StopIteration'):
next(gen())
def test_tutorial_stopiteration(self):
# Raise StopIteration" stops the generator too:
def f():
yield 1
raise StopIteration
yield 2 # never reached
g = f()
self.assertEqual(next(g), 1)
with self.assertRaisesRegex(RuntimeError, 'raised StopIteration'):
next(g)
def test_return_tuple(self):
def g():
return (yield 1)
gen = g()
self.assertEqual(next(gen), 1)
with self.assertRaises(StopIteration) as cm:
gen.send((2,))
self.assertEqual(cm.exception.value, (2,))
def test_return_stopiteration(self):
def g():
return (yield 1)
gen = g()
self.assertEqual(next(gen), 1)
with self.assertRaises(StopIteration) as cm:
gen.send(StopIteration(2))
self.assertIsInstance(cm.exception.value, StopIteration)
self.assertEqual(cm.exception.value.value, 2)
class YieldFromTests(unittest.TestCase):
def test_generator_gi_yieldfrom(self):
def a():
self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_RUNNING)
self.assertIsNone(gen_b.gi_yieldfrom)
yield
self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_RUNNING)
self.assertIsNone(gen_b.gi_yieldfrom)
def b():
self.assertIsNone(gen_b.gi_yieldfrom)
yield from a()
self.assertIsNone(gen_b.gi_yieldfrom)
yield
self.assertIsNone(gen_b.gi_yieldfrom)
gen_b = b()
self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_CREATED)
self.assertIsNone(gen_b.gi_yieldfrom)
gen_b.send(None)
self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_SUSPENDED)
self.assertEqual(gen_b.gi_yieldfrom.gi_code.co_name, 'a')
gen_b.send(None)
self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_SUSPENDED)
self.assertIsNone(gen_b.gi_yieldfrom)
[] = gen_b # Exhaust generator
self.assertEqual(inspect.getgeneratorstate(gen_b), inspect.GEN_CLOSED)
self.assertIsNone(gen_b.gi_yieldfrom)
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print(i)
1
2
>>> g = f()
>>> next(g)
1
>>> next(g)
2
"Falling off the end" stops the generator:
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> next(g)
1
>>> next(g)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> next(g) # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, "return" and StopIteration are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(g2()))
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = next(me)
... yield i
>>> me = g()
>>> next(me)
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print(list(f1()))
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print(list(f2()))
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> next(k)
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> next(k) # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print(list(f()))
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print(' '+x, end='')
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = list(range(1, 5))
>>> for k in range(len(seq) + 2):
... print("%d-combs of %s:" % (k, seq))
... for c in gcomb(seq, k):
... print(" ", c)
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<class 'function'>
>>> i = g()
>>> type(i)
<class 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'gi_yieldfrom', 'send', 'throw']
>>> from test.support import HAVE_DOCSTRINGS
>>> print(i.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).')
Implement next(self).
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<class 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
AttributeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return next(self.generator)
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.Random(42)
>>> while 1:
... for s in sets:
... print(" %s->%s" % (s, s.find()), end='')
... print()
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print("merged", s1, "into", s2)
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged K into B
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged A into F
A->F B->B C->C D->D E->E F->F G->G H->H I->I J->J K->B L->L M->M
merged E into F
A->F B->B C->C D->D E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged D into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->M
merged M into C
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->J K->B L->L M->C
merged J into B
A->F B->B C->C D->C E->F F->F G->G H->H I->I J->B K->B L->L M->C
merged B into C
A->F B->C C->C D->C E->F F->F G->G H->H I->I J->C K->C L->L M->C
merged F into G
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->L M->C
merged L into C
A->G B->C C->C D->C E->G F->G G->G H->H I->I J->C K->C L->C M->C
merged G into I
A->I B->C C->C D->C E->I F->I G->I H->H I->I J->C K->C L->C M->C
merged I into H
A->H B->C C->C D->C E->H F->H G->H H->H I->H J->C K->C L->C M->C
merged C into H
A->H B->H C->H D->H E->H F->H G->H H->H I->H J->H K->H L->H M->H
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [next(g) for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = next(ints)
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = next(g)
... nh = next(h)
... while 1:
... if ng < nh:
... yield ng
... ng = next(g)
... elif ng > nh:
... yield nh
... nh = next(h)
... else:
... yield ng
... ng = next(g)
... nh = next(h)
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print(firstn(result, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.__next__
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print([m235[j] for j in range(15*i, 15*(i+1))])
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def tail(g):
... next(g) # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print(firstn(it, 15))
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield next(g) + next(h)
...
... def _fib():
... yield 1
... yield 2
... next(fibTail) # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<class 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<class 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<class 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<class 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<class 'generator'>
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print(next(g))
0
>>> print(next(g))
1
>>> print(next(g))
2
>>> print(next(g))
Traceback (most recent call last):
StopIteration
Test the gi_code attribute
>>> def f():
... yield 5
...
>>> g = f()
>>> g.gi_code is f.__code__
True
>>> next(g)
5
>>> next(g)
Traceback (most recent call last):
StopIteration
>>> g.gi_code is f.__code__
True
Test the __name__ attribute and the repr()
>>> def f():
... yield 5
...
>>> g = f()
>>> g.__name__
'f'
>>> repr(g) # doctest: +ELLIPSIS
'<generator object f at ...>'
Lambdas shouldn't have their usual return behavior.
>>> x = lambda: (yield 1)
>>> list(x())
[1]
>>> x = lambda: ((yield 1), (yield 2))
>>> list(x())
[1, 2]
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def simple_conjoin(gs):
values = [None] * len(gs)
def gen(i):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
    # Do one loop nest at a time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
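# Editor's sketch (hedged illustration): conjoin() yields the one shared
# `values` list for every combination, so callers must copy each result if
# they want to keep it around:
#
#     bits = lambda: range(2)
#     [tuple(v) for v in conjoin([bits, bits])]
#     # -> [(0, 0), (0, 1), (1, 0), (1, 1)]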
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().__next__
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
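# Editorial note (not in the original file): as the comment above flat_conjoin
# says, the iterative version can stand in for the recursive one simply by
# rebinding the module-level name, e.g.
#
#     # conjoin = flat_conjoin   # hypothetical: run the tests with the flat version
#
# which trades a slowdown of up to about 2x for constant stack usage.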
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
# generates the possibilities for the columns in that row.
self.rowgenerators = []
for i in rangen:
rowuses = [(1 << j) | # column ordinal
(1 << (n + i-j + n-1)) | # NW-SE ordinal
(1 << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
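# Worked illustration of the bit encoding above (editorial addition): for
# n = 4 and the square at row i = 1, column j = 2, the entry rowuses[2] is
#
#     (1 << 2)                       # column ordinal j
#   | (1 << (4 + (1 - 2) + 3))       # NW-SE ordinal: n + (i - j) + (n - 1) = 6
#   | (1 << (4 + 7 + (1 + 2)))       # NE-SW ordinal: n + (2n - 1) + (i + j) = 14
#
# i.e. bits 2, 6 and 14 of self.used mark that column and both diagonals as busy.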
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print(sep)
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print("|" + "|".join(squares) + "|")
print(sep)
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent square left, there won't be a way to leave it again.
# Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 through m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 through m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
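# Editorial example of the two mappings above: on an m x n = 8 x 10 board,
# coords2index(2, 3) == 2 * 10 + 3 == 23 and index2coords(23) == (2, 3),
# so the two methods are inverses for any valid square.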
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print(sep)
for i in range(m):
row = squares[i]
print("|" + "|".join(row) + "|")
print(sep)
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print(c)
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print(n, len(all), all[0] == [0] * n, all[-1] == [1] * n)
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print(count, "solutions in all.")
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print("Solution", count)
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print((yield 1))
... yield 2
>>> g = f()
>>> next(g)
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
Yield is allowed only in the outermost iterable in a generator expression:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<class 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> next(c)
>>> print(seq)
[]
>>> c.send(10)
>>> print(seq)
[10]
>>> c.send(10)
>>> print(seq)
[10, 20]
>>> c.send(10)
>>> print(seq)
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2)
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):
...
SyntaxError: cannot assign to yield expression
>>> def f(): (yield bar) += y
Traceback (most recent call last):
...
SyntaxError: cannot assign to yield expression
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print((yield))
... except ValueError as v:
... print("caught ValueError (%s)" % (v))
>>> import sys
>>> g = f()
>>> next(g)
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> g.throw("abc")
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not str
>>> g.throw(0)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not int
>>> g.throw(list)
Traceback (most recent call last):
...
TypeError: exceptions must be classes or instances deriving from BaseException, not type
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print(g.gi_frame)
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Plain "raise" inside a generator should preserve the traceback (#13188).
The traceback should have 3 levels:
- g.throw()
- f()
- 1/0
>>> def f():
... try:
... yield
... except:
... raise
>>> g = f()
>>> try:
... 1/0
... except ZeroDivisionError as v:
... try:
... g.throw(v)
... except Exception as w:
... tb = w.__traceback__
>>> levels = 0
>>> while tb:
... levels += 1
... tb = tb.tb_next
>>> levels
3
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print("exiting")
>>> g = f()
>>> next(g)
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> next(g)
>>> g.close() # close normally
And finalization:
>>> def f():
... try: yield
... finally:
... print("exiting")
>>> g = f()
>>> next(g)
>>> del g
exiting
GeneratorExit is not caught by except Exception:
>>> def f():
... try: yield
... except Exception:
... print('except')
... finally:
... print('finally')
>>> g = f()
>>> next(g)
>>> del g
finally
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> with support.catch_unraisable_exception() as cm:
... g = f()
... next(g)
... del g
...
... cm.unraisable.exc_type == RuntimeError
... "generator ignored GeneratorExit" in str(cm.unraisable.exc_value)
... cm.unraisable.exc_traceback is not None
True
True
True
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> next(g)
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<class 'generator'>
>>> def f(): x = yield
>>> type(f())
<class 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<class 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<class 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def __next__(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = next(it)
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
This test isn't really generator related, but rather exception-in-cleanup
related. The coroutine tests (above) just happen to cause an exception in
the generator's __del__ (tp_del) method. We can also test for this
explicitly, without generators. We do have to redirect stderr to avoid
printing warnings and to doublecheck that we actually tested what we wanted
to test.
>>> from test import support
>>> class Leaker:
... def __del__(self):
... def invoke(message):
... raise RuntimeError(message)
... invoke("del failed")
...
>>> with support.catch_unraisable_exception() as cm:
... l = Leaker()
... del l
...
... cm.unraisable.object == Leaker.__del__
... cm.unraisable.exc_type == RuntimeError
... str(cm.unraisable.exc_value) == "del failed"
... cm.unraisable.exc_traceback is not None
True
True
True
True
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import support, test_generators
support.run_unittest(__name__)
support.run_doctest(test_generators, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
kikocorreoso/brython
|
www/src/Lib/test/test_generators.py
|
Python
|
bsd-3-clause
| 59,919
|
[
"VisIt"
] |
d5f77a99f8b22a81ea303e09ed69981b7ee0beb6fe896f7fea060429da7c63ce
|
# Copyright(C) 2011-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd import electrokinetics
import math
##########################################################################
# Set up the System #
##########################################################################
# Build plates using two ek species.
@utx.skipIfMissingGPU()
@utx.skipIfMissingFeatures(["ELECTROKINETICS"])
class ek_charged_plate(ut.TestCase):
es = espressomd.System(box_l=[1.0, 1.0, 1.0])
def test(self):
system = self.es
# Set parameters
box_x = 20
box_y = 20
box_z = 20
system.box_l = [box_x, box_y, box_z]
system.cell_system.skin = 0.2
system.time_step = 0.1
system.periodicity = [1, 1, 1]
bjerrum_length = 2.13569
agrid = 0.5
system.thermostat.turn_off()
# Setup the Fluid
ek = electrokinetics.Electrokinetics(
agrid=agrid,
lb_density=1.0,
viscosity=1.0,
friction=1.0,
T=1.0,
prefactor=bjerrum_length,
stencil="linkcentered",
advection=False,
es_coupling=True)
positive_ions = electrokinetics.Species(
density=0.0, D=0.0, valency=1.0)
negative_ions = electrokinetics.Species(
density=0.0, D=0.0, valency=-1.0)
ek.add_species(positive_ions)
ek.add_species(negative_ions)
system.actors.add(ek)
##################################################################
# X
# Setup EK species
for i in range(int(box_y / agrid)):
for j in range(int(box_z / agrid)):
positive_ions[10, i, j].density = 1.0 / agrid
negative_ions[30, i, j].density = 1.0 / agrid
# Setup MD particle and integrate
system.part.add(id=0, pos=[0, 0, 0], q=-1.0, type=0)
force_difference = 0.0
for i in range(7, 14):
system.part[0].pos = [i, 0, 0]
system.integrator.run(0)
# Check Force
expected_force = -2 * math.pi * bjerrum_length
particle_force = system.part[0].f
if abs(expected_force - particle_force[0]) > force_difference:
force_difference = abs(expected_force - particle_force[0])
print("Force deviation: {}".format(force_difference))
self.assertLess(force_difference, 1.0e-04,
"Force accuracy in X not achieved, allowed deviation: "
"1.0e-04, measured: {}".format(force_difference))
# Unset species
for i in range(int(box_y / agrid)):
for j in range(int(box_z / agrid)):
positive_ions[10, i, j].density = 0.0
negative_ions[30, i, j].density = 0.0
##################################################################
# Y
# Setup EK species
for i in range(int(box_x / agrid)):
for j in range(int(box_z / agrid)):
positive_ions[i, 10, j].density = 1.0 / agrid
negative_ions[i, 30, j].density = 1.0 / agrid
# Setup MD particle and integrate
force_difference = 0.0
for i in range(7, 14):
system.part[0].pos = [0, i, 0]
system.integrator.run(0)
# Check Force
expected_force = -2 * math.pi * bjerrum_length
particle_force = system.part[0].f
if abs(expected_force - particle_force[1]) > force_difference:
force_difference = abs(expected_force - particle_force[1])
print("Force deviation: {}".format(force_difference))
self.assertLess(force_difference, 1.0e-04,
"Force accuracy in Y not achieved, allowed deviation: "
"1.0e-04, measured: {}".format(force_difference))
# Unset species
for i in range(int(box_x / agrid)):
for j in range(int(box_z / agrid)):
positive_ions[i, 10, j].density = 0.0
negative_ions[i, 30, j].density = 0.0
##################################################################
# Z
# Setup EK species
for i in range(int(box_x / agrid)):
for j in range(int(box_y / agrid)):
positive_ions[i, j, 10].density = 1.0 / agrid
negative_ions[i, j, 30].density = 1.0 / agrid
# Setup MD particle and integrate
force_difference = 0.0
for i in range(7, 14):
system.part[0].pos = [0, 0, i]
system.integrator.run(0)
# Check Force
expected_force = -2 * math.pi * bjerrum_length
particle_force = system.part[0].f
if abs(expected_force - particle_force[2]) > force_difference:
force_difference = abs(expected_force - particle_force[2])
print("Force deviation: {}".format(force_difference))
self.assertLess(force_difference, 1.0e-04,
"Force accuracy in Z not achieved, allowed deviation: "
"1.0e-04, measured: {}".format(force_difference))
# Unset species
for i in range(int(box_x / agrid)):
for j in range(int(box_y / agrid)):
positive_ions[i, j, 10].density = 0.0
negative_ions[i, j, 30].density = 0.0
if __name__ == "__main__":
ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/ek_charged_plate.py
|
Python
|
gpl-3.0
| 6,187
|
[
"ESPResSo"
] |
26a7e150b076d9407395fe71de297ccaef7aa9f876384a6f9aec42e3ef2226df
|
from time import sleep
from lettuce import *
from rapidsms.contrib.locations.models import LocationType, Location
from survey.features.page_objects.household_member import NewHouseholdMemberPage, EditHouseholdMemberPage, DeleteHouseholdMemberPage
from survey.features.page_objects.households import HouseholdDetailsPage
from survey.models import EnumerationArea
from survey.models.households import HouseholdMember, HouseholdHead, Household
from survey.models.investigator import Investigator
@step(u'And I have a household')
def and_i_have_a_household(step):
district = LocationType.objects.get(slug='district')
world.kampala = Location.objects.create(name='Kampala', type=district)
world.ea = EnumerationArea.objects.create(name="EA")
world.ea.locations.add(world.kampala_village)
world.investigator = Investigator.objects.create(
name="Investigator 1", mobile_number="1", ea=world.ea)
world.household = Household.objects.create(
investigator=world.investigator, ea=world.investigator.ea, uid=4)
HouseholdHead.objects.create(
household=world.household,
surname="Test",
first_name="User",
date_of_birth="1980-09-01",
male=True,
occupation='Agricultural labor',
level_of_education='Primary',
resident_since_year=2013,
resident_since_month=2)
@step(u'And I visit new household member page')
def and_i_visit_new_household_member_page(step):
world.page = NewHouseholdMemberPage(world.browser, world.household)
world.page.visit()
@step(u'And I see all household member fields are present')
def and_i_see_all_household_member_fields_are_present(step):
world.page.validate_fields()
@step(u'Then I should see member successfully created message')
def then_i_should_see_member_successfully_created_message(step):
world.page.see_success_message('Household member', 'created')
@step(u'And I fill all member related fields')
def and_i_fill_all_member_related_fields(step):
data = {'surname': 'xyz',
'male': True
}
world.page.fill_valid_member_values(data)
world.page.select_date("#id_date_of_birth")
sleep(3)
@step(u'And also I have a household member')
def and_also_i_have_a_household_member(step):
world.household_member = HouseholdMember.objects.create(
surname='member1',
date_of_birth='2013-08-30',
male=True,
household=world.household)
@step(u'And I visit edit household member page')
def and_i_visit_edit_household_member_page(step):
world.page = EditHouseholdMemberPage(
world.browser, world.household, world.household_member)
world.page.visit()
@step(u'And I see all details of household member are present')
def and_i_see_all_details_of_household_member_are_present(step):
world.page.validate_member_details(world.household_member)
@step(u'And I edit member related fields')
def and_i_edit_member_related_fields(step):
data = {'male': False,
'surname': 'member1edited'
}
world.page.fill_valid_member_values(data)
world.page.select_date("#id_date_of_birth")
sleep(3)
@step(u'And I submit the form')
def and_i_submit_the_form(step):
world.page.submit()
@step(u'Then I should see member successfully edited message')
def then_i_should_see_member_successfully_edited_message(step):
world.page.is_text_present('Household member successfully edited.')
@step(u'And I visit that household details page')
def and_i_visit_that_household_details_page(step):
world.page = HouseholdDetailsPage(world.browser, world.household)
world.page.visit()
@step(u'And I click delete member')
def and_i_click_delete_member(step):
world.page.click_delete_link(world.household_member.pk)
@step(u'Then I should see a confirmation modal')
def then_i_should_see_a_confirmation_modal(step):
world.page = DeleteHouseholdMemberPage(
world.browser, world.household, world.household_member)
world.page.see_delete_confirmation_modal()
@step(u'Then that member is successfully deleted')
def then_that_member_is_successfully_deleted(step):
world.page.see_success_message('Household member', 'deleted')
|
unicefuganda/uSurvey
|
survey/features/household_member-steps.py
|
Python
|
bsd-3-clause
| 4,202
|
[
"VisIt"
] |
22cc1d0c1d8abd4bc51ed85914b6198234e292bc0f35acb235a54f4e0036ad96
|
from collections import OrderedDict
import re
import copy
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import LinearSegmentedColormap
import time
import warnings
from pathlib import Path
from tqdm.auto import tqdm
import matplotlib.patheffects as PathEffects
from skimage.exposure import rescale_intensity
from matplotlib.animation import FuncAnimation
from .plotter_utils_consts import n_pts_smooth, default_fourier_n_harm
from .scale import np_scale
from .dc_time import _n64_to_datetime, _n64_datetime_to_scalar, _scalar_to_n64_datetime
def impute_missing_data_1D(data1D):
"""
This function returns the data in the same format as it was
passed in, but with missing values either masked out or imputed with appropriate values
(currently only using a linear trend). Many line-plotting routines for 1D data only
connect contiguous, non-NaN data points (as they generally should), which leaves gaps
in the piecewise-linear plot that are sometimes graphically undesirable.
Parameters
----------
data1D: numpy.ndarray
A 1D NumPy array for which missing values are to be masked or imputed
suitably for at least matplotlib plotting. If formatting for other libraries such
as seaborn or plotly is necessary, add that formatting requirement as a parameter.
"""
from scipy.interpolate import interp1d
nan_mask = ~np.isnan(data1D)
x = np.arange(len(data1D))
x_no_nan = x[nan_mask]
data_no_nan = data1D[nan_mask]
if len(x_no_nan) >= 2:
f = interp1d(x_no_nan, data_no_nan)
# Select points for interpolation.
interpolation_x_mask = (x_no_nan[0] <= x) & (x <= x_no_nan[-1])
interpolation_x = x[interpolation_x_mask]
data1D_interp = np.arange(len(data1D), dtype=np.float32)
# The ends of data1D may contain NaNs that must be included.
end_nan_inds = x[(x <= x_no_nan[0]) | (x_no_nan[-1] <= x)]
data1D_interp[end_nan_inds] = np.nan
data1D_interp[interpolation_x_mask] = f(interpolation_x)
return data1D_interp
else: # Cannot interpolate with a single non-nan point.
return data1D
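# Illustrative usage (editorial addition, not part of the original module):
#
#     impute_missing_data_1D(np.array([np.nan, 1.0, np.nan, 3.0, np.nan]))
#     # -> array([nan,  1.,  2.,  3., nan], dtype=float32)
#
# Interior NaNs are filled by the linear interpolant; leading and trailing NaNs
# (outside the first and last valid points) are left as NaN.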
## Datetime functions ##
def n64_to_epoch(timestamp):
ts = pd.to_datetime(str(timestamp))
time_format = "%Y-%m-%d"
ts = ts.strftime(time_format)
epoch = int(time.mktime(time.strptime(ts, time_format)))
return epoch
def np_dt64_to_str(np_datetime, fmt='%Y-%m-%d'):
"""Converts a NumPy datetime64 object to a string based on a format string supplied to pandas strftime."""
return pd.to_datetime(str(np_datetime)).strftime(fmt)
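# For example (editorial): np_dt64_to_str(np.datetime64('2020-01-02T03:04:05'))
# returns '2020-01-02', and a custom fmt such as '%Y/%m' would yield '2020/01'.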
def tfmt(x, pos=None):
return time.strftime("%Y-%m-%d", time.gmtime(x))
## End datetime functions ##
def regression_massage(ds):
t_len = len(ds["time"])
s_len = len(ds["latitude"]) * len(ds["longitude"])
flat_values = ds.values.reshape(t_len * s_len)
return list(zip(list(map(n64_to_epoch, ds.time.values)), flat_values))
def remove_nans(aList):
i = 0
while i < len(aList):
if np.isnan(aList[i][1]):
del aList[i]
i = 0
else:
i += 1
return aList
def full_linear_regression(ds):
myList = regression_massage(ds)
myList = remove_nans(myList)
myList = sorted(myList, key=lambda tup: tup[0])
time, value = zip(*myList)
value = [int(x) for x in value]
value = np.array(value)
value.astype(int)
time = np.array(time)
time.astype(int)
return list(zip(time, value))
def xarray_plot_data_vars_over_time(dataset, colors=['orange', 'blue']):
"""
Plot a line plot of all data variables in an xarray.Dataset on a shared set of axes.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot. The only dimension and coordinate must be 'time'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
"""
data_var_names = sorted(list(dataset.data_vars))
len_dataset = dataset.time.size
nan_mask = np.full(len_dataset, True)
for i, data_arr_name in enumerate(data_var_names):
data_arr = dataset[data_arr_name]
nan_mask = nan_mask & data_arr.notnull().values
plt.plot(data_arr[nan_mask], marker='o', c=colors[i])
times = dataset.time.values
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times)))
plt.xticks(np.arange(len(date_strs[nan_mask])), date_strs[nan_mask],
rotation=45, ha='right', rotation_mode='anchor')
plt.legend(data_var_names, loc='upper right')
plt.show()
def xarray_scatterplot_data_vars(dataset, figure_kwargs={'figsize': (12, 6)}, colors=['blue', 'orange'], markersize=5):
"""
Plot a scatterplot of all data variables in an xarray.Dataset on a shared set of axes.
Currently requires a 'time' coordinate, which constitutes the x-axis.
Parameters
----------
dataset: xarray.Dataset
The Dataset containing data variables to plot.
figure_kwargs: dict
A dictionary of kwargs for matplotlib figure creation.
colors: list
A list of strings denoting abbreviated colors for each data variable's points.
For example, 'r' is red and 'b' is blue.
markersize: float
The size of markers in the scatterplot.
"""
from scipy import stats
plt.figure(**figure_kwargs)
data_var_names = list(dataset.data_vars)
len_dataset = dataset.time.size
nan_mask = np.full(len_dataset, True)
for i, data_arr in enumerate(dataset.data_vars.values()):
if len(list(dataset.dims)) > 1:
dims_to_check_for_nulls = [dim for dim in list(dataset.dims) if dim != 'time']
nan_mask = nan_mask & data_arr.notnull().any(dim=dims_to_check_for_nulls).values
else:
nan_mask = data_arr.notnull().values
times = data_arr.to_dataframe().index.get_level_values('time').values
plt.scatter(stats.rankdata(times, method='dense') - 1, data_arr.values.flatten(), c=colors[i], s=markersize)
unique_times = dataset.time.values
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), unique_times)))
plt.xticks(np.arange(len(date_strs))[nan_mask], date_strs[nan_mask],
rotation=45, ha='right', rotation_mode='anchor')
plt.xlabel('time')
plt.legend(data_var_names, loc='upper right')
plt.show()
def xarray_plot_ndvi_boxplot_wofs_lineplot_over_time(dataset, resolution=None, colors=['orange', 'blue']):
"""
For an xarray.Dataset, plot a boxplot of NDVI and line plot of WOFS across time.
Parameters
----------
dataset: xarray.Dataset
A Dataset formatted as follows:
coordinates: time, latitude, longitude.
data variables: ndvi, wofs
resolution: str
Denotes the resolution of aggregation. Only options are None or 'weekly'.
colors: list
A list of strings denoting colors for each data variable's points.
For example, 'red' or 'blue' are acceptable.
"""
plotting_data = dataset.stack(lat_lon=('latitude', 'longitude'))
time_agg_str = 'weekofyear' if resolution is not None and resolution == 'weekly' else 'time'
if time_agg_str != 'time':
plotting_data = plotting_data.groupby('time.' + time_agg_str).mean(dim='time')
fig, ax = plt.subplots(figsize=(9, 6))
ndvi_box_color, wofs_line_color = ('orange', 'blue')
times = plotting_data[time_agg_str].values
# NDVI boxplot boxes
# The data formatted for matplotlib.pyplot.boxplot().
ndvi_formatted_data = xr.DataArray(np.full_like(plotting_data.ndvi.values, np.nan))
for i, time in enumerate(times):
ndvi_formatted_data.loc[i, :] = plotting_data.loc[{time_agg_str: time}].ndvi.values
ndvi_nan_mask = ~np.isnan(ndvi_formatted_data)
filtered_formatted_data = [] # Data formatted for matplotlib.pyplot.boxplot().
acq_inds_to_keep = [] # Indices of acquisitions to keep. Other indicies contain all nan values.
for i, (d, m) in enumerate(zip(ndvi_formatted_data, ndvi_nan_mask)):
if len(d[m] != 0):
filtered_formatted_data.append(d[m])
acq_inds_to_keep.append(i)
times_no_nan = times[acq_inds_to_keep]
epochs = np.array(list(map(n64_to_epoch, times_no_nan))) if time_agg_str == 'time' else None
x_locs = epochs if time_agg_str == 'time' else times_no_nan
box_width = 0.5 * np.min(np.diff(x_locs))
bp = ax.boxplot(filtered_formatted_data, widths=[box_width] * len(filtered_formatted_data),
positions=x_locs, patch_artist=True, boxprops=dict(facecolor=ndvi_box_color),
flierprops=dict(marker='o', markersize=0.25),
manage_ticks=False) # `manage_ticks=False` to avoid excessive padding on the x-axis.
# WOFS line
wofs_formatted_data = xr.DataArray(np.full_like(plotting_data.wofs.values, np.nan))
for i, time in enumerate(times):
wofs_formatted_data.loc[i, :] = plotting_data.loc[{time_agg_str: time}].wofs.values
wofs_line_plot_data = np.nanmean(wofs_formatted_data.values, axis=1)
wofs_nan_mask = ~np.isnan(wofs_line_plot_data)
line = ax.plot(x_locs, wofs_line_plot_data[wofs_nan_mask], c=wofs_line_color)
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times_no_nan))) if time_agg_str == 'time' else \
naive_months_ticks_by_week(times_no_nan)
x_labels = date_strs
plt.xticks(x_locs, x_labels, rotation=45, ha='right', rotation_mode='anchor')
plt.legend(handles=[bp['boxes'][0], line[0]], labels=list(plotting_data.data_vars), loc='best')
plt.tight_layout()
plt.show()
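# Illustrative call (editorial; `ds` is a hypothetical Dataset with coordinates
# time/latitude/longitude and data variables 'ndvi' and 'wofs', as required by
# the docstring above):
#
#     xarray_plot_ndvi_boxplot_wofs_lineplot_over_time(ds, resolution='weekly')
#
# With resolution='weekly' the data are grouped by 'time.weekofyear' and averaged
# over time before the boxplot/lineplot is drawn.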
def xarray_time_series_plot(dataset, plot_descs, x_coord='longitude',
y_coord='latitude', fig_params=None,
fig=None, ax=None, show_legend=True, title=None,
max_times_per_plot=None, max_cols=1):
"""
Plot data variables in an `xarray.Dataset` with different
plot types and optional curve fits. Handles data binned with
`xarray.Dataset` methods `resample()` and `groupby()`. That is, it handles data
binned along time (e.g. by week) or across years (e.g. by week of year).
Parameters
-----------
dataset: xarray.Dataset
A Dataset containing some bands like NDVI or WOFS.
It must have time, x, and y coordinates with names specified by
the 'x_coord' and 'y_coord' parameters.
plot_descs: dict
Dictionary mapping names of DataArrays in the Dataset to plot to
dictionaries mapping aggregation types (e.g. 'mean', 'median') to
lists of dictionaries mapping plot types
(e.g. 'line', 'box', 'scatter') to keyword arguments for plotting.
Aggregation happens within time slices and can be many-to-many or many-to-one.
Some plot types require many-to-many aggregation (e.g. 'none'), and some other plot types
require many-to-one aggregation (e.g. 'mean'). Aggregation types can be any of
['min', 'mean', 'median', 'none', 'max'], with 'none' performing no aggregation.
Plot types can be any of
['scatter', 'line', 'box', 'gaussian', 'gaussian_filter', 'poly', 'cubic_spline', 'fourier'].
Here are the required arguments, with format {plot_type: {arg_name: (data_type[, description]}}:
{'poly': {'degree': (int, "the degree of the polynomial to fit.")}}
Here are the optional arguments, with format {plot_type: {arg_name: (data_type[, description]}}:
# See matplotlib.axes.Axes.boxplot() for more information.
{'box': {'boxprops': dict, 'flierprops': dict, 'showfliers': bool},
# See gaussian_filter_fit() in data_cube_utilities/curve_fitting.py for more information.
'gaussian_filter': {'sigma': numeric},
'fourier':
{'extrap_time': (string, "a positive integer followed by Y, M, or D -
year, month, or day - specifying the amount of time to extrapolate over."),
'extrap_color': (matplotlib color, "a matplotlib color to color the extrapolated data with.")
}}
Additionally, all of the curve fits (['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']) support an optional 'smooth' boolean parameter.
If true, the curve fit is smoothed, otherwise it will look no smoother than the original data.
Here is an example:
{'ndvi':
{'mean':[{'line':{'color':'forestgreen', 'alpha':alpha}}],
'none':[{'box':{'boxprops':{'facecolor':'forestgreen','alpha':alpha},'showfliers':False}}]}}
This example will create a green line plot of the mean of the 'ndvi' band
as well as a green box plot of the 'ndvi' band.
x_coord, y_coord: str
Names of the x and y coordinates in `dataset`.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}). Used to create a Figure
`if fig is None and ax is None`.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first. This
argument is ignored if ``max_times_per_plot`` is less than the number of times.
ax: matplotlib.axes.Axes
The axes to use for the plot. This argument is ignored if
``max_times_per_plot`` is less than the number of times.
show_legend: bool
Whether or not to show the legend.
title: str
The title of each subplot. Note that a date range enclosed in parenthesis
will be postpended whether this is specified or not.
max_times_per_plot: int
The maximum number of times per plot. If specified, multiple plots may be created,
with each plot having as close to `num_times/max_times_per_plot` number of points
as possible, where `num_times` is the total number of plotting points, including
extrapolations. The plots will be arranged in a row-major grid, with the number
of columns being at most `max_cols`.
max_cols: int
The maximum number of columns in the plot grid.
Returns
-------
fig: matplotlib.figure.Figure
The figure containing the plot grid.
plotting_data: dict
A dictionary mapping 3-tuples of data array names, aggregation types, and plot types
(e.g. ('ndvi', 'none', 'box')) to `xarray.DataArray` objects of the data that was
plotted for those combinations of aggregation types and plot types.
Raises
------
ValueError:
If an aggregation type is not possible for a plot type
"""
fig_params = {} if fig_params is None else fig_params
# Lists of plot types that can and cannot accept many-to-one aggregation
# for each time slice, as well as plot types that support extrapolation.
plot_types_requiring_aggregation = ['line', 'gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']
plot_types_handling_aggregation = ['scatter'] + plot_types_requiring_aggregation
plot_types_not_handling_aggregation = ['box']
plot_types_curve_fit = ['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier']
plot_types_supporting_extrapolation = ['fourier']
all_plot_types = list(set(plot_types_requiring_aggregation + plot_types_handling_aggregation + \
plot_types_not_handling_aggregation + plot_types_curve_fit + \
plot_types_supporting_extrapolation))
# Aggregation types that aggregate all values for a given time to one value.
many_to_one_agg_types = ['min', 'mean', 'median', 'max']
# Aggregation types that aggregate to many values or do not aggregate.
many_to_many_agg_types = ['none']
all_agg_types = many_to_one_agg_types + many_to_many_agg_types
# Determine how the data was aggregated, if at all.
possible_time_agg_strs = ['time', 'week', 'month']
time_agg_str = 'time'
for possible_time_agg_str in possible_time_agg_strs:
if possible_time_agg_str in list(dataset.coords):
time_agg_str = possible_time_agg_str
break
# Make the data 2D - time and a stack of all other dimensions.
all_plotting_data_arrs = list(plot_descs.keys())
all_plotting_data = dataset[all_plotting_data_arrs]
all_times = all_plotting_data[time_agg_str].values
# Mask out times for which no data variable to plot has any non-NaN data.
nan_mask_data_vars = list(all_plotting_data[all_plotting_data_arrs] \
.notnull().data_vars.values())
for i, data_var in enumerate(nan_mask_data_vars):
time_nan_mask = data_var if i == 0 else time_nan_mask | data_var
time_nan_mask = time_nan_mask.any([x_coord, y_coord])
times_not_all_nan = all_times[time_nan_mask.values]
non_nan_plotting_data = all_plotting_data.loc[{time_agg_str: times_not_all_nan}]
# Determine the number of extrapolation data points. #
extrap_day_range = 0
n_extrap_pts = 0
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for i, plot_dict in enumerate(plot_dicts):
for plot_type, plot_kwargs in plot_dict.items():
# Only check the plot types supporting extrapolation.
if plot_type == 'fourier':
curr_extrap_day_range = 0
n_predict = 0 # Default to no extrapolation.
# Addressing this way to modify `plot_descs`.
extrap_time = plot_kwargs.get('extrap_time', None)
if extrap_time is not None:
assert time_agg_str == 'time', \
"Extrapolating for data with a time dimension other than 'time' - " \
"such as 'month', or 'week' - is not supported. A time dimension of 'month' " \
"or 'week' denotes data aggregated for each month or week across years, so " \
"extrapolation is meaningless in that case. Support for a time dimension of 'year' " \
"has not yet been added."
# Determine the number of points to extrapolate (in an approximate manner).
# First find the time range of the given data.
first_last_days = list(map(lambda np_dt_64: _n64_to_datetime(np_dt_64),
non_nan_plotting_data.time.values[[0, -1]]))
year_range = first_last_days[1].year - first_last_days[0].year
month_range = first_last_days[1].month - first_last_days[0].month
day_range = first_last_days[1].day - first_last_days[0].day
day_range = year_range * 365.25 + month_range * 30 + day_range
# Then find the time range of the extrapolation string.
fields = re.match(r"(?P<num>[0-9]{0,5})(?P<unit>[YMD])", extrap_time)
assert fields is not None, \
r"For the '{}' DataArray: When using 'fourier' as " \
"the fit type, if the 'extrap_time' parameter is supplied, it must be " \
"a string containing a positive integer followed by one of ['Y', 'M', or 'D']." \
.format(data_arr_name)
num, unit = int(fields['num']), fields['unit']
days_per_unit = dict(Y=365.25, M=30, D=1)[unit]
curr_extrap_day_range = num * days_per_unit
n_predict = round(len(non_nan_plotting_data[time_agg_str]) *
(curr_extrap_day_range / day_range))
plot_descs[data_arr_name][agg_type][i][plot_type] \
['n_predict'] = n_predict
# This parameter is used by get_curvefit() later.
extrap_day_range = max(extrap_day_range, curr_extrap_day_range)
n_extrap_pts = max(n_extrap_pts, n_predict)
# Collect (1) the times not containing only NaN values and (2) the extrapolation times.
if time_agg_str == 'time' and len(times_not_all_nan) > 0:
first_extrap_time = times_not_all_nan[-1] + np.timedelta64(extrap_day_range, 'D') / n_extrap_pts
last_extrap_time = times_not_all_nan[-1] + np.timedelta64(extrap_day_range, 'D')
extrap_times = np.linspace(_n64_datetime_to_scalar(first_extrap_time),
_n64_datetime_to_scalar(last_extrap_time), num=n_extrap_pts)
extrap_times = np.array(list(map(_scalar_to_n64_datetime, extrap_times)))
times_not_all_nan_and_extrap = np.concatenate((times_not_all_nan, extrap_times)) \
if len(extrap_times) > 0 else times_not_all_nan
else:
times_not_all_nan_and_extrap = times_not_all_nan
# Compute all of the plotting data - handling aggregations and extrapolations.
plotting_data_not_nan_and_extrap = {} # Maps data array names to plotting data (NumPy arrays).
# Get the x locations of data points not filled with NaNs and the x locations of extrapolation points.
epochs = np.array(list(map(n64_to_epoch, times_not_all_nan_and_extrap))) \
if time_agg_str == 'time' else times_not_all_nan_and_extrap
epochs_not_extrap = epochs[:len(times_not_all_nan)]
# Handle aggregations and curve fits. #
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
data_arr_plotting_data = non_nan_plotting_data[data_arr_name]
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for i, plot_dict in enumerate(plot_dicts):
for plot_type, plot_kwargs in plot_dict.items():
assert plot_type in all_plot_types, \
r"For the '{}' DataArray: plot_type '{}' not recognized" \
.format(data_arr_name, plot_type)
# Ensure aggregation types are legal for this data.
# Some plot types require aggregation.
if plot_type in plot_types_requiring_aggregation:
if agg_type not in many_to_one_agg_types:
raise ValueError("For the '{}' DataArray: the plot type "
"'{}' only accepts many-to-one aggregation (currently using '{}'). "
"Please pass any of {} as the aggregation type "
"or change the plot type.".format(data_arr_name, \
plot_type, agg_type,
many_to_one_agg_types))
# Some plot types cannot accept many-to-one aggregation.
if plot_type not in plot_types_handling_aggregation:
if agg_type not in many_to_many_agg_types:
raise ValueError("For the '{}' DataArray: "
"the plot type '{}' only accepts many-to-many aggregation "
"(currently using '{}'). Please pass any of {} as "
"the aggregation type or change the plot type."
.format(data_arr_name, plot_type, agg_type,
many_to_many_agg_types))
# Aggregate if necessary.
y = data_arr_plotting_data
if agg_type == 'min':
y = y.min([x_coord, y_coord])
if agg_type == 'mean':
y = y.mean([x_coord, y_coord])
if agg_type == 'median':
y = y.median([x_coord, y_coord])
if agg_type == 'max':
y = y.max([x_coord, y_coord])
# Handle curve fits.
if plot_type in plot_types_curve_fit:
smooth = plot_kwargs.get('smooth', True)
# Create the curve fit.
x_smooth = None if smooth else epochs_not_extrap
data_arr_epochs, y = get_curvefit(epochs_not_extrap, y.values, fit_type=plot_type,
x_smooth=x_smooth, fit_kwargs=plot_kwargs)
# Convert time stamps to NumPy datetime objects.
data_arr_times = np.array(list(map(_scalar_to_n64_datetime, data_arr_epochs))) \
if time_agg_str == 'time' else data_arr_epochs
# Convert the NumPy array into an xarray DataArray.
coords = {time_agg_str: data_arr_times}
dims = list(coords.keys())
y = xr.DataArray(y, coords=coords, dims=dims)
plotting_data_not_nan_and_extrap[(data_arr_name, agg_type, plot_type)] = y
# Handle the potential for multiple plots.
max_times_per_plot = len(times_not_all_nan_and_extrap) if max_times_per_plot is None else \
max_times_per_plot
num_times = len(times_not_all_nan_and_extrap)
num_plots = int(np.ceil(num_times / max_times_per_plot))
num_times_per_plot = round(num_times / num_plots) if num_plots != 0 else 0
num_cols = min(num_plots, max_cols)
num_rows = int(np.ceil(num_plots / num_cols)) if num_cols != 0 else 0
# Set a reasonable figsize if one is not set in `fig_params`.
fig_params.setdefault('figsize', (12 * num_cols, 6 * num_rows))
fig = plt.figure(**fig_params) if fig is None else fig
# Check if there are no plots to make.
if num_plots == 0:
return fig, plotting_data_not_nan_and_extrap
# Create each plot. #
for time_ind, ax_ind in zip(range(0, len(times_not_all_nan_and_extrap), num_times_per_plot),
range(num_plots)):
# The time bounds of this canvas (or "Axes object" or "plot grid cell").
ax_lower_time_bound_ind, ax_upper_time_bound_ind = \
time_ind, min(time_ind + num_times_per_plot, len(times_not_all_nan_and_extrap))
# Retrieve or create the axes if necessary.
if len(times_not_all_nan_and_extrap) <= num_times_per_plot:
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_params)
else:
ax = fig.add_subplot(num_rows, num_cols, ax_ind + 1)
ax_times_not_all_nan_and_extrap = \
times_not_all_nan_and_extrap[ax_lower_time_bound_ind:ax_upper_time_bound_ind]
ax_time_bounds = ax_times_not_all_nan_and_extrap[[0, -1]]
ax_epochs = epochs[ax_lower_time_bound_ind:ax_upper_time_bound_ind]
ax_x_locs = np_scale(ax_epochs if time_agg_str == 'time' else ax_times_not_all_nan_and_extrap)
# Data variable plots within each plot.
data_arr_plots = []
legend_labels = []
# For each data array to plot...
for data_arr_name, agg_dict in plot_descs.items():
# For each aggregation type (e.g. 'mean', 'median')...
for agg_type, plot_dicts in agg_dict.items():
# For each plot for this aggregation type...
for plot_dict in plot_dicts:
for plot_type, plot_kwargs in plot_dict.items():
# Determine the legend label for this plot.
plot_type_str = \
{'scatter': 'scatterplot', 'line': 'lineplot',
'box': 'boxplot', 'gaussian': 'gaussian fit',
'gaussian_filter': 'gaussian filter fit',
'poly': 'degree {} polynomial fit',
'cubic_spline': 'cubic spline fit',
'fourier': 'Fourier fit ({} harmonics)'}[plot_type]
if plot_type == 'poly':
assert 'degree' in plot_kwargs, \
r"For the '{}' DataArray: When using 'poly' as " \
"the fit type, the fit kwargs must have 'degree' " \
"specified.".format(data_arr_name)
plot_type_str = plot_type_str.format(
plot_kwargs.get('degree'))
if plot_type == 'fourier':
plot_type_str = plot_type_str.format(
plot_kwargs.get('n_harm', default_fourier_n_harm))
# Legend labels for the non-extrapolation
# and extrapolation segments
plot_type_strs = []
# Remove plot kwargs that are not recognized
# by plotting methods (cause errors).
plot_kwargs = plot_kwargs.copy()
plot_kwargs.pop('extrap_time', None)
plot_kwargs.pop('n_predict', None)
plot_kwargs.pop('smooth', None)
plot_kwargs.pop('degree', None) # 'degree'
plot_kwargs.pop('n_harm', None) # 'fourier'
# Handle default plot kwargs.
if plot_type == 'box':
plot_kwargs.setdefault('boxprops',
dict(facecolor='orange'))
plot_kwargs.setdefault('flierprops',
dict(marker='o', markersize=0.5))
plot_kwargs.setdefault('showfliers', False)
# Retrieve the plotting data.
y = plotting_data_not_nan_and_extrap[
(data_arr_name, agg_type, plot_type)]
y = y.sel({time_agg_str:
slice(ax_time_bounds[0], ax_time_bounds[1])})
# Handle cases of insufficient data for this section of the plot.
not_nat_times = None
if time_agg_str == 'time':
not_nat_times = ~np.isnat(y[time_agg_str].values)
else:
not_nat_times = ~np.isnan(y[time_agg_str].values)
num_unique_times_y = len(np.unique(y[time_agg_str].values[not_nat_times]))
if num_unique_times_y == 0: # There is no data.
continue
if num_unique_times_y == 1: # There is 1 data point.
plot_type = 'scatter'
plot_kwargs = {}
data_arr_epochs = \
np.array(list(map(n64_to_epoch, y[time_agg_str].values))) \
if time_agg_str == 'time' else \
ax_times_not_all_nan_and_extrap
data_arr_x_locs = np.interp(data_arr_epochs,
ax_epochs, ax_x_locs)
data_arr_time_bounds = y[time_agg_str].values[[0, -1]]
# Determine if this plotting data includes extrapolated values.
data_arr_non_extrap_time_bounds = None
data_arr_has_non_extrap = \
data_arr_time_bounds[0] < times_not_all_nan[-1]
if data_arr_has_non_extrap:
data_arr_non_extrap_time_bounds = \
[data_arr_time_bounds[0], min(data_arr_time_bounds[1],
times_not_all_nan[-1])]
# Because the data could be smoothed, the last
# non-extrapolation time is the last time before
# or at the last non-extrapolation time
# for the original data.
non_extrap_plot_last_time = data_arr_non_extrap_time_bounds[1]
if num_unique_times_y > 1:
non_extrap_plot_last_time = \
y.sel({time_agg_str: data_arr_non_extrap_time_bounds[1]},
method='ffill')[time_agg_str].values
data_arr_non_extrap_plotting_time_bounds = [data_arr_non_extrap_time_bounds[0],
non_extrap_plot_last_time]
data_arr_extrap_time_bounds = None
data_arr_has_extrap = times_not_all_nan[-1] < data_arr_time_bounds[1]
if data_arr_has_extrap:
data_arr_extrap_time_bounds = [max(data_arr_time_bounds[0],
extrap_times[0]),
data_arr_time_bounds[1]]
# Because the data could be smoothed, the first extrapolation time
# is the first time after the last non-extrapolation time for the original data.
extrap_plot_first_time = \
y.sel({time_agg_str: data_arr_non_extrap_time_bounds[1]},
method='ffill')[time_agg_str].values \
if data_arr_has_non_extrap else \
data_arr_time_bounds[0]
data_arr_extrap_plotting_time_bounds = [extrap_plot_first_time,
data_arr_extrap_time_bounds[1]]
# Separate non-extrapolation and extrapolation data.
if data_arr_has_non_extrap:
data_arr_non_extrap = \
y.sel({time_agg_str: slice(*data_arr_non_extrap_plotting_time_bounds)})
data_arr_non_extrap_epochs = \
np.array(list(map(n64_to_epoch, data_arr_non_extrap[time_agg_str].values))) \
if time_agg_str == 'time' else data_arr_non_extrap[time_agg_str].values
data_arr_non_extrap_x_locs = \
np.interp(data_arr_non_extrap_epochs, ax_epochs, ax_x_locs)
# Format plotting kwargs for the non-extrapolation data.
plot_kwargs_non_extrap = plot_kwargs.copy()
plot_kwargs_non_extrap.pop('extrap_color', None)
if data_arr_has_extrap:
# Include the last non-extrapolation point so the
# non-extrapolation and extrapolation lines connect.
data_arr_extrap = \
y.sel({time_agg_str: slice(*data_arr_extrap_plotting_time_bounds)})
data_arr_extrap_epochs = \
np.array(list(map(n64_to_epoch, data_arr_extrap[time_agg_str].values))) \
if time_agg_str == 'time' else data_arr_extrap[time_agg_str].values
data_arr_extrap_x_locs = \
np.interp(data_arr_extrap_epochs, ax_epochs, ax_x_locs)
# Format plotting kwargs for the extrapolation data.
plot_kwargs_extrap = plot_kwargs.copy()
extrap_color = plot_kwargs_extrap.pop('extrap_color', None)
if extrap_color is not None:
plot_kwargs_extrap['color'] = extrap_color
# Specify non-extrap and extrap plotting args.
if data_arr_has_non_extrap:
plot_args_non_extrap = \
[data_arr_non_extrap_x_locs, data_arr_non_extrap]
if data_arr_has_extrap:
plot_args_extrap = \
[data_arr_extrap_x_locs, data_arr_extrap]
# Actually create the plot.
def create_plot(x_locs, data_arr, **plot_kwargs):
"""
Creates a plot
Parameters
----------
x_locs: np.ndarray
A 1D NumPy array containing ascending values
in range [0,1], denoting the x locations on the current
canvas at which to plot data with corresponding time
indices in `data_arr`.
data_arr: xarray.DataArray
An `xarray.DataArray` containing a dimension named
`time_agg_str` (the value of that variable in this context).
Returns
-------
plot_obj: matplotlib.artist.Artist
The plot.
"""
plot_obj = None
if plot_type == 'scatter':
data_arr_dims = list(data_arr.dims)
data_arr_flat = data_arr.stack(flat=data_arr_dims)
plot_obj = ax.scatter(x_locs, data_arr_flat)
elif plot_type in ['line', 'gaussian', 'gaussian_filter',
'poly', 'cubic_spline', 'fourier']:
plot_obj = ax.plot(x_locs, data_arr)[0]
elif plot_type == 'box':
boxplot_nan_mask = ~np.isnan(data_arr)
# Data formatted for matplotlib.pyplot.boxplot().
filtered_formatted_data = []
for i, (d, m) in enumerate(zip(data_arr.values,
boxplot_nan_mask.values)):
if len(d[m]) != 0:
filtered_formatted_data.append(d[m])
box_width = 0.5 * np.min(np.diff(x_locs)) \
if len(x_locs) > 1 else 0.5
# `manage_ticks=False` to avoid excessive padding on x-axis.
bp = ax.boxplot(filtered_formatted_data,
widths=[box_width] * len(filtered_formatted_data),
positions=x_locs, patch_artist=True,
manage_ticks=False, **plot_kwargs)
plot_obj = bp['boxes'][0]
return plot_obj
if data_arr_has_non_extrap:
plot_obj = create_plot(*plot_args_non_extrap, **plot_kwargs_non_extrap)
data_arr_plots.append(plot_obj)
plot_type_strs.append(plot_type_str)
if data_arr_has_extrap and plot_type in plot_types_supporting_extrapolation:
plot_obj = create_plot(*plot_args_extrap, **plot_kwargs_extrap)
data_arr_plots.append(plot_obj)
plot_type_strs.append('extrapolation of ' + plot_type_str)
plot_type_str_suffix = ' of {}'.format(agg_type) if agg_type != 'none' else ''
plot_type_strs = [plot_type_str + plot_type_str_suffix
for plot_type_str in plot_type_strs]
for plot_type_str in plot_type_strs:
    legend_labels.append('{} of {}'.format(plot_type_str, data_arr_name))
# Label the axes and create the legend.
date_strs = \
np.array(list(map(lambda time: np_dt64_to_str(time), ax_times_not_all_nan_and_extrap))) \
if time_agg_str == 'time' else \
naive_months_ticks_by_week(ax_times_not_all_nan_and_extrap) \
if time_agg_str in ['week', 'weekofyear'] else \
month_ints_to_month_names(ax_times_not_all_nan_and_extrap)
plt.xticks(ax_x_locs, date_strs, rotation=45, ha='right', rotation_mode='anchor')
if show_legend:
ax.legend(handles=data_arr_plots, labels=legend_labels, loc='best')
title_postpend = " ({} to {})".format(date_strs[0], date_strs[-1])
title_prepend = "Figure {}".format(ax_ind) if title is None else title
ax.set_title(title_prepend + title_postpend)
return fig, plotting_data_not_nan_and_extrap
## Curve fitting ##
def get_curvefit(x, y, fit_type, x_smooth=None, n_pts=n_pts_smooth, fit_kwargs=None):
"""
Gets a curve fit given x values, y values, a type of curve, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of
['gaussian', 'gaussian_filter', 'poly',
'cubic_spline', 'fourier'].
The option 'gaussian' creates a Gaussian fit.
The option 'gaussian_filter' creates a Gaussian filter fit.
The option 'poly' creates a polynomial fit.
The option 'cubic_spline' creates a cubic spline fit.
The option 'fourier' creates a Fourier curve fit.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fit_kwargs: dict
Keyword arguments for the selected fit type.
In the case of `fit_type == 'poly'`, this must contain a 'degree' entry (an int),
which is the degree of the polynomial to fit.
In the case of `fit_type == 'gaussian_filter'`, this may contain a 'sigma' entry,
which is the standard deviation of the Gaussian kernel.
A larger value yields a smoother but less close-fitting curve.
In the case of `fit_type == 'fourier'`, this may contain 'n_predict' or 'n_harm' entries.
The 'n_predict' entry is the number of points to extrapolate.
The points will be spaced evenly by the mean spacing of values in `x`.
The 'n_harm' entry is the number of harmonics to use.
A higher value yields a closer fit.
Returns
-------
x_smooth, y_smooth: numpy.ndarray
The smoothed x and y values of the curve fit.
If there are no non-NaN values in `y`, these will be filled with `n_pts` NaNs.
If there is only 1 non-NaN value in `y`, these will be filled with
their corresponding values (y or x value) for that point to a length of `n_pts`.
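Examples
--------
A minimal usage sketch (the arrays `x` and `y` here are illustrative 1D NumPy
arrays, not objects defined in this module):
get_curvefit(x, y, fit_type='poly', fit_kwargs=dict(degree=2))
get_curvefit(x, y, fit_type='gaussian_filter', n_pts=100, fit_kwargs=dict(sigma=2))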
"""
from scipy.interpolate import CubicSpline
from .curve_fitting import gaussian_fit, gaussian_filter_fit, poly_fit, fourier_fit
interpolation_curve_fits = ['gaussian', 'gaussian_filter',
'poly', 'cubic_spline']
extrapolation_curve_fits = ['fourier']
fit_kwargs = {} if fit_kwargs is None else fit_kwargs
# Handle NaNs (omit them).
not_nan_mask = ~np.isnan(y)
x = x[not_nan_mask]; y = y[not_nan_mask]
# Handle the cases of there being too few points to curve fit.
if len(y) == 0:
x_smooth = np.repeat(np.nan, n_pts)
y_smooth = np.repeat(np.nan, n_pts)
return x_smooth, y_smooth
if len(y) == 1:
x_smooth = np.repeat(x[0], n_pts)
y_smooth = np.repeat(y[0], n_pts)
return x_smooth, y_smooth
if x_smooth is None:
x_smooth_inds = np.linspace(0, len(x) - 1, n_pts)
x_smooth = np.interp(x_smooth_inds, np.arange(len(x)), x)
opt_params = {}
if fit_type == 'gaussian':
x_smooth, y_smooth = gaussian_fit(x, y, x_smooth)
elif fit_type == 'gaussian_filter':
if 'sigma' in fit_kwargs:
opt_params.update(dict(sigma=fit_kwargs.get('sigma')))
x_smooth, y_smooth = gaussian_filter_fit(x, y, x_smooth,
**opt_params)
elif fit_type == 'poly':
assert 'degree' in fit_kwargs.keys(), \
"When plotting a polynomal fit, there must be" \
"a 'degree' entry in the plot_kwargs parameter."
degree = fit_kwargs.get('degree')
x_smooth, y_smooth = poly_fit(x, y, degree, x_smooth)
elif fit_type == 'cubic_spline':
cs = CubicSpline(x, y)
y_smooth = cs(x_smooth)
if fit_type in extrapolation_curve_fits:
n_predict = fit_kwargs.get('n_predict', 0)
if fit_type == 'fourier':
if 'n_harm' in fit_kwargs:
opt_params.update(dict(n_harm=fit_kwargs.get('n_harm')))
x_smooth, y_smooth = \
fourier_fit(x, y, n_predict, x_smooth,
**opt_params)
return x_smooth, y_smooth
def plot_curvefit(x, y, fit_type, x_smooth=None, n_pts=n_pts_smooth, fig_params={}, plot_kwargs={}, fig=None, ax=None):
"""
**This function is DEPRECATED.**
Plots a curve fit given x values, y values, a type of curve to plot, and parameters for that curve.
Parameters
----------
x: np.ndarray
A 1D NumPy array. The x values to fit to.
y: np.ndarray
A 1D NumPy array. The y values to fit to.
fit_type: str
The type of curve to fit. One of ['gaussian', 'gaussian_filter', 'poly', 'cubic_spline'].
The option 'gaussian' plots a Gaussian fit.
The option 'gaussian_filter' plots a Gaussian filter fit.
The option 'poly' plots a polynomial fit.
The option 'cubic_spline' plots a cubic spline fit.
x_smooth: list-like
The exact x values to interpolate for. Supersedes `n_pts`.
n_pts: int
The number of evenly spaced points spanning the range of `x` to interpolate for.
fig_params: dict
Figure parameters dictionary (e.g. {'figsize':(12,6)}).
Used to create a Figure ``if fig is None and ax is None``.
plot_kwargs: dict
The kwargs for the call to ``matplotlib.axes.Axes.plot()``.
fig: matplotlib.figure.Figure
The figure to use for the plot. The figure must have at least one Axes object.
You can use the code ``fig,ax = plt.subplots()`` to create a figure with an associated Axes object.
The code ``fig = plt.figure()`` will not provide the Axes object.
The Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
Returns
-------
lines: matplotlib.lines.Line2D
Can be used as a handle for a matplotlib legend (i.e. plt.legend(handles=...)) among other things.
"""
from scipy.interpolate import CubicSpline
from .curve_fitting import gaussian_fit, gaussian_filter_fit, poly_fit, fourier_fit
# Avoid modifying the original arguments.
fig_params, plot_kwargs = fig_params.copy(), plot_kwargs.copy()
fig_params.setdefault('figsize', (12, 6))
plot_kwargs.setdefault('linestyle', '-')
# Retrieve or create the axes if necessary.
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_params)
if x_smooth is None:
x_smooth = np.linspace(x.min(), x.max(), n_pts)
if fit_type == 'gaussian':
y_smooth = gaussian_fit(x, y, x_smooth)
elif fit_type == 'gaussian_filter':
sigma = plot_kwargs.pop('sigma', None)
y_smooth = gaussian_filter_fit(x, y, x_smooth, sigma=sigma)
elif fit_type == 'poly':
assert 'degree' in plot_kwargs.keys(), "When plotting a polynomial fit, there must be " \
"a 'degree' entry in the plot_kwargs parameter."
degree = plot_kwargs.pop('degree')
y_smooth = poly_fit(x, y, degree, x_smooth)
elif fit_type == 'cubic_spline':
cs = CubicSpline(x, y)
y_smooth = cs(x_smooth)
return ax.plot(x_smooth, y_smooth, **plot_kwargs)[0]
## End curve fitting ##
def plot_band(dataset, figsize=(20, 15), fontsize=24, legend_fontsize=24):
"""
Plots several statistics over time - including mean, median, linear regression of the
means, Gaussian smoothed curve of means, and the band enclosing the 25th and 75th percentiles.
This is very similar to the output of the Comet Time Series Toolset (https://github.com/CosmiQ/CometTS).
Parameters
----------
dataset: xarray.DataArray
An xarray `DataArray` containing time, latitude, and longitude coordinates.
figsize: tuple
A 2-tuple of the figure size in inches for the entire figure.
fontsize: int
The font size to use for text.
legend_fontsize: int
The font size to use for the legend text.
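Examples
--------
A minimal usage sketch, assuming `da` is an `xarray.DataArray` with 'time',
'latitude', and 'longitude' coordinates (e.g. an NDVI time series; the name is illustrative):
plot_band(da, figsize=(20, 15), fontsize=24, legend_fontsize=20)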
"""
# Calculations
times = dataset.time.values
epochs = np.sort(np.array(list(map(n64_to_epoch, times))))
x_locs = (epochs - epochs.min()) / (epochs.max() - epochs.min())
means = dataset.mean(dim=['latitude', 'longitude'], skipna=True).values
medians = dataset.median(dim=['latitude', 'longitude'], skipna=True).values
mask = ~np.isnan(means) & ~np.isnan(medians)
plt.figure(figsize=figsize)
ax = plt.gca()
# Shaded Area (percentiles)
with warnings.catch_warnings():
# Ignore warning about encountering an All-NaN slice. Some acquisitions have all-NaN values.
warnings.simplefilter("ignore", category=RuntimeWarning)
quarter = np.nanpercentile(
dataset.values.reshape((
len(dataset['time']),
len(dataset['latitude']) * len(dataset['longitude']))),
25,
axis=1
)
three_quarters = np.nanpercentile(
dataset.values.reshape((
len(dataset['time']),
len(dataset['latitude']) * len(dataset['longitude']))),
75,
axis=1
)
ax.grid(color='lightgray', linestyle='-', linewidth=1)
fillcolor = 'gray'
fillalpha = 0.4
plt.fill_between(x_locs, quarter, three_quarters, interpolate=False, color=fillcolor, alpha=fillalpha,
label="25th and 75th percentile band")
# Medians
plt.plot(x_locs, medians, color="black", marker="o", linestyle='None', label="Medians")
# The Actual Plot
plt.plot(x_locs, means, color="blue", label="Mean")
# Linear Regression (on mean)
m, b = np.polyfit(x_locs[mask], means[mask], 1)
plt.plot(x_locs, m * x_locs + b, '-', color="red", label="linear regression of means", linewidth=3.0)
# Gaussian Curve
plot_curvefit(x_locs[mask], means[mask], fit_type='gaussian', ax=ax,
plot_kwargs=dict(linestyle='-', label="Gaussian smoothed of means",
alpha=1, color='limegreen', linewidth=3.0))
# Formatting
date_strs = np.array(list(map(lambda time: np_dt64_to_str(time), times[mask])))
ax.grid(color='k', alpha=0.1, linestyle='-', linewidth=1)
ax.xaxis.set_major_formatter(FuncFormatter(tfmt))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=legend_fontsize)
plt.xticks(x_locs, date_strs, rotation=45, fontsize=fontsize)
plt.yticks(fontsize=fontsize)
ax.set_xlabel('Time', fontsize=fontsize)
ax.set_ylabel('Value', fontsize=fontsize)
plt.show()
## Color utils ##
def convert_name_rgb_255(color):
"""
Converts the name of a matplotlib color to a list of rgb values in the range [0,255].
If `color` is not a string, it is returned unchanged.
Parameters
----------
color: str
The color name to convert to an rgb list.
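Examples
--------
For example:
convert_name_rgb_255('red')        # [255, 0, 0]
convert_name_rgb_255([0, 128, 0])  # returned unchanged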
"""
return [int(255 * rgb) for rgb in mpl.colors.to_rgb(color)] if isinstance(color, str) else color
def convert_name_rgba_255(color):
"""
Converts the name of a matplotlib color to a list of rgba values in the range [0,255].
If `color` is not a string, it is returned unchanged.
Parameters
----------
color: str
The color name to convert to an rgba list.
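Examples
--------
For example:
convert_name_rgba_255('red')  # [255, 0, 0, 255]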
"""
return [*convert_name_rgb_255(color), 255] if isinstance(color, str) else color
def norm_color(color):
"""
Converts either a string name of a matplotlib color or a 3-tuple of rgb values
in the range [0,255] to a 3-tuple of rgb values in the range [0,1].
Parameters
----------
color: str or list-like of numeric
The name of a matplotlib color or a list-like of 3 rgb values in the range [0,255].
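Examples
--------
For example:
norm_color('red')        # [1.0, 0.0, 0.0]
norm_color([255, 0, 0])  # [1.0, 0.0, 0.0]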
"""
color = convert_name_rgb_255(color)
if len(color) == 3:
color = [rgb / 255 for rgb in color]
return color
## End color utils ##
## Matplotlib colormap functions ##
def create_discrete_color_map(data_range=None, colors=None, cmap=None,
th=None, pts=None, cmap_name='my_cmap',
data_range_fmt=None, pts_fmt=None):
"""
Creates a discrete `matplotlib.colors.LinearSegmentedColormap` with thresholds for color changes.
Exclusively either `colors` or `cmap` must be specified (i.e. one and only one).
At least one of the parameters `th` or `pts` may be specified, but not both.
Parameters
----------
data_range: list
A 2-tuple of the minimum and maximum values the data may take.
Can be omitted if `pts` is specified as a list-like of points.
colors: list-like
Colors to use between thresholds specified in `th` or around points specified in `pts`.
Colors can be string names of matplotlib colors, 3-tuples of rgb values in range [0,255],
or 4-tuples of rgba values in range [0,1].
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color data in the regions between thresholds
specified in `th` or around points specified in `pts`.
th: list-like of float
Threshold values separating colors, so `len(colors) == len(th)+1`.
Must be in the range of `data_range` - noninclusive.
pts: int or list-like of float
Points around which to color the same. This can be either an integer
specifying the number of evenly-spaced points to use or a list-like of points,
in which case values must be in the range of `data_range` - inclusive.
The thresholds used will be the midpoints between points in `pts`.
cmap_name: str
The name of the created colormap for matplotlib.
data_range_fmt: list-like of size 2
A mutable container intended to hold values used to set vmin and vmax, respectively, of
`pyplot.imshow()` for the purpose of formatting a colorbar. Only useful if `pts` is
specified as a list-like.
pts_fmt: list-like
A mutable container intended to hold the midpoints of the thresholds. This must have the same length
as the number of points specified by `pts` or have a length of `len(th)+1`.
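Examples
--------
A minimal sketch creating a 3-color discrete colormap over data in [0,1] with
color changes at 0.25 and 0.75:
create_discrete_color_map(data_range=(0,1), colors=['red', 'yellow', 'green'], th=[0.25, 0.75])
The same colors with evenly spaced regions can be obtained by omitting `th`:
create_discrete_color_map(data_range=(0,1), colors=['red', 'yellow', 'green'])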
"""
assert (colors is None) ^ (cmap is None), \
"Exclusively either `colors` or `cmap` must be specified."
assert th is None or pts is None, \
"The parameters `th` or `pts` may be specified, but not both."
cmap = plt.get_cmap(cmap) if isinstance(cmap, str) else cmap
if th is None: # If `th` is not supplied, construct it based on other arguments.
if pts is not None:
if isinstance(pts, int): # Use `pts` as the number of evenly-spaced points.
assert pts > 0, "The number of points specified by `pts` must be positive."
th_spacing = (data_range[1] - data_range[0]) / pts
th = np.linspace(data_range[0] + th_spacing, data_range[1] - th_spacing, pts - 1)
else: # Use `pts` as a list-like of points to put thresholds between.
assert data_range[0] <= min(pts) and max(pts) <= data_range[1], \
"The values in `pts` must be within `data_range`, inclusive."
assert len(pts) > 0, "The parameter `pts` is a list, but it has no elements. " \
"Please ensure it has one or more numeric elements."
if len(pts) == 1:
th = []
elif len(pts) > 1:
# Choose imaginary lower and upper bounds of the data to scale `pts` with
# so that the first and last color regions are sized appropriately.
data_range_fmt = [None] * 2 if data_range_fmt is None else data_range_fmt
data_range_fmt[0] = pts[0] - (pts[1] - pts[0]) / 2
data_range_fmt[1] = pts[-1] + (pts[-1] - pts[-2]) / 2
pts = np.interp(pts, data_range_fmt, data_range)
th = [pts[ind - 1] + (pts[ind] - pts[ind - 1]) / 2 for ind in range(1, len(pts))]
else:
assert colors is not None, \
"If neither `th` nor `pts` are specified, `colors` must be specified."
th_spacing = (data_range[1] - data_range[0]) / len(colors)
th = np.linspace(data_range[0] + th_spacing, data_range[1] - th_spacing, len(colors) - 1)
else:
assert len(th) == 0 or (data_range[0] < min(th) and max(th) < data_range[1]), \
"The values in `th` must be within `data_range`, exclusive."
# Normalize threshold values based on the data range.
th = np.interp(th, data_range, (0, 1))
th = [0.0] + list(th) + [1.0]
if pts_fmt is not None:
for ind in range(len(th) - 1):
pts_fmt[ind] = th[ind] + (th[ind + 1] - th[ind]) / 2
if colors is None: # If `colors` is not supplied, construct it based on other arguments.
assert cmap is not None, \
"If `colors` is not specified, `cmap` must be specified."
colors = [cmap(th[ind - 1] + (th[ind] - th[ind - 1]) / 2) for ind in range(1, len(th))]
else:
colors = list(map(norm_color, colors))
cdict = {}
# These are fully-saturated red, green, and blue - not the matplotlib colors for 'red', 'green', and 'blue'.
primary_colors = ['red', 'green', 'blue']
# Get the 3-tuples of rgb values for the colors.
color_rgbs = [(mpl.colors.to_rgb(color) if isinstance(color, str) else color) for color in colors]
# For each color entry to go into the color dictionary...
for primary_color_ind, primary_color in enumerate(primary_colors):
cdict_entry = [None] * len(th)
# For each threshold (as well as 0.0 and 1.0), specify the values for this primary color.
for row_ind, th_ind in enumerate(range(len(th))):
# Get the two colors that this threshold corresponds to.
th_color_inds = [0, 0] if th_ind == 0 else \
[len(colors) - 1, len(colors) - 1] if th_ind == len(th) - 1 else \
[th_ind - 1, th_ind]
primary_color_vals = [color_rgbs[th_color_ind][primary_color_ind] for th_color_ind in th_color_inds]
cdict_entry[row_ind] = (th[th_ind],) + tuple(primary_color_vals)
cdict[primary_color] = cdict_entry
cmap = LinearSegmentedColormap(cmap_name, cdict)
return cmap
def create_gradient_color_map(data_range, colors, positions=None, cmap_name='my_cmap'):
"""
Creates a gradient colormap with a `matplotlib.colors.LinearSegmentedColormap`.
Currently only creates linear gradients.
Parameters
----------
data_range: list-like
A 2-tuple of the minimum and maximum values the data may take.
colors: list of str or list of tuple
Colors can be string names of matplotlib colors or 3-tuples of rgb values in range [0,255].
The first and last colors are placed at the beginning and end of the colormap, respectively.
positions: list-like
The values which are colored with corresponding colors in `colors`,
except the first and last colors, so `len(positions) == len(colors)-2`.
Positions must be in the range of `data_range` - noninclusive.
If no positions are provided, the colors are evenly spaced.
cmap_name: str
The name of the created colormap for matplotlib.
Examples
--------
Creating a linear gradient colormap of red, green, and blue, with even spacing between them:
create_gradient_color_map(data_range=(0,1), positions=(0.5,), colors=('red', 'green', 'blue'))
Which can also be done without specifying `positions`:
create_gradient_color_map(data_range=(0,1), colors=('red', 'green', 'blue'))
"""
# Normalize position values based on the data range.
if positions is None:
    spacing = 1 / (len(colors) - 1)
    positions = [spacing * i for i in range(1, len(colors) - 1)]
else:
positions = list(map(lambda val: (val - data_range[0]) / (data_range[1] - data_range[0]), positions))
colors = list(map(norm_color, colors)) # Normalize color values for colormap creation.
positions = [0.0] + positions + [1.0]
# Get the 3-tuples of rgb values for the colors.
color_rgbs = [(mpl.colors.to_rgb(color) if isinstance(color, str) else color) for color in colors]
cdict = {'red': [], 'green': [], 'blue': []}
for pos, color in zip(positions, color_rgbs):
cdict['red'].append((pos, color[0], color[0]))
cdict['green'].append((pos, color[1], color[1]))
cdict['blue'].append((pos, color[2], color[2]))
return LinearSegmentedColormap(cmap_name, cdict)
## End matplotlib colormap functions ##
### Discrete color plotting (exclusive) ###
def binary_class_change_plot(dataarrays, clean_masks=None, x_coord='longitude', y_coord='latitude',
colors=None, override_mask=None, override_color=None,
neg_trans=False, pos_trans=False,
class_legend_label=None, width=10, fig=None, ax=None,
fig_kwargs={}, title_kwargs={}, imshow_kwargs={},
x_label_kwargs={}, y_label_kwargs={}, legend_kwargs={},
create_stats_table=True, create_change_matrix=True,
denoise=True, denoise_params=None):
"""
Creates a figure showing one of the following, depending on the format of arguments.
1. The change in the extents of a binary pixel classification in a region over time.
Pixels are colored based on never, sometimes, or always being a member of the class.
In this case, there are 3 regions - never, sometimes, and always.
2. The change in the extents of a binary pixel classification in a region over time between
two time periods. Pixels are colored based on a change in having zero or more than zero
times in which they are members of the class between the time periods.
In this case, there are 4 regions - (never,never),(never,some),(some,never),(some,some).
Parameters
----------
dataarrays: list-like of xarray.DataArray
A list-like of one or two DataArrays of classification values
to plot, which must be either 0 or 1.
clean_masks: list-like of xarray.DataArray
A list-like of one or two DataArrays of boolean values denoting
where the `xarray.DataArray` objects in `dataarrays` are considered
clean. Any non-clean values in `dataarrays` will be ignored.
If specified, every entry in `dataarrays` must have a corresponding entry in `clean_masks`.
If this argument is not supplied (i.e. is `None`), all values will be
considered to be clean.
x_coord, y_coord: str
Names of the x and y coordinates in the elements of `dataarrays` to use
as tick and axis labels.
colors: list-like
A list-like of matplotlib colors - whether string names of
matplotlib colors (like 'red'), or list-likes of rgba values in range [0,255].
If `dataarrays` contains one DataArray, provide 3 color entries -
for never, sometimes, and always class membership, in that order.
If `dataarrays` contains two DataArrays, provide 4 color entries -
for transitions between never and sometimes/always class membership
between the two time periods. These transitions are, in order,
(never,never),(never,some),(some,never),(some,some).
override_mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays.
The pixels for which it is `True` are colored `override_color`.
override_color: str or list of rgba values
The color to use for `override_mask`. Can be a string name of a matplotlib color
or a list-like of rgba values (not rgb). By default, it is transparency.
neg_trans: bool
Whether to make pixels that are never a member of the class transparent.
pos_trans: bool
Whether to make pixels that are always a member of the class transparent.
class_legend_label: str
The class label on the legend. For example, `class_legend_label='Water'` would yield
legend labels like "Never Water".
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
title_kwargs: dict
The dictionary of keyword arguments used to format the title.
Passed to `matplotlib.axes.Axes.set_title()`.
Set the title text with a 'label' keyword argument.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `ax.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
legend_kwargs: dict
The dictionary of keyword arguments passed to `ax.legend()`.
create_stats_table: bool
Whether to create a table of statistics showing the number and percent
of pixels in each category of membership.
create_change_matrix: bool
Whether to create a 3x3 change matrix showing the number and percent of pixels
that experience each possible transition between never, sometimes, and always
a member of the class between the baseline and analysis time periods.
Only considered if `len(dataarrays) == 2`.
denoise: bool
Whether to denoise the output image.
denoise_params: dict
Dictionary of keyword arguments for
`utils.data_cube_utilites.raster_filter.lone_object_filter()`.
See that function's docstring for information about its parameters.
Returns
-------
(fig,ax): tuple
A 2-tuple of the figure and axes used to create the figure.
stats: tuple
Only returned if `create_stats_table == True` or `create_change_matrix == True`.
If `create_stats_table == True`, `stats` includes a `pandas.DataFrame` containing
the number and percent of pixels in each category of membership,
with the categories depending on whether `dataarrays` contains one or two DataArrays.
* If `dataarrays` contains one DataArray, there are 4 rows for never, sometimes,
always, and unknown (due to `clean_masks`) class membership.
* If `dataarrays` contains two DataArrays, there are 6 rows for the transitions
(never,never), (never,some), (some,never), (some,some), the net change
((never,some) + (some,never)), and unknown.
If `len(dataarrays) == 2 and create_change_matrix == True`, `stats` includes
an `xarray.Dataset` containing the number and percent of pixels in each possible
transition between never, sometimes, and always a member of the class between
the baseline and analysis time periods. The number and percent are each a
data variable of the `xarray.Dataset`.
If a stats table and a change matrix are both created, they will be returned in that order.
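Examples
--------
A minimal usage sketch, assuming `water_da` is an `xarray.DataArray` of 0/1 water
classifications with 'time', 'latitude', and 'longitude' dimensions (the variable
name is illustrative):
(fig, ax), stats = binary_class_change_plot([water_da],
    colors=['red', 'yellow', 'green'], class_legend_label='Water')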
"""
from .raster_filter import lone_object_filter
if clean_masks is None:
clean_masks = [xr.DataArray(np.ones(dataarray.shape, dtype=bool),
coords=dataarray.coords, dims=dataarray.dims)
for dataarray in dataarrays]
denoise_params = {} if denoise_params is None and denoise else \
denoise_params
# Avoid modifying the original arguments.
fig_kwargs, title_kwargs, legend_kwargs = \
fig_kwargs.copy(), title_kwargs.copy(), legend_kwargs.copy()
# Handle conversion of matplotlib color names to lists of rgb values (range [0,255] for plt.imshow()).
colors = list(map(convert_name_rgba_255, colors))
override_color = convert_name_rgba_255(override_color) if override_color is not None else [0, 0, 0, 0]
def get_none_chng_perm_masks(dataarray, clean_mask, time_dim='time'):
"""
For a DataArray of binary classifications (0 or 1) with a time dimension,
get a list of masks indicating where the points are, in order, never, sometimes,
or always a member of the class (1 indicates membership), considering only
non-NaN values for those points.
"""
time_axis = dataarray.get_axis_num(time_dim)
# Get the mean classification across time.
masked_da = np.ma.array(dataarray.values, mask=~clean_mask.values)
frac_cls = masked_da.mean(axis=time_axis)
# Find where pixels are permanent, changing, or never a member of the class.
none_mask = (frac_cls == 0).filled(False)
chng_mask = (0 < frac_cls).filled(False) & (frac_cls < 1).filled(False)
perm_mask = (1 == frac_cls).filled(False)
return [none_mask, chng_mask, perm_mask]
# Assemble the color masks.
masks = []
if len(dataarrays) == 1: # Determine extent change in one time period.
dataarray = dataarrays[0]
clean_mask = clean_masks[0]
masks += get_none_chng_perm_masks(dataarray, clean_mask)
else: # Determine change between two time periods.
baseline_da, analysis_da = dataarrays
baseline_clean_mask = clean_masks[0] if clean_masks is not None else None
analysis_clean_mask = clean_masks[1] if clean_masks is not None else None
baseline_none_mask, baseline_chng_mask, baseline_perm_mask = get_none_chng_perm_masks(baseline_da,
baseline_clean_mask)
analysis_none_mask, analysis_chng_mask, analysis_perm_mask = get_none_chng_perm_masks(analysis_da,
analysis_clean_mask)
# Find where points are never a member of the class or are a member at one or more times.
baseline_cls_ever = baseline_chng_mask | baseline_perm_mask
analysis_cls_ever = analysis_chng_mask | analysis_perm_mask
# Find where points change between never being a member of the class
# and being a member at one or more times between the two periods.
no_cls_no_cls_mask = baseline_none_mask & analysis_none_mask
no_cls_cls_mask = baseline_none_mask & analysis_cls_ever
cls_no_cls_mask = baseline_cls_ever & analysis_none_mask
cls_cls_mask = baseline_cls_ever & analysis_cls_ever
masks += [no_cls_no_cls_mask, no_cls_cls_mask, cls_no_cls_mask, cls_cls_mask]
# Determine the overriding mask.
y_x_shape = len(dataarrays[0][y_coord]), len(dataarrays[0][x_coord])
override_mask = np.zeros(y_x_shape, dtype=bool) if override_mask is None else override_mask
# Create an array of integer-encoded change-class values.
cls_cng_arr = np.zeros(y_x_shape, dtype=np.uint8)
for i, mask in enumerate(masks):
cls_cng_arr[mask] = i
# Denoise the class change image (optional).
if denoise:
cls_cng_arr = lone_object_filter(cls_cng_arr, **denoise_params)
# Color the image with the masks.
# Initialize pixels as white.
transparency_mask = np.zeros(y_x_shape, dtype=bool)
color_array = np.full((*y_x_shape, 4), 255, dtype=np.uint8)
for i in range(len(masks)):
if (neg_trans and i == 0) or (pos_trans and i == len(masks) - 1):
transparency_mask[cls_cng_arr == i] = True
color_array[cls_cng_arr == i] = colors[i]
if neg_trans or pos_trans:
color_array[transparency_mask] = [0, 0, 0, 0]
color_array[override_mask] = override_color
fig_kwargs['figsize'] = fig_kwargs.get('figsize', figure_ratio(dataarrays[0], x_coord, y_coord,
fixed_width=width))
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
# Set the tick and axes labels.
xarray_set_axes_labels(dataarrays[0], ax, x_coord, y_coord, x_label_kwargs, y_label_kwargs)
# Title the plot.
title_kwargs.setdefault('label', "Class Extents" if len(dataarrays) == 1 else \
"Class Extents Change (Baseline/Analysis)")
ax.set_title(**title_kwargs)
# Create the legend.
# Colors must be in range [0,1] for color patches.
colors = [np.array(color) / 255 for color in colors]
if len(dataarrays) == 1:
class_legend_label = "a Member of the Class" if class_legend_label is None else class_legend_label
labels = list(map(lambda str: str.format(class_legend_label),
['Never {}', 'Sometimes {}', 'Always {}']))
else:
class_legend_label = "Class Membership" if class_legend_label is None else class_legend_label
labels = list(map(lambda str: str.format(class_legend_label, class_legend_label),
['No {} to No {}', 'No {} to {}', '{} to No {}', '{} to {}']))
color_patches = list(map(lambda color, label: mpatches.Patch(color=color, label=label), colors, labels))
legend_kwargs.setdefault('loc', 'best')
legend_kwargs['handles'] = color_patches
ax.legend(**legend_kwargs)
ax.imshow(color_array, **imshow_kwargs)
if create_stats_table or create_change_matrix:
stats_data = []
if create_stats_table:
num_table_rows = 4 if len(dataarrays) == 1 else 6
index = labels + ['Unknown'] if len(dataarrays) == 1 else \
labels + ['Net Change'] + ['Unknown']
stats_table = pd.DataFrame(data=np.zeros((num_table_rows, 2)),
index=index, columns=['Number', 'Percent'])
# Number
num_insufficient_data = ~masks[0]
for i in range(1, len(masks)):
num_insufficient_data = num_insufficient_data & ~masks[i]
num_insufficient_data = num_insufficient_data.sum()
mask_sums = np.array([mask.sum() for mask in masks])
if len(dataarrays) == 1:
stats_table.iloc[:, 0] = np.concatenate((mask_sums, np.array([num_insufficient_data])))
else:
stats_table.iloc[:, 0] = np.concatenate(
(mask_sums, np.array([mask_sums[[1, 2]].sum()]), np.array([num_insufficient_data])))
# Percent
stats_table.iloc[:, 1] = stats_table.iloc[:, 0] / (y_x_shape[0] * y_x_shape[1])
stats_data.append(stats_table)
if len(dataarrays) == 2 and create_change_matrix:
dims = ['baseline', 'analysis']
classes = ['always', 'sometimes', 'never']
coords = {'baseline': classes, 'analysis': classes}
# Number
num_px_trans_da = xr.DataArray(np.zeros((3, 3), dtype=np.uint64),
dims=dims, coords=coords)
baseline_dict = OrderedDict([('always', baseline_perm_mask),
('sometimes', baseline_chng_mask),
('never', baseline_none_mask)])
analysis_dict = OrderedDict([('always', analysis_perm_mask),
('sometimes', analysis_chng_mask),
('never', analysis_none_mask)])
for baseline_cls, baseline_cls_mask in baseline_dict.items():
num_px_trans_da.sel(dict(baseline=baseline_cls)).values[:] = \
np.array([((baseline_cls_mask == 1) & (analysis_cls_mask == 1)).sum()
for analysis_cls_mask in analysis_dict.values()])
# Percent
percent_px_trans_da = num_px_trans_da / (y_x_shape[0] * y_x_shape[1])
stats_data.append(xr.Dataset(data_vars=dict(Number=num_px_trans_da,
Percent=percent_px_trans_da)))
stats_data = tuple(stats_data)
if create_stats_table or create_change_matrix:
return (fig, ax), stats_data
else:
return (fig, ax)
## Threshold plotting ##
def intersection_threshold_plot(first, second, th, mask=None,
color_none='black', color_first='green',
color_second='red', color_both='white',
color_mask='gray',
width=10, fig=None, ax=None,
x_coord='longitude', y_coord='latitude',
*args, **kwargs):
"""
Given two dataarrays, create a threshold plot showing where zero, one, or both are within a threshold.
Parameters
----------
first, second: xarray.DataArray
The DataArrays to compare.
th: tuple
A 2-tuple of the minimum (inclusive) and maximum (exclusive) threshold values, respectively.
mask: numpy.ndarray
A NumPy array of the same shape as the dataarrays. The pixels for which it is `True`
are colored `color_mask`.
color_none: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
neither first nor second have values within the threshold.
Default color is black.
color_first: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the first has values within the threshold.
Default color is green.
color_second: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
only the second has values within the threshold.
Default color is red.
color_both: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where
both the first and second have values within the threshold.
Default color is white.
color_mask: list-like or str
A list-like of 3 elements - red, green, and blue values in range [0,255],
or the name of a matplotlib color. Used to color regions where `mask == True`.
Overrides any other color a region may have.
Default color is gray.
width: int
The width of the created ``matplotlib.figure.Figure``.
The height will be set to maintain aspect ratio.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
*args: list
Arguments passed to ``matplotlib.pyplot.imshow()``.
**kwargs: dict
Keyword arguments passed to ``matplotlib.pyplot.imshow()``.
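Examples
--------
A minimal usage sketch, assuming `ndvi_baseline` and `ndvi_analysis` are 2D
`xarray.DataArray` objects with 'latitude' and 'longitude' coordinates (the
variable names are illustrative):
intersection_threshold_plot(ndvi_baseline, ndvi_analysis, th=(0.4, 0.7))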
"""
# Handle conversion of matplotlib color names to lists of rgb values.
color_none, color_first, color_second, color_both, color_mask = \
list(map(convert_name_rgb_255, [color_none, color_first, color_second, color_both, color_mask]))
# Determine the regions.
first_in = np.logical_and(th[0] <= first, first < th[1])
second_in = np.logical_and(th[0] <= second, second < th[1])
both_in = np.logical_and(first_in, second_in)
none_in = np.invert(both_in)
# Determine the overriding mask.
mask = np.zeros(first.shape).astype(bool) if mask is None else mask
# The colors for each pixel.
color_array = np.zeros((*first.shape, 3)).astype(np.int16)
color_array[none_in] = color_none
color_array[first_in] = color_first
color_array[second_in] = color_second
color_array[both_in] = color_both
color_array[mask] = color_mask
fig, ax = retrieve_or_create_fig_ax(fig, ax, figsize=figure_ratio(first, x_coord, y_coord, fixed_width=width))
plt.title("Threshold: {} < x < {}".format(th[0], th[1]))
max_num_ticks = 10 # Max ticks per axis.
lon = first.longitude.values
label_every = int(round(len(lon) / max_num_ticks))
lon_labels = ["{0:.4f}".format(lon_val) for lon_val in lon[::label_every]]
plt.xlabel('Longitude')
plt.xticks(range(len(lon))[::label_every], lon_labels, rotation='vertical')
lat = first.latitude.values
label_every = int(round(len(lat) / max_num_ticks))
lat_labels = ["{0:.4f}".format(lat_val) for lat_val in lat[::label_every]]
plt.ylabel('Latitude')
plt.yticks(range(len(lat))[::label_every], lat_labels)
plt.imshow(color_array, *args, **kwargs)
return fig, ax
## End threshold plotting ##
### End discrete color plotting (exclusive)##
## Misc ##
def print_matrix(cell_value_mtx, cell_label_mtx=None, row_labels=None, col_labels=None,
show_row_labels=True, show_col_labels=True, show_cell_labels=True,
cmap=None, cell_val_fmt='2g', annot_kwargs={}, tick_fontsize=14,
x_axis_tick_kwargs=None, y_axis_tick_kwargs=None,
x_axis_ticks_position='default', y_axis_ticks_position='default',
fig=None, ax=None, heatmap_kwargs={}, fig_kwargs={}):
"""
Prints a matrix as a heatmap.
Inspired by https://gist.github.com/shaypal5/94c53d765083101efc0240d776a23823.
Parameters
----------
cell_value_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell values when coloring with the colormap.
cell_label_mtx: numpy.ndarray
A 2D NumPy array to be used as the cell labels.
row_labels, col_labels: list-like or xarray.DataArray
Lists of labels in the order they index the matrix rows and columns, respectively.
show_row_labels, show_col_labels: bool
Whether to show the row or column labels, respectively.
show_cell_labels: bool
Whether to show values as cell labels or not.
cmap: matplotlib.colors.Colormap
A matplotlib colormap used to color the cells based on `cell_value_mtx`.
cell_val_fmt: str
Formatting string for values in the matrix cells.
annot_kwargs: dict
Keyword arguments for ``ax.text`` for formatting cell annotation text.
tick_fontsize: int
The fontsize of tick labels. Overridden by `x_axis_tick_kwargs` and `y_axis_tick_kwargs`.
x_axis_tick_kwargs, y_axis_tick_kwargs: dict
Keyword arguments for x and y axis tick labels, respectively.
Specifically, keyword arguments for calls to `ax.[x_axis,y_axis].set_ticklabels()`
where `ax` is the `matplotlib.axes.Axes` object returned by `seaborn.heatmap()`.
x_axis_ticks_position, y_axis_ticks_position: str
The position of x and y axis ticks, respectively.
For x_axis_ticks_position, possible values are ['top', 'bottom', 'both', 'default', 'none'].
For y_axis_ticks_position, possible values are ['left', 'right', 'both', 'default', 'none'].
See https://matplotlib.org/api/axis_api.html for more information.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If only `fig` is supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
heatmap_kwargs: dict
Dictionary of keyword arguments to `seaborn.heatmap()`.
Overrides any other relevant parameters passed to this function.
Some notable parameters include 'vmin', 'vmax', 'cbar', and 'cbar_kws'.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and axes used for the plot.
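Examples
--------
A minimal sketch printing a 2x2 matrix as a heatmap (the values are illustrative):
print_matrix(np.array([[5, 1], [2, 7]]),
             row_labels=['Actual 0', 'Actual 1'],
             col_labels=['Predicted 0', 'Predicted 1'], cmap='Blues')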
"""
import seaborn as sns
cell_label_mtx = cell_value_mtx if cell_label_mtx is None else cell_label_mtx
row_labels = [''] * cell_value_mtx.shape[0] if not show_row_labels \
or row_labels is None else row_labels
col_labels = [''] * cell_value_mtx.shape[1] if not show_col_labels \
or col_labels is None else col_labels
heatmap_kwargs.setdefault('cbar', False)
df = pd.DataFrame(cell_value_mtx, index=row_labels, columns=col_labels)
cell_labels = cell_label_mtx if show_cell_labels else None
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
heatmap = sns.heatmap(df, cmap=cmap, annot=cell_labels, fmt=cell_val_fmt,
annot_kws=annot_kwargs, ax=ax, **heatmap_kwargs)
if not show_row_labels:
heatmap.set_yticks([]) # Ticks must be hidden explicitly.
else:
if y_axis_tick_kwargs is None:
y_axis_tick_kwargs = dict(rotation=0, ha='right')
y_axis_tick_kwargs.setdefault('fontsize', tick_fontsize)
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), **y_axis_tick_kwargs)
heatmap.yaxis.set_ticks_position(y_axis_ticks_position)
heatmap.yaxis.tick_left() # Ticks will also appear on the right side otherwise.
if not show_col_labels:
heatmap.set_xticks([])
else:
if x_axis_tick_kwargs is None:
x_axis_tick_kwargs = dict(rotation=45, ha='right')
x_axis_tick_kwargs.setdefault('fontsize', tick_fontsize)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), **x_axis_tick_kwargs)
heatmap.xaxis.set_ticks_position(x_axis_ticks_position)
heatmap.xaxis.tick_bottom() # Ticks will also appear on the top side otherwise.
return fig, ax
def get_ax_size(fig, ax):
"""
Given matplotlib Figure (fig) and Axes (ax) objects, return
the width and height of the Axes object in inches as a list.
"""
# Credit goes to https://stackoverflow.com/a/19306776/5449970.
bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
return [bbox.width, bbox.height]
def xarray_imshow(data, x_coord='longitude', y_coord='latitude', width=10,
fig=None, ax=None, use_colorbar=True, cbar_labels=None,
use_legend=False, legend_labels=None, fig_kwargs=None,
imshow_kwargs=None, x_label_kwargs=None, y_label_kwargs=None,
cbar_kwargs=None, nan_color='white', legend_kwargs=None,
ax_tick_label_kwargs=None, x_tick_label_kwargs=None,
y_tick_label_kwargs=None, title=None, title_kwargs=None,
possible_plot_values=None):
"""
Shows a heatmap of an `xarray.DataArray` with only latitude and longitude dimensions.
Unlike matplotlib `imshow()` or `data.plot.imshow()`, this sets axes ticks and labels.
It also simplifies creating a colorbar and legend.
Parameters
----------
data: xarray.DataArray
The xarray.DataArray containing only latitude and longitude coordinates.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
width: numeric
The width of the created ``matplotlib.figure.Figure``, if none is supplied in `fig`.
The height will be set to maintain aspect ratio.
Will be overridden by `'figsize'` in `fig_kwargs`, if present.
fig: matplotlib.figure.Figure
The figure to use for the plot.
If `ax` is not supplied, the Axes object used will be the first.
ax: matplotlib.axes.Axes
The axes to use for the plot.
use_colorbar: bool
Whether or not to create a colorbar to the right of the axes.
cbar_labels: list
A list of strings to label the colorbar.
use_legend: bool
Whether or not to create a legend showing labels for unique values.
Only use if you are sure you have a low number of unique values.
legend_labels: dict
A mapping of values to legend labels.
fig_kwargs: dict
The dictionary of keyword arguments used to build the figure.
imshow_kwargs: dict
The dictionary of keyword arguments passed to `plt.imshow()`.
You can pass a colormap here with the key 'cmap'.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
They cannot reference the same dictionary.
cbar_kwargs: dict
The dictionary of keyword arguments passed to `plt.colorbar()`.
Some parameters of note include 'ticks', which is a list of values to place ticks at.
nan_color: str or list-like
The color used for NaN regions. Can be a string name of a matplotlib color or
a 3-tuple (list-like) of rgb values in range [0,255].
legend_kwargs: dict
The dictionary of keyword arguments passed to `plt.legend()`.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
title: str
The title of the figure.
title_kwargs: dict
The dictionary of keyword arguments passed to `ax.set_title()`.
possible_plot_values: list-like
The possible range of values for `data`. This affects the coloring of the map and the legend entries.
Returns
-------
fig, ax, im, cbar: matplotlib.figure.Figure, matplotlib.axes.Axes,
matplotlib.image.AxesImage, matplotlib.colorbar.Colorbar
The figure and axes used as well as the image returned by `pyplot.imshow()` and the colorbar.
If `use_colorbar == False`, `cbar` will be `None`.
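Examples
--------
A minimal usage sketch, assuming `ndvi` is a 2D `xarray.DataArray` with
'latitude' and 'longitude' coordinates (the variable name is illustrative):
fig, ax, im, cbar = xarray_imshow(ndvi, width=8, title='NDVI')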
"""
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Figure kwargs
# Use `copy()` to avoid modifying the original dictionaries.
fig_kwargs = {} if fig_kwargs is None else fig_kwargs.copy()
figsize = \
fig_kwargs.setdefault('figsize', figure_ratio(data, x_coord, y_coord,
fixed_width=width))
# Imshow kwargs
imshow_kwargs = {} if imshow_kwargs is None else imshow_kwargs.copy()
imshow_kwargs.setdefault('interpolation', 'nearest')
nan_color = norm_color(nan_color) # Normalize color value for matplotlib.
fig, ax = retrieve_or_create_fig_ax(fig, ax, **fig_kwargs)
axsize = get_ax_size(fig, ax) # Scale fonts on axis size, not figure size.
# Axis label kwargs
x_label_kwargs = {} if x_label_kwargs is None else x_label_kwargs.copy()
y_label_kwargs = {} if y_label_kwargs is None else y_label_kwargs.copy()
# Axis tick label kwargs
ax_tick_label_kwargs = {} if ax_tick_label_kwargs is None else \
ax_tick_label_kwargs.copy()
x_tick_label_kwargs = {} if x_tick_label_kwargs is None else \
x_tick_label_kwargs.copy()
y_tick_label_kwargs = {} if y_tick_label_kwargs is None else \
y_tick_label_kwargs.copy()
# Handle display of NaN values.
data_arr = data.values
masked_array = np.ma.array(data_arr, mask=np.isnan(data_arr))
cmap = imshow_kwargs.setdefault('cmap', copy.copy(plt.get_cmap('viridis')))
cmap.set_bad(nan_color)
# Handle kwargs for `imshow()`.
vmin, vmax = (np.min(possible_plot_values), np.max(possible_plot_values)) \
if possible_plot_values is not None else (np.nanmin(data), np.nanmax(data))
imshow_kwargs.setdefault('vmin', vmin)
imshow_kwargs.setdefault('vmax', vmax)
im = ax.imshow(masked_array, **imshow_kwargs)
# Set axis labels and tick labels.
xarray_set_axes_labels(data, ax, x_coord, y_coord,
x_label_kwargs, y_label_kwargs,
ax_tick_label_kwargs,
x_tick_label_kwargs, y_tick_label_kwargs)
# Set the title.
if title is not None:
title_kwargs = {} if title_kwargs is None else title_kwargs.copy()
ax.set_title(title, **title_kwargs)
# Create a colorbar.
if use_colorbar:
cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs.copy()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="7.5%", pad=0.05)
cbar = fig.colorbar(im, ax=ax, cax=cax, **cbar_kwargs)
if cbar_labels is not None:
cbar.ax.set_yticklabels(cbar_labels)
else:
cbar = None
# Create a legend.
if use_legend:
legend_kwargs = {} if legend_kwargs is None else legend_kwargs.copy()
legend_kwargs.setdefault("framealpha", 0.4)
# Determine the legend labels. If no set of values to create legend entries for
# is specified, use the unique values.
if possible_plot_values is None:
legend_values = np.unique(data.values)
legend_values = legend_values[~np.isnan(legend_values)]
else:
legend_values = possible_plot_values
if legend_labels is None:
legend_labels = ["{}".format(value) for value in legend_values]
else:
legend_labels = [legend_labels.get(value, "{}".format(value)) for value in legend_values]
colors = [im.cmap(value/np.max(legend_values)) for value in legend_values]
patches = [mpatches.Patch(color=colors[i], label=legend_labels[i])
for i in range(len(legend_values))]
legend_kwargs.setdefault('loc', 'best')
legend_kwargs['handles'] = patches
ax.legend(**legend_kwargs)
return fig, ax, im, cbar
def xarray_set_axes_labels(data, ax, x_coord='longitude', y_coord='latitude',
x_label_kwargs=None, y_label_kwargs=None,
ax_tick_label_kwargs=None,
x_tick_label_kwargs=None, y_tick_label_kwargs=None):
"""
Sets tick locations and labels for x and y axes on a `matplotlib.axes.Axes`
object such that the tick labels do not overlap. This currently only supports
numeric coordinates.
Parameters
----------
data: xarray.Dataset or xarray.DataArray
The xarray Dataset or DataArray containing latitude and longitude coordinates.
ax: matplotlib.axes.Axes
The matplotlib Axes object to set tick locations and labels for.
x_coord, y_coord: str
Names of the x and y coordinates in `data` to use as tick and axis labels.
x_label_kwargs, y_label_kwargs: dict
Dictionaries of keyword arguments for
`Axes.set_xlabel()` and `Axes.set_ylabel()`, respectively.
ax_tick_label_kwargs: dict
The dictionary of keyword arguments passed to `ax.tick_params()`.
x_tick_label_kwargs, y_tick_label_kwargs: dict
Dictionaries of keyword arguments passed to `ax.set_xticklabels()`
and `ax.set_yticklabels()`, respectively.
"""
# Avoid modifying the original arguments.
x_label_kwargs = {} if x_label_kwargs is None else x_label_kwargs.copy()
y_label_kwargs = {} if y_label_kwargs is None else y_label_kwargs.copy()
ax_tick_label_kwargs = {} if ax_tick_label_kwargs is None else \
ax_tick_label_kwargs.copy()
x_tick_label_kwargs = {} if x_tick_label_kwargs is None else \
x_tick_label_kwargs.copy()
y_tick_label_kwargs = {} if y_tick_label_kwargs is None else \
y_tick_label_kwargs.copy()
width, height = get_ax_size(ax.figure, ax)
# Labels
x_label_kwargs.setdefault('xlabel', x_coord)
ax.set_xlabel(**x_label_kwargs)
y_label_kwargs.setdefault('ylabel', y_coord)
ax.set_ylabel(**y_label_kwargs)
# Tick labels
ax.tick_params(**ax_tick_label_kwargs)
# X ticks
x_vals = data[x_coord].values
x_fontsize = \
x_tick_label_kwargs.setdefault('fontsize', mpl.rcParams['font.size'])
label_every = max(1, int(round(1 / 10 * len(x_vals) * x_fontsize / width)))
x_labels = ["{0:.4f}".format(float(x_val)) for x_val in x_vals[::label_every]]
ax.set_xticks(range(len(x_vals))[::label_every])
x_tick_label_kwargs.setdefault('rotation', 30)
ax.set_xticklabels(x_labels, **x_tick_label_kwargs)
# Y ticks
y_vals = data[y_coord].values
y_fontsize = \
y_tick_label_kwargs.setdefault('fontsize', mpl.rcParams['font.size'])
label_every = max(1, int(round(1 / 10 * len(y_vals) * y_fontsize / height)))
y_labels = ["{0:.4f}".format(float(y_val)) for y_val in y_vals[::label_every]]
ax.set_yticks(range(len(y_vals))[::label_every])
ax.set_yticklabels(y_labels, **y_tick_label_kwargs)
def figure_ratio(data, x_coord='longitude', y_coord='latitude',
fixed_width=None, fixed_height=None,
num_cols=1, num_rows=1):
"""
Returns a list of the width and height that match constraints on height
and width for a figure while maintaining aspect ratio if possible.
Also can be used to size a figure of a grid of plots of identically sized cells.
Specifically, the width and height are scaled by `num_cols` and `num_rows`.
Parameters
----------
data: xarray.Dataset or xarray.DataArray or list-like
Can be either of the following:
1. A list-like of x and y dimension sizes, respectively
2. An xarray Dataset or DataArray containing x and y dimensions
x_coord, y_coord: str
Names of the x and y coordinates in `data`.
fixed_width, fixed_height: float
The desired width or height. If both are specified, the aspect
ratio is maintained and `fixed_width` and `fixed_height` are
treated as maximum values for the size of a single grid element.
num_cols, num_rows: int
The number of columns and rows in the grid the plots will be in.
Zero, one, or both may be specified.
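Examples
--------
For example, for data with 200 columns and 100 rows:
figure_ratio([200, 100], fixed_width=12)                          # [12, 6.0]
figure_ratio([200, 100], fixed_width=12, num_cols=2, num_rows=2)  # [24, 12.0]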
"""
assert (fixed_width is not None) or (fixed_height is not None), \
"At least one of `fixed_width` or `fixed_height` must be specified."
# Determine the x and y dimension sizes and the aspect ratio.
if isinstance(data, xr.Dataset) or isinstance(data, xr.DataArray):
x_sz, y_sz = len(data[x_coord]), len(data[y_coord])
else:
x_sz, y_sz = data[0], data[1]
aspect_ratio = y_sz / x_sz
# Determine the figure size.
if fixed_width is not None:
width = fixed_width
height = width * aspect_ratio
elif fixed_height is not None:
height = fixed_height
width = height / aspect_ratio
# If both `fixed_width` and `fixed_height` are specified, treat as maximums.
if (fixed_width is not None) and (fixed_height is not None):
if width > fixed_width:
height *= fixed_width / width
width = fixed_width
if height > fixed_height:
width *= fixed_height / height
height = fixed_height
return [width * num_cols, height * num_rows]
def retrieve_or_create_fig_ax(fig=None, ax=None, **subplots_kwargs):
"""
Returns appropriate matplotlib Figure and Axes objects given Figure and/or Axes objects.
If neither is supplied, a new figure will be created with associated axes.
If only `fig` is supplied, `(fig,fig.axes[0])` is returned. That is, the first Axes object will be used (and created if necessary).
If `ax` is supplied, `(fig, ax)` is returned.
**subplots_kwargs: dict
A dictionary of keyword arguments passed to `matplotlib.pyplot.subplots()`,
such as `ncols` or `figsize`.
Returns
-------
fig, ax: matplotlib.figure.Figure, matplotlib.axes.Axes
The figure and the axes of that figure.
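Examples
--------
For example (`my_fig` is any existing `matplotlib.figure.Figure`):
fig, ax = retrieve_or_create_fig_ax(figsize=(10, 6))  # create a new figure and axes
fig, ax = retrieve_or_create_fig_ax(fig=my_fig)       # use the first axes of `my_fig`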
"""
if ax is None:
if fig is None:
fig, ax = plt.subplots(**subplots_kwargs)
else:
if len(fig.axes) == 0:
fig.add_subplot(111)
ax = fig.axes[0]
return fig, ax
def skip_plot(n_pts, plot_type, kwargs={}):
"""Returns a boolean denoting whether to skip plotting data given the number of points it contains."""
min_pts_dict = {'scatter': 1, 'box': 1, 'gaussian': 3, 'gaussian_filter': 2,
'poly': 1, 'cubic_spline': 3, 'line': 2,
'fourier': 1}
min_pts = min_pts_dict[plot_type]
if plot_type == 'poly':
assert 'degree' in kwargs.keys(), "When plotting a polynomial fit, there must be " \
"a 'degree' entry in the fit_kwargs parameter."
degree = kwargs['degree']
min_pts = min_pts + degree
return n_pts < min_pts
def remove_non_unique_ordered_list_str(ordered_list):
"""
Sets all occurrences of a value in an ordered list after its first occurrence to ''.
For example, ['a', 'a', 'b', 'b', 'c'] would become ['a', '', 'b', '', 'c'].
"""
prev_unique_str = ""
for i in range(len(ordered_list)):
current_str = ordered_list[i]
if current_str != prev_unique_str:
prev_unique_str = current_str
else:
ordered_list[i] = ""
return ordered_list
# Time #
# For February, assume leap years are included.
days_per_month = {1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30,
7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
def get_weeks_per_month(num_weeks):
"""
Assigns weeks to months in a repeating [5, 4, 4] pattern starting with January
(so January, April, July, and October normally get 5 weeks), accounting for the
variation between 52 and 54 weeks in a year by adding extra weeks to the last 3 months.
Returns a dict mapping month numbers (1-12) to their number of weeks.
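Examples
--------
For example, for a 52-week year:
get_weeks_per_month(52)  # {1: 5, 2: 4, 3: 4, 4: 5, ..., 10: 5, 11: 4, 12: 4}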
"""
last_months_num_weeks = None
if num_weeks <= 52:
last_months_num_weeks = [5, 4, 4]
elif num_weeks == 53:
last_months_num_weeks = [5, 4, 5]
elif num_weeks == 54:
last_months_num_weeks = [5, 5, 5]
return {month_int: num_weeks for (month_int, num_weeks) in
zip(days_per_month.keys(), [5, 4, 4] * 3 + last_months_num_weeks)}
num_weeks_per_month = np.tile([5, 4], 6)
last_week_int_per_month = np.cumsum(num_weeks_per_month)
month_names_short = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
month_names_long = ['January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
def day_of_year_int_to_str(day):
"""
Converts an integer day of year to a string containing the month and day, like "January 1".
The argument value must be in range [1,366].
Parameters
---------
day: int
The day of the year, represented as an integer.
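Examples
--------
For example:
day_of_year_int_to_str(1)   # 'January 1'
day_of_year_int_to_str(60)  # 'February 29' (leap years are assumed)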
"""
month_int = 1
while month_int < 12:
days_curr_month = days_per_month[month_int]
if day <= days_curr_month:
break
else:
day -= days_curr_month
month_int += 1
month_name = month_names_long[month_int - 1]
return "{} {}".format(month_name, day)
def month_ints_to_month_names(month_ints):
"""
Converts ordinal numbers for months (in range [1,12]) to their 3-letter names.
"""
return [month_names_short[i - 1] for i in month_ints]
def week_int_to_month_name(week_int):
month_ind = np.argmax(week_int <= last_week_int_per_month)
return month_names_short[month_ind]
def week_ints_to_month_names(week_ints):
return [week_int_to_month_name(week_int) for week_int in week_ints]
def naive_months_ticks_by_week(week_ints=None):
"""
Given a list of week numbers (in range [1,54]), returns a list of month strings separated by spaces.
Covers 54 weeks if no list-like of week numbers is given.
This is only intended to be used for labeling axes in plotting.
"""
month_ticks_by_week = []
week_ints = list(range(1, 55)) if week_ints is None else week_ints
month_ticks_by_week = remove_non_unique_ordered_list_str(week_ints_to_month_names(week_ints))
return month_ticks_by_week
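# Example output (a sketch): weeks 1-6 span January and February under the
# alternating 5/4-week pattern above, and repeated month names are blanked
# so they can be used directly as tick labels.
#
#     naive_months_ticks_by_week([1, 2, 3, 4, 5, 6])
#     # ['Jan', '', '', '', '', 'Feb']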
def n64_to_month_and_year(n64):
datetime_val = _n64_to_datetime(n64)
return month_names_long[datetime_val.month-1] + ' ' + str(datetime_val.year)
# End Time #
## DEA Plotting Utils ##
# Define function to convert xarray dataset to list of one or three band numpy arrays
def _ds_to_arraylist(ds, bands, time_dim, x_dim, y_dim, percentile_stretch, image_proc_func=None):
"""
Converts an xarray dataset to a list of numpy arrays for plt.imshow plotting
"""
    # Compute the low/high percentile values used to stretch the data
p_low, p_high = ds[bands].to_array().quantile(percentile_stretch).values
array_list = []
for i, timestep in enumerate(ds[time_dim]):
# Select single timestep from the data array
ds_i = ds[{time_dim: i}]
# Get shape of array
x = len(ds[x_dim])
y = len(ds[y_dim])
if len(bands) == 1:
# Create new one band array
img_toshow = exposure.rescale_intensity(ds_i[bands[0]].values,
in_range=(p_low, p_high),
out_range='image')
else:
# Create new three band array
rawimg = np.zeros((y, x, 3), dtype=np.float32)
# Add xarray bands into three dimensional numpy array
for band, colour in enumerate(bands):
rawimg[:, :, band] = ds_i[colour].values
# Stretch contrast using percentile values
img_toshow = exposure.rescale_intensity(rawimg,
in_range=(p_low, p_high),
out_range=(0, 1.0))
        # Optionally apply image processing to this frame
if image_proc_func:
img_toshow = image_proc_func(img_toshow).clip(0, 1)
array_list.append(img_toshow)
return array_list, p_low, p_high
def xr_animation(ds,
bands=None,
output_path='animation.mp4',
width_pixels=500,
interval=100,
percentile_stretch=(0.02, 0.98),
image_proc_funcs=None,
show_gdf=None,
show_date='%d %b %Y',
show_text=None,
show_colorbar=True,
gdf_kwargs={},
annotation_kwargs={},
imshow_kwargs={},
colorbar_kwargs={},
limit=None):
"""
Takes an `xarray` timeseries and animates the data as either a
three-band (true or false colour) or single-band animation,
allowing changes in the landscape to be compared across time.
Animations can be customised to include text and date annotations
or use specific combinations of input bands. Vector data can be
overlaid and animated on top of imagery, and custom image
processing functions can be applied to each frame.
Supports .mp4 (ideal for Twitter/social media) and .gif (ideal
for all purposes, but can have large file sizes) format files.
Last modified: April 2020
Parameters
----------
ds : xarray.Dataset
An xarray dataset with multiple time steps (i.e. multiple
observations along the `time` dimension).
bands : list of strings
An list of either one or three band names to be plotted,
all of which must exist in `ds`.
output_path : str, optional
A string giving the output location and filename of the
resulting animation. File extensions of '.mp4' and '.gif' are
accepted. Defaults to 'animation.mp4'.
width_pixels : int, optional
An integer defining the output width in pixels for the
resulting animation. The height of the animation is set
automatically based on the dimensions/ratio of the input
xarray dataset. Defaults to 500 pixels wide.
interval : int, optional
An integer defining the milliseconds between each animation
frame used to control the speed of the output animation. Higher
values result in a slower animation. Defaults to 100
milliseconds between each frame.
percentile_stretch : tuple of floats, optional
An optional tuple of two floats that can be used to clip one or
three-band arrays by percentiles to produce a more vibrant,
visually attractive image that is not affected by outliers/
extreme values. The default is `(0.02, 0.98)` which is
equivalent to xarray's `robust=True`.
image_proc_funcs : list of funcs, optional
An optional list containing functions that will be applied to
each animation frame (timestep) prior to animating. This can
include image processing functions such as increasing contrast,
unsharp masking, saturation etc. The function should take AND
return a `numpy.ndarray` with shape [y, x, bands]. If your
function has parameters, you can pass in custom values using
a lambda function:
`image_proc_funcs=[lambda x: custom_func(x, param1=10)]`.
show_gdf: geopandas.GeoDataFrame, optional
Vector data (e.g. ESRI shapefiles or GeoJSON) can be optionally
plotted over the top of imagery by supplying a
`geopandas.GeoDataFrame` object. To customise colours used to
plot the vector features, create a new column in the
        GeoDataFrame called 'color' specifying the colour used to plot
        each feature: e.g. `gdf['color'] = 'red'`.
To plot vector features at specific moments in time during the
animation, create new 'start_time' and/or 'end_time' columns in
the GeoDataFrame that define the time range used to plot each
feature. Dates can be provided in any string format that can be
converted using the `pandas.to_datetime()`. e.g.
`gdf['end_time'] = ['2001', '2005-01', '2009-01-01']`
show_date : string or bool, optional
An optional string or bool that defines how (or if) to plot
date annotations for each animation frame. Defaults to
'%d %b %Y'; can be customised to any format understood by
strftime. Set to False to remove date
annotations completely.
show_text : str or list of strings, optional
An optional string or list of strings with a length equal to
the number of timesteps in `ds`. This can be used to display a
static text annotation (using a string), or a dynamic title
(using a list) that displays different text for each timestep.
By default, no text annotation will be plotted.
show_colorbar : bool, optional
An optional boolean indicating whether to include a colourbar
for single-band animations. Defaults to True.
gdf_kwargs : dict, optional
An optional dictionary of keyword arguments to customise the
appearance of a `geopandas.GeoDataFrame` supplied to
`show_gdf`. Keyword arguments are passed to `GeoSeries.plot`
(see http://geopandas.org/reference.html#geopandas.GeoSeries.plot).
For example: `gdf_kwargs = {'linewidth': 2}`.
annotation_kwargs : dict, optional
An optional dict of keyword arguments for controlling the
appearance of text annotations. Keyword arguments are passed
to `matplotlib.pyplot.annotate`
(see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.annotate.html
for options). For example, `annotation_kwargs={'fontsize':20,
'color':'red', 'family':'serif'}`.
imshow_kwargs : dict, optional
An optional dict of keyword arguments for controlling the
appearance of arrays passed to `matplotlib.pyplot.imshow`
(see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.imshow.html
for options). For example, a green colour scheme and custom
stretch could be specified using:
        `imshow_kwargs={'cmap': 'Greens', 'vmin': 0.2, 'vmax': 0.9}`.
(some parameters like 'cmap' will only have an effect for
single-band animations, not three-band RGB animations).
colorbar_kwargs : dict, optional
An optional dict of keyword arguments used to control the
appearance of the colourbar. Keyword arguments are passed to
`matplotlib.pyplot.tick_params`
(see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.tick_params.html
for options). This can be used to customise the colourbar
ticks, e.g. changing tick label colour depending on the
background of the animation:
`colorbar_kwargs={'colors': 'black'}`.
limit: int, optional
An optional integer specifying how many animation frames to
render (e.g. `limit=50` will render the first 50 frames). This
can be useful for quickly testing animations without rendering
the entire time-series.
"""
def _start_end_times(gdf, ds):
"""
Converts 'start_time' and 'end_time' columns in a
`geopandas.GeoDataFrame` to datetime objects to allow vector
features to be plotted at specific moments in time during an
animation, and sets default values based on the first
and last time in `ds` if this information is missing from the
dataset.
"""
# Make copy of gdf so we do not modify original data
gdf = gdf.copy()
# Get min and max times from input dataset
minmax_times = pd.to_datetime(ds.time.isel(time=[0, -1]).values)
# Update both `start_time` and `end_time` columns
for time_col, time_val in zip(['start_time', 'end_time'], minmax_times):
# Add time_col if it does not exist
if time_col not in gdf:
gdf[time_col] = np.nan
# Convert values to datetimes and fill gaps with relevant time value
gdf[time_col] = pd.to_datetime(gdf[time_col], errors='ignore')
gdf[time_col] = gdf[time_col].fillna(time_val)
return gdf
def _add_colorbar(fig, ax, vmin, vmax, imshow_defaults, colorbar_defaults):
"""
Adds a new colorbar axis to the animation with custom minimum
and maximum values and styling.
"""
# Create new axis object for colorbar
cax = fig.add_axes([0.02, 0.02, 0.96, 0.03])
# Initialise color bar using plot min and max values
img = ax.imshow(np.array([[vmin, vmax]]), **imshow_defaults)
fig.colorbar(img,
cax=cax,
orientation='horizontal',
ticks=np.linspace(vmin, vmax, 2))
# Fine-tune appearance of colorbar
cax.xaxis.set_ticks_position('top')
cax.tick_params(axis='x', **colorbar_defaults)
cax.get_xticklabels()[0].set_horizontalalignment('left')
cax.get_xticklabels()[-1].set_horizontalalignment('right')
def _frame_annotation(times, show_date, show_text):
"""
Creates a custom annotation for the top-right of the animation
by converting a `xarray.DataArray` of times into strings, and
combining this with a custom text annotation. Handles cases
where `show_date=False/None`, `show_text=False/None`, or where
`show_text` is a list of strings.
"""
# Test if show_text is supplied as a list
is_sequence = isinstance(show_text, (list, tuple, np.ndarray))
# Raise exception if it is shorter than number of dates
if is_sequence and (len(show_text) == 1):
show_text, is_sequence = show_text[0], False
elif is_sequence and (len(show_text) < len(times)):
raise ValueError(f'Annotations supplied via `show_text` must have '
f'either a length of 1, or a length >= the number '
f'of timesteps in `ds` (n={len(times)})')
times_list = (times.dt.strftime(show_date).values if show_date else [None] *
len(times))
text_list = show_text if is_sequence else [show_text] * len(times)
annotation_list = ['\n'.join([str(i) for i in (a, b) if i])
for a, b in zip(times_list, text_list)]
return annotation_list
def _update_frames(i, ax, extent, annotation_text, gdf, gdf_defaults,
annotation_defaults, imshow_defaults):
"""
        Animation function called by `matplotlib.animation.FuncAnimation` to
animate each frame in the animation. Plots array and any text
annotations, as well as a temporal subset of `gdf` data based
on the times specified in 'start_time' and 'end_time' columns.
"""
# Clear previous frame to optimise render speed and plot imagery
ax.clear()
ax.imshow(array[i, ...].clip(0.0, 1.0), extent=extent, **imshow_defaults)
# Add annotation text
ax.annotate(annotation_text[i], **annotation_defaults)
# Add geodataframe annotation
if show_gdf is not None:
# Obtain start and end times to filter geodataframe features
time_i = ds.time.isel(time=i).values
# Subset geodataframe using start and end dates
gdf_subset = show_gdf.loc[(show_gdf.start_time <= time_i) &
(show_gdf.end_time >= time_i)]
if len(gdf_subset.index) > 0:
# Set color to geodataframe field if supplied
if ('color' in gdf_subset) and ('color' not in gdf_kwargs):
gdf_defaults.update({'color': gdf_subset['color'].tolist()})
gdf_subset.plot(ax=ax, **gdf_defaults)
# Remove axes to show imagery only
ax.axis('off')
# Update progress bar
progress_bar.update(1)
# Test if bands have been supplied, or convert to list to allow
# iteration if a single band is provided as a string
if bands is None:
raise ValueError(f'Please use the `bands` parameter to supply '
f'a list of one or three bands that exist as '
f'variables in `ds`, e.g. {list(ds.data_vars)}')
elif isinstance(bands, str):
bands = [bands]
# Test if bands exist in dataset
missing_bands = [b for b in bands if b not in ds.data_vars]
if missing_bands:
raise ValueError(f'Band(s) {missing_bands} do not exist as '
f'variables in `ds` {list(ds.data_vars)}')
# Test if time dimension exists in dataset
if 'time' not in ds.dims:
raise ValueError(f"`ds` does not contain a 'time' dimension "
f"required for generating an animation")
# Set default parameters
outline = [PathEffects.withStroke(linewidth=2.5, foreground='black')]
annotation_defaults = {
'xy': (1, 1),
'xycoords': 'axes fraction',
'xytext': (-5, -5),
'textcoords': 'offset points',
'horizontalalignment': 'right',
'verticalalignment': 'top',
'fontsize': 20,
'color': 'white',
'path_effects': outline
}
imshow_defaults = {'cmap': 'magma', 'interpolation': 'nearest'}
colorbar_defaults = {'colors': 'white', 'labelsize': 12, 'length': 0}
gdf_defaults = {'linewidth': 1.5}
# Update defaults with kwargs
annotation_defaults.update(annotation_kwargs)
imshow_defaults.update(imshow_kwargs)
colorbar_defaults.update(colorbar_kwargs)
gdf_defaults.update(gdf_kwargs)
# Get info on dataset dimensions
height, width = ds.geobox.shape
scale = width_pixels / width
left, bottom, right, top = ds.geobox.extent.boundingbox
# Prepare annotations
annotation_list = _frame_annotation(ds.time, show_date, show_text)
# Prepare geodataframe
if show_gdf is not None:
show_gdf = show_gdf.to_crs(ds.geobox.crs)
show_gdf = gpd.clip(show_gdf, mask=box(left, bottom, right, top))
show_gdf = _start_end_times(show_gdf, ds)
# Convert data to 4D numpy array of shape [time, y, x, bands]
ds = ds[bands].to_array().transpose(..., 'variable')[0:limit, ...]
array = ds.values
# Optionally apply image processing along axis 0 (e.g. to each timestep)
bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} ({remaining_s:.1f} ' \
'seconds remaining at {rate_fmt}{postfix})'
if image_proc_funcs:
print('Applying custom image processing functions')
for i, array_i in tqdm(enumerate(array),
total=len(ds.time),
leave=False,
bar_format=bar_format,
unit=' frames'):
for func in image_proc_funcs:
array_i = func(array_i)
array[i, ...] = array_i
# Clip to percentiles and rescale between 0.0 and 1.0 for plotting
vmin, vmax = np.quantile(array[np.isfinite(array)], q=percentile_stretch)
# Replace with vmin and vmax if present in `imshow_defaults`
if 'vmin' in imshow_defaults:
vmin = imshow_defaults['vmin']
if 'vmax' in imshow_defaults:
vmax = imshow_defaults['vmax']
array = rescale_intensity(array, in_range=(vmin, vmax), out_range=(0.0, 1.0))
array = np.squeeze(array) # remove final axis if only one band
# Set up figure
fig, ax = plt.subplots()
fig.set_size_inches(width * scale / 72, height * scale / 72, forward=True)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
# Optionally add colorbar
    if show_colorbar and (len(bands) == 1):
_add_colorbar(fig, ax, vmin, vmax, imshow_defaults, colorbar_defaults)
# Animate
print(f'Exporting animation to {output_path}')
anim = FuncAnimation(
fig=fig,
func=_update_frames,
fargs=(
ax, # axis to plot into
[left, right, bottom, top], # imshow extent
annotation_list, # list of text annotations
show_gdf, # geodataframe to plot over imagery
gdf_defaults, # any kwargs used to plot gdf
annotation_defaults, # kwargs for annotations
imshow_defaults), # kwargs for imshow
frames=len(ds.time),
interval=interval,
repeat=False)
# Set up progress bar
progress_bar = tqdm(total=len(ds.time),
unit=' frames',
bar_format=bar_format)
# Export animation to file
if Path(output_path).suffix == '.gif':
anim.save(output_path, writer='pillow')
else:
anim.save(output_path, dpi=72)
# Update progress bar to fix progress bar moving past end
if progress_bar.n != len(ds.time):
progress_bar.n = len(ds.time)
progress_bar.last_print_n = len(ds.time)
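# A minimal usage sketch of `xr_animation`. The dataset, band names and output
# path below are placeholders, not part of this module; `ds` is assumed to be an
# xarray.Dataset with a 'time' dimension and an ODC-style geobox.
#
#     xr_animation(ds=ds,
#                  bands=['red', 'green', 'blue'],
#                  output_path='example_animation.gif',
#                  width_pixels=600,
#                  interval=200)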
## End DEA Plotting Utils ##
|
ceos-seo/data_cube_utilities
|
data_cube_utilities/plotter_utils.py
|
Python
|
apache-2.0
| 125,683
|
[
"Gaussian"
] |
5d1b528a6fb103174c5e092d3cfc4f0fa43d9dd69112dcb07511daa30cd67b8d
|
##############################################################################
#
# Author: Alejandro Molina-Sanchez
# Run real-time simulations with yambo
#
# Warning: Real-time simulations require several data folders to run
#          properly. Before using these scripts it is recommended
#          to understand the different run levels.
#
# This script maps a fine grid onto a coarse grid
#
##############################################################################
#from __future__ import print_function
from sys import argv
from yambopy import *
import os
import argparse
print ('This script maps a fine grid onto a coarse grid.')
print ('It requires three arguments')
print ('1: -i folder with the fine grid')
print ('2: -o folder of the RT simulation')
print ('3: -dg name for the folder hosting the double-grid')
parser = argparse.ArgumentParser(description='Map of a double-grid')
parser.add_argument('-i' ,'--input' , help='Folder containing the SAVE folder of the double grid')
parser.add_argument('-o' ,'--output' , help='Output folder (contains the rt simulation SAVE)')
parser.add_argument('-dg' ,'--folder_dg',help='Folder containing the mapped double grid')
args = parser.parse_args()
print(args.input)
print(args.output)
print(args.folder_dg)
folder_in = args.input
folder_out = args.output
folder_dg = args.folder_dg
sym = YamboIn('ypp_rt -m',folder=folder_out,filename='ypp.in')
sym['DbGd_DB1_paths']= [ ["'../%s'" % folder_in], '' ]
sym.arguments.append('noBZExpand')
sym.write('%s/map-dg.in' % (folder_out))
os.system('cd %s; ypp_rt -F map-dg.in' % (folder_out))
os.system('cd %s; mkdir -p %s; mv SAVE/ndb.Double_Grid %s/' % (folder_out, folder_dg, folder_dg))
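# Example invocation (hypothetical folder names):
#   python map-symm.py -i save_double_grid -o rt_simulation -dg DOUBLE-GRID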
|
henriquemiranda/yambopy
|
tutorial/si/map-symm.py
|
Python
|
bsd-3-clause
| 1,693
|
[
"Yambo"
] |
63d6ce3c4489d416df53c99cea866b09e3cb801527da549aad6c1bc7b2a0e4d6
|
""" This is a test of the ProxyDB
    It assumes that the DB is present and installed in DIRAC
"""
# pylint: disable=invalid-name,wrong-import-position,protected-access
import os
import re
import sys
import stat
import shutil
import tempfile
# TODO: This should be modernised to use subprocess(32)
import subprocess as commands
import unittest
from diraccfg import CFG
import DIRAC
DIRAC.initialize() # Initialize configuration
import DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.FrameworkSystem.DB.ProxyDB import ProxyDB
from DIRAC.Resources.ProxyProvider.DIRACCAProxyProvider import DIRACCAProxyProvider
certsPath = os.path.join(os.path.dirname(DIRAC.__file__), "Core/Security/test/certs")
ca = DIRACCAProxyProvider()
ca.setParameters(
{"CertFile": os.path.join(certsPath, "ca/ca.cert.pem"), "KeyFile": os.path.join(certsPath, "ca/ca.key.pem")}
)
diracTestCACFG = """
Resources
{
ProxyProviders
{
DIRAC_CA
{
ProviderType = DIRACCA
CertFile = %s
KeyFile = %s
Supplied = C, O, OU, CN
Optional = emailAddress
DNOrder = C, O, OU, CN, emailAddress
OU = None
C = DN
O = DIRACCA
}
}
}
""" % (
os.path.join(certsPath, "ca/ca.cert.pem"),
os.path.join(certsPath, "ca/ca.key.pem"),
)
userCFG = """
Registry
{
Users
{
# In dirac_user group
user_ca
{
DN = /C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org
DNProperties
{
DN.1
{
DN = /C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org
ProxyProviders = DIRAC_CA
Groups = dirac_user
}
}
}
user
{
DN = /C=CC/O=DN/O=DIRAC/CN=user
DNProperties
{
DN.1
{
DN = /C=CC/O=DN/O=DIRAC/CN=user
ProxyProviders =
Groups = dirac_user
}
}
}
user_1
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_1
DNProperties
{
DN.1
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_1
ProxyProviders =
Groups = dirac_user
}
}
}
user_2
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_2
}
user_3
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_3
}
# Not in dirac_user group
user_4
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_4
}
}
Groups
{
group_1
{
Users = user_ca, user, user_1, user_2, user_3
VO = vo_1
}
group_2
{
Users = user_4
enableToDownload = False
}
}
VO
{
vo_1
{
VOMSName = vo_1
VOMSServers
{
}
}
}
}
"""
db = ProxyDB()
class ProxyDBTestCase(unittest.TestCase):
@classmethod
def createProxy(self, userName, group, time, vo=None, role=None):
"""Create user proxy
:param str userName: user name
:param str group: group name
:param int time: proxy expired time
:param str vo: VOMS VO name
:param str role: VOMS Role
        :return: S_OK(tuple)/S_ERROR() -- contains the proxy as a chain object and as a string
"""
userCertFile = os.path.join(self.userDir, userName + ".cert.pem")
userKeyFile = os.path.join(self.userDir, userName + ".key.pem")
self.proxyPath = os.path.join(self.userDir, userName + ".pem")
if not vo:
chain = X509Chain()
# Load user cert and key
retVal = chain.loadChainFromFile(userCertFile)
if not retVal["OK"]:
gLogger.warn(retVal["Message"])
return S_ERROR("Can't load %s" % userCertFile)
retVal = chain.loadKeyFromFile(userKeyFile)
if not retVal["OK"]:
gLogger.warn(retVal["Message"])
if "bad decrypt" in retVal["Message"]:
return S_ERROR("Bad passphrase")
return S_ERROR("Can't load %s" % userKeyFile)
result = chain.generateProxyToFile(self.proxyPath, time * 3600, diracGroup=group)
if not result["OK"]:
return result
else:
cmd = "voms-proxy-fake --cert %s --key %s -q" % (userCertFile, userKeyFile)
cmd += " -hostcert %s -hostkey %s" % (self.hostCert, self.hostKey)
cmd += " -uri fakeserver.cern.ch:15000"
cmd += ' -voms "%s"' % vo
cmd += ' -fqan "/%s/Role=%s/Capability=NULL"' % (vo, role)
cmd += " -hours %s -out %s -rfc" % (time, self.proxyPath)
status, output = commands.getstatusoutput(cmd)
if status:
return S_ERROR(output)
chain = X509Chain()
result = chain.loadProxyFromFile(self.proxyPath)
if not result["OK"]:
return result
result = chain.generateProxyToString(12 * 3600, diracGroup=group)
if not result["OK"]:
return result
return S_OK((chain, result["Value"]))
@classmethod
def setUpClass(cls):
cls.failed = False
# Add configuration
cfg = CFG()
cfg.loadFromBuffer(diracTestCACFG)
gConfig.loadCFG(cfg)
cfg.loadFromBuffer(userCFG)
gConfig.loadCFG(cfg)
# Prepare CA
lines = []
cfgDict = {}
cls.caPath = os.path.join(certsPath, "ca")
cls.caConfigFile = os.path.join(cls.caPath, "openssl_config_ca.cnf")
# Save original configuration file
shutil.copyfile(cls.caConfigFile, cls.caConfigFile + "bak")
# Parse
fields = ["dir", "database", "serial", "new_certs_dir", "private_key", "certificate"]
with open(cls.caConfigFile, "r") as caCFG:
for line in caCFG:
if re.findall("=", re.sub(r"#.*", "", line)):
field = re.sub(r"#.*", "", line).replace(" ", "").rstrip().split("=")[0]
line = "dir = %s #PUT THE RIGHT DIR HERE!\n" % (cls.caPath) if field == "dir" else line
val = re.sub(r"#.*", "", line).replace(" ", "").rstrip().split("=")[1]
if field in fields:
for i in fields:
if cfgDict.get(i):
val = val.replace("$%s" % i, cfgDict[i])
cfgDict[field] = val
if not cfgDict[field]:
cls.failed = "%s have empty value in %s" % (field, cls.caConfigFile)
lines.append(line)
with open(cls.caConfigFile, "w") as caCFG:
caCFG.writelines(lines)
for field in fields:
if field not in cfgDict.keys():
cls.failed = "%s value is absent in %s" % (field, cls.caConfigFile)
cls.hostCert = os.path.join(certsPath, "host/hostcert.pem")
cls.hostKey = os.path.join(certsPath, "host/hostkey.pem")
cls.caCert = cfgDict["certificate"]
cls.caKey = cfgDict["private_key"]
os.chmod(cls.caKey, stat.S_IREAD)
# Check directory for new certificates
cls.newCertDir = cfgDict["new_certs_dir"]
if not os.path.exists(cls.newCertDir):
os.makedirs(cls.newCertDir)
for f in os.listdir(cls.newCertDir):
os.remove(os.path.join(cls.newCertDir, f))
# Empty the certificate database
cls.index = cfgDict["database"]
with open(cls.index, "w") as indx:
indx.write("")
# Write down serial
cls.serial = cfgDict["serial"]
with open(cls.serial, "w") as serialFile:
serialFile.write("1000")
        # Create temporary directory for user certificates
cls.userDir = tempfile.mkdtemp(dir=certsPath)
# Create user certificates
for userName in ["no_user", "user", "user_1", "user_2", "user_3"]:
userConf = """[ req ]
default_bits = 4096
encrypt_key = yes
distinguished_name = req_dn
prompt = no
req_extensions = v3_req
[ req_dn ]
C = CC
O = DN
0.O = DIRAC
CN = %s
[ v3_req ]
# Extensions for client certificates (`man x509v3_config`).
nsComment = "OpenSSL Generated Client Certificate"
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
""" % (
userName
)
userConfFile = os.path.join(cls.userDir, userName + ".cnf")
userReqFile = os.path.join(cls.userDir, userName + ".req")
userKeyFile = os.path.join(cls.userDir, userName + ".key.pem")
userCertFile = os.path.join(cls.userDir, userName + ".cert.pem")
with open(userConfFile, "w") as f:
f.write(userConf)
status, output = commands.getstatusoutput("openssl genrsa -out %s" % userKeyFile)
if status:
gLogger.error(output)
exit()
gLogger.debug(output)
os.chmod(userKeyFile, stat.S_IREAD)
status, output = commands.getstatusoutput(
"openssl req -config %s -key %s -new -out %s" % (userConfFile, userKeyFile, userReqFile)
)
if status:
gLogger.error(output)
exit()
gLogger.debug(output)
cmd = "openssl ca -config %s -extensions usr_cert -batch -days 375 -in %s -out %s"
cmd = cmd % (cls.caConfigFile, userReqFile, userCertFile)
status, output = commands.getstatusoutput(cmd)
if status:
gLogger.error(output)
exit()
gLogger.debug(output)
# Result
status, output = commands.getstatusoutput("ls -al %s" % cls.userDir)
if status:
gLogger.error(output)
exit()
gLogger.debug("User certificates:\n", output)
def setUp(self):
gLogger.debug("\n")
if self.failed:
self.fail(self.failed)
db._update('DELETE FROM ProxyDB_Proxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")')
db._update(
'DELETE FROM ProxyDB_CleanProxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")'
)
def tearDown(self):
db._update('DELETE FROM ProxyDB_Proxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")')
db._update(
'DELETE FROM ProxyDB_CleanProxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")'
)
@classmethod
def tearDownClass(cls):
shutil.move(cls.caConfigFile + "bak", cls.caConfigFile)
if os.path.exists(cls.newCertDir):
for f in os.listdir(cls.newCertDir):
os.remove(os.path.join(cls.newCertDir, f))
for f in os.listdir(cls.caPath):
if re.match("%s..*" % cls.index, f) or f.endswith(".old"):
os.remove(os.path.join(cls.caPath, f))
if os.path.exists(cls.userDir):
shutil.rmtree(cls.userDir)
# Empty the certificate database
with open(cls.index, "w") as index:
index.write("")
# Write down serial
with open(cls.serial, "w") as serialFile:
serialFile.write("1000")
class testDB(ProxyDBTestCase):
def test_connectDB(self):
"""Try to connect to the ProxyDB"""
res = db._connect()
self.assertTrue(res["OK"])
def test_getUsers(self):
"""Test 'getUsers' - try to get users from DB"""
field = '("%%s", "/C=CC/O=DN/O=DIRAC/CN=%%s", %%s "PEM", TIMESTAMPADD(SECOND, %%s, UTC_TIMESTAMP()))%s' % ""
# Fill table for test
gLogger.info("\n* Fill tables for test..")
for table, values, fields in [
(
"ProxyDB_Proxies",
[field % ("user", "user", '"group_1",', "800"), field % ("user_2", "user_2", '"group_1",', "-1")],
"(UserName, UserDN, UserGroup, Pem, ExpirationTime)",
),
(
"ProxyDB_CleanProxies",
[field % ("user_3", "user_3", "", "43200")],
"(UserName, UserDN, Pem, ExpirationTime)",
),
]:
result = db._update("INSERT INTO %s%s VALUES %s ;" % (table, fields, ", ".join(values)))
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Testing 'getUsers'
gLogger.info("\n* Run `purgeExpiredProxies()`..")
for user, exp, expect, log in [
(False, 0, ["user", "user_2", "user_3"], "\n* Without arguments"),
(False, 1200, ["user_3"], "* Request proxy live time"),
("user_2", 0, ["user_2"], "* Request user name"),
("no_user", 0, [], "* Request not exist user name"),
]:
gLogger.info("%s.." % log)
result = db.getUsers(validSecondsLeft=exp, userMask=user)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
usersList = []
for line in result["Value"]:
if line["Name"] in ["user", "user_2", "user_3"]:
usersList.append(line["Name"])
self.assertEqual(set(expect), set(usersList), str(usersList) + ", when expected " + str(expect))
def test_purgeExpiredProxies(self):
"""Test 'purgeExpiredProxies' - try to purge expired proxies"""
# Purge existed proxies
gLogger.info("\n* First cleaning..")
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("user", "/C=CC/O=DN/O=DIRAC/CN=user", "group_1", "PEM", '
cmd += "TIMESTAMPADD(SECOND, -1, UTC_TIMESTAMP()));"
result = db._query(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
cmd = "SELECT COUNT( * ) FROM ProxyDB_Proxies WHERE ExpirationTime < UTC_TIMESTAMP()"
self.assertTrue(bool(db._query(cmd)["Value"][0][0] > 0))
result = db.purgeExpiredProxies()
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(result["Value"] > 0, "Must be more then null")
self.assertFalse(bool(db._query(cmd)["Value"][0][0] > 0), "Must be null")
def test_getRemoveProxy(self):
"""Testing get, store proxy"""
gLogger.info("\n* Check that DB is clean..")
        result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "In DB present proxies.")
gLogger.info("* Check posible crashes when get proxy..")
# Make record with not valid proxy, valid group, user and short expired time
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("user", "/C=CC/O=DN/O=DIRAC/CN=user", "group_1", "PEM", '
cmd += "TIMESTAMPADD(SECOND, 1800, UTC_TIMESTAMP()));"
result = db._update(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Try to no correct getProxy requests
for dn, group, reqtime, log in [
(
"/C=CC/O=DN/O=DIRAC/CN=user",
"group_1",
9999,
"No proxy provider, set request time, not valid proxy in ProxyDB_Proxies",
),
("/C=CC/O=DN/O=DIRAC/CN=user", "group_1", 0, "Not valid proxy in ProxyDB_Proxies"),
("/C=CC/O=DN/O=DIRAC/CN=no_user", "no_valid_group", 0, "User not exist, proxy not in DB tables"),
("/C=CC/O=DN/O=DIRAC/CN=user", "no_valid_group", 0, "Group not valid, proxy not in DB tables"),
("/C=CC/O=DN/O=DIRAC/CN=user", "group_1", 0, "No proxy provider for user, proxy not in DB tables"),
("/C=CC/O=DN/O=DIRAC/CN=user_4", "group_2", 0, "Group has option enableToDownload = False in CS"),
]:
gLogger.info("== > %s:" % log)
result = db.getProxy(dn, group, reqtime)
self.assertFalse(result["OK"], "Must be fail.")
gLogger.info("Msg: %s" % result["Message"])
        # In the last case the method found a proxy and must delete it as not valid
cmd = 'SELECT COUNT( * ) FROM ProxyDB_Proxies WHERE UserName="user"'
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == 0), "GetProxy method didn't delete the last proxy.")
gLogger.info("* Check that DB is clean..")
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "In DB present proxies.")
gLogger.info("* Generate proxy on the fly..")
result = db.getProxy("/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org", "group_1", 1800)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
gLogger.info("* Check that ProxyDB_CleanProxy contain generated proxy..")
result = db.getProxiesContent({"UserName": "user_ca"}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 1), "Generated proxy must be one.")
for table, count in [("ProxyDB_Proxies", 0), ("ProxyDB_CleanProxies", 1)]:
cmd = 'SELECT COUNT( * ) FROM %s WHERE UserName="user_ca"' % table
self.assertTrue(
bool(db._query(cmd)["Value"][0][0] == count),
table + " must " + (count and "contain proxy" or "be empty"),
)
gLogger.info("* Check that DB is clean..")
result = db.deleteProxy(
"/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org", proxyProvider="DIRAC_CA"
)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "In DB present proxies.")
gLogger.info("* Upload proxy..")
for user, dn, group, vo, time, res, log in [
("user", "/C=CC/O=DN/O=DIRAC/CN=user", "group_1", False, 12, False, "With group extension"),
("user", "/C=CC/O=DN/O=DIRAC/CN=user", False, "vo_1", 12, False, "With voms extension"),
("user_1", "/C=CC/O=DN/O=DIRAC/CN=user_1", False, "vo_1", 12, False, "With voms extension"),
("user", "/C=CC/O=DN/O=DIRAC/CN=user", False, False, 0, False, "Expired proxy"),
("no_user", "/C=CC/O=DN/O=DIRAC/CN=no_user", False, False, 12, False, "Not exist user"),
("user", "/C=CC/O=DN/O=DIRAC/CN=user", False, False, 12, True, "Valid proxy"),
]:
# Clean tables with proxies
for table in ["ProxyDB_Proxies", "ProxyDB_CleanProxies"]:
result = db._update('DELETE FROM %s WHERE UserName = "user"' % table)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db._update('DELETE FROM %s WHERE UserName = "user_1"' % table)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
gLogger.info("== > %s:" % log)
result = self.createProxy(user, group, time, vo=vo)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
chain = result["Value"][0]
# Assert VOMSProxy
if vo:
self.assertTrue(bool(chain.isVOMS().get("Value")), "Cannot create proxy with VOMS extension")
result = db.generateDelegationRequest(chain, dn)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
resDict = result["Value"]
result = chain.generateChainFromRequestString(resDict["request"], time * 3500)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
if not chain.isVOMS().get("Value") and vo:
gLogger.info("voms-proxy-fake command not working as expected, so proxy have no VOMS extention")
res = not res
result = db.completeDelegation(resDict["id"], dn, result["Value"])
text = "Must be ended %s%s" % (
"successful" if res else "with error",
": %s" % result.get("Message", "Error message is absent."),
)
self.assertEqual(result["OK"], res, text)
if not res:
gLogger.info("Msg: %s" % (result["Message"]))
cmd = 'SELECT COUNT( * ) FROM ProxyDB_Proxies WHERE UserName="%s"' % user
self.assertTrue(
bool(db._query(cmd)["Value"][0][0] == 0),
"ProxyDB_Proxies must " + ("contain proxy" if res else "be empty"),
)
cmd = 'SELECT COUNT( * ) FROM ProxyDB_CleanProxies WHERE UserName="%s"' % user
self.assertTrue(
bool(db._query(cmd)["Value"][0][0] == (1 if res else 0)),
"ProxyDB_CleanProxies must " + ("contain proxy" if res else "be empty"),
)
        # Last test must leave a proxy in the DB
gLogger.info("* Check that ProxyDB_CleanProxy contain generated proxy..")
result = db.getProxiesContent({"UserName": "user"}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 1), "Generated proxy must be one.")
cmd = 'SELECT COUNT( * ) FROM ProxyDB_CleanProxies WHERE UserName="user"'
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == 1), "ProxyDB_CleanProxies must contain proxy")
gLogger.info("* Get proxy that store only in ProxyDB_CleanProxies..")
# Try to get proxy that was stored to ProxyDB_CleanProxies in previous step
for res, group, reqtime, log in [
(False, "group_1", 24 * 3600, "Request time more that in stored proxy"),
(False, "group_2", 0, "Request group not contain user"),
(True, "group_1", 0, "Request time less that in stored proxy"),
]:
gLogger.info("== > %s:" % log)
result = db.getProxy("/C=CC/O=DN/O=DIRAC/CN=user", group, reqtime)
text = "Must be ended %s%s" % (
res and "successful" or "with error",
": %s" % result.get("Message", "Error message is absent."),
)
self.assertEqual(result["OK"], res, text)
if res:
chain = result["Value"][0]
self.assertTrue(chain.isValidProxy()["OK"], "\n" + result.get("Message", "Error message is absent."))
result = chain.getDIRACGroup()
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertEqual("group_1", result["Value"], "Group must be group_1, not " + result["Value"])
else:
gLogger.info("Msg: %s" % (result["Message"]))
gLogger.info("* Check that DB is clean..")
result = db.deleteProxy("/C=CC/O=DN/O=DIRAC/CN=user", proxyProvider="Certificate")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "In DB present proxies.")
gLogger.info("* Get proxy when it store only in ProxyDB_Proxies..")
# Make record with proxy that contain group
result = ca._forceGenerateProxyForDN("/C=CC/O=DN/O=DIRAC/CN=user", 12 * 3600, group="group_1")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
proxyStr = result["Value"][1]
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("user", "%s", "%s", "%s", TIMESTAMPADD(SECOND, 43200, UTC_TIMESTAMP()))' % (dn, group, proxyStr)
result = db._update(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Try to get it
result = db.getProxy(dn, group, 1800)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Check that proxy contain group
chain = result["Value"][0]
self.assertTrue(chain.isValidProxy()["OK"], "\n" + result.get("Message", "Error message is absent."))
result = chain.getDIRACGroup()
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertEqual("group_1", result["Value"], "Group must be group_1, not " + result["Value"])
gLogger.info("* Check that DB is clean..")
result = db.deleteProxy("/C=CC/O=DN/O=DIRAC/CN=user")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "In DB present proxies.")
gLogger.info("* Get VOMS proxy..")
for vomsuser in ["user", "user_1"]:
# Create proxy with VOMS extension
result = self.createProxy(vomsuser, "group_1", 12, vo="vo_1", role="role_2")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
chain, proxyStr = result["Value"]
# Assert VOMSProxy
self.assertTrue(bool(chain.isVOMS().get("Value")), "Cannot create proxy with VOMS extension")
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("%s", "/C=CC/O=DN/O=DIRAC/CN=%s", "group_1", "%s", ' % (vomsuser, vomsuser, proxyStr)
cmd += "TIMESTAMPADD(SECOND, 43200, UTC_TIMESTAMP()))"
result = db._update(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Try to get proxy with VOMS extension
for dn, group, role, time, log in [
("/C=CC/O=DN/O=DIRAC/CN=user_4", "group_2", False, 9999, "Not exist VO for current group"),
(
"/C=CC/O=DN/O=DIRAC/CN=user",
"group_1",
"role_1",
9999,
"Stored proxy already have different VOMS extension",
),
(
"/C=CC/O=DN/O=DIRAC/CN=user_1",
"group_1",
"role_1",
9999,
"Stored proxy already have different VOMS extension",
),
(
"/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org",
"group_1",
"role_1",
9999,
"Not correct VO configuration",
),
]:
gLogger.info("== > %s(DN: %s):" % (log, dn))
if not any([dn, group, role, time, log]):
                gLogger.info(
                    "voms-proxy-fake command not working as expected, the proxy has no VOMS extension, skipping to the next case.."
                )
continue
result = db.getVOMSProxy(dn, group, time, role)
self.assertFalse(result["OK"], "Must be fail.")
gLogger.info("Msg: %s" % result["Message"])
# Check stored proxies
for table, user, count in [("ProxyDB_Proxies", "user", 1), ("ProxyDB_CleanProxies", "user_ca", 1)]:
cmd = 'SELECT COUNT( * ) FROM %s WHERE UserName="%s"' % (table, user)
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == count))
gLogger.info("* Delete proxies..")
for dn, table in [
("/C=CC/O=DN/O=DIRAC/CN=user", "ProxyDB_Proxies"),
("/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org", "ProxyDB_CleanProxies"),
]:
result = db.deleteProxy(dn)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
cmd = 'SELECT COUNT( * ) FROM %s WHERE UserName="user_ca"' % table
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == 0))
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(ProxyDBTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(testDB))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
|
DIRACGrid/DIRAC
|
tests/Integration/Framework/Test_ProxyDB.py
|
Python
|
gpl-3.0
| 29,670
|
[
"DIRAC"
] |
14fc779ce5a525827fb6cd766a3d849d202447a889b35f229e046492977ed9e8
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
import math
from .exceptions import *
from . import qcformat
from . import molpro_basissets
class MolproIn(qcformat.InputFormat):
def __init__(self, mem, mtd, bas, mol, sys, cast):
qcformat.InputFormat.__init__(self, mem, mtd, bas, mol, sys, cast)
# memory in MB --> MW
self.memory = int(math.ceil(mem / 8.0))
# auxiliary basis sets
[self.unaugbasis, self.augbasis, self.auxbasis] = self.corresponding_aux_basis()
def format_global_parameters(self):
text = ''
if self.method in ['mp2c', 'dft-sapt-shift', 'dft-sapt', 'dft-sapt-pbe0ac', 'dft-sapt-pbe0acalda']:
text += """GTHRESH,ZERO=1.e-14,ONEINT=1.e-14,TWOINT=1.e-14,ENERGY=1.e-8,ORBITAL=1.e-8,GRID=1.e-8\n\n"""
elif self.method in ['b3lyp', 'b3lyp-d', 'df-b3lyp', 'df-b3lyp-d']:
text += """GTHRESH,ZERO=1.e-14,ONEINT=1.e-14,TWOINT=1.e-14,ENERGY=1.e-8,ORBITAL=1.e-7,GRID=1.e-8\n\n"""
else:
text += """GTHRESH,ZERO=1.e-14,ONEINT=1.e-14,TWOINT=1.e-14,ENERGY=1.e-9\n\n"""
return text
def format_basis(self):
text = ''
text += """basis={\n"""
try:
# jaxz, maxz, etc.
for line in molpro_basissets.altbasis[self.basis]:
text += """%s\n""" % (line)
text += '\n'
except KeyError:
# haxz
if self.basis.startswith('heavy-aug-'):
text += """set,orbital; default,%s,H=%s\n""" % (self.basis[6:], self.unaugbasis)
# xz, axz, 6-31g*
else:
text += """set,orbital; default,%s\n""" % (self.basis)
if ('df-' in self.method) or ('f12' in self.method) or (self.method in ['mp2c', 'dft-sapt', 'dft-sapt-pbe0acalda']):
if self.unaugbasis and self.auxbasis:
text += """set,jkfit; default,%s/jkfit\n""" % (self.auxbasis)
text += """set,jkfitb; default,%s/jkfit\n""" % (self.unaugbasis)
text += """set,mp2fit; default,%s/mp2fit\n""" % (self.auxbasis)
text += """set,dflhf; default,%s/jkfit\n""" % (self.auxbasis)
else:
raise ValidationError("""Auxiliary basis not predictable from orbital basis '%s'""" % (self.basis))
text += """}\n\n"""
return text
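    # A sketch of the block this method generates for a density-fitted method,
    # assuming corresponding_aux_basis() maps an aug-cc-pVDZ orbital basis to
    # unaugbasis='cc-pvdz' and auxbasis='aug-cc-pvdz' (illustrative values only):
    #
    #     basis={
    #     set,orbital; default,aug-cc-pvdz
    #     set,jkfit; default,aug-cc-pvdz/jkfit
    #     set,jkfitb; default,cc-pvdz/jkfit
    #     set,mp2fit; default,aug-cc-pvdz/mp2fit
    #     set,dflhf; default,aug-cc-pvdz/jkfit
    #     }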
def format_infile_string(self):
text = ''
# format comment and memory
text += """***, %s %s\n""" % (self.index, self.molecule.tagline)
text += """memory,%d,m\n""" % (self.memory)
# format molecule, incl. charges and dummy atoms
text += self.molecule.format_molecule_for_molpro()
# format global convergence directions
text += self.format_global_parameters()
# format castup directions
if self.castup is True:
text += """basis=sto-3g\n"""
text += """rhf\n"""
text += '\n'
# format basis set
text += self.format_basis()
# format method
for line in qcmtdIN[self.method]:
text += """%s\n""" % (line)
text += """show[1,20f20.12],ee*,ce*,te*\n"""
text += """show[1,60f20.12],_E*\n"""
text += '\n'
return text
qcmtdIN = {
'ccsd(t)-f12': [
'rhf',
'eehf=energy',
'ccsd(t)-f12,df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'eemp3=emp3',
'cemp3=eemp3-eehf',
'eeccsd=energc',
'ceccsd=eeccsd-eehf',
'eeccsdt=energy',
'ceccsdt=eeccsdt-eehf',
'temp2=emp2_trip',
'teccsd=ectrip'],
'ccsd(t)': [
'rhf',
'eehf=energy',
'ccsd(t)',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'eemp3=emp3',
'cemp3=eemp3-eehf',
'eeccsd=energc',
'ceccsd=eeccsd-eehf',
'eeccsdt=energy',
'ceccsdt=eeccsdt-eehf',
'temp2=emp2_trip',
'teccsd=ectrip'],
'mp3': [
'gdirect',
'rhf',
'eehf=energy',
'mp3',
'eemp2=emp2',
'eemp3=emp3',
'eemp25=0.5*(eemp2+eemp3)',
'cemp2=eemp2-eehf',
'cemp3=eemp3-eehf',
'cemp25=eemp25-eehf',
'temp2=emp2_trip',
'temp3=ectrip'],
'mp2': [
'gdirect',
'rhf',
'eehf=energy',
'mp2',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'temp2=emp2_trip'],
'df-hf-mp2': [
'gdirect',
'{df-hf,basis=jkfit}',
'eehf=energy',
'mp2',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'temp2=emp2_trip'],
'hf-df-mp2': [
'gdirect',
'rhf',
'eehf=energy',
'{df-mp2,basis_mp2=mp2fit}',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'temp2=emp2_trip'],
'hf': [
'rhf',
'eehf=energy'],
'mp2-f12': [
'gdirect',
'rhf',
'eehf=energy',
'mp2-f12',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'temp2=emp2_trip'],
'df-mp2-f12': [
'gdirect',
#'rhf',
'{df-hf,basis=jkfit}',
'eehf=energy',
#'{df-mp2-f12,df_basis=mp2fit,df_basis_exch=jkfit,ri_basis=optrib}',
'{df-mp2-f12,df_basis=mp2fit,df_basis_exch=jkfitb,ri_basis=jkfitb}',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'temp2=emp2_trip'],
'df-mp2': [
'gdirect',
'{df-hf,basis=jkfit}',
'eehf=energy',
'{df-mp2,basis_mp2=mp2fit}',
'eemp2=emp2',
'cemp2=eemp2-eehf',
'temp2=emp2_trip'],
'df-hf': [
'gdirect',
'{df-hf,basis=jkfit}',
'eehf=energy'],
'b3lyp-d': [
'gdirect',
'rks,b3lyp3',
'eehf=energy',
'dispcorr',
'eehfd=eehf+edisp'],
'df-b3lyp-d': [
'gdirect',
'{df-rks,b3lyp3,basis=jkfit}',
'eehf=energy',
'dispcorr',
'eehfd=eehf+edisp'],
'b3lyp': [
'gdirect',
'rks,b3lyp3',
'eehf=energy'],
'df-b3lyp': [
'gdirect',
'{df-rks,b3lyp3,basis=jkfit}',
'eehf=energy'],
#'mp2c': [ # this job computes one part [E_disp(TDDFT)] of the three parts of a MP2C calculation
# # check that nfrag = 2
# 'gdirect',
# 'ga=1101.2; gb=1102.2',
# 'ca=2101.2; cb=2102.2\n',
#
# $spin = $cgmp{MLPmol1} - 1;
# 'SET,CHARGE=$cgmp{CHGmol1}',
# 'SET,SPIN=$spin',
# 'dummy',
# foreach $at (@monoBreal) { print $handle ",$at"; }
# ''
# '{df-hf,basis=jkfit,locorb=0; start,atdens; save,$ga}',
# '{df-ks,lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,1.0; start,$ga; save,$ca}',
# 'eehfa=energy; sapt; monomerA',
# '',
#
# $spin = $cgmp{MLPmol2} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol2}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoAreal) { print $handle ",$at"; }
# print $handle "\n{df-hf,basis=jkfit,locorb=0; start,atdens; save,\$gb}\n";
# print $handle "{df-ks,lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,1.0; start,\$gb; save,\$cb}\n";
# print $handle "eehfb=energy; sapt; monomerB\n\n";
#
# $spin = $cgmp{MLPsyst} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGsyst}\nSET,SPIN=$spin\n";
# print $handle "{sapt,SAPT_LEVEL=3; intermol,ca=\$ca,cb=\$cb,icpks=0,fitlevel=3,nlexfac=0.0,cfac=0.0\n";
# print $handle "dfit,basis_coul=jkfit,basis_exch=jkfit,cfit_scf=3}\n";
# print $handle "eedisp=E2disp\n\n";
#
# ],
}
#'dft-sapt-shift': [
#
# # this is written in an inflexible way (fixed basis, functional) so that it is computed
# # only once, then used when writing DFT-SAPT inputs, which we'll be more flexible with
#
# print $handle "basis={\n";
# print $handle "set,orbital; default,aug-cc-pVQZ\n";
# print $handle "set,jkfit; default,avqz/jkfit\n";
# print $handle "set,dflhf; default,avqz/jkfit\n";
# print $handle "}\n";
#
# if ($handle eq "M1OUT") { $charge = $cgmp{CHGmol1}; $spin = $cgmp{MLPmol1} - 1; }
# elsif ($handle eq "M2OUT") { $charge = $cgmp{CHGmol2}; $spin = $cgmp{MLPmol2} - 1; }
#
# print $handle "\ngdirect\n";
# print $handle "{df-ks,pbex,pw91c,lhf; dftfac,0.75,1.0,0.25}\n";
# print $handle "basis=tzvpp\n";
# print $handle "{ks,pbe0; orbprint,0}\n";
# print $handle "eeneut=energy\n";
# $charge += 1;
# $spin += 1;
# print $handle "SET,CHARGE=$charge\nSET,SPIN=$spin\n";
# print $handle "{ks,pbe0}\n";
# print $handle "eecat=energy\n";
# print $handle "eeie=eecat-eeneut\n";
# print $handle "show[1,20f20.12],ee*,ce*,te*\n";
# print $handle "show[1,60f20.12],_E*\n";
# ]
#'dft-sapt': [
#
# if ( ($asyA eq '') || ($asyB eq '') ) {
# print "ERROR: asymptotic correction not defined for one or more monomers in index $system.\n";
# close(DIOUT);
# unlink("$pathDIOUT");
# }
#
# print $handle "gdirect\n";
# print $handle "ca=2101.2; cb=2102.2\n\n";
#
# $spin = $cgmp{MLPmol1} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol1}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoBreal) { print $handle ",$at"; }
# print $handle "\n{df-ks,pbex,pw91c,lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,0.75,1.0,0.25; asymp,$asyA; save,\$ca}\n";
# print $handle "eehfa=energy; sapt; monomerA\n\n";
#
# $spin = $cgmp{MLPmol2} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol2}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoAreal) { print $handle ",$at"; }
# print $handle "\n{df-ks,pbex,pw91c,lhf,df_basis=dflhf,basis_coul=jkfitb,basis_exch=jkfitb; dftfac,0.75,1.0,0.25; asymp,$asyB; save,\$cb}\n";
# print $handle "eehfb=energy; sapt; monomerB\n\n";
#
# $spin = $cgmp{MLPsyst} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGsyst}\nSET,SPIN=$spin\n";
# print $handle "{sapt,sapt_level=3; intermol,ca=\$ca,cb=\$cb,icpks=0,fitlevel=3,nlexfac=0.0\n";
# print $handle "dfit,basis_coul=jkfit,basis_exch=jkfit,basis_mp2=mp2fit,cfit_scf=3}\n";
# print $handle "eeelst=E1pol\n";
# print $handle "eeexch=E1ex\n";
# print $handle "eeind=E2ind\n";
# print $handle "eeexind=E2exind\n";
# print $handle "eedisp=E2disp\n";
# print $handle "eeexdisp=E2exdisp\n\n";
#
# ]
#'dft-sapt-pbe0ac': [
#
# if ( ($asyA eq '') || ($asyB eq '') ) {
# print "ERROR: asymptotic correction not defined for one or more monomers in index $system.\n";
# close(DIOUT);
# unlink("$pathDIOUT");
# }
#
# print $handle "ca=2101.2; cb=2102.2\n\n";
#
# $spin = $cgmp{MLPmol1} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol1}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoBreal) { print $handle ",$at"; }
# print $handle "\n{ks,pbe0; asymp,$asyA; save,\$ca}\n";
# print $handle "eehfa=energy; sapt; monomerA\n\n";
#
# $spin = $cgmp{MLPmol2} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol2}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoAreal) { print $handle ",$at"; }
# print $handle "\n{ks,pbe0; asymp,$asyB; save,\$cb}\n";
# print $handle "eehfb=energy; sapt; monomerB\n\n";
#
# $spin = $cgmp{MLPsyst} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGsyst}\nSET,SPIN=$spin\n";
# print $handle "{sapt; intermol,ca=\$ca,cb=\$cb,icpks=0}\n";
# print $handle "eeelst=E1pol\n";
# print $handle "eeexch=E1ex\n";
# print $handle "eeind=E2ind\n";
# print $handle "eeexind=E2exind\n";
# print $handle "eedisp=E2disp\n";
# print $handle "eeexdisp=E2exdisp\n\n";
# ]
#'dft-sapt-pbe0acalda': [
#
# if ( ($asyA eq '') || ($asyB eq '') ) {
# print "ERROR: asymptotic correction not defined for one or more monomers in index $system.\n";
# close(DIOUT);
# unlink("$pathDIOUT");
# }
#
# print $handle "ca=2101.2; cb=2102.2\n\n";
#
# $spin = $cgmp{MLPmol1} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol1}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoBreal) { print $handle ",$at"; }
# print $handle "\n{ks,pbe0; asymp,$asyA; save,\$ca}\n";
# print $handle "eehfa=energy; sapt; monomerA\n\n";
#
# $spin = $cgmp{MLPmol2} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGmol2}\nSET,SPIN=$spin\ndummy";
# foreach $at (@monoAreal) { print $handle ",$at"; }
# print $handle "\n{ks,pbe0; asymp,$asyB; save,\$cb}\n";
# print $handle "eehfb=energy; sapt; monomerB\n\n";
#
# $spin = $cgmp{MLPsyst} - 1;
# print $handle "SET,CHARGE=$cgmp{CHGsyst}\nSET,SPIN=$spin\n";
# print $handle "{sapt,sapt_level=3; intermol,ca=\$ca,cb=\$cb,icpks=0,fitlevel=3,nlexfac=0.0\n";
# print $handle "dfit,basis_coul=jkfit,basis_exch=jkfit,basis_mp2=mp2fit,cfit_scf=3}\n";
# print $handle "eeelst=E1pol\n";
# print $handle "eeexch=E1ex\n";
# print $handle "eeind=E2ind\n";
# print $handle "eeexind=E2exind\n";
# print $handle "eedisp=E2disp\n";
# print $handle "eeexdisp=E2exdisp\n\n";
#
# print $handle "show[1,20f20.12],ee*,ce*,te*\n";
# print $handle "show[1,60f20.12],_E*\n";
# }
#
|
kannon92/psi4
|
psi4/driver/qcdb/molpro.py
|
Python
|
gpl-2.0
| 14,111
|
[
"Psi4"
] |
0fdcf54512b2a4cfb6d78c2f12f223e9e2e698b22344b5e9a7898b10d612ecf6
|
#from ctx_base import StandardBaseContext
from .libmp.backend import basestring, exec_
from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
ComplexResult, to_pickable, from_pickable, normalize,
from_int, from_float, from_npfloat, from_Decimal, from_str, to_int, to_float, to_str,
from_rational, from_man_exp,
fone, fzero, finf, fninf, fnan,
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
mpf_hash, mpf_rand,
mpf_sum,
bitcount, to_fixed,
mpc_to_str,
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
mpc_mpf_div,
mpf_pow,
mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
mpf_glaisher, mpf_twinprime, mpf_mertens,
int_types)
from . import rational
from . import function_docs
new = object.__new__
class mpnumeric(object):
"""Base class for mpf and mpc."""
__slots__ = []
def __new__(cls, val):
raise NotImplementedError
class _mpf(mpnumeric):
"""
An mpf instance holds a real-valued floating-point number. mpf:s
work analogously to Python floats, but support arbitrary-precision
arithmetic.
"""
__slots__ = ['_mpf_']
def __new__(cls, val=fzero, **kwargs):
"""A new mpf can be created from a Python float, an int, a
or a decimal string representing a number in floating-point
format."""
prec, rounding = cls.context._prec_rounding
if kwargs:
prec = kwargs.get('prec', prec)
if 'dps' in kwargs:
prec = dps_to_prec(kwargs['dps'])
rounding = kwargs.get('rounding', rounding)
if type(val) is cls:
sign, man, exp, bc = val._mpf_
if (not man) and exp:
return val
v = new(cls)
v._mpf_ = normalize(sign, man, exp, bc, prec, rounding)
return v
elif type(val) is tuple:
if len(val) == 2:
v = new(cls)
v._mpf_ = from_man_exp(val[0], val[1], prec, rounding)
return v
if len(val) == 4:
sign, man, exp, bc = val
v = new(cls)
v._mpf_ = normalize(sign, MPZ(man), exp, bc, prec, rounding)
return v
raise ValueError
else:
v = new(cls)
v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding)
return v
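    # A sketch of the accepted constructor inputs (using the public `mpf` name
    # that the context binds to this class):
    #     mpf(1.25)            # from a Python float
    #     mpf('1.25e-3')       # from a decimal string
    #     mpf((5, -2))         # from a (man, exp) pair: 5 * 2**-2 = 1.25
    #     mpf((0, 5, -2, 3))   # from a raw (sign, man, exp, bc) tuple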
@classmethod
def mpf_convert_arg(cls, x, prec, rounding):
if isinstance(x, int_types): return from_int(x)
if isinstance(x, float): return from_float(x)
if isinstance(x, basestring): return from_str(x, prec, rounding)
if isinstance(x, cls.context.constant): return x.func(prec, rounding)
if hasattr(x, '_mpf_'): return x._mpf_
if hasattr(x, '_mpmath_'):
t = cls.context.convert(x._mpmath_(prec, rounding))
if hasattr(t, '_mpf_'):
return t._mpf_
if hasattr(x, '_mpi_'):
a, b = x._mpi_
if a == b:
return a
raise ValueError("can only create mpf from zero-width interval")
raise TypeError("cannot create mpf from " + repr(x))
@classmethod
def mpf_convert_rhs(cls, x):
if isinstance(x, int_types): return from_int(x)
if isinstance(x, float): return from_float(x)
if isinstance(x, complex_types): return cls.context.mpc(x)
if isinstance(x, rational.mpq):
p, q = x._mpq_
return from_rational(p, q, cls.context.prec)
if hasattr(x, '_mpf_'): return x._mpf_
if hasattr(x, '_mpmath_'):
t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding))
if hasattr(t, '_mpf_'):
return t._mpf_
return t
return NotImplemented
@classmethod
def mpf_convert_lhs(cls, x):
x = cls.mpf_convert_rhs(x)
if type(x) is tuple:
return cls.context.make_mpf(x)
return x
man_exp = property(lambda self: self._mpf_[1:3])
man = property(lambda self: self._mpf_[1])
exp = property(lambda self: self._mpf_[2])
bc = property(lambda self: self._mpf_[3])
real = property(lambda self: self)
imag = property(lambda self: self.context.zero)
conjugate = lambda self: self
def __getstate__(self): return to_pickable(self._mpf_)
def __setstate__(self, val): self._mpf_ = from_pickable(val)
def __repr__(s):
if s.context.pretty:
return str(s)
return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits)
def __str__(s): return to_str(s._mpf_, s.context._str_digits)
def __hash__(s): return mpf_hash(s._mpf_)
def __int__(s): return int(to_int(s._mpf_))
def __long__(s): return long(to_int(s._mpf_))
def __float__(s): return to_float(s._mpf_, rnd=s.context._prec_rounding[1])
def __complex__(s): return complex(float(s))
def __nonzero__(s): return s._mpf_ != fzero
__bool__ = __nonzero__
def __abs__(s):
cls, new, (prec, rounding) = s._ctxdata
v = new(cls)
v._mpf_ = mpf_abs(s._mpf_, prec, rounding)
return v
def __pos__(s):
cls, new, (prec, rounding) = s._ctxdata
v = new(cls)
v._mpf_ = mpf_pos(s._mpf_, prec, rounding)
return v
def __neg__(s):
cls, new, (prec, rounding) = s._ctxdata
v = new(cls)
v._mpf_ = mpf_neg(s._mpf_, prec, rounding)
return v
def _cmp(s, t, func):
if hasattr(t, '_mpf_'):
t = t._mpf_
else:
t = s.mpf_convert_rhs(t)
if t is NotImplemented:
return t
return func(s._mpf_, t)
def __cmp__(s, t): return s._cmp(t, mpf_cmp)
def __lt__(s, t): return s._cmp(t, mpf_lt)
def __gt__(s, t): return s._cmp(t, mpf_gt)
def __le__(s, t): return s._cmp(t, mpf_le)
def __ge__(s, t): return s._cmp(t, mpf_ge)
def __ne__(s, t):
v = s.__eq__(t)
if v is NotImplemented:
return v
return not v
def __rsub__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if type(t) in int_types:
v = new(cls)
v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding)
return v
t = s.mpf_convert_lhs(t)
if t is NotImplemented:
return t
return t - s
def __rdiv__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if isinstance(t, int_types):
v = new(cls)
v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding)
return v
t = s.mpf_convert_lhs(t)
if t is NotImplemented:
return t
return t / s
def __rpow__(s, t):
t = s.mpf_convert_lhs(t)
if t is NotImplemented:
return t
return t ** s
def __rmod__(s, t):
t = s.mpf_convert_lhs(t)
if t is NotImplemented:
return t
return t % s
def sqrt(s):
return s.context.sqrt(s)
def ae(s, t, rel_eps=None, abs_eps=None):
return s.context.almosteq(s, t, rel_eps, abs_eps)
def to_fixed(self, prec):
return to_fixed(self._mpf_, prec)
def __round__(self, *args):
return round(float(self), *args)
mpf_binary_op = """
def %NAME%(self, other):
mpf, new, (prec, rounding) = self._ctxdata
sval = self._mpf_
if hasattr(other, '_mpf_'):
tval = other._mpf_
%WITH_MPF%
ttype = type(other)
if ttype in int_types:
%WITH_INT%
elif ttype is float:
tval = from_float(other)
%WITH_MPF%
elif hasattr(other, '_mpc_'):
tval = other._mpc_
mpc = type(other)
%WITH_MPC%
elif ttype is complex:
tval = from_float(other.real), from_float(other.imag)
mpc = self.context.mpc
%WITH_MPC%
if isinstance(other, mpnumeric):
return NotImplemented
try:
other = mpf.context.convert(other, strings=False)
except TypeError:
return NotImplemented
return self.%NAME%(other)
"""
return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj"
return_mpc = "; obj = new(mpc); obj._mpc_ = val; return obj"
mpf_pow_same = """
try:
val = mpf_pow(sval, tval, prec, rounding) %s
except ComplexResult:
if mpf.context.trap_complex:
raise
mpc = mpf.context.mpc
val = mpc_pow((sval, fzero), (tval, fzero), prec, rounding) %s
""" % (return_mpf, return_mpc)
def binary_op(name, with_mpf='', with_int='', with_mpc=''):
code = mpf_binary_op
code = code.replace("%WITH_INT%", with_int)
code = code.replace("%WITH_MPC%", with_mpc)
code = code.replace("%WITH_MPF%", with_mpf)
code = code.replace("%NAME%", name)
np = {}
exec_(code, globals(), np)
return np[name]
_mpf.__eq__ = binary_op('__eq__',
'return mpf_eq(sval, tval)',
'return mpf_eq(sval, from_int(other))',
'return (tval[1] == fzero) and mpf_eq(tval[0], sval)')
_mpf.__add__ = binary_op('__add__',
'val = mpf_add(sval, tval, prec, rounding)' + return_mpf,
'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf,
'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc)
_mpf.__sub__ = binary_op('__sub__',
'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf,
'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf,
'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc)
_mpf.__mul__ = binary_op('__mul__',
'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf,
'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf,
'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc)
_mpf.__div__ = binary_op('__div__',
'val = mpf_div(sval, tval, prec, rounding)' + return_mpf,
'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf,
'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc)
_mpf.__mod__ = binary_op('__mod__',
'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf,
'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf,
'raise NotImplementedError("complex modulo")')
_mpf.__pow__ = binary_op('__pow__',
mpf_pow_same,
'val = mpf_pow_int(sval, other, prec, rounding)' + return_mpf,
'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc)
_mpf.__radd__ = _mpf.__add__
_mpf.__rmul__ = _mpf.__mul__
_mpf.__truediv__ = _mpf.__div__
_mpf.__rtruediv__ = _mpf.__rdiv__
class _constant(_mpf):
"""Represents a mathematical constant with dynamic precision.
When printed or used in an arithmetic operation, a constant
is converted to a regular mpf at the working precision. A
regular mpf can also be obtained using the operation +x."""
def __new__(cls, func, name, docname=''):
a = object.__new__(cls)
a.name = name
a.func = func
a.__doc__ = getattr(function_docs, docname, '')
return a
def __call__(self, prec=None, dps=None, rounding=None):
prec2, rounding2 = self.context._prec_rounding
if not prec: prec = prec2
if not rounding: rounding = rounding2
if dps: prec = dps_to_prec(dps)
return self.context.make_mpf(self.func(prec, rounding))
@property
def _mpf_(self):
prec, rounding = self.context._prec_rounding
return self.func(prec, rounding)
def __repr__(self):
return "<%s: %s~>" % (self.name, self.context.nstr(self(dps=15)))
class _mpc(mpnumeric):
"""
An mpc represents a complex number using a pair of mpf:s (one
for the real part and another for the imaginary part.) The mpc
class behaves fairly similarly to Python's complex type.
"""
__slots__ = ['_mpc_']
def __new__(cls, real=0, imag=0):
s = object.__new__(cls)
if isinstance(real, complex_types):
real, imag = real.real, real.imag
elif hasattr(real, '_mpc_'):
s._mpc_ = real._mpc_
return s
real = cls.context.mpf(real)
imag = cls.context.mpf(imag)
s._mpc_ = (real._mpf_, imag._mpf_)
return s
real = property(lambda self: self.context.make_mpf(self._mpc_[0]))
imag = property(lambda self: self.context.make_mpf(self._mpc_[1]))
def __getstate__(self):
return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1])
def __setstate__(self, val):
self._mpc_ = from_pickable(val[0]), from_pickable(val[1])
def __repr__(s):
if s.context.pretty:
return str(s)
r = repr(s.real)[4:-1]
i = repr(s.imag)[4:-1]
return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i)
def __str__(s):
return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits)
def __complex__(s):
return mpc_to_complex(s._mpc_, rnd=s.context._prec_rounding[1])
def __pos__(s):
cls, new, (prec, rounding) = s._ctxdata
v = new(cls)
v._mpc_ = mpc_pos(s._mpc_, prec, rounding)
return v
def __abs__(s):
prec, rounding = s.context._prec_rounding
v = new(s.context.mpf)
v._mpf_ = mpc_abs(s._mpc_, prec, rounding)
return v
def __neg__(s):
cls, new, (prec, rounding) = s._ctxdata
v = new(cls)
v._mpc_ = mpc_neg(s._mpc_, prec, rounding)
return v
def conjugate(s):
cls, new, (prec, rounding) = s._ctxdata
v = new(cls)
v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding)
return v
def __nonzero__(s):
return mpc_is_nonzero(s._mpc_)
__bool__ = __nonzero__
def __hash__(s):
return mpc_hash(s._mpc_)
@classmethod
def mpc_convert_lhs(cls, x):
try:
y = cls.context.convert(x)
return y
except TypeError:
return NotImplemented
def __eq__(s, t):
if not hasattr(t, '_mpc_'):
if isinstance(t, str):
return False
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
return s.real == t.real and s.imag == t.imag
def __ne__(s, t):
b = s.__eq__(t)
if b is NotImplemented:
return b
return not b
def _compare(*args):
raise TypeError("no ordering relation is defined for complex numbers")
__gt__ = _compare
__le__ = _compare
    __lt__ = _compare
__ge__ = _compare
def __add__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if not hasattr(t, '_mpc_'):
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
if hasattr(t, '_mpf_'):
v = new(cls)
v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding)
return v
v = new(cls)
v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding)
return v
def __sub__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if not hasattr(t, '_mpc_'):
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
if hasattr(t, '_mpf_'):
v = new(cls)
v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding)
return v
v = new(cls)
v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding)
return v
def __mul__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if not hasattr(t, '_mpc_'):
if isinstance(t, int_types):
v = new(cls)
v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
return v
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
if hasattr(t, '_mpf_'):
v = new(cls)
v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding)
return v
t = s.mpc_convert_lhs(t)
v = new(cls)
v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding)
return v
def __div__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if not hasattr(t, '_mpc_'):
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
if hasattr(t, '_mpf_'):
v = new(cls)
v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding)
return v
v = new(cls)
v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding)
return v
def __pow__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if isinstance(t, int_types):
v = new(cls)
v._mpc_ = mpc_pow_int(s._mpc_, t, prec, rounding)
return v
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
v = new(cls)
if hasattr(t, '_mpf_'):
v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding)
else:
v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding)
return v
__radd__ = __add__
def __rsub__(s, t):
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
return t - s
def __rmul__(s, t):
cls, new, (prec, rounding) = s._ctxdata
if isinstance(t, int_types):
v = new(cls)
v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding)
return v
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
return t * s
def __rdiv__(s, t):
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
return t / s
def __rpow__(s, t):
t = s.mpc_convert_lhs(t)
if t is NotImplemented:
return t
return t ** s
__truediv__ = __div__
__rtruediv__ = __rdiv__
def ae(s, t, rel_eps=None, abs_eps=None):
return s.context.almosteq(s, t, rel_eps, abs_eps)
complex_types = (complex, _mpc)
class PythonMPContext(object):
def __init__(ctx):
ctx._prec_rounding = [53, round_nearest]
ctx.mpf = type('mpf', (_mpf,), {})
ctx.mpc = type('mpc', (_mpc,), {})
ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding]
ctx.mpf.context = ctx
ctx.mpc.context = ctx
ctx.constant = type('constant', (_constant,), {})
ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding]
ctx.constant.context = ctx
def make_mpf(ctx, v):
a = new(ctx.mpf)
a._mpf_ = v
return a
def make_mpc(ctx, v):
a = new(ctx.mpc)
a._mpc_ = v
return a
def default(ctx):
ctx._prec = ctx._prec_rounding[0] = 53
ctx._dps = 15
ctx.trap_complex = False
def _set_prec(ctx, n):
ctx._prec = ctx._prec_rounding[0] = max(1, int(n))
ctx._dps = prec_to_dps(n)
def _set_dps(ctx, n):
ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n)
ctx._dps = max(1, int(n))
prec = property(lambda ctx: ctx._prec, _set_prec)
dps = property(lambda ctx: ctx._dps, _set_dps)
def convert(ctx, x, strings=True):
"""
Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``,
``mpc``, ``int``, ``float``, ``complex``, the conversion
will be performed losslessly.
If *x* is a string, the result will be rounded to the present
working precision. Strings representing fractions or complex
numbers are permitted.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> mpmathify(3.5)
mpf('3.5')
>>> mpmathify('2.1')
mpf('2.1000000000000001')
>>> mpmathify('3/4')
mpf('0.75')
>>> mpmathify('2+3j')
mpc(real='2.0', imag='3.0')
"""
if type(x) in ctx.types: return x
if isinstance(x, int_types): return ctx.make_mpf(from_int(x))
if isinstance(x, float): return ctx.make_mpf(from_float(x))
if isinstance(x, complex):
return ctx.make_mpc((from_float(x.real), from_float(x.imag)))
if type(x).__module__ == 'numpy': return ctx.npconvert(x)
if isinstance(x, numbers.Rational): # e.g. Fraction
try: x = rational.mpq(int(x.numerator), int(x.denominator))
except: pass
prec, rounding = ctx._prec_rounding
if isinstance(x, rational.mpq):
p, q = x._mpq_
return ctx.make_mpf(from_rational(p, q, prec))
if strings and isinstance(x, basestring):
try:
_mpf_ = from_str(x, prec, rounding)
return ctx.make_mpf(_mpf_)
except ValueError:
pass
if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_)
if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_)
if hasattr(x, '_mpmath_'):
return ctx.convert(x._mpmath_(prec, rounding))
if type(x).__module__ == 'decimal':
try: return ctx.make_mpf(from_Decimal(x, prec, rounding))
except: pass
return ctx._convert_fallback(x, strings)
def npconvert(ctx, x):
"""
Converts *x* to an ``mpf`` or ``mpc``. *x* should be a numpy
scalar.
"""
import numpy as np
if isinstance(x, np.integer): return ctx.make_mpf(from_int(int(x)))
if isinstance(x, np.floating): return ctx.make_mpf(from_npfloat(x))
if isinstance(x, np.complexfloating):
return ctx.make_mpc((from_npfloat(x.real), from_npfloat(x.imag)))
raise TypeError("cannot create mpf from " + repr(x))
def isnan(ctx, x):
"""
Return *True* if *x* is a NaN (not-a-number), or for a complex
number, whether either the real or complex part is NaN;
otherwise return *False*::
>>> from mpmath import *
>>> isnan(3.14)
False
>>> isnan(nan)
True
>>> isnan(mpc(3.14,2.72))
False
>>> isnan(mpc(3.14,nan))
True
"""
if hasattr(x, "_mpf_"):
return x._mpf_ == fnan
if hasattr(x, "_mpc_"):
return fnan in x._mpc_
if isinstance(x, int_types) or isinstance(x, rational.mpq):
return False
x = ctx.convert(x)
if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
return ctx.isnan(x)
raise TypeError("isnan() needs a number as input")
def isinf(ctx, x):
"""
Return *True* if the absolute value of *x* is infinite;
otherwise return *False*::
>>> from mpmath import *
>>> isinf(inf)
True
>>> isinf(-inf)
True
>>> isinf(3)
False
>>> isinf(3+4j)
False
>>> isinf(mpc(3,inf))
True
>>> isinf(mpc(inf,3))
True
"""
if hasattr(x, "_mpf_"):
return x._mpf_ in (finf, fninf)
if hasattr(x, "_mpc_"):
re, im = x._mpc_
return re in (finf, fninf) or im in (finf, fninf)
if isinstance(x, int_types) or isinstance(x, rational.mpq):
return False
x = ctx.convert(x)
if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
return ctx.isinf(x)
raise TypeError("isinf() needs a number as input")
def isnormal(ctx, x):
"""
Determine whether *x* is "normal" in the sense of floating-point
representation; that is, return *False* if *x* is zero, an
infinity or NaN; otherwise return *True*. By extension, a
complex number *x* is considered "normal" if its magnitude is
normal::
>>> from mpmath import *
>>> isnormal(3)
True
>>> isnormal(0)
False
>>> isnormal(inf); isnormal(-inf); isnormal(nan)
False
False
False
>>> isnormal(0+0j)
False
>>> isnormal(0+3j)
True
>>> isnormal(mpc(2,nan))
False
"""
if hasattr(x, "_mpf_"):
return bool(x._mpf_[1])
if hasattr(x, "_mpc_"):
re, im = x._mpc_
re_normal = bool(re[1])
im_normal = bool(im[1])
if re == fzero: return im_normal
if im == fzero: return re_normal
return re_normal and im_normal
if isinstance(x, int_types) or isinstance(x, rational.mpq):
return bool(x)
x = ctx.convert(x)
if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
return ctx.isnormal(x)
raise TypeError("isnormal() needs a number as input")
def isint(ctx, x, gaussian=False):
"""
Return *True* if *x* is integer-valued; otherwise return
*False*::
>>> from mpmath import *
>>> isint(3)
True
>>> isint(mpf(3))
True
>>> isint(3.2)
False
>>> isint(inf)
False
Optionally, Gaussian integers can be checked for::
>>> isint(3+0j)
True
>>> isint(3+2j)
False
>>> isint(3+2j, gaussian=True)
True
"""
if isinstance(x, int_types):
return True
if hasattr(x, "_mpf_"):
sign, man, exp, bc = xval = x._mpf_
return bool((man and exp >= 0) or xval == fzero)
if hasattr(x, "_mpc_"):
re, im = x._mpc_
rsign, rman, rexp, rbc = re
isign, iman, iexp, ibc = im
re_isint = (rman and rexp >= 0) or re == fzero
if gaussian:
im_isint = (iman and iexp >= 0) or im == fzero
return re_isint and im_isint
return re_isint and im == fzero
if isinstance(x, rational.mpq):
p, q = x._mpq_
return p % q == 0
x = ctx.convert(x)
if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
return ctx.isint(x, gaussian)
raise TypeError("isint() needs a number as input")
def fsum(ctx, terms, absolute=False, squared=False):
"""
Calculates a sum containing a finite number of terms (for infinite
series, see :func:`~mpmath.nsum`). The terms will be converted to
mpmath numbers. For len(terms) > 2, this function is generally
faster and produces more accurate results than the builtin
Python function :func:`sum`.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fsum([1, 2, 0.5, 7])
mpf('10.5')
With squared=True each term is squared, and with absolute=True
the absolute value of each term is used.
"""
prec, rnd = ctx._prec_rounding
real = []
imag = []
other = 0
for term in terms:
reval = imval = 0
if hasattr(term, "_mpf_"):
reval = term._mpf_
elif hasattr(term, "_mpc_"):
reval, imval = term._mpc_
else:
term = ctx.convert(term)
if hasattr(term, "_mpf_"):
reval = term._mpf_
elif hasattr(term, "_mpc_"):
reval, imval = term._mpc_
else:
if absolute: term = ctx.absmax(term)
if squared: term = term**2
other += term
continue
if imval:
if squared:
if absolute:
real.append(mpf_mul(reval,reval))
real.append(mpf_mul(imval,imval))
else:
reval, imval = mpc_pow_int((reval,imval),2,prec+10)
real.append(reval)
imag.append(imval)
elif absolute:
real.append(mpc_abs((reval,imval), prec))
else:
real.append(reval)
imag.append(imval)
else:
if squared:
reval = mpf_mul(reval, reval)
elif absolute:
reval = mpf_abs(reval)
real.append(reval)
s = mpf_sum(real, prec, rnd, absolute)
if imag:
s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
else:
s = ctx.make_mpf(s)
if other == 0:
return s
else:
return s + other
def fdot(ctx, A, B=None, conjugate=False):
r"""
Computes the dot product of the iterables `A` and `B`,
.. math ::
\sum_{k=0} A_k B_k.
Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs.
In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent.
The elements are automatically converted to mpmath numbers.
With ``conjugate=True``, the elements in the second vector
will be conjugated:
.. math ::
\sum_{k=0} A_k \overline{B_k}
**Examples**
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> A = [2, 1.5, 3]
>>> B = [1, -1, 2]
>>> fdot(A, B)
mpf('6.5')
>>> list(zip(A, B))
[(2, 1), (1.5, -1), (3, 2)]
>>> fdot(_)
mpf('6.5')
>>> A = [2, 1.5, 3j]
>>> B = [1+j, 3, -1-j]
>>> fdot(A, B)
mpc(real='9.5', imag='-1.0')
>>> fdot(A, B, conjugate=True)
mpc(real='3.5', imag='-5.0')
"""
if B is not None:
A = zip(A, B)
prec, rnd = ctx._prec_rounding
real = []
imag = []
other = 0
hasattr_ = hasattr
types = (ctx.mpf, ctx.mpc)
for a, b in A:
if type(a) not in types: a = ctx.convert(a)
if type(b) not in types: b = ctx.convert(b)
a_real = hasattr_(a, "_mpf_")
b_real = hasattr_(b, "_mpf_")
if a_real and b_real:
real.append(mpf_mul(a._mpf_, b._mpf_))
continue
a_complex = hasattr_(a, "_mpc_")
b_complex = hasattr_(b, "_mpc_")
if a_real and b_complex:
aval = a._mpf_
bre, bim = b._mpc_
if conjugate:
bim = mpf_neg(bim)
real.append(mpf_mul(aval, bre))
imag.append(mpf_mul(aval, bim))
elif b_real and a_complex:
are, aim = a._mpc_
bval = b._mpf_
real.append(mpf_mul(are, bval))
imag.append(mpf_mul(aim, bval))
elif a_complex and b_complex:
#re, im = mpc_mul(a._mpc_, b._mpc_, prec+20)
are, aim = a._mpc_
bre, bim = b._mpc_
if conjugate:
bim = mpf_neg(bim)
real.append(mpf_mul(are, bre))
real.append(mpf_neg(mpf_mul(aim, bim)))
imag.append(mpf_mul(are, bim))
imag.append(mpf_mul(aim, bre))
else:
if conjugate:
other += a*ctx.conj(b)
else:
other += a*b
s = mpf_sum(real, prec, rnd)
if imag:
s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd)))
else:
s = ctx.make_mpf(s)
if other == 0:
return s
else:
return s + other
def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc="<no doc>"):
"""
Given a low-level mpf_ function, and optionally similar functions
for mpc_ and mpi_, defines the function as a context method.
It is assumed that the return type is the same as that of
the input; the exception is that propagation from mpf to mpc is possible
by raising ComplexResult.
"""
def f(x, **kwargs):
if type(x) not in ctx.types:
x = ctx.convert(x)
prec, rounding = ctx._prec_rounding
if kwargs:
prec = kwargs.get('prec', prec)
if 'dps' in kwargs:
prec = dps_to_prec(kwargs['dps'])
rounding = kwargs.get('rounding', rounding)
if hasattr(x, '_mpf_'):
try:
return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding))
except ComplexResult:
# Handle propagation to complex
if ctx.trap_complex:
raise
return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding))
elif hasattr(x, '_mpc_'):
return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding))
raise NotImplementedError("%s of a %s" % (name, type(x)))
name = mpf_f.__name__[4:]
f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc)
return f
# Called by SpecialFunctions.__init__()
@classmethod
def _wrap_specfun(cls, name, f, wrap):
if wrap:
def f_wrapped(ctx, *args, **kwargs):
convert = ctx.convert
args = [convert(a) for a in args]
prec = ctx.prec
try:
ctx.prec += 10
retval = f(ctx, *args, **kwargs)
finally:
ctx.prec = prec
return +retval
else:
f_wrapped = f
f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
setattr(cls, name, f_wrapped)
def _convert_param(ctx, x):
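        # Classify x and return a pair (value, code): 'Z' for an exact integer,
        # 'Q' for a rational (mpq), 'R' for a real mpf, 'C' for a complex value
        # with nonzero imaginary part, and 'U' for anything else.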
if hasattr(x, "_mpc_"):
v, im = x._mpc_
if im != fzero:
return x, 'C'
elif hasattr(x, "_mpf_"):
v = x._mpf_
else:
if type(x) in int_types:
return int(x), 'Z'
p = None
if isinstance(x, tuple):
p, q = x
elif hasattr(x, '_mpq_'):
p, q = x._mpq_
elif isinstance(x, basestring) and '/' in x:
p, q = x.split('/')
p = int(p)
q = int(q)
if p is not None:
if not p % q:
return p // q, 'Z'
return ctx.mpq(p,q), 'Q'
x = ctx.convert(x)
if hasattr(x, "_mpc_"):
v, im = x._mpc_
if im != fzero:
return x, 'C'
elif hasattr(x, "_mpf_"):
v = x._mpf_
else:
return x, 'U'
sign, man, exp, bc = v
if man:
if exp >= -4:
if sign:
man = -man
if exp >= 0:
return int(man) << exp, 'Z'
if exp >= -4:
p, q = int(man), (1<<(-exp))
return ctx.mpq(p,q), 'Q'
x = ctx.make_mpf(v)
return x, 'R'
elif not exp:
return 0, 'Z'
else:
return x, 'U'
def _mpf_mag(ctx, x):
sign, man, exp, bc = x
if man:
return exp+bc
if x == fzero:
return ctx.ninf
if x == finf or x == fninf:
return ctx.inf
return ctx.nan
def mag(ctx, x):
"""
Quick logarithmic magnitude estimate of a number. Returns an
integer or infinity `m` such that `|x| <= 2^m`. It is not
guaranteed that `m` is an optimal bound, but it will never
be too large by more than 2 (and probably not more than 1).
**Examples**
>>> from mpmath import *
>>> mp.pretty = True
>>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2)))
(4, 4, 4, 4)
>>> mag(10j), mag(10+10j)
(4, 5)
>>> mag(0.01), int(ceil(log(0.01,2)))
(-6, -6)
>>> mag(0), mag(inf), mag(-inf), mag(nan)
(-inf, +inf, +inf, nan)
"""
if hasattr(x, "_mpf_"):
return ctx._mpf_mag(x._mpf_)
elif hasattr(x, "_mpc_"):
r, i = x._mpc_
if r == fzero:
return ctx._mpf_mag(i)
if i == fzero:
return ctx._mpf_mag(r)
return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i))
elif isinstance(x, int_types):
if x:
return bitcount(abs(x))
return ctx.ninf
elif isinstance(x, rational.mpq):
p, q = x._mpq_
if p:
return 1 + bitcount(abs(p)) - bitcount(q)
return ctx.ninf
else:
x = ctx.convert(x)
if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
return ctx.mag(x)
else:
raise TypeError("requires an mpf/mpc")
# Register with "numbers" ABC
# We do not subclass, hence we do not use the @abstractmethod checks. While
# this is less invasive it may turn out that we do not actually support
# parts of the expected interfaces. See
# http://docs.python.org/2/library/numbers.html for list of abstract
# methods.
try:
import numbers
numbers.Complex.register(_mpc)
numbers.Real.register(_mpf)
except ImportError:
pass
|
FreeCAD/FreeCAD-AppImage
|
conda/modifications/ctx_mp_python.py
|
Python
|
lgpl-2.1
| 38,113
|
[
"Gaussian"
] |
bd42f2641281f966ba579e05d4a14a62ea4d4ccd93d4db6c34e3fa79106a90ad
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Analytical nuclear hessian for 1-electron spin-free x2c method
Ref.
JCP 135, 244104 (2011); DOI:10.1063/1.3667202
JCTC 8, 2617 (2012); DOI:10.1021/ct300127e
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import x2c
from pyscf.x2c import sfx2c1e_grad
def hcore_hess_generator(x2cobj, mol=None):
'''nuclear gradients of 1-component X2c hcore Hamiltonian (spin-free part only)
'''
if mol is None: mol = x2cobj.mol
xmol, contr_coeff = x2cobj.get_xmol(mol)
if x2cobj.basis is not None:
s22 = xmol.intor_symmetric('int1e_ovlp')
s21 = gto.intor_cross('int1e_ovlp', xmol, mol)
contr_coeff = lib.cho_solve(s22, s21)
get_h1_xmol = gen_sf_hfw(xmol, x2cobj.approx)
def hcore_deriv(ia, ja):
h1 = get_h1_xmol(ia, ja)
if contr_coeff is not None:
h1 = lib.einsum('pi,xypq,qj->xyij', contr_coeff, h1, contr_coeff)
return numpy.asarray(h1)
return hcore_deriv
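# A rough usage sketch (names follow the PySCF x2c conventions above; the
# driver that would normally call this is not shown here):
#   hcore_deriv = hcore_hess_generator(x2cobj, mol)
#   h2 = hcore_deriv(0, 1)   # second derivative of hcore w.r.t. atoms 0 and 1,
#                            # shape (3, 3, nao, nao) in the contracted basis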
def gen_sf_hfw(mol, approx='1E'):
approx = approx.upper()
c = lib.param.LIGHT_SPEED
h0, s0 = sfx2c1e_grad._get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
c0[:,c0[1]<0] *= -1
aoslices = mol.aoslice_by_atom()
nao = mol.nao_nr()
if 'ATOM' in approx:
x0 = numpy.zeros((nao,nao))
for ia in range(mol.natm):
ish0, ish1, p0, p1 = aoslices[ia]
shls_slice = (ish0, ish1, ish0, ish1)
t1 = mol.intor('int1e_kin', shls_slice=shls_slice)
s1 = mol.intor('int1e_ovlp', shls_slice=shls_slice)
with mol.with_rinv_at_nucleus(ia):
z = -mol.atom_charge(ia)
v1 = z * mol.intor('int1e_rinv', shls_slice=shls_slice)
w1 = z * mol.intor('int1e_prinvp', shls_slice=shls_slice)
x0[p0:p1,p0:p1] = x2c._x2c1e_xmatrix(t1, v1, w1, s1, c)
else:
cl0 = c0[:nao,nao:]
cs0 = c0[nao:,nao:]
x0 = scipy.linalg.solve(cl0.T, cs0.T).T
t0x0 = numpy.dot(s0[nao:,nao:], x0)
s_nesc0 = s0[:nao,:nao] + numpy.dot(x0.T, t0x0)
w_s, v_s = scipy.linalg.eigh(s0[:nao,:nao])
w_sqrt = numpy.sqrt(w_s)
s_nesc0_vbas = reduce(numpy.dot, (v_s.T, s_nesc0, v_s))
R0_mid = numpy.einsum('i,ij,j->ij', 1./w_sqrt, s_nesc0_vbas, 1./w_sqrt)
wr0, vr0 = scipy.linalg.eigh(R0_mid)
wr0_sqrt = numpy.sqrt(wr0)
# R0 in v_s basis
R0 = numpy.dot(vr0/wr0_sqrt, vr0.T)
R0 *= w_sqrt
R0 /= w_sqrt[:,None]
# Transform R0 back
R0 = reduce(numpy.dot, (v_s, R0, v_s.T))
R0 = x2c._get_r(s0[:nao,:nao], s_nesc0)
c_fw0 = numpy.vstack((R0, numpy.dot(x0, R0)))
h0_fw_half = numpy.dot(h0, c_fw0)
epq = e0[:,None] - e0
degen_mask = abs(epq) < 1e-7
epq[degen_mask] = 1e200
s2aa = mol.intor('int1e_ipipovlp', comp=9).reshape(3,3,nao,nao)
t2aa = mol.intor('int1e_ipipkin', comp=9).reshape(3,3,nao,nao)
v2aa = mol.intor('int1e_ipipnuc', comp=9).reshape(3,3,nao,nao)
w2aa = mol.intor('int1e_ipippnucp', comp=9).reshape(3,3,nao,nao)
s2ab = mol.intor('int1e_ipovlpip', comp=9).reshape(3,3,nao,nao)
t2ab = mol.intor('int1e_ipkinip', comp=9).reshape(3,3,nao,nao)
v2ab = mol.intor('int1e_ipnucip', comp=9).reshape(3,3,nao,nao)
w2ab = mol.intor('int1e_ippnucpip', comp=9).reshape(3,3,nao,nao)
n2 = nao * 2
h2ao = numpy.zeros((3,3,n2,n2), dtype=v2aa.dtype)
s2ao = numpy.zeros((3,3,n2,n2), dtype=v2aa.dtype)
get_h1_etc = sfx2c1e_grad._gen_first_order_quantities(mol, e0, c0, x0, approx)
def hcore_deriv(ia, ja):
ish0, ish1, i0, i1 = aoslices[ia]
jsh0, jsh1, j0, j1 = aoslices[ja]
s2cc = numpy.zeros_like(s2aa)
t2cc = numpy.zeros_like(s2aa)
v2cc = numpy.zeros_like(s2aa)
w2cc = numpy.zeros_like(s2aa)
if ia == ja:
with mol.with_rinv_origin(mol.atom_coord(ia)):
z = mol.atom_charge(ia)
rinv2aa = z*mol.intor('int1e_ipiprinv', comp=9).reshape(3,3,nao,nao)
rinv2ab = z*mol.intor('int1e_iprinvip', comp=9).reshape(3,3,nao,nao)
prinvp2aa = z*mol.intor('int1e_ipipprinvp', comp=9).reshape(3,3,nao,nao)
prinvp2ab = z*mol.intor('int1e_ipprinvpip', comp=9).reshape(3,3,nao,nao)
s2cc[:,:,i0:i1 ] = s2aa[:,:,i0:i1 ]
s2cc[:,:,i0:i1,j0:j1]+= s2ab[:,:,i0:i1,j0:j1]
t2cc[:,:,i0:i1 ] = t2aa[:,:,i0:i1 ]
t2cc[:,:,i0:i1,j0:j1]+= t2ab[:,:,i0:i1,j0:j1]
v2cc -= rinv2aa + rinv2ab
v2cc[:,:,i0:i1 ]+= v2aa[:,:,i0:i1 ]
v2cc[:,:,i0:i1,j0:j1]+= v2ab[:,:,i0:i1,j0:j1]
v2cc[:,:,i0:i1 ]+= rinv2aa[:,:,i0:i1]
v2cc[:,:,i0:i1 ]+= rinv2ab[:,:,i0:i1]
v2cc[:,:,: ,i0:i1]+= rinv2aa[:,:,i0:i1].transpose(0,1,3,2)
v2cc[:,:,: ,i0:i1]+= rinv2ab[:,:,:,i0:i1]
w2cc -= prinvp2aa + prinvp2ab
w2cc[:,:,i0:i1 ]+= w2aa[:,:,i0:i1 ]
w2cc[:,:,i0:i1,j0:j1]+= w2ab[:,:,i0:i1,j0:j1]
w2cc[:,:,i0:i1 ]+= prinvp2aa[:,:,i0:i1]
w2cc[:,:,i0:i1 ]+= prinvp2ab[:,:,i0:i1]
w2cc[:,:,: ,i0:i1]+= prinvp2aa[:,:,i0:i1].transpose(0,1,3,2)
w2cc[:,:,: ,i0:i1]+= prinvp2ab[:,:,:,i0:i1]
else:
s2cc[:,:,i0:i1,j0:j1] = s2ab[:,:,i0:i1,j0:j1]
t2cc[:,:,i0:i1,j0:j1] = t2ab[:,:,i0:i1,j0:j1]
v2cc[:,:,i0:i1,j0:j1] = v2ab[:,:,i0:i1,j0:j1]
w2cc[:,:,i0:i1,j0:j1] = w2ab[:,:,i0:i1,j0:j1]
zi = mol.atom_charge(ia)
zj = mol.atom_charge(ja)
with mol.with_rinv_at_nucleus(ia):
shls_slice = (jsh0, jsh1, 0, mol.nbas)
rinv2aa = mol.intor('int1e_ipiprinv', comp=9, shls_slice=shls_slice)
rinv2ab = mol.intor('int1e_iprinvip', comp=9, shls_slice=shls_slice)
prinvp2aa = mol.intor('int1e_ipipprinvp', comp=9, shls_slice=shls_slice)
prinvp2ab = mol.intor('int1e_ipprinvpip', comp=9, shls_slice=shls_slice)
rinv2aa = zi * rinv2aa.reshape(3,3,j1-j0,nao)
rinv2ab = zi * rinv2ab.reshape(3,3,j1-j0,nao)
prinvp2aa = zi * prinvp2aa.reshape(3,3,j1-j0,nao)
prinvp2ab = zi * prinvp2ab.reshape(3,3,j1-j0,nao)
v2cc[:,:,j0:j1] += rinv2aa
v2cc[:,:,j0:j1] += rinv2ab.transpose(1,0,2,3)
w2cc[:,:,j0:j1] += prinvp2aa
w2cc[:,:,j0:j1] += prinvp2ab.transpose(1,0,2,3)
with mol.with_rinv_at_nucleus(ja):
shls_slice = (ish0, ish1, 0, mol.nbas)
rinv2aa = mol.intor('int1e_ipiprinv', comp=9, shls_slice=shls_slice)
rinv2ab = mol.intor('int1e_iprinvip', comp=9, shls_slice=shls_slice)
prinvp2aa = mol.intor('int1e_ipipprinvp', comp=9, shls_slice=shls_slice)
prinvp2ab = mol.intor('int1e_ipprinvpip', comp=9, shls_slice=shls_slice)
rinv2aa = zj * rinv2aa.reshape(3,3,i1-i0,nao)
rinv2ab = zj * rinv2ab.reshape(3,3,i1-i0,nao)
prinvp2aa = zj * prinvp2aa.reshape(3,3,i1-i0,nao)
prinvp2ab = zj * prinvp2ab.reshape(3,3,i1-i0,nao)
v2cc[:,:,i0:i1] += rinv2aa
v2cc[:,:,i0:i1] += rinv2ab
w2cc[:,:,i0:i1] += prinvp2aa
w2cc[:,:,i0:i1] += prinvp2ab
s2cc = s2cc + s2cc.transpose(0,1,3,2)
t2cc = t2cc + t2cc.transpose(0,1,3,2)
v2cc = v2cc + v2cc.transpose(0,1,3,2)
w2cc = w2cc + w2cc.transpose(0,1,3,2)
h2ao[:,:,:nao,:nao] = v2cc
h2ao[:,:,:nao,nao:] = t2cc
h2ao[:,:,nao:,:nao] = t2cc
h2ao[:,:,nao:,nao:] = w2cc * (.25/c**2) - t2cc
s2ao[:,:,:nao,:nao] = s2cc
s2ao[:,:,nao:,nao:] = t2cc * (.5/c**2)
h1i, s1i, e1i, c1i, x1i, s_nesc1i, R1i, c_fw1i = get_h1_etc(ia)
h1j, s1j, e1j, c1j, x1j, s_nesc1j, R1j, c_fw1j = get_h1_etc(ja)
if 'ATOM' not in approx:
f2 = lib.einsum('xypq,qj->xypj', h2ao, c0[:,nao:])
f2+= lib.einsum('xpq,yqj->xypj', h1i, c1j)
f2+= lib.einsum('ypq,xqj->xypj', h1j, c1i)
sc2 = lib.einsum('xypq,qj->xypj', s2ao, c0[:,nao:])
sc2+= lib.einsum('xpq,yqj->xypj', s1i, c1j)
sc2+= lib.einsum('ypq,xqj->xypj', s1j, c1i)
f2-= sc2 * e0[nao:]
sc1i = lib.einsum('xpq,qj->xpj', s1i, c0[:,nao:])
sc1j = lib.einsum('xpq,qj->xpj', s1j, c0[:,nao:])
sc1i+= lib.einsum('pq,xqj->xpj', s0, c1i)
sc1j+= lib.einsum('pq,xqj->xpj', s0, c1j)
f2-= lib.einsum('xpq,yqj->xypj', sc1i, e1j)
f2-= lib.einsum('ypq,xqj->xypj', sc1j, e1i)
c2 = lib.einsum('pi,xypj->xyij', c0.conj(), f2) / -epq[:,nao:]
c2_ao = lib.einsum('pq,xyqi->xypi', c0, c2)
cl2 = c2_ao[:,:,:nao]
cs2 = c2_ao[:,:,nao:]
tmp = cs2 - lib.einsum('pq,xyqi->xypi', x0, cl2)
tmp-= lib.einsum('xpq,yqi->xypi', x1i, c1j[:,:nao])
tmp-= lib.einsum('ypq,xqi->xypi', x1j, c1i[:,:nao])
x2 = scipy.linalg.solve(cl0.T, tmp.reshape(-1,nao).T).T.reshape(3,3,nao,nao)
hfw2 = numpy.empty((3,3,nao,nao))
for i in range(3):
for j in range(3):
if 'ATOM' in approx:
s_nesc2 = reduce(numpy.dot, (x0.T, s2ao[i,j,nao:,nao:], x0))
s_nesc2 += s2ao[i,j,:nao,:nao]
R2 = _get_r2((w_sqrt,v_s), s_nesc0,
s1i[i,:nao,:nao], s_nesc1i[i],
s1j[j,:nao,:nao], s_nesc1j[j],
s2ao[i,j,:nao,:nao], s_nesc2, (wr0_sqrt,vr0))
c_fw2 = numpy.vstack((R2, numpy.dot(x0, R2)))
else:
s_nesc2 = numpy.dot(x2[i,j].T, t0x0)
s_nesc2 += reduce(numpy.dot, (x1i[i].T, s1j[j,nao:,nao:], x0))
s_nesc2 += reduce(numpy.dot, (x0.T, s1i[i,nao:,nao:], x1j[j]))
s_nesc2 += reduce(numpy.dot, (x1i[i].T, s0[nao:,nao:], x1j[j]))
s_nesc2 = s_nesc2 + s_nesc2.T
s_nesc2 += reduce(numpy.dot, (x0.T, s2ao[i,j,nao:,nao:], x0))
s_nesc2 += s2ao[i,j,:nao,:nao]
R2 = _get_r2((w_sqrt,v_s), s_nesc0,
s1i[i,:nao,:nao], s_nesc1i[i],
s1j[j,:nao,:nao], s_nesc1j[j],
s2ao[i,j,:nao,:nao], s_nesc2, (wr0_sqrt,vr0))
c_fw_s = (numpy.dot(x0, R2) + numpy.dot(x1i[i], R1j[j]) +
numpy.dot(x1j[j], R1i[i]) + numpy.dot(x2[i,j], R0))
c_fw2 = numpy.vstack((R2, c_fw_s))
tmp = numpy.dot(c_fw2.T, h0_fw_half)
tmp += reduce(numpy.dot, (c_fw1i[i].T, h1j[j], c_fw0))
tmp += reduce(numpy.dot, (c_fw0.T, h1i[i], c_fw1j[j]))
tmp += reduce(numpy.dot, (c_fw1i[i].T, h0, c_fw1j[j]))
hfw2[i,j] = tmp + tmp.T
hfw2[i,j]+= reduce(numpy.dot, (c_fw0.T, h2ao[i,j], c_fw0))
return hfw2
return hcore_deriv
def _get_r2(s0_roots, sa0, s1i, sa1i, s1j, sa1j, s2, sa2, r0_roots):
w_sqrt, v_s = s0_roots
w_invsqrt = 1. / w_sqrt
wr0_sqrt, vr0 = r0_roots
wr0_invsqrt = 1. / wr0_sqrt
sa0 = lib.einsum('pi,pq,qj->ij', v_s, sa0 , v_s)
s1i = lib.einsum('pi,pq,qj->ij', v_s, s1i , v_s)
s1j = lib.einsum('pi,pq,qj->ij', v_s, s1j , v_s)
s2 = lib.einsum('pi,pq,qj->ij', v_s, s2 , v_s)
sa1i = lib.einsum('pi,pq,qj->ij', v_s, sa1i, v_s)
sa1j = lib.einsum('pi,pq,qj->ij', v_s, sa1j, v_s)
sa2 = lib.einsum('pi,pq,qj->ij', v_s, sa2 , v_s)
s1i_sqrt = s1i / (w_sqrt[:,None] + w_sqrt)
s1i_invsqrt = (numpy.einsum('i,ij,j->ij', w_invsqrt**2, s1i, w_invsqrt**2)
/ -(w_invsqrt[:,None] + w_invsqrt))
s1j_sqrt = s1j / (w_sqrt[:,None] + w_sqrt)
s1j_invsqrt = (numpy.einsum('i,ij,j->ij', w_invsqrt**2, s1j, w_invsqrt**2)
/ -(w_invsqrt[:,None] + w_invsqrt))
tmp = numpy.dot(s1i_sqrt, s1j_sqrt)
s2_sqrt = (s2 - tmp - tmp.T) / (w_sqrt[:,None] + w_sqrt)
tmp = numpy.dot(s1i*w_invsqrt**2, s1j)
tmp = s2 - tmp - tmp.T
tmp = -numpy.einsum('i,ij,j->ij', w_invsqrt**2, tmp, w_invsqrt**2)
tmp1 = numpy.dot(s1i_invsqrt, s1j_invsqrt)
s2_invsqrt = (tmp - tmp1 - tmp1.T) / (w_invsqrt[:,None] + w_invsqrt)
R1i_mid = lib.einsum('ip,pj,j->ij', s1i_invsqrt, sa0, w_invsqrt)
R1i_mid = R1i_mid + R1i_mid.T
R1i_mid+= numpy.einsum('i,ij,j->ij', w_invsqrt, sa1i, w_invsqrt)
R1i_mid = tmpi = lib.einsum('pi,pq,qj->ij', vr0, R1i_mid, vr0)
R1i_mid = (numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, R1i_mid, wr0_invsqrt**2)
/ -(wr0_invsqrt[:,None] + wr0_invsqrt))
R1j_mid = lib.einsum('ip,pj,j->ij', s1j_invsqrt, sa0, w_invsqrt)
R1j_mid = R1j_mid + R1j_mid.T
R1j_mid+= numpy.einsum('i,ij,j->ij', w_invsqrt, sa1j, w_invsqrt)
R1j_mid = tmpj = lib.einsum('pi,pq,qj->ij', vr0, R1j_mid, vr0)
R1j_mid = (numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, R1j_mid, wr0_invsqrt**2)
/ -(wr0_invsqrt[:,None] + wr0_invsqrt))
# second derivative of (s_invsqrt * sa * s_invsqrt), 9 terms
R2_mid = lib.einsum('ip,pj,j->ij', s2_invsqrt , sa0 , w_invsqrt)
R2_mid+= lib.einsum('ip,pj,j->ij', s1i_invsqrt, sa1j, w_invsqrt)
R2_mid+= lib.einsum('i,ip,pj->ij', w_invsqrt , sa1i, s1j_invsqrt)
R2_mid+= lib.einsum('ip,pq,qj->ij', s1i_invsqrt, sa0 , s1j_invsqrt)
R2_mid = R2_mid + R2_mid.T
R2_mid+= numpy.einsum('i,ij,j->ij', w_invsqrt, sa2, w_invsqrt)
R2_mid = lib.einsum('pi,pq,qj->ij', vr0, R2_mid, vr0)
tmp = numpy.dot(tmpi*wr0_invsqrt**2, tmpj)
tmp = R2_mid - tmp - tmp.T
tmp = -numpy.einsum('i,ij,j->ij', wr0_invsqrt**2, tmp, wr0_invsqrt**2)
tmp1 = numpy.dot(R1i_mid, R1j_mid)
R2_mid = (tmp - tmp1 - tmp1.T) / (wr0_invsqrt[:,None] + wr0_invsqrt)
R0_mid = numpy.dot(vr0*wr0_invsqrt, vr0.T)
R1i_mid = reduce(numpy.dot, (vr0, R1i_mid, vr0.T))
R1j_mid = reduce(numpy.dot, (vr0, R1j_mid, vr0.T))
R2_mid = reduce(numpy.dot, (vr0, R2_mid, vr0.T))
R2 = lib.einsum('ip,pj,j->ij' , s2_invsqrt , R0_mid , w_sqrt)
R2 += lib.einsum('ip,pj,j->ij' , s1i_invsqrt, R1j_mid, w_sqrt)
R2 += lib.einsum('ip,pq,qj->ij', s1i_invsqrt, R0_mid , s1j_sqrt)
R2 += lib.einsum('ip,pj,j->ij' , s1j_invsqrt, R1i_mid, w_sqrt)
R2 += numpy.einsum('i,ij,j->ij', w_invsqrt , R2_mid , w_sqrt)
R2 += lib.einsum('i,iq,qj->ij' , w_invsqrt , R1i_mid, s1j_sqrt)
R2 += lib.einsum('ip,pq,qj->ij', s1j_invsqrt, R0_mid , s1i_sqrt)
R2 += lib.einsum('i,iq,qj->ij' , w_invsqrt , R1j_mid, s1i_sqrt)
R2 += lib.einsum('i,iq,qj->ij' , w_invsqrt , R0_mid , s2_sqrt)
R2 = reduce(numpy.dot, (v_s, R2, v_s.T))
return R2
if __name__ == '__main__':
bak = lib.param.LIGHT_SPEED
lib.param.LIGHT_SPEED = 10
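    # The speed of light is lowered to 10 a.u. here, presumably so that the
    # relativistic corrections are large enough for the finite-difference
    # checks below to be meaningful; the original value is restored at the end.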
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
h1_deriv_1 = sfx2c1e_grad.gen_sf_hfw(mol, approx='1E')
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. ,-0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
h1_deriv_2 = sfx2c1e_grad.gen_sf_hfw(mol, approx='1E')
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0. )],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
h2_deriv = gen_sf_hfw(mol)
h2 = h2_deriv(0,0)
h2_ref = (h1_deriv_1(0)[2] - h1_deriv_2(0)[2]) / 0.0002 * lib.param.BOHR
print(abs(h2[2,2]-h2_ref).max())
print(lib.finger(h2) - 33.71188112440316)
h2 = h2_deriv(1,0)
h2_ref = (h1_deriv_1(1)[2] - h1_deriv_2(1)[2]) / 0.0002 * lib.param.BOHR
print(abs(h2[2,2]-h2_ref).max())
print(lib.finger(h2) - -23.609411428378138)
lib.param.LIGHT_SPEED = bak
|
sunqm/pyscf
|
pyscf/x2c/sfx2c1e_hess.py
|
Python
|
apache-2.0
| 16,870
|
[
"PySCF"
] |
7f440f158f3a5d1cca7b4e006aa1c14da53a995a313b6d87ead57d54bbff1a88
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2007, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
import time
from Globals import InitializeClass
from Products.ZenUtils import Map
from Products.ZenUtils import Time
from Products.ZenEvents.ZenEventClasses import Status_Ping, Status_Snmp
from Products.ZenEvents.ZenEventClasses import Status_OSProcess
from Products.ZenEvents.Availability import Availability
from AccessControl import ClassSecurityInfo
import logging
log = logging.getLogger("zen.Reports")
CACHE_TIME = 60.
_cache = Map.Locked(Map.Timed({}, CACHE_TIME))
def _round(value):
if value is None: return None
return (value // CACHE_TIME) * CACHE_TIME
def _findComponent(device, name):
for c in device.getMonitoredComponents():
if c.name() == name:
return c
return None
InitializeClass(Availability)
class Report:
"Determine availability by counting the amount of time down"
def __init__(self,
startDate = None,
endDate = None,
eventClass=Status_Ping,
severity=5,
device=None,
component='',
groupName=''):
self.startDate = _round(startDate)
self.endDate = _round(endDate)
self.eventClass = eventClass
self.severity = severity
self.device = device
self.component = component
self.groupName = groupName
def tuple(self):
return (self.startDate, self.endDate, self.eventClass,
self.severity, self.device, self.component)
def __hash__(self):
return hash(self.tuple())
def __cmp__(self, other):
return cmp(self.tuple(), other.tuple())
def run(self, dmd):
        """Run the report, returning an Availability object for each device"""
        log.debug('in method report run')
# Note: we don't handle overlapping "down" events, so down
        # time could get double-counted.
__pychecker__='no-local'
zem = dmd.ZenEventManager
cols = 'device, component, firstTime, lastTime'
endDate = self.endDate or time.time()
startDate = self.startDate
if not startDate:
days = zem.defaultAvailabilityDays
startDate = time.time() - days*60*60*24
env = self.__dict__.copy()
env.update(locals())
severity = self.severity
groupName = self.groupName
log.debug('groupName: %s', groupName)
w = ' WHERE severity >= %(severity)s '
w += " AND DeviceGroups LIKE '%%%(groupName)s%%' "
w += ' AND lastTime > %(startDate)s '
w += ' AND firstTime <= %(endDate)s '
w += ' AND firstTime != lastTime '
w += " AND eventClass = '%(eventClass)s' "
w += " AND prodState >= 1000 "
if self.device:
w += " AND device = '%(device)s' "
if self.component:
w += " AND component like '%%%(component)s%%' "
env['w'] = w % env
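        # The doubled percent signs above survive the %-interpolation as literal
        # '%' characters, so e.g. '%%%(groupName)s%%' becomes '%<groupName>%',
        # the wildcard pattern expected by SQL LIKE.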
s = ('SELECT %(cols)s FROM ( '
' SELECT %(cols)s FROM history %(w)s '
' UNION '
' SELECT %(cols)s FROM status %(w)s '
') AS U ' % env)
devices = {}
conn = zem.connect()
try:
curs = conn.cursor()
curs.execute(s)
while 1:
rows = curs.fetchmany()
if not rows: break
for row in rows:
device, component, first, last = row
last = min(last, endDate)
first = max(first, startDate)
k = (device, component)
try:
devices[k] += last - first
except KeyError:
devices[k] = last - first
finally: zem.close(conn)
total = endDate - startDate
if self.device:
log.debug('self.device defined')
deviceList = []
device = dmd.Devices.findDevice(self.device)
if device:
deviceList = [device]
devices.setdefault( (self.device, self.component), 0)
else:
            log.debug('self.device not defined')
groupNameLong = 'Groups'
groupNameLong += groupName
log.debug('groupNameLong: %s', groupNameLong)
deviceList = [d for d in dmd.Groups.getDmdObj(groupNameLong).getSubDevices()]
if not self.component:
for d in dmd.Groups.getDmdObj(groupNameLong).getSubDevices():
devices.setdefault( (d.id, self.component), 0)
deviceLookup = dict([(d.id, d) for d in deviceList])
result = []
for (d, c), v in devices.items():
dev = deviceLookup.get(d, None)
sys = (dev and dev.getSystemNamesString()) or ''
result.append( Availability(d, c, v, total, sys) )
# add in the devices that have the component, but no events
if self.component:
for d in deviceList:
for c in d.getMonitoredComponents():
if c.name().find(self.component) >= 0:
a = Availability(d.id, c.name(), 0, total,
d.getSystemNamesString())
result.append(a)
return result
def query(dmd, *args, **kwargs):
log.debug('in method query')
r = Report(*args, **kwargs)
# caching disabled
# try:
# return _cache[r.tuple()]
# except KeyError:
# result = r.run(dmd)
# _cache[r.tuple()] = result
# return result
result = r.run(dmd)
_cache[r.tuple()] = result
return result
class AvailabilityByGroup:
def run(self, dmd, REQUEST):
zem = dmd.ZenEventManager
# Get values
component = REQUEST.get('component', '')
eventClasses = REQUEST.get('eventClasses', '/Status/Ping')
severity = REQUEST.get('severity', '4')
device = REQUEST.get('device', '')
groupName = REQUEST.get('groupName', '/')
startDate = Time.ParseUSDate(REQUEST.get('startDate', zem.defaultAvailabilityStart()))
endDate = Time.ParseUSDate(REQUEST.get('endDate', zem.defaultAvailabilityEnd()))
r = Report(startDate, endDate, eventClasses, severity, device, component, groupName)
result = r.run(dmd)
return result
if __name__ == '__main__':
import pprint
r = Report(time.time() - 60*60*24*30)
start = time.time() - 60*60*24*30
# r.component = 'snmp'
r.component = None
r.eventClass = Status_Snmp
r.severity = 3
from Products.ZenUtils.ZCmdBase import ZCmdBase
z = ZCmdBase()
pprint.pprint(r.run(z.dmd))
a = query(z.dmd, start, device='gate.zenoss.loc', eventClass=Status_Ping)
assert 0 <= float(a[0]) <= 1.
b = query(z.dmd, start, device='gate.zenoss.loc', eventClass=Status_Ping)
assert a == b
assert id(a) == id(b)
pprint.pprint(r.run(z.dmd))
r.component = 'httpd'
r.eventClass = Status_OSProcess
r.severity = 4
pprint.pprint(r.run(z.dmd))
r.device = 'gate.zenoss.loc'
r.component = ''
r.eventClass = Status_Ping
r.severity = 4
pprint.pprint(r.run(z.dmd))
|
zenoss/ZenPacks.community.AvailabilityReportPerGroup
|
ZenPacks/community/AvailabilityReportPerGroup/reports/plugins/AvailabilityByGroup.py
|
Python
|
gpl-2.0
| 7,751
|
[
"VisIt"
] |
dcd412c0492b570d6079ff2e0c4d8cc421bdfdc822d6789670fe86202942298d
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
************************************
**espresso.standard_system.Default**
************************************
"""
import espresso
import mpi4py.MPI as MPI
def Default(box, rc=1.12246, skin=0.3, dt=0.005, temperature=None):
'''
    returns a default system and integrator; no interactions and no particles are set
    if temperature is not None, a Langevin thermostat at that temperature is added (gamma is 1.0)
'''
system = espresso.System()
system.rng = espresso.esutil.RNG()
system.bc = espresso.bc.OrthorhombicBC(system.rng, box)
system.skin = skin
nodeGrid = espresso.tools.decomp.nodeGrid(MPI.COMM_WORLD.size)
cellGrid = espresso.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
system.storage = espresso.storage.DomainDecomposition(system, nodeGrid, cellGrid)
print "nodeGrid: ",nodeGrid, " cellGrid: ",cellGrid
integrator = espresso.integrator.VelocityVerlet(system)
integrator.dt = dt
if (temperature != None):
thermostat = espresso.integrator.LangevinThermostat(system)
thermostat.gamma = 1.0
thermostat.temperature = temperature
integrator.addExtension(thermostat)
return system, integrator
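# A usage sketch (box size and step count are made-up values; run(steps) is
# assumed to be available on the VelocityVerlet integrator, as in the
# ESPResSo++ examples):
#   system, integrator = Default((10.0, 10.0, 10.0), temperature=1.0)
#   integrator.run(1000)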
|
BackupTheBerlios/espressopp
|
src/standard_system/Default.py
|
Python
|
gpl-3.0
| 2,072
|
[
"ESPResSo"
] |
c823f3daba32e2993f4e0448305366b8e4d2aa491098953548032a408a82b4cd
|
"""Reference implementation for building a nengo.Network."""
import collections
import logging
import numpy as np
import nengo.decoders
import nengo.neurons
import nengo.objects
import nengo.utils.distributions as dists
import nengo.utils.numpy as npext
from nengo.utils.compat import is_callable, is_integer
logger = logging.getLogger(__name__)
class ShapeMismatch(ValueError):
pass
class SignalView(object):
def __init__(self, base, shape, elemstrides, offset, name=None):
assert base is not None
self.base = base
self.shape = tuple(shape)
self.elemstrides = tuple(elemstrides)
self.offset = int(offset)
if name is not None:
self._name = name
def __len__(self):
return self.shape[0]
def __str__(self):
return '%s{%s, %s}' % (
self.__class__.__name__,
self.name, self.shape)
def __repr__(self):
return '%s{%s, %s}' % (
self.__class__.__name__,
self.name, self.shape)
def view_like_self_of(self, newbase, name=None):
if newbase.base != newbase:
raise NotImplementedError()
if newbase.structure != self.base.structure:
raise NotImplementedError('technically ok but should not happen',
(self.base, newbase))
return SignalView(newbase,
self.shape,
self.elemstrides,
self.offset,
name)
@property
def structure(self):
return (self.shape, self.elemstrides, self.offset)
def same_view_as(self, other):
return self.structure == other.structure and self.base == other.base
@property
def dtype(self):
return np.dtype(self.base.dtype)
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
return int(np.prod(self.shape))
def reshape(self, *shape):
if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
shape = shape[0]
if self.elemstrides == (1,):
size = int(np.prod(shape))
if size != self.size:
raise ShapeMismatch(shape, self.shape)
elemstrides = [1]
for si in reversed(shape[1:]):
elemstrides = [si * elemstrides[0]] + elemstrides
return SignalView(
base=self.base,
shape=shape,
elemstrides=elemstrides,
offset=self.offset)
elif self.size == 1:
# -- scalars can be reshaped to any number of (1, 1, 1...)
size = int(np.prod(shape))
if size != self.size:
raise ShapeMismatch(shape, self.shape)
elemstrides = [1] * len(shape)
return SignalView(
base=self.base,
shape=shape,
elemstrides=elemstrides,
offset=self.offset)
else:
# -- there are cases where reshaping can still work
# but there are limits too, because we can only
# support view-based reshapes. So the strides have
# to work.
raise NotImplementedError('reshape of strided view')
def transpose(self, neworder=None):
if neworder:
raise NotImplementedError()
return SignalView(
self.base,
reversed(self.shape),
reversed(self.elemstrides),
self.offset,
self.name + '.T'
)
@property
def T(self):
if self.ndim < 2:
return self
else:
return self.transpose()
def __getitem__(self, item): # noqa
# -- copy the shape and strides
shape = list(self.shape)
elemstrides = list(self.elemstrides)
offset = self.offset
if isinstance(item, (list, tuple)):
dims_to_del = []
for ii, idx in enumerate(item):
if isinstance(idx, int):
dims_to_del.append(ii)
offset += idx * elemstrides[ii]
elif isinstance(idx, slice):
start, stop, stride = idx.indices(shape[ii])
offset += start * elemstrides[ii]
if stride != 1:
raise NotImplementedError()
shape[ii] = stop - start
for dim in reversed(dims_to_del):
shape.pop(dim)
elemstrides.pop(dim)
return SignalView(
base=self.base,
shape=shape,
elemstrides=elemstrides,
offset=offset)
elif isinstance(item, (int, np.integer)):
if len(self.shape) == 0:
raise IndexError()
if not (0 <= item < self.shape[0]):
raise NotImplementedError()
shape = self.shape[1:]
elemstrides = self.elemstrides[1:]
offset = self.offset + item * self.elemstrides[0]
return SignalView(
base=self.base,
shape=shape,
elemstrides=elemstrides,
offset=offset)
elif isinstance(item, slice):
return self.__getitem__((item,))
else:
raise NotImplementedError(item)
@property
def name(self):
try:
return self._name
except AttributeError:
if self.base is self:
return '<anon%d>' % id(self)
else:
return 'View(%s[%d])' % (self.base.name, self.offset)
@name.setter
def name(self, value):
self._name = value
def is_contiguous(self):
shape, strides, offset = self.structure
if len(shape) == 0:
return True, offset, offset + 1
elif len(shape) == 1:
if strides[0] == 1:
return True, offset, offset + shape[0]
else:
return False, None, None
elif len(shape) == 2:
if strides == (1, shape[0]) or strides == (shape[1], 1):
return True, offset, offset + shape[0] * shape[1]
else:
return False, None, None
else:
raise NotImplementedError()
# if self.ndim == 1 and self.elemstrides[0] == 1:
# return self.offset, self.offset + self.size
def shares_memory_with(self, other): # noqa
# TODO: WRITE SOME UNIT TESTS FOR THIS FUNCTION !!!
        # Terminology: two arrays *overlap* if the lowest memory address
        # touched by the upper one is not higher than the highest memory
        # address touched by the lower one.
#
# np.may_share_memory returns True iff there is overlap.
# Overlap is a necessary but insufficient condition for *aliasing*.
#
# Aliasing is when two ndarrays refer a common memory location.
if self.base is not other.base or self.size == 0 or other.size == 0:
return False
elif self is other or self.same_view_as(other):
return True
elif self.ndim < other.ndim:
return other.shares_memory_with(self)
assert self.ndim > 0
if self.ndim == 1:
# -- self is a vector view
# and other is either a scalar or vector view
ae0, = self.elemstrides
be0, = other.elemstrides
amin = self.offset
amax = amin + self.shape[0] * ae0
bmin = other.offset
bmax = bmin + other.shape[0] * be0
if amin <= amax <= bmin <= bmax or bmin <= bmax <= amin <= amax:
return False
elif ae0 == be0 == 1:
# -- strides are equal, and we've already checked for
# non-overlap. They do overlap, so they are aliased.
return True
# TODO: look for common divisor of ae0 and be0
raise NotImplementedError('1d', (self.structure, other.structure))
elif self.ndim == 2:
# -- self is a matrix view
# and other is either a scalar, vector or matrix view
a_contig, amin, amax = self.is_contiguous()
b_contig, bmin, bmax = other.is_contiguous()
if a_contig and b_contig:
# -- both have a contiguous memory layout,
# from min up to but not including max
return (not (amin <= amax <= bmin <= bmax)
and not (bmin <= bmax <= amin <= amax))
elif a_contig:
# -- only a contiguous
raise NotImplementedError('2d self:contig, other:discontig',
(self.structure, other.structure))
else:
raise NotImplementedError('2d',
(self.structure, other.structure))
raise NotImplementedError()
class Signal(SignalView):
"""Interpretable, vector-valued quantity within Nengo"""
# Set assert_named_signals True to raise an Exception
# if model.signal is used to create a signal with no name.
# This can help to identify code that's creating un-named signals,
# if you are trying to track down mystery signals that are showing
# up in a model.
assert_named_signals = False
def __init__(self, value, name=None):
self.value = np.asarray(value, dtype=np.float64)
if name is not None:
self._name = name
if Signal.assert_named_signals:
assert name
def __str__(self):
try:
return "Signal(" + self._name + ", shape=" + str(self.shape) + ")"
except AttributeError:
return ("Signal(id " + str(id(self)) + ", shape="
+ str(self.shape) + ")")
def __repr__(self):
return str(self)
@property
def dtype(self):
return self.value.dtype
@property
def shape(self):
return self.value.shape
@property
def size(self):
return self.value.size
@property
def elemstrides(self):
s = np.asarray(self.value.strides)
return tuple(int(si / self.dtype.itemsize) for si in s)
@property
def offset(self):
return 0
@property
def base(self):
return self
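# A small sketch of the view machinery above (array contents are arbitrary):
#   sig = Signal(np.zeros(6), name='x')
#   tail = sig[2:]            # SignalView of shape (4,) sharing sig's memory
#   mat = sig.reshape(2, 3)   # allowed because sig is contiguous (elemstrides == (1,))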
class Operator(object):
"""Base class for operator instances understood by nengo.Simulator.
The lifetime of a Signal during one simulator timestep:
0) at most one set operator (optional)
1) any number of increments
2) any number of reads
3) at most one update
A signal that is only read can be considered a "constant".
A signal that is both set *and* updated can be a problem:
since reads must come after the set, and the set will destroy
whatever were the contents of the update, it can be the case
that the update is completely hidden and rendered irrelevant.
There are however at least two reasons to use both a set and an update:
(a) to use a signal as scratch space (updating means destroying it)
(b) to use sets and updates on partly overlapping views of the same
memory.
N.B.: It is done on purpose that there are no default values for
reads, sets, incs, and updates.
Each operator should explicitly set each of these properties.
"""
@property
def reads(self):
"""Signals that are read and not modified"""
return self._reads
@reads.setter
def reads(self, val):
self._reads = val
@property
def sets(self):
"""Signals assigned by this operator
A signal that is set here cannot be set or updated
by any other operator.
"""
return self._sets
@sets.setter
def sets(self, val):
self._sets = val
@property
def incs(self):
"""Signals incremented by this operator
Increments will be applied after this signal has been
set (if it is set), and before reads.
"""
return self._incs
@incs.setter
def incs(self, val):
self._incs = val
@property
def updates(self):
"""Signals assigned their value for time t + 1
This operator will be scheduled so that updates appear after
all sets, increments and reads of this signal.
"""
return self._updates
@updates.setter
def updates(self, val):
self._updates = val
@property
def all_signals(self):
return self.reads + self.sets + self.incs + self.updates
def init_signals(self, signals, dt):
"""Initialize simulator.signals
Install any buffers into the signals view that
this operator will need. Classes for neurons
that use extra buffers should create them here.
"""
for sig in self.all_signals:
if sig.base not in signals:
signals.init(sig.base,
np.asarray(
np.zeros(sig.base.shape,
dtype=sig.base.dtype)
+ sig.base.value))
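# Illustrative sketch of the per-timestep ordering described in Operator's
# docstring, using the operator classes defined below on hypothetical
# signals ``sig``, ``A``, ``x`` and ``B``:
#     Reset(sig)                              # 0) at most one set
#     DotInc(A, x, sig)                       # 1) any number of increments
#     ... operators listing sig in ``reads``  # 2) any number of reads
#     ProdUpdate(A, x, B, sig)                # 3) at most one update (t + 1)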
class Reset(Operator):
"""Assign a constant value to a Signal."""
def __init__(self, dst, value=0):
self.dst = dst
self.value = float(value)
self.reads = []
self.incs = []
self.updates = []
self.sets = [dst]
def __str__(self):
return 'Reset(%s)' % str(self.dst)
def make_step(self, signals, dt):
target = signals[self.dst]
value = self.value
def step():
target[...] = value
return step
class Copy(Operator):
"""Assign the value of one signal to another."""
def __init__(self, dst, src, as_update=False, tag=None):
self.dst = dst
self.src = src
self.tag = tag
        self.as_update = as_update
self.reads = [src]
self.sets = [] if as_update else [dst]
self.updates = [dst] if as_update else []
self.incs = []
def __str__(self):
return 'Copy(%s -> %s, as_update=%s)' % (
str(self.src), str(self.dst), self.as_update)
def make_step(self, signals, dt):
dst = signals[self.dst]
src = signals[self.src]
def step():
dst[...] = src
return step
def reshape_dot(A, X, Y, tag=None):
"""Checks if the dot product needs to be reshaped.
Also does a bunch of error checking based on the shapes of A and X.
"""
badshape = False
ashape = (1,) if A.shape == () else A.shape
xshape = (1,) if X.shape == () else X.shape
if A.shape == ():
incshape = X.shape
elif X.shape == ():
incshape = A.shape
elif X.ndim == 1:
badshape = ashape[-1] != xshape[0]
incshape = ashape[:-1]
else:
badshape = ashape[-1] != xshape[-2]
incshape = ashape[:-1] + xshape[:-2] + xshape[-1:]
if (badshape or incshape != Y.shape) and incshape != ():
raise ValueError('shape mismatch in %s: %s x %s -> %s' % (
tag, A.shape, X.shape, Y.shape))
# If the result is scalar, we'll reshape it so Y[...] += inc works
return incshape == ()
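# Example of the scalar case (illustrative): if A.shape == (d,) and
# X.shape == (d,), then np.dot(A, X) is 0-dimensional, so reshape_dot
# returns True and the increment is reshaped to Y.shape before
# ``Y[...] += inc`` in the operators below.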
class DotInc(Operator):
"""Increment signal Y by dot(A, X)"""
def __init__(self, A, X, Y, tag=None):
self.A = A
self.X = X
self.Y = Y
self.tag = tag
self.reads = [self.A, self.X]
self.incs = [self.Y]
self.sets = []
self.updates = []
def __str__(self):
return 'DotInc(%s, %s -> %s "%s")' % (
str(self.A), str(self.X), str(self.Y), self.tag)
def make_step(self, signals, dt):
X = signals[self.X]
A = signals[self.A]
Y = signals[self.Y]
reshape = reshape_dot(A, X, Y, self.tag)
def step():
inc = np.dot(A, X)
if reshape:
inc = np.asarray(inc).reshape(Y.shape)
Y[...] += inc
return step
class ProdUpdate(Operator):
"""Sets Y <- dot(A, X) + B * Y"""
def __init__(self, A, X, B, Y, tag=None):
self.A = A
self.X = X
self.B = B
self.Y = Y
self.tag = tag
self.reads = [self.A, self.X, self.B]
self.updates = [self.Y]
self.incs = []
self.sets = []
def __str__(self):
        return 'ProdUpdate(%s, %s, %s -> %s "%s")' % (
str(self.A), str(self.X), str(self.B), str(self.Y), self.tag)
def make_step(self, signals, dt):
X = signals[self.X]
A = signals[self.A]
Y = signals[self.Y]
B = signals[self.B]
reshape = reshape_dot(A, X, Y, self.tag)
def step():
val = np.dot(A, X)
if reshape:
val = np.asarray(val).reshape(Y.shape)
Y[...] *= B
Y[...] += val
return step
class SimPyFunc(Operator):
"""Set signal `output` by some non-linear function of x, possibly t"""
def __init__(self, output, fn, t_in, x):
self.output = output
self.fn = fn
self.t_in = t_in
self.x = x
self.reads = [] if x is None else [x]
self.updates = [] if output is None else [output]
self.sets = []
self.incs = []
def __str__(self):
return "SimPyFunc(%s -> %s '%s')" % (self.x, self.output, self.fn)
def make_step(self, signals, dt):
if self.output is not None:
output = signals[self.output]
fn = self.fn
args = [signals['__time__']] if self.t_in else []
args += [signals[self.x]] if self.x is not None else []
def step():
y = fn(*args)
if self.output is not None:
if y is None:
raise ValueError(
"Function '%s' returned invalid value" % fn.__name__)
output[...] = y
return step
class SimNeurons(Operator):
"""Set output to neuron model output for the given input current."""
def __init__(self, neurons, J, output, states=[]):
self.neurons = neurons
self.J = J
self.output = output
self.states = states
self.reads = [J]
self.updates = [output] + states
self.sets = []
self.incs = []
def make_step(self, signals, dt):
J = signals[self.J]
output = signals[self.output]
states = [signals[state] for state in self.states]
def step():
self.neurons.step_math(dt, J, output, *states)
return step
class Model(object):
"""Output of the Builder, used by the Simulator."""
def __init__(self, dt=0.001, label=None, seed=None):
# Resources used by the build process.
self.operators = []
self.params = {}
self.probes = []
self.sig_in = {}
self.sig_out = {}
self.dt = dt
self.label = label
self.seed = np.random.randint(npext.maxint) if seed is None else seed
self.rng = np.random.RandomState(self.seed)
def __str__(self):
return "Model: %s" % self.label
def has_built(self, obj):
"""Returns true iff obj has been processed by build."""
return obj in self.params
def next_seed(self):
"""Yields a seed to use for RNG during build computations."""
return self.rng.randint(npext.maxint)
BuiltConnection = collections.namedtuple(
'BuiltConnection', ['decoders', 'eval_points', 'transform', 'solver_info'])
BuiltNeurons = collections.namedtuple('BuiltNeurons', ['gain', 'bias'])
BuiltEnsemble = collections.namedtuple(
'BuiltEnsemble',
['eval_points', 'encoders', 'intercepts', 'max_rates', 'scaled_encoders'])
class Builder(object):
builders = {}
@classmethod
def register_builder(cls, build_fn, nengo_class):
cls.builders[nengo_class] = build_fn
@classmethod
def build(cls, obj, *args, **kwargs):
model = kwargs.setdefault('model', Model())
if model.has_built(obj):
# If we've already built the obj, we'll ignore it.
# This is most likely the result of Neurons being used in
# two different Ensembles, which is unlikely to be desired.
# TODO: Prevent this at pre-build validation time.
logger.warning("Object '%s' has already been built in model "
"'%s'." % (str(obj), model.label))
return
for obj_cls in obj.__class__.__mro__:
if obj_cls in cls.builders:
break
else:
raise TypeError("Cannot build object of type '%s'." %
cls.__name__)
cls.builders[obj_cls](obj, *args, **kwargs)
if obj not in model.params:
raise RuntimeError(
"Build function '%s' did not add '%s' to model.params"
% (cls.builders[obj_cls].__name__, str(obj)))
return model
def build_network(network, model):
"""Takes a Network object and returns a Model.
This determines the signals and operators necessary to simulate that model.
Builder does this by mapping each high-level object to its associated
signals and operators one-by-one, in the following order:
1) Ensembles, Nodes, Neurons, Probes
2) Subnetworks (recursively)
3) Connections
"""
logger.info("Network step 1: Building ensembles and nodes")
for obj in network.ensembles + network.nodes:
Builder.build(obj, model=model)
logger.info("Network step 2: Building subnetworks")
for subnetwork in network.networks:
Builder.build(subnetwork, model=model)
logger.info("Network step 3: Building connections")
for conn in network.connections:
Builder.build(conn, model=model)
model.params[network] = None
Builder.register_builder(build_network, nengo.objects.Network)
def pick_eval_points(ens, n_points, rng):
if n_points is None:
# use a heuristic to pick the number of points
dims, neurons = ens.dimensions, ens.neurons.n_neurons
n_points = max(np.clip(500 * dims, 750, 2500), 2 * neurons)
return dists.UniformHypersphere(ens.dimensions).sample(
n_points, rng=rng) * ens.radius
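# For example (illustrative numbers): a 2-dimensional ensemble with 100
# neurons gets max(np.clip(500 * 2, 750, 2500), 2 * 100) = 1000 evaluation
# points from the heuristic above.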
def build_ensemble(ens, model): # noqa: C901
# Create random number generator
seed = model.next_seed() if ens.seed is None else ens.seed
rng = np.random.RandomState(seed)
# Generate eval points
if ens.eval_points is None or is_integer(ens.eval_points):
eval_points = pick_eval_points(
ens=ens, n_points=ens.eval_points, rng=rng)
else:
eval_points = npext.array(
ens.eval_points, dtype=np.float64, min_dims=2)
# Set up signal
model.sig_in[ens] = Signal(np.zeros(ens.dimensions),
name="%s.signal" % ens.label)
model.operators.append(Reset(model.sig_in[ens]))
# Set up encoders
if ens.encoders is None:
if isinstance(ens.neurons, nengo.Direct):
encoders = np.identity(ens.dimensions)
else:
sphere = dists.UniformHypersphere(ens.dimensions, surface=True)
encoders = sphere.sample(ens.neurons.n_neurons, rng=rng)
else:
encoders = np.array(ens.encoders, dtype=np.float64)
enc_shape = (ens.neurons.n_neurons, ens.dimensions)
if encoders.shape != enc_shape:
raise ShapeMismatch(
"Encoder shape is %s. Should be (n_neurons, dimensions); "
"in this case %s." % (encoders.shape, enc_shape))
encoders /= npext.norm(encoders, axis=1, keepdims=True)
# Determine max_rates and intercepts
if isinstance(ens.max_rates, dists.Distribution):
max_rates = ens.max_rates.sample(
ens.neurons.n_neurons, rng=rng)
else:
max_rates = np.array(ens.max_rates)
if isinstance(ens.intercepts, dists.Distribution):
intercepts = ens.intercepts.sample(
ens.neurons.n_neurons, rng=rng)
else:
intercepts = np.array(ens.intercepts)
# Build the neurons
if isinstance(ens.neurons, nengo.Direct):
Builder.build(ens.neurons, ens.dimensions, model=model)
else:
Builder.build(ens.neurons, max_rates, intercepts, model=model)
bn = model.params[ens.neurons]
# Scale the encoders
if isinstance(ens.neurons, nengo.Direct):
scaled_encoders = encoders
else:
scaled_encoders = encoders * (bn.gain / ens.radius)[:, np.newaxis]
# Create output signal, using built Neurons
model.operators.append(DotInc(
Signal(scaled_encoders, name="%s.scaled_encoders" % ens.label),
model.sig_in[ens],
model.sig_in[ens.neurons],
tag="%s encoding" % ens.label))
# Output is neural output
model.sig_out[ens] = model.sig_out[ens.neurons]
for probe in ens.probes["decoded_output"]:
Builder.build(probe, dimensions=ens.dimensions, model=model)
for probe in ens.probes["spikes"] + ens.probes["voltages"]:
Builder.build(probe, dimensions=ens.neurons.n_neurons, model=model)
model.params[ens] = BuiltEnsemble(eval_points=eval_points,
encoders=encoders,
intercepts=intercepts,
max_rates=max_rates,
scaled_encoders=scaled_encoders)
Builder.register_builder(build_ensemble, nengo.objects.Ensemble)
def build_node(node, model):
# Get input
if node.output is None or is_callable(node.output):
if node.size_in > 0:
model.sig_in[node] = Signal(
np.zeros(node.size_in), name="%s.signal" % node.label)
# Reset input signal to 0 each timestep
model.operators.append(Reset(model.sig_in[node]))
# Provide output
if node.output is None:
model.sig_out[node] = model.sig_in[node]
elif not is_callable(node.output):
model.sig_out[node] = Signal(node.output, name=node.label)
else:
sig_in, sig_out = build_pyfunc(fn=node.output,
t_in=True,
n_in=node.size_in,
n_out=node.size_out,
label="%s.pyfn" % node.label,
model=model)
if sig_in is not None:
model.operators.append(DotInc(
model.sig_in[node],
Signal(1.0, name="1"),
sig_in,
tag="%s input" % node.label))
if sig_out is not None:
model.sig_out[node] = sig_out
for probe in node.probes["output"]:
Builder.build(probe, dimensions=model.sig_out[node].shape, model=model)
model.params[node] = None
Builder.register_builder(build_node, nengo.objects.Node)
def build_probe(probe, dimensions, model):
model.sig_in[probe] = Signal(np.zeros(dimensions), name=probe.label)
# Reset input signal to 0 each timestep
model.operators.append(Reset(model.sig_in[probe]))
model.probes.append(probe)
# We put a list here so that the simulator can fill it
# as it simulates the model
model.params[probe] = []
Builder.register_builder(build_probe, nengo.objects.Probe)
def decay_coef(pstc, dt):
pstc = max(pstc, dt)
return np.exp(-dt / pstc)
def filtered_signal(signal, pstc, model):
name = "%s.filtered(%f)" % (signal.name, pstc)
filtered = Signal(np.zeros(signal.size), name=name)
decay = decay_coef(pstc=pstc, dt=model.dt)
model.operators.append(ProdUpdate(
Signal(1.0 - decay, name="1 - decay"),
signal,
Signal(decay, name="decay"),
filtered,
tag="%s filtering" % name))
return filtered
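# In other words, filtered_signal realises the first-order low-pass filter
#     filtered[t + 1] = (1 - decay) * signal[t] + decay * filtered[t],
# with decay = exp(-dt / pstc) (pstc clipped to at least dt), via the
# ProdUpdate operator appended above.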
def build_connection(conn, model): # noqa: C901
rng = np.random.RandomState(model.next_seed())
model.sig_in[conn] = model.sig_out[conn.pre]
model.sig_out[conn] = model.sig_in[conn.post]
decoders = None
eval_points = None
solver_info = None
transform = np.array(conn.transform_full, dtype=np.float64)
# Figure out the signal going across this connection
if (isinstance(conn.pre, nengo.Ensemble)
and isinstance(conn.pre.neurons, nengo.Direct)):
        # Decoded connection in direct mode
if conn.function is None:
signal = model.sig_in[conn]
else:
sig_in, signal = build_pyfunc(
fn=conn.function,
t_in=False,
n_in=model.sig_in[conn].size,
n_out=conn.dimensions,
label=conn.label,
model=model)
model.operators.append(DotInc(
model.sig_in[conn],
Signal(1.0, name="1"),
sig_in,
tag="%s input" % conn.label))
elif isinstance(conn.pre, nengo.Ensemble):
# Normal decoded connection
encoders = model.params[conn.pre].encoders
gain = model.params[conn.pre.neurons].gain
bias = model.params[conn.pre.neurons].bias
eval_points = conn.eval_points
if eval_points is None:
eval_points = npext.array(
model.params[conn.pre].eval_points, min_dims=2)
elif is_integer(eval_points):
eval_points = pick_eval_points(
ens=conn.pre, n_points=eval_points, rng=rng)
else:
eval_points = npext.array(eval_points, min_dims=2)
x = np.dot(eval_points, encoders.T / conn.pre.radius)
activities = model.dt * conn.pre.neurons.rates(x, gain, bias)
if np.count_nonzero(activities) == 0:
raise RuntimeError(
"In '%s', for '%s', 'activites' matrix is all zero. "
"This is because no evaluation points fall in the firing "
"ranges of any neurons." % (str(conn), str(conn.pre)))
if conn.function is None:
targets = eval_points
else:
targets = np.zeros((len(eval_points), conn.function_size))
for i, ep in enumerate(eval_points):
targets[i] = conn.function(ep)
if conn.weight_solver is not None:
if conn.decoder_solver is not None:
raise ValueError("Cannot specify both 'weight_solver' "
"and 'decoder_solver'.")
# account for transform
targets = np.dot(targets, transform.T)
transform = np.array(1., dtype=np.float64)
decoders, solver_info = conn.weight_solver(
activities, targets, rng=rng,
E=model.params[conn.post].scaled_encoders.T)
model.sig_out[conn] = model.sig_in[conn.post.neurons]
signal_size = model.sig_out[conn].size
else:
solver = (conn.decoder_solver if conn.decoder_solver is
not None else nengo.decoders.lstsq_L2nz)
decoders, solver_info = solver(activities, targets, rng=rng)
signal_size = conn.dimensions
# Add operator for decoders and filtering
decoders = decoders.T
if conn.synapse is not None and conn.synapse > model.dt:
decay = decay_coef(pstc=conn.synapse, dt=model.dt)
decoder_signal = Signal(
decoders * (1.0 - decay),
name="%s.decoders * (1 - decay)" % conn.label)
else:
decoder_signal = Signal(decoders,
name="%s.decoders" % conn.label)
decay = 0
signal = Signal(np.zeros(signal_size), name=conn.label)
model.operators.append(ProdUpdate(
decoder_signal,
model.sig_in[conn],
Signal(decay, name="decay"),
signal,
tag="%s decoding" % conn.label))
else:
# Direct connection
signal = model.sig_in[conn]
# Add operator for filtering (in the case filter wasn't already
# added, when pre.neurons is a non-direct Ensemble)
if decoders is None and conn.synapse is not None:
# Note: we add a filter here even if synapse < dt,
# in order to avoid cycles in the op graph. If the filter
# is explicitly set to None (e.g. for a passthrough node)
# then cycles can still occur.
signal = filtered_signal(signal, conn.synapse, model=model)
if conn.modulatory:
# Make a new signal, effectively detaching from post
model.sig_out[conn] = Signal(
np.zeros(model.sig_out[conn].size),
name="%s.mod_output" % conn.label)
# Add reset operator?
# TODO: add unit test
# Add operator for transform
if isinstance(conn.post, nengo.objects.Neurons):
if not model.has_built(conn.post):
# Since it hasn't been built, it wasn't added to the Network,
# which is most likely because the Neurons weren't associated
# with an Ensemble.
raise RuntimeError("Connection '%s' refers to Neurons '%s' "
"that are not a part of any Ensemble." % (
conn, conn.post))
transform *= model.params[conn.post].gain[:, np.newaxis]
model.operators.append(
DotInc(Signal(transform, name="%s.transform" % conn.label),
signal,
model.sig_out[conn],
tag=conn.label))
# Set up probes
for probe in conn.probes["signal"]:
Builder.build(probe, dimensions=model.sig_out[conn].size, model=model)
model.params[conn] = BuiltConnection(decoders=decoders,
eval_points=eval_points,
transform=transform,
solver_info=solver_info)
Builder.register_builder(build_connection, nengo.Connection) # noqa
def build_pyfunc(fn, t_in, n_in, n_out, label, model):
if n_in:
sig_in = Signal(np.zeros(n_in), name="%s.input" % label)
model.operators.append(Reset(sig_in))
else:
sig_in = None
if n_out > 0:
sig_out = Signal(np.zeros(n_out), name="%s.output" % label)
else:
sig_out = None
model.operators.append(
SimPyFunc(output=sig_out, fn=fn, t_in=t_in, x=sig_in))
return sig_in, sig_out
def build_direct(direct, dimensions, model):
model.sig_in[direct] = Signal(np.zeros(dimensions), name=direct.label)
model.sig_out[direct] = model.sig_in[direct]
model.operators.append(Reset(model.sig_in[direct]))
model.params[direct] = BuiltNeurons(gain=None, bias=None)
Builder.register_builder(build_direct, nengo.neurons.Direct)
def build_neurons(neurons, max_rates, intercepts, model):
if neurons.n_neurons <= 0:
raise ValueError(
"Number of neurons (%d) must be positive." % neurons.n_neurons)
gain, bias = neurons.gain_bias(max_rates, intercepts)
model.sig_in[neurons] = Signal(
np.zeros(neurons.n_neurons), name="%s.input" % neurons.label)
model.sig_out[neurons] = Signal(
np.zeros(neurons.n_neurons), name="%s.output" % neurons.label)
model.operators.append(Copy(
src=Signal(bias, name="%s.bias" % neurons.label),
dst=model.sig_in[neurons]))
for probe in neurons.probes["output"]:
Builder.build(probe, dimensions=neurons.n_neurons, model=model)
model.params[neurons] = BuiltNeurons(gain=gain, bias=bias)
def build_lifrate(lif, max_rates, intercepts, model):
build_neurons(lif, max_rates, intercepts, model=model)
model.operators.append(SimNeurons(
neurons=lif, J=model.sig_in[lif], output=model.sig_out[lif]))
Builder.register_builder(build_lifrate, nengo.neurons.LIFRate)
def build_lif(lif, max_rates, intercepts, model):
build_neurons(lif, max_rates, intercepts, model=model)
voltage = Signal(np.zeros(lif.n_neurons), name="%s.voltage" % lif.label)
refractory_time = Signal(
np.zeros(lif.n_neurons), name="%s.refractory_time" % lif.label)
model.operators.append(SimNeurons(neurons=lif,
J=model.sig_in[lif],
output=model.sig_out[lif],
states=[voltage, refractory_time]))
Builder.register_builder(build_lif, nengo.neurons.LIF)
def build_alifrate(alif, max_rates, intercepts, model):
build_neurons(alif, max_rates, intercepts, model=model)
adaptation = Signal(np.zeros(alif.n_neurons),
name="%s.adaptation" % alif.label)
model.operators.append(SimNeurons(neurons=alif,
J=model.sig_in[alif],
output=model.sig_out[alif],
states=[adaptation]))
Builder.register_builder(build_alifrate, nengo.neurons.AdaptiveLIFRate)
def build_alif(alif, max_rates, intercepts, model):
build_neurons(alif, max_rates, intercepts, model=model)
voltage = Signal(np.zeros(alif.n_neurons), name="%s.voltage" % alif.label)
refractory_time = Signal(np.zeros(alif.n_neurons),
name="%s.refractory_time" % alif.label)
adaptation = Signal(np.zeros(alif.n_neurons),
name="%s.adaptation" % alif.label)
model.operators.append(SimNeurons(
neurons=alif,
J=model.sig_in[alif],
output=model.sig_out[alif],
states=[voltage, refractory_time, adaptation]))
Builder.register_builder(build_alif, nengo.neurons.AdaptiveLIF)
|
ZeitgeberH/nengo
|
nengo/builder.py
|
Python
|
gpl-3.0
| 37,766
|
[
"NEURON"
] |
f4ed77863bad87a5012b40ba4e376956b2795ac7b4f276f7d359c81584ed23d6
|
'''
MMD functions implemented in tensorflow.
'''
from __future__ import division
import tensorflow as tf
from tf_ops import dot, sq_sum
_eps=1e-8
################################################################################
### Quadratic-time MMD with Gaussian RBF kernel
def _mix_rbf_kernel(X, Y, sigmas, wts=None):
if wts is None:
wts = [1] * len(sigmas)
XX = tf.matmul(X, X, transpose_b=True)
XY = tf.matmul(X, Y, transpose_b=True)
YY = tf.matmul(Y, Y, transpose_b=True)
X_sqnorms = tf.diag_part(XX)
Y_sqnorms = tf.diag_part(YY)
r = lambda x: tf.expand_dims(x, 0)
c = lambda x: tf.expand_dims(x, 1)
K_XX, K_XY, K_YY = 0, 0, 0
for sigma, wt in zip(sigmas, wts):
gamma = 1 / (2 * sigma**2)
K_XX += wt * tf.exp(-gamma * (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
K_XY += wt * tf.exp(-gamma * (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
K_YY += wt * tf.exp(-gamma * (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))
return K_XX, K_XY, K_YY, tf.reduce_sum(wts)
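# Written out, the mixture kernel computed above is
#     K(x, y) = sum_i wt_i * exp(-||x - y||^2 / (2 * sigma_i^2)),
# using ||x - y||^2 = ||x||^2 - 2<x, y> + ||y||^2, which is what the
# (-2*XY + c(X_sqnorms) + r(Y_sqnorms)) terms expand to entrywise.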
def rbf_mmd2(X, Y, sigma=1, biased=True):
return mix_rbf_mmd2(X, Y, sigmas=[sigma], biased=biased)
def mix_rbf_mmd2(X, Y, sigmas=(1,), wts=None, biased=True):
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigmas, wts)
return _mmd2(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
def rbf_mmd2_and_ratio(X, Y, sigma=1, biased=True):
return mix_rbf_mmd2_and_ratio(X, Y, sigmas=[sigma], biased=biased)
def mix_rbf_mmd2_and_ratio(X, Y, sigmas=(1,), wts=None, biased=True):
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigmas, wts)
return _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
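# Minimal usage sketch (illustrative; placeholder names and shapes are made
# up, and the TF1-style graph API used throughout this file is assumed):
#     X = tf.placeholder(tf.float32, shape=[100, 32])
#     Y = tf.placeholder(tf.float32, shape=[100, 32])
#     mmd2 = mix_rbf_mmd2(X, Y, sigmas=[1.0, 5.0, 10.0])
#     mmd2_u, ratio = mix_rbf_mmd2_and_ratio(X, Y, sigmas=[1.0, 5.0, 10.0])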
################################################################################
### Helper functions to compute variances based on kernel matrices
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = tf.cast(K_XX.get_shape()[0], tf.float32)
n = tf.cast(K_YY.get_shape()[0], tf.float32)
if biased:
mmd2 = (tf.reduce_sum(K_XX) / (m * m)
+ tf.reduce_sum(K_YY) / (n * n)
- 2 * tf.reduce_sum(K_XY) / (m * n))
else:
if const_diagonal is not False:
trace_X = m * const_diagonal
trace_Y = n * const_diagonal
else:
trace_X = tf.trace(K_XX)
trace_Y = tf.trace(K_YY)
mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
+ (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
- 2 * tf.reduce_sum(K_XY) / (m * n))
return mmd2
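# These are the standard quadratic-time MMD^2 estimators: the biased one is
#     MMD_b^2 = sum(K_XX)/m^2 + sum(K_YY)/n^2 - 2*sum(K_XY)/(m*n),
# while the unbiased one drops the kernel diagonals and divides the
# within-sample sums by m*(m-1) and n*(n-1) instead.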
def _mmd2_and_ratio(K_XX, K_XY, K_YY, const_diagonal=False, biased=False,
min_var_est=_eps):
mmd2, var_est = _mmd2_and_variance(
K_XX, K_XY, K_YY, const_diagonal=const_diagonal, biased=biased)
ratio = mmd2 / tf.sqrt(tf.maximum(var_est, min_var_est))
return mmd2, ratio
def _mmd2_and_variance(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = tf.cast(K_XX.get_shape()[0], tf.float32) # Assumes X, Y are same shape
### Get the various sums of kernels that we'll use
# Kts drop the diagonal, but we don't need to compute them explicitly
if const_diagonal is not False:
const_diagonal = tf.cast(const_diagonal, tf.float32)
diag_X = diag_Y = const_diagonal
sum_diag_X = sum_diag_Y = m * const_diagonal
sum_diag2_X = sum_diag2_Y = m * const_diagonal**2
else:
diag_X = tf.diag_part(K_XX)
diag_Y = tf.diag_part(K_YY)
sum_diag_X = tf.reduce_sum(diag_X)
sum_diag_Y = tf.reduce_sum(diag_Y)
sum_diag2_X = sq_sum(diag_X)
sum_diag2_Y = sq_sum(diag_Y)
Kt_XX_sums = tf.reduce_sum(K_XX, 1) - diag_X
Kt_YY_sums = tf.reduce_sum(K_YY, 1) - diag_Y
K_XY_sums_0 = tf.reduce_sum(K_XY, 0)
K_XY_sums_1 = tf.reduce_sum(K_XY, 1)
Kt_XX_sum = tf.reduce_sum(Kt_XX_sums)
Kt_YY_sum = tf.reduce_sum(Kt_YY_sums)
K_XY_sum = tf.reduce_sum(K_XY_sums_0)
Kt_XX_2_sum = sq_sum(K_XX) - sum_diag2_X
Kt_YY_2_sum = sq_sum(K_YY) - sum_diag2_Y
K_XY_2_sum = sq_sum(K_XY)
if biased:
mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * m)
+ (Kt_YY_sum + sum_diag_Y) / (m * m)
- 2 * K_XY_sum / (m * m))
else:
mmd2 = ((Kt_XX_sum + sum_diag_X) / (m * (m-1))
+ (Kt_YY_sum + sum_diag_Y) / (m * (m-1))
- 2 * K_XY_sum / (m * m))
var_est = (
2 / (m**2 * (m-1)**2) * (
2 * sq_sum(Kt_XX_sums) - Kt_XX_2_sum
+ 2 * sq_sum(Kt_YY_sums) - Kt_YY_2_sum)
- (4*m-6) / (m**3 * (m-1)**3) * (Kt_XX_sum**2 + Kt_YY_sum**2)
+ 4*(m-2) / (m**3 * (m-1)**2) * (
sq_sum(K_XY_sums_1) + sq_sum(K_XY_sums_0))
- 4 * (m-3) / (m**3 * (m-1)**2) * K_XY_2_sum
- (8*m - 12) / (m**5 * (m-1)) * K_XY_sum**2
+ 8 / (m**3 * (m-1)) * (
1/m * (Kt_XX_sum + Kt_YY_sum) * K_XY_sum
- dot(Kt_XX_sums, K_XY_sums_1)
- dot(Kt_YY_sums, K_XY_sums_0))
)
return mmd2, var_est
|
dougalsutherland/opt-mmd
|
gan/mmd.py
|
Python
|
bsd-3-clause
| 5,045
|
[
"Gaussian"
] |
ee87e14d544134bfbe45500a732cef6212007b5c89b9e0d94119521534970d39
|
#!/usr/bin/python
#collect_metrics_seqXTo2d.py
import sys
import re
import argparse
from argparse import RawTextHelpFormatter
import logging
import pprint
import yaml
import json
from collections import OrderedDict
import os
import numpy
#from Bio import SeqIO
import ast
pp = pprint.PrettyPrinter(indent=4, width=10)
def read_arguments():
parser = argparse.ArgumentParser(description="parses log files from various SeqX programs and turns them into a 2d table")
parser.add_argument('infiles', nargs='+', type=argparse.FileType('r'),
default=sys.stdin, help ="Infiles: default=stdin")
parser.add_argument('--secondary-files', nargs='+', type=argparse.FileType('r'),
default=None, help ="for paired end with two output files")
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
default=sys.stdout, help ="Outfile: default=stdout")
parser.add_argument('--program', nargs='?', type=str, choices=["post_sawdust",
"cutadapt", "htseq-count", "fastq_screen", "fix_cutadapt", "bowtie2", "adapterremoval" ],
default=None, help ="which program generated the logfile")
    parser.add_argument('--program-version', nargs=1, type=str,
                        default=None, help ="version of the program that generated the logfile")
parser.add_argument('--htseq-strip-dot-gencode', nargs='?', type=bool,
default=False, help ="strip .dot form gencode identifier ")
parser.add_argument('--step-id', nargs='?', type=str,
default=None, help ="include a column with name ")
parser.add_argument('--split', nargs='?', type=str,
default=None, help ="split basename by SPLIT")
parser.add_argument('--logfile', nargs='?', type=argparse.FileType('w'),
default=sys.stderr, help ="logfile default=stderr ")
parser.add_argument('-d', '--debug', help="Print lots of debugging statements",
action="store_const", dest="loglevel", const=logging.DEBUG,
default=logging.WARNING)
parser.add_argument('-v', '--verbose', help="Be verbose",
action="store_const", dest="loglevel", const=logging.INFO )
parser.add_argument( '--version', action='version', version='%(prog)s ' + __version__)
return parser.parse_args()
def eval_args(args):
logging.basicConfig(stream=args.logfile, level=args.loglevel)
logger = logging.getLogger()
# logger.debug('This message should go to the log file')
# logger.info('So should this')
# logger.warning('And this, too')
args.fnames = []
args.fnames_sec = []
for f in args.infiles:
basename = os.path.basename(f.name)
if args.split:
basename = basename.split(args.split)[0]
args.fnames.append (basename)
if args.secondary_files:
for f in args.secondary_files:
basename = os.path.basename(f.name)
if args.split:
basename = basename.split(args.split)[0]
args.fnames_sec.append (basename)
if args.program == "cutadapt":
fname = "-".join([args.step_id, 'clipping.txt'])
args.outfile2 = open(fname, "w")
if args.program == "adapterremoval":
fname = "-".join([args.step_id, 'ar-trimming.txt'])
args.outfile2 = open(fname, "w")
if logger.isEnabledFor(logging.DEBUG):
for k, v in args.__dict__.iteritems():
pp.pprint([k,v])
return (args)
## called from main
def init_metrics():
"""Returns a dict like thingy for counting"""
#new test add
metrics = []
indict = OrderedDict()
    # same structure as outdict below
    indict['templates'] = 0
    indict['unmapped'] = 0
    indict['multiple'] = 0
    indict['sub_multiple'] = {}
    indict['sub_multiple']['r1'] = 0
    indict['sub_multiple']['r2'] = 0
indict['unique'] = 0
indict['sub_unique'] = {}
indict['sub_unique']['r1'] = 0
indict['sub_unique']['r2'] = 0
indict['sub_unique']['splits'] = 0
indict['sub_unique']['split_types'] = {}
    # ``split_types`` is assumed to be defined elsewhere (e.g. at module
    # scope); it is not defined in this script.
    for types in split_types:
indict['sub_unique']['split_types'][types] = 0
outdict = OrderedDict()
outdict['templates'] = 0
outdict['unmapped'] = 0
outdict['multiple'] = 0
outdict['sub_multiple'] = {}
outdict['sub_multiple']['r1'] = 0
outdict['sub_multiple']['r2'] = 0
outdict['unique'] = 0
outdict['sub_unique'] = {}
outdict['sub_unique']['r1'] = 0
outdict['sub_unique']['r2'] = 0
outdict['sub_unique']['splits'] = 0
metrics.append(indict)
metrics.append(outdict)
return metrics
class OrderedDictYAMLLoader(yaml.Loader):
"""
A YAML loader that loads mappings into ordered dictionaries.
https://gist.github.com/enaeseth/844388
"""
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.add_constructor(u'tag:yaml.org,2002:map', type(self).construct_yaml_map)
self.add_constructor(u'tag:yaml.org,2002:omap', type(self).construct_yaml_map)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(None, None,
'expected a mapping node, but found %s' % node.id, node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError, exc:
raise yaml.constructor.ConstructorError('while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc, key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def iterme(d, l, prefix):
for k, v in d.iteritems():
k1= "_".join([prefix,k ])
        if isinstance(v, (OrderedDict, dict)):
l = iterme(v,l, k1)
else:
l.append([k1,v])
return(l)
def process_fastq_screen(args):
header = ["id", "genome", "Reads_processed",
"Unmapped", "P_Unmapped",
"One_hit_one_library", "P_One_hit_one_library",
"Multiple_hits_one_library", "P_Multiple_hits_one_library",
"One_hit_multiple_libraries", "P_One_hit_multiple_libraries",
"Multiple_hits_multiple_libraries", "P_Multiple_hits_multiple_libraries"]
out_line= "\t".join(header)
args.outfile.write(out_line +'\n')
for c, f in enumerate(args.infiles):
reads_proc = "None"
for line in f:
if not line.strip():
continue
line = line.rstrip('\n')
if line[0] =='#':
continue
elif line[0] =='%':
tmp = []
nohits = line.split()[1]
tmp.extend([args.fnames[c], "nohits"])
tmp.extend([str(0.00)] * 11)
tmp[6] = str(nohits)
tmp[2] = str(reads_proc)
tmp[5] = str(int(float(nohits) * float(reads_proc)/100))
out_line= "\t".join(tmp)
args.outfile.write(out_line +'\n')
# old style header
elif line[0:7] =='Library':
continue
elif line[0:6] =='Genome' and line[7:23] == '#Reads_processed':
continue
else:
reads_proc = line.split()[1]
out_line= "\t".join([args.fnames[c], line ])
args.outfile.write(out_line +'\n')
def process_post_sawdust(args):
for c, f in enumerate(args.infiles):
a = yaml.load(f, OrderedDictYAMLLoader)
m = iterme(a[0],[], 'in')
m.extend(iterme(a[1],[], 'out'))
if c ==0:
out = [str(i[0]).rstrip('\n') for i in m]
out.insert(0, "id")
out_line= "\t".join(out)
ae = re.sub(r'_sub', '', out_line)
args.outfile.write(ae +'\n')
out = [str(i[1]).rstrip('\n') for i in m]
out.insert(0, args.fnames[c])
out_line= "\t".join(out)
args.outfile.write(out_line +'\n')
def _par_match(query):
curses = {"Total number of read pairs" : "total",
"Number of unaligned read pairs" : "not_paired",
"Number of well aligned read pairs" : "well_paired",
"Number of inadequate alignments" : "inadequate_paired",
"Number of discarded mate 1 reads" : "d_mate1",
"Number of singleton mate 1 reads" : "s_mate1",
"Number of discarded mate 2 reads" : "d_mate2",
"Number of singleton mate 2 reads" : "s_mate2",
"Number of reads with adapters" : "with_adapters",
"Number of full-length collapsed pairs" : "full_collapsed",
"Number of truncated collapsed pairs" : "trunc_collapsed",
"Number of retained reads" : "retained",
"Number of retained nucleotides" : "retained_nuc",
"Average read length of trimmed reads" : "average_length" }
for i in curses.keys():
p = re.compile(i)
if p.match(query):
return curses[i]
return None
def process_adapterremoval(args):
trim_seen = False
hist_seen = False
for c, f in enumerate(args.infiles):
process = None
phist_state = None
ptrim_state = None
phist = re.compile('\[Length distribution\]')
ptrim = re.compile('\[Trimming statistics\]')
for line in f:
if not line.strip():
continue
line = line.rstrip('\n')
if phist.match(line):
phist_state = True
ptrim_state = None
continue
if ptrim.match(line) and trim_seen == False:
trim_seen = True
ptrim_state = True
out_line = "\t".join(["id", "feature", "variable" ])
args.outfile2.write(out_line +'\n')
continue
if ptrim.match(line):
ptrim_state = True
if ptrim_state:
r = line.split(":")
res = _par_match(r[0])
if res:
out_line= "\t".join([args.fnames[c], res, r[1] ])
args.outfile2.write(out_line +'\n')
if phist_state:
p = re.compile('Length')
if p.match(line) and hist_seen == False:
out_line= "\t".join(["id", line ])
hist_seen = True
elif p.match(line):
continue
else:
out_line= "\t".join([args.fnames[c], line ])
args.outfile.write(out_line +'\n')
def process_cutadapt(args):
# print ((args))
_pc(args, "R1", args.infiles, args.fnames )
if args.secondary_files:
_pc(args, "R2", args.secondary_files, args.fnames_sec)
def _pc(args, read_type, mylist, fnames):
p = re.compile('^Total reads processed:')
pclipped = re.compile('^Reads with adapters:')
pnumber = re.compile('\d+')
plength = re.compile('^length')
for c, f in enumerate(mylist):
process = None
for line in f:
if not line.strip():
continue
line = line.rstrip('\n')
if p.match(line):
tmp = re.sub(r',', '', line)
res = pnumber.search(tmp)
total_reads = res.group()
out_line= "\t".join([fnames[c], read_type, total_reads, args.step_id ])
args.outfile.write(out_line +'\n')
if pclipped.match(line):
tmp = re.sub(r',', '', line)
res = pnumber.search(tmp)
clipped_reads = res.group()
not_clipped_reads =str(int(total_reads) - int(clipped_reads))
zero_line= "\t".join([fnames[c], read_type, args.step_id, '0', not_clipped_reads ])
if plength.match(line):
process = True
args.outfile2.write(zero_line +'\n')
continue
if process:
tmp = line.split("\t")
out_line= "\t".join([fnames[c], read_type, args.step_id,tmp[0], tmp[1] ])
args.outfile2.write(out_line +'\n')
def process_htseq_count(args):
logger = logging.getLogger()
#d->id->gene->count
genes = dict()
gene_ids = dict()
if args.htseq_strip_dot_gencode:
logger.warning("--htseq-strip-dot-gencode == TRUE")
p_underscore = re.compile('^__*')
for c, f in enumerate(args.infiles):
genes[args.fnames[c]] = dict()
for line in f:
if not line.strip():
continue
if p_underscore.match(line):
continue
line = line.rstrip('\n')
if args.htseq_strip_dot_gencode:
line = re.sub(r'\.\d+', '', line)
tmp = line.split("\t")
ids = tmp[0]
count = tmp[1]
gene_ids[ids] = 1
genes[args.fnames[c]][ids] = count
header = []
header.append("id")
header.extend(args.fnames)
header_line= "\t".join(str(x) for x in header)
args.outfile.write(header_line +'\n')
for gene_id in gene_ids.keys():
out = []
out.append(gene_id)
for fname in args.fnames:
try:
res = genes[fname][gene_id]
except:
res = 0
out.append(res)
out_line= "\t".join(str(x) for x in out)
args.outfile.write(out_line +'\n')
def process_fix_cutadapt(args):
logger = logging.getLogger()
res = dict()
keys = dict()
for c, f in enumerate(args.infiles):
res[args.fnames[c]] = dict()
for line in f:
if not line.strip():
continue
line = line.rstrip('\n')
tmp = line.split("\t")
ids = tmp[0]
count = tmp[1]
keys[ids] = 1
res[args.fnames[c]][ids] = count
header = []
header.append("id")
if args.step_id:
header.append("step")
header.extend(keys.keys())
header_line= "\t".join(str(x) for x in header)
args.outfile.write(header_line +'\n')
for id in res.keys():
out = []
out.append(id)
if args.step_id:
out.append(args.step_id)
for key in keys.keys():
out.append(res[id][key])
out_line= "\t".join(str(x) for x in out)
args.outfile.write(out_line +'\n')
def process_bowtie2(args):
logger = logging.getLogger()
res = dict()
keys = dict()
p_zero = re.compile('0\stimes')
p_once = re.compile('exactly 1 time')
p_multiple = re.compile('>1 times')
#single
#9202262 reads; of these:
# 9202262 (100.00%) were unpaired; of these:
# 859754 (9.34%) aligned 0 times
# 1233838 (13.41%) aligned exactly 1 time
# 7108670 (77.25%) aligned >1 times
#90.66% overall alignment rate
#paired
#3826398 reads; of these:
# 3826398 (100.00%) were paired; of these:
# 3826396 (100.00%) aligned concordantly 0 times
# 0 (0.00%) aligned concordantly exactly 1 time
# 2 (0.00%) aligned concordantly >1 times
# ----
for c, f in enumerate(args.infiles):
res[args.fnames[c]] = dict()
for line in f:
if not line.strip():
continue
line = line.rstrip('\n')
line = line.lstrip(' ')
tmp = line.split(' ')
count = tmp[0]
other = tmp[1]
if count == '----':
break
if other == 'reads;':
pass
if tmp[2] == 'were':
res[args.fnames[c]]['input'] = int(count)
keys['input'] = 1
if p_zero.search(line):
keys['unaligned'] = 1
keys['p_unaligned'] = 1
keys['overall'] = 1
res[args.fnames[c]]['overall'] = res[args.fnames[c]]['input'] - int(count)
res[args.fnames[c]]['unaligned'] = count
res[args.fnames[c]]['p_unaligned'] = other.split('%')[0][1:]
if p_once.search(line):
keys['unique'] = 1
keys['p_unique'] = 1
res[args.fnames[c]]['unique'] = count
res[args.fnames[c]]['p_unique'] = other.split('%')[0][1:]
if p_multiple.search(line):
keys['multiple'] = 1
keys['p_multiple'] = 1
res[args.fnames[c]]['multiple'] = count
res[args.fnames[c]]['p_multiple'] = other.split('%')[0][1:]
if other == 'overall':
keys['p_overall'] = 1
res[args.fnames[c]]['p_overall'] = count.split('%')[0]
header = []
header.append("id")
if args.step_id:
header.append("step")
header.extend(keys.keys())
header_line= "\t".join(str(x) for x in header)
args.outfile.write(header_line +'\n')
for id in res.keys():
out = []
out.append(id)
if args.step_id:
out.append(args.step_id)
for key in keys.keys():
out.append(res[id][key])
out_line= "\t".join(str(x) for x in out)
args.outfile.write(out_line +'\n')
def main(args):
if args.program == "post_sawdust":
process_post_sawdust(args)
elif args.program == "cutadapt":
process_cutadapt(args)
elif args.program == "htseq-count":
process_htseq_count(args)
elif args.program == "fastq_screen":
process_fastq_screen(args)
elif args.program == "fix_cutadapt":
process_fix_cutadapt(args)
elif args.program == "bowtie2":
process_bowtie2(args)
elif args.program == "adapterremoval":
process_adapterremoval(args)
else:
print "no progL:"
__version__ = '0.001'
if __name__ == '__main__':
args = read_arguments()
args = eval_args(args)
main(args)
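# Example invocation (file names are hypothetical); shell redirection is used
# so the optional positional outfile can stay on stdout:
#     python collect_metrics_seqXTo2d.py --program bowtie2 --step-id align \
#         sample1.bowtie2.log sample2.bowtie2.log > bowtie2_metrics.tsv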
|
tiennes/misc-bio-stuff
|
scripts/collect_metrics_seqXTo2d.py
|
Python
|
gpl-3.0
| 18,986
|
[
"HTSeq"
] |
c88d157ff74b71834abcead611d89d9989e22403ed76f0ec0b5dcf69ddcf3e8b
|