signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def parse_result(cls, result):
    """Parse a simple items result.

    May either be a two-item tuple containing items and a context
    dictionary (see: relation convention) or a plain list of items.

    :param result: either ``(items, context)`` or just ``items``.
    :return: tuple ``(items, context)``; ``context`` is ``{}`` when none
        was supplied.
    """
    # Bug fix: the original compared ``isinstance(result, tuple) == 2``,
    # which is always False (isinstance returns a bool), so the tuple
    # form was never unpacked.
    if isinstance(result, tuple) and len(result) == 2:
        items, context = result
    else:
        context = {}
        items = result
    return items, context
|
def normalize(self, path):
    """Return the normalized path (on the server) of a given path.

    This can be used to quickly resolve symbolic links or determine what
    the server considers the "current folder" (by passing C{'.'} as
    C{path}).

    @param path: path to be normalized
    @type path: str
    @return: normalized form of the given path
    @rtype: str
    @raise IOError: if the path can't be resolved on the server
    """
    path = self._adjust_cwd(path)
    self._log(DEBUG, 'normalize(%r)' % path)
    response_type, msg = self._request(CMD_REALPATH, path)
    if response_type != CMD_NAME:
        raise SFTPError('Expected name response')
    n_results = msg.get_int()
    if n_results != 1:
        raise SFTPError('Realpath returned %d results' % n_results)
    return _to_unicode(msg.get_string())
|
def set_left_colon(self, show_colon):
    """Turn the left colon on (show_colon True) or off (show_colon False).

    Only the large 1.2" 7-segment display has a left colon.
    """
    # The left colon is driven by bits 0x04 and 0x08 of buffer[4];
    # both are toggled together.
    for bit in (0x04, 0x08):
        if show_colon:
            self.buffer[4] |= bit
        else:
            self.buffer[4] &= (~bit) & 0xFF
|
def _get_bounds ( self , ib , dimension ) :
"""ib = = 0/1 means lower / upper bound , return a vector of length
` dimension `"""
|
sign_ = 2 * ib - 1
assert sign_ ** 2 == 1
if self . bounds is None or self . bounds [ ib ] is None :
return array ( dimension * [ sign_ * np . Inf ] )
res = [ ]
for i in xrange ( dimension ) :
res . append ( self . bounds [ ib ] [ min ( [ i , len ( self . bounds [ ib ] ) - 1 ] ) ] )
if res [ - 1 ] is None :
res [ - 1 ] = sign_ * np . Inf
return array ( res )
|
def save(self, status=None):
    """Persist this object's model, optionally updating its status.

    :raise WorkflowsMissingModel: when there is no model to save.
    """
    model = self.model
    if model is None:
        raise WorkflowsMissingModel()
    with db.session.begin_nested():
        model.modified = datetime.now()
        if status is not None:
            model.status = status
        if model.extra_data is None:
            model.extra_data = dict()
        # Tell SQLAlchemy the (mutable) JSON column changed in place.
        flag_modified(model, 'extra_data')
        db.session.merge(model)
|
def logon(self, password='admin'):
    """Log on to the device.

    Parameters
    ----------
    password : str
        default 'admin'

    Returns
    -------
    dict
    """
    response = self._basic_post(url='logon', data=password)
    return response.json()
|
def implicify_hydrogens(self):
    """Remove explicit hydrogens where possible.

    :return: number of removed hydrogens
    """
    removed = sum(
        molecule.implicify_hydrogens()
        for group in (self.__reagents, self.__reactants, self.__products)
        for molecule in group
        if hasattr(molecule, 'implicify_hydrogens')
    )
    if removed:
        self.flush_cache()
    return removed
|
def authcode_get(self, path, **kwargs):
    """HTTP GET to okcupid.com using this profile's session, with the
    authcode automatically added as a query parameter."""
    params = kwargs.setdefault('params', {})
    params['authcode'] = self.authcode
    return self._session.okc_get(path, **kwargs)
|
def rename_keys(record: Mapping, key_map: Mapping) -> dict:
    """Return a new dict with the same values; keys found in key_map are
    renamed, all others kept as-is."""
    return {key_map.get(key, key): value for key, value in record.items()}
|
def attach(self, handle, filters=None, resolution=None):
    """Attach to an existing SignalFlow computation."""
    params = self._get_params(filters=filters, resolution=resolution)
    comp = computation.Computation(
        lambda since: self._transport.attach(handle, params))
    self._computations.add(comp)
    return comp
|
def acquisition_source(self, key, value):
    """Populate the ``acquisition_source`` key."""
    def _get_datetime(value):
        # Subfield 'd' holds the acquisition date; return an ISO 8601
        # datetime when it parses as a partial date, otherwise return
        # the raw string unchanged (implicitly None when 'd' is empty).
        d_value = force_single_element(value.get('d', ''))
        if d_value:
            try:
                date = PartialDate.loads(d_value)
            except ValueError:
                return d_value
            else:
                datetime_ = datetime(year=date.year, month=date.month, day=date.day)
                return datetime_.isoformat()
    internal_uid, orcid, source = None, None, None
    # Subfield 'a' may repeat; each value is classified as an internal
    # UID, an ORCID, or a free-form source string.
    a_values = force_list(value.get('a'))
    for a_value in a_values:
        if IS_INTERNAL_UID.match(a_value):
            if a_value.startswith('inspire:uid:'):
                internal_uid = int(a_value[12:])
            else:
                internal_uid = int(a_value)
        elif IS_ORCID.match(a_value):
            if a_value.startswith('orcid:'):
                orcid = a_value[6:]
            else:
                orcid = a_value
        else:
            source = a_value
    # Subfield 'c' holds the acquisition method; legacy names are mapped
    # to canonical spellings, anything else is passed through lowercased.
    c_value = force_single_element(value.get('c', ''))
    normalized_c_value = c_value.lower()
    if normalized_c_value == 'batchupload':
        method = 'batchuploader'
    elif normalized_c_value == 'submission':
        method = 'submitter'
    else:
        method = normalized_c_value
    return {
        'datetime': _get_datetime(value),
        'email': value.get('b'),
        'internal_uid': internal_uid,
        'method': method,
        'orcid': orcid,
        'source': source,
        'submission_number': value.get('e'),
    }
|
def get_initial_RAM(self):
    """Build the initial Dragon RAM content: alternating runs of four
    0xff bytes and four 0x00 bytes.

    See: http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=5&t=4444

    :return: list of ints of length ``self.RAM_SIZE``.
    """
    mem = []
    # py3 fix: xrange -> range; each iteration contributes 8 bytes.
    for _ in range(self.RAM_SIZE // 8):
        mem += [0xff] * 4
        mem += [0x00] * 4
    return mem
|
def epsilon(data, correction='gg'):
    """Epsilon adjustement factor for repeated measures.

    Parameters
    ----------
    data : pd.DataFrame
        DataFrame containing the repeated measurements, in wide format.
    correction : string
        Epsilon version: 'gg' (Greenhouse-Geisser), 'hf' (Huynh-Feldt)
        or 'lb' (lower bound).

    Returns
    -------
    eps : float
        Epsilon adjustement factor.

    Notes
    -----
    The lower bound is :math:`1 / (k - 1)` where :math:`k` is the number
    of groups (= data.shape[1]).  The Greenhouse-Geisser epsilon is
    computed from the covariance matrix of the measurements, and the
    Huynh-Feldt epsilon is a correction of the GG value using the number
    of subjects :math:`n`.

    References
    ----------
    .. [1] http://www.real-statistics.com/anova-repeated-measures/sphericity/

    Examples
    --------
    >>> import pandas as pd
    >>> data = pd.DataFrame({'A': [2.2, 3.1, 4.3, 4.1, 7.2],
    ...                      'B': [1.1, 2.5, 4.1, 5.2, 6.4],
    ...                      'C': [8.2, 4.5, 3.4, 6.2, 7.2]})
    >>> epsilon(data, correction='gg')
    0.5587754577585018
    >>> epsilon(data, correction='hf')
    0.6223448311539781
    >>> epsilon(data, correction='lb')
    0.5
    """
    cov = data.cov()
    n_subj, n_groups = data.shape
    if correction == 'lb':
        # Lower bound: 1/(k-1) for a one-way design, product of the
        # per-factor bounds for a two-way (MultiIndex column) design.
        if cov.columns.nlevels == 1:
            return 1 / (n_groups - 1)
        elif cov.columns.nlevels == 2:
            ka = cov.columns.levels[0].size
            kb = cov.columns.levels[1].size
            return 1 / ((ka - 1) * (kb - 1))
    # Greenhouse-Geisser epsilon from the covariance matrix.
    mean_var = np.diag(cov).mean()
    grand_mean = cov.mean().mean()
    ss_all = (cov ** 2).sum().sum()
    ss_rows = (cov.mean(1) ** 2).sum().sum()
    numer = (n_groups * (mean_var - grand_mean)) ** 2
    denom = (n_groups - 1) * (ss_all - 2 * n_groups * ss_rows
                              + n_groups ** 2 * grand_mean ** 2)
    eps = np.min([numer / denom, 1])
    if correction == 'hf':
        # Huynh-Feldt correction of the GG epsilon, capped at 1.
        numer = n_subj * (n_groups - 1) * eps - 2
        denom = (n_groups - 1) * (n_subj - 1 - (n_groups - 1) * eps)
        eps = np.min([numer / denom, 1])
    return eps
|
def GetZipInfo(self):
    """Retrieves the ZIP info object.

    Returns:
      zipfile.ZipInfo: a ZIP info object or None if not available.

    Raises:
      PathSpecError: if the path specification is incorrect.
    """
    if self._zip_info:
        return self._zip_info
    location = getattr(self.path_spec, 'location', None)
    if location is None:
        raise errors.PathSpecError('Path specification missing location.')
    if not location.startswith(self._file_system.LOCATION_ROOT):
        raise errors.PathSpecError('Invalid location in path specification.')
    if len(location) == 1:
        # The archive root has no corresponding ZIP member.
        return None
    zip_file = self._file_system.GetZipFile()
    try:
        self._zip_info = zip_file.getinfo(location[1:])
    except KeyError:
        # No member with that name: leave the cached value unset (None).
        pass
    return self._zip_info
|
def _add_hard_link_to_rec(self, old_rec, boot_catalog_old, **kwargs):
    # type: (Any, bool, str) -> int
    '''Add a hard link to the ISO.

    Hard links are alternate names for the same file contents that don't
    take up any additional space on the ISO.  This API can be used to
    create hard links between two files on the ISO9660 filesystem,
    between two files on the Joliet filesystem, or between a file on the
    ISO9660 filesystem and the Joliet filesystem.  In all cases, exactly
    one old path must be specified, and exactly one new path must be
    specified.

    Parameters:
     old_rec - The old record to link against.
     boot_catalog_old - Whether this is a link to an old boot catalog.
     iso_new_path - The new path on the ISO9660 filesystem to link to.
     joliet_new_path - The new path on the Joliet filesystem to link to.
     rr_name - The Rock Ridge name to use for the new file if this is a
               Rock Ridge ISO and the new path is on the ISO9660
               filesystem.
     udf_new_path - The new path on the UDF filesystem to link to.
    Returns:
     The number of bytes to add to the descriptors.
    '''
    num_new = 0
    iso_new_path = None
    joliet_new_path = None
    rr_name = b''
    udf_new_path = None
    new_rec = None
    # type: Optional[Union[dr.DirectoryRecord, udfmod.UDFFileEntry]]
    # Parse the keyword arguments; count how many *_new_path were given
    # so that "exactly one" can be enforced below.
    for key in kwargs:
        if key == 'iso_new_path' and kwargs[key] is not None:
            num_new += 1
            iso_new_path = utils.normpath(kwargs[key])
            if not self.rock_ridge:
                _check_path_depth(iso_new_path)
        elif key == 'joliet_new_path' and kwargs[key] is not None:
            num_new += 1
            joliet_new_path = self._normalize_joliet_path(kwargs[key])
        elif key == 'rr_name' and kwargs[key] is not None:
            rr_name = self._check_rr_name(kwargs[key])
        elif key == 'udf_new_path' and kwargs[key] is not None:
            num_new += 1
            udf_new_path = utils.normpath(kwargs[key])
        else:
            raise pycdlibexception.PyCdlibInvalidInput('Unknown keyword %s' % (key))
    if num_new != 1:
        raise pycdlibexception.PyCdlibInvalidInput('Exactly one new path must be specified')
    if self.rock_ridge and iso_new_path is not None and not rr_name:
        raise pycdlibexception.PyCdlibInvalidInput('Rock Ridge name must be supplied for a Rock Ridge new path')
    data_ino = old_rec.inode
    num_bytes_to_add = 0
    if udf_new_path is None:
        file_mode = -1
        if iso_new_path is not None:
            # ... to another file on the ISO9660 filesystem.
            (new_name, new_parent) = self._iso_name_and_parent_from_path(iso_new_path)
            vd = self.pvd
            rr = self.rock_ridge
            xa = self.xa
            if self.rock_ridge:
                # Carry over the POSIX file mode of the link target.
                file_mode = old_rec.rock_ridge.get_file_mode()
        elif joliet_new_path is not None:
            if self.joliet_vd is None:
                raise pycdlibexception.PyCdlibInternalError('Tried to link to Joliet record on non-Joliet ISO')
            # ... to a file on the Joliet filesystem.
            (new_name, new_parent) = self._joliet_name_and_parent_from_path(joliet_new_path)
            vd = self.joliet_vd
            rr = ''
            xa = False
        # Above we checked to make sure we got at least one new path, so we
        # don't need to worry about the else situation here.
        new_rec = dr.DirectoryRecord()
        new_rec.new_file(vd, old_rec.get_data_length(), new_name, new_parent, vd.sequence_number(), rr, rr_name, xa, file_mode)
        num_bytes_to_add += self._add_child_to_dr(new_rec, vd.logical_block_size())
    else:
        if self.udf_root is None:
            raise pycdlibexception.PyCdlibInvalidInput('Can only specify a udf_path for a UDF ISO')
        log_block_size = self.pvd.logical_block_size()
        # UDF new path: create a File Identifier Descriptor plus a File
        # Entry, both pointing at the existing data.
        (udf_name, udf_parent) = self._udf_name_and_parent_from_path(udf_new_path)
        file_ident = udfmod.UDFFileIdentifierDescriptor()
        file_ident.new(False, False, udf_name, udf_parent)
        num_new_extents = udf_parent.add_file_ident_desc(file_ident, log_block_size)
        num_bytes_to_add += num_new_extents * log_block_size
        file_entry = udfmod.UDFFileEntry()
        file_entry.new(old_rec.get_data_length(), 'file', udf_parent, log_block_size)
        file_ident.file_entry = file_entry
        file_entry.file_ident = file_ident
        # Only the first UDF link to an inode consumes a block for the
        # File Entry itself.
        if data_ino is None or data_ino.num_udf == 0:
            num_bytes_to_add += log_block_size
        if data_ino is not None:
            data_ino.num_udf += 1
        new_rec = file_entry
        self.udf_logical_volume_integrity.logical_volume_impl_use.num_files += 1
    # Register the new record on the shared inode so that both names
    # resolve to the same data.
    if data_ino is not None and new_rec is not None:
        data_ino.linked_records.append(new_rec)
        new_rec.inode = data_ino
    if boot_catalog_old and new_rec is not None:
        if self.eltorito_boot_catalog is None:
            raise pycdlibexception.PyCdlibInternalError('Tried to link to El Torito on non-El Torito ISO')
        self.eltorito_boot_catalog.add_dirrecord(new_rec)
    return num_bytes_to_add
|
def primitive(self, dictionary):
    """Populate this item's attributes from a Python primitive (dict),
    skipping entries with falsy values."""
    self.__dict__ = dict(
        (key, val) for key, val in dictionary.items() if val)
|
def driver_send(command, hostname=None, wait=0.2):
    '''Send a command (or ``list`` of commands) to AFNI at ``hostname``
    (defaults to local host).

    Requires plugouts enabled (open afni with ``-yesplugouts`` or set
    ``AFNI_YESPLUGOUTS=YES`` in ``.afnirc``).

    If ``wait`` is not ``None``, will automatically sleep ``wait``
    seconds after sending the command (to make sure it took effect).
    '''
    cmd = ['plugout_drive']
    if hostname:
        cmd += ['-host', hostname]
    # py3 fix: ``basestring`` no longer exists; wrap a single command
    # string into a one-element list.
    if isinstance(command, str):
        command = [command]
    cmd += [['-com', x] for x in command] + ['-quit']
    nl.run(cmd, quiet=None, stderr=None)  # result was an unused local
    if wait is not None:  # idiom fix: 'is not None' instead of '!= None'
        time.sleep(wait)
|
def _updateWidgets(self):
    """Updates the combo and spin boxes given the new rti or axes.

    Emits the sigContentsChanged signal.
    """
    row = 0
    model = self.tree.model()
    # Create path label for the (single) editor row.
    nodePath = '' if self.rti is None else self.rti.nodePath
    pathItem = QtGui.QStandardItem(nodePath)
    pathItem.setToolTip(nodePath)
    pathItem.setEditable(False)
    if self.rti is not None:
        pathItem.setIcon(self.rti.decoration)
    model.setItem(row, 0, pathItem)
    # Rebuild the editors: spin boxes depend on the combo selections, so
    # delete them first, repopulate the combos, then recreate them.
    self._deleteSpinBoxes(row)
    self._populateComboBoxes(row)
    self._createSpinBoxes(row)
    self._updateRtiInfo()
    self.tree.resizeColumnsToContents(startCol=self.COL_FIRST_COMBO)
    logger.debug("{} sigContentsChanged signal (_updateWidgets)"
                 .format("Blocked" if self.signalsBlocked() else "Emitting"))
    self.sigContentsChanged.emit(UpdateReason.RTI_CHANGED)
|
def _summarize_accessible_fields ( field_descriptions , width = 40 , section_title = 'Accessible fields' ) :
"""Create a summary string for the accessible fields in a model . Unlike
` _ toolkit _ repr _ print ` , this function does not look up the values of the
fields , it just formats the names and descriptions .
Parameters
field _ descriptions : dict { str : str }
Name of each field and its description , in a dictionary . Keys and
values should be strings .
width : int , optional
Width of the names . This is usually determined and passed by the
calling ` _ _ repr _ _ ` method .
section _ title : str , optional
Name of the accessible fields section in the summary string .
Returns
out : str"""
|
key_str = "{:<{}}: {}"
items = [ ]
items . append ( section_title )
items . append ( "-" * len ( section_title ) )
for field_name , field_desc in field_descriptions . items ( ) :
items . append ( key_str . format ( field_name , width , field_desc ) )
return "\n" . join ( items )
|
def nucmer_hits_to_ref_and_qry_coords(cls, nucmer_hits, contig=None):
    '''Same as nucmer_hits_to_ref_coords, except removes containing hits
    first, and returns ref and qry coords lists.'''
    # Restrict to one contig when requested, otherwise use all of them.
    if contig is None:
        ctg_coords = {key: [] for key in nucmer_hits.keys()}
    else:
        ctg_coords = {contig: []}
    ref_coords = {}
    for key in ctg_coords:
        hits = copy.copy(nucmer_hits[key])
        # Sort by ref-interval length so a hit contained in its longer
        # successor can be dropped in a single left-to-right pass.
        hits.sort(key=lambda x: len(x.ref_coords()))
        if len(hits) > 1:
            i = 0
            while i < len(hits) - 1:
                c1 = hits[i].ref_coords()
                c2 = hits[i + 1].ref_coords()
                if c2.contains(c1):
                    # hits[i] lies entirely inside the next hit; drop it.
                    hits.pop(i)
                else:
                    i += 1
        ref_coords[key] = [hit.ref_coords() for hit in hits]
        ctg_coords[key] = [hit.qry_coords() for hit in hits]
        # Merge any remaining overlapping intervals in place.
        pyfastaq.intervals.merge_overlapping_in_list(ref_coords[key])
        pyfastaq.intervals.merge_overlapping_in_list(ctg_coords[key])
    return ctg_coords, ref_coords
|
def orthologize(ast, bo, species_id: str):
    """Recursively orthologize BEL Entities in BEL AST.

    NOTE: - will take first ortholog returned in BEL.bio API result
    (which may return more than one ortholog)

    Args:
        ast (BEL): BEL AST (or sub-node when called recursively)
        bo: BEL object collecting validation messages and species
        species_id (str): species to orthologize the entities to

    Returns:
        BEL: BEL AST
    """
    if not species_id:
        bo.validation_messages.append(("WARNING", "No species id was provided for orthologization"))
        return ast
    if isinstance(ast, NSArg):
        if ast.orthologs:
            # Use the precomputed ortholog table attached to the NSArg.
            if ast.orthologs.get(species_id, None):
                orthologized_nsarg_val = ast.orthologs[species_id]["decanonical"]
                ns, value = orthologized_nsarg_val.split(":")
                ast.change_nsvalue(ns, value)
                ast.canonical = ast.orthologs[species_id]["canonical"]
                ast.decanonical = ast.orthologs[species_id]["decanonical"]
                ast.orthologized = True
                bo.ast.species.add((species_id, ast.orthologs[species_id]["species_label"]))
            else:
                # No ortholog for the target species: keep the original
                # entity and record a warning.
                bo.ast.species.add((ast.species_id, ast.species_label))
                bo.validation_messages.append(("WARNING", f"No ortholog found for {ast.namespace}:{ast.value}"))
        elif ast.species_id:
            bo.ast.species.add((ast.species_id, ast.species_label))
    # Recursively process every NSArg by processing BELAst and Functions
    if hasattr(ast, "args"):
        for arg in ast.args:
            orthologize(arg, bo, species_id)
    return ast
|
def output_format_lock(self, packages, **kwargs):
    """Render ``packages`` as the text of a lock file.

    Produces a fixed-width table: a '#'-prefixed header line with the
    column names, then one line per package sorted case-insensitively
    by package name.
    """
    self._output_config['type'] = PLAIN
    text = ''
    tmp_packages = OrderedDict()
    columns = self._config.get_columns()
    widths = {}
    # First pass: collect each package's params and the widest value
    # seen per column.  ``comment`` is 1 only for the first column, to
    # leave room for the leading '#' of the header line.
    # NOTE(review): the ``+ comment`` is re-applied on every package, so
    # the first column's width grows by one per package — looks
    # unintended; confirm against expected lock-file output.
    for _pkg in packages.values():
        _pkg_name = _pkg.package_name
        _params = _pkg.get_params(columns, merged=True, raw=False)
        if _pkg_name not in tmp_packages:
            tmp_packages[_pkg_name] = _params
        comment = 1
        for _col in columns:
            widths[_col] = max(widths.get(_col, len(_col)), len(str(_params.get(_col, '')))) + comment
            comment = 0
    # Header line: column names padded to their widths, '#'-prefixed.
    comment = 1
    for _col in columns:
        text += '{}{} '.format(_col, ' ' * (widths[_col] - len(_col) - comment))
        comment = 0
    text = '#{}\n'.format(text.strip())
    # Second pass: one padded line per package.
    for _pkg_name in sorted(tmp_packages, key=lambda x: str(x).lower()):
        _pkg = tmp_packages[_pkg_name]
        line = ''
        for _col in columns:
            line += '{}{} '.format(_pkg[_col], ' ' * (widths[_col] - len(str(_pkg[_col]))))
        text += '{}\n'.format(line.strip())
    return text
|
async def _capability_negotiated ( self , capab ) :
"""Mark capability as negotiated , and end negotiation if we ' re done ."""
|
self . _capabilities_negotiating . discard ( capab )
if not self . _capabilities_requested and not self . _capabilities_negotiating :
await self . rawmsg ( 'CAP' , 'END' )
|
def touidref(src, dst, src_relation, src_portal_type, fieldname):
    """Convert an archetypes reference in src/src_relation to a
    UIDReference in dst/fieldname.

    :param src: source object holding the AT reference(s).
    :param dst: destination object carrying the UIDReference field.
    :param src_relation: relationship name of the AT reference.
    :param src_portal_type: portal type of the source (kept for
        interface compatibility; unused here).
    :param fieldname: name of the destination field.
    :raises RuntimeError: if the destination field does not exist.
    """
    field = dst.getField(fieldname)
    # Bug fix: the missing-field check must run before the field is
    # used; previously ``field.get(src)`` below could raise an
    # AttributeError before the intended RuntimeError was reached.
    if not field:
        raise RuntimeError('Cannot find field %s/%s' % (fieldname, src))
    refs = src.getRefs(relationship=src_relation)
    if len(refs) == 1:
        value = get_uid(refs[0])
    elif len(refs) > 1:
        # py3 fix: materialize the filtered UIDs so the field receives a
        # list rather than a lazy ``filter`` iterator.
        value = [uid for uid in (get_uid(ref) for ref in refs) if uid]
    else:
        value = field.get(src)
    if not value:
        value = ''
    if field.required and not value:
        logger.exception('Required %s field %s/%s has no value'
                         % (src.portal_type, src, fieldname))
    field.set(src, value)
|
def find_dunder_version_in_file(self, full_path):
    # type: (str) -> Dict[str, str]
    """Find __version__ in a source file.

    :param full_path: path of the file to scan.
    :return: dict mapping the file path to the last ``__version__``
        value found (empty dict when none is found).
    """
    versions = {}
    with self.file_opener.open_this(full_path, "r") as infile:
        for line in infile:
            # Bug fix: the original called find_in_line() a second time
            # on the same line inside this branch, re-parsing it for no
            # effect; the redundant call has been removed.
            version, _ = dunder_version.find_in_line(line)
            if version:
                versions[full_path] = version
    return versions
|
class Page(QLExportable):
    '''A single page of a multi-page file, e.g. a pdf preview.

    Bug fix: this was declared with ``def`` instead of ``class``, which
    made it a function that merely defined (and discarded) local
    functions instead of a subclass of QLExportable.
    '''

    def __init__(self, filename, page_id):
        # Record the page id, then let the base class handle the file.
        self.id = page_id
        super(Page, self).__init__(filename)

    def export(self, export_format=ExportFormat.PNG):
        # Export is not implemented for single pages yet.
        pass
|
def download(self):
    """Download SRA files.

    Returns:
        :obj:`list` of :obj:`str`: List of downloaded files.
    """
    self.downloaded_paths = list()
    for path in self.paths_for_download:
        downloaded_path = list()
        utils.mkdir_p(os.path.abspath(self.directory))
        sra_run = path.split("/")[-1]
        logger.info("Analysing %s" % sra_run)
        # The FTP layout shards runs by the first six characters of the
        # run accession.
        url = type(self).FTP_ADDRESS_TPL.format(range_subdir=sra_run[:6], file_dir=sra_run)
        logger.debug("URL: %s", url)
        filepath = os.path.abspath(os.path.join(self.directory, "%s.sra" % sra_run))
        utils.download_from_url(url, filepath, aspera=self.aspera, silent=self.silent, force=self.force)
        if self.filetype in ("fasta", "fastq"):
            # Convert the downloaded .sra with (parallel-)fastq-dump.
            if utils.which('fastq-dump') is None:
                logger.error("fastq-dump command not found")
            ftype = ""
            if self.filetype == "fasta":
                ftype = " --fasta "
            cmd = "fastq-dump"
            if utils.which('parallel-fastq-dump') is None:
                cmd += " %s --outdir %s %s"
            else:
                logger.debug("Using parallel fastq-dump")
                cmd = " parallel-fastq-dump --threads %s"
                cmd = cmd % self.threads
                cmd += " %s --outdir %s -s %s"
            cmd = cmd % (ftype, self.directory, filepath)
            # Forward user-supplied fastq-dump options; a value of None
            # means a bare flag with no argument.
            for fqoption, fqvalue in iteritems(self.fastq_dump_options):
                if fqvalue:
                    cmd += (" --%s %s" % (fqoption, fqvalue))
                elif fqvalue is None:
                    cmd += (" --%s" % fqoption)
            logger.debug(cmd)
            process = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
            logger.info("Converting to %s/%s*.%s.gz\n" % (self.directory, sra_run, self.filetype))
            pout, perr = process.communicate()
            downloaded_path = glob.glob(os.path.join(self.directory, "%s*.%s.gz" % (sra_run, self.filetype)))
        elif self.filetype == 'sra':
            downloaded_path = glob.glob(os.path.join(self.directory, "%s*.%s" % (sra_run, self.filetype)))
        else:
            downloaded_path = glob.glob(os.path.join(self.directory, "%s*" % sra_run))
            logger.error("Filetype %s not supported." % self.filetype)
        if not self.keep_sra and self.filetype != 'sra':
            # Delete the intermediate .sra file once it was converted.
            os.unlink(filepath)
        self.downloaded_paths += downloaded_path
    return self.downloaded_paths
|
def createTable(self, tableName, path=None, source=None, schema=None, **options):
    """Creates a table based on the dataset in a data source.

    It returns the DataFrame associated with the table.

    The data source is specified by the ``source`` and a set of
    ``options``.  If ``source`` is not specified, the default data
    source configured by ``spark.sql.sources.default`` will be used.
    When ``path`` is specified, an external table is created from the
    data at the given path; otherwise a managed table is created.
    Optionally, a schema can be provided as the schema of the returned
    :class:`DataFrame` and created table.

    :return: :class:`DataFrame`
    """
    if path is not None:
        # Presence of a path makes the created table external.
        options["path"] = path
    if source is None:
        source = self._sparkSession._wrapped._conf.defaultDataSourceName()
    if schema is None:
        # Let the data source infer the schema.
        df = self._jcatalog.createTable(tableName, source, options)
    else:
        if not isinstance(schema, StructType):
            raise TypeError("schema should be StructType")
        # Convert the Python StructType into its JVM representation.
        scala_datatype = self._jsparkSession.parseDataType(schema.json())
        df = self._jcatalog.createTable(tableName, source, scala_datatype, options)
    return DataFrame(df, self._sparkSession._wrapped)
|
def clear(self, context=None):
    """Delete all data from the graph (or from one named graph when
    ``context`` is given)."""
    # Without a context, match every graph via the ?g variable.
    target = URIRef(context).n3() if context is not None else '?g'
    query = """
    DELETE { GRAPH %s { ?s ?p ?o } } WHERE { GRAPH %s { ?s ?p ?o } }
    """ % (target, target)
    self.parent.graph.update(query)
|
def get_attribute(self, attribute):
    """Fetch a single attribute value.

    :param attribute: requested attribute.
    :return: attribute value as str.
    :raise TgnError: if invalid attribute.
    """
    value = self.api.getAttribute(self.obj_ref(), attribute)
    # IXN answers '::ixNet::OK' for invalid attributes; surface an error
    # instead of returning that sentinel.
    if value == '::ixNet::OK':
        raise TgnError(self.ref + ' does not have attribute ' + attribute)
    return str(value)
|
def plotter(path, show, goodFormat):
    '''Make binned histograms of the results of each module.

    Counts of results in ranges [(0,40), (40,50), (50,60), (60,70),
    (70,80), (80,90), (90,100)].

    Arguments:
        path {pathlib.Path} -- directory to save plots to (or None)
        show {boolean} -- whether to show plots using python
        goodFormat {dict} -- module: [results for module]

    Output:
        saves plots to files / shows plots depending on inputs.
    '''
    bins = [0, 40, 50, 60, 70, 80, 90, 100]  # hoisted loop invariant
    for module_name, results in goodFormat.items():
        # Cut the data into bins and plot counts as a bar chart.
        out = pd.cut(results, bins=bins, include_lowest=True)
        ax = out.value_counts().plot.bar(rot=0, color="b", figsize=(10, 6),
                                         alpha=0.5, title=module_name)
        ax.set_xticklabels(['0 to 40', '40 to 50', '50 to 60', '60 to 70',
                            '70 to 80', '80 to 90', '90 to 100'])
        ax.set_ylabel("# of candidates")
        ax.set_xlabel("grade bins \n total candidates: {}".format(len(results)))
        if path is not None:
            # Bug fix: pathlib methods were called unbound with a str
            # argument (pathlib.Path.is_dir(path.as_posix())), which
            # raises; call them on the Path instance instead.
            if not path.is_dir():
                path.mkdir()
            plt.savefig(path / ''.join([module_name, '.png']))
            if show is not False:
                plt.show()
            else:
                plt.close()
        elif show is not False:
            plt.show()
|
def get_tensor_size(self, tensor_name, partial_layout=None,
                    mesh_dimension_to_size=None):
    """The size of a tensor in bytes.

    If partial_layout is specified, then mesh_dimension_to_size must
    also be; in that case the size on a single device is returned.

    Args:
      tensor_name: a string, name of a tensor in the graph.
      partial_layout: an optional {string: string}, from MTF dimension
        name to mesh dimension name.
      mesh_dimension_to_size: an optional {string: int}, from mesh
        dimension name to size.

    Returns:
      an integer
    """
    bytes_per_entry = self.get_tensor_dtype(tensor_name).size
    num_entries = self.get_tensor_num_entries(
        tensor_name, partial_layout, mesh_dimension_to_size)
    return bytes_per_entry * num_entries
|
def new_linsolver(name, prop):
    """Creates a linear solver.

    Parameters
    ----------
    name : string ('mumps', 'superlu', 'umfpack' or 'default')
    prop : string

    Returns
    -------
    solver : :class:`LinSolver <optalg.lin_solver.LinSolver>`
    """
    if name == 'default':
        # Prefer MUMPS, falling back to SuperLU when it is unavailable.
        try:
            return new_linsolver('mumps', prop)
        except ImportError:
            return new_linsolver('superlu', prop)
    if name == 'mumps':
        return LinSolverMUMPS(prop)
    if name == 'superlu':
        return LinSolverSUPERLU(prop)
    if name == 'umfpack':
        return LinSolverUMFPACK(prop)
    raise ValueError('invalid linear solver name')
|
def _setNodeData ( self , name , metadata , data , channel = None ) :
"""Returns a data point from data"""
|
nodeChannel = None
if name in metadata :
nodeChannelList = metadata [ name ]
if len ( nodeChannelList ) > 1 :
nodeChannel = channel if channel is not None else nodeChannelList [ 0 ]
elif len ( nodeChannelList ) == 1 :
nodeChannel = nodeChannelList [ 0 ]
if nodeChannel is not None and nodeChannel in self . CHANNELS :
return self . _hmchannels [ nodeChannel ] . setValue ( name , data )
LOG . error ( "HMDevice.setNodeData: %s not found with value %s on %i" % ( name , data , nodeChannel ) )
return False
|
def repr_failure(self, excinfo):
    """Called when self.runtest() raises an exception."""
    exc = excinfo.value
    colors = self.colors
    if not isinstance(exc, NbCellError):
        # Anything other than a cell error is a plugin-level failure.
        return "pytest plugin exception: %s" % str(exc)
    parts = [colors.FAIL + "Notebook cell execution failed" + colors.ENDC]
    template = (colors.OKBLUE + "Cell %d: %s\n\n" + "Input:\n" + colors.ENDC + "%s\n")
    parts.append(template % (exc.cell_num, str(exc), exc.source))
    if exc.inner_traceback:
        parts.append((colors.OKBLUE + "Traceback:" + colors.ENDC + "\n%s\n")
                     % exc.inner_traceback)
    return "\n".join(parts)
|
def mouseGestureHandler(self, info):
    """Callback for MouseClickContext; passed to VideoWidget as a parameter."""
    print(self.pre, ": mouseGestureHandler: ")
    # *** single click events ***
    if (info.fsingle):
        print(self.pre, ": mouseGestureHandler: single click")
        if (info.button == QtCore.Qt.LeftButton):
            # Left single click is logged only; no action is bound.
            print(self.pre, ": mouseGestureHandler: Left button clicked")
        elif (info.button == QtCore.Qt.RightButton):
            print(self.pre, ": mouseGestureHandler: Right button clicked")
            self.handle_right_single_click(info)
    # *** double click events ***
    elif (info.fdouble):
        if (info.button == QtCore.Qt.LeftButton):
            print(self.pre, ": mouseGestureHandler: Left button double-clicked")
            self.handle_left_double_click(info)
        elif (info.button == QtCore.Qt.RightButton):
            # Right double click is logged only; no action is bound.
            print(self.pre, ": mouseGestureHandler: Right button double-clicked")
|
def in_cmd(argv):
    """Run a command in the given virtualenv."""
    if len(argv) != 1:
        # Validate the env name (exits with a message when invalid),
        # then execute the command inside the virtualenv.
        parse_envname(argv, lambda: sys.exit('You must provide a valid virtualenv to target'))
        return inve(*argv)
    # No command given: just switch into the env.
    return workon_cmd(argv)
|
def reconciliateNs(self, tree):
    """Ensure all namespaces declared within the given tree are properly
    declared (e.g. after copy/cut-and-paste operations).

    The subtree may still hold pointers to namespace declarations outside
    the subtree, or invalid/masked ones. Existing namespaces found in the
    new environment are reused where possible; otherwise new namespaces
    are redeclared at the top of the given subtree.
    """
    tree__o = None if tree is None else tree._o
    return libxml2mod.xmlReconciliateNs(self._o, tree__o)
|
def text_has_changed(self, text):
    """Line edit's text has changed: cache it as an int, or None when empty."""
    text = to_text_string(text)
    self.lineno = int(text) if text else None
|
def closedopen(lower_value, upper_value):
    """Construct an interval that is closed below and open above.

    For example:
        >>> closedopen(100.2, 800.9)
        [100.2, 800.9)
    """
    return Interval(Interval.CLOSED, lower_value, upper_value, Interval.OPEN)
|
def format_output(old_maps, new_maps):
    """Merge the dict returned by `transform` back into the input container.

    Parameters
    ----------
    old_maps : {FieldArray, dict}
        The mapping object to add new maps to.
    new_maps : dict
        A dict with parameter name keys and numpy.array values.

    Returns
    -------
    {FieldArray, dict}
        The old_maps object extended with the keys from new_maps.
    """
    # FieldArray input -> FieldArray output
    if isinstance(old_maps, record.FieldArray):
        for name in new_maps.keys():
            vals = new_maps[name]
            try:
                old_maps = old_maps.add_fields([vals], [name])
            except ValueError:
                # field already exists: overwrite in place
                old_maps[name] = vals
        return old_maps
    # dict input -> dict output
    if isinstance(old_maps, dict):
        merged = old_maps.copy()
        merged.update(new_maps)
        return merged
    raise TypeError("Input type must be FieldArray or dict.")
|
def resume(localfile, jottafile, JFS):
    """Continue uploading a new file from local file (already exists on JottaCloud)."""
    with open(localfile) as source:
        return jottafile.resume(source)
|
def _aws_error_code(e):
    """Best-effort extraction of the AWS error code from an exception.

    Falls back to ``str(e)`` so error bookkeeping never raises on exceptions
    that lack the botocore ``response`` attribute (the original code crashed
    mid-cleanup for any non-ClientError exception).
    """
    try:
        return e.response['Error']['Code']
    except (AttributeError, KeyError, TypeError):
        return str(e)


def delete_user(iam_client, user, mfa_serial=None, keep_user=False, terminated_groups=None):
    """Delete an IAM user and the resources attached to it.

    Tears down, in order: access keys, MFA devices, group memberships, the
    login profile, inline policies, and managed-policy attachments, then the
    user record itself (unless ``keep_user`` is True, in which case the user
    is instead added to ``terminated_groups``). Each step is best-effort:
    failures are recorded and the remaining steps still run.

    :param iam_client: boto3 IAM client
    :param user: user name
    :param mfa_serial: optional extra virtual MFA device serial to delete
    :param keep_user: when True, do not delete the user record itself
    :param terminated_groups: groups the kept user is added to
    :return: list of error codes encountered
    """
    # Default changed from a shared mutable ``[]`` to None (same behavior,
    # avoids the mutable-default-argument pitfall).
    if terminated_groups is None:
        terminated_groups = []
    errors = []
    printInfo('Deleting user %s...' % user)
    # Delete access keys
    try:
        aws_keys = get_access_keys(iam_client, user)
        for aws_key in aws_keys:
            try:
                printInfo('Deleting access key ID %s... ' % aws_key['AccessKeyId'], False)
                iam_client.delete_access_key(AccessKeyId=aws_key['AccessKeyId'], UserName=user)
                printInfo('Success')
            except Exception as e:
                printInfo('Failed')
                printException(e)
                errors.append(_aws_error_code(e))
    except Exception as e:
        printException(e)
        printError('Failed to get access keys for user %s.' % user)
    # Deactivate and delete MFA devices
    try:
        mfa_devices = iam_client.list_mfa_devices(UserName=user)['MFADevices']
        for mfa_device in mfa_devices:
            serial = mfa_device['SerialNumber']
            try:
                printInfo('Deactivating MFA device %s... ' % serial, False)
                iam_client.deactivate_mfa_device(SerialNumber=serial, UserName=user)
                printInfo('Success')
            except Exception as e:
                printInfo('Failed')
                printException(e)
                errors.append(_aws_error_code(e))
            delete_virtual_mfa_device(iam_client, serial)
        if mfa_serial:
            delete_virtual_mfa_device(iam_client, mfa_serial)
    except Exception as e:
        printException(e)
        # typo fix: was 'Faile to fetch/...'
        printError('Failed to fetch/delete MFA device serial number for user %s.' % user)
        errors.append(_aws_error_code(e))
    # Remove IAM user from groups
    try:
        groups = iam_client.list_groups_for_user(UserName=user)['Groups']
        for group in groups:
            try:
                printInfo('Removing from group %s... ' % group['GroupName'], False)
                iam_client.remove_user_from_group(GroupName=group['GroupName'], UserName=user)
                printInfo('Success')
            except Exception as e:
                printInfo('Failed')
                printException(e)
                errors.append(_aws_error_code(e))
    except Exception as e:
        printException(e)
        printError('Failed to fetch IAM groups for user %s.' % user)
        errors.append(_aws_error_code(e))
    # Delete login profile; a missing profile raises and is deliberately ignored
    login_profile = []
    try:
        login_profile = iam_client.get_login_profile(UserName=user)['LoginProfile']
    except Exception:
        pass
    try:
        if len(login_profile):
            printInfo('Deleting login profile... ', False)
            iam_client.delete_login_profile(UserName=user)
            printInfo('Success')
    except Exception as e:
        printInfo('Failed')
        printException(e)
        errors.append(_aws_error_code(e))
    # Delete inline policies
    try:
        printInfo('Deleting inline policies... ', False)
        policies = iam_client.list_user_policies(UserName=user)
        for policy in policies['PolicyNames']:
            iam_client.delete_user_policy(UserName=user, PolicyName=policy)
        printInfo('Success')
    except Exception as e:
        printInfo('Failed')
        printException(e)
        errors.append(_aws_error_code(e))
    # Detach managed policies
    try:
        printInfo('Detaching managed policies... ', False)
        policies = iam_client.list_attached_user_policies(UserName=user)
        for policy in policies['AttachedPolicies']:
            iam_client.detach_user_policy(UserName=user, PolicyArn=policy['PolicyArn'])
        printInfo('Success')
    except Exception as e:
        printInfo('Failed')
        printException(e)
        errors.append(_aws_error_code(e))
    # Delete the IAM user (or keep it and re-group it)
    try:
        if not keep_user:
            iam_client.delete_user(UserName=user)
            printInfo('User %s deleted.' % user)
        else:
            for group in terminated_groups:
                add_user_to_group(iam_client, group, user)
    except Exception as e:
        printException(e)
        printError('Failed to delete user.')
        errors.append(_aws_error_code(e))
    return errors
|
def systemInformationType16():
    """SYSTEM INFORMATION TYPE 16 Section 9.1.43d"""
    # mesType 0x3d == 0b00111101
    return (L2PseudoLength(l2pLength=0x01) /
            TpPd(pd=0x6) /
            MessageType(mesType=0x3d) /
            Si16RestOctets())
|
def op_gen(mcode):
    """Generate a machine instruction using the op gen table.

    The table entry is (opcode, (mask, shift), ...): each operand from
    mcode is masked and shifted into its field of the instruction word.
    """
    gen = op_tbl[mcode[0]]
    ret = gen[0]  # base opcode
    for slot in range(1, len(gen)):
        if slot < len(mcode):  # or assume they are same len
            mask, shift = gen[slot]
            ret |= (mcode[slot] & mask) << shift
    return ret
|
def SetDayOfWeekHasService(self, dow, has_service=True):
    """Set service as running (or not) on a day of the week.  By default the
    service does not run on any days.

    Args:
      dow: 0 for Monday through 6 for Sunday
      has_service: True if this service operates on dow, False if it does not.

    Returns:
      None
    """
    assert 0 <= dow < 7
    self.day_of_week[dow] = has_service
|
def main():
    """Command-line entry point.

    Builds the argparse subcommand tree (setup / seed / list / create /
    delete), loads the on-disk config, and dispatches on ``args.cmd``.
    """
    parser = ArgumentParser()
    subs = parser.add_subparsers(dest='cmd')
    # -- per-subcommand argument definitions (note: `setup_parser` is reused
    # as a scratch variable for every subparser)
    setup_parser = subs.add_parser('setup')
    setup_parser.add_argument('-e', '--email', dest='email', required=True, help='Email of the Google user.', type=str)
    setup_parser.add_argument('-p', '--password', dest='pwd', required=True, help='Password of the Google user.', type=str)
    setup_parser = subs.add_parser('seed')
    setup_parser.add_argument('-d', '--driver', dest='driver', required=True, type=str, help='Location of the Chrome driver. This can be downloaded by visiting http://chromedriver.chromium.org/downloads', )
    setup_parser = subs.add_parser('list')
    setup_parser = subs.add_parser('create')
    setup_parser.add_argument('-t', '--term', dest='term', required=True, help='Term to store.', type=str)
    setup_parser.add_argument('--exact', dest='exact', action='store_true', help='Exact matches only for term.')
    setup_parser.add_argument('-d', '--delivery', dest='delivery', required=True, choices=['rss', 'mail'], help='Delivery method of results.')
    setup_parser.add_argument('-f', '--frequency', dest='frequency', default="realtime", choices=['realtime', 'daily', 'weekly'], help='Frequency to send results. RSS only allows for realtime alerting.')
    setup_parser = subs.add_parser('delete')
    setup_parser.add_argument('--id', dest='term_id', required=True, help='ID of the term to find for deletion.', type=str)
    args = parser.parse_args()
    if args.cmd == 'setup':
        # Write credentials to the config file (password is obfuscated, not encrypted).
        if not os.path.exists(CONFIG_PATH):
            os.makedirs(CONFIG_PATH)
        if not os.path.exists(CONFIG_FILE):
            json.dump(CONFIG_DEFAULTS, open(CONFIG_FILE, 'w'), indent=4, separators=(',', ': '))
        config = CONFIG_DEFAULTS
        config['email'] = args.email
        config['password'] = str(obfuscate(args.pwd, 'store'))
        json.dump(config, open(CONFIG_FILE, 'w'), indent=4, separators=(',', ': '))
    # Every command (including setup itself) re-reads the config from disk.
    config = json.load(open(CONFIG_FILE))
    if config.get('py2', PY2) != PY2:
        raise Exception("Python versions have changed. Please run `setup` again to reconfigure the client.")
    if config['password'] == '':
        raise Exception("Run setup before any other actions!")
    if args.cmd == 'seed':
        # Drive a real Chrome login to capture session cookies for later reuse.
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        with contextlib.closing(webdriver.Chrome(args.driver)) as driver:
            driver.get(ga.LOGIN_URL)
            wait = ui.WebDriverWait(driver, 10)
            # timeout after 10 seconds
            inputElement = driver.find_element_by_name('Email')
            inputElement.send_keys(config['email'])
            inputElement.submit()
            time.sleep(3)
            inputElement = driver.find_element_by_id('Passwd')
            inputElement.send_keys(config['password'])
            inputElement.submit()
            print("[!] Waiting 15 seconds for authentication to complete")
            time.sleep(15)
            cookies = driver.get_cookies()
            collected = dict()
            for cookie in cookies:
                collected[str(cookie['name'])] = str(cookie['value'])
            # protocol=2 keeps the pickled session readable from Python 2 as well
            with open(SESSION_FILE, 'wb') as f:
                pickle.dump(collected, f, protocol=2)
            print("Session has been seeded.")
    if args.cmd == 'list':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        ga.authenticate()
        print(json.dumps(ga.list(), indent=4))
    if args.cmd == 'create':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        ga.authenticate()
        # Map user-facing frequency names onto the service's internal ones.
        alert_frequency = 'as_it_happens'
        if args.frequency == 'realtime':
            alert_frequency = 'as_it_happens'
        elif args.frequency == 'daily':
            alert_frequency = 'at_most_once_a_day'
        else:
            alert_frequency = 'at_most_once_a_week'
        monitor = ga.create(args.term, {'delivery': args.delivery.upper(), 'alert_frequency': alert_frequency.upper(), 'exact': args.exact})
        print(json.dumps(monitor, indent=4))
    if args.cmd == 'delete':
        config['password'] = obfuscate(str(config['password']), 'fetch')
        ga = GoogleAlerts(config['email'], config['password'])
        ga.authenticate()
        result = ga.delete(args.term_id)
        if result:
            print("%s was deleted" % args.term_id)
|
def should_trace(self, sampling_req=None):
    """Return the matched sampling rule name if the sampler finds one
    and decides to sample. If no sampling rule matched, fall back to the
    local sampler's ``should_trace`` implementation.

    All optional arguments are extracted from incoming requests by X-Ray
    middleware to perform path based sampling.
    """
    if not global_sdk_config.sdk_enabled():
        return False
    # only front-end that actually uses the sampler spawns poller threads
    if not self._started:
        self.start()
    now = int(time.time())
    # make sure the request always carries a service type
    if sampling_req is None:
        sampling_req = {'service_type': self._origin}
    elif not sampling_req.get('service_type', None):
        sampling_req['service_type'] = self._origin
    matched_rule = self._cache.get_matched_rule(sampling_req, now)
    if not matched_rule:
        log.info('No effective centralized sampling rule match. Fallback to local rules.')
        return self._local_sampler.should_trace(sampling_req)
    log.debug('Rule %s is selected to make a sampling decision.', matched_rule.name)
    return self._process_matched_rule(matched_rule, now)
|
def read_csv(path, fieldnames=None, sniff=True, encoding='utf-8', *args, **kwargs):
    '''Read CSV rows as a table from a file.

    By default csv.reader() is used and the output is a list of lists.
    If fieldnames is provided, DictReader is used and the output is a list
    of OrderedDicts instead. CSV sniffing (dialect detection) is enabled
    by default; set sniff=False to switch it off.
    '''
    # list(iterable) instead of the redundant list(r for r in iterable):
    # identical result, no throwaway generator wrapper.
    return list(read_csv_iter(path, fieldnames=fieldnames, sniff=sniff,
                              encoding=encoding, *args, **kwargs))
|
def calcTemperature(self):
    """Calculate the planet temperature via equations.MeanPlanetTemp,
    the albedo assumption, and potentially equations.starTemperature.

    Known issue: you can't get the albedo assumption without the
    temperature, but you need it to calculate the temperature.
    """
    try:
        return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p
    except (ValueError, HierarchyError):
        # e.g. a missing value (.a) propagates as an error -> report NaN
        return np.nan
|
def ingest_from_dataframe(self, df, ingestion_properties):
    """Ingest from pandas DataFrame.

    The frame is spooled to a gzipped CSV in the system temp directory,
    ingested, and the temporary file is removed afterwards.

    :param pandas.DataFrame df: input dataframe to ingest.
    :param azure.kusto.ingest.IngestionProperties ingestion_properties: Ingestion properties.
    :raises ValueError: if ``df`` is not a DataFrame.
    """
    from pandas import DataFrame

    if not isinstance(df, DataFrame):
        raise ValueError("Expected DataFrame instance, found {}".format(type(df)))
    file_name = "df_{timestamp}_{pid}.csv.gz".format(timestamp=int(time.time()), pid=os.getpid())
    temp_file_path = os.path.join(tempfile.gettempdir(), file_name)
    df.to_csv(temp_file_path, index=False, encoding="utf-8", header=False, compression="gzip")
    try:
        fd = FileDescriptor(temp_file_path)
        ingestion_properties.format = DataFormat.csv
        self._ingest(fd.zipped_stream, fd.size, ingestion_properties, content_encoding="gzip")
        fd.delete_files()
    finally:
        # Fix: previously the temp file leaked whenever ingestion raised.
        os.unlink(temp_file_path)
|
def fit(self, X):
    """Fit the scaler based on some data.

    Takes the columnwise mean and standard deviation of the entire input
    array. Arrays with more than 2 dimensions are flattened so that only
    the last axis is treated as the feature axis.

    Parameters
    ----------
    X : numpy array

    Returns
    -------
    self, with ``mean``, ``std`` and ``is_fit`` set.
    """
    flat = X
    if flat.ndim > 2:
        # collapse all leading axes so statistics stay columnwise
        flat = flat.reshape((np.prod(flat.shape[:-1]), flat.shape[-1]))
    self.mean, self.std = flat.mean(0), flat.std(0)
    self.is_fit = True
    return self
|
def clear(self, clear_indices=False):
    'Remove all items. index names are removed if ``clear_indices == True``.'
    super(MIMapping, self).clear()
    if clear_indices:
        self.indices.clear()
        return
    # keep the index names, but empty every secondary index
    for secondary_index in self.indices[1:]:
        secondary_index.clear()
|
async def callproc(self, procname, args=()):
    """Execute stored procedure ``procname`` with ``args``.

    Compatibility warning: PEP-249 specifies that any modified parameters
    must be returned. This is currently impossible as they are only
    available by storing them in a server variable and then retrieved by a
    query. The server variables are named @_procname_n, where n is the
    zero-based parameter position; once all result sets have been fetched,
    issue a SELECT @_procname_0, ... via .execute() to get any OUT/INOUT
    values.

    Compatibility warning: the act of calling a stored procedure itself
    creates an empty result set, appearing after any result sets generated
    by the procedure. Use nextset() to advance through all result sets;
    otherwise you may get disconnected.

    :param procname: ``str``, name of procedure to execute on server
    :param args: sequence of parameters to use with procedure
    :returns: the original args.
    """
    conn = self._get_db()
    if self._echo:
        logger.info("CALL %s", procname)
        logger.info("%r", args)
    # stash every argument in a server-side variable @_procname_n
    for position, value in enumerate(args):
        await self._query("SET @_%s_%d=%s" % (procname, position, conn.escape(value)))
    await self.nextset()
    placeholders = ','.join('@_%s_%d' % (procname, n) for n in range(len(args)))
    q = "CALL %s(%s)" % (procname, placeholders)
    await self._query(q)
    self._executed = q
    return args
|
def instance(cls, *args, **kwargs):
    """Return the per-class singleton, creating and loading it on first use."""
    if cls._instance is None:
        obj = cls(*args, **kwargs)
        cls._instance = obj
        # reload() result is logged so startup configuration is traceable
        logging.getLogger('luigi-interface').info('Loaded %r', obj.reload())
    return cls._instance
|
def update_model(self, sentences, update_labels_bool):
    '''Take a list of sentences and update an existing model. Vectors will
    be callable through self.model[label].

    update_labels_bool: boolean that says whether to train the model
        (self.model.train_words = True) or simply to get vectors for the
        documents (self.model.train_words = False).
    self.vectorize should not train the model further;
    self.train should if model already exists.
    '''
    # Register the new sentence labels with the vocabulary; returns the count added.
    n_sentences = self._add_new_labels(sentences)
    # add new rows to self.model.syn0 for the new labels
    n = self.model.syn0.shape[0]
    self.model.syn0 = np.vstack((self.model.syn0, np.empty((n_sentences, self.model.layer1_size), dtype=np.float32)))
    for i in xrange(n, n + n_sentences):
        # Deterministic per-label seeding: hash of label string + model seed,
        # mirroring gensim's own vector-initialization scheme.
        np.random.seed(np.uint32(self.model.hashfxn(self.model.index2word[i] + str(self.model.seed))))
        a = (np.random.rand(self.model.layer1_size) - 0.5) / self.model.layer1_size
        self.model.syn0[i] = a
    # Train word vectors only when requested; always train the label vectors.
    self.model.train_words = update_labels_bool
    self.model.train_lbls = True
    # train
    self.model.train(sentences)
    return
|
def word_tokenize(text, tokenizer=None, include_punc=True, *args, **kwargs):
    """Convenience function for tokenizing text into words.

    NOTE: NLTK's word tokenizer expects sentences as input, so the text is
    tokenized into sentences before being tokenized into words.

    This function returns an itertools chain object (generator).
    """
    _tokenizer = NLTKPunktTokenizer() if tokenizer is None else tokenizer
    return chain.from_iterable(
        WordTokenizer(tokenizer=_tokenizer).itokenize(sentence, include_punc, *args, **kwargs)
        for sentence in sent_tokenize(text, tokenizer=_tokenizer)
    )
|
def run(self):
    """Perform the specified action.

    The first truthy flag among add/rm/show/rename wins; with none set,
    fall through to running the command.
    """
    for flag in ('add', 'rm', 'show', 'rename'):
        if self.args[flag]:
            getattr(self, 'action_' + flag)()
            return
    self.action_run_command()
|
def keys(self):
    """:returns: a list of usable keys
    :rtype: list
    """
    # skip dunder attributes and bound methods; everything else is usable
    return [name for name, member in inspect.getmembers(self)
            if not name.startswith('__') and not inspect.ismethod(member)]
|
def add(self, *args):
    """Add strings to the keyboard while not exceeding row_width.

    E.g. ReplyKeyboardMarkup#add("A", "B", "C") yields the json result
    {keyboard: [["A"], ["B"], ["C"]]} when row_width is set to 1.
    When row_width is set to 2, the result is {keyboard: [["A", "B"], ["C"]]}.
    See https://core.telegram.org/bots/api#replykeyboardmarkup

    :param args: KeyboardButton to append to the keyboard
    """
    row = []
    for count, button in enumerate(args, start=1):
        # normalise each button to its dict representation
        if util.is_string(button):
            row.append({'text': button})
        elif isinstance(button, bytes):
            row.append({'text': button.decode('utf-8')})
        else:
            row.append(button.to_dic())
        if count % self.row_width == 0:
            self.keyboard.append(row)
            row = []
    if row:
        self.keyboard.append(row)
|
def set_itunes_closed_captioned(self):
    """Parse isClosedCaptioned from itunes tags and store the value."""
    try:
        raw = self.soup.find('itunes:isclosedcaptioned').string
        self.itunes_closed_captioned = raw.lower()
    except AttributeError:
        # tag missing (find() returned None) -> record the absence
        self.itunes_closed_captioned = None
|
def _insert_dLbl_in_sequence(self, idx):
    """Return a newly created `c:dLbl` element having `c:idx` child of *idx*
    and inserted in numeric sequence among the `c:dLbl` children of this
    element.
    """
    new_dLbl = self._new_dLbl()
    new_dLbl.idx.val = idx
    last_seen = None
    for existing in self.dLbl_lst:
        # first sibling with a larger idx: insert just before it
        if existing.idx_val > idx:
            existing.addprevious(new_dLbl)
            return new_dLbl
        last_seen = existing
    if last_seen is None:
        # no dLbl children at all: insert at the front
        self.insert(0, new_dLbl)
    else:
        # idx is larger than all existing ones: append after the last
        last_seen.addnext(new_dLbl)
    return new_dLbl
|
def get_invoice_payments_per_page(self, per_page=1000, page=1, params=None):
    """Get invoice payments per page.

    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :param params: Search parameters. Default: {}
    :return: list
    """
    return self._get_resource_per_page(
        resource=INVOICE_PAYMENTS,
        per_page=per_page,
        page=page,
        params=params if params else {},
    )
|
def serializer_class(self):
    """Get the class of the child serializer, resolving string imports
    and caching the resolved class for subsequent calls.
    """
    candidate = self._serializer_class
    if not isinstance(candidate, six.string_types):
        return candidate
    module_path, _, class_name = candidate.rpartition('.')
    if not module_path:
        if getattr(self, 'parent', None) is None:
            raise Exception("Can not load serializer '%s'" % candidate + ' before binding or without specifying full path')
        # bare class name: try the module of the parent serializer
        module_path = self.parent.__module__
    resolved = getattr(importlib.import_module(module_path), class_name)
    self._serializer_class = resolved
    return resolved
|
def toPlanarPotential(Pot):
    """NAME:

       toPlanarPotential

    PURPOSE:

       convert a Potential to a planarPotential in the mid-plane (z=0)

    INPUT:

       Pot - Potential instance or list of such instances (existing
       planarPotential instances are just copied to the output)

    OUTPUT:

       planarPotential instance(s)

    HISTORY:

       2016-06-11 - Written - Bovy (UofT)
    """
    def _convert(pot):
        # one potential -> its planar counterpart
        if isinstance(pot, planarPotential):
            return pot
        if isinstance(pot, Potential):
            if pot.isNonAxi:
                return planarPotentialFromFullPotential(pot)
            return planarPotentialFromRZPotential(pot)
        raise PotentialError("Input to 'toPlanarPotential' is neither an Potential-instance or a list of such instances")

    Pot = flatten(Pot)
    if isinstance(Pot, list):
        return [_convert(pot) for pot in Pot]
    return _convert(Pot)
|
def makeFrequencyGraph(allFreqs, title, substitution, pattern, color='blue', createFigure=True, showFigure=True, readsAx=False):
    """For a title, make a graph showing the frequencies.

    @param allFreqs: result from getCompleteFreqs
    @param title: A C{str}, title of virus of which frequencies should be
        plotted.
    @param substitution: A C{str}, which substitution should be plotted;
        must be one of 'C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'.
    @param pattern: A C{str}, which pattern we're looking for (must be
        one of 'cPattern', 'tPattern')
    @param color: A C{str}, color of bars.
    @param createFigure: If C{True}, create a figure.
    @param showFigure: If C{True}, show the created figure.
    @param readsAx: If not None, use this as the subplot for displaying reads.
    """
    # The 16 trinucleotide contexts with C (resp. T) in the middle position.
    cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT']
    tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT']
    # choose the right pattern
    if pattern == 'cPattern':
        patterns = cPattern
    else:
        patterns = tPattern
    # NOTE(review): a fresh figure is created even when readsAx is supplied;
    # confirm whether that stray figure is intentional.
    fig = plt.figure(figsize=(10, 10))
    ax = readsAx or fig.add_subplot(111)
    # how many bars
    N = 16
    ind = np.arange(N)
    width = 0.4
    # make a list in the right order, so that it can be plotted easily
    divisor = allFreqs[title]['numberOfReads']
    toPlot = allFreqs[title][substitution]
    index = 0
    data = []
    for item in patterns:
        # per-pattern count normalised by the total number of reads
        newData = toPlot[patterns[index]] / divisor
        data.append(newData)
        index += 1
    # create the bars
    ax.bar(ind, data, width, color=color)
    maxY = np.max(data) + 5
    # axes and labels
    if createFigure:
        title = title.split('|')[4][:50]
        ax.set_title('%s \n %s' % (title, substitution), fontsize=20)
        ax.set_ylim(0, maxY)
        # NOTE(review): the plotted values are normalised by read count, but
        # the label says "Absolute Number of Mutations" -- confirm wording.
        ax.set_ylabel('Absolute Number of Mutations', fontsize=16)
        ax.set_xticks(ind + width)
        ax.set_xticklabels(patterns, rotation=45, fontsize=8)
    if createFigure is False:
        # embedded use: keep tick positions but hide the labels
        ax.set_xticks(ind + width)
        ax.set_xticklabels(patterns, rotation=45, fontsize=0)
    else:
        if showFigure:
            plt.show()
    return maxY
|
def load_from_bytecode(self, code: str, bin_runtime: bool = False, address: Optional[str] = None) -> Tuple[str, EVMContract]:
    """Return the address and the contract class for the given bytecode.

    :param code: Bytecode
    :param bin_runtime: Whether the code is runtime code or creation code
    :param address: address of contract
    :return: tuple(address, Contract class)
    """
    if address is None:
        address = util.get_indexed_address(0)
    if bin_runtime:
        contract = EVMContract(code=code, name="MAIN", enable_online_lookup=self.enable_online_lookup)
    else:
        contract = EVMContract(creation_code=code, name="MAIN", enable_online_lookup=self.enable_online_lookup)
    self.contracts.append(contract)
    return address, contract
|
def _translate_nd(self,
                  source: mx.nd.NDArray,
                  source_length: int,
                  restrict_lexicon: Optional[lexicon.TopKLexicon],
                  raw_constraints: List[Optional[constrained.RawConstraintList]],
                  raw_avoid_list: List[Optional[constrained.RawConstraintList]],
                  max_output_lengths: mx.nd.NDArray) -> List[Translation]:
    """Translate source of source_length, given a bucket_key.

    :param source: Source ids. Shape: (batch_size, bucket_key, num_factors).
    :param source_length: Bucket key.
    :param restrict_lexicon: Lexicon to use for vocabulary restriction.
    :param raw_constraints: A list of optional constraint lists.
    :return: Sequence of translations.
    """
    beam_output = self._beam_search(source, source_length, restrict_lexicon,
                                    raw_constraints, raw_avoid_list, max_output_lengths)
    return self._get_best_from_beam(*beam_output)
|
def load_state(self, fname: str):
    """Loads the state of the iterator from a file.

    :param fname: File name to load the information from.
    """
    # restore order before applying the saved permutations
    self.data = self.data.permute(self.inverse_data_permutations)
    with open(fname, "rb") as fp:
        # Read order must mirror the save routine exactly:
        # two pickled objects followed by two numpy arrays.
        self.batch_indices = pickle.load(fp)
        self.curr_batch_index = pickle.load(fp)
        inverse_data_permutations = np.load(fp)
        data_permutations = np.load(fp)
    # Right after loading the iterator state, next() should be called
    self.curr_batch_index -= 1
    # load previous permutations (one per bucket)
    self.inverse_data_permutations = []
    self.data_permutations = []
    for bucket in range(len(self.data)):
        inverse_permutation = mx.nd.array(inverse_data_permutations[bucket])
        self.inverse_data_permutations.append(inverse_permutation)
        permutation = mx.nd.array(data_permutations[bucket])
        self.data_permutations.append(permutation)
    # re-apply the restored permutations to the data
    self.data = self.data.permute(self.data_permutations)
|
def set_option(self, key, subkey, value):
    """Set the value of an option.

    :param str key: First identifier of the option.
    :param str subkey: Second identifier of the option.
    :param value: New value for the option (type varies).

    :raise:
        :NotRegisteredError: If ``key`` or ``subkey`` do not define any
            option.
        :ValueError: If the targeted option is locked.
        :ValueError: If the provided value is not the expected
            type for the option.
        :ValueError: If the provided value is not in the expected
            available values for the option.
    """
    key, subkey = _lower_keys(key, subkey)
    _entry_must_exist(self.gc, key, subkey)
    # compute the row selector once; reused for both read and write
    mask = (self.gc["k1"] == key) & (self.gc["k2"] == subkey)
    row = self.gc[mask]
    if row["locked"].values[0]:
        raise ValueError("{0}.{1} option is locked".format(key, subkey))
    # type check (raises on mismatch)
    ev.value_eval(value, row["type"].values[0])
    if not self.check_option(key, subkey, value):
        info = "{0}.{1} accepted options are: ".format(key, subkey)
        info += "[{}]".format(", ".join(row["values"].values[0]))
        raise ValueError(info)
    self.gc.loc[mask, "value"] = value
|
def rehearse(self, docs, sgd=None, losses=None, config=None):
    """Make a "rehearsal" update to the models in the pipeline, to prevent
    forgetting. Rehearsal updates run an initial copy of the model over some
    data, and update the model so its current predictions are more like the
    initial ones. This is useful for keeping a pre-trained model on-track,
    even if you're updating it with a smaller set of examples.

    docs (iterable): A batch of `Doc` objects.
    drop (float): The dropout rate.
    sgd (callable): An optimizer.
    RETURNS (dict): Results from the update.

    EXAMPLE:
        >>> raw_text_batches = minibatch(raw_texts)
        >>> for labelled_batch in minibatch(zip(train_docs, train_golds)):
        >>>     docs, golds = zip(*train_docs)
        >>>     nlp.update(docs, golds)
        >>>     raw_batch = [nlp.make_doc(text) for text in next(raw_text_batches)]
        >>>     nlp.rehearse(raw_batch)
    """
    # TODO: document
    if len(docs) == 0:
        return
    if sgd is None:
        # lazily create the default optimizer on first use
        if self._optimizer is None:
            self._optimizer = create_default_optimizer(Model.ops)
        sgd = self._optimizer
    docs = list(docs)
    for i, doc in enumerate(docs):
        # accept raw strings as well as Doc objects
        if isinstance(doc, basestring_):
            docs[i] = self.make_doc(doc)
    pipes = list(self.pipeline)
    random.shuffle(pipes)
    if config is None:
        config = {}
    grads = {}

    def get_grads(W, dW, key=None):
        # Accumulate gradients instead of applying them immediately, so each
        # component's updates are applied together after its rehearse() call.
        grads[key] = (W, dW)

    # mirror the optimizer's hyperparameters on the gradient collector
    get_grads.alpha = sgd.alpha
    get_grads.b1 = sgd.b1
    get_grads.b2 = sgd.b2
    for name, proc in pipes:
        if not hasattr(proc, "rehearse"):
            continue
        grads = {}
        proc.rehearse(docs, sgd=get_grads, losses=losses, **config.get(name, {}))
        for key, (W, dW) in grads.items():
            sgd(W, dW, key=key)
    return losses
|
def legacy_networking(self, legacy_networking):
    """Set whether QEMU legacy networking commands are used.

    :param legacy_networking: boolean
    """
    if legacy_networking:
        message = 'QEMU VM "{name}" [{id}] has enabled legacy networking'
    else:
        message = 'QEMU VM "{name}" [{id}] has disabled legacy networking'
    log.info(message.format(name=self._name, id=self._id))
    self._legacy_networking = legacy_networking
|
def search_template_create(id, body, hosts=None, profile=None):
    '''
    .. versionadded:: 2017.7.0

    Create search template by supplied definition

    id
        Template ID
    body
        Search template definition

    CLI example::

        salt myminion elasticsearch.search_template_create mytemplate '{"template":{"query":{"match":{"title":"{{query_string}}"}}}}'
    '''
    es = _get_instance(hosts, profile)
    try:
        response = es.put_template(id=id, body=body)
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot create search template {0}, server returned code {1} with message {2}".format(id, e.status_code, e.error))
    # The server acknowledges the template creation; default to False if the
    # key is absent.
    return response.get('acknowledged', False)
|
def get_page_and_url(session, url):
    """Download an HTML page using the requests session.

    Returns a tuple ``(page_text, final_url)`` where ``final_url`` is the
    URL after following any redirects.
    """
    response = get_reply(session, url)
    return response.text, response.url
|
def systemInformationType7():
    """SYSTEM INFORMATION TYPE 7 Section 9.1.41"""
    # Message type 0x37 == 0b00110111
    header = L2PseudoLength(l2pLength=0x01) / TpPd(pd=0x6)
    payload = MessageType(mesType=0x37) / Si7RestOctets()
    return header / payload
|
def seek_in_frame(self, pos, *args, **kwargs):
    """Seek relative to the total offset of the current contextual frames."""
    absolute_pos = self._total_offset + pos
    super().seek(absolute_pos, *args, **kwargs)
|
def init_config(self, **kw):
    """Get a configuration object for this type of YubiKey."""
    version = self.version_num()
    return YubiKeyConfigUSBHID(ykver=version, capabilities=self.capabilities, **kw)
|
def update_stale(self):
    """Update stale active statuses."""
    # Some events don't post an inactive XML, only active. If we don't get
    # an active update for 5 seconds we assume the event is no longer
    # active and update accordingly.
    for event_type, channels in self.event_states.items():
        for state in channels:
            last_seen = state[3]
            if last_seen is None:
                continue
            elapsed = (datetime.datetime.now() - last_seen).total_seconds()
            if elapsed <= 5 or state[0] is not True:
                continue
            _LOGGING.debug('Updating stale event %s on CH(%s)', event_type, state[1])
            new_attr = [False, state[1], state[2], datetime.datetime.now()]
            self.update_attributes(event_type, state[1], new_attr)
            self.publish_changes(event_type, state[1])
|
def create_point(self, x, y):
    """Create an ECDSA point on the SECP256k1 curve with the given coords.

    :param x: The x coordinate on the curve
    :type x: long
    :param y: The y coordinate on the curve
    :type y: long
    """
    coords_are_ints = (isinstance(x, six.integer_types)
                       and isinstance(y, six.integer_types))
    if not coords_are_ints:
        raise ValueError("The coordinates must be longs.")
    return _ECDSA_Point(SECP256k1.curve, x, y)
|
def get_children(self):
    """:returns: A queryset of all the node's children"""
    manager = get_result_class(self.__class__).objects
    if self.is_leaf():
        return manager.none()
    interval = self._get_children_path_interval(self.path)
    return manager.filter(depth=self.depth + 1, path__range=interval).order_by('path')
|
def plot_multi(data, cols=None, spacing=.06, color_map=None, plot_kw=None, **kwargs):
    """Plot data with multiple scales together.

    Args:
        data: DataFrame of data
        cols: columns to be plotted; an entry may itself be a list/tuple of
            columns that share one y-axis
        spacing: horizontal spacing between the extra right-hand y-axes
        color_map: customized colors in map (column name -> color)
        plot_kw: list of kwargs dicts, one per entry of ``cols``
        **kwargs: kwargs for the first plot only

    Returns:
        ax for plot (the first/left axes), or None if ``cols`` is empty

    Examples:
        >>> import pandas as pd
        >>> import numpy as np
        >>> idx = range(5)
        >>> data = pd.DataFrame(dict(a=np.exp(idx), b=idx), index=idx)
        >>> # plot_multi(data=data, cols=['a', 'b'], plot_kw=[dict(style='.-'), dict()])
    """
    import matplotlib.pyplot as plt
    from pandas import plotting
    if cols is None:
        cols = data.columns
    if plot_kw is None:
        # NOTE(review): [{}] * len(cols) replicates ONE shared dict; the
        # .pop('legend', ...) calls below mutate it for every entry — confirm
        # this is intended before relying on per-entry plot_kw defaults.
        plot_kw = [{}] * len(cols)
    if len(cols) == 0:
        return
    # Total number of individual columns (groups flattened) -> colors needed.
    num_colors = len(utils.flatten(cols))
    # Get default color style from pandas; accesses a private pandas helper,
    # which may move between pandas versions.
    colors = getattr(getattr(plotting, '_style'), '_get_standard_colors')(num_colors=num_colors)
    if color_map is None:
        color_map = dict()
    fig = plt.figure()
    # ax: left axes; lines/labels: accumulated legend handles; c_idx: running
    # index into the flat color list.
    ax, lines, labels, c_idx = None, [], [], 0
    for n, col in enumerate(cols):
        if isinstance(col, (list, tuple)):
            # A group of columns sharing one y-axis: joint axis label and one
            # color per member column (color_map override per column).
            ylabel = ' / '.join(cols[n])
            color = [color_map.get(cols[n][_ - c_idx], colors[_ % len(colors)]) for _ in range(c_idx, c_idx + len(cols[n]))]
            c_idx += len(col)
        else:
            ylabel = col
            color = color_map.get(col, colors[c_idx % len(colors)])
            c_idx += 1
        if 'color' in plot_kw[n]:
            # An explicit per-plot color overrides the computed one.
            color = plot_kw[n].pop('color')
        if ax is None:
            # First y-axes
            legend = plot_kw[0].pop('legend', kwargs.pop('legend', False))
            ax = data.loc[:, col].plot(label=col, color=color, legend=legend, zorder=n, **plot_kw[0], **kwargs)
            ax.set_ylabel(ylabel=ylabel)
            line, label = ax.get_legend_handles_labels()
            ax.spines['left'].set_edgecolor('#D5C4A1')
            ax.spines['left'].set_alpha(.5)
        else:
            # Multiple y-axes: twin the first axes and push each new spine
            # further right so they don't overlap.
            legend = plot_kw[n].pop('legend', False)
            ax_new = ax.twinx()
            ax_new.spines['right'].set_position(('axes', 1 + spacing * (n - 1)))
            data.loc[:, col].plot(ax=ax_new, label=col, color=color, legend=legend, zorder=n, **plot_kw[n])
            ax_new.set_ylabel(ylabel=ylabel)
            line, label = ax_new.get_legend_handles_labels()
            ax_new.spines['right'].set_edgecolor('#D5C4A1')
            ax_new.spines['right'].set_alpha(.5)
            ax_new.grid(False)
        # Proper legend position: collect handles for one combined legend.
        lines += line
        labels += label
    fig.legend(lines, labels, loc=8, prop=dict(), ncol=num_colors).set_zorder(len(cols))
    # Blank multi-line xlabel reserves vertical space for the figure legend.
    ax.set_xlabel(' \n ')
    return ax
|
def remove_field(self, field):
    """Removes a field from this table.

    :param field: This can be a string of a field name, a dict of
        {'alias': field}, or a ``Field`` instance
    :type field: str or dict or :class:`Field <querybuilder.fields.Field>`
    :returns: the removed field, or None if no field matched
    """
    target = FieldFactory(field, )
    target.set_table(self)
    target_identifier = target.get_identifier()
    for existing in self.fields:
        if existing.get_identifier() == target_identifier:
            self.fields.remove(existing)
            return existing
    return None
|
def _init_field(self, setting, field_class, name, code=None):
    """Initialize a field whether it is built with a custom name for a
    specific translation language or not."""
    field_kwargs = {
        "label": setting["label"] + ":",
        "required": setting["type"] in (int, float),
        "initial": getattr(settings, name),
        "help_text": self.format_help(setting["description"]),
    }
    if setting["choices"]:
        # A restricted value set always becomes a choice field.
        field_class = forms.ChoiceField
        field_kwargs["choices"] = setting["choices"]
    field_instance = field_class(**field_kwargs)
    suffix = '_modeltranslation_' + code if code else ''
    self.fields[name + suffix] = field_instance
    widget_classes = field_class.__name__.lower()
    if code:
        widget_classes += " modeltranslation"
    field_instance.widget.attrs["class"] = widget_classes
|
def _to_diagonally_dominant ( mat ) :
"""Make matrix unweighted diagonally dominant using the Laplacian ."""
|
mat += np . diag ( np . sum ( mat != 0 , axis = 1 ) + 0.01 )
return mat
|
def _make_register(self) -> BaseRegisterStore:
    """Make the register storage."""
    cfg = settings.REGISTER_STORE
    store_cls = import_class(cfg['class'])
    return store_cls(**cfg['params'])
|
def save_imglist(self, fname=None, root=None, shuffle=False):
    """Save imglist to disk.

    Parameters
    ----------
    fname : str, optional
        saved filename; defaults to ``<dataset name>.lst``
    root : str, optional
        if given, image paths are written relative to this directory
    shuffle : bool, optional
        whether to shuffle the written records

    Raises
    ------
    RuntimeError
        if no image in the database produced a record
    """
    def progress_bar(count, total, suffix=''):
        # Render a simple in-place console progress bar.
        import sys
        bar_len = 24
        filled_len = int(round(bar_len * count / float(total)))
        percents = round(100.0 * count / float(total), 1)
        bar = '=' * filled_len + '-' * (bar_len - filled_len)
        sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
        sys.stdout.flush()

    str_list = []
    for index in range(self.num_images):
        progress_bar(index, self.num_images)
        label = self.label_from_index(index)
        if label.size < 1:
            # Skip images that have no labels at all.
            continue
        path = self.image_path_from_index(index)
        if root:
            path = osp.relpath(path, root)
        # Record format (tab-separated): index, header width (2), label
        # width, flattened label values (4 decimals), then the image path.
        str_list.append('\t'.join([str(index), str(2), str(label.shape[1])] + ["{0:.4f}".format(x) for x in label.ravel()] + [path, ]) + '\n')
    if str_list:
        if shuffle:
            import random
            random.shuffle(str_list)
        if not fname:
            fname = self.name + '.lst'
        with open(fname, 'w') as f:
            for line in str_list:
                f.write(line)
    else:
        raise RuntimeError("No image in imdb")
|
def combined_message_class(self):
    """A ProtoRPC message class with both request and parameters fields.

    Caches the result in a local private variable. Uses _CopyField to create
    copies of the fields from the existing request and parameters classes since
    those fields are "owned" by the message classes.

    Raises:
        TypeError: If a field name is used in both the request message and the
            parameters but the two fields do not represent the same type.

    Returns:
        Value of combined message class for this property.
    """
    # Return the cached class if it was already built.
    if self.__combined_message_class is not None:
        return self.__combined_message_class
    fields = {}
    # We don't need to preserve field.number since this combined class is only
    # used for the protorpc remote.method and is not needed for the API config.
    # The only place field.number matters is in parameterOrder, but this is set
    # based on container.parameters_message_class which will use the field
    # numbers originally passed in.

    # Counter for fields.
    field_number = 1
    for field in self.body_message_class.all_fields():
        fields[field.name] = _CopyField(field, number=field_number)
        field_number += 1
    for field in self.parameters_message_class.all_fields():
        if field.name in fields:
            # A name collision is only allowed if both fields agree.
            if not _CompareFields(field, fields[field.name]):
                raise TypeError('Field %r contained in both parameters and request '
                                'body, but the fields differ.' % (field.name,))
            else:
                # Skip a field that's already there.
                continue
        fields[field.name] = _CopyField(field, number=field_number)
        field_number += 1
    self.__combined_message_class = type('CombinedContainer', (messages.Message,), fields)
    return self.__combined_message_class
|
def exclude(self, **attrs):
    """Remove items from distribution that are named in keyword arguments.

    For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
    the distribution's 'py_modules' attribute. Excluding packages uses
    the 'exclude_package()' method, so all of the package's contained
    packages, modules, and extensions are also excluded.

    Currently, this method only supports exclusion from attributes that are
    lists or tuples. If you need to add support for excluding from other
    attributes in this or a subclass, you can add an '_exclude_X' method,
    where 'X' is the name of the attribute. The method will be called with
    the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
    will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
    handle whatever special exclusion logic is needed.
    """
    for name, value in attrs.items():
        handler = getattr(self, '_exclude_' + name, None)
        if handler is None:
            # No attribute-specific handler: fall back to generic exclusion.
            self._exclude_misc(name, value)
        else:
            handler(value)
|
def list_of_dictionaries_to_mysql_inserts(log, datalist, tableName):
    """Convert a python list of dictionaries to MySQL insert statements.

    **Key Arguments:**
        - ``log`` -- logger
        - ``datalist`` -- a list of dictionaries
        - ``tableName`` -- the name of the table to create the insert statements for

    **Return:**
        - ``output`` -- the mysql insert statements (as a string), or the
          string ``"NO MATCH"`` when ``datalist`` is empty

    **Usage:**

        .. code-block:: python

            from fundamentals.files import list_of_dictionaries_to_mysql_inserts
            mysqlInserts = list_of_dictionaries_to_mysql_inserts(
                log=log,
                datalist=dataList,
                tableName="my_new_table"
            )
    """
    log.debug('starting the ``list_of_dictionaries_to_mysql_inserts`` function')
    if not len(datalist):
        return "NO MATCH"
    inserts = []
    for d in datalist:
        insertCommand = convert_dictionary_to_mysql_table(
            log=log,
            dictionary=d,
            # BUGFIX: use the caller-supplied table name; previously the
            # "testing_table" placeholder was hard-coded, so the tableName
            # argument was silently ignored.
            dbTableName=tableName,
            uniqueKeyList=[],
            dateModified=False,
            returnInsertOnly=True,
            replace=True,
            batchInserts=False,
        )
        inserts.append(insertCommand)
    output = ";\n".join(inserts) + ";"
    log.debug('completed the ``list_of_dictionaries_to_mysql_inserts`` function')
    return output
|
def tempallow(ip=None, ttl=None, port=None, direction=None, comment=''):
    '''
    Add an rule to the temporary ip allow list.

    See :func:`_access_rule`.

    1- Add an IP:

    CLI Example:

    .. code-block:: bash

        salt '*' csf.tempallow 127.0.0.1 3600 port=22 direction='in' comment='# Temp dev ssh access'
    '''
    rule_args = (ip, ttl, port, direction, comment)
    return _tmp_access_rule('tempallow', *rule_args)
|
def evaluate(self, num_eval_batches=None):
    """Run one round of evaluation, return loss and accuracy.

    num_eval_batches : int, optional
        number of batches to evaluate; defaults to ``self.num_eval_batches``

    Returns the metric values computed by the evaluation graph.
    """
    num_eval_batches = num_eval_batches or self.num_eval_batches
    # Build a fresh evaluation graph for this round.
    with tf.Graph().as_default() as graph:
        self.tensors = self.model.build_eval_graph(self.eval_data_paths, self.batch_size)
        self.summary = tf.summary.merge_all()
        self.saver = tf.train.Saver()
    self.summary_writer = tf.summary.FileWriter(self.output_path)
    # Supervisor manages the session; summaries/checkpoints are handled
    # manually, hence summary_op/global_step are disabled here.
    self.sv = tf.train.Supervisor(graph=graph, logdir=self.output_path, summary_op=None, global_step=None, saver=self.saver)
    last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
    with self.sv.managed_session(master='', start_standard_services=False) as session:
        # Restore the most recent training checkpoint to evaluate it.
        self.sv.saver.restore(session, last_checkpoint)
        if not self.batch_of_examples:
            # First evaluation: materialize and cache the eval batches so
            # later rounds reuse exactly the same examples.
            self.sv.start_queue_runners(session)
            for i in range(num_eval_batches):
                self.batch_of_examples.append(session.run(self.tensors.examples))
        # Feed the cached batches through the metric-update ops.
        for i in range(num_eval_batches):
            session.run(self.tensors.metric_updates, {self.tensors.examples: self.batch_of_examples[i]})
        metric_values = session.run(self.tensors.metric_values)
        global_step = tf.train.global_step(session, self.tensors.global_step)
        summary = session.run(self.summary)
        self.summary_writer.add_summary(summary, global_step)
        self.summary_writer.flush()
        return metric_values
|
def match_function_id(self, function_id, match):
    """Matches the function identified by the given ``Id``.

    arg:    function_id (osid.id.Id): the Id of the ``Function``
    arg:    match (boolean): ``true`` if a positive match, ``false``
            for a negative match
    raise:  NullArgument - ``function_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    id_str = str(function_id)
    self._add_match('functionId', id_str, bool(match))
|
def chart_range(self):
    """Calculate the chart range from start and end.

    Downloads larger datasets (5y and 2y) when necessary, but defaults to
    1y for performance reasons.
    """
    years_back = datetime.datetime.now().year - self.start.year
    if years_back > 5 or years_back < 0:
        raise ValueError("Invalid date specified. Must be within past 5 years.")
    if years_back >= 2:
        return "5y"
    if years_back >= 1:
        return "2y"
    return "1y"
|
def flux_down(self, fluxDownTop, emission=None):
    '''Compute downwelling radiative flux at interfaces between layers.

    (The original docstring described the upwelling counterpart; this
    function propagates flux downward via ``self.Tdown``.)

    Inputs:
        * fluxDownTop: downwelling flux into the top of the column
        * emission: emission from atmospheric levels (N);
          defaults to zero if not given

    Returns:
        * vector of downwelling radiative flux between levels (N+1).
          NOTE(review): the exact interface ordering of the result depends
          on how ``self.Tdown`` is constructed -- confirm against its
          definition.
    '''
    if emission is None:
        # No emission given: assume a purely absorbing/transmitting column.
        emission = np.zeros_like(self.absorptivity)
    # Stack boundary flux and per-level emissions into one source vector.
    E = np.concatenate((np.atleast_1d(fluxDownTop), emission), axis=-1)
    # dot product (matrix multiplication) along last axes
    return np.squeeze(matrix_multiply(self.Tdown, E[..., np.newaxis]))
|
def copy_openapi_specs(output_path, component):
    """Copy generated and validated openapi specs to reana-commons module.

    :param output_path: path to the generated OpenAPI JSON file
    :param component: REANA component name the specification belongs to
    :raises ValueError: if ``component`` is not a known REANA component
    """
    # Map each known component to its target specification file name.
    # BUGFIX: an unknown component previously left ``file`` unbound and
    # crashed later with a NameError; fail early and explicitly instead.
    spec_files = {
        'reana-server': 'reana_server.json',
        'reana-workflow-controller': 'reana_workflow_controller.json',
        'reana-job-controller': 'reana_job_controller.json',
    }
    if component not in spec_files:
        raise ValueError('Unknown component {0}'.format(component))
    file = spec_files[component]
    if os.environ.get('REANA_SRCDIR'):
        reana_srcdir = os.environ.get('REANA_SRCDIR')
    else:
        reana_srcdir = os.path.join('..')
    try:
        reana_commons_specs_path = os.path.join(reana_srcdir, 'reana-commons', 'reana_commons', 'openapi_specifications')
        if os.path.exists(reana_commons_specs_path):
            if os.path.isfile(output_path):
                shutil.copy(output_path, os.path.join(reana_commons_specs_path, file))
                # copy openapi specs file as well to docs
                shutil.copy(output_path, os.path.join('docs', 'openapi.json'))
    except Exception as e:
        click.echo('Something went wrong, could not copy openapi '
                   'specifications to reana-commons \n{0}'.format(e))
|
def is_all_field_none(self):
    """
    :rtype: bool
    """
    field_values = (
        self._id_,
        self._created,
        self._updated,
        self._billing_account_id,
        self._invoice_notification_preference,
    )
    return all(value is None for value in field_values)
|
def remove_item(self, *args, **kwargs):
    """Pass through to provider methods.

    Tries the basic authoring session first; if that provider raises
    ``InvalidArgument``, retries the call against the assessment-part
    item-design session of the ``assessment_authoring`` sub-package.
    """
    try:
        self._get_provider_session('assessment_basic_authoring_session').remove_item(*args, **kwargs)
    except InvalidArgument:
        # Fall back to the sub-package session for assessment-part items.
        self._get_sub_package_provider_session('assessment_authoring', 'assessment_part_item_design_session').remove_item(*args, **kwargs)
|
def collect_changes(self):
    """Collect file and feature changes.

    Steps
    1. Collects the files that have changed in this pull request as
       compared to a comparison branch.
    2. Categorize these file changes into admissible or inadmissible file
       changes. Admissible file changes solely contribute python files to
       the contrib subdirectory.
    3. Collect features from admissible new files.

    Returns:
        CollectedChanges
    """
    diffs = self._collect_file_diffs()
    feature_diffs, init_diffs, rejected_diffs = self._categorize_file_diffs(diffs)
    feature_info = self._collect_feature_info(feature_diffs)
    return CollectedChanges(diffs, feature_diffs, init_diffs, rejected_diffs, feature_info)
|
def checklat(lat, name='lat'):
    """Makes sure the latitude is inside [-90, 90], clipping close values
    (tolerance 1e-4).

    Parameters
    ----------
    lat : array_like
        latitude
    name : str, optional
        parameter name to use in the exception message

    Returns
    -------
    lat : ndarray or float
        Same as input where values just outside the range have been
        clipped to [-90, 90]

    Raises
    ------
    ValueError
        if any values are too far outside the range [-90, 90]
    """
    # Fast path: everything already in range, return the input unchanged.
    if np.all(np.float64(lat) >= -90) and np.all(np.float64(lat) <= 90):
        return lat
    if np.isscalar(lat):
        if lat > 90 and np.isclose(lat, 90, rtol=0, atol=1e-4):
            return 90
        if lat < -90 and np.isclose(lat, -90, rtol=0, atol=1e-4):
            return -90
        # BUGFIX: a scalar far outside the range previously fell through to
        # the array branch and failed with a TypeError on item assignment;
        # raise the documented ValueError instead.
        raise ValueError(name + ' must be in [-90, 90]')
    # make sure we have an array, not list
    lat = np.float64(lat)
    # Clip values just outside the valid range (within tolerance).
    lat[(lat > 90) & (np.isclose(lat, 90, rtol=0, atol=1e-4))] = 90
    lat[(lat < -90) & (np.isclose(lat, -90, rtol=0, atol=1e-4))] = -90
    if np.all(lat >= -90) and np.all(lat <= 90):
        return lat
    # we haven't returned yet, so raise exception
    raise ValueError(name + ' must be in [-90, 90]')
|
def save(self):
    """Save changes made to object to NIPAP.

    If the object represents a new VRF unknown to NIPAP (attribute `id`
    is `None`) this function maps to the function
    :py:func:`nipap.backend.Nipap.add_vrf` in the backend, used to
    create a new VRF. Otherwise it maps to the function
    :py:func:`nipap.backend.Nipap.edit_vrf` in the backend, used to
    modify the VRF. Please see the documentation for the backend
    functions for information regarding input arguments and return
    values.
    """
    xmlrpc = XMLRPCConnection()
    # Serialize the attributes that the backend accepts.
    data = {'rt': self.rt, 'name': self.name, 'description': self.description, 'tags': [], 'avps': self.avps}
    for tag_name in self.tags:
        data['tags'].append(tag_name)
    if self.id is None:
        # New object, create
        try:
            vrf = xmlrpc.connection.add_vrf({'attr': data, 'auth': self._auth_opts.options})
        except xmlrpclib.Fault as xml_fault:
            # Translate XML-RPC faults into NIPAP exceptions.
            raise _fault_to_exception(xml_fault)
    else:
        # Old object, edit
        try:
            vrfs = xmlrpc.connection.edit_vrf({'vrf': {'id': self.id}, 'attr': data, 'auth': self._auth_opts.options})
        except xmlrpclib.Fault as xml_fault:
            raise _fault_to_exception(xml_fault)
        # Editing by unique id must affect exactly one VRF.
        if len(vrfs) != 1:
            raise NipapError('VRF edit returned %d entries, should be 1.' % len(vrfs))
        vrf = vrfs[0]
    # Refresh object data with attributes from add/edit operation
    VRF.from_dict(vrf, self)
    _cache['VRF'][self.id] = self
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.