signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def url_param(param, default=None):
    """Return a URL query-string or POST ``form_data`` parameter.

    Lets SQL Lab queries reference arbitrary URL parameters: add
    ``?foo=bar`` to the URL and write ``{{ url_param('foo') }}`` in the
    query; the placeholder is substituted at runtime. Parameters also
    carry through from the explore view and dashboards to the query.

    :param param: the parameter to lookup
    :type param: str
    :param default: the value to return in the absence of the parameter
    :type default: str
    """
    # GET: the URL query string takes precedence when present.
    if request.args.get(param):
        return request.args.get(param, default)
    # POST: parameters may be embedded in the serialized form_data blob.
    raw_form = request.form.get('form_data')
    if raw_form:
        url_params = json.loads(raw_form).get('url_params') or {}
        return url_params.get(param, default)
    return default
|
def tobool(obj, default=False):
    '''Coerce ``obj`` to a bool.

    Non-strings are simply cast with ``bool()``. Strings are matched
    case-insensitively against the module-level ``truthy`` and ``falsy``
    word lists. A string matching neither list yields ``default`` — unless
    ``default`` is ``ValueError``, in which case a ``ValueError`` is
    raised instead.
    '''
    if isinstance(obj, bool):
        return obj
    if not isstr(obj):
        return bool(obj)
    lowered = obj.lower()
    if lowered in truthy:
        return True
    if lowered in falsy:
        return False
    # Neither truthy nor falsy: fall back, or raise when requested.
    if default is ValueError:
        raise ValueError('invalid literal for tobool(): %r' % (obj,))
    return default
|
def get_conf_d_files(path):
    """Return the alphabetically sorted list of ``*.conf`` files in *path*.

    :param path: a directory path (searched non-recursively)
    :raises ValueError: when *path* is not an existing directory

    >>> get_conf_d_files('conf/conf.d/')
    ['conf/conf.d/10-base.conf', 'conf/conf.d/99-dev.conf']
    """
    if not os.path.isdir(path):
        raise ValueError("'%s' is not a directory" % path)
    pattern = os.path.join(path, "*.conf")
    matches = sorted(glob.glob(pattern))
    # Drop anything that is not a regular file (e.g. a *.conf directory).
    return [match for match in matches if os.path.isfile(match)]
|
def set_icc_profile(self, profile=None, name='ICC Profile'):
    """Attach an ICC profile to this image.

    The preferred form is a two-item sequence ``(profile_name,
    profile_bytes)``; passing raw bytes/str with *name* as a separate
    argument is also supported.

    :raises Error: when the resulting profile has no name
    """
    # Bare bytes/str payload: pair it with the supplied name.
    if isinstance(profile, (basestring, bytes)):
        # TODO: more check
        profile = [name, profile]
    if not profile[0]:
        raise Error("ICC profile should have a name")
    # The stored name must be bytes; convert text names.
    if not isinstance(profile[0], bytes):
        profile[0] = strtobytes(profile[0])
    self.icc_profile = profile
|
def from_array(array):
    """Deserialize a new MediaGroupMessage from a given dictionary.

    :param array: the serialized message data; ``None`` or an empty
        mapping yields ``None``
    :return: new MediaGroupMessage instance.
    :rtype: MediaGroupMessage
    :raises TypeError: when ``media``, ``chat_id`` or
        ``reply_to_message_id`` has an unsupported type
    """
    if array is None or not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.sendable.input_media import InputMediaPhoto
    from pytgbot.api_types.sendable.input_media import InputMediaVideo
    data = {}
    if isinstance(array.get('media'), InputMediaPhoto):
        data['media'] = InputMediaPhoto.from_array_list(array.get('media'), list_level=1)
    elif isinstance(array.get('media'), InputMediaVideo):
        data['media'] = InputMediaVideo.from_array_list(array.get('media'), list_level=1)
    else:
        raise TypeError('Unknown type, must be one of InputMediaPhoto, InputMediaVideo.')
    # end if
    if array.get('chat_id') is None:
        data['receiver'] = None
    # BUGFIX: the original had an `elif isinstance(array.get('chat_id'), None)`
    # branch here — isinstance() requires a type as its second argument, so it
    # raised TypeError for every non-None chat_id, and its body then "called"
    # None. The branch was removed; the `is None` check above already covers it.
    elif isinstance(array.get('chat_id'), str):
        data['receiver'] = u(array.get('chat_id'))
    elif isinstance(array.get('chat_id'), int):
        data['receiver'] = int(array.get('chat_id'))
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if
    if array.get('reply_to_message_id') is None:
        data['reply_id'] = None
    elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
        data['reply_id'] = DEFAULT_MESSAGE_ID(array.get('reply_to_message_id'))
    elif isinstance(array.get('reply_to_message_id'), int):
        data['reply_id'] = int(array.get('reply_to_message_id'))
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    return MediaGroupMessage(**data)
|
def f1(y, z):
    """F1 score: ``2 * (p * r) / (p + r)``, where p = precision and r = recall.

    Returns 0.0 when precision and recall are both zero — the harmonic
    mean is undefined there, and the original raised ZeroDivisionError.
    """
    _recall = recall(y, z)
    _prec = precision(y, z)
    # Guard the degenerate case: no true/predicted positives at all.
    if _prec + _recall == 0:
        return 0.0
    return 2 * (_prec * _recall) / (_prec + _recall)
|
def prepare_axes(wave, flux, fig=None, ax_lower=(0.1, 0.1), ax_dim=(0.85, 0.65)):
    """Create fig and axes if needed, lay out the axes, and plot flux vs wave.

    :param fig: existing figure to reuse; a new one is created when falsy
    :param ax_lower: (left, bottom) of the axes, in figure fractions
    :param ax_dim: (width, height) of the axes, in figure fractions
    :return: the ``(fig, ax)`` pair
    """
    if not fig:
        fig = plt.figure()
    left, bottom = ax_lower
    width, height = ax_dim
    ax = fig.add_axes([left, bottom, width, height])
    ax.plot(wave, flux)
    return fig, ax
|
def hybrid_forward(self, F, scores, offset):
    """Pick the single lowest-scoring element per sentence from ``scores``.

    Expects beam size 1, i.e. greedy decoding.

    :param scores: Vocabulary scores for the next beam step,
        shape (batch_size * beam_size, target_vocabulary_size).
    :param offset: Array added to the hypothesis indices for offsetting in
        batch decoding.
    :return: The row indices, column indices and values of the smallest
        items in the matrix.
    """
    # Column of the per-row minimum is the greedily chosen next word.
    best_word_indices = F.cast(F.argmin(scores, axis=1), dtype='int32')
    picked = F.pick(scores, best_word_indices, axis=1)
    picked = F.reshape(picked, shape=(-1, 1))
    # With a single hypothesis per sentence, the best hypothesis indices
    # are just the plain offset.
    return offset, best_word_indices, picked
|
def find_mappable(*axes):
    """Return the most recently added mappable layer in the given axes.

    For each axes, the ``collections`` list is checked first, then
    ``images``; the last (newest) entry of the first non-empty list wins.

    Parameters
    ----------
    *axes : `~matplotlib.axes.Axes`
        one or more axes to search for a mappable

    Raises
    ------
    ValueError
        when no mappable layer is found on any of the axes
    """
    for candidate in axes:
        for attr in ('collections', 'images'):
            # Missing attribute or empty list both mean "keep looking".
            layers = getattr(candidate, attr, None)
            if layers:
                return layers[-1]
    raise ValueError("Cannot determine mappable layer on any axes "
                     "for this colorbar")
|
def create_network_interface(SubnetId=None, Description=None, PrivateIpAddress=None, Groups=None, PrivateIpAddresses=None, SecondaryPrivateIpAddressCount=None, Ipv6Addresses=None, Ipv6AddressCount=None, DryRun=None):
    """Create a network interface in the specified subnet.

    For more information about network interfaces, see "Elastic Network
    Interfaces" in the Amazon Virtual Private Cloud User Guide, and the
    AWS API documentation for ``CreateNetworkInterface``.

    :type SubnetId: string
    :param SubnetId: [REQUIRED] The ID of the subnet to associate with the
        network interface.
    :type Description: string
    :param Description: A description for the network interface.
    :type PrivateIpAddress: string
    :param PrivateIpAddress: The primary private IPv4 address. When
        omitted, Amazon EC2 selects one from the subnet's IPv4 CIDR range.
        If specified, it must not also appear as primary in
        ``PrivateIpAddresses``.
    :type Groups: list
    :param Groups: The IDs of one or more security groups (strings).
    :type PrivateIpAddresses: list
    :param PrivateIpAddresses: One or more private IPv4 addresses, each a
        dict with ``PrivateIpAddress`` (string, required) and ``Primary``
        (boolean) keys; only one address may be designated primary.
    :type SecondaryPrivateIpAddressCount: integer
    :param SecondaryPrivateIpAddressCount: Number of secondary private
        IPv4 addresses EC2 should select from the subnet range; cannot be
        combined with more than one address in ``PrivateIpAddresses``.
        Per-interface limits vary by instance type.
    :type Ipv6Addresses: list
    :param Ipv6Addresses: Specific IPv6 addresses (dicts with an
        ``Ipv6Address`` string) from the subnet's IPv6 CIDR block; cannot
        be combined with ``Ipv6AddressCount``.
    :type Ipv6AddressCount: integer
    :param Ipv6AddressCount: Number of IPv6 addresses EC2 should assign
        automatically from the subnet range; 0 overrides a subnet's
        AssignIpv6AddressOnCreation setting.
    :type DryRun: boolean
    :param DryRun: Check required permissions without making the request;
        yields DryRunOperation or UnauthorizedOperation accordingly.
    :rtype: dict
    :return: ``{'NetworkInterface': {...}}`` describing the created
        interface (id, subnet/VPC ids, addresses, groups, attachment,
        association, tags, interface type, ...).
    """
    # Auto-generated service stub: the real request is dispatched by the
    # client layer, so the body is intentionally empty.
    pass
|
def generate_id(cls, prefix="pelix-"):
    """Generates a random MQTT client ID.

    :param prefix: Client ID prefix (truncated to 8 chars)
    :return: A client ID of 22 or 23 characters
    """
    # Normalize a falsy prefix to "", truncate long ones to 8 characters.
    prefix = (prefix or "")[:8]
    # Fill the remaining space with random bytes rendered as lowercase
    # hex: two characters per byte.
    nb_bytes = (23 - len(prefix)) // 2
    # bytearray yields ints when iterated on both Python 2 and Python 3.
    suffix = "".join("{0:02x}".format(value)
                     for value in bytearray(os.urandom(nb_bytes)))
    return "{0}{1}".format(prefix, suffix)
|
def _MaybePurgeOrphanedData ( self , event ) :
"""Maybe purge orphaned data due to a TensorFlow crash .
When TensorFlow crashes at step T + O and restarts at step T , any events
written after step T are now " orphaned " and will be at best misleading if
they are included in TensorBoard .
This logic attempts to determine if there is orphaned data , and purge it
if it is found .
Args :
event : The event to use as a reference , to determine if a purge is needed ."""
|
if not self . purge_orphaned_data :
return
# # Check if the event happened after a crash , and purge expired tags .
if self . file_version and self . file_version >= 2 : # # If the file _ version is recent enough , use the SessionLog enum
# # to check for restarts .
self . _CheckForRestartAndMaybePurge ( event )
else : # # If there is no file version , default to old logic of checking for
# # out of order steps .
self . _CheckForOutOfOrderStepAndMaybePurge ( event )
# After checking , update the most recent summary step and wall time .
if event . HasField ( 'summary' ) :
self . most_recent_step = event . step
self . most_recent_wall_time = event . wall_time
|
def resize(self, width=None):
    """Resizes image to fit inside terminal.

    Called by the constructor automatically.

    :param width: target width in columns; ``None`` fits the image to the
        terminal, and a percentage string such as ``'50%'`` is also
        accepted.
    """
    (iw, ih) = self.size
    if width is None:
        width = min(iw, utils.term.width)
    elif isinstance(width, basestring):
        # BUGFIX: the original built {int: 'N%'} and then indexed it with
        # the percentage *string*, which always raised KeyError. Map
        # 'N%' -> N instead so that e.g. '50%' resolves to 50.
        percents = dict([('%s%%' % (pct), pct) for pct in range(101)])
        width = percents[width]
        # NOTE(review): the resolved percent is used directly as a column
        # count, matching the original's apparent data flow — confirm
        # whether it should instead scale against the terminal width.
    # Preserve aspect ratio; halve the height because terminal character
    # cells are roughly twice as tall as they are wide.
    height = int(float(ih) * (float(width) / float(iw)))
    height //= 2
    self.img = self.img.resize((width, height))
|
def cli(ctx, organism="", sequence=""):
    """Get the features for an organism / sequence.

    Output:

        A standard apollo feature dictionary ({"features": [{...}]})
    """
    annotations = ctx.gi.annotations
    return annotations.get_features(organism=organism, sequence=sequence)
|
def _pull_assemble_error_status ( logs ) :
'''Given input in this form : :
u ' { " status " : " Pulling repository foo / ubuntubox " } :
" image ( latest ) from foo / . . .
rogress " : " complete " , " id " : " 2c80228370c9 " } '
construct something like that ( load JSON data is possible ) : :
[ u ' { " status " : " Pulling repository foo / ubuntubox " ' ,
{ " status " : " Download " , " progress " : " complete " , " id " : " 2c80228370c9 " } ]'''
|
comment = 'An error occurred pulling your image'
try :
for err_log in logs :
if isinstance ( err_log , dict ) :
if 'errorDetail' in err_log :
if 'code' in err_log [ 'errorDetail' ] :
msg = '\n{0}\n{1}: {2}' . format ( err_log [ 'error' ] , err_log [ 'errorDetail' ] [ 'code' ] , err_log [ 'errorDetail' ] [ 'message' ] )
else :
msg = '\n{0}\n{1}' . format ( err_log [ 'error' ] , err_log [ 'errorDetail' ] [ 'message' ] , )
comment += msg
except Exception as e :
comment += "%s" % e
return comment
|
def use(node):
    """Register *node* in the fabric environment (roledefs, nodes, hosts)."""
    # The role is the second dash-separated token of the "Name" tag.
    try:
        role = node.tags.get("Name").split('-')[1]
        env.roledefs[role] += [ip(node)]
    except IndexError:
        # Name tag has no role component; register the host anyway.
        pass
    env.nodes += [node]
    env.hosts += [ip(node)]
|
def get_all_spaces(self, start=0, limit=500):
    """Get all spaces with the provided limit.

    :param start: OPTIONAL: The start point of the collection to return.
        Default: None (0).
    :param limit: OPTIONAL: The limit of the number of pages to return;
        this may be restricted by fixed system limits. Default: 500
    """
    params = {}
    # Zero/None values are omitted so the server defaults apply.
    if limit:
        params['limit'] = limit
    if start:
        params['start'] = start
    response = self.get('rest/api/space', params=params) or {}
    return response.get('results')
|
def _get_triplet_value ( self , graph , identity , rdf_type ) :
"""Get a value from an RDF triple"""
|
value = graph . value ( subject = identity , predicate = rdf_type )
return value . toPython ( ) if value is not None else value
|
def gdcsreporter(self, analysistype='GDCS'):
    """Creates a report of the GDCS results.

    Writes ``<analysistype>.csv`` to ``self.reportpath`` with one row per
    sample, grouped by closest RefSeq genus, listing per-gene identity and
    depth plus an overall pass (+) / indeterminate (?) / fail (-) call.
    Samples may also be flagged ``incomplete`` on ``sample.general``.

    :param analysistype: The variable to use when accessing attributes in
        the metadata object
    """
    logging.info('Creating {} report'.format(analysistype))
    # Initialise lists to store all the GDCS genes, and genera in the analysis
    gdcs = list()
    genera = list()
    # First pass: decide which samples can be reported on, and collect the
    # union of GDCS genes and the set of genera present.
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            if os.path.isdir(sample[analysistype].targetpath):
                # Update the fai dict with all the genes in the analysis,
                # rather than just those with baited hits
                self.gdcs_fai(sample)
                sample[analysistype].createreport = True
                # Determine which genera are present in the analysis
                if sample.general.closestrefseqgenus not in genera:
                    genera.append(sample.general.closestrefseqgenus)
                try:
                    # Add all the GDCS genes to the list
                    for gene in sorted(sample[analysistype].faidict):
                        if gene not in gdcs:
                            gdcs.append(gene)
                except AttributeError:
                    # No faidict: nothing to report for this sample.
                    sample[analysistype].createreport = False
            else:
                sample[analysistype].createreport = False
        else:
            # No assembly at all: exclude from the report and flag it.
            sample[analysistype].createreport = False
            sample.general.incomplete = True
    header = 'Strain,Genus,Matches,MeanCoverage,Pass/Fail,{},\n'.format(','.join(gdcs))
    data = str()
    # Second pass: build the CSV body. Samples are grouped by the closest
    # refseq genus, e.g. all samples with the same genus will be grouped
    # together in the report.
    with open(os.path.join(self.reportpath, '{}.csv'.format(analysistype)), 'w') as report:
        for genus in genera:
            for sample in self.runmetadata.samples:
                if sample.general.closestrefseqgenus == genus:
                    if sample[analysistype].createreport:
                        sample[analysistype].totaldepth = list()
                        # Add the sample to the report if it matches the current genus
                        # if genus == sample.general.closestrefseqgenus:
                        data += '{},{},'.format(sample.name, genus)
                        # Initialise a variable to store the number of GDCS genes that were matched
                        count = 0
                        # As I want the count to be in the report before all the gene results, this string will
                        # store the specific sample information, and will be added to data once count is known
                        specific = str()
                        for gene in gdcs:
                            # As there are different genes present in the GDCS databases for each organism of
                            # interest, genes that did not match because they're absent in the specific database are
                            # indicated using an X
                            if gene not in [result for result in sample[analysistype].faidict]:
                                specific += 'X,'
                            else:
                                try:
                                    # Report the necessary information for each gene result
                                    identity = sample[analysistype].results[gene]
                                    specific += '{}% ({} +/- {}),'.format(identity,
                                                                          sample[analysistype].avgdepth[gene],
                                                                          sample[analysistype].standarddev[gene])
                                    sample[analysistype].totaldepth.append(float(sample[analysistype].avgdepth[gene]))
                                    count += 1
                                # If the gene was missing from the results attribute, add a - to the cell
                                except (KeyError, AttributeError):
                                    sample.general.incomplete = True
                                    specific += '-,'
                        # Calculate the mean depth of the genes and the standard deviation
                        sample[analysistype].mean = numpy.mean(sample[analysistype].totaldepth)
                        sample[analysistype].stddev = numpy.std(sample[analysistype].totaldepth)
                        # Determine whether the sample passes the necessary quality criteria:
                        #   +: all GDCS matched, mean coverage greater than 20X coverage;
                        #   ?: indeterminate value;
                        #   -: fail value
                        # Allow one missing GDCS to still be considered a pass
                        if count >= len(sample[analysistype].faidict) - 1:
                            if sample[analysistype].mean > 20:
                                quality = '+'
                            else:
                                quality = '?'
                                sample.general.incomplete = True
                        else:
                            quality = '-'
                            sample.general.incomplete = True
                        # Add the count, mean depth with standard deviation, the pass/fail determination,
                        # and the total number of GDCS genes as well as the results
                        data += '{hits}/{total},{mean} +/- {std},{fail},{gdcs}\n' \
                            .format(hits=str(count),
                                    total=len(sample[analysistype].faidict),
                                    mean='{:.2f}'.format(sample[analysistype].mean),
                                    std='{:.2f}'.format(sample[analysistype].stddev),
                                    fail=quality,
                                    gdcs=specific)
                    # # Any samples with a best assembly of 'NA' are considered incomplete.
                    # else:
                    #     data += '{},{},,,-\n'.format(sample.name, sample.general.closestrefseqgenus)
                    #     sample.general.incomplete = True
                elif sample.general.closestrefseqgenus == 'NA':
                    # Unknown genus: emit the bare sample name and flag it.
                    data += '{}\n'.format(sample.name)
                    sample.general.incomplete = True
        # Write the header and data to file
        report.write(header)
        report.write(data)
|
def getAllKws(self):
    """Extract all keywords into two categories.

    kws_ele: magnetic elements
    kws_bl:  beamline elements

    :return: tuple ``(kws_ele, kws_bl)``
    """
    kws_ele = []
    kws_bl = []
    # '_prefixstr' and '_epics' are bookkeeping entries, not elements.
    bookkeeping = ('_prefixstr', '_epics')
    for ele in self.all_elements:
        if ele in bookkeeping:
            continue
        if self.getElementType(ele).lower() == u'beamline':
            kws_bl.append(ele)
        else:
            kws_ele.append(ele)
    return tuple((kws_ele, kws_bl))
|
def detect_worktree(cls, binary='git', subdir=None):
    """Detect the git working tree above cwd and return it; else, return None.

    :param string binary: The path to the git binary to use, 'git' by default.
    :param string subdir: The path to start searching for a git repo from.
    :returns: path to the directory where the git working tree is rooted.
    :rtype: string
    """
    # TODO(John Sirois): This is only used as a factory for a Git instance in
    # pants.base.build_environment.get_scm, encapsulate in a true factory method.
    rev_parse = [binary, 'rev-parse', '--show-toplevel']
    try:
        # Run from subdir when given, so nested repos resolve correctly.
        if subdir:
            with pushd(subdir):
                process, out = cls._invoke(rev_parse)
        else:
            process, out = cls._invoke(rev_parse)
        cls._check_result(rev_parse, process.returncode, raise_type=Scm.ScmException)
    except Scm.ScmException:
        # Not inside a git working tree (or git failed): signal with None.
        return None
    return cls._cleanse(out)
|
def _retry(self, context, backoff):
    '''A function which determines whether and how to retry.

    :param ~azure.storage.models.RetryContext context:
        The retry context. This contains the request, response, and other
        data which can be used to determine whether or not to retry.
    :param function() backoff:
        A function which returns the backoff time if a retry is to be
        performed.
    :return:
        An integer indicating how long to wait before retrying the request,
        or None to indicate no retry should be performed.
    :rtype: int or None
    '''
    # If the context does not contain a count parameter, this request has not
    # been retried yet. Add the count parameter to track the number of retries.
    if not hasattr(context, 'count'):
        context.count = 0
    # Determine whether to retry, and if so increment the count, modify the
    # request as desired, and return the backoff.
    if self._should_retry(context):
        backoff_interval = backoff(context)
        context.count += 1
        # If retry to secondary is enabled, attempt to change the host if the
        # request allows it.
        if self.retry_to_secondary:
            self._set_next_host_location(context)
        # Rewind the request body if it is a stream (duck-typed via 'read').
        if hasattr(context.request.body, 'read'):
            if context.body_position is None:
                # No position was saved, so a retry could not replay the
                # body; give up on retrying.
                return None
            else:
                try:
                    # Attempt to rewind the body to the initial position.
                    context.request.body.seek(context.body_position, SEEK_SET)
                except UnsupportedOperation:
                    # Body is not seekable, so a retry would not work.
                    return None
        return backoff_interval
    return None
|
def nse(sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the efficiency criteria after Nash & Sutcliffe.

    If the simulated values predict the observed values only as well as
    the average observed value (regarding the mean square error), the NSE
    value is zero:

    >>> from hydpy import nse
    >>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
    0.0
    >>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0])
    0.0

    For worse and better simulated values the NSE is negative or
    positive, respectively:

    >>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0])
    -3.0
    >>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
    0.5

    The highest possible value is one:

    >>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])
    1.0

    See the documentation on function |prepare_arrays| for some
    additional instructions for use of function |nse|.
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    # 1 - (residual sum of squares) / (spread of the observations).
    residual = numpy.sum((sim - obs) ** 2)
    spread = numpy.sum((obs - numpy.mean(obs)) ** 2)
    return 1. - residual / spread
|
def move_back(columns=1, file=sys.stdout):
    """Move the cursor back a number of columns.

    Emits ``Esc[<columns>D``: moves the cursor back by the specified
    number of columns without changing lines. If the cursor is already in
    the leftmost column, ANSI.SYS ignores this sequence.
    """
    move.back(columns).write(file=file)
|
def read_pestpp_runstorage(filename, irun=0, with_metadata=False):
    """read pars and obs from a specific run in a pest++ serialized run
    storage file into pandas.DataFrame(s)

    Parameters
    ----------
    filename : str
        the name of the run storage file
    irun : int
        the run id to process. If 'all', then all runs are read.
        Default is 0
    with_metadata : bool
        flag to return run stats and info txt as well

    Returns
    -------
    par_df : pandas.DataFrame
        parameter information
    obs_df : pandas.DataFrame
        observation information
    metadata : pandas.DataFrame
        run status and info txt (only when ``with_metadata`` is True).
    """
    # Fixed binary header: four int64 counters describing the file layout.
    header_dtype = np.dtype([("n_runs", np.int64), ("run_size", np.int64),
                             ("p_name_size", np.int64), ("o_name_size", np.int64)])
    try:
        irun = int(irun)
    except:
        # Non-integer run ids are only legal as the literal string 'all'.
        if irun.lower() == "all":
            irun = irun.lower()
        else:
            raise Exception("unrecognized 'irun': should be int or 'all', not '{0}'".format(irun))

    def status_str(r_status):
        # Map the stored int8 run status onto a human-readable label.
        if r_status == 0:
            return "not completed"
        if r_status == 1:
            return "completed"
        if r_status == -100:
            return "canceled"
        else:
            return "failed"

    assert os.path.exists(filename)
    f = open(filename, "rb")
    header = np.fromfile(f, dtype=header_dtype, count=1)
    p_name_size, o_name_size = header["p_name_size"][0], header["o_name_size"][0]
    # Names are stored as a single NUL-separated blob; the trailing empty
    # token after the final NUL is dropped by the [:-1] slice.
    par_names = struct.unpack('{0}s'.format(p_name_size), f.read(p_name_size))[0].strip().lower().decode().split('\0')[:-1]
    obs_names = struct.unpack('{0}s'.format(o_name_size), f.read(o_name_size))[0].strip().lower().decode().split('\0')[:-1]
    n_runs, run_size = header["n_runs"][0], header["run_size"][0]
    # Runs are fixed-size records starting right after the name blobs.
    run_start = f.tell()

    def _read_run(irun):
        # Seek directly to the irun-th fixed-size record.
        f.seek(run_start + (irun * run_size))
        r_status = np.fromfile(f, dtype=np.int8, count=1)
        info_txt = struct.unpack("41s", f.read(41))[0].strip().lower().decode()
        # NOTE(review): the asymmetric slices ([1:] for pars, [:-1] for obs)
        # each read one extra float64 and discard it at opposite ends —
        # presumably an extra field between the blocks; confirm against the
        # pest++ RunStorage binary layout before changing.
        par_vals = np.fromfile(f, dtype=np.float64, count=len(par_names) + 1)[1:]
        obs_vals = np.fromfile(f, dtype=np.float64, count=len(obs_names) + 1)[:-1]
        par_df = pd.DataFrame({"parnme": par_names, "parval1": par_vals})
        par_df.index = par_df.pop("parnme")
        obs_df = pd.DataFrame({"obsnme": obs_names, "obsval": obs_vals})
        obs_df.index = obs_df.pop("obsnme")
        return r_status, info_txt, par_df, obs_df

    if irun == "all":
        par_dfs, obs_dfs = [], []
        r_stats, txts = [], []
        for irun in range(n_runs):
            # print(irun)
            r_status, info_txt, par_df, obs_df = _read_run(irun)
            par_dfs.append(par_df)
            obs_dfs.append(obs_df)
            r_stats.append(r_status)
            txts.append(info_txt)
        # Stack the per-run single-column frames into run-indexed tables.
        par_df = pd.concat(par_dfs, axis=1).T
        par_df.index = np.arange(n_runs)
        obs_df = pd.concat(obs_dfs, axis=1).T
        obs_df.index = np.arange(n_runs)
        meta_data = pd.DataFrame({"r_status": r_stats, "info_txt": txts})
        meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
    else:
        # NOTE(review): this allows irun == n_runs, which would seek past
        # the last record; `irun < n_runs` looks intended — confirm.
        assert irun <= n_runs
        r_status, info_txt, par_df, obs_df = _read_run(irun)
        meta_data = pd.DataFrame({"r_status": [r_status], "info_txt": [info_txt]})
        meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
    f.close()
    if with_metadata:
        return par_df, obs_df, meta_data
    else:
        return par_df, obs_df
|
def filepaths(self) -> List[str]:
    """Absolute path names of the files contained in the current
    working directory.

    Each entry is ``os.path.join(currentpath, name)`` for every name in
    |filenames|; per the original contract, file names starting with
    underscores are excluded there.
    """
    directory = self.currentpath
    return [os.path.join(directory, filename) for filename in self.filenames]
|
def initialize(self, symbolic_vm: LaserEVM):
    """Initializes the mutation pruner.

    Introduces hooks for SSTORE operations.

    :param symbolic_vm: the symbolic VM to attach the hooks to
    :return:
    """
    @symbolic_vm.pre_hook("SSTORE")
    def mutator_hook(global_state: GlobalState):
        # Mark any state that performs a storage write as "mutated".
        global_state.annotate(MutationAnnotation())

    @symbolic_vm.laser_hook("add_world_state")
    def world_state_filter_hook(global_state: GlobalState):
        # Keep states that may receive value: if the path constraints plus
        # callvalue > 0 are not provably unsatisfiable, do not prune.
        if And(*global_state.mstate.constraints[:] + [global_state.environment.callvalue > symbol_factory.BitVecVal(0, 256)]).is_false:
            return
        # Contract-creation transactions are always kept.
        if isinstance(global_state.current_transaction, ContractCreationTransaction):
            return
        # Prune world states whose execution never mutated storage.
        if len(list(global_state.get_annotations(MutationAnnotation))) == 0:
            raise PluginSkipWorldState
|
def register(self, plugin):
    """Make a plugin known to the CMS.

    :param plugin: The plugin class, deriving from :class:`ContentPlugin`.
    :type plugin: :class:`ContentPlugin`

    The plugin will be instantiated once, just like Django does this with
    :class:`~django.contrib.admin.ModelAdmin` classes.
    If a plugin is already registered, this will raise a
    :class:`PluginAlreadyRegistered` exception.
    """
    # Duck-typing does not suffice here; fail fast with explicit checks to
    # avoid hard-to-debug problems later.
    assert issubclass(plugin, ContentPlugin), "The plugin must inherit from `ContentPlugin`"
    assert plugin.model, "The plugin has no model defined"
    assert issubclass(plugin.model, ContentItem), "The plugin model must inherit from `ContentItem`"
    assert issubclass(plugin.form, ContentItemForm), "The plugin form must inherit from `ContentItemForm`"

    # Using the class name here -- no instance has been created yet.
    name = plugin.__name__.lower()
    if name in self.plugins:
        raise PluginAlreadyRegistered("{0}: a plugin with this name is already registered".format(name))

    # Two plugins on the exact same model would break ContentItem.plugin and
    # the frontend code that keys on inline-model names instead of plugins;
    # reuse code via proxy models instead.
    if plugin.model in self._name_for_model:
        existing_plugin = self.plugins[self._name_for_model[plugin.model]]
        raise ModelAlreadyRegistered("Can't register the model {0} to {2}, it's already registered to {1}!".format(plugin.model.__name__, existing_plugin.name, plugin.__name__))

    # Make a single static instance, similar to ModelAdmin.
    instance = plugin()
    self.plugins[name] = instance
    self._name_for_model[plugin.model] = name
    # Track reverse for the model -> plugin link; only update the lazy
    # ContentType index if it was already built.
    if self._name_for_ctype_id is not None:
        self._name_for_ctype_id[plugin.type_id] = name
    return plugin
|
def remove_all_timers(self):
    """Remove all waiting timers and terminate any blocking threads."""
    with self.lock:
        # Cancel the running realtime timer first so nothing fires while
        # the bookkeeping below is reset.
        pending = self.rtimer
        if pending is not None:
            pending.cancel()
        self.timers = {}
        self.heap = []
        self.rtimer = None
        self.expiring = False
|
def lockToColumn(self, index):
    """Sets the column that the tree view will lock to.  If None is supplied,
    then locking will be removed.

    :param index: <int> || None
    """
    self._lockColumn = index
    if index is None:
        # Locking disabled: tear down the mirror view.
        self.__destroyLockedView()
        return
    else:
        if not self._lockedView:
            # Build a second tree view that mirrors this one and sits over
            # the locked columns.  It shares this view's model, selection
            # model and delegate so both views stay in sync.
            view = QtGui.QTreeView(self.parent())
            view.setModel(self.model())
            view.setSelectionModel(self.selectionModel())
            view.setItemDelegate(self.itemDelegate())
            view.setFrameShape(view.NoFrame)
            # The mirror never shows its own scrollbars; scrolling is
            # forwarded from this view (see valueChanged hookup below).
            view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
            view.setRootIsDecorated(self.rootIsDecorated())
            view.setUniformRowHeights(True)
            view.setFocusProxy(self)
            view.header().setFocusProxy(self.header())
            view.setStyleSheet(self.styleSheet())
            view.setAutoScroll(False)
            view.setSortingEnabled(self.isSortingEnabled())
            view.setPalette(self.palette())
            view.move(self.x(), self.y())
            # Uniform row heights + no auto scroll on both views keeps the
            # row geometry of the two views identical.
            self.setAutoScroll(False)
            self.setUniformRowHeights(True)
            # Mirror expand/collapse state in both directions.
            view.collapsed.connect(self.collapse)
            view.expanded.connect(self.expand)
            view.expanded.connect(self.__updateLockedView)
            view.collapsed.connect(self.__updateLockedView)
            # Copy header resize modes and keep section sizes synchronized.
            view_head = view.header()
            for i in range(self.columnCount()):
                view_head.setResizeMode(i, self.header().resizeMode(i))
            view.header().sectionResized.connect(self.__updateStandardSection)
            self.header().sectionResized.connect(self.__updateLockedSection)
            # Forward vertical scrolling from this view to the mirror.
            vbar = view.verticalScrollBar()
            self.verticalScrollBar().valueChanged.connect(vbar.setValue)
            self._lockedView = view
        # Reposition/resize the mirror for the (possibly new) lock column.
        self.__updateLockedView()
|
def indentation(node):
    """Returns the indentation for this node

    Iff a node is in a suite, then it has indentation."""
    # Climb the tree until we are a direct child of a suite (or hit the root).
    while node.parent is not None and node.parent.type != syms.suite:
        node = node.parent
    if node.parent is None:
        return u""
    # The first three children of a suite are NEWLINE, INDENT, (some other
    # node).  INDENT.value contains the indentation for this suite, and
    # anything after (some other node) carries the indentation as its prefix.
    if node.type == token.INDENT:
        return node.value
    prev = node.prev_sibling
    if prev is not None and prev.type == token.INDENT:
        return prev.value
    if prev is None:
        return u""
    return node.prefix
|
def select_coins(target, fee, output_size, min_change, *, absolute_fee=False, consolidate=False, unspents):
    '''Implementation of Branch-and-Bound coin selection defined in Erhart's
    Master's thesis "An Evaluation of Coin Selection Strategies":
    http://murch.one/wp-content/uploads/2016/11/erhardt2016coinselection.pdf

    :param target: The total amount of the outputs in a transaction for which
                   we try to select the inputs to spend.
    :type target: ``int``
    :param fee: The number of satoshi per byte for the fee of the transaction.
    :type fee: ``int``
    :param output_size: A list containing as int the sizes of each output.
    :type output_size: ``list`` of ``int``
    :param min_change: The minimum amount of satoshis allowed for the
                       return/change address if there is no perfect match.
    :type min_change: ``int``
    :param absolute_fee: Whether or not the parameter ``fee`` should be
                         repurposed to denote the exact fee amount.
    :type absolute_fee: ``bool``
    :param consolidate: Whether or not the Branch-and-Bound process for finding
                        a perfect match should be skipped and all unspents
                        used directly.
    :type consolidate: ``bool``
    :param unspents: The UTXOs to use as inputs.
    :type unspents: ``list`` of :class:`~bit.network.meta.Unspent`
    :raises InsufficientFunds: If ``unspents`` does not contain enough balance
                               to allow spending matching the target.
    :returns: Tuple of (selected inputs, remaining change in satoshi).
    '''
    # The maximum number of tries for Branch-and-Bound:
    BNB_TRIES = 1000000
    # COST_OF_OVERHEAD excludes the return address of output_size (last element).
    COST_OF_OVERHEAD = (8 + sum(output_size[:-1]) + 1) * fee

    def branch_and_bound(d, selected_coins, effective_value, target, fee, sorted_unspents):  # pragma: no cover
        # Depth-first search for an input set whose effective value lands in
        # [target_to_match, target_to_match + match_range] -- a "perfect
        # match" that needs no change output.
        nonlocal COST_OF_OVERHEAD, BNB_TRIES
        BNB_TRIES -= 1
        # Just typical estimate values:
        COST_PER_INPUT = 148 * fee
        COST_PER_OUTPUT = 34 * fee
        # The target we want to match includes cost of overhead for the transaction.
        target_to_match = target + COST_OF_OVERHEAD
        # Allowing to pay fee for a whole input and output is rationally
        # correct, but increases the fee-rate dramatically for only few inputs.
        match_range = COST_PER_INPUT + COST_PER_OUTPUT
        # We could allow to spend up to X% more on the fees if we can find a
        # perfect match:
        # match_range += int(0.1 * fee * sum(u.vsize for u in selected_coins))
        # Check for solution and cut criteria:
        if effective_value > target_to_match + match_range:
            return []
        elif effective_value >= target_to_match:
            return selected_coins
        elif BNB_TRIES <= 0:
            return []
        elif d >= len(sorted_unspents):
            return []
        else:
            # Randomly explore next branch:
            binary_random = randint(0, 1)
            if binary_random:
                # Explore inclusion branch first, else omission branch:
                effective_value_new = effective_value + sorted_unspents[d].amount - fee * sorted_unspents[d].vsize
                with_this = branch_and_bound(d + 1, selected_coins + [sorted_unspents[d]], effective_value_new, target, fee, sorted_unspents)
                if with_this != []:
                    return with_this
                else:
                    without_this = branch_and_bound(d + 1, selected_coins, effective_value, target, fee, sorted_unspents)
                    return without_this
            else:
                # As above but explore omission branch first:
                without_this = branch_and_bound(d + 1, selected_coins, effective_value, target, fee, sorted_unspents)
                if without_this != []:
                    return without_this
                else:
                    effective_value_new = effective_value + sorted_unspents[d].amount - fee * sorted_unspents[d].vsize
                    with_this = branch_and_bound(d + 1, selected_coins + [sorted_unspents[d]], effective_value_new, target, fee, sorted_unspents)
                    return with_this

    sorted_unspents = sorted(unspents, key=lambda u: u.amount, reverse=True)
    selected_coins = []
    if not consolidate:
        # Trying to find a perfect match using Branch-and-Bound:
        selected_coins = branch_and_bound(d=0, selected_coins=[], effective_value=0, target=target, fee=fee, sorted_unspents=sorted_unspents)
    remaining = 0
    # Fallback: If no match, Single Random Draw with return address:
    if selected_coins == []:
        unspents = unspents.copy()
        # Since we have no information on the user's spending habit it is
        # best practice to randomly select UTXOs until we have enough.
        if not consolidate:
            # To have a deterministic way of inserting inputs when
            # consolidating, we only shuffle the unspents otherwise.
            shuffle(unspents)
        # Bug fix: ``estimated_fee`` must be defined before the loop so the
        # exhaustion branch below raises InsufficientFunds (instead of a
        # NameError) when ``unspents`` is empty to begin with.
        estimated_fee = 0
        while unspents:
            selected_coins.append(unspents.pop(0))
            estimated_fee = estimate_tx_fee(sum(u.vsize for u in selected_coins), len(selected_coins), sum(output_size), len(output_size), fee)
            estimated_fee = fee if absolute_fee else estimated_fee
            remaining = sum(u.amount for u in selected_coins) - target - estimated_fee
            if remaining >= min_change and (not consolidate or len(unspents) == 0):
                break
        else:
            # Loop exhausted all unspents without covering target + change.
            raise InsufficientFunds('Balance {} is less than {} (including fee).'.format(sum(u.amount for u in selected_coins), target + min_change + estimated_fee))
    return selected_coins, remaining
|
def load_tool(tool, tooldir=None):
    '''load_tool: import a Python module, optionally using several directories.
    @param tool [string]: name of tool to import.
    @param tooldir [list]: directories to look for the tool.
    @return: the loaded module.

    Warning: this function is not thread-safe: plays with sys.path,
    so must run in sequence.
    '''
    if not tooldir:
        tooldir = []
    else:
        assert isinstance(tooldir, list)
        # Temporarily prepend the tool directories so __import__ finds them.
        sys.path = tooldir + sys.path
    try:
        return __import__(tool)
    finally:
        # Always undo the sys.path extension, even if the import failed.
        for directory in tooldir:
            sys.path.remove(directory)
|
def setCurrentSchema(self, schema):
    """Sets the index for this combobox to the inputed schema instance.

    :param schema: <orb.TableSchema>
    :return: <bool> success
    """
    # EAFP: list.index raises ValueError when the schema is unknown.
    try:
        position = self._schemas.index(schema)
    except ValueError:
        return False
    self.setCurrentIndex(position)
    return True
|
def highPassFilter(self, threshold):
    '''remove all low frequencies by setting a centred block of the
    Fourier transformation to zero; its half-extent along each axis is
    ``threshold`` times that axis' size
    threshold = 0...1'''
    if not threshold:
        # threshold 0/None -> nothing to remove
        return
    rows, cols = self.img.shape
    tx = int(cols * threshold)  # horizontal (column) half-width
    ty = int(rows * threshold)  # vertical (row) half-height
    # middle:
    crow, ccol = rows // 2, cols // 2
    # Zero the low-frequency block around the centre.
    # Bug fix: the row slice must use the row extent (ty) and the column
    # slice the column extent (tx); these were swapped before, zeroing the
    # wrong region for non-square images.
    self.fshift[crow - ty:crow + ty, ccol - tx:ccol + tx] = 0
|
def run(self, change):
    """runs the report format instances in this reporter. Will call setup
    if it hasn't been called already"""
    # Lazily initialize the formats on first use.
    if self._formats is None:
        self.setup()
    current_entry = self.entry
    for report_format in self._formats:
        report_format.run(change, current_entry)
    self.clear()
|
def energy_minimize(self, forcefield='UFF', steps=1000, **kwargs):
    """Perform an energy minimization on a Compound.

    Default behavior utilizes Open Babel (http://openbabel.org/docs/dev/)
    to perform an energy minimization/geometry optimization on a Compound
    by applying a generic force field.

    Can also utilize OpenMM (http://openmm.org/) to energy minimize after
    atomtyping a Compound using Foyer (https://github.com/mosdef-hub/foyer)
    to apply a forcefield XML file that contains valid SMARTS strings.

    This function is primarily intended to be used on smaller components,
    with sizes on the order of 10's to 100's of particles, as the energy
    minimization scales poorly with the number of particles.

    Parameters
    ----------
    steps : int, optional, default=1000
        The number of optimization iterations.
    forcefield : str, optional, default='UFF'
        The generic force field to apply to the Compound for minimization.
        Valid options are 'MMFF94', 'MMFF94s', 'UFF', 'GAFF', and 'Ghemical'.
        Please refer to the Open Babel documentation
        (http://open-babel.readthedocs.io/en/latest/Forcefields/Overview.html)
        when considering your choice of force field.
        Utilizing OpenMM for energy minimization requires a forcefield XML
        file with valid SMARTS strings; please refer to
        http://docs.openmm.org/7.0.0/userguide/application.html#creating-force-fields
        for more information.

    Keyword Arguments
    -----------------
    algorithm : str, optional, default='cg'
        The energy minimization algorithm (for _energy_minimize_openbabel).
        Valid options are 'steep', 'cg', and 'md', corresponding to steepest
        descent, conjugate gradient, and equilibrium molecular dynamics.
    scale_bonds : float, optional, default=1
        Scales the bond force constant (1 is completely on); for
        _energy_minimize_openmm.
    scale_angles : float, optional, default=1
        Scales the angle force constant (1 is completely on); for
        _energy_minimize_openmm.
    scale_torsions : float, optional, default=1
        Scales the torsional force constants (1 is completely on); for
        _energy_minimize_openmm.  Note: only Ryckaert-Bellemans style
        torsions are currently supported.
    scale_nonbonded : float, optional, default=1
        Scales epsilon (1 is completely on); for _energy_minimize_openmm.

    References
    ----------
    If using _energy_minimize_openmm(), please cite:
    .. [1] P. Eastman, M. S. Friedrichs, J. D. Chodera, R. J. Radmer,
           C. M. Bruns, J. P. Ku, K. A. Beauchamp, T. J. Lane, L.-P. Wang,
           D. Shukla, T. Tye, M. Houston, T. Stich, C. Klein, M. R. Shirts,
           and V. S. Pande. "OpenMM 4: A Reusable, Extensible, Hardware
           Independent Library for High Performance Molecular Simulation."
           J. Chem. Theor. Comput. 9(1):461-469. (2013).
    If using _energy_minimize_openbabel(), please cite:
    .. [1] O'Boyle, N. M.; Banck, M.; James, C. A.; Morley, C.;
           Vandermeersch, T.; Hutchison, G. R. "Open Babel: An open chemical
           toolbox." (2011) J. Cheminf. 3, 33
    .. [2] Open Babel, version X.X.X http://openbabel.org, (installed
           Month Year)
    If using the 'MMFF94' force field please also cite Halgren's MMFF94
    papers I-V, J. Comput. Chem. 17, 490-641 (1996); for 'MMFF94s' also
    Halgren, "MMFF VI. MMFF94s option for energy minimization studies."
    (1999) J. Comput. Chem. 20, 720-729.
    If using 'UFF' please cite Rappe, A. K., Casewit, C. J., Colwell, K. S.,
    Goddard, W. A. III, Skiff, W. M., J. Am. Chem. Soc. 114, 10024-10039
    (1992); for 'GAFF' Wang, J., Wolf, R. M., Caldwell, J. W., Kollman,
    P. A., Case, D. A., J. Comput. Chem. 25, 1157-1174 (2004); for
    'Ghemical' Hassinen, T. and Perakyla, M., J. Comput. Chem. 22,
    1229-1242 (2001).
    """
    tmp_dir = tempfile.mkdtemp()
    # NOTE(review): ``original`` is never used afterwards and ``tmp_dir`` is
    # never removed -- presumably the clone is a restore/debugging aid and
    # the leftover directory is deliberate; confirm.
    original = clone(self)
    self._kick()
    # Write the starting structure so the external minimizer can read it.
    self.save(os.path.join(tmp_dir, 'un-minimized.mol2'))
    # Dispatch on the forcefield argument: a known Open Babel name, a
    # forcefield XML file path (OpenMM/Foyer), or a named OpenMM forcefield.
    extension = os.path.splitext(forcefield)[-1]
    openbabel_ffs = ['MMFF94', 'MMFF94s', 'UFF', 'GAFF', 'Ghemical']
    if forcefield in openbabel_ffs:
        self._energy_minimize_openbabel(tmp_dir, forcefield=forcefield, steps=steps, **kwargs)
    elif extension == '.xml':
        self._energy_minimize_openmm(tmp_dir, forcefield_files=forcefield, forcefield_name=None, steps=steps, **kwargs)
    else:
        self._energy_minimize_openmm(tmp_dir, forcefield_files=None, forcefield_name=forcefield, steps=steps, **kwargs)
    # Pull the minimized coordinates back into this Compound.
    self.update_coordinates(os.path.join(tmp_dir, 'minimized.pdb'))
|
def list_to_raw_list(poselist):
    """Flatten a normal pose list into a raw list

    :param poselist: a formatted list [[x, y, z], [x, y, z, w]]
    :return: a raw list [x, y, z, x, y, z, w]
    """
    if not isinstance(poselist, (list, tuple)):
        raise TypeError("flatten_pose({}) does not accept this type of argument".format(str(type(poselist))))
    flat = []
    for pose in poselist:
        flat.extend(pose)
    return flat
|
def finish_commit(self, commit):
    """Ends the process of committing data to a Repo and persists the
    Commit.  Once a Commit is finished the data becomes immutable and
    future attempts to write to it with PutFile will error.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.FinishCommitRequest(commit=commit_from(commit))
    return self.stub.FinishCommit(request, metadata=self.metadata)
|
def selection(self):
    """Return a datetime representing the current selected date."""
    if not self._selection:
        return None
    # The selected day is stored as the first element of ``_selection``;
    # year/month come from the currently displayed date.
    day = int(self._selection[0])
    return self.datetime(self._date.year, self._date.month, day)
|
def process_command(self, command):
    '''Processes a user command using aliases

    Arguments:
      command   A user command list (e.g. argv)

    Returns: A ScubaContext object with the following attributes:
      script: a list of command line strings
      image: the docker image name to use
    '''
    ctx = ScubaContext()
    ctx.script = None
    ctx.image = self.image
    ctx.entrypoint = self.entrypoint
    ctx.environment = self.environment.copy()
    if command:
        alias = self.aliases.get(command[0])
        if not alias:
            # Command is not an alias; quote it verbatim.
            ctx.script = [shell_quote_cmd(command)]
        else:
            # The alias may override the image and/or entrypoint, and may
            # extend the environment.
            if alias.image:
                ctx.image = alias.image
            if alias.entrypoint is not None:
                ctx.entrypoint = alias.entrypoint
            if alias.environment:
                ctx.environment.update(alias.environment)
            if len(alias.script) > 1:
                # Multi-line alias: additional scuba arguments are illegal.
                if len(command) > 1:
                    raise ConfigError('Additional arguments not allowed with multi-line aliases')
                ctx.script = alias.script
            else:
                # Single-line alias: substitute it and append the (quoted)
                # remaining user arguments.
                command.pop(0)
                ctx.script = [alias.script[0] + ' ' + shell_quote_cmd(command)]
        ctx.script = flatten_list(ctx.script)
    return ctx
|
def channels_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a channel

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more
            replies. e.g. '1234567890.123456'
    """
    kwargs["channel"] = channel
    kwargs["thread_ts"] = thread_ts
    return self.api_call("channels.replies", http_verb="GET", params=kwargs)
|
def stop_listener_thread(self):
    """Kills sync_thread greenlet before joining it"""
    # `kill` makes the `self.api.sync` call inside _sync raise a connection
    # error; clearing the flag first lets the loop exit gracefully.
    self.should_listen = False
    sync = self.sync_thread
    if sync:
        sync.kill()
        sync.get()
    handler = self._handle_thread
    if handler is not None:
        handler.get()
    self.sync_thread = None
    self._handle_thread = None
|
def find_n50(self):
    """Calculate the N50 for each strain. N50 is defined as the largest
    contig such that at least half of the total genome size is contained in
    contigs equal to or larger than this contig"""
    for sample in self.metadata:
        data = sample[self.analysistype]
        # Default the N50 in case there is no assembly and the loop below
        # never assigns it.
        data.n50 = '-'
        running_total = 0
        for length in data.contig_lengths:
            running_total += length
            # Once the cumulative length reaches half the genome, the
            # current contig length is the N50.
            if running_total >= data.genome_length * 0.5:
                data.n50 = length
                break
|
def _update_docstrings(self):
    """Runs through the operation methods & updates their docstrings if
    necessary.

    If the method has the default placeholder docstring, this will replace
    it with the docstring from the underlying connection.
    """
    operations = self._details.resource_data['operations']
    for op_name in operations.keys():
        method = getattr(self.__class__, op_name, None)
        # Skip missing methods and those that already carry a custom
        # docstring.
        if not method or method.__doc__ != DEFAULT_DOCSTRING:
            continue
        # Placeholder docstring: borrow the one from the underlying
        # ``Connection`` method so there's at least *something* vaguely
        # useful there.
        # FIXME: We need to figure out a way to make this more useful, if
        # possible.
        api_name = operations[op_name]['api_name']
        conn_method = getattr(self._connection, to_snake_case(api_name))
        # Py2 treats ``__doc__`` on bound methods as a special read-only
        # attribute, so write through ``__func__`` there.
        if six.PY3:
            method.__doc__ = conn_method.__doc__
        else:
            method.__func__.__doc__ = conn_method.__doc__
|
def date_totals(entries, by):
    """Yield a user's name and a dictionary of their hours"""
    totals_by_date = {}
    for date, day_entries in groupby(entries, lambda e: e['date']):
        day_entries = list(day_entries)
        # Normalize datetimes to plain dates for the dictionary keys.
        if isinstance(date, datetime.datetime):
            date = date.date()
        first = day_entries[0]
        if by == 'user':
            name = ' '.join((first['user__first_name'], first['user__last_name']))
        elif by == 'project':
            name = first['project__name']
        else:
            name = first[by]
        pk = first[by]
        totals_by_date[date] = get_hours_summary(day_entries)
    return name, pk, totals_by_date
|
def save_experiment(self, name, variants):
    """Persist an experiment and its variants (unless they already exist).

    :param name: a unique string name for the experiment
    :param variants: a list of strings, each with a unique variant name
    """
    try:
        # NOTE(review): the Experiment is constructed but never explicitly
        # added to the session -- presumably the model layer auto-registers
        # new objects with the session; confirm.  Likewise no existence
        # check is visible here despite the "unless they already exist"
        # contract; verify that it is enforced elsewhere (e.g. a unique
        # constraint on ``name``).
        model.Experiment(name=name, started_on=datetime.utcnow(), variants=[model.Variant(name=v, order=i) for i, v in enumerate(variants)])
        self.Session.commit()
    finally:
        # Always release the session, even if the commit raised.
        self.Session.close()
|
def load_shortcuts(self):
    """Load shortcuts and assign to table model."""
    shortcuts = sorted(
        (Shortcut(context, name, keystr)
         for context, name, keystr in iter_shortcuts()),
        key=lambda sc: sc.context + sc.name)
    # Remember the original order so it can be restored after filtering.
    for position, sc in enumerate(shortcuts):
        sc.index = position
    self.source_model.shortcuts = shortcuts
    self.source_model.scores = [0] * len(shortcuts)
    self.source_model.rich_text = [sc.name for sc in shortcuts]
    self.source_model.reset()
    self.adjust_cells()
    self.sortByColumn(CONTEXT, Qt.AscendingOrder)
|
def to_(self, attrvals):
    """Create a list of Attribute instances.

    :param attrvals: A dictionary of attributes and values
    :return: A list of Attribute instances
    """
    return [
        factory(saml.Attribute,
                name=key.lower(),
                name_format=self.name_format,
                attribute_value=do_ava(value))
        for key, value in attrvals.items()
    ]
|
def aes_kdf(key, rounds, password=None, keyfile=None):
    """Set up a context for AES128-ECB encryption to find transformed_key"""
    ecb_cipher = AES.new(key, AES.MODE_ECB)
    # Start from the composite of the user's credentials, then encrypt it
    # repeatedly for the configured number of rounds.
    transformed_key = compute_key_composite(password=password, keyfile=keyfile)
    for _ in range(rounds):
        transformed_key = ecb_cipher.encrypt(transformed_key)
    return hashlib.sha256(transformed_key).digest()
|
def parse_phone(parts, allow_multiple=False):
    """Parse the phone number from the ad's parts

    parts -> The backpage ad's posting_body, separated into substrings
    allow_multiple -> If false, arbitrarily chooses the most commonly
                      occurring phone

    Returns a tuple of (phone(s), parts with found phone numbers stripped).
    """
    # Get text substitutions (ex: 'three' -> '3')
    text_subs = misc.phone_text_subs()
    Small = text_subs['Small']
    Magnitude = text_subs['Magnitude']
    Others = text_subs['Others']
    # NANP-style number, optionally prefixed with 1, with at most one
    # whitespace character between digit groups:
    phone_pattern = r'1?(?:[2-9][0-8][0-9])\s?(?:[2-9][0-9]{2})\s?(?:[0-9]{2})\s?(?:[0-9]{2})'
    # Looser variant allowing any single non-word character between digits:
    phone_pattern_spaces = r'1?\W?[2-9]\W?[0-8]\W?[0-9]\W?[2-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]\W?[0-9]'
    found_phones = []
    return_parts = []
    # Check each part for phone # and remove from parts if found
    for part in parts:
        body = part
        # remove '420' references to avoid false positives
        body = re.sub(r'420 ?friendly', '', body)
        body = body.replace(' 420 ', '')
        body = body.replace('420 sp', '')
        # Replace all disguising characters in the body: spelled-out digits
        # (Small), magnitude words (Magnitude), and other obfuscations.
        for key in Small:
            body = re.sub(r'-?' + re.escape(key) + r'-?', str(Small[key]), body)
        for key in Magnitude:
            body = re.sub(r'-?' + re.escape(key) + r'-?', str(Magnitude[key]), body)
        for key in Others:
            body = re.sub(r'-?' + re.escape(key) + r'-?', str(Others[key]), body)
        # Collapse punctuation and repeated whitespace so patterns can match.
        body = re.sub(r'\W', ' ', body)
        body = re.sub(r' +', ' ', body)
        if len(re.sub(r'\D', '', body)) < 10:
            # Less than 10 numeric digits in part - no phone number here
            return_parts.append(part)
            continue
        phones = re.findall(phone_pattern, body)
        if len(phones) == 0:
            # No phone number in standard format
            phones = re.findall(phone_pattern_spaces, body)
            if len(phones) > 0:
                # Phone number had spaces between digits
                for found in phones:
                    found_phones.append(re.sub(r'\D', '', found))
        else:
            # Found phone in standard format
            for found in phones:
                found_phones.append(re.sub(r'\D', '', found))
        if found_phones:
            # Phone has been found; strip it from the part.
            # NOTE(review): ``filtered_part`` is overwritten each iteration,
            # so only the last found phone is actually removed from this
            # part -- confirm whether chaining on the filtered result was
            # intended instead.
            for found in found_phones:
                filtered_part = parser_helpers.remove_phone(part, found)
            if re.sub(r'\W', '', filtered_part):
                # get rid of now-empty parts
                return_parts.append(filtered_part)
        else:
            # Phone not found yet, add part to output
            return_parts.append(part)
    if not allow_multiple:
        # Get most commonly occurring phone
        found_phone = ''
        if len(found_phones) > 0:
            found_phone = max(set(found_phones), key=found_phones.count)
        # Return the phone along with the original parts (minus any
        # occurrences of the phone number)
        return (found_phone, return_parts)
    else:
        # return all phones
        return (list(set(found_phones)), return_parts)
|
def img_encode(arr, **kwargs):
    """Encode ndarray to base64 string image data

    Parameters
    ----------
    arr: ndarray (rows, cols, depth)
    kwargs: passed directly to matplotlib.image.imsave
    """
    buffer = BytesIO()
    imsave(buffer, arr, **kwargs)
    buffer.seek(0)
    img_format = kwargs.get('format') or 'png'
    payload = base64.b64encode(buffer.getvalue()).decode()
    return 'data:image/{};base64,{}'.format(img_format, payload)
|
def xmlstring(self, pretty_print=False):
    """Serialises this FoLiA element and all its contents to XML.

    Returns:
        str: a string with XML representation for this element and all its
        children
    """
    serialized = ElementTree.tostring(self.xml(), xml_declaration=False, pretty_print=pretty_print, encoding='utf-8')
    if sys.version < '3':
        if isinstance(serialized, str):
            serialized = unicode(serialized, 'utf-8')  # pylint: disable=undefined-variable
    elif isinstance(serialized, bytes):
        serialized = str(serialized, 'utf-8')
    # Ugly patch to get rid of the namespace prefix.
    serialized = serialized.replace('ns0:', '')
    serialized = serialized.replace(':ns0', '')
    return serialized
|
def write_csv(filename, data, delimiter=CSV_DELIMITER):
    """Write image data to CSV file

    :param filename: name of CSV file to write data to
    :type filename: str
    :param data: image data to write to CSV file
    :type data: numpy array
    :param delimiter: delimiter used in CSV file. Default is ``;``
    :type delimiter: str
    """
    # newline='' is required by the csv module when writing; without it the
    # writer's own line endings are translated and every row is followed by
    # a blank line on Windows.
    with open(filename, 'w', newline='') as file:
        csv_writer = csv.writer(file, delimiter=delimiter)
        for line in data:
            csv_writer.writerow(line)
|
def add_ref(self, wordlist):
    """Adds a reference."""
    # The reference name is the first word with its trailing ':' stripped.
    refname = wordlist[0][:-1]
    if refname in self.refs:
        raise ReferenceError("[line {}]:{} already defined here (word) {} (line) {}".format(self.line_count, refname, self.refs[refname][0], self.refs[refname][1]))
    # Record where (word, line) this reference was defined.
    self.refs[refname] = (self.word_count, self.line_count)
|
def run_checks(collector):
    """Just run the checks for our modules.

    :param collector: the configured collector; its configuration supplies
        the active modules, their options, and the dashmat settings.
    """
    # Optionally restrict the run to a single named module via the
    # ``artifact`` setting; None/empty/NotSpecified means "run everything".
    artifact = collector.configuration["dashmat"].artifact
    chosen = artifact
    if chosen in (None, "", NotSpecified):
        chosen = None
    dashmat = collector.configuration["dashmat"]
    modules = collector.configuration["__active_modules__"]
    config_root = collector.configuration["config_root"]
    module_options = collector.configuration["modules"]
    # Prefer Redis when a host is configured; otherwise fall back to a
    # JSON-file datastore under the config root.
    datastore = JsonDataStore(os.path.join(config_root, "data.json"))
    if dashmat.redis_host:
        datastore = RedisDataStore(redis.Redis(dashmat.redis_host))
    scheduler = Scheduler(datastore)
    # Register every active module (or just the chosen one) with a server
    # built from its configured options.
    for name, module in modules.items():
        if chosen is None or name == chosen:
            server = module.make_server(module_options[name].server_options)
            scheduler.register(module, server, name)
    # force=True makes the scheduler run every registered check immediately
    # instead of waiting for its schedule.
    scheduler.twitch(force=True)
|
def normalize_value(value):
    """Convert value to string and make it lower cased."""
    # On Py2, coerce through ``unicode`` to avoid byte-string lowercasing.
    to_text = unicode if six.PY2 else str  # noqa
    return to_text(value).lower()
|
def _wiggle_interval ( value , wiggle = 0.5 ** 44 ) :
r"""Check if ` ` value ` ` is in : math : ` \ left [ 0 , 1 \ right ] ` .
Allows a little bit of wiggle room outside the interval . Any value
within ` ` wiggle ` ` of ` ` 0.0 ` will be converted to ` ` 0.0 ` and similar
for ` ` 1.0 ` ` .
. . note : :
There is also a Fortran implementation of this function , which
will be used if it can be built .
Args :
value ( float ) : Value to check in interval .
wiggle ( Optional [ float ] ) : The amount of wiggle room around the
the endpoints ` ` 0.0 ` ` and ` ` 1.0 ` ` .
Returns :
Tuple [ float , bool ] : Pair of
* The ` ` value ` ` if it ' s in the interval , or ` ` 0 ` ` or ` ` 1 ` `
if the value lies slightly outside . If the ` ` value ` ` is
too far outside the unit interval , will be NaN .
* Boolean indicating if the ` ` value ` ` is inside the unit interval ."""
|
if - wiggle < value < wiggle :
return 0.0 , True
elif wiggle <= value <= 1.0 - wiggle :
return value , True
elif 1.0 - wiggle < value < 1.0 + wiggle :
return 1.0 , True
else :
return np . nan , False
|
def sendFuture(self, future):
    """Send a Future to be executed remotely.

    If the future's callable is already registered as a shared constant
    (keyed by its hash), it is swapped for a lightweight reference so only
    the name travels over the wire. If pickling fails, fall back to sending
    just the hash of the callable, restoring the original callable on the
    local object afterwards.
    """
    try:
        if shared.getConst(hash(future.callable), timeout=0):
            # Enforce name reference passing if already shared
            future.callable = SharedElementEncapsulation(hash(future.callable))
        self.socket.send_multipart([b"TASK", pickle.dumps(future, pickle.HIGHEST_PROTOCOL)])
    except pickle.PicklingError as e:
        # If element not picklable, pickle its name
        # TODO: use its fully qualified name
        scoop.logger.warn("Pickling Error: {0}".format(e))
        # Temporarily replace the callable with its hash for transport,
        # then restore it so the local future object is left untouched.
        previousCallable = future.callable
        future.callable = hash(future.callable)
        self.socket.send_multipart([b"TASK", pickle.dumps(future, pickle.HIGHEST_PROTOCOL)])
        future.callable = previousCallable
|
def _get_instance_attributes ( self ) :
"""Return a generator for instance attributes ' name and value .
. . code - block : : python3
for _ name , _ value in self . _ get _ instance _ attributes ( ) :
print ( " attribute name : { } " . format ( _ name ) )
print ( " attribute value : { } " . format ( _ value ) )
Returns :
generator : tuples with attribute name and value ."""
|
for name , value in self . __dict__ . items ( ) :
if name in map ( ( lambda x : x [ 0 ] ) , self . get_class_attributes ( ) ) :
yield ( name , value )
|
def to_json(self):
    """Return this Wea as a json-serializable dict.

    Keys: "location" (ladybug location schema), "direct_normal_irradiance"
    (tuple of hourly values), "diffuse_horizontal_irradiance" (tuple of
    hourly values), "timestep" (interval between measurements, default 1),
    and "is_leap_year".
    """
    return {
        'location': self.location.to_json(),
        'direct_normal_irradiance': self.direct_normal_irradiance.to_json(),
        'diffuse_horizontal_irradiance': self.diffuse_horizontal_irradiance.to_json(),
        'timestep': self.timestep,
        'is_leap_year': self.is_leap_year,
    }
|
def mktmpenv_cmd(argv):
    """Create a temporary virtualenv.

    Picks a random unused name under ``workon_home``, creates the
    environment, optionally drops into a shell in it, and removes it when
    done.

    :param argv: command-line arguments forwarded to the mkvirtualenv parser.
    """
    parser = mkvirtualenv_argparser()
    # '.' always exists under workon_home, so the loop below runs at least
    # once and keeps drawing random names until an unused one is found.
    env = '.'
    while (workon_home / env).exists():
        # NOTE(review): the [2:-1] slice looks like a Python-2 relic that
        # stripped a trailing 'L'; on Python 3 it drops the last hex digit
        # of the random name -- confirm whether this is intentional.
        env = hex(random.getrandbits(64))[2:-1]
    args, rest = parser.parse_known_args(argv)
    mkvirtualenv(env, args.python, args.packages, requirements=args.requirements, rest=rest)
    print('This is a temporary environment. It will be deleted when you exit')
    try:
        if args.activate:
            # only used for testing on windows
            shell(env)
    finally:
        # NOTE(review): returning from ``finally`` swallows any exception
        # raised by shell(); verify this best-effort cleanup is deliberate.
        return rmvirtualenvs([env])
|
def default_capture_file_name(self):
    """:returns: File name for a capture on this link"""
    side_a, side_b = self._nodes[0], self._nodes[1]
    raw_name = "{}_{}-{}_to_{}_{}-{}".format(
        side_a["node"].name, side_a["adapter_number"], side_a["port_number"],
        side_b["node"].name, side_b["adapter_number"], side_b["port_number"])
    # Strip characters that are unsafe in file names before adding the extension.
    return re.sub("[^0-9A-Za-z_-]", "", raw_name) + ".pcap"
|
def support_support_param_password(self, **kwargs):
    """Auto Generated Code"""
    # Build config/support/support-param/password with the given password text.
    config = ET.Element("config")
    support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
    support_param = ET.SubElement(support, "support-param")
    password_node = ET.SubElement(support_param, "password")
    password_node.text = kwargs.pop('password')
    # The caller may override the default callback used to emit the config.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _get_index ( self ) :
"""Get the guideline ' s index .
This must return an ` ` int ` ` .
Subclasses may override this method ."""
|
glyph = self . glyph
if glyph is not None :
parent = glyph
else :
parent = self . font
if parent is None :
return None
return parent . guidelines . index ( self )
|
def age(*paths):
    '''Return the minimum age, in seconds, of a set of files.

    Returns 0 if no paths are given.
    Returns time.time() if a path does not exist (i.e. a missing file is
    treated as maximally old).
    '''
    # Sample the clock once so every age is measured against the same
    # instant; the old code called time.time() per path, yielding
    # mutually inconsistent ages.
    now = time.time()
    if not paths:
        return 0
    for path in paths:
        if not os.path.exists(path):
            return now
    return min(now - os.path.getmtime(path) for path in paths)
|
def download(self, url, location, **kwargs):
    """Fetch the document at ``url``, write its body to ``location``,
    and return the number of bytes written."""
    response = self.go(url, **kwargs)
    body = response.body
    with open(location, 'wb') as destination:
        destination.write(body)
    return len(body)
|
def diff_for_humans(self, other=None, absolute=False, locale=None):
    """Get the difference in a human readable format in the current locale.

    When comparing a value in the past to default now:
        1 day ago / 5 months ago
    When comparing a value in the future to default now:
        1 day from now / 5 months from now
    When comparing a value in the past to another value:
        1 day before / 5 months before
    When comparing a value in the future to another value:
        1 day after / 5 months after

    :type other: Date
    :param absolute: removes time difference modifiers ago, after, etc
    :type absolute: bool
    :param locale: The locale to use for localization
    :type locale: str
    :rtype: str
    """
    comparing_to_now = other is None
    reference = self.today() if comparing_to_now else other
    return pendulum.format_diff(self.diff(reference), comparing_to_now, absolute, locale)
|
def _get_frdata(stream, num, name, ctype=None):
    """Brute force-ish method to return the FrData structure for a channel.

    This saves on pulling the channel type from the TOC.
    """
    candidates = (ctype,) if ctype else ('adc', 'proc', 'sim')
    for candidate in candidates:
        reader = getattr(stream, 'ReadFr{0}Data'.format(candidate.title()))
        try:
            return reader(num, name)
        except IndexError as exc:
            # A 'no channel of this type' error means we should simply try
            # the next channel type; anything else is a real failure.
            if not FRERR_NO_CHANNEL_OF_TYPE.match(str(exc)):
                raise
    raise ValueError("no Fr{{Adc,Proc,Sim}}Data structures with the "
                     "name {0}".format(name))
|
def get_metrics(predicted: Union[str, List[str], Tuple[str, ...]],
                gold: Union[str, List[str], Tuple[str, ...]]) -> Tuple[float, float]:
    """Takes a predicted answer and a gold answer (each either a string or a
    list of strings) and returns exact match and the DROP F1 metric for the
    prediction. If you are writing a script for evaluating objects in memory
    (say, the output of predictions during validation, or while training),
    this is the function you want to call, after using
    :func:`answer_json_to_strings` when reading the gold answer from the
    released data file.
    """
    predicted_bags = _answer_to_bags(predicted)
    gold_bags = _answer_to_bags(gold)
    exact_match = 1.0 if predicted_bags[0] == gold_bags[0] else 0
    per_bag_f1 = _align_bags(predicted_bags[1], gold_bags[1])
    f1 = round(np.mean(per_bag_f1), 2)
    return exact_match, f1
|
def load(cls, build_file, name, target_aliases):
    """A BuildFileManipulator factory class method.

    Note that BuildFileManipulator requires a very strict formatting of
    target declarations. In particular, it wants to see a newline after
    `target_type(`, `dependencies=[`, and the last param to the target
    constructor before the trailing `)`. There are further restrictions as
    well -- see the comments below or check out the example targets in
    the tests for this class.

    :param build_file: A BuildFile instance to operate on.
    :param name: The name of the target (without the spec path or colon) to operate on.
    :param target_aliases: The callables injected into the build file context that we
        should treat as target declarations.
    """
    with open(build_file.full_path, 'r') as f:
        source = f.read()
    source_lines = source.split('\n')
    tree = ast.parse(source)
    # Since we're not told what the last line of an expression is, we have
    # to figure it out based on the start of the expression after it.
    # The interval that we consider occupied by a given expression is
    # [expr.lineno, next_expr.lineno). For the last expression in the
    # file, its end is the number of lines in the file.
    # Also note that lineno is 1-indexed, so we subtract 1 from everything.
    intervals = [t.lineno - 1 for t in tree.body]
    intervals.append(len(source_lines))
    # Candidate target declarations
    top_level_exprs = [t for t in tree.body if isinstance(t, ast.Expr)]
    top_level_calls = [e.value for e in top_level_exprs if isinstance(e.value, ast.Call)]
    # Just in case someone is tricky and assigns the result of a target
    # declaration to a variable, though in general this is not useful
    assigns = [t for t in tree.body if isinstance(t, ast.Assign)]
    assigned_calls = [t.value for t in assigns if isinstance(t.value, ast.Call)]
    # Final candidate declarations
    calls = top_level_calls + assigned_calls
    # Filter out calls that don't have a simple name as the function
    # i.e. keep `foo()` but not `(some complex expr)()`
    calls = [call for call in calls if isinstance(call.func, ast.Name)]
    # Now actually get all of the calls to known aliases for targets
    # TODO(pl): Log these
    target_calls = [call for call in calls if call.func.id in target_aliases]
    # We now have enough information to instantiate a BuildFileTarget for
    # any one of these, but we're only interested in the one with name `name`
    def name_from_call(call):
        # Extract the string literal passed as the `name=` kwarg of a target call.
        for keyword in call.keywords:
            if keyword.arg == 'name':
                if isinstance(keyword.value, ast.Str):
                    return keyword.value.s
                else:
                    logger.warn('Saw a non-string-literal name argument to a target while '
                                'looking through {build_file}. Target type was {target_type}.'
                                'name value was {name_value}'.format(build_file=build_file,
                                                                     target_type=call.func.id,
                                                                     name_value=keyword.value))
        raise BuildTargetParseError('Could not find name parameter to target call'
                                    'with target type {target_type}'.format(target_type=call.func.id))
    calls_by_name = dict((name_from_call(call), call) for call in target_calls)
    if name not in calls_by_name:
        raise BuildTargetParseError('Could not find target named {name} in {build_file}'
                                    .format(name=name, build_file=build_file))
    target_call = calls_by_name[name]
    # lineno is 1-indexed
    target_interval_index = intervals.index(target_call.lineno - 1)
    target_start = intervals[target_interval_index]
    target_end = intervals[target_interval_index + 1]
    def is_whitespace(line):
        return line.strip() == ''
    def is_comment(line):
        return line.strip().startswith('#')
    def is_ignored_line(line):
        return is_whitespace(line) or is_comment(line)
    # Walk the end back so we don't have any trailing whitespace
    while is_ignored_line(source_lines[target_end - 1]):
        target_end -= 1
    target_source_lines = source_lines[target_start:target_end]
    # TODO(pl): This would be good logging
    # print(astpp.dump(target_call))
    # print("Target source lines")
    # for line in target_source_lines:
    #   print(line)
    if target_call.args:
        raise BuildTargetParseError('Targets cannot be called with non-keyword args. Target was '
                                    '{name} in {build_file}'.format(name=name, build_file=build_file))
    # TODO(pl): This should probably be an assertion. In order for us to have extracted
    # this target_call by name, it must have had at least one kwarg (name)
    if not target_call.keywords:
        raise BuildTargetParseError('Targets cannot have no kwargs. Target type was '
                                    '{target_type} in {build_file}'.format(target_type=target_call.func.id,
                                                                           build_file=build_file))
    if target_call.lineno == target_call.keywords[0].value.lineno:
        raise BuildTargetParseError('Arguments to a target cannot be on the same line as the '
                                    'target type. Target type was {target_type} in {build_file} '
                                    'on line number {lineno}.'.format(target_type=target_call.func.id,
                                                                      build_file=build_file,
                                                                      lineno=target_call.lineno))
    # Every kwarg value must begin on the same line as its `key=` so the
    # per-kwarg line intervals computed below are meaningful.
    for keyword in target_call.keywords:
        kw_str = keyword.arg
        kw_start_line = keyword.value.lineno
        source_line = source_lines[kw_start_line - 1]
        kwarg_line_re = re.compile(r'\s*?{kw_str}\s*?=\s*?\S'.format(kw_str=kw_str))
        if not kwarg_line_re.match(source_line):
            raise BuildTargetParseError('kwarg line is malformed. The value of a kwarg to a target '
                                        'must start after the equals sign of the line with the key.'
                                        'Build file was: {build_file}. Line number was: {lineno}'
                                        .format(build_file=build_file, lineno=keyword.value.lineno))
    # Same setup as for getting the target's interval, but the offsets here
    # are relative to the first line of the target call.
    target_call_intervals = [t.value.lineno - target_call.lineno for t in target_call.keywords]
    target_call_intervals.append(len(target_source_lines))
    last_kwarg = target_call.keywords[-1]
    last_interval_index = target_call_intervals.index(last_kwarg.value.lineno - target_call.lineno)
    last_kwarg_start = target_call_intervals[last_interval_index]
    last_kwarg_end = target_call_intervals[last_interval_index + 1]
    last_kwarg_lines = target_source_lines[last_kwarg_start:last_kwarg_end]
    if last_kwarg_lines[-1].strip() != ')':
        raise BuildTargetParseError('All targets must end with a trailing ) on its own line. It '
                                    "cannot go at the end of the last argument's line. Build file "
                                    'was {build_file}. Target name was {target_name}. Line number '
                                    'was {lineno}'.format(build_file=build_file,
                                                          target_name=name,
                                                          lineno=last_kwarg_end + target_call.lineno))
    # Now that we've double checked that we have the ) in the proper place,
    # remove that line from the lines owned by the last kwarg
    target_call_intervals[-1] -= 1
    # TODO(pl): Also good logging
    # for t in target_call.keywords:
    #   interval_index = target_call_intervals.index(t.value.lineno - target_call.lineno)
    #   print("interval_index:", interval_index)
    #   start = target_call_intervals[interval_index]
    #   end = target_call_intervals[interval_index + 1]
    #   print("interval: %s, %s" % (start, end))
    #   print("lines:")
    #   print('\n'.join(target_source_lines[start:end]))
    #   print('\n\n')
    # print(target_call_intervals)
    def get_dependencies_node(target_call):
        # Return the AST node of the `dependencies=` kwarg, or None if absent.
        for keyword in target_call.keywords:
            if keyword.arg == 'dependencies':
                return keyword.value
        return None
    dependencies_node = get_dependencies_node(target_call)
    dependencies = []
    if dependencies_node:
        if not isinstance(dependencies_node, ast.List):
            raise BuildTargetParseError('Found non-list dependencies argument on target {name} '
                                        'in build file {build_file}. Argument had invalid type '
                                        '{node_type}'.format(name=name,
                                                             build_file=build_file,
                                                             node_type=type(dependencies_node)))
        last_lineno = dependencies_node.lineno
        for dep_node in dependencies_node.elts:
            if not dep_node.lineno > last_lineno:
                raise BuildTargetParseError('On line number {lineno} of build file {build_file}, found '
                                            'dependencies declaration where the dependencies argument '
                                            'and dependencies themselves were not all on separate lines.'
                                            .format(lineno=dep_node.lineno, build_file=build_file))
            # First, we peek up and grab any whitespace/comments above us
            peek_lineno = dep_node.lineno - 1
            comments_above = []
            while peek_lineno > last_lineno:
                peek_str = source_lines[peek_lineno - 1].strip()
                if peek_str == '' or peek_str.startswith('#'):
                    comments_above.insert(0, peek_str.lstrip(' #'))
                else:
                    spec = dependencies[-1].spec if dependencies else None
                    raise BuildTargetParseError('While parsing the dependencies of {target_name}, '
                                                'encountered an unusual line while trying to extract '
                                                'comments. This probably means that a dependency at '
                                                'line {lineno} in {build_file} is missing a trailing '
                                                'comma. The string in question was {spec}'
                                                .format(target_name=name,
                                                        lineno=peek_lineno,
                                                        build_file=build_file,
                                                        spec=spec))
                peek_lineno -= 1
            # Done peeking for comments above us, now capture a possible inline side-comment
            dep_str = source_lines[dep_node.lineno - 1]
            dep_with_comments = dep_str.split('#', 1)
            side_comment = None
            if len(dep_with_comments) == 2:
                side_comment = dep_with_comments[1].strip()
            dep = DependencySpec(dep_node.s, comments_above=comments_above, side_comment=side_comment)
            # TODO(pl): Logging here
            dependencies.append(dep)
            last_lineno = dep_node.lineno
        deps_interval_index = target_call_intervals.index(dependencies_node.lineno - target_call.lineno)
        deps_start = target_call_intervals[deps_interval_index]
        deps_end = target_call_intervals[deps_interval_index + 1]
        # Finally, like we did for the target intervals above, we're going to roll back
        # the end of the deps interval so we don't stomp on any comments after it.
        while is_ignored_line(target_source_lines[deps_end - 1]):
            deps_end -= 1
    else:
        # If there isn't already a place defined for dependencies, we use
        # the line interval just before the trailing ) that ends the target
        deps_start = -1
        deps_end = -1
    return cls(name=name,
               build_file=build_file,
               build_file_source_lines=source_lines,
               target_source_lines=target_source_lines,
               target_interval=(target_start, target_end),
               dependencies=dependencies,
               dependencies_interval=(deps_start, deps_end))
|
def get_share_metadata(self, share_name, timeout=None):
    '''Returns all user-defined metadata for the specified share.

    :param str share_name:
        Name of existing share.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    :return:
        A dictionary representing the share metadata name, value pairs.
    :rtype: a dict mapping str to str
    '''
    _validate_not_none('share_name', share_name)
    request = HTTPRequest()
    request.method = 'GET'
    request.host = self._get_host()
    request.path = _get_path(share_name)
    request.query = [
        ('restype', 'share'),
        ('comp', 'metadata'),
        ('timeout', _int_to_str(timeout)),
    ]
    return _parse_metadata(self._perform_request(request))
|
def readcbf(name, load_header=False, load_data=True, for_nexus=False):
    """Read a cbf (crystallographic binary format) file from a Dectris PILATUS
    detector.

    Inputs
        name: string
            the file name
        load_header: bool
            if the header data is to be loaded.
        load_data: bool
            if the binary data is to be loaded.
        for_nexus: bool
            if the array should be opened with NeXus ordering.

    Output
        a tuple containing, in order, the scattering data array (when
        ``load_data``) and the header dict (when ``load_header``).

    Notes
        currently only Little endian, "signed 32-bit integer" type and
        byte-offset compressed data are accepted.
    """
    with open(name, 'rb') as f:
        cbfbin = f.read()
    # The binary section begins right after this CBF magic marker.
    datastart = cbfbin.find(b'\x0c\x1a\x04\xd5') + 4
    hed = [x.strip() for x in cbfbin[:datastart].split(b'\n')]
    header = {}
    readingmode = None
    for raw_line in hed:
        if not raw_line:
            # skip empty header lines
            continue
        elif raw_line == b';':
            continue
        elif raw_line.startswith(b'_array_data.header_convention'):
            header['CBF_header_convention'] = str(
                raw_line[len(b'_array_data.header_convention'):].strip().replace(b'"', b''),
                encoding='utf-8')
        elif raw_line.startswith(b'_array_data.header_contents'):
            readingmode = 'PilatusHeader'
        elif raw_line.startswith(b'_array_data.data'):
            readingmode = 'CIFHeader'
        elif readingmode == 'PilatusHeader':
            if not raw_line.startswith(b'#'):
                continue
            line = raw_line.strip()[1:].strip()
            try:
                # try to interpret the line as the date.
                header['CBF_Date'] = dateutil.parser.parse(line)
                header['Date'] = header['CBF_Date']
                continue
            except (ValueError, TypeError):
                # not a date: fall through and try the other formats.
                pass
            treated = False
            for sep in (b':', b'='):
                if treated:
                    continue
                if line.count(sep) == 1:
                    # renamed from `name` to avoid shadowing the function parameter
                    key, value = tuple(x.strip() for x in line.split(sep, 1))
                    try:
                        m = re.match(
                            rb'^(?P<number>-?(\d+(.\d+)?(e-?\d+)?))\s+(?P<unit>m|s|counts|eV)$',
                            value).groupdict()
                        value = float(m['number'])
                        m['unit'] = str(m['unit'], encoding='utf-8')
                    except AttributeError:
                        # the regex did not match the string, thus re.match()
                        # returned None: keep the raw bytes value.
                        pass
                    header[str(key, 'utf-8')] = value
                    treated = True
            if treated:
                continue
            if line.startswith(b'Pixel_size'):
                header['XPixel'], header['YPixel'] = tuple(
                    [float(a.strip().split(b' ')[0]) * 1000
                     for a in line[len(b'Pixel_size'):].split(b'x')])
            else:
                try:
                    m = re.match(
                        rb'^(?P<label>[a-zA-Z0-9,_\.\-!\?\ ]*?)\s+(?P<number>-?(\d+(.\d+)?(e-?\d+)?))\s+(?P<unit>m|s|counts|eV)$',
                        line).groupdict()
                except AttributeError:
                    pass
                else:
                    m['label'] = str(m['label'], 'utf-8')
                    m['unit'] = str(m['unit'], encoding='utf-8')
                    # BUGFIX: m['unit'] has just been decoded to str, so the
                    # old comparison against b'counts' was always False and
                    # count-valued fields were silently stored as float.
                    if m['unit'] == 'counts':
                        header[m['label']] = int(m['number'])
                    else:
                        header[m['label']] = float(m['number'])
                    if 'sensor' in m['label'] and 'thickness' in m['label']:
                        # sensor thickness comes in metres; store micrometres
                        header[m['label']] *= 1e6
        elif readingmode == 'CIFHeader':
            line = raw_line
            for sep in (b':', b'='):
                if line.count(sep) == 1:
                    label, content = tuple(x.strip() for x in line.split(sep, 1))
                    if b'"' in content:
                        content = content.replace(b'"', b'')
                    try:
                        content = int(content)
                    except ValueError:
                        content = str(content, encoding='utf-8')
                    header['CBF_' + str(label, encoding='utf-8')] = content
    ret = []
    if load_data:
        if header['CBF_X-Binary-Element-Type'] != 'signed 32-bit integer':
            raise NotImplementedError(
                'element type is not "signed 32-bit integer" in CBF, but %s.'
                % header['CBF_X-Binary-Element-Type'])
        if header['CBF_conversions'] != 'x-CBF_BYTE_OFFSET':
            raise NotImplementedError('compression is not "x-CBF_BYTE_OFFSET" in CBF!')
        dim1 = header['CBF_X-Binary-Size-Fastest-Dimension']
        dim2 = header['CBF_X-Binary-Size-Second-Dimension']
        nbytes = header['CBF_X-Binary-Size']
        ret.append(cbfdecompress(bytearray(cbfbin[datastart:datastart + nbytes]),
                                 dim1, dim2, for_nexus))
    if load_header:
        ret.append(header)
    return tuple(ret)
|
def send_image(self, user_id, media_id, account=None):
    """Send an image message to a user via the customer-service API.

    Details: http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html

    :param user_id: user ID (the ``source`` of the received ``Message``)
    :param media_id: media ID of the image, e.g. obtained via :func:`upload_media`
    :param account: optional customer-service account
    :return: the returned JSON data

    Usage::

        from wechatpy import WeChatClient

        client = WeChatClient('appid', 'secret')
        res = client.message.send_image('openid', 'media_id')
    """
    payload = {
        'touser': user_id,
        'msgtype': 'image',
        'image': {'media_id': media_id},
    }
    return self._send_custom_message(payload, account=account)
|
def callOrder(cls, *args):  # pylint: disable=invalid-name
    """Checking the inspector is called with given priority.

    Args: SinonSpy, list of inspectors
    eg.
        [spy1, spy2, spy3] => spy1 is called before spy2, spy2 is called before spy3
        [spy1, spy2, spy1] => spy1 is called before and after spy2
    """
    # Validate every argument is a spy before doing any ordering checks.
    for spy in args:
        cls.__is_spy(spy)
    for idx, val in enumerate(args):
        # NOTE(review): comparisons use != (equality, not identity), so any
        # spy equal to the first/last argument skips the corresponding
        # neighbour check -- presumably to support the repeated-spy case in
        # the docstring; confirm this is the intended semantics.
        if val != args[0]:
            # val must have been called after its predecessor in the list.
            if not (val.calledAfter(args[idx - 1])):
                raise cls.failException(cls.message)
        if val != args[-1]:
            # val must have been called before its successor in the list.
            if not (val.calledBefore(args[idx + 1])):
                raise cls.failException(cls.message)
|
def clear_input_field(self, locator, method=0):
    """Clears the text field identified by `locator`.

    ``element.clear()`` doesn't behave consistently across browsers, so
    this keyword offers alternative clearing strategies via ``method``:

    0 = Uses the selenium method by doing element.clear \n
    1 = Sets focus on the field and presses CTRL + A, and then DELETE \n
    2 = Repeatedly presses BACKSPACE until the field is empty

    When using a method other than '2', this keyword does not validate that
    the field was successfully cleared; handle that verification yourself.
    Method '2' presses delete until the field's value is empty.

    | *Argument* | *Description* | *Example* |
    | locator | Selenium 2 element locator | id=my_id |
    | method | the clearing method that should be used | no example provided |
    """
    element = self._element_find(locator, True, True)
    strategy = int(method)
    if strategy == 0:
        self._info("Clearing input on element '%s'" % (locator))
        element.clear()
    elif strategy == 1:
        self._info("Clearing input on element '%s' by pressing 'CTRL + A + DELETE'" % (locator))
        element.send_keys(Keys.CONTROL + 'a')
        element.send_keys(Keys.DELETE)
    elif strategy == 2:
        self._info("Clearing input on element '%s' by repeatedly pressing BACKSPACE" % (locator))
        while len(element.get_attribute('value')) != 0:
            element.send_keys(Keys.BACKSPACE)
    else:
        # Unknown method: fall back to the plain selenium clear.
        element.clear()
|
def collect_variables(self, variables: MultisetOfVariables) -> None:
    """Recursively add all variables occurring in the expression to *variables*.

    This is used internally by `variables`. Needs to be overwritten by
    inheriting container expression classes. Sharing one multiset across
    several expressions is more efficient than creating one per expression.

    Args:
        variables:
            Multiset of variables. All variables contained in the expression
            are recursively added to this multiset.
    """
    variable_name = self.variable_name
    if variable_name is not None:
        variables.add(variable_name)
|
def assert_raises_regex(expected_exception, expected_regex, extras=None, *args, **kwargs):
    """Assert that an exception matching *expected_regex* is raised.

    If no exception is raised, the test fails. If an exception is raised but
    not of the expected type, it is let through. If an exception of the
    expected type is raised but its message does not match *expected_regex*,
    the test fails.

    This should only be used as a context manager::

        with assert_raises(Exception):
            func()

    Args:
        expected_exception: An exception class that is expected to be raised.
        extras: An optional field for extra information to be included in
            the test result.
    """
    return _AssertRaisesContext(expected_exception, expected_regex, extras=extras)
|
def parse_from_dict(json_dict):
    """Parse a Unified Uploader message into a MarketOrderList.

    :param dict json_dict: A Unified Uploader message as a JSON dict.
    :rtype: MarketOrderList
    :returns: An instance of MarketOrderList, containing the orders within.
    """
    columns = json_dict['columns']
    order_list = MarketOrderList(
        upload_keys=json_dict['uploadKeys'],
        order_generator=json_dict['generator'],
    )
    for rowset in json_dict['rowsets']:
        generated_at = parse_datetime(rowset['generatedAt'])
        region_id = rowset['regionID']
        type_id = rowset['typeID']
        # Record the region/type pair even when it carries no rows.
        order_list.set_empty_region(region_id, type_id, generated_at)
        for row in rowset['rows']:
            order_kwargs = _columns_to_kwargs(SPEC_TO_KWARG_CONVERSION, columns, row)
            order_kwargs.update({
                'region_id': region_id,
                'type_id': type_id,
                'generated_at': generated_at,
            })
            order_kwargs['order_issue_date'] = parse_datetime(order_kwargs['order_issue_date'])
            order_list.add_order(MarketOrder(**order_kwargs))
    return order_list
|
def run(self, run_fit_model=True):
    """Run the Graph Cut segmentation according to preset parameters.

    :param run_fit_model: Allow to skip model fit when the model is prepared before
    :return:
    """
    if run_fit_model:
        self.fit_model(self.img, self.voxelsize, self.seeds)
    self._start_time = time.time()
    method = self.segparams["method"].lower()
    if method in ("graphcut", "gc"):
        self.__single_scale_gc_run()
    elif method in (
        "multiscale_graphcut",
        "multiscale_gc",
        "msgc",
        "msgc_lo2hi",
        "lo2hi",
        "multiscale_graphcut_lo2hi",
    ):
        logger.debug("performing multiscale Graph-Cut lo2hi")
        self.__multiscale_gc_lo2hi_run()
    elif method in ("msgc_hi2lo", "hi2lo", "multiscale_graphcut_hi2lo"):
        logger.debug("performing multiscale Graph-Cut hi2lo")
        self.__multiscale_gc_hi2lo_run()
    else:
        logger.error("Unknown segmentation method: " + self.segparams["method"])
|
def excel_key(index):
    """Create a spreadsheet-style column key for *index*.

    Converts *index* into a bijective base-26 number rendered with the
    letters A-Z: 0 -> 'A', 25 -> 'Z', 26 -> 'AA', 701 -> 'ZZ', ...

    :param index: 0-based column index (anything ``int()`` accepts).
    :returns: the column label string ('' for a negative index, matching
        the behavior of the previous recursive-lambda implementation).
    """
    n = int(index)
    letters = []
    # Bijective base 26: after emitting each digit, shift down by one so
    # that 'A' corresponds to 0 at every position.
    while n >= 0:
        letters.append(chr(65 + n % 26))
        n = n // 26 - 1
    return ''.join(reversed(letters))
|
def blast(self, blastfile=None, outfile=None):
    """Convert anchor file to 12 col blast file.

    :param blastfile: optional BLAST file used to look up real BLAST lines
        for each anchor pair; pairs not found there (or all pairs, when no
        blastfile is given) are synthesized via BlastLineByConversion.
    :param outfile: output path; defaults to ``<anchorfile>.blast``.
    :returns: the output file name.
    """
    from jcvi.formats.blast import BlastSlow, BlastLineByConversion
    if not outfile:
        outfile = self.filename + ".blast"
    # BUGFIX: the old code set blasts=None when no blastfile was given and
    # then evaluated `(a, b) in blasts` unconditionally, raising TypeError.
    blasts = BlastSlow(blastfile).to_dict() if blastfile is not None else None
    fw = must_open(outfile, "w", checkexists=True)
    nlines = 0
    for a, b, _id in self.iter_pairs():
        if blasts is not None and (a, b) in blasts:
            bline = blasts[(a, b)]
        elif blasts is not None and (b, a) in blasts:
            bline = blasts[(b, a)]
        else:
            line = "\t".join((a, b))
            bline = BlastLineByConversion(line, mode="110000000000")
        print(bline, file=fw)
        nlines += 1
    fw.close()
    logging.debug("A total of {0} BLAST lines written to `{1}`.".format(nlines, outfile))
    return outfile
|
def _build_line ( colwidths , colaligns , linefmt ) :
"Return a string which represents a horizontal line ."
|
if not linefmt :
return None
if hasattr ( linefmt , "__call__" ) :
return linefmt ( colwidths , colaligns )
else :
begin , fill , sep , end = linefmt
cells = [ fill * w for w in colwidths ]
return _build_simple_row ( cells , ( begin , sep , end ) )
|
def package_files ( directory ) :
"""Get list of data files to add to the package ."""
|
paths = [ ]
for ( path , _ , file_names ) in walk ( directory ) :
for filename in file_names :
paths . append ( join ( '..' , path , filename ) )
return paths
|
def iter_decode(input, fallback_encoding, errors='replace'):
    """"Pull"-based decoder.

    :param input:
        An iterable of byte strings. The input is first consumed just enough
        to determine the encoding based on the presence of a BOM, then
        consumed on demand as the return value is iterated.
    :param fallback_encoding:
        An :class:`Encoding` object or a label string: the encoding to use
        if :obj:`input` does not have a BOM.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :returns:
        An ``(output, encoding)`` tuple, where :obj:`output` is an iterable
        of Unicode strings and :obj:`encoding` is the :obj:`Encoding` being
        used.
    """
    decoder = IncrementalDecoder(fallback_encoding, errors)
    output = _iter_decode_generator(input, decoder)
    # The generator yields the detected encoding first, then the decoded
    # chunks on demand.
    detected_encoding = next(output)
    return output, detected_encoding
|
def _GetAttributeNames ( self , data_type_definition ) :
"""Determines the attribute ( or field ) names of the members .
Args :
data _ type _ definition ( DataTypeDefinition ) : data type definition .
Returns :
list [ str ] : attribute names .
Raises :
FormatError : if the attribute names cannot be determined from the data
type definition ."""
|
if not data_type_definition :
raise errors . FormatError ( 'Missing data type definition' )
attribute_names = [ ]
for member_definition in data_type_definition . members :
attribute_names . append ( member_definition . name )
return attribute_names
|
def print_device_list(self, device_list=None):
    """Print one tab-separated line per device: name, serial number, alias(es).

    :param device_list: optional list of device objects. When omitted, every
        device in the portal is printed instead.
    """
    if device_list is None:
        device_list = self.get_all_devices_in_portal()
    for device in device_list:
        aliases = device['portals_aliases']
        # A single alias is printed bare; any other count prints the raw list.
        if len(aliases) == 1:
            aliases = aliases[0]
        name = device['info']['description']['name']
        print('{0}\t\t{1}\t\t{2}'.format(name, device['sn'], aliases))
|
def add_batch(self, nlive=500, wt_function=None, wt_kwargs=None, maxiter=None, maxcall=None, save_bounds=True, print_progress=True, print_func=None, stop_val=None):
    """Allocate an additional batch of (nested) samples based on
    the combined set of previous samples using the specified
    weight function.

    Parameters
    ----------
    nlive : int, optional
        The number of live points used when adding additional samples
        in the batch. Default is `500`.
    wt_function : func, optional
        A cost function that takes a `Results` instance
        and returns a log-likelihood range over which a new batch of
        samples should be generated. The default function simply
        computes a weighted average of the posterior and evidence
        information content as::

            weight = pfrac * pweight + (1. - pfrac) * zweight

    wt_kwargs : dict, optional
        Extra arguments to be passed to the weight function.
    maxiter : int, optional
        Maximum number of iterations allowed. Default is `sys.maxsize`
        (no limit).
    maxcall : int, optional
        Maximum number of likelihood evaluations allowed.
        Default is `sys.maxsize` (no limit).
    save_bounds : bool, optional
        Whether or not to save distributions used to bound
        the live points internally during dynamic live point allocations.
        Default is `True`.
    print_progress : bool, optional
        Whether to output a simple summary of the current run that
        updates each iteration. Default is `True`.
    print_func : function, optional
        A function that prints out the current state of the sampler.
        If not provided, the default :meth:`results.print_fn` is used.
    stop_val : float, optional
        The value of the stopping criteria to be passed to
        :meth:`print_func`. Used internally within :meth:`run_nested` to
        keep track of progress.

    Returns
    -------
    (ncall, niter, logl_bounds, results) : tuple
        Updated call/iteration counters, the log-likelihood bounds used for
        the batch, and the last per-iteration results tuple.
    """
    # Initialize values: fall back to "no limit" and to the module-level
    # default weight/print functions when the caller passed None.
    if maxcall is None:
        maxcall = sys.maxsize
    if maxiter is None:
        maxiter = sys.maxsize
    if wt_function is None:
        wt_function = weight_function
    if wt_kwargs is None:
        wt_kwargs = dict()
    if print_func is None:
        print_func = print_fn
    # If we have either likelihood calls or iterations remaining,
    # add our new batch of live points.
    ncall, niter, n = self.ncall, self.it - 1, self.batch
    # NOTE(review): if `maxcall` or `maxiter` <= 0, the branch below is
    # skipped and `logl_bounds`/`results` are unbound at the return
    # statement (NameError) — presumably callers always pass positive
    # limits; confirm before relying on that path.
    if maxcall > 0 and maxiter > 0:  # Compute our sampling bounds using the provided
        # weight function.
        res = self.results
        lnz, lnzerr = res.logz[-1], res.logzerr[-1]
        logl_bounds = wt_function(res, wt_kwargs)
        for results in self.sample_batch(nlive_new=nlive, logl_bounds=logl_bounds, maxiter=maxiter, maxcall=maxcall, save_bounds=save_bounds):
            (worst, ustar, vstar, loglstar, nc, worst_it, boundidx, bounditer, eff) = results
            # When initializing a batch (i.e. when `worst < 0`),
            # don't increment our call counter or our current
            # number of iterations.
            if worst >= 0:
                ncall += nc
                niter += 1
            # Reorganize results.
            # NOTE(review): np.nan entries appear to pad the tuple to the
            # layout `print_func` expects; lnzerr is squared (variance) —
            # verify against the print function's signature.
            results = (worst, ustar, vstar, loglstar, np.nan, np.nan, lnz, lnzerr ** 2, np.nan, nc, worst_it, boundidx, bounditer, eff, np.nan)
            # Print progress.
            if print_progress:
                print_func(results, niter, ncall, nbatch=n + 1, stop_val=stop_val, logl_min=logl_bounds[0], logl_max=logl_bounds[1])
        # Combine batch with previous runs.
        self.combine_runs()
    # Pass back info.
    return ncall, niter, logl_bounds, results
|
def _add_escape_character_for_quote_prime_character ( self , text ) :
"""Fix for https : / / github . com / openatx / facebook - wda / issues / 33
Returns :
string with properly formated quotes , or non changed text"""
|
if text is not None :
if "'" in text :
return text . replace ( "'" , "\\'" )
elif '"' in text :
return text . replace ( '"' , '\\"' )
else :
return text
else :
return text
|
def inter(a, b):
    """Intersect two sets of any data type to form a third set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inter_c.html

    :param a: First input set.
    :type a: spiceypy.utils.support_types.SpiceCell
    :param b: Second input set.
    :type b: spiceypy.utils.support_types.SpiceCell
    :return: Intersection of a and b.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(a, stypes.SpiceCell)
    assert isinstance(b, stypes.SpiceCell)
    assert a.dtype == b.dtype
    # Bug fix: compare dtype with `==`, not `is`. Identity comparison of
    # integers relies on CPython's small-int caching (not guaranteed) and
    # emits a SyntaxWarning on Python >= 3.8.
    if a.dtype == 0:
        # Character cell: width must accommodate the longer of the two.
        c = stypes.SPICECHAR_CELL(max(a.size, b.size), max(a.length, b.length))
    elif a.dtype == 1:
        c = stypes.SPICEDOUBLE_CELL(max(a.size, b.size))
    elif a.dtype == 2:
        c = stypes.SPICEINT_CELL(max(a.size, b.size))
    else:
        raise NotImplementedError
    libspice.inter_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
    return c
|
def absent(name, tags=None, region=None, key=None, keyid=None, profile=None):
    '''Ensure VPC with passed properties is absent.

    name
        Name of the VPC.
    tags
        A list of tags. All tags must match.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    # Look up the VPC id; a lookup error means we cannot proceed.
    lookup = __salt__['boto_vpc.get_id'](name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile)
    if 'error' in lookup:
        ret['result'] = False
        ret['comment'] = 'Failed to delete VPC: {0}.'.format(lookup['error']['message'])
        return ret

    vpc_id = lookup.get('id')
    if not vpc_id:
        # Nothing to do: the VPC is already absent.
        ret['comment'] = '{0} VPC does not exist.'.format(name)
        return ret

    # Dry-run mode: report the pending change without performing it.
    if __opts__['test']:
        ret['comment'] = 'VPC {0} is set to be removed.'.format(name)
        ret['result'] = None
        return ret

    deletion = __salt__['boto_vpc.delete'](vpc_name=name, tags=tags, region=region, key=key, keyid=keyid, profile=profile)
    if not deletion['deleted']:
        ret['result'] = False
        ret['comment'] = 'Failed to delete VPC: {0}.'.format(deletion['error']['message'])
        return ret

    ret['changes']['old'] = {'vpc': vpc_id}
    ret['changes']['new'] = {'vpc': None}
    ret['comment'] = 'VPC {0} deleted.'.format(name)
    return ret
|
def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generator, max_subtoken_length=None, reserved_tokens=None):
    """Inner implementation for vocab generators.

    Args:
      data_dir: The base directory where data and vocab files are stored. If
        None, then do not save the vocab even if it doesn't exist.
      vocab_filename: relative filename where vocab file is stored
      vocab_size: target size of the vocabulary constructed by
        SubwordTextEncoder
      generator: a generator that produces tokens from the vocabulary
      max_subtoken_length: an optional integer. Set this to a finite value to
        avoid quadratic costs during vocab building.
      reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS`
        should be a prefix of `reserved_tokens`. If `None`, defaults to
        `RESERVED_TOKENS`.

    Returns:
      A SubwordTextEncoder vocabulary object.
    """
    vocab_filepath = None
    if data_dir and vocab_filename:
        vocab_filepath = os.path.join(data_dir, vocab_filename)
        # Reuse a previously generated vocab file when one exists on disk.
        if tf.gfile.Exists(vocab_filepath):
            tf.logging.info("Found vocab file: %s", vocab_filepath)
            return text_encoder.SubwordTextEncoder(vocab_filepath)

    tf.logging.info("Generating vocab file: %s", vocab_filepath)
    vocab = text_encoder.SubwordTextEncoder.build_from_generator(
        generator, vocab_size,
        max_subtoken_length=max_subtoken_length,
        reserved_tokens=reserved_tokens)

    # Persist the freshly built vocab only when a target path was resolved.
    if vocab_filepath:
        tf.gfile.MakeDirs(data_dir)
        vocab.store_to_file(vocab_filepath)
    return vocab
|
def get_neighbor_names(self, node_name: str, order: int = 1) -> list:
    """Get the names of all neighbors of a node, and the node itself.

    :param node_name: Node whose neighbor names are requested.
    :param order: Neighborhood order (number of hops) to include.
    :return: A list of names of all neighbors of a node, and the node itself.
    """
    logger.info("In get_neighbor_names()")
    vertex = self.graph.vs.find(name=node_name)
    neighbor_indices = self.graph.neighborhood(vertex, order=order)
    neighbor_names = self.graph.vs[neighbor_indices]["name"]
    # The queried node's own name is appended explicitly.
    neighbor_names.append(node_name)
    return list(neighbor_names)
|
def get_nova_endpoint(cls, json_resp, nova_api_version=None):
    """Parse the service catalog returned by the Identity API for an endpoint matching
    the Nova service with the requested version

    Sends a CRITICAL service check when no viable candidates are found in the Catalog
    """
    nova_version = nova_api_version or DEFAULT_NOVA_API_VERSION
    catalog = json_resp.get('token', {}).get('catalog', [])
    # v2.1 of the Compute API is registered under the 'novav21' service name.
    nova_match = 'novav21' if nova_version == V21_NOVA_API_VERSION else 'nova'
    for entry in catalog:
        if entry['name'] == nova_match or 'Compute' in entry['name']:  # Collect any endpoints on the public or internal interface
            valid_endpoints = {}
            for ep in entry['endpoints']:
                interface = ep.get('interface', '')
                if interface in ['public', 'internal']:
                    valid_endpoints[interface] = ep['url']
            if valid_endpoints:  # Favor public endpoints over internal
                nova_endpoint = valid_endpoints.get("public", valid_endpoints.get("internal"))
                return nova_endpoint
    else:
        # NOTE(review): reconstructed as a for/else — since the loop body has
        # no `break`, this raise is reached whenever no matching endpoint was
        # returned above. Confirm the original indentation against upstream.
        raise MissingNovaEndpoint()
|
def factor_cumulative_returns(factor_data, period, long_short=True, group_neutral=False, equal_weight=False, quantiles=None, groups=None):
    """Simulate a portfolio using the factor in input and returns the
    cumulative returns of the simulated portfolio.

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to,
        and (optionally) the group the asset belongs to.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    period : string
        'factor_data' column name corresponding to the 'period' returns to be
        used in the computation of portfolio returns
    long_short : bool, optional
        if True then simulates a dollar neutral long-short portfolio
        - see performance.create_pyfolio_input for more details
    group_neutral : bool, optional
        If True then simulates a group neutral portfolio
        - see performance.create_pyfolio_input for more details
    equal_weight : bool, optional
        Control the assets weights:
        - see performance.create_pyfolio_input for more details
    quantiles : sequence[int], optional
        Use only specific quantiles in the computation. By default all
        quantiles are used
    groups : sequence[string], optional
        Use only specific groups in the computation. By default all groups
        are used

    Returns
    -------
    pd.Series
        Cumulative returns series indexed by date.
    """
    fwd_ret_cols = utils.get_forward_returns_columns(factor_data.columns)
    if period not in fwd_ret_cols:
        raise ValueError("Period '%s' not found" % period)

    # Drop every forward-returns column except the requested period.
    surplus_cols = list(fwd_ret_cols)
    surplus_cols.remove(period)
    portfolio_data = factor_data.drop(surplus_cols, axis=1)

    # Optionally restrict the universe to selected quantiles and/or groups.
    if quantiles is not None:
        mask = portfolio_data['factor_quantile'].isin(quantiles)
        portfolio_data = portfolio_data[mask]
    if groups is not None:
        mask = portfolio_data['group'].isin(groups)
        portfolio_data = portfolio_data[mask]

    returns = factor_returns(portfolio_data, long_short, group_neutral, equal_weight)
    return cumulative_returns(returns[period], period)
|
def transmit_metrics(self):
    """Keep metrics updated about how long time ago each filetype was
    successfully uploaded.

    Transmits max once per ten seconds, regardless of how many threads are
    running.
    """
    global _last_stats_transmit_time
    # pylint: disable=global-statement
    with _STATS_LOCK:  # pylint: disable=not-context-manager
        # Rate-limit: bail out if the previous transmit was under 10 s ago.
        if time.monotonic() - _last_stats_transmit_time < 10.0:
            return
        for site_name, site_state in self.state.items():
            for filetype, upload_info in site_state["upload"].items():
                last_success = upload_info["last_success"]
                if not last_success:
                    continue
                self.metrics.gauge(
                    "pghoard.last_upload_age",
                    time.monotonic() - last_success,
                    tags={"site": site_name, "type": filetype, },
                )
        _last_stats_transmit_time = time.monotonic()
|
def shape_factors(n, dim=2):
    """Returns a :obj:`numpy.ndarray` of factors :samp:`f` such
    that :samp:`(len(f) == {dim}) and (numpy.product(f) == {n})`.

    The returned factors are as *square* (*cubic*, etc) as possible.
    For example::

        >>> shape_factors(24, 1)
        array([24])
        >>> shape_factors(24, 2)
        array([4, 6])
        >>> shape_factors(24, 3)
        array([2, 3, 4])
        >>> shape_factors(24, 4)
        array([2, 2, 2, 3])
        >>> shape_factors(24, 5)
        array([1, 2, 2, 2, 3])

    :type n: :obj:`int`
    :param n: Integer which is factored into :samp:`{dim}` factors.
    :type dim: :obj:`int`
    :param dim: Number of factors.
    :rtype: :obj:`numpy.ndarray`
    :return: A :samp:`({dim},)` shaped array of integers which are factors
        of :samp:`{n}`.
    """
    if dim <= 1:
        result = [n]
    else:
        # Start just above the dim-th root of n and scan downward for the
        # largest divisor; 1 always divides, so the scan terminates.
        candidate = int(n ** (1.0 / float(dim))) + 1
        while n % candidate:
            candidate -= 1
        result = [candidate] + list(shape_factors(n // candidate, dim=dim - 1))
    result.sort()
    return _np.array(result)
|
def bsp_resize(node: tcod.bsp.BSP, x: int, y: int, w: int, h: int) -> None:
    """.. deprecated:: 2.0
        Assign directly to :any:`BSP` attributes instead.
    """
    # Thin compatibility shim: copy the rectangle onto the node's attributes.
    node.x, node.y = x, y
    node.width, node.height = w, h
|
def get_statements(self, filter=False):
    """Return the combined list of statements from BEL and Pathway Commons.

    Internally calls :py:meth:`get_biopax_stmts` and
    :py:meth:`get_bel_stmts`.

    Parameters
    ----------
    filter : bool
        If True, includes only those statements that exclusively mention
        genes in :py:attr:`gene_list`. Default is False.

    Returns
    -------
    list of :py:class:`indra.statements.Statement`
        List of INDRA statements extracted from the BEL large corpus and
        Pathway Commons, BioPAX-derived statements first.
    """
    biopax_statements = self.get_biopax_stmts(filter=filter)
    bel_statements = self.get_bel_stmts(filter=filter)
    return biopax_statements + bel_statements
|
def prune_dupes(self):
    """Remove all but the last entry for a given resource URI.

    Returns the number of entries removed. Also removes all entries for a
    given URI where the first entry is a create and the last entry is a
    delete.
    """
    removed = 0
    survivors = []          # newest-first entries that survive pass one
    seen_uris = set()
    final_change = {}       # URIs whose newest entry is a delete -> their
                            # change value as we walk back through history
    # Pass one: walk newest-to-oldest, keeping only the newest entry per URI.
    for res in reversed(self.resources):
        if res.uri in seen_uris:
            removed += 1
            if res.uri in final_change:
                final_change[res.uri] = res.change
        else:
            survivors.append(res)
            seen_uris.add(res.uri)
            if res.change == 'deleted':
                final_change[res.uri] = res.change
    # Pass two (original order restored): drop a surviving delete whose
    # oldest discarded entry was the create — a full create/delete lifecycle.
    kept = []
    for res in reversed(survivors):
        if res.uri in final_change and final_change[res.uri] == 'created':
            removed += 1
        else:
            kept.append(res)
    self.resources = kept
    return removed
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.