signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def zrem(self, key, *members):
    """Remove the given members from the sorted set stored at ``key``.

    Members that do not exist in the set are ignored.  Redis replies with
    an error when ``key`` exists but does not hold a sorted set.

    .. note::
       **Time complexity**: ``O(M*log(N))`` with ``N`` being the number of
       elements in the sorted set and ``M`` the number of elements to be
       removed.

    :param key: The key of the sorted set
    :type key: :class:`str`, :class:`bytes`
    :param members: One or more member values to remove
    :type members: :class:`str`, :class:`bytes`
    :rtype: int
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    command = [b'ZREM', key]
    command.extend(members)
    return self._execute(command)
def _method_error_handler(self, response: Dict[str, Any]):
    """Handle a 400-499 range status code by setting an exception on the
    corresponding task.

    :param response: decoded response payload; expected to carry a
        ``CODE`` entry and a ``MESSAGE`` mapping with ``ID`` and
        ``MESSAGE`` keys
    :return: ``True`` -- strictly speaking, returned when no error occurs
    """
    message = response.get('MESSAGE')
    status_code = response.get("CODE")
    task_id = message.get("ID")
    exc = abort(status_code, ID=task_id, message=message.get('MESSAGE'))
    self.tasks[task_id].set_exception(exc)
    return True
def find(self, selector: str = '*', containing: _Containing = None, first: bool = False, _encoding: str = None) -> _Find:
    """Given a simple element name, return a list of
    :class:`Element <Element>` objects or a single one.

    :param selector: Element name to find.
    :param containing: If specified, only return elements that contain the
        provided text (matched case-insensitively).
    :param first: Whether or not to return just the first result.
    :param _encoding: The encoding format.

    If ``first`` is ``True``, only the first
    :class:`Element <Element>` found is returned.
    """
    # Normalize a bare string into a one-element list of search terms.
    if isinstance(containing, str):
        containing = [containing]

    encoding = _encoding or self.encoding
    elements = [
        Element(element=match, default_encoding=encoding)
        for match in self.pq(selector)
    ]

    if containing:
        needles = [term.lower() for term in containing]
        filtered = [
            el for el in elements
            if any(needle in el.text.lower() for needle in needles)
        ]
        # Preserve historical ordering of the containing-filtered results.
        filtered.reverse()
        elements = filtered

    return _get_first_or_list(elements, first)
def cloneTable(self, login, tableName, newTableName, flush, propertiesToSet, propertiesToExclude):
    """Clone an existing table under a new name.

    Sends the RPC request, then blocks until the server's reply has been
    received and validated.

    Parameters:
     - login
     - tableName
     - newTableName
     - flush
     - propertiesToSet
     - propertiesToExclude
    """
    self.send_cloneTable(
        login, tableName, newTableName, flush, propertiesToSet, propertiesToExclude
    )
    self.recv_cloneTable()
def rgb(self, color_tuple):
    """Primary setter shared by rgb256, hls, hls256, hsv and hsv256.

    Clamps the first three channels into the valid float range and, when
    a fourth component is supplied, stores it as the alpha channel.
    """
    # Clamp each of the (up to) three colour channels.
    self.color = tuple(
        self._apply_float_bounds(channel) for channel in color_tuple[:3]
    )
    # A fourth component, when present, is the alpha channel.
    if len(color_tuple) > 3:
        self.alpha = self._apply_float_bounds(color_tuple[3])
def hash(self, rtdc_ds):
    """Identify an ancillary computation.

    Builds an MD5 digest over the required data columns and the required
    configuration key/value pairs of ``rtdc_ds``.
    """
    hasher = hashlib.md5()
    # Fold in each required data column.
    for feature in self.req_features:
        hasher.update(obj2str(rtdc_ds[feature]))
    # Fold in every required configuration key together with its value.
    for section, keys in self.req_config:
        for key in keys:
            value = rtdc_ds.config[section][key]
            hasher.update(obj2str("{}:{}={}".format(section, key, value)))
    return hasher.hexdigest()
def _get_adjustment ( mag , year , mmin , completeness_year , t_f , mag_inc = 0.1 ) :
'''If the magnitude is greater than the minimum in the completeness table
and the year is greater than the corresponding completeness year then
return the Weichert factor
: param float mag :
Magnitude of an earthquake
: param float year :
Year of earthquake
: param np . ndarray completeness _ table :
Completeness table
: param float mag _ inc :
Magnitude increment
: param float t _ f :
Weichert adjustment factor
: returns :
Weichert adjustment factor is event is in complete part of catalogue
(0.0 otherwise )''' | if len ( completeness_year ) == 1 :
if ( mag >= mmin ) and ( year >= completeness_year [ 0 ] ) : # No adjustment needed - event weight = = 1
return 1.0
else : # Event should not be counted
return False
kval = int ( ( ( mag - mmin ) / mag_inc ) ) + 1
if ( kval >= 1 ) and ( year >= completeness_year [ kval - 1 ] ) :
return t_f
else :
return False |
def merge(self, branch, destination="master"):
    """Merge the given WIP branch into ``destination`` (default "master").

    If the merge fails, it is aborted and a MergeException is raised.
    Details about merge conflicts can then be recovered via "git status",
    since the abort restores the working tree.
    """
    # Make sure we are sitting on the destination branch first.
    if self.current_branch() != destination:
        _LOG.debug('checking out ' + destination)
        git(self.gitdir, self.gitwd, "checkout", destination)
    try:
        git(self.gitdir, self.gitwd, "merge", branch)
    except sh.ErrorReturnCode:
        _LOG.exception('merge failed')
        # Roll back the half-finished merge so later operations can still
        # proceed, then signal the failure to the caller.
        git(self.gitdir, self.gitwd, "merge", "--abort")
        raise MergeException()
    # Report the SHA that the destination branch now points at.
    return git(self.gitdir, self.gitwd, "rev-parse", "HEAD").strip()
def get(self, network_id, *args, **kwargs):
    """Get a network by its ID.

    Args:
        network_id (str): The ID of the network.
        verbose (bool): Retrieve the service details across the cluster
            in swarm mode.
        scope (str): Filter the network by scope (``swarm``, ``global``
            or ``local``).

    Returns:
        (:py:class:`Network`) The network.

    Raises:
        :py:class:`docker.errors.NotFound`
            If the network does not exist.
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Inspect via the low-level API, then wrap the raw dict as a model.
    raw = self.client.api.inspect_network(network_id, *args, **kwargs)
    return self.prepare_model(raw)
def from_signed_raw(cls: Type[RevocationType], signed_raw: str) -> RevocationType:
    """Return a Revocation document instance from a signed raw string.

    :param signed_raw: raw document file in duniter format
    :return: the parsed Revocation instance
    """
    # The document is a fixed sequence of "Field: value" lines; consume
    # them in order with an iterator instead of a manual line counter.
    lines = iter(signed_raw.splitlines(True))

    version = int(Revocation.parse_field("Version", next(lines)))
    Revocation.parse_field("Type", next(lines))
    currency = Revocation.parse_field("Currency", next(lines))
    issuer = Revocation.parse_field("Issuer", next(lines))
    identity_uid = Revocation.parse_field("IdtyUniqueID", next(lines))
    identity_timestamp = Revocation.parse_field("IdtyTimestamp", next(lines))
    identity_signature = Revocation.parse_field("IdtySignature", next(lines))
    signature = Revocation.parse_field("Signature", next(lines))

    # Rebuild the embedded identity that this revocation refers to.
    identity = Identity(version, currency, issuer, identity_uid,
                        identity_timestamp, identity_signature)
    return cls(version, currency, identity, signature)
def from_mixed_draws(cls, pst, how_dict, default="gaussian", num_reals=100, cov=None, sigma_range=6, enforce_bounds=True, partial=False):
    """Instantiate a parameter ensemble from stochastic draws using a
    mixture of distributions.

    Available distributions include (log) "uniform", (log) "triangular",
    and (log) "gaussian".  Log transformation is respected.

    Parameters
    ----------
    pst : pyemu.Pst
        a Pst instance
    how_dict : dict
        a dictionary of parnme keys and 'how' values, where "how" can be
        "uniform", "triangular", or "gaussian"
    default : str
        the default distribution used for parameters not listed in
        how_dict
    num_reals : int
        number of realizations to draw
    cov : pyemu.Cov
        an optional Cov instance to use for drawing from the gaussian
        distribution.  If None and "gaussian" is listed in how_dict (or
        default), a diagonal covariance matrix is constructed from the
        parameter bounds in the pst.  Default is None
    sigma_range : float
        the number of standard deviations implied by the bounds in the
        pst.  Only used when "gaussian" is in how_dict (or default) and
        cov is None.  Default is 6.
    enforce_bounds : bool
        flag to enforce parameter bounds in the resulting
        ParameterEnsemble.  Only matters if "gaussian" is in the values
        of how_dict.  Default is True.
    partial : bool
        flag to allow a partial ensemble (not all pars included).
        Default is False

    Returns
    -------
    ParameterEnsemble
    """
    # --- error checking -------------------------------------------------
    accept = {"uniform", "triangular", "gaussian"}
    assert default in accept, "ParameterEnsemble.from_mixed_draw() error: 'default' must be in {0}".format(accept)
    # keep the original parameter data so it can be restored at the end
    par_org = pst.parameter_data.copy()

    # adjustable parameters not named in how_dict fall back to 'default'
    # (unless a partial ensemble was explicitly requested)
    pset = set(pst.adj_par_names)
    hset = set(how_dict.keys())
    missing = pset.difference(hset)
    if not partial and len(missing) > 0:
        print("{0} par names missing in how_dict, these parameters will be sampled using {1} (the 'default')".format(len(missing), default))
        for m in missing:
            how_dict[m] = default

    # every name in how_dict must correspond to a known parameter
    missing = hset.difference(pset)
    assert len(missing) == 0, "ParameterEnsemble.from_mixed_draws() error: the following par names are not in " + " in the pst: {0}".format(','.join(missing))

    # every requested distribution must be one we recognize
    unknown_draw = []
    for pname, how in how_dict.items():
        if how not in accept:
            unknown_draw.append("{0}:{1}".format(pname, how))
    if len(unknown_draw) > 0:
        raise Exception("ParameterEnsemble.from_mixed_draws() error: the following hows are not recognized:{0}".format(','.join(unknown_draw)))

    # --- work out 'how' grouping ----------------------------------------
    how_groups = {how: [] for how in accept}
    for pname, how in how_dict.items():
        how_groups[how].append(pname)

    # --- gaussian --------------------------------------------------------
    pes = []
    if len(how_groups["gaussian"]) > 0:
        gset = set(how_groups["gaussian"])
        par_gaussian = par_org.loc[gset, :]
        par_gaussian.sort_index(inplace=True)
        # temporarily narrow the pst to just the gaussian parameters
        pst.parameter_data = par_gaussian
        if cov is not None:
            # a user-supplied cov must cover every gaussian parameter
            cset = set(cov.row_names)
            diff = gset.difference(cset)
            assert len(diff) == 0, "ParameterEnsemble.from_mixed_draws() error: the 'cov' is not compatible with " + " the parameters listed as 'gaussian' in how_dict, the following are not in the cov:{0}".format(','.join(diff))
        else:
            cov = Cov.from_parameter_data(pst, sigma_range=sigma_range)
        pe_gauss = ParameterEnsemble.from_gaussian_draw(pst, cov, num_reals=num_reals, enforce_bounds=enforce_bounds)
        pes.append(pe_gauss)

    # --- uniform ---------------------------------------------------------
    if len(how_groups["uniform"]) > 0:
        par_uniform = par_org.loc[how_groups["uniform"], :]
        par_uniform.sort_index(inplace=True)
        pst.parameter_data = par_uniform
        pe_uniform = ParameterEnsemble.from_uniform_draw(pst, num_reals=num_reals)
        pes.append(pe_uniform)

    # --- triangular ------------------------------------------------------
    if len(how_groups["triangular"]) > 0:
        par_tri = par_org.loc[how_groups["triangular"], :]
        par_tri.sort_index(inplace=True)
        pst.parameter_data = par_tri
        pe_tri = ParameterEnsemble.from_triangular_draw(pst, num_reals=num_reals)
        pes.append(pe_tri)

    # --- assemble the full ensemble --------------------------------------
    df = pd.DataFrame(index=np.arange(num_reals), columns=par_org.parnme.values)
    # np.nan (np.NaN was removed in NumPy 2.0)
    df.loc[:, :] = np.nan
    # fixed and tied parameters are carried at their initial values
    fixed_tied = par_org.loc[par_org.partrans.apply(lambda x: x in ["fixed", "tied"]), "parval1"].to_dict()
    for p, v in fixed_tied.items():
        df.loc[:, p] = v
    for pe in pes:
        df.loc[pe.index, pe.columns] = pe
    if partial:
        df = df.dropna(axis=1)
    elif df.shape != df.dropna().shape:
        raise Exception("ParameterEnsemble.from_mixed_draws() error: NaNs in final parameter ensemble")
    # restore the original parameter data on the pst
    pst.parameter_data = par_org
    return ParameterEnsemble.from_dataframe(df=df, pst=pst)
def unsubscribe(self, subscriber: 'Subscriber') -> None:
    """Unsubscribe the given subscriber.

    :param subscriber: subscriber to unsubscribe
    :raises SubscriptionError: if subscriber is not subscribed (anymore)
    """
    # Deliberately avoid the obvious self._subscriptions.remove(subscriber):
    # list.remove() compares with ==, and applied to publishers equality
    # yields another publisher instead of a boolean, so an identity scan
    # is required instead.
    for index, registered in enumerate(self._subscriptions):
        if registered is subscriber:
            del self._subscriptions[index]
            return
    raise SubscriptionError('Subscriber is not registered')
def get_symmetrized_structure(self):
    """Get a symmetrized structure.

    A symmetrized structure is one where the sites have been grouped into
    symmetrically equivalent groups.

    Returns:
        :class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.
    """
    dataset = self.get_symmetry_dataset()
    spacegroup = SpacegroupOperations(
        self.get_space_group_symbol(),
        self.get_space_group_number(),
        self.get_symmetry_operations(),
    )
    return SymmetrizedStructure(
        self._structure,
        spacegroup,
        dataset["equivalent_atoms"],
        dataset["wyckoffs"],
    )
def parse(self, fo):
    """Convert BioProspector output to motifs.

    Parameters
    ----------
    fo : file-like
        File object containing BioProspector output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    row_pattern = re.compile(r'^\d+\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
    pwm = []
    motif_id = ""

    def flush():
        # Turn the accumulated PWM rows into a Motif and record it.
        motif = Motif(pwm)
        motif.id = "BioProspector_w%s_%s" % (len(motif), motif_id)
        motifs.append(motif)

    for line in fo.readlines():
        if line.startswith("Motif #"):
            # New motif header: finalize the previous motif, if any.
            if pwm:
                flush()
            motif_id = line.split("#")[1].split(":")[0]
            pwm = []
        else:
            match = row_pattern.search(line)
            if match:
                # Convert the four percentage columns to frequencies.
                pwm.append([float(match.group(i)) / 100.0 for i in range(1, 5)])
    # Finalize the trailing motif.
    if pwm:
        flush()
    return motifs
def datetime_to_iso_date(the_datetime, use_micros=False):
    """Render a datetime as an ISO-8601 UTC ("Z"-suffixed) string.

    Microseconds are dropped unless ``use_micros`` is True.

    >>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12))
    '2013-12-26T10:11:12Z'
    >>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12, 456789))
    '2013-12-26T10:11:12Z'
    >>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12), use_micros=True)
    '2013-12-26T10:11:12Z'
    >>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12, 456789), use_micros=True)
    '2013-12-26T10:11:12.456789Z'
    >>> datetime_to_iso_date(datetime.datetime(2013, 12, 26, 10, 11, 12, 1), use_micros=True)
    '2013-12-26T10:11:12.000001Z'
    """
    if use_micros:
        # isoformat() only emits fractional seconds when they are nonzero,
        # which matches the doctest expectations above.
        return the_datetime.isoformat() + 'Z'
    return the_datetime.strftime(ISO_DT)
def add_resize_bilinear(self, name, input_name, output_name, target_height=1, target_width=1, mode='ALIGN_ENDPOINTS_MODE'):
    """Add a resize-bilinear layer to the model.

    A layer that resizes the input to a given spatial size using bilinear
    interpolation.

    Parameters
    ----------
    name : str
        The name of this layer.
    input_name : str
        The input blob name of this layer.
    output_name : str
        The output blob name of this layer.
    target_height : int
        Output height dimension.
    target_width : int
        Output width dimension.
    mode : str
        One of 'STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE',
        'UPSAMPLE_MODE', 'ROI_ALIGN_MODE'.  Determines the sampling grid
        used for bilinear interpolation; kindly refer to
        NeuralNetwork.proto for details.

    See Also
    --------
    add_upsample
    """
    # Validate the mode BEFORE touching the spec: the original appended
    # the layer first, leaving a half-built layer behind on a bad mode.
    # (Also fixes the "Unspported" typo in the error message.)
    supported_modes = ('STRICT_ALIGN_ENDPOINTS_MODE', 'ALIGN_ENDPOINTS_MODE', 'UPSAMPLE_MODE', 'ROI_ALIGN_MODE')
    if mode not in supported_modes:
        raise ValueError("Unsupported resize bilinear mode %s" % mode)
    nn_spec = self.nn_spec
    # Add a new layer to the network spec and wire up its blobs.
    spec_layer = nn_spec.layers.add()
    spec_layer.name = name
    spec_layer.input.append(input_name)
    spec_layer.output.append(output_name)
    spec_layer_params = spec_layer.resizeBilinear
    spec_layer_params.targetSize.append(target_height)
    spec_layer_params.targetSize.append(target_width)
    # The proto enum value names match the accepted mode strings exactly,
    # so a single lookup replaces the original if/elif chain.
    spec_layer_params.mode.samplingMethod = _NeuralNetwork_pb2.SamplingMode.Method.Value(mode)
def construct_operation_validators(api_path, path_definition, operation_definition, context):
    """Build the validator map for a single operation.

    - consumes (did the request conform to the content types this api consumes)
    - produces (did the response conform to the content types this endpoint produces)
    - parameters (did the parameters of this request validate)
      TODO: move path parameter validation to here, because each operation
      can override any of the path level parameters.
    - schemes (was the request scheme correct)
    - security: TODO since security isn't yet implemented.
    """
    validators = {}

    # sanity check: the operation definition must not collide with the
    # keyword arguments we splat into the validator constructors below
    assert 'context' not in operation_definition
    assert 'api_path' not in operation_definition
    assert 'path_definition' not in operation_definition

    for key in operation_definition.keys():
        # TODO: is silently skipping unknown keys the right thing to do?
        if key not in validator_mapping:
            continue
        validators[key] = validator_mapping[key](
            context=context,
            api_path=api_path,
            path_definition=path_definition,
            **operation_definition
        )

    # Global defaults
    if 'consumes' in context and 'consumes' not in validators:
        validators['consumes'] = validator_mapping['consumes'](**context)
    if 'parameters' in path_definition and 'parameters' not in validators:
        validators['parameters'] = validator_mapping['parameters'](
            context=context,
            api_path=api_path,
            path_definition=path_definition,
            parameters=path_definition['parameters'],
            **operation_definition
        )
    return validators
def _filter_stmts(self, stmts, frame, offset):
    """Filter the given list of statements to remove ignorable statements.

    If self is not a frame itself and the name is found in the inner
    frame locals, statements will be filtered to remove ignorable
    statements according to self's location.

    :param stmts: The statements to filter.
    :type stmts: list(NodeNG)
    :param frame: The frame that all of the given statements belong to.
    :type frame: NodeNG
    :param offset: The line offset to filter statements up to.
    :type offset: int
    :returns: The filtered statements.
    :rtype: list(NodeNG)
    """
    # if offset == -1, my actual frame is not the inner frame but its parent
    # class A(B): pass
    # we need this to resolve B correctly
    if offset == -1:
        myframe = self.frame().parent.frame()
    else:
        myframe = self.frame()
        # If the frame of this node is the same as the statement
        # of this node, then the node is part of a class or
        # a function definition and the frame of this node should be the
        # the upper frame, not the frame of the definition.
        # For more information why this is important,
        # see Pylint issue #295.
        # For example, for 'b', the statement is the same
        # as the frame / scope:
        #
        # def test(b=1):
        #     ...
        if self.statement() is myframe and myframe.parent:
            myframe = myframe.parent.frame()
    mystmt = self.statement()
    # line filtering if we are in the same frame
    #
    # take care node may be missing lineno information (this is the case for
    # nodes inserted for living objects)
    if myframe is frame and mystmt.fromlineno is not None:
        assert mystmt.fromlineno is not None, mystmt
        mylineno = mystmt.fromlineno + offset
    else:
        # disabling lineno filtering
        mylineno = 0

    _stmts = []
    _stmt_parents = []
    statements = self._get_filtered_node_statements(stmts)

    for node, stmt in statements:
        # line filtering is on and we have reached our location, break
        if stmt.fromlineno > mylineno > 0:
            break
        # Ignore decorators with the same name as the
        # decorated function
        # Fixes issue #375
        if mystmt is stmt and is_from_decorator(self):
            continue
        assert hasattr(node, "assign_type"), (
            node,
            node.scope(),
            node.scope().locals,
        )
        assign_type = node.assign_type()

        if node.has_base(self):
            break

        _stmts, done = assign_type._get_filtered_stmts(self, node, _stmts, mystmt)
        if done:
            break

        optional_assign = assign_type.optional_assign
        if optional_assign and assign_type.parent_of(self):
            # we are inside a loop, loop var assignment is hiding previous
            # assignment
            _stmts = [node]
            _stmt_parents = [stmt.parent]
            continue

        # XXX comment various branches below!!!
        try:
            pindex = _stmt_parents.index(stmt.parent)
        except ValueError:
            pass
        else:
            # we got a parent index, this means the currently visited node
            # is at the same block level as a previously visited node
            if _stmts[pindex].assign_type().parent_of(assign_type):
                # both statements are not at the same block level
                continue
            # if currently visited node is following previously considered
            # assignment and both are not exclusive, we can drop the
            # previous one. For instance in the following code::
            #
            #   if a:
            #     x = 1
            #   else:
            #     x = 2
            #   print x
            #
            # we can't remove neither x = 1 nor x = 2 when looking for 'x'
            # of 'print x'; while in the following::
            #
            #   x = 1
            #   x = 2
            #   print x
            #
            # we can remove x = 1 when we see x = 2
            #
            # moreover, on loop assignment types, assignment won't
            # necessarily be done if the loop has no iteration, so we don't
            # want to clear previous assignments if any (hence the test on
            # optional_assign)
            if not (optional_assign or are_exclusive(_stmts[pindex], node)):
                if (
                    # In case of partial function node, if the statement is different
                    # from the origin function then it can be deleted otherwise it should
                    # remain to be able to correctly infer the call to origin function.
                    not node.is_function
                    or node.qname() != "PartialFunction"
                    or node.name != _stmts[pindex].name
                ):
                    del _stmt_parents[pindex]
                    del _stmts[pindex]

        if isinstance(node, AssignName):
            if not optional_assign and stmt.parent is mystmt.parent:
                _stmts = []
                _stmt_parents = []
        elif isinstance(node, DelName):
            # a 'del name' wipes out everything seen so far for this name
            _stmts = []
            _stmt_parents = []
            continue
        if not are_exclusive(self, node):
            _stmts.append(node)
            _stmt_parents.append(stmt.parent)
    return _stmts
def commit(self):
    """Commit changes to disk if attached.

    Normalizes the interface for detached and attached tables and makes
    writing attached tables a bit more efficient.  For a detached table
    this is a no-op: there is no notion of changes, but no error is
    raised either (unlike with :meth:`write`).  For an attached table,
    purely-new records are appended to the existing file; any other kind
    of change triggers a full rewrite.
    """
    if not self.is_attached():
        return
    changes = self.list_changes()
    if not changes:
        return
    indices, records = zip(*changes)
    # When every changed index lies beyond the last synced one, the
    # changes are all new records and can simply be appended; otherwise
    # the whole file must be rewritten.
    if min(indices) > self._last_synced_index:
        self.write(records, append=True)
    else:
        self.write(append=False)
def equities_sids_for_country_code(self, country_code):
    """Return all of the sids for a given country.

    Parameters
    ----------
    country_code : str
        An ISO 3166 alpha-2 country code.

    Returns
    -------
    tuple[int]
        The sids whose exchanges are in this country.
    """
    lifetimes = self._compute_asset_lifetimes([country_code])
    return tuple(lifetimes.sid.tolist())
def energy(self, strand, dotparens, temp=37.0, pseudo=False, material=None, dangles='some', sodium=1.0, magnesium=0.0):
    '''Calculate the free energy of a given sequence structure by running
    the \'energy\' command.

    :param strand: Strand on which to run energy (must be coral.DNA or
                   coral.RNA).
    :type strand: coral.DNA or coral.RNA
    :param dotparens: The structure in dotparens notation.
    :type dotparens: str
    :param temp: Temperature setting for the computation.  Negative
                 values are not allowed.
    :type temp: float
    :param pseudo: Enable pseudoknots.
    :type pseudo: bool
    :param material: The material setting to use in the computation.  If
                     None (the default), inferred from the strands.
                     Other settings: 'dna', 'rna' (1995 parameters), and
                     'rna1999'.
    :type material: str
    :param dangles: How to treat dangles: \'none\' (ignored), \'some\'
                    (one dangle energy per unpaired base flanking a
                    duplex), or 'all' (all dangle energy considered).
    :type dangles: str
    :param sodium: Sodium concentration in solution (molar); DNA only.
    :type sodium: float
    :param magnesium: Magnesium concentration in solution (molar); DNA
                      only.
    :type magnesium: float
    :returns: The free energy of the sequence with the specified
              secondary structure.
    :rtype: float
    '''
    # Infer the material parameter set from the strand when unspecified
    # (used later to set the command's material flag).
    material = self._set_material(strand, material)
    # Assemble the command-line flags.
    cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium, magnesium, multi=False)
    # The input file is the sequence followed by its dot-parens structure;
    # run the command (note: result arrives via the output lines).
    output_lines = self._run('energy', cmd_args, [str(strand), dotparens]).split('\n')
    # The energy value sits on the second-to-last line of the output.
    return float(output_lines[-2])
def generate_rsa_public_and_private(bits=_DEFAULT_RSA_KEY_BITS):
    """<Purpose>
      Generate public and private RSA keys with modulus length 'bits'.
      The returned keys conform to
      'securesystemslib.formats.PEMRSA_SCHEMA' and have the form:
      '-----BEGIN RSA PUBLIC KEY----- ...' or
      '-----BEGIN RSA PRIVATE KEY----- ...'
      Both keys are returned as strings in PEM format.

      A minimum key size of 2048 bits is enforced.  If 'bits' is
      unspecified, a 3072-bit RSA key is generated, which is the key size
      recommended by TUF.

      >>> public, private = generate_rsa_public_and_private(2048)
      >>> securesystemslib.formats.PEMRSA_SCHEMA.matches(public)
      True
      >>> securesystemslib.formats.PEMRSA_SCHEMA.matches(private)
      True

    <Arguments>
      bits:
        The key size, or key length, of the RSA key.  'bits' must be
        2048 or greater; it defaults to 3072 if not specified.

    <Exceptions>
      securesystemslib.exceptions.FormatError, if 'bits' does not contain
      the correct format.

    <Side Effects>
      The RSA keys are generated from pyca/cryptography's
      rsa.generate_private_key() function.

    <Returns>
      A (public, private) tuple containing the RSA keys in PEM format.
    """
    # Validate 'bits' against
    # 'securesystemslib.formats.RSAKEYBITS_SCHEMA': it must be an integer
    # with a minimum value of 2048.  Raises
    # 'securesystemslib.exceptions.FormatError' on failure.
    securesystemslib.formats.RSAKEYBITS_SCHEMA.check_match(bits)

    # pyca/cryptography's 'rsa' module performs the actual key
    # generation; the 2048-bit minimum was enforced by the schema check.
    private_key = rsa.generate_private_key(
        public_exponent=65537, key_size=bits, backend=default_backend())

    # Serialize both halves of the key pair to PEM.  The public key is
    # derived from the private key before serialization.
    private_pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption())
    public_pem = private_key.public_key().public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo)

    return public_pem.decode('utf-8'), private_pem.decode('utf-8')
def set_wts_get_npred_wt(gta, maskname):
    """Set a weights file and get the weighted npred for all the sources.

    Parameters
    ----------
    gta : `fermipy.GTAnalysis`
        The analysis object
    maskname : str
        The path to the file with the mask

    Returns
    -------
    odict : dict
        Dictionary mapping from source name to weighted npred
    """
    # A "null" mask name means no weighting at all.
    if is_null(maskname):
        maskname = None
    gta.set_weights_map(maskname)
    # Re-initialize every source and the ROI so the new weights apply.
    for source_name in gta.like.sourceNames():
        gta._init_source(source_name)
    gta._update_roi()
    return build_srcdict(gta, 'npred_wt')
def setup_default_layouts(self, index, settings):
    """Setup default layouts when run for the first time.

    :param index: key into the layouts table below (``'default'`` or one of
        the MATLAB/RSTUDIO/VERTICAL/HORIZONTAL layout indexes).
    :param settings: window-settings tuple, forwarded to
        ``set_window_settings`` only on the very first Spyder run.
    :return: the layout dictionary that was applied.
    """
    # Freeze repainting while dockwidgets are rearranged.
    self.setUpdatesEnabled(False)
    first_spyder_run = bool(self.first_spyder_run)
    # Store copy
    if first_spyder_run:
        self.set_window_settings(*settings)
    else:
        if self.last_plugin:
            if self.last_plugin.ismaximized:
                self.maximize_dockwidget(restore=True)
        if not (self.isMaximized() or self.maximized_flag):
            self.showMaximized()
        min_width = self.minimumWidth()
        max_width = self.maximumWidth()
        base_width = self.width()
        # Freeze the window width so resizeDocks() below distributes space
        # predictably; the min/max widths are restored at the end.
        self.setFixedWidth(base_width)
    # IMPORTANT: order has to be the same as defined in the config file
    MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
    # Define widgets locally
    editor = self.editor
    console_ipy = self.ipyconsole
    console_int = self.console
    outline = self.outlineexplorer
    explorer_project = self.projects
    explorer_file = self.explorer
    explorer_variable = self.variableexplorer
    plots = self.plots
    history = self.historylog
    finder = self.findinfiles
    help_plugin = self.help
    helper = self.onlinehelp
    plugins = self.thirdparty_plugins
    # Stored for tests
    global_hidden_widgets = [finder, console_int, explorer_project,
                             helper] + plugins
    global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
                              self.search_toolbar]
    # Layout definition
    # Layouts are organized by columns, each column is organized by rows.
    # Widths have to add 1.0 (except if hidden), height per column has to
    # add 1.0 as well
    # Spyder Default Initial Layout
    s_layout = {
        'widgets': [
            # Column 0
            [[explorer_project]],
            # Column 1
            [[editor]],
            # Column 2
            [[outline]],
            # Column 3
            [[help_plugin, explorer_variable, plots,      # Row 0
              helper, explorer_file, finder] + plugins,
             [console_int, console_ipy, history]]         # Row 1
        ],
        'width fraction': [0.05,   # Column 0 width
                           0.55,   # Column 1 width
                           0.05,   # Column 2 width
                           0.45],  # Column 3 width
        'height fraction': [[1.0],          # Column 0, row heights
                            [1.0],          # Column 1, row heights
                            [1.0],          # Column 2, row heights
                            [0.46, 0.54]],  # Column 3, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': [],
    }
    # RStudio
    r_layout = {
        'widgets': [
            # column 0
            [[editor],                                    # Row 0
             [console_ipy, console_int]],                 # Row 1
            # column 1
            [[explorer_variable, plots, history,          # Row 0
              outline, finder] + plugins,
             [explorer_file, explorer_project,            # Row 1
              help_plugin, helper]]
        ],
        'width fraction': [0.55,   # Column 0 width
                           0.45],  # Column 1 width
        'height fraction': [[0.55, 0.45],   # Column 0, row heights
                            [0.55, 0.45]],  # Column 1, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': [],
    }
    # Matlab
    m_layout = {
        'widgets': [
            # column 0
            [[explorer_file, explorer_project],
             [outline]],
            # column 1
            [[editor],
             [console_ipy, console_int]],
            # column 2
            [[explorer_variable, plots, finder] + plugins,
             [history, help_plugin, helper]]
        ],
        'width fraction': [0.10,   # Column 0 width
                           0.45,   # Column 1 width
                           0.45],  # Column 2 width
        'height fraction': [[0.55, 0.45],   # Column 0, row heights
                            [0.55, 0.45],   # Column 1, row heights
                            [0.55, 0.45]],  # Column 2, row heights
        'hidden widgets': global_hidden_widgets,
        'hidden toolbars': [],
    }
    # Vertically split
    v_layout = {
        'widgets': [
            # column 0
            [[editor],                                    # Row 0
             [console_ipy, console_int, explorer_file,    # Row 1
              explorer_project, help_plugin, explorer_variable, plots,
              history, outline, finder, helper] + plugins]
        ],
        'width fraction': [1.0],            # Column 0 width
        'height fraction': [[0.55, 0.45]],  # Column 0, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': [],
    }
    # Horizontally split
    h_layout = {
        'widgets': [
            # column 0
            [[editor]],                                   # Row 0
            # column 1
            [[console_ipy, console_int, explorer_file,    # Row 0
              explorer_project, help_plugin, explorer_variable, plots,
              history, outline, finder, helper] + plugins]
        ],
        'width fraction': [0.55,   # Column 0 width
                           0.45],  # Column 1 width
        'height fraction': [[1.0],   # Column 0, row heights
                            [1.0]],  # Column 1, row heights
        'hidden widgets': [outline] + global_hidden_widgets,
        'hidden toolbars': []
    }
    # Layout selection
    layouts = {
        'default': s_layout,
        RSTUDIO: r_layout,
        MATLAB: m_layout,
        VERTICAL: v_layout,
        HORIZONTAL: h_layout,
    }
    layout = layouts[index]
    # Remove None from widgets layout
    widgets_layout = layout['widgets']
    widgets_layout_clean = []
    for column in widgets_layout:
        clean_col = []
        for row in column:
            clean_row = [w for w in row if w is not None]
            if clean_row:
                clean_col.append(clean_row)
        if clean_col:
            widgets_layout_clean.append(clean_col)
    # Flatten widgets list
    widgets = []
    for column in widgets_layout_clean:
        for row in column:
            for widget in row:
                widgets.append(widget)
    # Make every widget visible
    for widget in widgets:
        widget.toggle_view(True)
        widget.toggle_view_action.setChecked(True)
    # We use both directions to ensure proper update when moving from
    # 'Horizontal Split' to 'Spyder Default'
    # This also seems to help on random cases where the display seems
    # 'empty'
    for direction in (Qt.Vertical, Qt.Horizontal):
        # Arrange the widgets in one direction
        for idx in range(len(widgets) - 1):
            first, second = widgets[idx], widgets[idx + 1]
            if first is not None and second is not None:
                self.splitDockWidget(first.dockwidget, second.dockwidget,
                                     direction)
    # Arrange the widgets in the other direction
    for column in widgets_layout_clean:
        for idx in range(len(column) - 1):
            first_row, second_row = column[idx], column[idx + 1]
            self.splitDockWidget(first_row[0].dockwidget,
                                 second_row[0].dockwidget,
                                 Qt.Vertical)
    # Tabify
    for column in widgets_layout_clean:
        for row in column:
            for idx in range(len(row) - 1):
                first, second = row[idx], row[idx + 1]
                self.tabify_plugins(first, second)
            # Raise front widget per row
            row[0].dockwidget.show()
            row[0].dockwidget.raise_()
    # Set dockwidget widths
    width_fractions = layout['width fraction']
    if len(width_fractions) > 1:
        _widgets = [col[0][0].dockwidget for col in widgets_layout]
        self.resizeDocks(_widgets, width_fractions, Qt.Horizontal)
    # Set dockwidget heights
    height_fractions = layout['height fraction']
    for idx, column in enumerate(widgets_layout_clean):
        if len(column) > 1:
            _widgets = [row[0].dockwidget for row in column]
            self.resizeDocks(_widgets, height_fractions[idx], Qt.Vertical)
    # Hide toolbars
    hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
    for toolbar in hidden_toolbars:
        if toolbar is not None:
            toolbar.close()
    # Hide widgets
    hidden_widgets = layout['hidden widgets']
    for widget in hidden_widgets:
        if widget is not None:
            widget.dockwidget.close()
    if first_spyder_run:
        self.first_spyder_run = False
    else:
        # Restore the resizable width range frozen above.
        self.setMinimumWidth(min_width)
        self.setMaximumWidth(max_width)
        if not (self.isMaximized() or self.maximized_flag):
            self.showMaximized()
    self.setUpdatesEnabled(True)
    self.sig_layout_setup_ready.emit(layout)
    return layout
def info(self, val):
    """Set the Info metric to the given mapping of label names to values.

    Raises ValueError when any key of ``val`` collides with a declared
    metric label name.
    """
    overlapping = self._labelname_set & set(val.keys())
    if overlapping:
        raise ValueError('Overlapping labels for Info metric, metric: %s child: %s' % (self._labelnames, val))
    # Copy under the lock so concurrent readers never see a partial update.
    with self._lock:
        self._value = dict(val)
def to_df(self, method: str = 'MEMORY', **kwargs) -> 'pd.DataFrame':
    """Export this SAS Data Set to a Pandas Data Frame.

    :param method: defaults to MEMORY, the original method. CSV is the other
        choice, which uses an intermediary csv file; faster for large data.
    :param kwargs: passed through to the underlying conversion routine.
    :return: Pandas data frame, or None if the data set is not valid
        (the validation log is printed in that case).
    """
    error_log = self._is_valid()
    if not error_log:
        return self.sas.sasdata2dataframe(self.table, self.libref,
                                          self.dsopts, method, **kwargs)
    print(error_log['LOG'])
    return None
def delete_atom(self, *atom_numbers):
    """Delete atoms by atom number, removing their bonds and updating the
    numbering of the remaining atoms.

    :param str atom_numbers: one or more atom numbers (as strings) to delete.
    :return: None.
    :rtype: :py:obj:`None`
    """
    for atom_number in atom_numbers:
        deletion_atom = self.atom_by_number(atom_number=atom_number)
        # Update atom numbers: every atom after the deleted one shifts down.
        for atom in self.atoms:
            if int(atom.atom_number) > int(atom_number):
                atom.atom_number = str(int(atom.atom_number) - 1)
        # BUGFIX: iterate over a snapshot of the bond list. The previous
        # code removed bonds from self.bonds while iterating it, which made
        # the iterator skip the bond immediately following each removed one,
        # leaving stale bonds that referenced the deleted atom.
        for bond in list(self.bonds):
            bond.update_atom_numbers()
            if atom_number in {bond.first_atom_number, bond.second_atom_number}:
                self.bonds.remove(bond)
        # Remove the deleted atom from all neighbor lists.
        for atom in self.atoms:
            if deletion_atom in atom.neighbors:
                atom.neighbors.remove(deletion_atom)
        self.atoms.remove(deletion_atom)
def _get_brew_versions():
    """Retrieve versions of tools installed via brew.

    Returns a dict mapping tool name to its most recent installed version;
    empty when brew is not installed or not used.
    """
    from bcbio import install
    tooldir = install.get_defaults().get("tooldir")
    if tooldir:
        brew_cmd = os.path.join(tooldir, "bin", "brew")
    else:
        brew_cmd = "brew"
    try:
        listing = subprocess.check_output([brew_cmd, "list", "--versions"])
    except OSError:
        # brew not installed/used
        listing = ""
    versions = {}
    for entry in listing.split("\n"):
        if entry.strip():
            fields = entry.rstrip().split()
            # "name v1 v2 ..." -- keep the last (newest) version listed.
            versions[fields[0]] = fields[-1]
    return versions
def _detect ( self ) :
"""Detect the suicidal functions""" | results = [ ]
for c in self . contracts :
functions = self . detect_suicidal ( c )
for func in functions :
txt = "{}.{} ({}) allows anyone to destruct the contract\n"
info = txt . format ( func . contract . name , func . name , func . source_mapping_str )
json = self . generate_json_result ( info )
self . add_function_to_json ( func , json )
results . append ( json )
return results |
def get_user_switchable_roles(self):
    """Return the user's roles except the current one.

    Returns:
        (list): list of ``(role.key, "<unit name> <abstract role name>")``
        tuples for every role of the user other than the active one.
    """
    # Compare keys, not role objects: object equality is unreliable here
    # (rs.role != self.current.role is not True after python version 2.7.12).
    current_key = self.current.role.key
    return [
        (rs.role.key, '%s %s' % (rs.role.unit.name, rs.role.abstract_role.name))
        for rs in self.current.user.role_set
        if rs.role.key != current_key
    ]
def list_nodes_min():
    '''
    Return a list of registered VMs, with minimal information

    CLI Example:

    .. code-block:: bash

        salt '*' vboxmanage.list_nodes_min
    '''
    nodes = {}
    output = salt.modules.cmdmod.run('{0} list vms'.format(vboxcmd()))
    for line in output.splitlines():
        if line.strip():
            # Lines look like: "vm name" {uuid} -- keep the quoted name.
            name = line.split()[0].replace('"', '')
            nodes[name] = True
    return nodes
def dom_processing(self, value):
    """The dom_processing property.

    Args:
        value (string): the property value. Setting the default value
        removes the stored override instead of storing it.
    """
    is_default = value == self._defaults['domProcessing']
    if is_default and 'domProcessing' in self._values:
        del self._values['domProcessing']
    else:
        self._values['domProcessing'] = value
def _cache_get_entry(self, entry_name, key=ENTIRE_ENTRY_KEY, default=False):
    """Return a cache entry parameter value by its name.

    :param str entry_name: name of the cache entry.
    :param str key: parameter key; the sentinel ``ENTIRE_ENTRY_KEY``
        (identity-compared) returns the whole entry.
    :param type default: value returned when ``key`` is missing.
    :return: the parameter value, or the entire entry dict.
    """
    entry = self._cache[entry_name]
    if key is not self.ENTIRE_ENTRY_KEY:
        return entry.get(key, default)
    return entry
def contains(ell, p, shell_only=False):
    """Check whether point ``p`` lies inside (or on) the conic ``ell``.

    :param ell: conic object providing a ``solve`` method.
    :param p: the point to classify (augmented to homogeneous form).
    :param shell_only: if True, only points lying exactly on the conic
        shell are considered contained (default: False).
    """
    v = augment(p)
    _ = ell.solve(v)
    # On-shell test uses a numeric tolerance; otherwise the sign of the
    # solved value decides inside (<= 0) vs outside.
    return N.allclose(_, 0) if shell_only else _ <= 0
def level(self, new_level):
    """Set the new output level, sending the zone-level command to the
    controller only when the value actually changes."""
    if new_level == self._level:
        return
    self._lutron.send(Lutron.OP_EXECUTE, Output._CMD_TYPE,
                      self._integration_id, Output._ACTION_ZONE_LEVEL,
                      "%.2f" % new_level)
    self._level = new_level
def get_networks(self):
    """Get the networks associated with the resource description.

    Returns
    -------
    list of (roles, network) tuples, skipping networks that have not been
    concretized yet (missing ``_c_network``).
    """
    pairs = []
    for net in self.c_resources["networks"]:
        concrete = net.get("_c_network")
        if concrete is not None:
            pairs.append((utils.get_roles_as_list(net), concrete))
    return pairs
def libseq_parse(self, line):
    """This method parses a line containing a lib_seq string and adds it to
    the lib_seq key in the self.params dictionary.

    NOTE(review): the code below actually writes into ``self.data`` (keys
    "globs", "pairs" and the names from ``self.indices``) -- confirm
    whether the docstring's mention of ``self.params`` is stale.
    """
    # Exactly one comma is allowed: it separates the two filepath globs.
    count = line.count(',')
    if count > 1:
        raise ValueError("""ERROR: There are too many commas in your
lib_seq line. There should only be one, between the two filepath
globs.""")
    if count == 0:
        raise ValueError("""ERROR: There are no commas in your
lib_seq line. There should be one, between the two filepath
globs.""")
    line = [x.strip() for x in line.split() if x]
    # print(line)
    # This block tries to fix user input if there is an inappropriate
    # number of commas or spaces associated with commas
    if len(line) > 13:
        commas = [x for x in line if "," in x]
        if commas:
            # this handles the case if there was an extra space added on
            # both sides of the comma
            if (len(commas) == 1) and (commas[0] != ','):
                if (commas[0][-1] == ',') or (commas[0][0] == ','):
                    line = [line[0]] + ["".join(line[1:3])] + line[3:]
            # This handles the case where there is an extra space on either
            # side of the comma
            elif line[2] == ',':
                line = [line[0]] + ["".join(line[1:4])] + line[4::]
    # make sure there is an appropriate numer of elements in list
    if (len(line)) != 13:
        raise ValueError("""ERROR: There are {0} values in the lib_seq line when
there should be 12. Check your input meraculous
config file for errors and make sure that it matches
the specification for the Meraculous manual.""".format(len(line)))
    for index in self.indices:
        if index == 1:
            # Field 1 holds the two read-file globs, comma separated.
            globs = [os.path.abspath(x.strip()) for x in line[1].split(',') if x]
            # make sure there are only two filepaths to search for globs
            if len(globs) != 2:
                raise ValueError("""ERROR: remember to split your glob
filepaths in lib_seq with a comma. This error occured because
the program was looking for two glob filepaths, but instead
found {}: {}""".format(len(globs), globs))
            self.data["globs"] = globs
            # Note that glob.glob returns an empty list if the file or glob
            # filepath does not exist.
            forwards = glob.glob(os.path.abspath(globs[0]))
            reverses = glob.glob(os.path.abspath(globs[1]))
            if (len(forwards) < 1) or (len(reverses) < 1):
                report = []
                if len(forwards) < 1:
                    report.append(globs[0])
                if len(reverses) < 1:
                    report.append(globs[1])
                raise ValueError("""ERROR: no read files were found for the glob strings,
{}.
Please check the glob string and/or use absolute
file paths. A common source of this error is using
relative filepaths from the incorrect directory, or
referring to files that do not exist.""".format(report))
            if len(forwards) != len(reverses):
                raise ValueError("""ERROR: the number of reverse read files does not
match the number of forward read files""")
            # zip the pairs together into tuples, not lists
            self.data["pairs"] = [x for x in zip(sorted(forwards), sorted(reverses))]
        # Store each raw field under its configured name.
        self.data[self.indices.get(index)] = line[index]
def get_range_start_line_number(self, rng):
    """Return the 1-based line number where ``rng`` starts.

    .. warning:: not implemented -- the method currently reports an error
       and terminates the process.
    """
    sys.stderr.write("error unimplemented get_range_start_line\n")
    sys.exit()
    # NOTE: the code below is unreachable (sys.exit() above); it sketches
    # the intended implementation: find the line whose range matches.
    for i in range(0, len(self._lines)):
        if rng.cmp(self._lines[i]['rng']) == 0:
            return i + 1
    return None
def _get_image_stream_info_for_build_request ( self , build_request ) :
"""Return ImageStream , and ImageStreamTag name for base _ image of build _ request
If build _ request is not auto instantiated , objects are not fetched
and None , None is returned .""" | image_stream = None
image_stream_tag_name = None
if build_request . has_ist_trigger ( ) :
image_stream_tag_id = build_request . trigger_imagestreamtag
image_stream_id , image_stream_tag_name = image_stream_tag_id . split ( ':' )
try :
image_stream = self . get_image_stream ( image_stream_id ) . json ( )
except OsbsResponseException as x :
if x . status_code != 404 :
raise
if image_stream :
try :
self . get_image_stream_tag ( image_stream_tag_id ) . json ( )
except OsbsResponseException as x :
if x . status_code != 404 :
raise
return image_stream , image_stream_tag_name |
def compile_rename_column(self, blueprint, command, connection):
    """Compile a rename column command.

    :param blueprint: The blueprint
    :type blueprint: Blueprint
    :param command: The command
    :type command: Fluent
    :param connection: The connection
    :type connection: orator.connections.Connection
    :rtype: list
    """
    # SQLite rebuilds the table on rename; foreign keys must be disabled
    # around the rebuild and re-enabled afterwards.
    result = self._connection.select("PRAGMA foreign_keys")
    fk_enabled = bool(result[0]) if result else False
    sql = []
    if fk_enabled:
        sql.append("PRAGMA foreign_keys = OFF")
    sql += super(SQLiteSchemaGrammar, self).compile_rename_column(
        blueprint, command, connection)
    if fk_enabled:
        sql.append("PRAGMA foreign_keys = ON")
    return sql
def log(cls, level, message):
    """Write one record to the class logger, setting up logging on first use.

    :param level: log level name; one of 'DEBUG', 'INFO', 'WARNING',
        'ERROR' or 'CRITICAL'.
    :type level: str
    :param message: the logging data to write. For a csv-compatible format,
        separate fields with commas and double-quote any field that may
        contain commas, spaces or other special characters.
    :type message: str
    """
    if cls.logger is None:
        cls.setup_logging()
    severity = {
        'DEBUG': logging.DEBUG,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'ERROR': logging.ERROR,
        'CRITICAL': logging.CRITICAL,
    }[level]
    cls.logger.log(level=severity, msg=message)
def objset_data(self):
    '''Return the terms representation for our objset interface.

    With no qualifier, return just the string content value; otherwise a
    dict of {q: qualifier, v: content}.
    '''
    if self.qualifier:
        return dict(q=unicode(self.qualifier), v=unicode(self.content))
    return unicode(self.content)
async def get_source_list(self, scheme: str = "") -> List[Source]:
    """Return available sources for playback."""
    raw_sources = await self.services["avContent"]["getSourceList"](scheme=scheme)
    return [Source.make(**source) for source in raw_sources]
def fuzzy_get_value(obj, approximate_key, default=None, **kwargs):
    """Like fuzzy_get, but assume obj is dict-like and return only the value.

    Notes:
        Argument order is in reverse order relative to
        `fuzzywuzzy.process.extractOne()` but in the same order as the
        get(self, key) method on dicts.

        NOTE(review): the ``default`` argument is accepted but never used --
        neither the integer-index path nor the fuzzy_get call forwards it.
        Confirm whether it should be passed through to ``fuzzy_get``.

    Arguments:
        obj (dict-like): object to run the get method on using the key that
            is most similar to one within the dict
        approximate_key (str): key to look for a fuzzy match within the dict
            keys; a string that parses as an int is treated as a positional
            index into the (ordered) keys instead
        default (obj): the value to return if a similar key cannot be found
        **kwargs: forwarded to ``fuzzy_get`` (similarity, tuple_joiner,
            dict_keys, ...)

    Examples:
        >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e')}, 'sail') == set(['e'])
        True
        >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'SLR')
        2.7
        >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'I') == set(['e'])
        True
        >>> fuzzy_get_value({'word': tuple('word'), 'noun': tuple('noun')}, 'woh!', similarity=.3)
        ('w', 'o', 'r', 'd')
        >>> df = pd.DataFrame(np.arange(6*2).reshape(2,6), columns=('alpha', 'beta', 'omega', 'begin', 'life', 'end'))
        >>> fuzzy_get_value(df, 'life')[0], fuzzy_get(df, 'omega')[0]
        (4, 2)
    """
    dict_obj = OrderedDict(obj)
    # First try to interpret approximate_key as a positional index into the
    # ordered keys; fall back to fuzzy matching when it is not an int or is
    # out of range.
    try:
        return dict_obj[list(dict_obj.keys())[int(approximate_key)]]
    except (ValueError, IndexError):
        pass
    return fuzzy_get(dict_obj, approximate_key, key_and_value=False, **kwargs)
def textForSaving(self):
    """Get text with correct EOL symbols.

    Use this method for saving a file to storage.
    """
    content = self.text
    pieces = content.splitlines()
    if content.endswith('\n'):
        # splitlines drops the trailing empty segment; restore it.
        pieces = pieces + ['']
    return self.eol.join(pieces) + self.eol
def get_extension_reports(self, publisher_name, extension_name, days=None,
                          count=None, after_date=None):
    """GetExtensionReports.

    [Preview API] Returns extension reports.

    :param str publisher_name: Name of the publisher who published the extension
    :param str extension_name: Name of the extension
    :param int days: Last n days report. If afterDate and days are specified,
        days will take priority
    :param int count: Number of events to be returned
    :param datetime after_date: Use if you want to fetch events newer than
        the specified date
    :rtype: object
    """
    serialize = self._serialize
    route_values = {}
    if publisher_name is not None:
        route_values['publisherName'] = serialize.url('publisher_name', publisher_name, 'str')
    if extension_name is not None:
        route_values['extensionName'] = serialize.url('extension_name', extension_name, 'str')
    query_parameters = {}
    if days is not None:
        query_parameters['days'] = serialize.query('days', days, 'int')
    if count is not None:
        query_parameters['count'] = serialize.query('count', count, 'int')
    if after_date is not None:
        query_parameters['afterDate'] = serialize.query('after_date', after_date, 'iso-8601')
    response = self._send(http_method='GET',
                          location_id='79e0c74f-157f-437e-845f-74fbb4121d4c',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('object', response)
def clear(self):
    """Clears all the axes to start fresh by removing every image object."""
    for axis in self.flat_grid:
        for image_handle in axis.findobj(AxesImage):
            image_handle.remove()
def normalize(vector, cutoffp=(0, 100), model=False):
    r"""Return a feature-wise normalized version of the supplied vector.

    Normalization is achieved to [0, 1] per feature (column) over the
    complete vector using shifting and scaling.

    When cut-off percentile (cutoffp) values other than (0, 100) are
    supplied, values lying before or behind the supplied percentiles are
    cut off, i.e. shifted to fit the range.

    When ``model`` is True, an additional model describing the
    normalization is returned that can later be passed to
    ``normalize_with_model`` to normalize other feature vectors in the
    same way.

    Parameters
    ----------
    vector : sequence
        A sequence of feature vectors to normalize, shaped
        samples x features (a 1-D input is treated as a single feature).
    cutoffp : (float, float)
        Cut-off percentiles.
    model : bool
        Whether to return the learned normalization model.

    Returns
    -------
    normalized_feature_vectors : ndarray
        The normalized versions of the input vectors.
    model : tuple, optional
        The learned normalization model ``(minp, maxp, minv, maxv)``.
    """
    # BUGFIX: ``numpy.float`` was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin ``float`` is the documented replacement.
    vector = numpy.array(vector, dtype=float)
    # add a singleton dimension if required
    if 1 == vector.ndim:
        vector = vector[:, None]
    # compute lower and upper range border of each column using the
    # supplied percentiles
    minp, maxp = numpy.percentile(vector, cutoffp, 0)
    # shift outliers to fit range (per-column clip; bounds broadcast)
    vector = numpy.clip(vector, minp, maxp)
    # normalize: shift to zero, then scale to [0, 1]
    minv = vector.min(0)
    vector -= minv
    maxv = vector.max(0)
    vector /= maxv
    if not model:
        return vector
    else:
        return vector, (minp, maxp, minv, maxv)
def genome_for_reference_name(reference_name,
                              allow_older_downloaded_release=True):
    """Given a genome reference name, such as "GRCh38", return the
    corresponding Ensembl Release object.

    If ``allow_older_downloaded_release`` is True and older releases have
    been downloaded, return the most recent locally available release.
    Otherwise return the newest release of Ensembl (even if its data has
    not been downloaded yet).
    """
    normalized = normalize_reference_name(reference_name)
    species = find_species_by_reference(normalized)
    min_release, max_release = species.reference_assemblies[normalized]
    if allow_older_downloaded_release:
        # Walk candidate releases newest-first, preferring one whose data
        # is already on disk.
        for release in range(max_release, min_release - 1, -1):
            candidate = EnsemblRelease.cached(release=release, species=species)
            if candidate.required_local_files_exist():
                return candidate
    return EnsemblRelease.cached(release=max_release, species=species)
def send_broks_to_modules(self):
    """Put broks into external module queues.

    Only broks whose ``sent_to_externals`` flag is not yet set are sent,
    and only modules that want a given brok receive it.

    :return: None
    """
    t00 = time.time()
    nb_sent = 0
    # Collect every brok not yet dispatched to external modules.
    pending = []
    for broker_link in list(self.my_daemon.brokers.values()):
        for brok in broker_link.broks:
            if not getattr(brok, 'sent_to_externals', False):
                brok.to_send = True
                pending.append(brok)
    if not pending:
        return
    logger.debug("sending %d broks to modules...", len(pending))
    for module in self.my_daemon.modules_manager.get_external_instances():
        logger.debug("Look for sending to module %s", module.get_name())
        queue = module.to_q
        if queue:
            wanted = [b for b in pending if module.want_brok(b)]
            queue.put(wanted)
            nb_sent += len(wanted)
    # Mark everything as dispatched so it is not sent again.
    for broker_link in list(self.my_daemon.brokers.values()):
        for brok in broker_link.broks:
            if not getattr(brok, 'sent_to_externals', False):
                brok.to_send = False
                brok.sent_to_externals = True
    logger.debug("Time to send %d broks (after %d secs)", nb_sent,
                 time.time() - t00)
def _check_decorator ( fct ) :
"""Check if the plugin is enabled .""" | def wrapper ( self , * args , ** kw ) :
if self . is_enable ( ) :
ret = fct ( self , * args , ** kw )
else :
ret = self . stats
return ret
return wrapper |
def _get_initial_residual(self, x0):
    '''Return the projected initial residual.

    Returns :math:`MPM_l(b - Ax_0)` along with the projected residual and
    its norm.

    :param x0: initial guess, or None for the zero vector.
    '''
    if x0 is None:
        # x0 == 0: the left-preconditioned residual is just Ml*b,
        # which the linear system precomputes.
        Mlr = self.linear_system.Mlb
    else:
        r = self.linear_system.b - self.linear_system.A * x0
        Mlr = self.linear_system.Ml * r
    # Project out the deflation subspace; also keep the Ya factor for
    # later correction steps.
    PMlr, self.UMlr = self.projection.apply_complement(Mlr, return_Ya=True)
    MPMlr = self.linear_system.M * PMlr
    # Norm in the inner product defined by the linear system.
    MPMlr_norm = utils.norm(PMlr, MPMlr, ip_B=self.linear_system.ip_B)
    return MPMlr, PMlr, MPMlr_norm
def install(self, host):
    """Setup common to all Qt-based hosts: connect quit handling and
    install the window event filter.

    :param host: name of the host application (e.g. "Maya").
    """
    # NOTE(review): the bare print() calls below look like leftover debug
    # output -- consider routing them through ``log`` instead.
    print("Installing..")
    if self._state["installed"]:
        return
    if self.is_headless():
        log.info("Headless host")
        return
    print("aboutToQuit..")
    self.app.aboutToQuit.connect(self._on_application_quit)
    if host == "Maya":
        print("Maya host..")
        # Locate Maya's main window among the application's top-level
        # widgets by object name.
        window = {widget.objectName(): widget
                  for widget in self.app.topLevelWidgets()}["MayaWindow"]
    else:
        window = self.find_window()
    # Install event filter
    print("event filter..")
    event_filter = self.EventFilter(window)
    window.installEventFilter(event_filter)
    # Make sure the filter is uninstalled again on the relevant signals.
    for signal in SIGNALS_TO_REMOVE_EVENT_FILTER:
        pyblish.api.register_callback(signal, self.uninstall)
    log.info("Installed event filter")
    self.window = window
    self._state["installed"] = True
    self._state["eventFilter"] = event_filter
def is_directory_index(resource):
    """Classify the input resource as a directory index or not."""
    if re.match(r"^https?://(.+)/?$", resource, re.I) is None:
        juicer.utils.Log.log_debug("%s doesn't match directory index regexp" % resource)
        return False
    juicer.utils.Log.log_debug("%s matches directory index regexp" % resource)
    return True
def assist(self, project_path, source, position, filename):
    """Return completion match and list of completion proposals.

    :param project_path: absolute project path
    :param source: unicode or byte string code source
    :param position: character or byte cursor position
    :param filename: absolute path of file with source code
    :returns: tuple (completion match, sorted list of proposals)
    """
    request = ('assist', project_path, source, position, filename)
    return self._call(*request)
def _install_p4v_linux(self, url):
    """Install perforce applications and binaries for linux.

    Extracts the tarball into the feature's install directory and symlinks
    every shipped binary into the bin directory.
    """
    install_dir = self.directory.install_directory(self.feature_name)
    lib.extract_targz(url, install_dir, remove_common_prefix=True)
    bin_path = os.path.join(install_dir, 'bin')
    if os.path.exists(bin_path):
        for executable in os.listdir(bin_path):
            self.directory.symlink_to_bin(executable,
                                          os.path.join(bin_path, executable))
    return True
def _evalDayStr(self, datetimeString, sourceTime):
    """Evaluate text passed by L{_partialParseDaystr()}.

    Resolves natural-language day strings such as "today" or "tomorrow"
    against ``sourceTime`` and returns the resulting time tuple.
    """
    text = datetimeString.strip()
    sourceTime = self._evalDT(datetimeString, sourceTime)
    (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    # Unknown words contribute no day offset.
    offset = self.ptc.dayOffsets.get(text, 0)
    if self.ptc.StartTimeFromSourceTime:
        hour, minute, second = hr, mn, sec
    else:
        # Default start-of-day time is 9:00:00.
        hour, minute, second = 9, 0, 0
    self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
    anchor = datetime.datetime(yr, mth, dy, hour, minute, second)
    return (anchor + datetime.timedelta(days=offset)).timetuple()
def get_attr(self, name, default=None, fail_silently=True):
    """Return attribute *name*, falling back to ``self.extra_context``.

    Fix: the original caught ``KeyError``, but ``getattr`` raises
    ``AttributeError`` — so the ``extra_context`` fallback was dead code and
    missing attributes always raised. It also silently discarded non-callable
    values found in ``extra_context`` and never used ``fail_silently``.

    :param name: attribute name to look up
    :param default: value returned when the name is found nowhere and
        ``fail_silently`` is true
    :param fail_silently: when False, re-raise the ``AttributeError`` instead
        of returning *default*
    :raises AttributeError: if the name is missing and ``fail_silently`` is False
    """
    try:
        return getattr(self, name)
    except AttributeError:
        extra_context = getattr(self, "extra_context")
        if name in extra_context:
            value = extra_context[name]
            if callable(value):
                # Callables are invoked; request=None kept for backward
                # compatibility with the original call convention.
                return value(request=None)
            return value
        if not fail_silently:
            raise
        return default
def create_task(self, *command_tokens, **command_env):
    """Build a scheduled task for this launcher's basic command.

    :return: WLauncherScheduleTask
    """
    # Instantiate the configured task class with the base command followed
    # by any extra tokens; environment entries pass through as keywords.
    task_cls = self.__scheduled_task_cls
    return task_cls(self.basic_command(), *command_tokens, **command_env)
def __hide_or_show_root_items(self, item):
    """Toggle visibility of top-level tree items around *item*.

    When ``show_all_files`` is disabled, every root item except *item* is
    hidden; when enabled, all root items remain visible.
    """
    keep_all_visible = self.show_all_files
    for root_item in self.get_top_level_items():
        hide_it = root_item is not item and not keep_all_visible
        root_item.setHidden(hide_it)
def visit_with(self, node):  # 'with' without 'as' is possible
    """Return an astroid.With node rendered as source text."""
    clauses = []
    for ctx_expr, alias in node.items:
        rendered = "%s" % ctx_expr.accept(self)
        # Only emit the "as <name>" part when the item actually binds a name.
        if alias:
            rendered += " as %s" % (alias.accept(self))
        clauses.append(rendered)
    return "with %s:\n%s" % (", ".join(clauses), self._stmt_list(node.body))
def _parse_client_keys(stream):
    '''Parse a hidden-service "client_keys" file, either stealth or
    basic (they're the same, except "stealth" includes a
    "client-key"). Returns a list of HiddenServiceClientAuth() instances.

    Note that the key does NOT include the "----BEGIN---" markers,
    nor *any* embedded whitespace. It is *just* the key blob.
    '''
    def parse_error(data):
        # Any line that doesn't fit the expected grammar aborts the parse.
        raise RuntimeError("Parse error at: " + data)

    class ParserState(object):
        # Accumulates the fields of the client currently being parsed and
        # collects finished HiddenServiceClientAuth objects in self.keys.
        def __init__(self):
            self.keys = []
            self.reset()

        def reset(self):
            # Clear the per-client fields between clients.
            self.name = None
            self.cookie = None
            self.key = []

        def create_key(self):
            # Flush the in-progress client (if any) into the result list.
            if self.name is not None:
                self.keys.append(HiddenServiceClientAuth(self.name, self.cookie, self.key))
            self.reset()

        def set_name(self, name):
            # A new "client-name" line terminates the previous client.
            self.create_key()
            self.name = name.split()[1]

        def set_cookie(self, cookie):
            self.cookie = cookie.split()[1]
            # Strip base64 padding from the descriptor cookie.
            if self.cookie.endswith('=='):
                self.cookie = self.cookie[:-2]

        def add_key_line(self, line):
            # Accumulate raw RSA key lines (markers/whitespace excluded by
            # the transitions below).
            self.key.append(line)

    # NOTE(review): FSM/State/Transition semantics come from
    # txtorcon.spaghetti — transitions appear to be tried in order, calling
    # the handler whose predicate matches; confirm against that module.
    from txtorcon.spaghetti import FSM, State, Transition
    init = State('init')
    got_name = State('got_name')
    got_cookie = State('got_cookie')
    reading_key = State('got_key')
    parser_state = ParserState()
    # initial state; we want "client-name" or it's an error
    init.add_transitions([
        Transition(got_name, lambda line: line.startswith('client-name '), parser_state.set_name),
        Transition(init, lambda line: not line.startswith('client-name '), parse_error),
    ])
    # next up is "descriptor-cookie" or it's an error
    got_name.add_transitions([
        Transition(got_cookie, lambda line: line.startswith('descriptor-cookie '), parser_state.set_cookie),
        Transition(init, lambda line: not line.startswith('descriptor-cookie '), parse_error),
    ])
    # the "interesting bit": there's either a client-name if we're a
    # "basic" file, or an RSA key (with "client-key" before it)
    got_cookie.add_transitions([
        Transition(reading_key, lambda line: line.startswith('client-key'), None),
        Transition(got_name, lambda line: line.startswith('client-name '), parser_state.set_name),
    ])
    # if we're reading an RSA key, we accumulate it in current_key.key
    # until we hit a line starting with "client-name"
    reading_key.add_transitions([
        Transition(reading_key, lambda line: not line.startswith('client-name'), parser_state.add_key_line),
        Transition(got_name, lambda line: line.startswith('client-name '), parser_state.set_name),
    ])
    # create our FSM and parse the data
    fsm = FSM([init, got_name, got_cookie, reading_key])
    for line in stream.readlines():
        fsm.process(line.strip())
    # make sure we get the "last" one
    parser_state.create_key()
    return parser_state.keys
def make_patch(self):
    '''Build one matplotlib PathPatch covering all pieces.

    Currently only works if every piece is an Arcgon; in that case a
    multiple-piece path is returned, otherwise an exception propagates.
    '''
    piece_paths = [piece.make_patch().get_path() for piece in self.pieces]
    merged_vertices = np.concatenate([path.vertices for path in piece_paths])
    merged_codes = np.concatenate([path.codes for path in piece_paths])
    return PathPatch(Path(merged_vertices, merged_codes))
def to_python(self, value):
    """Validate that the input can be converted to a date (or a date range).

    Returns a Python ``datetime.date``, a ``(start_date, end_date)`` tuple
    for two-widget input, or ``None`` for empty input.
    """
    if value in validators.EMPTY_VALUES:
        return None
    if isinstance(value, datetime.datetime):
        # Truncate datetimes to their date component.
        return value.date()
    if isinstance(value, datetime.date):
        return value
    if isinstance(value, list):
        # Input comes from 2 SplitDateWidgets, for example. So it's two
        # components: start date and end date.
        if len(value) != 2:
            raise ValidationError(self.error_messages['invalid'])
        if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES:
            return None
        start_value = value[0]
        end_value = value[1]
        start_date = None
        end_date = None
        # NOTE(review): these loops do not break on success, so a later
        # format that also parses would overwrite the result; and if no
        # format matches, the corresponding date stays None rather than
        # raising — confirm both are intended.
        for format in self.input_formats or formats.get_format('DATE_INPUT_FORMATS'):
            try:
                start_date = datetime.datetime(*time.strptime(start_value, format)[:6]).date()
            except ValueError:
                continue
        for format in self.input_formats or formats.get_format('DATE_INPUT_FORMATS'):
            try:
                end_date = datetime.datetime(*time.strptime(end_value, format)[:6]).date()
            except ValueError:
                continue
        return (start_date, end_date)
    # NOTE(review): any other input (e.g. a plain string) falls through here
    # and implicitly returns None instead of being parsed or rejected —
    # confirm this is intended.
def stat(self):
    """Report pool usage as (opened connections, free connections, waiters)."""
    opened = self._opened_conns
    free = len(self._free_conn)
    waiting = len(self._waitings)
    return opened, free, waiting
def pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width, dynamic_shape=False):
    """Pad `image` with zeros to the specified `height` and `width`.

    Adds `offset_height` rows of zeros on top, `offset_width` columns of
    zeros on the left, then pads the bottom and right with zeros until the
    image has dimensions `target_height` x `target_width`. A no-op when all
    offsets are zero and the image already has the target size.

    Args:
        image: 3-D tensor with shape `[height, width, channels]`
        offset_height: Number of rows of zeros to add on top.
        offset_width: Number of columns of zeros to add on the left.
        target_height: Height of output image.
        target_width: Width of output image.
        dynamic_shape: Whether the input image has undetermined shape. If
            `True`, shape information is retrieved at run time.

    Returns:
        3-D tensor of shape `[target_height, target_width, channels]`

    Raises:
        ValueError: If the shape of `image` is incompatible with the
            `offset_*` or `target_*` arguments and `dynamic_shape` is False.
    """
    image = ops.convert_to_tensor(image, name='image')
    _Check3DImage(image, require_static=(not dynamic_shape))
    height, width, depth = _ImageDimensions(image, dynamic_shape=dynamic_shape)
    pad_right = target_width - offset_width - width
    pad_bottom = target_height - offset_height - height
    # Static shapes can be validated up front; dynamic ones cannot.
    if not dynamic_shape:
        if target_width < width:
            raise ValueError('target_width must be >= width')
        if target_height < height:
            raise ValueError('target_height must be >= height')
        if pad_right < 0:
            raise ValueError('target_width not possible given offset_width and image width')
        if pad_bottom < 0:
            raise ValueError('target_height not possible given offset_height and image height')
    # No-op fast path. dynamic_shape is tested first so that (possibly
    # tensor-valued) offsets/pads are never evaluated for truthiness when
    # the shape is dynamic.
    if not (dynamic_shape or offset_width or offset_height or pad_right or pad_bottom):
        return image
    # Do not pad on the depth dimension.
    paddings = array_ops.reshape(array_ops.pack([offset_height, pad_bottom, offset_width, pad_right, 0, 0]), [3, 2])
    padded = array_ops.pad(image, paddings)
    if not dynamic_shape:
        padded.set_shape([target_height, target_width, depth])
    return padded
def getAsKmlPngAnimation(self, tableName, timeStampedRasters=[], rasterIdFieldName='id', rasterFieldName='raster', documentName='default', noDataValue=0, alpha=1.0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour', discreet=False):
    """Return a sequence of rasters with timestamps as a KML string with
    time markers for animation, plus the PNG frames.

    :param tableName: Name of the table to extract rasters from
    :param timeStampedRasters: List of dicts with keys ``rasterId`` (unique
        integer identifier, usually the primary key value) and ``dateTime``
        (a datetime the raster occurs at), e.g.::

            [{'rasterId': 1, 'dateTime': datetime(1970, 1, 1)},
             {'rasterId': 2, 'dateTime': datetime(1970, 1, 2)}]

    :param rasterIdFieldName: Name of the raster id field (usually the PK)
    :param rasterFieldName: Name of the field holding the rasters
    :param documentName: Name given to the KML document (shown in legend)
    :param noDataValue: Value treated as "no data" (default 0)
    :param alpha: Transparency applied to each raster cell (0.0-1.0)
    :param drawOrder: Stack order when multiple images show at once
    :param cellSize: If given, resample rasters to this cell size (in units
        of the raster SRS). Processing time grows quickly as this shrinks.
    :param resampleMethod: Resampling algorithm name
    :param discreet: Use a discreet (rather than continuous) SLD color map
    :rtype: (string, list)
    """
    # NOTE(review): timeStampedRasters uses a mutable default argument ([]);
    # it appears to only be read here, but confirm it is never mutated.
    if not self.isNumber(noDataValue):
        raise ValueError('RASTER CONVERSION ERROR: noDataValue must be a number.')
    if not self.isNumber(drawOrder):
        raise ValueError('RASTER CONVERSION ERROR: drawOrder must be a number.')
    if not (alpha >= 0 and alpha <= 1.0):
        raise ValueError("RASTER CONVERSION ERROR: alpha must be between 0.0 and 1.0.")
    # Extract raster Ids and validate
    rasterIds = []
    for timeStampedRaster in timeStampedRasters:
        # Validate dictionary
        if 'rasterId' not in timeStampedRaster:
            raise ValueError('RASTER CONVERSION ERROR: rasterId must be provided for each raster.')
        elif 'dateTime' not in timeStampedRaster:
            raise ValueError('RASTER CONVERSION ERROR: dateTime must be provided for each raster.')
        rasterIds.append(str(timeStampedRaster['rasterId']))
    # Get the color ramp and parameters
    minValue, maxValue = self.getMinMaxOfRasters(session=self._session, table=tableName, rasterIds=rasterIds, rasterIdField=rasterIdFieldName, rasterField=rasterFieldName, noDataValue=noDataValue)
    mappedColorRamp = ColorRampGenerator.mapColorRampToValues(colorRamp=self._colorRamp, minValue=minValue, maxValue=maxValue, alpha=alpha)
    # Join strings in list to create ramp
    rampString = mappedColorRamp.getPostGisColorRampString()
    # Get a PNG representation of each raster
    result = self.getRastersAsPngs(session=self._session, tableName=tableName, rasterIds=rasterIds, postGisRampString=rampString, rasterField=rasterFieldName, rasterIdField=rasterIdFieldName, cellSize=cellSize, resampleMethod=resampleMethod)
    binaryPNGs = []
    for row in result:
        binaryPNGs.append(row.png)
    # Determine extents for the KML wrapper file via query.
    # NOTE(review): identifiers/ids are interpolated into the SQL string;
    # they come from internal callers, but confirm they are never
    # user-controlled (SQL-injection risk otherwise).
    statement = '''
                SELECT (foo.metadata).*
                FROM (
                SELECT ST_MetaData(ST_Transform({0}, 4326, 'Bilinear')) as metadata
                FROM {1}
                WHERE {2}={3}
                ) As foo;
                '''.format(rasterFieldName, tableName, rasterIdFieldName, rasterIds[0])
    result = self._session.execute(statement)
    for row in result:
        # Only the first raster's metadata is used for the bounding box;
        # all frames are assumed to share the same extents.
        upperLeftY = row.upperlefty
        scaleY = row.scaley
        height = row.height
        upperLeftX = row.upperleftx
        scaleX = row.scalex
        width = row.width
    # Bounding box in lat/lon. scaleY is presumably negative (north-up
    # raster), so south ends up below north — TODO confirm.
    north = upperLeftY
    south = upperLeftY + (scaleY * height)
    east = upperLeftX + (scaleX * width)
    west = upperLeftX
    # Default to time delta to None
    deltaTime = None
    # Calculate delta time between images if more than one
    time1 = timeStampedRasters[0]['dateTime']
    if len(timeStampedRasters) >= 2:
        time2 = timeStampedRasters[1]['dateTime']
        deltaTime = time2 - time1
    # Initialize KML Document
    kml = ET.Element('kml', xmlns='http://www.opengis.net/kml/2.2')
    document = ET.SubElement(kml, 'Document')
    docName = ET.SubElement(document, 'name')
    docName.text = documentName
    if not discreet:
        # Embed the color ramp in SLD format
        document.append(ET.fromstring(mappedColorRamp.getColorMapAsContinuousSLD()))
    else:
        values = []
        document.append(ET.fromstring(mappedColorRamp.getColorMapAsDiscreetSLD(values)))
    # Apply special style to hide legend items
    style = ET.SubElement(document, 'Style', id='check-hide-children')
    listStyle = ET.SubElement(style, 'ListStyle')
    listItemType = ET.SubElement(listStyle, 'listItemType')
    listItemType.text = 'checkHideChildren'
    styleUrl = ET.SubElement(document, 'styleUrl')
    styleUrl.text = '#check-hide-children'
    for index, timeStampedRaster in enumerate(timeStampedRasters):
        # Extract variable
        if deltaTime:
            dateTime = timeStampedRaster['dateTime']
            prevDateTime = dateTime - deltaTime
        # GroundOverlay
        groundOverlay = ET.SubElement(document, 'GroundOverlay')
        overlayName = ET.SubElement(groundOverlay, 'name')
        overlayName.text = 'Overlay'
        if deltaTime:
            # Create TimeSpan tag (only when we can compute frame spacing)
            timeSpan = ET.SubElement(groundOverlay, 'TimeSpan')
            # Create begin tag
            begin = ET.SubElement(timeSpan, 'begin')
            begin.text = prevDateTime.strftime('%Y-%m-%dT%H:%M:%S')
            end = ET.SubElement(timeSpan, 'end')
            end.text = dateTime.strftime('%Y-%m-%dT%H:%M:%S')
        # DrawOrder
        drawOrderElement = ET.SubElement(groundOverlay, 'drawOrder')
        drawOrderElement.text = str(drawOrder)
        # Define Region
        regionElement = ET.SubElement(groundOverlay, 'Region')
        latLonBox = ET.SubElement(regionElement, 'LatLonBox')
        northElement = ET.SubElement(latLonBox, 'north')
        northElement.text = str(north)
        southElement = ET.SubElement(latLonBox, 'south')
        southElement.text = str(south)
        eastElement = ET.SubElement(latLonBox, 'east')
        eastElement.text = str(east)
        westElement = ET.SubElement(latLonBox, 'west')
        westElement.text = str(west)
        # Href to PNG: frames are referenced by index-based filename that
        # the caller is expected to write out alongside the KML.
        iconElement = ET.SubElement(groundOverlay, 'Icon')
        hrefElement = ET.SubElement(iconElement, 'href')
        hrefElement.text = 'raster{0}.png'.format(index)
        # LatLonBox
        latLonBox = ET.SubElement(groundOverlay, 'LatLonBox')
        northElement = ET.SubElement(latLonBox, 'north')
        northElement.text = str(north)
        southElement = ET.SubElement(latLonBox, 'south')
        southElement.text = str(south)
        eastElement = ET.SubElement(latLonBox, 'east')
        eastElement.text = str(east)
        westElement = ET.SubElement(latLonBox, 'west')
        westElement.text = str(west)
    return ET.tostring(kml), binaryPNGs
def query(self, **query):
    """Run a query and return the decoded documents.

    :param query: Optional parameters. Valid options are sort, limit,
        skip, and fields
    :type query: ``dict``
    :return: Array of documents retrieved by the query.
    :rtype: ``array``
    """
    response = self._get('', **query)
    raw = response.body.read()
    return json.loads(raw.decode('utf-8'))
def prj_add_dep(self, *args, **kwargs):
    """Prompt the user to add more departments to the current project.

    Opens a modal DepAdderDialog and inserts a tree item for every
    department chosen. Does nothing when no project is selected.

    :returns: None
    :rtype: None
    :raises: None
    """
    if not self.cur_prj:
        return
    dlg = DepAdderDialog(project=self.cur_prj)
    dlg.exec_()
    for dep in dlg.deps:
        # TreeItem registers itself with the given parent node.
        dep_data = djitemdata.DepartmentItemData(dep)
        treemodel.TreeItem(dep_data, self.prj_dep_model.root)
def files_in_path(path):
    """Return a list of all files under *path*, excluding anything inside a
    ``.git`` directory.

    Fix: the original used a substring test (``".git" not in relative_dir``),
    which also excluded unrelated directories whose names merely contain
    ".git" (e.g. ``foo.github``). We now compare path components instead.
    The per-file ``os.path.relpath`` call is also hoisted out of the inner
    loop, since it only depends on the directory.

    :param path: root directory to walk
    :returns: list of file paths relative to *path* (top-level files are
        prefixed with ``./`` via ``os.path.join('.', name)``, matching the
        original behavior)
    """
    aggregated_files = []
    for dir_, _, files in os.walk(path):
        relative_dir = os.path.relpath(dir_, path)
        # Skip the .git tree: match on whole path components only.
        if ".git" in relative_dir.split(os.sep):
            continue
        for file_name in files:
            aggregated_files.append(os.path.join(relative_dir, file_name))
    return aggregated_files
def nvmlDeviceSetApplicationsClocks(handle, maxMemClockMHz, maxGraphicsClockMHz):
    r"""Set the clocks that compute/graphics applications will lock to.

    The CUDA driver requests these clocks during context creation, so this
    effectively pins the clocks CUDA applications run at (barring overspec
    events such as over-power or over-thermal). Requires root/admin
    permissions; settings revert to defaults after a reboot or driver
    reload (see nvmlDeviceResetApplicationsClocks). Use
    nvmlDeviceGetSupportedMemoryClocks / nvmlDeviceGetSupportedGraphicsClocks
    to list valid clock combinations.

    :param handle: identifier of the target device
    :param maxMemClockMHz: requested memory clock in MHz
    :param maxGraphicsClockMHz: requested graphics clock in MHz
    :returns: None on success; otherwise an NVML error is raised by
        _nvmlCheckReturn (invalid argument, no permission, not supported,
        GPU lost, or unknown error).
    """
    func = _nvmlGetFunctionPointer("nvmlDeviceSetApplicationsClocks")
    status = func(handle, c_uint(maxMemClockMHz), c_uint(maxGraphicsClockMHz))
    _nvmlCheckReturn(status)
    return None
def _sending_task(self, backend):
    """Safely increment *backend*'s task counters.

    Used internally; returns the overall count of tasks dispatched to
    *backend* so far.
    """
    # Both counters are updated under the mutex so concurrent senders
    # observe consistent numbering.
    with self.backend_mutex:
        self.backends[backend] += 1
        self.task_counter[backend] += 1
        task_number = self.task_counter[backend]
    return task_number
def generate_uuid(basedata=None):
    """Provide a _random_ UUID with no input, or a UUID4-format MD5 checksum
    of any input data provided.

    Fix: ``hashlib.md5`` requires bytes on Python 3 — a ``str`` input used
    to raise ``TypeError``. Strings are now encoded as UTF-8 first, and raw
    ``bytes`` input is accepted as well.

    :param basedata: None for a random UUID4, or str/bytes to derive a
        deterministic UUID-shaped digest from
    :returns: UUID-formatted string, or None for unsupported input types
        (preserving the original fall-through behavior)
    """
    if basedata is None:
        return str(uuid.uuid4())
    if isinstance(basedata, str):
        basedata = basedata.encode("utf-8")
    if isinstance(basedata, bytes):
        checksum = hashlib.md5(basedata).hexdigest()
        # Slices are exactly 8/4/4/4/12 chars, so the %Ns widths are no-ops;
        # kept for fidelity with the original format string.
        return '%8s-%4s-%4s-%4s-%12s' % (checksum[0:8], checksum[8:12], checksum[12:16], checksum[16:20], checksum[20:32])
def bounce(sequence):
    '''Return a driver function that advances a "bounced" sequence.

    The values ping-pong forward then backward forever, e.g. for
    ``[0, 1, 2, 3]`` the driver yields ``0 1 2 3 3 2 1 0 0 1 2 ...``.

    Args:
        sequence (seq): a sequence of values for the driver to bounce
    '''
    length = len(sequence)

    def pick(step):
        # Even passes walk forward, odd passes walk backward.
        pass_number, pos = divmod(step, length)
        if pass_number % 2:
            return sequence[length - pos - 1]
        return sequence[pos]

    return partial(force, sequence=_advance(pick))
def decr(self, key, delta=1):
    """Decrement the cached value under *key* by *delta*.

    :param str|unicode key:
    :param int delta:
    :rtype: bool
    """
    # Timeout and cache name come from this cache wrapper's configuration.
    timeout = self.timeout
    cache_name = self.name
    return uwsgi.cache_dec(key, delta, timeout, cache_name)
def update ( self , lambda_mults = [ 1.0 ] , localizer = None , run_subset = None , use_approx = True , calc_only = False ) :
"""update the iES one GLM cycle
Parameters
lambda _ mults : list
a list of lambda multipliers to test . Each lambda mult value will require
evaluating ( a subset of ) the parameter ensemble .
localizer : pyemu . Matrix
a jacobian localizing matrix
run _ subset : int
the number of realizations to test for each lambda _ mult value . For example ,
if run _ subset = 30 and num _ reals = 100 , the first 30 realizations will be run ( in
parallel ) for each lambda _ mult value . Then the best lambda _ mult is selected and the
remaining 70 realizations for that lambda _ mult value are run ( in parallel ) .
use _ approx : bool
a flag to use the MLE or MAP upgrade solution . True indicates use MLE solution
calc _ only : bool
a flag to calculate the upgrade matrix only ( not run the ensemble ) . This is mostly for
debugging and testing on travis . Default is False
Example
` ` > > > import pyemu ` `
` ` > > > es = pyemu . EnsembleSmoother ( pst = " pest . pst " ) ` `
` ` > > > es . initialize ( num _ reals = 100 ) ` `
` ` > > > es . update ( lambda _ mults = [ 0.1,1.0,10.0 ] , run _ subset = 30 ) ` `""" | # if not self . parensemble . istransformed :
# self . parensemble . _ transform ( inplace = False )
if run_subset is not None :
if run_subset >= self . obsensemble . shape [ 0 ] :
self . logger . warn ( "run_subset ({0}) >= num of active reals ({1})...ignoring " . format ( run_subset , self . obsensemble . shape [ 0 ] ) )
run_subset = None
self . iter_num += 1
mat_prefix = self . pst . filename . replace ( '.pst' , '' ) + ".{0}" . format ( self . iter_num )
self . logger . log ( "iteration {0}" . format ( self . iter_num ) )
self . logger . statement ( "{0} active realizations" . format ( self . obsensemble . shape [ 0 ] ) )
if self . obsensemble . shape [ 0 ] < 2 :
self . logger . lraise ( "at least active 2 realizations (really like 300) are needed to update" )
if not self . _initialized : # raise Exception ( " must call initialize ( ) before update ( ) " )
self . logger . lraise ( "must call initialize() before update()" )
self . logger . log ( "calculate scaled delta obs" )
scaled_delta_obs = self . _calc_delta_obs ( self . obsensemble )
self . logger . log ( "calculate scaled delta obs" )
self . logger . log ( "calculate scaled delta par" )
scaled_delta_par = self . _calc_delta_par ( self . parensemble )
self . logger . log ( "calculate scaled delta par" )
self . logger . log ( "calculate pseudo inv comps" )
u , s , v = scaled_delta_obs . pseudo_inv_components ( eigthresh = self . pst . svd_data . eigthresh )
s . col_names = s . row_names
self . logger . log ( "calculate pseudo inv comps" )
self . logger . log ( "calculate obs diff matrix" )
# obs _ diff = self . obscov _ inv _ sqrt * self . _ get _ residual _ obs _ matrix ( self . obsensemble ) . T
obs_diff = self . obscov_inv_sqrt * self . phi . get_residual_obs_matrix ( self . obsensemble ) . T
self . logger . log ( "calculate obs diff matrix" )
if self . save_mats :
np . savetxt ( mat_prefix + ".obs_diff.dat" , scaled_delta_obs . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".par_diff.dat" , scaled_delta_par . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".u.dat" , u . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".s.dat" , s . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".v.dat" , v . x , fmt = "%15.6e" )
# here is the math part . . . calculate upgrade matrices
mean_lam , std_lam , paren_lam , obsen_lam = [ ] , [ ] , [ ] , [ ]
lam_vals = [ ]
for ilam , cur_lam_mult in enumerate ( lambda_mults ) :
parensemble_cur_lam = self . parensemble . copy ( )
# print ( parensemble _ cur _ lam . isnull ( ) . values . any ( ) )
cur_lam = self . current_lambda * cur_lam_mult
lam_vals . append ( cur_lam )
self . logger . log ( "calcs for lambda {0}" . format ( cur_lam_mult ) )
scaled_ident = Cov . identity_like ( s ) * ( cur_lam + 1.0 )
scaled_ident += s ** 2
scaled_ident = scaled_ident . inv
# build up this matrix as a single element so we can apply
# localization
self . logger . log ( "building upgrade_1 matrix" )
upgrade_1 = - 1.0 * ( self . parcov_inv_sqrt * scaled_delta_par ) * v * s * scaled_ident * u . T
if self . save_mats :
np . savetxt ( mat_prefix + ".ivec.dat" . format ( self . iter_num ) , scaled_ident . x , fmt = "%15.6e" )
self . logger . log ( "building upgrade_1 matrix" )
# apply localization
if localizer is not None :
self . logger . log ( "applying localization" )
upgrade_1 . hadamard_product ( localizer )
self . logger . log ( "applying localization" )
# apply residual information
self . logger . log ( "applying residuals" )
upgrade_1 *= obs_diff
self . logger . log ( "applying residuals" )
self . logger . log ( "processing upgrade_1" )
if self . save_mats :
np . savetxt ( mat_prefix + ".upgrade_1.dat" , upgrade_1 . T . x , fmt = "%15.6e" )
upgrade_1 = upgrade_1 . to_dataframe ( )
upgrade_1 . index . name = "parnme"
upgrade_1 = upgrade_1 . T
upgrade_1 . index = [ int ( i ) for i in upgrade_1 . index ]
upgrade_1 . to_csv ( self . pst . filename + ".upgrade_1.{0:04d}.csv" . format ( self . iter_num ) )
if upgrade_1 . isnull ( ) . values . any ( ) :
self . logger . lraise ( "NaNs in upgrade_1" )
self . logger . log ( "processing upgrade_1" )
# print ( upgrade _ 1 . isnull ( ) . values . any ( ) )
# print ( parensemble _ cur _ lam . index )
# print ( upgrade _ 1 . index )
parensemble_cur_lam += upgrade_1
# parameter - based upgrade portion
if not use_approx and self . iter_num > 1 : # if True :
self . logger . log ( "building upgrade_2 matrix" )
par_diff = ( self . parensemble - self . parensemble_0 . loc [ self . parensemble . index , : ] ) . as_pyemu_matrix ( ) . T
x4 = self . Am . T * self . parcov_inv_sqrt * par_diff
x5 = self . Am * x4
x6 = scaled_delta_par . T * x5
x7 = v * scaled_ident * v . T * x6
ug2_mat = - 1.0 * ( self . parcov_inv_sqrt * scaled_delta_par * x7 )
upgrade_2 = ug2_mat . to_dataframe ( )
upgrade_2 . index . name = "parnme"
upgrade_2 = upgrade_2 . T
upgrade_2 . to_csv ( self . pst . filename + ".upgrade_2.{0:04d}.csv" . format ( self . iter_num ) )
upgrade_2 . index = [ int ( i ) for i in upgrade_2 . index ]
if self . save_mats :
np . savetxt ( mat_prefix + ".scaled_par_resid.dat" , par_diff . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".x4.dat" , x4 . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".x5.dat" , x5 . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".x6.dat" , x6 . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".x7.dat" , x7 . x , fmt = "%15.6e" )
np . savetxt ( mat_prefix + ".upgrade_2.dat" , ug2_mat . T . x , fmt = "%15.6e" )
if upgrade_2 . isnull ( ) . values . any ( ) :
self . logger . lraise ( "NaNs in upgrade_2" )
parensemble_cur_lam += upgrade_2
self . logger . log ( "building upgrade_2 matrix" )
self . logger . log ( "enforcing bounds" )
parensemble_cur_lam . enforce ( self . enforce_bounds )
self . logger . log ( "enforcing bounds" )
self . logger . log ( "filling fixed parameters" )
# fill in fixed pars with initial values
fi = parensemble_cur_lam . fixed_indexer
li = parensemble_cur_lam . log_indexer
log_values = self . pst . parameter_data . loc [ : , "parval1" ] . copy ( )
log_values . loc [ li ] = log_values . loc [ li ] . apply ( np . log10 )
fixed_vals = log_values . loc [ fi ]
for fname , fval in zip ( fixed_vals . index , fixed_vals . values ) : # if fname not in df . columns :
# continue
# print ( fname )
parensemble_cur_lam . loc [ : , fname ] = fval
self . logger . log ( "filling fixed parameters" )
# this is for testing failed runs on upgrade testing
# works with the 10par _ xsec smoother test
# parensemble _ cur _ lam . iloc [ : , : ] = - 100000.0
# some hackery - we lose track of the transform flag here , but just
# know it is transformed . Need to create dataframe here because
# pd . concat doesn ' t like par ensembles later
paren_lam . append ( pd . DataFrame ( parensemble_cur_lam . loc [ : , : ] ) )
self . logger . log ( "calcs for lambda {0}" . format ( cur_lam_mult ) )
if calc_only :
return
# subset if needed
# and combine lambda par ensembles into one par ensemble for evaluation
if run_subset is not None and run_subset < self . parensemble . shape [ 0 ] : # subset _ idx = [ " { 0 : d } " . format ( i ) for i in np . random . randint ( 0 , self . parensemble . shape [ 0 ] - 1 , run _ subset ) ]
subset_idx = self . parensemble . iloc [ : run_subset , : ] . index . values
self . logger . statement ( "subset idxs: " + ',' . join ( [ str ( s ) for s in subset_idx ] ) )
# more tracking of transformed - just know it ! Creating dataframes . . .
paren_lam_subset = [ pe . loc [ subset_idx , : ] for pe in paren_lam ]
paren_combine = pd . concat ( paren_lam_subset , ignore_index = True )
paren_lam_subset = None
else :
subset_idx = self . parensemble . index . values
paren_combine = pd . concat ( paren_lam , ignore_index = True )
self . logger . log ( "evaluating ensembles for lambdas : {0}" . format ( ',' . join ( [ "{0:8.3E}" . format ( l ) for l in lam_vals ] ) ) )
# back to par ensemble and know it is transformed
paren_combine = ParameterEnsemble . from_dataframe ( df = paren_combine , pst = self . pst , istransformed = True )
failed_runs , obsen_combine = self . _calc_obs ( paren_combine )
self . logger . log ( "evaluating ensembles for lambdas : {0}" . format ( ',' . join ( [ "{0:8.3E}" . format ( l ) for l in lam_vals ] ) ) )
paren_combine = None
if failed_runs is not None and len ( failed_runs ) == obsen_combine . shape [ 0 ] :
self . logger . lraise ( "all runs failed - cannot continue" )
# unpack lambda obs ensembles from combined obs ensemble
nrun_per_lam = self . obsensemble . shape [ 0 ]
if run_subset is not None :
nrun_per_lam = run_subset
obsen_lam = [ ]
for i in range ( len ( lam_vals ) ) :
sidx = i * nrun_per_lam
eidx = sidx + nrun_per_lam
oe = ObservationEnsemble . from_dataframe ( df = obsen_combine . iloc [ sidx : eidx , : ] . copy ( ) , pst = self . pst )
oe . index = subset_idx
# check for failed runs in this set - drop failed runs from obs ensembles
if failed_runs is not None :
failed_runs_this = np . array ( [ f for f in failed_runs if f >= sidx and f < eidx ] ) - sidx
if len ( failed_runs_this ) > 0 :
if len ( failed_runs_this ) == oe . shape [ 0 ] :
self . logger . warn ( "all runs failed for lambda {0}" . format ( lam_vals [ i ] ) )
else :
self . logger . warn ( "{0} run failed for lambda {1}" . format ( len ( failed_runs_this ) , lam_vals [ i ] ) )
oe . iloc [ failed_runs_this , : ] = np . NaN
oe = oe . dropna ( )
paren_lam [ i ] . iloc [ failed_runs_this , : ] = np . NaN
paren_lam [ i ] = ParameterEnsemble . from_dataframe ( df = paren_lam [ i ] . dropna ( ) , pst = self . pst )
paren_lam [ i ] . __instransformed = True
# don ' t drop bad reals here , instead , mask bad reals in the lambda
# selection and drop later
# if self . drop _ bad _ reals is not None :
# assert isinstance ( drop _ bad _ reals , float )
# drop _ idx = np . argwhere ( self . current _ phi _ vec > self . drop _ bad _ reals ) . flatten ( )
# run _ ids = self . obsensemble . index . values
# drop _ idx = run _ ids [ drop _ idx ]
# if len ( drop _ idx ) = = self . obsensemble . shape [ 0 ] :
# raise Exception ( " dropped all realizations as ' bad ' " )
# if len ( drop _ idx ) > 0:
# self . logger . warn ( " { 0 } realizations dropped as ' bad ' ( indices : { 1 } ) " . \
# format ( len ( drop _ idx ) , ' , ' . join ( [ str ( d ) for d in drop _ idx ] ) ) )
# self . parensemble . loc [ drop _ idx , : ] = np . NaN
# self . parensemble = self . parensemble . dropna ( )
# self . obsensemble . loc [ drop _ idx , : ] = np . NaN
# self . obsensemble = self . obsensemble . dropna ( )
# self . current _ phi _ vec = self . _ calc _ phi _ vec ( self . obsensemble )
obsen_lam . append ( oe )
obsen_combine = None
# here is where we need to select out the " best " lambda par and obs
# ensembles
# phi _ vecs = [ self . _ calc _ phi _ vec ( obsen ) for obsen in obsen _ lam ]
# phi _ vecs _ reg = [ self . _ calc _ regul _ phi _ vec ( paren ) for paren in paren _ lam ]
# if self . regul _ factor > 0.0:
# for i , ( pv , prv ) in enumerate ( zip ( phi _ vecs , phi _ vecs _ reg ) ) :
# phi _ vecs [ i ] = pv + ( prv * self . regul _ factor )
self . logger . log ( "calc lambda phi vectors" )
phi_vecs = [ self . phi . get_meas_and_regul_phi ( oe , pe . loc [ oe . index , : ] ) for oe , pe in zip ( obsen_lam , paren_lam ) ]
self . logger . log ( "calc lambda phi vectors" )
if self . drop_bad_reals is not None :
for i , ( meas_pv , regul_pv ) in enumerate ( phi_vecs ) : # for testing the drop _ bad _ reals functionality
# pv [ [ 0,3,7 ] ] = self . drop _ bad _ reals + 1.0
regul_pv = regul_pv . copy ( )
regul_pv [ meas_pv > self . drop_bad_reals ] = np . NaN
regul_pv = regul_pv [ ~ np . isnan ( regul_pv ) ]
meas_pv [ meas_pv > self . drop_bad_reals ] = np . NaN
meas_pv = meas_pv [ ~ np . isnan ( meas_pv ) ]
if len ( meas_pv ) == 0 : # raise Exception ( " all realization for lambda { 0 } dropped as ' bad ' " . \
# format ( lam _ vals [ i ] ) )
self . logger . warn ( "all realizations for lambda {0} marked as 'bad'" )
meas_pv = np . zeros_like ( obsen_lam [ 0 ] . shape [ 0 ] ) + 1.0e+30
regul_pv = np . zeros_like ( obsen_lam [ 0 ] . shape [ 0 ] ) + 1.0e+30
phi_vecs [ i ] = ( meas_pv , regul_pv )
mean_std_meas = [ ( pv [ 0 ] . mean ( ) , pv [ 0 ] . std ( ) ) for pv in phi_vecs ]
mean_std_regul = [ ( pv [ 1 ] . mean ( ) , pv [ 1 ] . std ( ) ) for pv in phi_vecs ]
update_pars = False
update_lambda = False
self . logger . statement ( "**************************" )
# self . logger . statement ( str ( datetime . now ( ) ) )
self . logger . statement ( "lambda testing summary" )
self . logger . statement ( "total runs:{0}" . format ( self . total_runs ) )
self . logger . statement ( "iteration: {0}" . format ( self . iter_num ) )
self . logger . statement ( "current lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}" . format ( self . current_lambda , self . last_best_mean , self . last_best_std ) )
# accept a new best if its within 10%
best_mean = self . last_best_mean * 1.1
best_std = self . last_best_std * 1.1
best_i = 0
for i , ( ( mm , ms ) , ( rm , rs ) ) in enumerate ( zip ( mean_std_meas , mean_std_regul ) ) :
self . logger . statement ( " tested lambda:{0:15.6G}, meas mean:{1:15.6G}, meas std:{2:15.6G}" . format ( self . current_lambda * lambda_mults [ i ] , mm , ms ) )
self . logger . statement ( "{0:30s}regul mean:{1:15.6G}, regul std:{2:15.6G}" . format ( ' ' , rm , rs ) )
m = mm + ( self . regul_factor * rm )
s = ms + ( self . regul_factor * rs )
if m < best_mean :
update_pars = True
best_mean = m
best_i = i
if s < best_std :
update_lambda = True
best_std = s
if np . isnan ( best_mean ) :
self . logger . lraise ( "best mean = NaN" )
if np . isnan ( best_std ) :
self . logger . lraise ( "best std = NaN" )
if not update_pars :
self . current_lambda *= max ( lambda_mults ) * 10.0
self . current_lambda = min ( self . current_lambda , 100000 )
self . logger . statement ( "not accepting iteration, increased lambda:{0}" . format ( self . current_lambda ) )
else : # more transformation status hard coding - ugly
self . parensemble = ParameterEnsemble . from_dataframe ( df = paren_lam [ best_i ] , pst = self . pst , istransformed = True )
if run_subset is not None :
failed_runs , self . obsensemble = self . _calc_obs ( self . parensemble )
if failed_runs is not None :
self . logger . warn ( "dropping failed realizations" )
self . parensemble . loc [ failed_runs , : ] = np . NaN
self . parensemble = self . parensemble . dropna ( )
self . obsensemble . loc [ failed_runs , : ] = np . NaN
self . obsensemble = self . obsensemble . dropna ( )
self . phi . update ( )
best_mean = self . phi . comp_phi . mean ( )
best_std = self . phi . comp_phi . std ( )
else :
self . obsensemble = obsen_lam [ best_i ]
# reindex parensemble in case failed runs
self . parensemble = ParameterEnsemble . from_dataframe ( df = self . parensemble . loc [ self . obsensemble . index ] , pst = self . pst , istransformed = self . parensemble . istransformed )
self . phi . update ( )
if self . drop_bad_reals is not None : # for testing drop _ bad _ reals functionality
# self . current _ phi _ vec [ : : 2 ] = self . drop _ bad _ reals + 1.0
# drop _ idx = np . argwhere ( self . current _ phi _ vec > self . drop _ bad _ reals ) . flatten ( )
drop_idx = np . argwhere ( self . phi . comp_phi > self . drop_bad_reals ) . flatten ( )
run_ids = self . obsensemble . index . values
drop_idx = run_ids [ drop_idx ]
if len ( drop_idx ) > self . obsensemble . shape [ 0 ] - 3 :
raise Exception ( "dropped too many realizations as 'bad'" )
if len ( drop_idx ) > 0 :
self . logger . warn ( "{0} realizations dropped as 'bad' (indices :{1})" . format ( len ( drop_idx ) , ',' . join ( [ str ( d ) for d in drop_idx ] ) ) )
self . parensemble . loc [ drop_idx , : ] = np . NaN
self . parensemble = self . parensemble . dropna ( )
self . obsensemble . loc [ drop_idx , : ] = np . NaN
self . obsensemble = self . obsensemble . dropna ( )
self . phi . update ( )
best_mean = self . phi . comp_phi . mean ( )
best_std = self . phi . comp_phi . std ( )
self . phi . report ( cur_lam = self . current_lambda * lambda_mults [ best_i ] )
self . logger . statement ( " best lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}" . format ( self . current_lambda * lambda_mults [ best_i ] , best_mean , best_std ) )
# self . logger . statement ( " actual mean phi : { 0:15.6G } " . format ( float ( self . current _ actual _ phi . mean ( ) ) ) )
self . last_best_mean = best_mean
self . last_best_std = best_std
if update_lambda : # be aggressive
self . current_lambda *= ( lambda_mults [ best_i ] * 0.75 )
# but don ' t let lambda get too small
self . current_lambda = max ( self . current_lambda , 0.00001 )
self . logger . statement ( "updating lambda: {0:15.6G}" . format ( self . current_lambda ) )
self . logger . statement ( "**************************\n" )
self . parensemble . to_csv ( self . pst . filename + self . paren_prefix . format ( self . iter_num ) )
self . obsensemble . to_csv ( self . pst . filename + self . obsen_prefix . format ( self . iter_num ) )
if self . raw_sweep_out is not None :
self . raw_sweep_out . to_csv ( self . pst . filename + "_sweepraw{0}.csv" . format ( self . iter_num ) )
self . logger . log ( "iteration {0}" . format ( self . iter_num ) ) |
def get_issue_labels(self, issue_key):
    """Return the list of labels attached to a Jira issue.

    :param issue_key: Jira issue key (e.g. ``"PROJ-123"``).
    :return: list of label strings; an empty list when the request
        returned no body or the issue exposes no ``fields`` section.
    """
    url = 'rest/api/2/issue/{issue_key}?fields=labels'.format(issue_key=issue_key)
    # Chain a default at every level: the original crashed with
    # AttributeError when the response body or its 'fields' key was
    # missing, because .get('fields') returned None.
    response = self.get(url) or {}
    return (response.get('fields') or {}).get('labels', [])
def gp_size(self, _gp_size):
    """Set the GP ("global pointer") size of the BFD file being
    processed.

    :param _gp_size: new GP size value, passed straight through to the
        underlying ``_bfd`` C extension.
    :raises BfdException: if the BFD structure was not initialized.
    """
    # NOTE(review): the original docstring claimed this stores the
    # "start address", but the code clearly delegates to set_gp_size().
    if not self._ptr:
        raise BfdException("BFD not initialized")
    return _bfd.set_gp_size(self._ptr, _gp_size)
def _can_connect(self):
    """Try to open a TCP connection to the configured host:port.

    :return: True if the connection was established, False otherwise.
    """
    self.log('Trying to reach configured connectivity check endpoint', lvl=verbose)
    try:
        # Fixes two defects in the original: it mutated the
        # process-wide default via socket.setdefaulttimeout(), and it
        # never closed the socket it opened (fd leak). create_connection
        # takes a per-call timeout and the with-block closes the socket.
        with socket.create_connection((self.config.host, self.config.port),
                                      timeout=self.config.timeout):
            return True
    except Exception as ex:
        self.log(ex, pretty=True, lvl=debug)
        return False
def deduplicate(pairs, aa=False, ignore_primer_regions=False):
    '''Remove duplicate sequences from a list of Pair objects.

    A Pair with both heavy and light chains is a duplicate only when
    both chains identically match another Pair's chains. A single-chain
    Pair is a duplicate whenever its one chain matches, even against a
    fully paired Pair. Comparison is plain string equality on the
    nucleotide sequence (or amino-acid sequence when ``aa=True``),
    optionally trimming the primer regions from both ends, so sequences
    of different length are never considered duplicates.
    '''
    vdj_key = 'vdj_aa' if aa else 'vdj_nt'
    trim = 4 if aa else 12

    def _seq(chain):
        # Comparison string for one chain, optionally primer-trimmed.
        seq = chain[vdj_key]
        return seq[trim:-trim] if ignore_primer_regions else seq

    def _chain_matches(chain, other_chain):
        # A missing chain on the candidate always "matches"; a chain
        # present on the candidate but absent on the kept Pair never does.
        if chain is None:
            return True
        if other_chain is None:
            return False
        return _seq(chain) == _seq(other_chain)

    def _is_duplicate(candidate, kept):
        return (_chain_matches(candidate.heavy, kept.heavy)
                and _chain_matches(candidate.light, kept.light))

    # Process fully paired entries first so a single-chain Pair is the
    # one dropped when it collides with a paired one.
    ordered = [p for p in pairs if p.is_pair] + [p for p in pairs if not p.is_pair]
    unique = []
    for candidate in ordered:
        if not any(_is_duplicate(candidate, kept) for kept in unique):
            unique.append(candidate)
    return unique
def login(self, email, password, android_id):
    """Authenticate to Google with the provided credentials.

    Args:
        email (str): The account to use.
        password (str): The account password.
        android_id (str): An identifier for this client.

    Raises:
        LoginException: If there was a problem logging in.
    """
    self._email = email
    self._android_id = android_id
    response = gpsoauth.perform_master_login(self._email, password, self._android_id)
    # A successful master login always carries a 'Token' entry; anything
    # else is reported as a login failure.
    if 'Token' not in response:
        raise exception.LoginException(response.get('Error'), response.get('ErrorDetail'))
    self._master_token = response['Token']
    self.refresh()
    return True
def destroy_venv(env_path, venvscache=None):
    """Destroy a venv.

    :param env_path: filesystem path of the virtualenv to remove.
    :param venvscache: optional cache object; when given, the venv is
        also dropped from it via ``venvscache.remove()``.
    """
    # remove the venv itself in disk (a missing path is not an error)
    logger.debug("Destroying virtualenv at: %s", env_path)
    shutil.rmtree(env_path, ignore_errors=True)
    # remove venv from cache
    if venvscache is not None:
        venvscache.remove(env_path)
def mac_address_table_aging_time_legacy_time_out(self, **kwargs):
    """Build the NETCONF payload for
    ``mac-address-table/aging-time/legacy-time-out`` and hand the XML
    tree to the configured callback.
    """
    config = ET.Element("config")
    mac_table = ET.SubElement(
        config, "mac-address-table",
        xmlns="urn:brocade.com:mgmt:brocade-mac-address-table")
    aging = ET.SubElement(mac_table, "aging-time")
    timeout_el = ET.SubElement(aging, "legacy-time-out")
    timeout_el.text = kwargs.pop('legacy_time_out')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _name_to_tensor ( self , tensor_name ) :
"""The tensor with the given name .
Args :
tensor _ name : a string , name of a tensor in the graph .
Returns :
a tf . Tensor or mtf . Tensor""" | id1 , id2 = self . _tensor_name_to_ids [ tensor_name ]
return self . _operations [ id1 ] . outputs [ id2 ] |
def datasetHeaderChunk(key, lines):
    """Process the dataset header cards and collect their values."""
    KEYWORDS = ('DATASET', 'OBJTYPE', 'VECTYPE', 'BEGSCL', 'BEGVEC',
                'OBJID', 'ND', 'NC', 'NAME')
    TYPE_KEYS = ('BEGSCL', 'BEGVEC')
    # Map simple "KEYWORD value" cards to (result field, converter).
    SIMPLE_CARDS = {'ND': ('numberData', int),
                    'NC': ('numberCells', int),
                    'OBJID': ('objectID', int),
                    'NAME': ('name', str),
                    'OBJTYPE': ('objectType', str),
                    'VECTYPE': ('vectorType', str)}
    result = {'type': None, 'numberData': None, 'numberCells': None,
              'name': None, 'objectID': None, 'objectType': None,
              'vectorType': None}
    chunks = pt.chunk(KEYWORDS, lines)
    # NOTE(review): in the original the loop variable shadowed the
    # ``key`` parameter, which is never actually used by this function.
    for card, chunkList in iteritems(chunks):
        for chunk in chunkList:
            tokens = pt.splitLine(chunk[0])
            if card in SIMPLE_CARDS:
                field, convert = SIMPLE_CARDS[card]
                result[field] = convert(tokens[1])
            elif card in TYPE_KEYS:
                # BEGSCL / BEGVEC cards carry the type in the keyword itself.
                result['type'] = tokens[0]
    return result
def open(self, configuration, flags):
    """Open a CAN connection using `CanalOpen()`.

    :param str configuration: the configuration: "device_id;baudrate"
    :param int flags: the flags to be set
    :raises can.CanError: if any error occurred
    :returns: Valid handle for CANAL API functions on success
    """
    try:
        # The underlying DLL cannot handle non-ASCII configuration
        # strings, so encode (dropping non-ASCII characters) first.
        config_ascii = configuration.encode('ascii', 'ignore')
        handle = self.__m_dllBasic.CanalOpen(config_ascii, flags)
    except Exception as ex:
        # Re-raise anything thrown by the foreign call as a CAN error.
        raise can.CanError('CanalOpen() failed, configuration: "{}", error: {}'.format(configuration, ex))
    # Any greater-than-zero return value indicates a success (see
    # https://grodansparadis.gitbooks.io/the-vscp-daemon/canal_interface_specification.html)
    if handle <= 0:
        raise can.CanError('CanalOpen() failed, configuration: "{}", return code: {}'.format(configuration, handle))
    return handle
def db_for_write(self, model, **hints):
    """Prevent write actions on read-only tables.

    Raises:
        WriteNotSupportedError: If models.sf_access is ``read_only``.
    """
    # Models without an ``sf_access`` attribute are writable by default;
    # getattr with a None default replaces the try/except AttributeError.
    if getattr(model, 'sf_access', None) == READ_ONLY:
        raise WriteNotSupportedError("%r is a read-only model." % model)
    return None
def ssh_config(ssh_user, ssh_private_key_file):
    """Create a temporary ssh config file and yield its path."""
    try:
        config_file = NamedTemporaryFile(delete=False, mode='w+')
        # Write the whole config in one go: wildcard host entry pointing
        # at the given identity file and user.
        config_file.write('Host *\n'
                          ' IdentityFile %s\n'
                          ' User %s' % (ssh_private_key_file, ssh_user))
        config_file.close()
        yield config_file.name
    finally:
        # Best-effort cleanup; ignore the file already being gone.
        with ignored(OSError):
            os.remove(config_file.name)
def get_hline():
    """Return a 1x1 Window that renders a horizontal '-' line."""
    return Window(
        width=LayoutDimension.exact(1),
        height=LayoutDimension.exact(1),
        content=FillControl('-', token=Token.Line),
    )
def userInformation(MoreData_presence=0):
    """USER INFORMATION Section 9.3.31

    Builds the mandatory TpPd / MessageType / UserUser layers and,
    when ``MoreData_presence == 1``, appends the optional More Data IE.
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x20)  # 00100000
    c = UserUser()
    packet = a / b / c
    # Bug fix: the original used ``MoreData_presence is 1`` -- identity
    # comparison against an int literal is implementation-dependent and
    # emits a SyntaxWarning on modern CPython; equality is the intent.
    if MoreData_presence == 1:
        d = MoreDataHdr(ieiMD=0xA0, eightBitMD=0x0)
        packet = packet / d
    return packet
def path(*components):
    """Build a file path.

    Join all components into one path and expand a leading ``~`` to the
    user's home directory.
    """
    return os.path.expanduser(os.path.join(*components))
def process_edge_search(self, current, neighbor, pred, q, component, algo, **kargs):
    '''API: process_edge_search(self, current, neighbor, pred, q, component,
    algo, **kargs)
    Description:
    Used by search() method. Processes edges according to the underlying
    algorithm. User does not need to call this method directly.
    Input:
        current: Name of the current node (None for the search root).
        neighbor: Name of the neighbor node.
        pred: Predecessor tree.
        q: Data structure that holds nodes to be processed in a queue.
        component: component number.
        algo: Search algorithm. See search() documentation.
        kargs: Keyword arguments.
    Post:
        'color', 'distance', 'component' attribute of nodes and edges may
        change.
    '''
    # Dijkstra and Prim have dedicated edge processors; delegate.
    if algo == 'Dijkstra':
        return self.process_edge_dijkstra(current, neighbor, pred, q, component)
    if algo == 'Prim':
        return self.process_edge_prim(current, neighbor, pred, q, component)
    neighbor_node = self.get_node(neighbor)
    if current == None:
        # No predecessor: ``neighbor`` is the search root. Seed its
        # distance/component, enqueue it, and stop here.
        neighbor_node.set_attr('distance', 0)
        if isinstance(q, PriorityQueue):
            q.push(neighbor, 0)
        else:
            q.push(neighbor)
        if component != None:
            neighbor_node.set_attr('component', component)
            neighbor_node.set_attr('label', component)
        else:
            neighbor_node.set_attr('label', 0)
        return
    if isinstance(q, PriorityQueue):
        # Priority-queue traversal: BFS/UnweightedSPT order by increasing
        # distance; DFS uses a negated value so deeper nodes pop first.
        current_priority = q.get_priority(neighbor)
        if algo == 'UnweightedSPT' or algo == 'BFS':
            priority = self.get_node(current).get_attr('distance') + 1
        if algo == 'DFS':
            priority = -self.get_node(current).get_attr('distance') - 1
        if current_priority is not None and priority >= current_priority:
            # Already queued with an equal or better priority; skip.
            return
        q.push(neighbor, priority)
        if algo == 'UnweightedSPT' or algo == 'BFS':
            neighbor_node.set_attr('distance', priority)
        if algo == 'DFS':
            # NOTE(review): DFS records the (positive) depth here while
            # reading 'distance' above -- presumably intentional; confirm.
            neighbor_node.set_attr('depth', -priority)
    else:
        # Plain FIFO/stack queue: enqueue only on first discovery.
        distance = self.get_node(current).get_attr('distance') + 1
        if ((algo == 'UnweightedSPT' or algo == 'BFS') and neighbor_node.get_attr('distance') is not None):
            return
        neighbor_node.set_attr('distance', distance)
        neighbor_node.set_attr('label', str(distance))
        q.push(neighbor)
    # Record the tree edge and mark the neighbor as discovered.
    pred[neighbor] = current
    neighbor_node.set_attr('color', 'red')
    if component != None:
        neighbor_node.set_attr('component', component)
        neighbor_node.set_attr('label', component)
    self.display()
def qteTextChanged(self):
    """Search for sub-string matches.

    This method is triggered by Qt whenever the text changes,
    ie. whenever the user has altered the input. Extract the
    new input, find all matches, and highlight them accordingly.
    """
    # Remove any previous highlighting.
    self.clearHighlighting()
    SCI = self.qteWidget
    # Compile a list of spans that contain the specified string.
    self.compileMatchList()
    # Return if the substring does not exist in the text.
    if len(self.matchList) == 0:
        return
    # Make a copy of the style bits of the document, overwrite
    # those parts containing a substring, and then write them
    # back all at once. This is much faster than calling the
    # styling methods repeatedly.
    # Make a copy of the document style bits and determine the
    # cursor position in the document.
    style = bytearray(self.styleOrig)
    cur = SCI.positionFromLineIndex(*self.cursorPosOrig)
    # Style all matches (style byte 0x1e); count the matches that lie
    # before the cursor so selMatchIdx ends up pointing at the first
    # match at/after the cursor.
    self.selMatchIdx = 0
    for start, stop in self.matchList:
        if start < cur:
            self.selMatchIdx += 1
        style[start:stop] = bytes(b'\x1e') * (stop - start)
    # If the cursor is after the last possible match (eg. always
    # the case when the cursor is at the end of the file) then
    # self.selMatchIdx will point beyond the list.
    if self.selMatchIdx == len(self.matchList):
        self.selMatchIdx = 0
    # Style the first match after the current cursor position
    # differently (style byte 0x1f) to indicate that it is the
    # currently selected one.
    start, stop = self.matchList[self.selMatchIdx]
    style[start:stop] = bytes(b'\x1f') * (stop - start)
    # Place the cursor at the start of the currently selected match.
    line, col = SCI.lineIndexFromPosition(start)
    SCI.setCursorPosition(line, col)
    self.selMatchIdx += 1
    # Apply the modified style array to the document.
    self.qteWidget.SCISetStylingEx(0, 0, style)
def set_remote_addr(self, dst_mac, dst_ip):
    """Configure remote ethernet and IP addresses.

    :param dst_mac: remote MAC address string ("AA:BB:..." form).
    :param dst_ip: remote IPv4 address string (dotted quad).
    """
    self.dst_mac = dst_mac
    self.dst_ip = dst_ip
    # Broadcast values are placeholders, not a real peer: only mark the
    # remote address as configured for concrete addresses.
    if not (dst_mac == "FF:FF:FF:FF:FF:FF" or dst_ip == "255.255.255.255"):
        self._remote_addr_config = True
    LOG.info("[BFD][%s][REMOTE] Remote address configured: %s, %s.", hex(self._local_discr), self.dst_ip, self.dst_mac)
def add_authorization_policy(access_token, ck_id, oid):
    '''Add Media Service Authorization Policy.

    Args:
        access_token (str): A valid Azure authentication token.
        ck_id (str): A Media Service Asset Content Key ID.
        oid (str): A Media Service OID.

    Returns:
        HTTP response. JSON body.
    '''
    endpoint = '/ContentKeys'
    payload = '{"AuthorizationPolicyId":"' + oid + '"}'
    return helper_add(access_token, ck_id, endpoint, payload)
def check_for_wdiff():
    """Check that the ``wdiff`` command is available on the PATH.

    Raises:
        WdiffNotFoundError: if ``wdiff`` is not found.
    """
    # shutil.which() performs the PATH lookup portably, replacing the
    # original's Unix-only `which` subprocess and its manually managed
    # /dev/null file handle.
    import shutil
    if shutil.which(CMD_WDIFF) is None:
        msg = "the `{}` command can't be found".format(CMD_WDIFF)
        raise WdiffNotFoundError(msg)
def atlas_peer_update_health(peer_hostport, received_response, peer_table=None):
    """Mark the given peer as alive at this time.

    Update the times at which we contacted it and its health score.
    Uses the global health table by default, or the given health info
    if set.  Returns True on success, False if the peer is unknown.
    """
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            # Unknown peer: nothing to update.
            return False
        now = time_now()
        # Keep only the contact records still inside the peer-lifetime
        # window, then record this contact and its outcome.
        fresh = [(t, r) for (t, r) in ptbl[peer_hostport]['time']
                 if t + atlas_peer_lifetime_interval() >= now]
        fresh.append((now, received_response))
        ptbl[peer_hostport]['time'] = fresh
        return True
def get(name, rc_file='~/.odoorpcrc'):
    """Return the session configuration identified by `name`
    from the `rc_file` file.

    >>> import odoorpc
    >>> from pprint import pprint as pp
    >>> pp(odoorpc.session.get('foo'))  # doctest: +SKIP
    {'database': 'db_name',
     'host': 'localhost',
     'passwd': 'password',
     'port': 8069,
     'protocol': 'jsonrpc',
     'timeout': 120,
     'type': 'ODOO',
     'user': 'admin'}

    :raise: `ValueError` (wrong session name)
    """
    conf = ConfigParser()
    conf.read([os.path.expanduser(rc_file)])
    if not conf.has_section(name):
        raise ValueError(
            "'%s' session does not exist in %s" % (name, rc_file))
    # Each option maps to the ConfigParser getter that yields its type.
    getters = {
        'type': conf.get,
        'host': conf.get,
        'protocol': conf.get,
        'port': conf.getint,
        'timeout': conf.getfloat,
        'user': conf.get,
        'passwd': conf.get,
        'database': conf.get,
    }
    return {option: getter(name, option) for option, getter in getters.items()}
def cache_jobs(opts, jid, ret):
    '''Write job return data to the minion job cache.'''
    serial = salt.payload.Serial(opts=opts)
    cache_file = os.path.join(opts['cachedir'], 'minion_jobs', jid, 'return.p')
    # Create the per-jid directory on first use.
    job_dir = os.path.dirname(cache_file)
    if not os.path.isdir(job_dir):
        os.makedirs(job_dir)
    with salt.utils.files.fopen(cache_file, 'w+b') as cache_fp:
        cache_fp.write(serial.dumps(ret))
def gradient_compression_params(args: argparse.Namespace) -> Optional[Dict[str, Any]]:
    """
    :param args: Arguments as returned by argparse.
    :return: Gradient compression parameters or None.
    """
    # No compression type configured means compression is disabled.
    if args.gradient_compression_type is None:
        return None
    return {'type': args.gradient_compression_type,
            'threshold': args.gradient_compression_threshold}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.