signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _slugify(text, delim=u'-'):
    """Generate an ASCII-only slug from *text*, joining words with *delim*."""
    words = []
    for token in _punct_re.split(text.lower()):
        encoded = token.encode('utf-8')
        if encoded:
            words.append(encoded)
    joined = delim.join(part.decode('utf-8') for part in words)
    # Replace any remaining character outside [a-zA-Z0-9], whitespace and '-'.
    return re.sub('[^a-zA-Z0-9\\s\\-]{1}', replace_char, joined).lower()
def tail(ctx):
    """Show the last 10 lines of the log file."""
    click.echo('tailing logs')
    for entry in ctx.tail()[-10:]:
        # Timestamps arrive in milliseconds; convert to ISO-8601.
        stamp = datetime.utcfromtimestamp(entry['timestamp'] // 1000).isoformat()
        click.echo("{}: {}".format(stamp, entry['message']))
    click.echo('done')
def add_from_lists(self, data_list=None, fun_list=None, dsp_list=None):
    """Add multiple data, function, and sub-dispatcher nodes at once.

    :param data_list:
        List of data node kwargs passed to :func:`add_data`, optional.
    :type data_list: list[dict], optional
    :param fun_list:
        List of function node kwargs passed to :func:`add_function`, optional.
    :type fun_list: list[dict], optional
    :param dsp_list:
        List of sub-dispatcher node kwargs passed to :func:`add_dispatcher`,
        optional.
    :type dsp_list: list[dict], optional
    :returns:
        Data node ids, function node ids, and sub-dispatcher node ids.
    :rtype: (list[str], list[str], list[str])

    .. seealso:: :func:`add_data`, :func:`add_function`, :func:`add_dispatcher`
    """
    # Each missing list simply contributes an empty id list.
    data_ids = [self.add_data(**kw) for kw in data_list] if data_list else []
    fun_ids = [self.add_function(**kw) for kw in fun_list] if fun_list else []
    dsp_ids = [self.add_dispatcher(**kw) for kw in dsp_list] if dsp_list else []
    return data_ids, fun_ids, dsp_ids
def auto_register_search_models():
    """Register every searchable model's queryset with the search index."""
    for config in models_config.get_all_configs():
        # Configs can opt out of indexing entirely.
        if config.disable_search_index:
            continue
        search.register(
            config.model.objects.get_queryset(),
            ModelSearchAdapter,
            fields=config.search_fields,
            exclude=config.search_exclude_fields,
        )
def do_action_to_descendants(analysis_request, transition_id):
    """Cascade *transition_id* to the direct descendant partitions."""
    partitions = analysis_request.getDescendants(all_descendants=False)
    for partition in partitions:
        do_action_for(partition, transition_id)
def callback(self, herald_svc, message):
    """Safely invoke the stored callback with the received answer message.

    Exceptions raised by the callback are logged and swallowed so errors
    never propagate outside this method.

    :param herald_svc: Herald service instance
    :param message: Received answer message
    """
    if self.__callback is None:
        return
    try:
        # pylint: disable=W0703
        self.__callback(herald_svc, message)
    except Exception as ex:
        _logger.exception("Error calling callback: %s", ex)
def trace_module(module, tracer=tracer, pattern=r".*", flags=0):
    """Trace the functions and classes a module exports via ``__all__``.

    :param module: Module to trace.
    :type module: ModuleType
    :param tracer: Tracer object applied to each member.
    :param pattern: Regex a member name must match to be traced.
    :type pattern: unicode
    :param flags: Regex matching flags.
    :type flags: int
    :return: Definition success (False if already traced).
    :rtype: bool
    :note: Only members exported by ``__all__`` are traced.
    """
    if is_traced(module):
        return False
    global REGISTERED_MODULES

    def _should_trace(member_name):
        # Members must be exported AND match the caller's pattern.
        return (member_name in module.__all__ and
                re.search(pattern, member_name, flags=flags))

    for member_name, function in inspect.getmembers(module, inspect.isfunction):
        if _should_trace(member_name):
            trace_function(module, function, tracer)
    for member_name, cls in inspect.getmembers(module, inspect.isclass):
        if _should_trace(member_name):
            trace_class(cls, tracer, pattern, flags)
    REGISTERED_MODULES.add(module)
    set_traced(module)
    return True
def emit(class_, method, tag, contents):
    """Construct the byte string of an ASN.1 DER-encoded value.

    Typically not useful directly — prefer the standard classes from
    asn1crypto.core (or a custom class with specific fields) and their
    .dump() method.

    :param class_: integer ASN.1 class: 0 (universal), 1 (application),
        2 (context), 3 (private)
    :param method: integer ASN.1 method: 0 (primitive), 1 (constructed)
    :param tag: integer ASN.1 tag value (non-negative)
    :param contents: byte string of the encoded contents
    :return: byte string of the DER header followed by the contents
    :raises TypeError: if an argument has the wrong type
    :raises ValueError: if an argument is out of range
    """
    if not isinstance(class_, int):
        raise TypeError('class_ must be an integer, not %s' % type_name(class_))
    if not 0 <= class_ <= 3:
        raise ValueError('class_ must be one of 0, 1, 2 or 3, not %s' % class_)
    if not isinstance(method, int):
        raise TypeError('method must be an integer, not %s' % type_name(method))
    if not 0 <= method <= 1:
        raise ValueError('method must be 0 or 1, not %s' % method)
    if not isinstance(tag, int):
        raise TypeError('tag must be an integer, not %s' % type_name(tag))
    if tag < 0:
        raise ValueError('tag must be greater than zero, not %s' % tag)
    if not isinstance(contents, byte_cls):
        raise TypeError('contents must be a byte string, not %s' % type_name(contents))
    header = _dump_header(class_, method, tag, contents)
    return header + contents
def retrieveVals(self):
    """Retrieve values for graphs."""
    fs = FSinfo(self._fshost, self._fsport, self._fspass)
    if self.hasGraph('fs_calls'):
        self.setGraphVal('fs_calls', 'calls', fs.getCallCount())
    if self.hasGraph('fs_channels'):
        self.setGraphVal('fs_channels', 'channels', fs.getChannelCount())
def svn_versions_from_vcs(tag_prefix, root, verbose=False):
    """Return a dictionary of version values read directly from Subversion.

    This is the third attempt made by get_versions() to find information.
    Returns an empty dict when no .svn directory exists or the repository
    cannot be queried.
    """
    if not os.path.exists(os.path.join(root, '.svn')):
        if verbose:
            print("no .svn in %s." % root)
        return {}
    current_module = sys.modules[__name__]
    # Running from _version.py exposes 'svn_tag_url' at module level.
    tag_url = getattr(current_module, 'svn_tag_url', None)
    if tag_url is None:
        # Running from versioneer.py: look inside 'vcs_settings' instead.
        vcs_settings = getattr(current_module, 'vcs_settings', None)
        if vcs_settings is not None and 'svn' in vcs_settings and 'tag_url' in vcs_settings['svn']:
            tag_url = vcs_settings['svn']['tag_url']
    if tag_url is None:
        raise ValueError("Please define VCS-specific 'tag_url' setting for "
                         "'svn' within 'versioneer'.")
    info_xml = run_command(['svn'], ['ls', '--xml', tag_url], cwd=root)
    # TODO(dustin): This should raise an EnvironmentError upon failure.
    if info_xml is None:
        print("Error accessing Subversion for latest version.")
        return {}
    releases, latest_revision = svn_parse_tag_xml(info_xml)
    release_name = releases[latest_revision]['name']
    return {'default': release_name, 'version': release_name, 'full': release_name}
def add(doc, pointer, value):
    """Add an element to a sequence, or a member to a mapping.

    :param doc: the document base
    :param pointer: the path within the document to add at
    :param value: the new value
    :return: the new document object
    """
    target = Target(doc).add(pointer, value)
    return target.document
def extra(name: str, desc: str) -> Callable:
    """Decorator for a slave channel's "additional features" interface.

    Args:
        name (str): Human-readable name of the function.
        desc (str): Short description and usage; use ``{function_name}``
            as a placeholder for the function name in the description.

    Returns:
        The decorated method.
    """
    def decorate(func):
        # Mark the function and attach its display metadata.
        setattr(func, "extra_fn", True)
        setattr(func, "name", name)
        setattr(func, "desc", desc)
        return func
    return decorate
def create_many(self, statements):
    """Create multiple statement entries with a single bulk insert."""
    documents = []
    for statement in statements:
        data = statement.serialize()
        # Deduplicate tags while keeping a list for storage.
        data['tags'] = list(set(data.pop('tags', [])))
        if not statement.search_text:
            data['search_text'] = self.tagger.get_bigram_pair_string(statement.text)
        if not statement.search_in_response_to and statement.in_response_to:
            data['search_in_response_to'] = self.tagger.get_bigram_pair_string(statement.in_response_to)
        documents.append(data)
    self.statements.insert_many(documents)
def act(self, action):
    """Send a single action. This is a shortcut for `actions`."""
    # Skip no-ops: empty or field-less actions are not sent.
    if action and action.ListFields():
        return self.actions(sc_pb.RequestAction(actions=[action]))
def _update_feature_log_prob(self, alpha):
    """Apply additive smoothing to raw counts and recompute log probabilities."""
    fc = self.feature_count_ + alpha
    cc = self.class_count_ + alpha * 2
    # log P(feature | class); reshape gives per-class broadcasting.
    self.feature_log_prob_ = np.log(fc) - np.log(cc.reshape(-1, 1))
def find_icelines(self):
    """Find the ice line according to the surface temperature.

    Called by :func:`~climlab.surface.albedo.Iceline._compute`; compares
    the surface temperature ``self.state['Ts']`` with the freezing
    temperature ``self.param['Tf']`` and updates:

    :ivar Field noice: booleans, ``True`` where Ts >= Tf
    :ivar Field ice: booleans, ``True`` where Ts < Tf
    :ivar array icelat: two-element array of ice-edge latitudes
    :ivar float ice_area: fractional area covered by ice (0-1)
    """
    Tf = self.param['Tf']
    Ts = self.state['Ts']
    lat_bounds = self.domains['Ts'].axes['lat'].bounds
    self.noice = np.where(Ts >= Tf, True, False)
    self.ice = np.where(Ts < Tf, True, False)
    # Ice cover in fractional area.
    self.ice_area = global_mean(self.ice * np.ones_like(self.Ts))
    # Express ice cover in terms of ice-edge latitudes.
    if self.ice.all():
        # 100% ice cover.
        self.icelat = np.array([-0., 0.])
    elif self.noice.all():
        # Zero ice cover.
        self.icelat = np.array([-90., 90.])
    else:
        # There is some ice edge. np.diff of a boolean array is True at
        # each boundary between a True run and a False run.
        boundary_indices = np.where(np.diff(self.ice.squeeze()))[0] + 1
        # Asymmetric cases [-90, x] or [x, 90] yield only one boundary;
        # extend the index array with the missing pole.
        # BUG FIX: the original repeated this whole check twice verbatim;
        # the second copy was dead code (size is 2 after the first pass).
        if boundary_indices.size == 1:
            if self.ice[0] == True:
                # Case [x, 90]: append the north-pole index.
                boundary_indices = np.append(boundary_indices, self.ice.size)
            elif self.ice[-1] == True:
                # Case [-90, x]: prepend the south-pole index.
                boundary_indices = np.insert(boundary_indices, 0, 0)
        self.icelat = lat_bounds[boundary_indices]
def column(self, name):
    """Return the index of the column with the given name.

    :param name: <str>
    :return: <int> (-1 if not found)
    """
    columns = self.columns()
    if name in columns:
        return columns.index(name)
    # Fall back to a format-insensitive match via underscored names.
    target = projex.text.underscore(name)
    for index, column_name in enumerate(columns):
        if projex.text.underscore(column_name) == target:
            return index
    return -1
def find_model(sender, model_name):
    """Register the named model with the ORM and return it."""
    MC = get_mc()
    # Only records with a non-empty uuid are considered registered models.
    record = MC.get((MC.c.model_name == model_name) & (MC.c.uuid != ''))
    if record:
        instance = record.get_instance()
        orm.set_model(model_name, instance.table_name, appname=__name__, model_path='')
    return orm.__models__.get(model_name)
def _section_from_spec(self, spec):
    '''Retrieve a section given a "spec" (either number or name).
    Return None if no such section exists in the file.'''
    try:
        num = int(spec)
        valid = num < self.elf_file.num_sections()
        return self.elf_file.get_section(num) if valid else None
    except ValueError:
        # Not a number — must be a section name.
        return self.elf_file.get_section_by_name(spec)
def tap(self, interceptor):
    """Invoke *interceptor* with the wrapped object, then return the object.

    The primary purpose is to "tap into" a method chain to perform
    operations on intermediate results within the chain.
    """
    obj = self.obj
    interceptor(obj)
    return self._wrap(obj)
def pages_siblings_menu(context, page, url='/'):
    """Render a nested list of the given page's siblings (secondary menu).

    :param page: the page where to start the menu from.
    :param url: not used anymore.
    """
    lang = context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
    page = get_page_from_string_or_id(page, lang)
    if page:
        context.update({'children': page.get_siblings(), 'page': page})
    return context
def get_schema(repo, content_type):
    """Return the avro schema for a content type in a repository.

    :param Repo repo: the git repository.
    :returns: dict
    :raises NotFound: if no schema file exists for the content type.
    """
    schema_path = os.path.join(repo.working_dir, '_schemas', '%s.avsc' % (content_type,))
    try:
        with open(schema_path, 'r') as fp:
            return avro.schema.parse(fp.read())
    except IOError:  # pragma: no cover
        raise NotFound('Schema does not exist.')
def eval(self):
    """Recursively evaluate the node. Exits with an error if not resolved.

    Temporarily clears ``Expr.ignore`` so unresolved references are
    reported during evaluation, restoring it afterwards.
    """
    Expr.ignore = False
    try:
        return self.try_eval()
    finally:
        # BUG FIX: restore the flag even when try_eval() raises, so a
        # failed evaluation cannot leave error reporting enabled globally.
        Expr.ignore = True
def _find_directory_upwards(base: str, target: str) -> Optional[str]:
    """Walk directories upwards from *base* until the filesystem root,
    returning the first existing directory named *target*, or None if the
    search is unsuccessful."""
    current = base
    while True:
        candidate = os.path.join(current, target)
        LOG.debug("Trying with: `%s`", candidate)
        if os.path.isdir(candidate):
            return candidate
        parent = os.path.dirname(current)
        if parent == current:
            # dirname() is a fixed point only at the root.
            return None
        current = parent
def evaluate(self, data, env):
    """Evaluate the predicate and pick true/false values element-wise."""
    mask = self.predicate_expr.evaluate(data, env)
    when_true = self.true_value_expr.evaluate(data, env)
    when_false = self.false_value_expr.evaluate(data, env)
    idx_true = np.where(mask)[0]
    idx_false = np.where(~mask)[0]
    # Object array so mixed result types from both branches are allowed.
    result = np.repeat(None, len(data))
    result[idx_true] = self.nice_value(when_true, idx_true)
    result[idx_false] = self.nice_value(when_false, idx_false)
    return np.array(list(result))
def _compile_fdpf(self):
    """Fast Decoupled Power Flow execution: implement g(y).

    Builds the source text of the FDPF g(y) call sequence and stores the
    compiled code object in ``self.fdpf``.
    """
    # NOTE(review): the generated text is wrapped in triple quotes and then
    # passed through eval() to recover the inner source string before
    # compiling it.  This assumes no gcall snippet contains a closing
    # triple quote or backslash escape — confirm before touching.
    string = '"""\n'
    string += 'system.dae.init_g()\n'
    for pflow, gcall, call in zip(self.pflow, self.gcall, self.gcalls):
        # Only models flagged for power flow with a g-call contribute.
        if pflow and gcall:
            string += call
    string += 'system.dae.reset_small_g()\n'
    string += '\n'
    string += '"""'
    self.fdpf = compile(eval(string), '', 'exec')
def _generate_examples(self, label_images):
    """Yield one example dict per image in the label -> image-paths mapping."""
    for label, paths in label_images.items():
        for path in paths:
            yield {"image": path, "label": label}
def user_event(uid):
    """Fetch a user's activity feed.

    :param uid: the user's ID, obtainable via login or other endpoints.
    :raises ParamsError: if *uid* is None.
    """
    if uid is None:
        raise ParamsError()
    bot = NCloudBot()
    bot.method = 'USER_EVENT'
    bot.params = {'uid': uid}
    bot.data = {'time': -1, 'getcounts': True, "csrf_token": ""}
    bot.send()
    return bot.response
def cli(ctx, uuid, output_format="gzip"):
    """Download pre-prepared data by UUID.

    Output:
        The downloaded content
    """
    downloader = ctx.gi.io
    return downloader.download(uuid, output_format=output_format)
def determine_result(self, returncode, returnsignal, output, isTimeout):
    """Parse the tool output and extract the verification result.

    Returns one of the benchexec.result.RESULT_* strings when the tool
    gave a result; otherwise an arbitrary string shown to the user that
    indicates the failure reason (e.g. "CRASH", "OUT_OF_MEMORY").
    """
    for line in output:
        if line.startswith('KLEE: ERROR: '):
            if 'ASSERTION FAIL:' in line:
                return result.RESULT_FALSE_REACH
            if 'memory error: out of bound pointer' in line:
                return result.RESULT_FALSE_DEREF
            if 'overflow' in line:
                return result.RESULT_FALSE_OVERFLOW
            return "ERROR ({0})".format(returncode)
        if line.startswith('KLEE: done'):
            return result.RESULT_DONE
    return result.RESULT_UNKNOWN
def search_track(self, artist, album=None, track=None, full_album_art_uri=False):
    """Search for an artist, an artist's albums, or a specific track.

    Args:
        artist (str): an artist's name.
        album (str, optional): an album name. Default `None`.
        track (str, optional): a track name. Default `None`.
        full_album_art_uri (bool): whether the album art URI should be
            absolute (i.e. including the IP address). Default `False`.

    Returns:
        A `SearchResult` instance.
    """
    subcategories = [artist, album or '']
    # Perform the search.
    result = self.get_album_artists(
        full_album_art_uri=full_album_art_uri,
        subcategories=subcategories,
        search_term=track,
        complete_result=True,
    )
    result._metadata['search_type'] = 'search_track'
    return result
def reachable_nodes(self, p_id, p_recursive=True, p_reverse=False):
    """Return the set of all neighbors the given node can reach.

    If p_recursive, the neighbors' neighbors (etc.) are included.
    If p_reverse, the arrows are reversed before locating neighbors.
    """
    pending = [p_id]
    visited = set()
    reachable = set()
    while pending:
        node = pending.pop()
        if node in visited or node not in self._edges:
            continue
        visited.add(node)
        if p_reverse:
            # A "reverse neighbor" is any node whose edge list contains us.
            neighbors = [n for n, targets in self._edges.items() if node in targets]
        else:
            neighbors = list(self._edges[node])
        pending += neighbors
        reachable |= set(neighbors)
        if not p_recursive:
            break
    return reachable
def SurfFlux(self, forc, parameter, simTime, humRef, tempRef, windRef, boundCond, intFlux):
    """Calculate net heat flux, and update element layer temperatures.

    All fluxes are calculated per unit area (W m-2).  Updates, among
    others: ``self.aeroCond``, ``self.solAbs``, ``self.lat``,
    ``self.sens``, ``self.flux``, ``self.layerTemp``, ``self.T_ext``,
    ``self.T_int``.
    """
    # Air density (kg m-3) from the ideal-gas relation with a moisture
    # correction on the gas constant.
    dens = forc.pres / (1000 * 0.287042 * tempRef * (1. + 1.607858 * humRef))
    # Convection coefficient (ref: uwg, eq. 12).
    self.aeroCond = 5.8 + 3.7 * windRef
    if (self.horizontal):  # For roof, mass, road.
        # Evaporation (m s-1), film water & soil latent heat.
        if not self.is_near_zero(self.waterStorage) and self.waterStorage > 0.0:
            # N.B. In the current uwg code, latent heat from
            # evapotranspiration, stagnant water, or anthropogenic sources
            # is not modelled due to the difficulty of validation and lack
            # of reliable precipitation data in EPW files; all elements
            # have waterStorage hardcoded to 0, so this branch never runs.
            qtsat = self.qsat([self.layerTemp[0]], [forc.pres], parameter)[0]
            eg = self.aeroCond * parameter.colburn * dens * (qtsat - humRef) / parameter.waterDens / parameter.cp
            # Water budget clamped to [0, wgmax].
            self.waterStorage = min(self.waterStorage + simTime.dt * (forc.prec - eg), parameter.wgmax)
            self.waterStorage = max(self.waterStorage, 0.)
        else:
            eg = 0.
        soilLat = eg * parameter.waterDens * parameter.lv
        # Winter, no vegetation.
        # NOTE(review): with `and`, this is only satisfiable when
        # vegEnd < month < vegStart; if vegStart < vegEnd the branch is
        # unreachable and `or` may have been intended — confirm against
        # the upstream uwg source before changing.
        if simTime.month < parameter.vegStart and simTime.month > parameter.vegEnd:
            self.solAbs = (1. - self.albedo) * self.solRec  # (W m-2)
            vegLat = 0.
            vegSens = 0.
        else:  # Summer, vegetation active.
            # Absorbed solar: area-weighted mix of bare and vegetated cover.
            self.solAbs = ((1. - self.vegCoverage) * (1. - self.albedo) + self.vegCoverage * (1. - parameter.vegAlbedo)) * self.solRec
            vegLat = self.vegCoverage * parameter.grassFLat * (1. - parameter.vegAlbedo) * self.solRec
            vegSens = self.vegCoverage * (1. - parameter.grassFLat) * (1. - parameter.vegAlbedo) * self.solRec
        self.lat = soilLat + vegLat
        # Sensible & net heat flux.
        self.sens = vegSens + self.aeroCond * (self.layerTemp[0] - tempRef)
        self.flux = -self.sens + self.solAbs + self.infra - self.lat  # (W m-2)
    else:  # For vertical surfaces (wall).
        self.solAbs = (1. - self.albedo) * self.solRec
        self.lat = 0.
        # Sensible & net heat flux.
        self.sens = self.aeroCond * (self.layerTemp[0] - tempRef)
        self.flux = -self.sens + self.solAbs + self.infra - self.lat  # (W m-2)
    # Propagate the net flux through the element layers.
    self.layerTemp = self.Conduction(simTime.dt, self.flux, boundCond, forc.deepTemp, intFlux)
    self.T_ext = self.layerTemp[0]
    self.T_int = self.layerTemp[-1]
def collapse(self, weight_attr='weight'):
    """Return a :class:`networkx.Graph` or :class:`networkx.DiGraph` in
    which the edges between each pair of nodes are collapsed into a single
    weighted edge.

    :param weight_attr: edge attribute summed into the collapsed edge's
        'weight'; edges lacking it contribute 1.
    """
    if self.directed:
        graph = nx.DiGraph()
    else:
        graph = nx.Graph()
    # Transfer all nodes and attributes.
    # NOTE(review): add_node(n, attrs) is the networkx 1.x calling
    # convention; networkx >= 2 requires add_node(n, **attrs) — confirm
    # the pinned networkx version.
    for n, attrs in self.master_graph.nodes(data=True):
        graph.add_node(n, attrs)
    for s, t, attrs in self.master_graph.edges(data=True):
        if not graph.has_edge(s, t):
            graph.add_edge(s, t)
        if 'weight' not in graph[s][t]:
            graph[s][t]['weight'] = 0.
        # Sum the weight attribute; edges without it count as 1.
        if weight_attr in attrs:
            graph[s][t]['weight'] += attrs[weight_attr]
        else:
            graph[s][t]['weight'] += 1.
        gname = attrs['graph']
        # Keep every other attribute, keyed by originating graph.
        # BUG FIX: dict.iteritems() does not exist in Python 3; items()
        # behaves identically for this read-only iteration on both 2 and 3.
        for k, v in attrs.items():
            if k in [weight_attr, 'graph']:
                continue
            if k not in graph[s][t]:
                graph[s][t][k] = {}
            graph[s][t][k][gname] = v
    return graph
def set_index(self, keys, drop=True, append=False, inplace=False, verify_integrity=False):
    """Set the DataFrame index using existing columns.

    Set the DataFrame index (row labels) using one or more existing
    columns or arrays (of the correct length).  The index can replace the
    existing index or expand on it.

    Parameters
    ----------
    keys : label or array-like or list of labels/arrays
        A single column key, a single array of the same length as the
        calling DataFrame, or a list containing an arbitrary combination
        of column keys and arrays.  Here, "array" encompasses
        :class:`Series`, :class:`Index`, ``np.ndarray``, and instances of
        :class:`~collections.abc.Iterator`.
    drop : bool, default True
        Delete columns to be used as the new index.
    append : bool, default False
        Whether to append columns to the existing index.
    inplace : bool, default False
        Modify the DataFrame in place (do not create a new object).
    verify_integrity : bool, default False
        Check the new index for duplicates immediately.  Otherwise the
        check is deferred until necessary; False is faster.

    Returns
    -------
    DataFrame
        Changed row labels (None when ``inplace=True``).

    See Also
    --------
    DataFrame.reset_index : Opposite of set_index.
    DataFrame.reindex : Change to new indices or expand indices.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame({'month': [1, 4, 7, 10],
    ...                    'year': [2012, 2014, 2013, 2014],
    ...                    'sale': [55, 40, 84, 31]})
    >>> df.set_index('month')  # doctest: +SKIP
    >>> df.set_index(['year', 'month'])  # doctest: +SKIP
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    if not isinstance(keys, list):
        keys = [keys]
    err_msg = ('The parameter "keys" may be a column key, one-dimensional '
               'array, or a list containing only valid column keys and '
               'one-dimensional arrays.')
    # First pass: validate every entry before mutating anything.
    missing = []
    for col in keys:
        if isinstance(col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)):
            # Arrays are fine as long as they are one-dimensional;
            # iterators get converted to list below.
            if getattr(col, 'ndim', 1) != 1:
                raise ValueError(err_msg)
        else:
            # Everything else gets tried as a key; see GH 24969.
            try:
                found = col in self.columns
            except TypeError:
                raise TypeError(err_msg + ' Received column of '
                                'type {}'.format(type(col)))
            else:
                if not found:
                    missing.append(col)
    if missing:
        raise KeyError('None of {} are in the columns'.format(missing))
    if inplace:
        frame = self
    else:
        frame = self.copy()
    arrays = []
    names = []
    if append:
        # Preserve the existing index level(s) in front of the new ones.
        names = [x for x in self.index.names]
        if isinstance(self.index, ABCMultiIndex):
            for i in range(self.index.nlevels):
                arrays.append(self.index._get_level_values(i))
        else:
            arrays.append(self.index)
    to_remove = []
    # Second pass: collect the index arrays and their level names.
    for col in keys:
        if isinstance(col, ABCMultiIndex):
            for n in range(col.nlevels):
                arrays.append(col._get_level_values(n))
            names.extend(col.names)
        elif isinstance(col, (ABCIndexClass, ABCSeries)):
            # If Index then not MultiIndex (treated above).
            arrays.append(col)
            names.append(col.name)
        elif isinstance(col, (list, np.ndarray)):
            arrays.append(col)
            names.append(None)
        elif isinstance(col, abc.Iterator):
            arrays.append(list(col))
            names.append(None)
        # From here, col can only be a column label.
        else:
            arrays.append(frame[col]._values)
            names.append(col)
            if drop:
                to_remove.append(col)
        if len(arrays[-1]) != len(self):
            # Check the newest element against the length of the calling
            # frame, since ensure_index_from_sequences would not raise for
            # append=False.
            raise ValueError('Length mismatch: Expected {len_self} rows, '
                             'received array of length {len_col}'.format(len_self=len(self), len_col=len(arrays[-1])))
    index = ensure_index_from_sequences(arrays, names)
    if verify_integrity and not index.is_unique:
        duplicates = index[index.duplicated()].unique()
        raise ValueError('Index has duplicate keys: {dup}'.format(dup=duplicates))
    # Use set to handle duplicate column names gracefully in case of drop.
    for c in set(to_remove):
        del frame[c]
    # Clear up memory usage.
    index._cleanup()
    frame.index = index
    if not inplace:
        return frame
def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
    """Calculates the powder neutron diffraction pattern for a structure.

    Args:
        structure (Structure): Input structure
        scaled (bool): Whether to return scaled intensities. The maximum
            peak is set to a value of 100. Defaults to True. Use False if
            you need the absolute values to combine ND plots.
        two_theta_range ([float of length 2]): Tuple for range of
            two_thetas to calculate in degrees. Defaults to (0, 90). Set to
            None if you want all diffracted beams within the limiting
            sphere of radius 2 / wavelength.

    Returns:
        (NDPattern)
    """
    if self.symprec:
        # Refine the structure through its spacegroup first when a
        # symmetry precision is configured on this calculator.
        finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
        structure = finder.get_refined_structure()
    wavelength = self.wavelength
    latt = structure.lattice
    is_hex = latt.is_hexagonal()
    # Obtained from Bragg condition. Note that reciprocal lattice
    # vector length is 1 / d_hkl.
    min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
        [2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]
    # Obtain crystallographic reciprocal lattice points within range
    recip_latt = latt.reciprocal_lattice_crystallographic
    recip_pts = recip_latt.get_points_in_sphere([[0, 0, 0]], [0, 0, 0], max_r)
    if min_r:
        # Discard points below the lower reciprocal-radius cutoff.
        recip_pts = [pt for pt in recip_pts if pt[1] >= min_r]
    # Create a flattened array of coeffs, fcoords and occus. This is
    # used to perform vectorized computation of atomic scattering factors
    # later. Note that these are not necessarily the same size as the
    # structure as each partially occupied specie occupies its own
    # position in the flattened array.
    coeffs = []
    fcoords = []
    occus = []
    dwfactors = []  # Debye-Waller factors, one entry per (site, species)
    for site in structure:
        for sp, occu in site.species.items():
            try:
                c = ATOMIC_SCATTERING_LEN[sp.symbol]
            except KeyError:
                raise ValueError("Unable to calculate ND pattern as "
                                 "there is no scattering coefficients for"
                                 " %s." % sp.symbol)
            coeffs.append(c)
            dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
            fcoords.append(site.frac_coords)
            occus.append(occu)
    coeffs = np.array(coeffs)
    fcoords = np.array(fcoords)
    occus = np.array(occus)
    dwfactors = np.array(dwfactors)
    peaks = {}
    two_thetas = []
    # Iterate reciprocal points sorted by |g| (then by -h, -k, -l so that
    # "nicer" indices label merged peaks first).
    for hkl, g_hkl, ind, _ in sorted(
            recip_pts, key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
        # Force miller indices to be integers.
        hkl = [int(round(i)) for i in hkl]
        if g_hkl != 0:
            d_hkl = 1 / g_hkl
            # Bragg condition
            theta = asin(wavelength * g_hkl / 2)
            # s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2 (d = 1 / |ghkl|)
            s = g_hkl / 2
            # Calculate Debye-Waller factor
            dw_correction = np.exp(-dwfactors * (s ** 2))
            # Vectorized computation of g.r for all fractional coords and hkl.
            g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]
            # Structure factor = sum of atomic scattering factors (with
            # position factor exp(2j * pi * g.r and occupancies).
            # Vectorized computation.
            f_hkl = np.sum(coeffs * occus * np.exp(2j * pi * g_dot_r) * dw_correction)
            # Lorentz polarization correction for hkl
            lorentz_factor = 1 / (sin(theta) ** 2 * cos(theta))
            # Intensity for hkl is modulus square of structure factor.
            i_hkl = (f_hkl * f_hkl.conjugate()).real
            two_theta = degrees(2 * theta)
            if is_hex:
                # Use Miller-Bravais indices for hexagonal lattices.
                hkl = (hkl[0], hkl[1], -hkl[0] - hkl[1], hkl[2])
            # Deal with floating point precision issues: fold this peak into
            # an existing one when the two-theta values agree within tolerance.
            ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) < self.TWO_THETA_TOL)
            if len(ind[0]) > 0:
                peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
                peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
            else:
                peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)], d_hkl]
                two_thetas.append(two_theta)
    # Scale intensities so that the max intensity is 100.
    max_intensity = max([v[0] for v in peaks.values()])
    x = []
    y = []
    hkls = []
    d_hkls = []
    for k in sorted(peaks.keys()):
        v = peaks[k]
        fam = get_unique_families(v[1])
        # Drop peaks whose scaled intensity would fall below the tolerance.
        if v[0] / max_intensity * 100 > self.SCALED_INTENSITY_TOL:
            x.append(k)
            y.append(v[0])
            hkls.append(fam)
            d_hkls.append(v[2])
    nd = DiffractionPattern(x, y, hkls, d_hkls)
    if scaled:
        nd.normalize(mode="max", value=100)
    return nd
def _init_style(self):
    """Initialise the default colour and font style options."""
    white, black, gray = (QtGui.QColor(c) for c in ('white', 'black', 'light gray'))
    self._background = white
    self._foreground = black
    self._whitespaces_foreground = gray
    # Selection colours come from the application palette so the widget
    # follows the platform theme.
    qt_app = QtWidgets.QApplication.instance()
    self._sel_background = qt_app.palette().highlight().color()
    self._sel_foreground = qt_app.palette().highlightedText().color()
    self._font_size = 10
    self.font_name = ""
def fix_pyqt5_QGraphicsItem_itemChange():
    """Attempt to remedy:
    https://www.riverbankcomputing.com/pipermail/pyqt/2016-February/037015.html
    """
    from PyQt5.QtWidgets import QGraphicsObject, QGraphicsItem

    # Minimal QGraphicsObject subclass used only to probe whether
    # itemChange round-trips a usable parent item.
    class Obj(QGraphicsObject):
        def itemChange(self, change, value):
            return QGraphicsObject.itemChange(self, change, value)

    obj = Obj()
    parent = Obj()
    obj.setParentItem(parent)
    # If the parent we just set does not read back, the bug is present.
    if obj.parentItem() is None:
        # There was probably already some signal defined using QObject's
        # subclass from QtWidgets.
        # We will monkey patch the QGraphicsItem.itemChange and explicitly
        # sip.cast all input and output QGraphicsItem instances
        import sip
        QGraphicsItem_itemChange_old = QGraphicsItem.itemChange
        # All the QGraphicsItem.ItemChange flags which accept/return
        # a QGraphicsItem
        changeset = {
            QGraphicsItem.ItemParentChange,
            QGraphicsItem.ItemParentHasChanged,
            QGraphicsItem.ItemChildAddedChange,
            QGraphicsItem.ItemChildRemovedChange,
        }

        def QGraphicsItem_itemChange(self, change, value):
            if change in changeset:
                # Cast both the incoming and outgoing item so PyQt resolves
                # the correct wrapper type.
                if isinstance(value, QGraphicsItem):
                    value = sip.cast(value, QGraphicsItem)
                rval = QGraphicsItem_itemChange_old(self, change, value)
                if isinstance(rval, QGraphicsItem):
                    rval = sip.cast(rval, QGraphicsItem)
                return rval
            else:
                return QGraphicsItem_itemChange_old(self, change, value)

        QGraphicsItem.itemChange = QGraphicsItem_itemChange
        warnings.warn("Monkey patching QGraphicsItem.itemChange", RuntimeWarning)
def list(ctx):
    '''Display all the stored key/value.'''
    dotenv_path = ctx.obj['FILE']
    # Echo one KEY=VALUE line per stored entry.
    for key, value in dotenv_values(dotenv_path).items():
        click.echo('%s=%s' % (key, value))
def rand(x_bounds, x_types, lowerbound, upperbound, max_retries=100):
    '''Key idea is that we try to move towards upperbound, by randomly choose one
    value for each parameter. However, for the last parameter,
    we need to make sure that its value can help us get above lowerbound.

    Returns a list of sampled values (one per parameter), or None when the
    bounds are infeasible or no feasible sample was found in max_retries
    attempts.
    '''
    outputs = None
    if check_feasibility(x_bounds, lowerbound, upperbound) is True:
        # Order parameters by their range size. We want the smallest range first,
        # because the corresponding parameter has less numbers to choose from
        x_idx_sorted = []
        for i, _ in enumerate(x_bounds):
            if x_types[i] == "discrete_int":
                x_idx_sorted.append([i, len(x_bounds[i])])
            elif (x_types[i] == "range_int") or (x_types[i] == "range_continuous"):
                x_idx_sorted.append([i, math.floor(x_bounds[i][1] - x_bounds[i][0])])
        x_idx_sorted = sorted(x_idx_sorted, key=itemgetter(1))
        for _ in range(max_retries):
            budget_allocated = 0
            outputs = [None] * len(x_bounds)
            for i, _ in enumerate(x_idx_sorted):
                x_idx = x_idx_sorted[i][0]
                # The amount of unallocated space that we have
                budget_max = upperbound - budget_allocated
                # NOT the last x that we need to assign a random number
                if i < (len(x_idx_sorted) - 1):
                    if x_bounds[x_idx][0] <= budget_max:
                        if x_types[x_idx] == "discrete_int":
                            # Note the valid integer
                            temp = []
                            for j in x_bounds[x_idx]:
                                if j <= budget_max:
                                    temp.append(j)
                            # Randomly pick a number from the integer array
                            if temp:
                                outputs[x_idx] = temp[random.randint(0, len(temp) - 1)]
                        elif (x_types[x_idx] == "range_int") or (x_types[x_idx] == "range_continuous"):
                            # NOTE(review): random.randint is used even for
                            # "range_continuous", so continuous parameters are
                            # sampled on integer values only — confirm intended.
                            outputs[x_idx] = random.randint(x_bounds[x_idx][0], min(x_bounds[x_idx][-1], budget_max))
                else:
                    # The last x that we need to assign a random number
                    randint_lowerbound = lowerbound - budget_allocated
                    randint_lowerbound = 0 if randint_lowerbound < 0 else randint_lowerbound
                    # This check:
                    # is our smallest possible value going to overflow the available budget space,
                    # and is our largest possible value going to underflow the lower bound
                    if (x_bounds[x_idx][0] <= budget_max) and (x_bounds[x_idx][-1] >= randint_lowerbound):
                        if x_types[x_idx] == "discrete_int":
                            temp = []
                            for j in x_bounds[x_idx]:
                                # if (j <= budget_max) and (j >= randint_lowerbound):
                                if randint_lowerbound <= j <= budget_max:
                                    temp.append(j)
                            if temp:
                                outputs[x_idx] = temp[random.randint(0, len(temp) - 1)]
                        elif (x_types[x_idx] == "range_int") or (x_types[x_idx] == "range_continuous"):
                            outputs[x_idx] = random.randint(randint_lowerbound, min(x_bounds[x_idx][1], budget_max))
                # A parameter could not be assigned: abandon this attempt.
                if outputs[x_idx] is None:
                    break
                else:
                    budget_allocated += outputs[x_idx]
            # All parameters assigned — this attempt succeeded.
            if None not in outputs:
                break
    return outputs
def _get_embedded ( self , name ) :
'''Return an embedded struct object to calculate the size or use _ tobytes ( True ) to convert just the
embedded parts .
: param name : either the original type , or the name of the original type . It is always the type
used in type definitions , even if it is already replaced once or more .
: returns : an embedded struct''' | if hasattr ( name , 'readablename' ) :
name = name . readablename
t , i = self . _target . _embedded_indices [ name ]
return t . _seqs [ i ] |
def project_workspace_addsitedir(sitedir):
    """Similar to site.addsitedir() but prefers the new sitedir over
    existing ones; therefore, prefers local packages over installed ones.

    .. note::
        This allows to support *.pth files and zip-/egg-imports
        similar to an installed site-packages directory.
    """
    assert os.path.isdir(sitedir)
    try:
        from site import addsitedir
    except ImportError:
        # -- USE: Python 2.7 site.py package
        from pysite import addsitedir
    marker = len(sys.path)
    addsitedir(sitedir)
    # addsitedir() appends; relocate the freshly added entries to the front
    # of sys.path so they shadow installed packages.
    added = sys.path[marker:]
    del sys.path[marker:]
    sys.path[0:0] = added
def adjust_ip(self, ip=None):
    """Called to explicitely fixup an associated IP header

    The function adjusts the IP header based on conformance rules
    and the group address encoded in the IGMP message.
    The rules are:
    1. Send General Group Query to 224.0.0.1 (all systems)
    2. Send Leave Group to 224.0.0.2 (all routers)
    3a. Otherwise send the packet to the group address
    3b. Send reports/joins to the group address
    4. ttl = 1 (RFC 2236, section 2)
    5. send the packet with the router alert IP option (RFC 2236, section 2)

    Returns True when the IP header was adjusted, False otherwise.
    """
    if ip != None and ip.haslayer(IP):
        if (self.type == 0x11):
            # Query: a zero group address means a General Query (rule 1),
            # otherwise a group-specific query goes to the group (rule 3a).
            if (self.gaddr == "0.0.0.0"):
                ip.dst = "224.0.0.1"
                # IP rule 1
                retCode = True
            elif isValidMCAddr(self.gaddr):
                ip.dst = self.gaddr
                # IP rule 3a
                retCode = True
            else:
                print("Warning: Using invalid Group Address")
                retCode = False
        elif ((self.type == 0x17) and isValidMCAddr(self.gaddr)):
            # Leave Group message (rule 2)
            ip.dst = "224.0.0.2"
            # IP rule 2
            retCode = True
        elif ((self.type == 0x12) or (self.type == 0x16)) and (isValidMCAddr(self.gaddr)):
            # Membership reports go to the group itself (rule 3b)
            ip.dst = self.gaddr
            # IP rule 3b
            retCode = True
        else:
            print("Warning: Using invalid IGMP Type")
            retCode = False
    else:
        print("Warning: No IGMP Group Address set")
        retCode = False
    # Only a successfully adjusted packet gets the TTL and router-alert
    # option mandated by RFC 2236.
    if retCode == True:
        ip.ttl = 1
        # IP Rule 4
        ip.options = [IPOption_Router_Alert()]
        # IP rule 5
    return retCode
def on_key(self, event):
    """Sets aborted state if escape is pressed while pasting."""
    pasting = self.main_window.grid.actions.pasting
    if pasting and event.GetKeyCode() == wx.WXK_ESCAPE:
        self.aborted = True
    event.Skip()
def nie ( self ) :
"""https : / / es . wikipedia . org / wiki / N % C3 % BAmero _ de _ identidad _ de _ extranjero
: return : a random Spanish NIE""" | first_chr = random . randrange ( 0 , 3 )
doi_body = str ( random . randrange ( 0 , 10000000 ) ) . zfill ( 7 )
control = self . _calculate_control_doi ( str ( first_chr ) + doi_body )
return "XYZ" [ first_chr ] + doi_body + control |
def add_blank_row(self, label):
    """Add a blank row with only an index value to self.df.
    This is done inplace.

    Returns the (mutated) DataFrame.
    """
    empty_row = pd.Series({}, index=self.df.columns, name=label)
    # .loc assignment grows the frame in place (append would copy).
    self.df.loc[label] = empty_row
    return self.df
def chunk_upload_file(self, name, folder_id, file_path, progress_callback=None,
                      chunk_size=1024 * 1024 * 1):
    """Upload a file chunk by chunk.

    The whole file is never loaded in memory. Use this function for big
    files. The callback(transferred, total) lets you track the upload
    progress; the upload can be cancelled if the callback raises an
    exception.

    >>> def progress_callback(transferred, total):
    ...     print 'Uploaded %i bytes of %i' % (transferred, total, )
    ...     if user_request_cancel:
    ...         raise MyCustomCancelException()

    Args:
        name (str): Name of the file on your Box storage.
        folder_id (int): ID of the folder where to upload the file.
        file_path (str): Local path of the file to upload.
        progress_callback (func): Function called each time a chunk is uploaded.
        chunk_size (int): Size of chunks.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    try:
        return self.__do_chunk_upload_file(name, folder_id, file_path,
                                           progress_callback, chunk_size)
    except BoxError as ex:
        # FIX: `except BoxError, ex` is Python 2-only syntax (SyntaxError on
        # Python 3); `as` works on both Python 2.6+ and 3.
        if ex.status != 401:
            raise
        # 401 means the tokens were refreshed, so we start the upload again.
        return self.__do_chunk_upload_file(name, folder_id, file_path,
                                           progress_callback, chunk_size)
def parse_args(self, req=None, strict=False, http_error_code=400):
    """Parse all arguments from the provided request and return the results
    as a Namespace

    :param req: Can be used to overwrite request from Flask
    :param strict: if req includes args not in parser, throw 400 BadRequest exception
    :param http_error_code: use custom error code for `flask_restful.abort()`
    """
    if req is None:
        req = request
    namespace = self.namespace_class()
    # A record of arguments not yet parsed; as each is found
    # among self.args, it will be popped out
    req.unparsed_arguments = dict(self.argument_class('').source(req)) if strict else {}
    errors = {}
    for arg in self.args:
        value, found = arg.parse(req, self.bundle_errors)
        if isinstance(value, ValueError):
            # Per-argument parse failure: collect the error detail and clear
            # `found` so the value is not stored below.
            errors.update(found)
            found = None
        if found or arg.store_missing:
            namespace[arg.dest or arg.name] = value
    # Abort with the accumulated errors (bundled) once all args were tried.
    if errors:
        flask_restful.abort(http_error_code, message=errors)
    # In strict mode, anything left in unparsed_arguments was not declared.
    if strict and req.unparsed_arguments:
        raise exceptions.BadRequest('Unknown arguments: %s' % ', '.join(req.unparsed_arguments.keys()))
    return namespace
def get_map_matrix(inputfile):
    """Return the matrix representation of the genetic map.

    :arg inputfile: the path to the input file from which to retrieve the
        genetic map.
    """
    rows = read_input_file(inputfile)
    # Keep only rows whose 4th column is non-empty, re-ordered as
    # [col4, col2, col3].
    return [[entry[3], entry[1], entry[2]] for entry in rows if entry[3]]
def validate_configs(self):
    """Check that all required configs are set.

    :raises :class:`~responsebot.common.exceptions.MissingConfigError`: if a
        required config is missing
    """
    missing = [conf for conf in self.REQUIRED_CONFIGS if conf not in self._config]
    if missing:
        # Report the first missing key, matching the original fail-fast order.
        raise MissingConfigError('Missing required configuration %s' % missing[0])
def user_post_delete_handler(sender, **kwargs):
    """Sends a metric to InfluxDB when a User object is deleted."""
    # Remaining number of users after this deletion.
    total = get_user_model().objects.all().count()
    # First series: the delete event itself (always value 1 per deletion).
    data = [{
        'measurement': 'django_auth_user_delete',
        'tags': {'host': settings.INFLUXDB_TAGS_HOST, },
        'fields': {'value': 1, },
        'time': timezone.now().isoformat(),
    }]
    write_points(data)
    # Second series: the new total user count.
    data = [{
        'measurement': 'django_auth_user_count',
        'tags': {'host': settings.INFLUXDB_TAGS_HOST, },
        'fields': {'value': total, },
        'time': timezone.now().isoformat(),
    }]
    write_points(data)
def from_stanford_dependencies(this_class, stream, trees, include_erased=False, include_punct=True):
    """Construct a Corpus.

    ``stream`` is an iterable over strings where each string is a line
    representing a Stanford Dependency as in the output of the command-line
    Stanford Dependency tool::

        deprel(gov-index, dep-depindex)

    Sentences are separated by blank lines. A corresponding list of Penn
    Treebank formatted ``trees`` must be provided as well.
    """
    lines = iter(stream)
    corpus = this_class()
    # One sentence is consumed from the stream for each provided tree.
    for tree in trees:
        corpus.append(Sentence.from_stanford_dependencies(lines, tree,
                                                          include_erased,
                                                          include_punct))
    return corpus
def get_optional_attribute(self, element, attribute):
    """Attempt to retrieve an optional attribute from the xml; return None
    when it is absent."""
    value = None
    try:
        value = self.get_attribute(element, attribute)
    except self.XmlError:
        # Missing attribute is not an error for optional lookups.
        pass
    return value
def _advance ( self ) :
"""Return the value of the current token and read the next one into
self . cur _ token .""" | cur_val = None if self . cur_token is None else self . cur_token . value
try :
self . cur_token = next ( self . _tokenizer )
except StopIteration :
self . cur_token = None
return cur_val |
def convert_wav(org_wav_fn: Path, tgt_wav_fn: Path) -> None:
    """Convert the wav into a 16-bit mono 16000 Hz wav via ffmpeg.

    Args:
        org_wav_fn: A `Path` to the original wave file
        tgt_wav_fn: The `Path` to output the processed wave file
    """
    if not org_wav_fn.exists():
        raise FileNotFoundError
    cmd = [
        config.FFMPEG_PATH,
        "-i", str(org_wav_fn),
        "-ac", "1",      # one audio channel (mono)
        "-ar", "16000",  # 16 kHz sample rate
        str(tgt_wav_fn),
    ]
    subprocess.run(cmd)
def extractTagsAndParams(self, elements, text, matches):
    """Replaces all occurrences of HTML-style comments and the given tags
    in the text with a random marker and returns the new text. The output
    parameter ``matches`` will be an associative array filled with data in
    the form:

        'UNIQ-xxxxx' => (
            'element',
            'tag content',
            {'param': 'x'},
            '<element param="x">tag content</element>')
    """
    global _extractTagsAndParams_n
    stripped = u''
    taglist = u'|'.join(elements)
    if taglist not in _startRegexHash:
        # FIX: `ur'...'` literals are a SyntaxError on Python 3; plain raw
        # strings carry the identical pattern text (re.UNICODE is kept).
        _startRegexHash[taglist] = re.compile(
            r"<(" + taglist + r")(\s+[^>]*?|\s*?)(/?>)|<(!--)",
            re.UNICODE | re.IGNORECASE)
    start = _startRegexHash[taglist]
    while text != u'':
        p = start.split(text, 1)
        stripped += p[0]
        if len(p) == 1:
            # No further tag/comment start found: done.
            break
        elif p[4]:
            # HTML comment (`<!--`)
            element = p[4]
            attributes = u''
            close = u''
        else:
            element = p[1]
            attributes = p[2]
            close = p[3]
        inside = p[5]
        marker = self.uniq_prefix + u'-' + element + u'-' + \
            (u"%08X" % _extractTagsAndParams_n) + u'-QINU'
        _extractTagsAndParams_n += 1
        stripped += marker
        if close == u'/>':
            # Empty element tag, <tag/>
            # NOTE(review): content/tail stay None here, yet the matches[]
            # concatenation below would TypeError on None — confirm this
            # path is reachable / intended upstream.
            content = None
            text = inside
            tail = None
        else:
            if element == u'!--':
                end = _endCommentPat
            else:
                if element not in _endRegexHash:
                    _endRegexHash[element] = re.compile(
                        r'(</' + element + r'\s*>)',
                        re.UNICODE | re.IGNORECASE)
                end = _endRegexHash[element]
            q = end.split(inside, 1)
            content = q[0]
            if len(q) < 3:
                # No end tag: swallow the rest of the text as content.
                tail = ''
                text = ''
            else:
                tail = q[1]
                text = q[2]
        matches[marker] = (element, content,
                           self.decodeTagAttributes(attributes),
                           u"<" + element + attributes + close + content + tail)
    return stripped
def genome_size(peaks_file, haploid=True):
    """Finds the genome size of an organism, based on the peaks file created
    by kmercountexact.sh.

    :param peaks_file: Path to peaks file created by kmercountexact.
    :param haploid: Set to True if organism of interest is haploid, False if
        not. Default True.
    :return: size of genome, as an int. If size could not be found, return
        will be 0.
    """
    marker = '#haploid_genome_size' if haploid else '#genome_size'
    size = 0
    with open(peaks_file) as peaks:
        # Like the original readlines() scan, the last matching line wins.
        for line in peaks:
            if marker in line:
                size = int(line.split()[1])
    return size
def cdrom_image(self, cdrom_image):
    """Sets the cdrom image for this QEMU VM.

    :param cdrom_image: QEMU cdrom image path (resolved to an absolute path)
    """
    resolved = self.manager.get_abs_image_path(cdrom_image)
    self._cdrom_image = resolved
    log.info('QEMU VM "{name}" [{id}] has set the QEMU cdrom image path to {cdrom_image}'.format(
        name=self._name, id=self._id, cdrom_image=resolved))
def _generate_malformed_query(data):
    """Generates a query on the ``_all`` field with all the query content.

    Args:
        data (six.text_type or list): The query as ``six.text_type`` (when
            used from the parsing driver) or ``list`` when used from within
            the ES visitor.
    """
    if isinstance(data, six.text_type):
        # The colon is a special character for ES — replace it with a space.
        cleaned = data.replace(':', ' ')
    else:
        cleaned = ' '.join(word.strip(':') for word in data.children)
    return {'simple_query_string': {'fields': ['_all'], 'query': cleaned}}
def reftrack_restricted_data(rt, role, attr):
    """Return the data for restriction of the given attr of the given reftrack

    :param rt: the :class:`jukeboxcore.reftrack.Reftrack` holds the data
    :type rt: :class:`jukeboxcore.reftrack.Reftrack`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: data for the restriction ("Restricted"/"Allowed" for the
        display role; implicitly None for any other role)
    :rtype: depending on role
    :raises: None
    """
    if role == QtCore.Qt.DisplayRole:
        # NOTE(review): getattr(rt, attr, None) hands the attribute object
        # itself (possibly a bound method, not its result) to is_restricted —
        # confirm that matches Reftrack.is_restricted's expected input.
        if rt.is_restricted(getattr(rt, attr, None)):
            return "Restricted"
        else:
            return "Allowed"
def _on_ws_message(self, ws, message):
    """on_message callback of the websocket class: load the JSON message and
    forward each entry to the publisher callback as an Ack.

    :param ws: web socket connection that the message was received on
    :param message: web socket message in text form
    :return: None
    """
    logging.debug(message)
    for raw_ack in json.loads(message):
        ack = EventHub_pb2.Ack()
        # Copy every received field onto the protobuf Ack object.
        for field, field_value in raw_ack.items():
            setattr(ack, field, field_value)
        self._publisher_callback(ack)
def daily_stats(self, from_date=None, to_date=None):
    """Gets the daily stats for a particular period.

    Args:
        from_date (str): The first day of the period (inclusive) in
            YYYYMMDD format.
        to_date (str): The last day of the period (inclusive) in
            YYYYMMDD format.

    See: https://auth0.com/docs/api/management/v2#!/Stats/get_daily
    """
    query = {'from': from_date, 'to': to_date}
    return self.client.get(self._url('daily'), params=query)
def parse(bin_payload, block_height):
    """NOTE: the first three bytes will be missing

    wire format (Pre-STACKs Phase 1)
    0      2  3                                       23              39
    magic  op hash(ns_id,script_pubkey,reveal_addr)   consensus hash

    wire format (Post-STACKs phase 1)
    0      2  3                                       23              39                     47
    magic  op hash(ns_id,script_pubkey,reveal_addr)   consensus hash  token fee (big-endian)

    Returns {
        'opcode': ...
        'preorder_hash': ...
        'consensus_hash': ...
        'token_fee': ...
    """
    # NOTE(review): Python 2 code — relies on str payloads and
    # str.encode('hex'), which does not exist on Python 3.
    if len(bin_payload) < LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash']:
        log.warning("Invalid namespace preorder payload length %s" % len(bin_payload))
        return None
    namespace_id_hash = bin_payload[:LENGTHS['preorder_name_hash']]
    consensus_hash = bin_payload[LENGTHS['preorder_name_hash']: LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash']]
    tokens_burned = None
    epoch_features = get_epoch_features(block_height)
    if len(bin_payload) > LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash']:
        # Extra bytes beyond the consensus hash: only legal once the STACKs
        # namespace-purchase feature is active for this block height.
        if EPOCH_FEATURE_STACKS_BUY_NAMESPACES not in epoch_features:
            # not allowed -- we can't use tokens in this epoch
            log.warning("Invalid payload {}: expected {} bytes".format(bin_payload.encode('hex'), LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash']))
            return None
        if len(bin_payload) != LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash'] + LENGTHS['tokens_burnt']:
            # not allowed -- invalid length
            log.warning("Invalid payload {}: expected {} bytes".format(bin_payload.encode('hex'), LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash'] + LENGTHS['tokens_burnt']))
            return None
        # Big-endian token fee, decoded via hex.
        bin_tokens_burned = bin_payload[LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash']: LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash'] + LENGTHS['tokens_burnt']]
        tokens_burned = int(bin_tokens_burned.encode('hex'), 16)
    else:
        # only allow the absence of the tokens field if we're in a pre-STACKs epoch
        if EPOCH_FEATURE_STACKS_BUY_NAMESPACES in epoch_features:
            # not allowed --- we need the stacks token field
            log.warning('Invalid payload {}: expected {} bytes'.format(bin_payload.encode('hex'), LENGTHS['preorder_name_hash'] + LENGTHS['consensus_hash'] + LENGTHS['tokens_burnt']))
            return None
    namespace_id_hash = hexlify(namespace_id_hash)
    consensus_hash = hexlify(consensus_hash)
    return {'opcode': 'NAMESPACE_PREORDER', 'preorder_hash': namespace_id_hash, 'consensus_hash': consensus_hash, 'token_fee': tokens_burned}
def new_cstr(self, input, null_if_empty=True):
    """Converts the input into an encoded C string (NUL-terminated).

    :param input: The python string
    :param null_if_empty: If the input is empty, return NULL rather than
        the empty string
    :return: The C string (cached per encoded value)
    """
    encoded = input.encode('utf-8') if input else b''
    if not encoded and null_if_empty:
        return self._ffi.NULL
    cached = self._cache.get(encoded)
    if cached is None:
        # First time we see this value: allocate and remember the C string.
        cached = self._ffi.new('char[]', encoded)
        self._cache[encoded] = cached
    return cached
def upload_pg_hba_conf(template_name=None, pg_version=None, pg_cluster='main', restart=True):
    """Upload configuration for pg_hba.conf.

    If the version is not given it will be guessed.
    """
    template_name = template_name or u'postgres/pg_hba.conf'
    config = {
        'version': pg_version or detect_version(),
        'cluster': pg_cluster,
    }
    destination = u'/etc/postgresql/%(version)s/%(cluster)s/pg_hba.conf' % config
    upload_template(template_name, destination, use_sudo=True)
    if restart:
        restart_service(u'postgresql')
def get_git_blob(self, sha):
    """:calls: `GET /repos/:owner/:repo/git/blobs/:sha <http://developer.github.com/v3/git/blobs>`_

    :param sha: string
    :rtype: :class:`github.GitBlob.GitBlob`
    """
    # NOTE(review): `unicode` is Python 2-only; on Python 3 this line raises
    # NameError — confirm this module still targets Python 2.
    assert isinstance(sha, (str, unicode)), sha
    headers, data = self._requester.requestJsonAndCheck("GET", self.url + "/git/blobs/" + sha)
    return github.GitBlob.GitBlob(self._requester, headers, data, completed=True)
def _date ( val , offset = None ) :
"""A special pseudo - type for pipeline arguments .
This allows us to parse dates as Python datetimes , including special values like ' now '
and ' today ' , as well as apply offsets to the datetime .
Args :
val : a string containing the value for the datetime . This can be ' now ' , ' today ' ( midnight at
start of day ) , ' yesterday ' ( midnight at start of yesterday ) , or a formatted date that
will be passed to the datetime constructor . Note that ' now ' etc are assumed to
be in UTC .
offset : for date arguments a string containing a comma - separated list of
relative offsets to apply of the form < n > < u > where < n > is an integer and
< u > is a single character unit ( d = day , m = month , y = year , h = hour , m = minute ) .
Returns :
A Python datetime resulting from starting at < val > and applying the sequence of deltas
specified in < offset > .""" | if val is None :
return val
if val == '' or val == 'now' :
when = datetime . datetime . utcnow ( )
elif val == 'today' :
dt = datetime . datetime . utcnow ( )
when = datetime . datetime ( dt . year , dt . month , dt . day )
elif val == 'yesterday' :
dt = datetime . datetime . utcnow ( ) - datetime . timedelta ( 1 )
when = datetime . datetime ( dt . year , dt . month , dt . day )
else :
when = datetime . datetime . strptime ( val , "%Y%m%d" )
if offset is not None :
for part in offset . split ( ',' ) :
unit = part [ - 1 ]
quantity = int ( part [ : - 1 ] )
# We can use timedelta for days and under , but not for years and months
if unit == 'y' :
when = datetime . datetime ( year = when . year + quantity , month = when . month , day = when . day , hour = when . hour , minute = when . minute )
elif unit == 'm' :
new_year = when . year
new_month = when . month + quantity
if new_month < 1 :
new_month = - new_month
new_year += 1 + ( new_month // 12 )
new_month = 12 - new_month % 12
elif new_month > 12 :
new_year += ( new_month - 1 ) // 12
new_month = 1 + ( new_month - 1 ) % 12
when = datetime . datetime ( year = new_year , month = new_month , day = when . day , hour = when . hour , minute = when . minute )
elif unit == 'd' :
when += datetime . timedelta ( days = quantity )
elif unit == 'h' :
when += datetime . timedelta ( hours = quantity )
elif unit == 'M' :
when += datetime . timedelta ( minutes = quantity )
return when |
def plotall(xargs):
    """%prog plotall input.bed
    Plot the matchings between the reconstructed pseudomolecules and the maps.
    This command will plot each reconstructed object (non-singleton)."""
    # NOTE: the docstring above is runtime data (it is the OptionParser
    # usage text), so its wording is left untouched.
    p = OptionParser(plotall.__doc__)
    add_allmaps_plot_options(p)
    opts, args, iopts = p.set_image_options(xargs, figsize="10x6")
    if len(args) != 1:
        # Wrong argument count: print usage and exit.
        sys.exit(not p.print_help())
    inputbed, = args
    # Derive the AGP file name from the input bed prefix.
    pf = inputbed.rsplit(".", 1)[0]
    agpfile = pf + ".chr.agp"
    agp = AGP(agpfile)
    objects = [ob for ob, lines in agp.iter_object()]
    # Plot each reconstructed object by re-invoking plot() with its seqid.
    for seqid in natsorted(objects):
        plot(xargs + [seqid])
def unregister(self, plugin=None, plugin_file=None):
    """Unregister all plugins, or a specific plugin, via an instance, or a
    file (path) containing plugin(s).

    When this method is called without any arguments then all plugins will
    be deactivated.

    :param plugin: Plugin to unregister.
    :param plugin_file: File containing plugin(s) to unregister.
    :return: Does not Return.
    """
    if plugin is None and plugin_file is None:
        # BUG FIX: snapshot the items — deleting from self.plugins while
        # iterating the live .items() view raises RuntimeError on Python 3.
        for name, registered in list(self.plugins.items()):
            registered.deactivate()
            del self.plugins[name]
        return
    if plugin is not None:
        if plugin.name in self.plugins:
            plugin.deactivate()
            del self.plugins[plugin.name]
    if plugin_file is not None:
        plugs_in_file = PluginManager.get_plugins_in_module(plugin_file)
        if plugs_in_file is None:
            return
        for class_plugin in plugs_in_file:
            # Skip plugins from the file that were never registered here.
            if not self.has_plugin(class_plugin.name, class_plugin):
                continue
            self.get_plugin(class_plugin.name).deactivate()
            del self.plugins[class_plugin.name]
def get(self, app_id, view_specifier):
    """Retrieve the definition of a given view, provided the app_id and a
    view specifier.

    :param app_id: the app id
    :param view_specifier: one of:
        1. the view ID
        2. the view's name
        3. "last" to look up the last view used
    """
    url = '/view/app/{}/{}'.format(app_id, view_specifier)
    return self.transport.GET(url=url)
def subset_main(args):
    """Separate method from main() in order to make testing easier and to
    enable command-line access."""
    # Parse each of the command-line id arguments.
    rid = _read_arg(args.rid)
    cid = _read_arg(args.cid)
    exclude_rid = _read_arg(args.exclude_rid)
    exclude_cid = _read_arg(args.exclude_cid)
    if args.in_path.endswith(".gct"):
        # GCT input: parse the whole file, then subset in memory.
        full_gct = parse_gct.parse(args.in_path)
        out_gct = sg.subset_gctoo(full_gct, rid=rid, cid=cid,
                                  exclude_rid=exclude_rid, exclude_cid=exclude_cid)
    else:
        # GCTx input: hyperslab selection cannot express exclusions.
        if exclude_rid is not None or exclude_cid is not None:
            msg = "exclude_{rid,cid} args not currently supported for parse_gctx."
            raise Exception(msg)
        logger.info("Using hyperslab selection functionality of parse_gctx...")
        out_gct = parse_gctx.parse(args.in_path, rid=rid, cid=cid)
    # Write the output gct in the requested format.
    if args.out_type == "gctx":
        wgx.write(out_gct, args.out_name)
    else:
        wg.write(out_gct, args.out_name, data_null="NaN", metadata_null="NA", filler_null="NA")
def calc_synch_snu_ujy(b, ne, delta, sinth, width, elongation, dist, ghz, E0=1.):
    """Calculate a flux density from pure gyrosynchrotron emission.

    Combines Dulk (1985) equations 40 and 41 — fitting functions assuming a
    power-law electron population — with standard radiative transfer through
    a uniform medium.

    b          -- magnetic field strength in Gauss
    ne         -- electrons per cm^3 with energies greater than 10 keV
    delta      -- power-law index, ``n(E) ~ E^(-delta)``; valid for
                  ``2 <~ delta <~ 5``
    sinth      -- sine of the angle between the line of sight and the
                  magnetic field direction
    width      -- characteristic cross-sectional width of the emitting
                  region, in cm
    elongation -- elongation of the emitting region;
                  ``depth = width * elongation``
    dist       -- distance to the emitting region, in cm
    ghz        -- frequencies at which to evaluate the spectrum, **in GHz**
    E0         -- minimum electron energy in MeV; defaults to 1 so these
                  functions can be called identically to the gyrosynchrotron
                  functions

    Returns the flux density **in uJy**; arguments may be Numpy arrays.
    No complaints are raised if you attempt to use the equations outside of
    their range of validity.
    """
    freq_hz = ghz * 1e9
    emissivity = calc_synch_eta(b, ne, delta, sinth, freq_hz, E0=E0)
    absorption = calc_synch_kappa(b, ne, delta, sinth, freq_hz, E0=E0)
    flux_cgs = calc_snu(emissivity, absorption, width, elongation, dist)
    # Convert from cgs flux density to microjansky.
    return flux_cgs * cgs.jypercgs * 1e6
def primary_keys_full(cls):
    """Return the mapper properties backing *cls*'s primary-key columns.

    Taken from marshmallow_sqlalchemy.
    """
    mapper = cls.__mapper__
    lookup = mapper.get_property_by_column
    return [lookup(col) for col in mapper.primary_key]
def indexable_role(principal):
    """Return a string suitable for querying against the
    `allowed_roles_and_users` field.

    :param principal: can be :data:`Anonymous`, :data:`Authenticated`, or an
        instance of :class:`User` or :class:`Group`.
    """
    principal = unwrap(principal)
    # Transform an anonymous user into the anonymous role.
    if hasattr(principal, "is_anonymous") and principal.is_anonymous:
        principal = Anonymous
    if isinstance(principal, Role):
        return f"role:{principal.name}"
    if isinstance(principal, User):
        return "user:{:d}".format(principal.id)
    if isinstance(principal, Group):
        return "group:{:d}".format(principal.id)
    raise ValueError(repr(principal))
def _bse_cli_list_basis_sets(args):
    '''Handles the list-basis-sets subcommand'''
    metadata = api.filter_basis_sets(args.substr, args.family, args.role, args.data_dir)
    # With --no-description only the names are printed; otherwise align
    # name/description pairs into columns.
    if args.no_description:
        lines = metadata.keys()
    else:
        lines = format_columns([(name, info['description']) for name, info in metadata.items()])
    return '\n'.join(lines)
def attach_session(self):
    """Create a session and inject it as context for this command and any
    subcommands.

    Must only be called once per command tree (asserts no session exists).
    """
    assert self.session is None
    root_cmd = self.find_root()
    new_session = self.Session(root_cmd)
    root_cmd.inject_context(session=new_session)
    return new_session
def kakwani(values, ineq_axis, weights=None):
    """Compute the Kakwani index.

    Integrates the difference between the Lorenz curve of *ineq_axis* and the
    pseudo-Lorenz curve of *values*; equal weights are used when *weights* is
    not supplied.
    """
    from scipy.integrate import simps
    if weights is None:
        weights = ones(len(values))
    pseudo_x, pseudo_y = pseudo_lorenz(values, ineq_axis, weights)
    lorenz_x, lorenz_y = lorenz(ineq_axis, weights)
    # Integrate the gap between the two curves along the Lorenz abscissa.
    return simps(lorenz_y - pseudo_y, lorenz_x)
def add_all_from_dict(self, dictionary, **kwargs):
    """Batch-add function implementations to the library.

    :param dictionary: a mapping from name to procedure class, i.e. the first
        two arguments to add()
    :param kwargs: any additional kwargs are passed to the constructors of
        *each* procedure class
    """
    for proc_name, proc_cls in dictionary.items():
        self.add(proc_name, proc_cls, **kwargs)
def reduce_fit(interface, state, label, inp):
    """Reduce phase of naive-Bayes fitting: separates aggregation of
    continuous and discrete features.

    For continuous features it aggregates partially calculated means and
    variances and outputs them.  For discrete features it aggregates
    occurrence pairs and outputs them; pairs with label occurrences are used
    to calculate prior probabilities.
    """
    from disco.util import kvgroup  # function for grouping values by key
    import numpy as np
    out = interface.output(0)  # all outputted pairs have the same output label
    # Model of naive Bayes stores label names, sum of all label occurrences
    # and pairs (feature index, feature value) for discrete features, which
    # are needed to optimize the predict phase.
    fit_model = {"y_labels": [], "y_sum": 0, "iv": set()}
    combiner = {}  # combiner maintains correct order of means and variances
    means, variances = [], []
    k_prev = ""
    for key, value in kvgroup(inp):  # input pairs are sorted and grouped by key
        k_split = key.split(state["delimiter"])  # pair is split
        if len(k_split) == 3:  # discrete features
            # store pair (feature index, feature value)
            fit_model["iv"].add(tuple(k_split[1:]))
            # aggregate and output occurrences of a pair
            out.add(tuple(k_split), sum(value))
        elif len(k_split) == 2:  # continuous features
            # If the label is different than the previous one, flush the
            # combiner.  This enables calculation of all variances and means
            # for every feature for the label just finished.
            if k_split[0] != k_prev and k_prev != "":
                mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())])
                means.append(mean)
                variances.append(var)
            # number of elements, partial mean, partial variance
            n_a = mean_a = var_a = 0
            # This loop aggregates partially calculated means and variances
            # (pairwise merge of count, mean and variance statistics).
            for n_b, mean_b, var_b in value:
                n_ab = n_a + n_b
                var_a = ((n_a * var_a + n_b * var_b) / float(n_ab)) + (n_a * n_b * ((mean_b - mean_a) / float(n_ab)) ** 2)
                mean_a = (n_a * mean_a + n_b * mean_b) / float(n_ab)
                n_a = n_ab
            # Maintains correct order of statistics for every feature; the
            # 1e-9 term keeps variances strictly positive.
            combiner[int(k_split[1])] = (mean_a, var_a + 1e-9)
            k_prev = k_split[0]
        else:  # aggregates label occurrences
            fit_model[key] = np.sum(value)
            fit_model["y_sum"] += fit_model[key]  # sum of all label occurrences
            fit_model["y_labels"].append(key)
    # Statistics for continuous features were not output in the last loop
    # iteration; flush the final label's combiner contents now.
    # NOTE(review): if only a single label carried continuous features,
    # `means` is still empty here and the stats are never emitted — it looks
    # like this guard should test `combiner` instead; confirm upstream.
    if len(means) > 0:
        mean, var = zip(*[combiner[key] for key in sorted(combiner.keys())])
        out.add("mean", np.array(means + [mean], dtype=np.float32))
        variances = np.array(variances + [var], dtype=np.float32)
        out.add("var", variances)
        out.add("var_log", np.log(np.pi * variances))
    # calculation of prior probabilities
    prior = [fit_model[y_label] / float(fit_model["y_sum"]) for y_label in fit_model["y_labels"]]
    out.add("prior", np.array(prior, dtype=np.float32))
    out.add("prior_log", np.log(prior))
    out.add("iv", list(fit_model["iv"]))
    out.add("y_labels", fit_model["y_labels"])
def _ltu16(ins):
    '''Compares & pops top 2 operands out of the stack, and checks
    if the 1st operand < 2nd operand (top of the stack).
    Pushes 0 if False, 1 if True.
    16 bit unsigned version
    '''
    asm = _16bit_oper(ins.quad[2], ins.quad[3])
    # Clear carry, do the 16-bit unsigned compare, then spread the borrow
    # into A and push the flags/result.
    asm.extend(['or a', 'sbc hl, de', 'sbc a, a', 'push af'])
    return asm
def get_template(name):
    """Look for 'name' in the vr.runners.templates folder.  Return its
    contents as text.

    >>> import six
    >>> tmpl = get_template('base_image.lxc')
    >>> isinstance(tmpl, six.string_types)
    True
    """
    resource_path = 'templates/' + name
    stream = pkg_resources.resource_stream('vr.imager', resource_path)
    raw = stream.read()
    return raw.decode('utf-8')
def _get_uniprot_id(agent):
    """Return the UniProt ID for an agent, looking up in HGNC if necessary.

    If the UniProt ID is a list then the first ID is returned by default.
    """
    up_id = agent.db_refs.get('UP')
    hgnc_id = agent.db_refs.get('HGNC')
    if up_id is None:
        # With both UniProt and HGNC refs missing we can't sequence check,
        # so don't report a failure.
        if hgnc_id is None:
            return None
        # Try to map HGNC to UniProt; if this also fails we can't check.
        up_id = hgnc_client.get_uniprot_id(hgnc_id)
        if up_id is None:
            return None
    # A list of IDs: pick the first one.
    if not isinstance(up_id, basestring) and isinstance(up_id[0], basestring):
        up_id = up_id[0]
    return up_id
def streaming_market_filter(market_ids=None, bsp_market=None, betting_types=None, event_type_ids=None, event_ids=None, turn_in_play_enabled=None, market_types=None, venues=None, country_codes=None, race_types=None):
    """:param list market_ids: filter market data to data pertaining to specific marketIds.
    :param list event_type_ids: filter market data to data pertaining to specific event_type ids.
    :param list event_ids: filter market data to data pertaining to specific event ids.
    :param bool turn_in_play_enabled: restriction on whether market will turn in play or not, not supplied returns all.
    :param list venues: restrict markets by venue (only horse racing has venue at the moment)
    :param bool bsp_market: restriction on bsp, not supplied will return all.
    :param list betting_types: filter on betting types
    :param list market_types: filter market data by market types.
    :param list country_codes: filter based on country codes
    :param list race_types: filter race types
    :return: dict
    """
    # Snapshot the parameters first, before any other local is created.
    params = locals()
    filtered = {}
    for arg_name, arg_value in params.items():
        if arg_value is not None:
            filtered[to_camel_case(arg_name)] = arg_value
    return filtered
def _parse_player_position(self, player_info):
    """Parse the player's position.

    The player's position isn't contained within a unique tag and the
    player's meta information should be iterated through until 'Position' is
    found, as that section contains the desired text.

    Parameters
    ----------
    player_info : PyQuery object
        A PyQuery object of the player's information on the HTML stats page.
    """
    for meta_section in player_info('div#meta p').items():
        if 'Position' not in str(meta_section):
            continue
        self._position = meta_section.text().replace('Position: ', '')
        break
def read_cs_g0_contribution(self):
    """Parse the G=0 contribution of NMR chemical shielding.

    Returns:
        G0 contribution matrix as list of list (stored on the
        ``cs_g0_contribution`` attribute via ``read_table_pattern``).
    """
    header_re = (r'^\s+G\=0 CONTRIBUTION TO CHEMICAL SHIFT \(field along BDIR\)\s+$\n'
                 r'^\s+-{50,}$\n'
                 r'^\s+BDIR\s+X\s+Y\s+Z\s*$\n'
                 r'^\s+-{50,}\s*$\n')
    # One row index followed by three signed decimal columns.
    row_re = r'(?:\d+)\s+' + r'\s+'.join([r'([-]?\d+\.\d+)'] * 3)
    footer_re = r'\s+-{50,}\s*$'
    self.read_table_pattern(header_re, row_re, footer_re, postprocess=float,
                            last_one_only=True, attribute_name="cs_g0_contribution")
def addfield(self, pkt, s, val):
    """Reconstruct the header because the TLS type may have been updated.
    Then, append the content.
    """
    content = b"".join(self.i2m(pkt, msg) for msg in val)
    # TLS 1.3 (except ServerHello) keeps the existing bytes untouched.
    if (isinstance(pkt, _GenericTLSSessionInheritance)
            and _tls_version_check(pkt.tls_session.tls_version, 0x0304)
            and not isinstance(pkt, TLS13ServerHello)):
        return s + content
    if not pkt.type:
        pkt.type = 0
    # Rebuild the first header byte from the (possibly updated) type and keep
    # the original length/version bytes.
    header = struct.pack("!B", pkt.type) + s[1:5]
    return header + content
def set(self, section, key, value):
    """Set a particular value for the specified key in the specified section
    and write it to the config file.

    Parameters:
        * **section (string):** the section under which the config should be
          saved; only accepted values are - oxd, client
        * **key (string):** the key/name of the config value
        * **value (string):** the value which needs to be stored as a string

    Returns:
        * **success (bool):** whether the value was stored successfully in
          the file
    """
    if not self.parser.has_section(section):
        logger.warning("Invalid config section: %s", section)
        return False
    self.parser.set(section, key, value)
    # NOTE(review): binary mode matches Python 2's ConfigParser.write; under
    # Python 3 this write requires a text-mode file — confirm target runtime.
    with open(self.config_file, 'wb') as config_fp:
        self.parser.write(config_fp)
    return True
def build_branch(self):
    """Get the build branch of the first DUT.

    :return: build branch, or None if not found
    """
    if len(self.dutinformation) == 0:
        return None
    first_build = self.dutinformation.get(0).build
    if first_build is None:
        return None
    return first_build.branch
def middle_begin(self, index):
    """Set the index where MIDDLE starts.

    :param int index: the new index for MIDDLE begin
    :raises ValueError: if the index is outside [0, all_length]
    """
    if 0 <= index <= self.all_length:
        self.__middle_begin = index
    else:
        raise ValueError(u"The given index is not valid")
def regions():
    """Get all available regions for the RDS service.

    :rtype: list
    :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
    """
    # Every RDS endpoint follows the rds.<region>.amazonaws.com pattern.
    region_names = [
        'us-east-1',
        'eu-west-1',
        'us-west-1',
        'us-west-2',
        'sa-east-1',
        'ap-northeast-1',
        'ap-southeast-1',
    ]
    return [RDSRegionInfo(name=region, endpoint='rds.%s.amazonaws.com' % region)
            for region in region_names]
def delete_os_nwk_db(self, net_id, seg, vlan):
    """Delete the Openstack Network from the database.

    Steps, in order:
      - Release the segmentation ID and VLAN associated with the net.
      - Delete the network's subnets, then any network matching the partial
        service name.
      - Delete the entry from the Network DB, given the net ID.
      - Delete the entry from the Firewall DB, given the net ID.
      - Release the IN/OUT subnets associated with the net.
    """
    if seg is not None:
        self.service_segs.release_segmentation_id(seg)
    if vlan is not None:
        self.service_vlans.release_segmentation_id(vlan)
    self.os_helper.delete_network_all_subnets(net_id)
    # There's a chance that the OS network got created but its ID was not
    # put in the DB, so also delete networks in OS that carry part of the
    # special service-network name.
    self.os_helper.delete_network_subname(fw_const.IN_SERVICE_NWK)
    self.delete_network_db(net_id)
    self.clear_fw_entry_by_netid(net_id)
    self.service_in_ip.release_subnet_by_netid(net_id)
    self.service_out_ip.release_subnet_by_netid(net_id)
def finish(self):
    """Finish the compressor.  This will emit the remaining output data and
    transition the compressor to a completed state.  The compressor cannot be
    used again after this point, and must be replaced.
    """
    pending = []
    # Keep flushing until the encoder reports completion.
    while lib.BrotliEncoderIsFinished(self._encoder) == lib.BROTLI_FALSE:
        pending.append(self._compress(b'', lib.BROTLI_OPERATION_FINISH))
    return b''.join(pending)
def project_list(self):
    """The list of :py:class:`pylsdj.Project`s that the .sav file contains,
    as (index, project) pairs sorted by index."""
    return [(index, self.projects[index]) for index in sorted(self.projects)]
def send_action(self, recipient_id, action, notification_type=NotificationType.regular):
    """Send typing indicators or send read receipts to the specified recipient.

    https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions

    Input:
        recipient_id: recipient id to send to
        action: action type (mark_seen, typing_on, typing_off)
    Output:
        Response from API as <dict>
    """
    payload = {'sender_action': action}
    return self.send_recipient(recipient_id, payload, notification_type)
def split_by_criteria(dictionary, keys=None, prefix=None):  # type: (dict, set or list or tuple) -> SplitResultSpec
    """Split a dictionary in two by the provided keys.

    Args:
        dictionary (dict[str, object]): A Python dictionary
        keys (sequence[str]): A sequence of keys which will be added to the
            split criteria
        prefix (str): A prefix which will be added to the split criteria

    Returns:
        `SplitResultSpec`: A collections.namedtuple with the attributes:
            included (dict[str, object]): keys matching the criteria.
            excluded (dict[str, object]): keys not matching the criteria.
    """
    criteria = set(keys or [])

    def _selected(name):
        # A key is included when listed explicitly or carrying the prefix.
        return name in criteria or (prefix and name.startswith(prefix))

    included = {name: val for name, val in dictionary.items() if _selected(name)}
    excluded = {name: val for name, val in dictionary.items() if name not in included}
    return SplitResultSpec(included=included, excluded=excluded)
def install_logger(logger=None, module=None):
    """Installs given logger in given module or default logger in caller
    introspected module.

    :param logger: Logger to install.
    :type logger: Logger
    :param module: Module.
    :type module: ModuleType
    :return: Logger.
    :rtype: Logger
    """
    # Fall back to the package-wide default logger when none is supplied.
    logger = logging.getLogger(Constants.logger) if logger is None else logger
    if module is None:
        # Note: inspect.getmodule() can return the wrong module if it has
        # been imported with different relative paths, so resolve the
        # caller's module through its frame globals instead.
        module = sys.modules.get(inspect.currentframe().f_back.f_globals["__name__"])
    # Expose the logger under the conventional module-level LOGGER name.
    setattr(module, "LOGGER", logger)
    foundations.trace.register_module(module)
    return logger
def increment(version):
    """Return an incremented version string.

    The component to bump is taken from the RELEASE_TYPE environment
    variable (epoch/major/minor/micro/pre/post/dev, default "micro");
    RELEASE_VERSION, when set, short-circuits and is returned verbatim.
    Raises for PEP 440 legacy versions, which cannot be auto-incremented.
    """
    # An explicitly pinned release version wins outright.
    release_version = os.environ.get("RELEASE_VERSION", None)
    if release_version is not None:
        return release_version
    if isinstance(version, LegacyVersion):
        msg = """{0} is considered a legacy version and does not
        support automatic incrementing. Please bring your version
        numbering into PEP440 standards and then it can be
        automatically incremented.
        """
        raise Exception(msg.format(version))
    release_type = os.environ.get("RELEASE_TYPE", "micro")
    # v is packaging's internal _version tuple:
    # (epoch, release, dev, pre, post, local) — indices used below.
    v = version._version
    # epoch
    epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, "!")
    pre_name, pre = VersionUtils.get_version_number(v, 3, None, "pre")
    post_name, post = VersionUtils.get_version_number(v, 4, None, "post")
    dev_name, dev = VersionUtils.get_version_number(v, 2, None, "dev")
    _, major = VersionUtils.get_version_number(v[1], 0, 0)
    _, minor = VersionUtils.get_version_number(v[1], 1, None)
    _, micro = VersionUtils.get_version_number(v[1], 2, None)
    # Handle dev/pre/post
    if release_type == "pre":
        micro, post, pre = VersionUtils.process_pre(micro, post, pre)
    if release_type == "post":
        dev, post = VersionUtils.process_post(dev, post)
    if release_type == "dev":
        dev = VersionUtils.process_dev(dev)
    if release_type == "micro":
        dev, micro, minor, post, pre = VersionUtils.process_micro(dev, micro, minor, post, pre)
    if release_type == "minor":
        dev, micro, minor, post, pre = VersionUtils.process_minor(dev, micro, minor, post, pre)
    if release_type == "major":
        dev, major, micro, minor, post, pre = VersionUtils.process_major(dev, major, micro, minor, post, pre)
    # Handle Epoch
    if release_type == "epoch":
        dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(dev, epoch, major, micro, minor, post, pre)
    # Local version segment, if any, re-joined to a string.
    local = "".join(v[5] or []) or None
    version_list = [major, minor, micro]
    # Carry any release components beyond micro only for the bump types
    # that don't reset them.
    if release_type not in ["epoch", "major", "minor", "micro", "pre"]:
        version_list += list(v[1][3:])
    version_string = ".".join([str(x) for x in version_list if x or x == 0])
    # Reassemble the surrounding segments: epoch!X.Y.Z[pre][.post][.dev][.local]
    if epoch:
        version_string = str(epoch) + epoch_name + version_string
    if pre is not None:
        version_string = VersionUtils.calc_pre_version_string(pre, pre_name, version_string)
    if post is not None:
        version_string += "." + post_name + str(post)
    if dev is not None:
        version_string += "." + dev_name + str(dev)
    if local is not None:
        version_string += "." + str(local)
    return version_string
def _build_protobuf(self):
    """Build a query protobuf.

    Relies on the current state of the iterator.

    :rtype: :class:`.query_pb2.Query`
    :returns: The query protobuf object for the current state of the
        iterator.
    """
    pb = _pb_from_query(self._query)
    token = self.next_page_token
    if token is not None:
        pb.start_cursor = base64.urlsafe_b64decode(token)
    if self._end_cursor is not None:
        pb.end_cursor = base64.urlsafe_b64decode(self._end_cursor)
    if self.max_results is not None:
        pb.limit.value = self.max_results - self.num_results
    # NOTE: no offset is added to the request protobuf when resuming from an
    # existing cursor, because the offset is only relative to the start of
    # the result set, not relative to each page (this method runs per-page).
    if token is None and self._offset is not None:
        pb.offset = self._offset
    return pb
def _handle_tag_text(self, text):
    """Handle regular *text* inside of an HTML open tag.

    Dispatches to the template/wikilink/tag sub-parsers when *text* opens a
    recursive construct; otherwise emits it as plain text.
    """
    # Fix: the original named this local `next`, shadowing the builtin.
    next_char = self._read(1)
    if not self._can_recurse() or text not in self.MARKERS:
        self._emit_text(text)
    elif text == next_char == "{":
        self._parse_template_or_argument()
    elif text == next_char == "[":
        self._parse_wikilink()
    elif text == "<":
        self._parse_tag()
    else:
        self._emit_text(text)
def align(self, alignraster, how=np.mean, cxsize=None, cysize=None):
    '''geo.align(geo2, how=np.mean)

    Returns both georasters aligned and with the same pixelsize
    '''
    aligned_pair = align_georasters(self, alignraster, how=how,
                                    cxsize=cxsize, cysize=cysize)
    return aligned_pair
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.