signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def make_vocab_from_args(args: argparse.Namespace):
    """Adapter from an ``argparse.Namespace`` to params.

    Loads a ``Params`` object from ``args.param_path`` (with ``args.overrides``
    applied) and delegates to ``make_vocab_from_params``.
    """
    params = Params.from_file(args.param_path, args.overrides)
    make_vocab_from_params(params, args.serialization_dir)
|
def reindex_variables(
    variables: Mapping[Any, Variable],
    sizes: Mapping[Any, int],
    indexes: Mapping[Any, pd.Index],
    indexers: Mapping,
    method: Optional[str] = None,
    tolerance: Any = None,
    copy: bool = True,
) -> 'Tuple[OrderedDict[Any, Variable], OrderedDict[Any, pd.Index]]':
    """Conform a dictionary of aligned variables onto a new set of variables,
    filling in missing values with NaN.

    Not public API.

    Parameters
    ----------
    variables : dict-like
        Dictionary of xarray.Variable objects.
    sizes : dict-like
        Dictionary from dimension names to integer sizes.
    indexes : dict-like
        Dictionary of indexes associated with variables.
    indexers : dict
        Dictionary with keys given by dimension names and values given by
        arrays of coordinates tick labels. Any mis-matched coordinate values
        will be filled in with NaN, and any mis-matched dimension names will
        simply be ignored.
    method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
        Method to use for filling index values in ``indexers`` not found in
        this dataset:

        * None (default): don't fill gaps
        * pad / ffill: propagate last valid index value forward
        * backfill / bfill: propagate next valid index value backward
        * nearest: use nearest valid index value

    tolerance : optional
        Maximum distance between original and new labels for inexact matches.
        The values of the index at the matching locations must satisfy the
        equation ``abs(index[indexer] - target) <= tolerance``.
    copy : bool, optional
        If ``copy=True``, data in the return values is always copied. If
        ``copy=False`` and reindexing is unnecessary, or can be performed
        with only slice operations, then the output may share memory with
        the input. In either case, new xarray objects are always returned.

    Returns
    -------
    reindexed : OrderedDict
        Dict of reindexed variables.
    new_indexes : OrderedDict
        Dict of indexes associated with the reindexed variables.
    """
    # imported here to avoid a circular import at module load time
    from .dataarray import DataArray

    # create variables for the new dataset
    reindexed = OrderedDict()  # type: OrderedDict[Any, Variable]
    # build up indexers for assignment along each dimension
    int_indexers = {}
    new_indexes = OrderedDict(indexes)
    masked_dims = set()
    unchanged_dims = set()
    for dim, indexer in indexers.items():
        if isinstance(indexer, DataArray) and indexer.dims != (dim,):
            warnings.warn(
                "Indexer has dimensions {0:s} that are different "
                "from that to be indexed along {1:s}. "
                "This will behave differently in the future.".format(
                    str(indexer.dims), dim),
                FutureWarning, stacklevel=3)
        target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])
        if dim in indexes:
            index = indexes[dim]
            # duplicate labels make the integer-indexer mapping ambiguous
            if not index.is_unique:
                raise ValueError(
                    'cannot reindex or align along dimension %r because the '
                    'index has duplicate values' % dim)
            int_indexer = get_indexer_nd(index, target, method, tolerance)
            # We uses negative values from get_indexer_nd to signify
            # values that are missing in the index.
            if (int_indexer < 0).any():
                masked_dims.add(dim)
            elif np.array_equal(int_indexer, np.arange(len(index))):
                # identity mapping: reindexing along this dim is a no-op
                unchanged_dims.add(dim)
            int_indexers[dim] = int_indexer
        if dim in variables:
            # preserve attrs/encoding of an existing coordinate variable
            var = variables[dim]
            args = (var.attrs, var.encoding)  # type: tuple
        else:
            args = ()
        reindexed[dim] = IndexVariable((dim,), target, *args)
    # dims without an index can only be "reindexed" onto the same size
    for dim in sizes:
        if dim not in indexes and dim in indexers:
            existing_size = sizes[dim]
            new_size = indexers[dim].size
            if existing_size != new_size:
                raise ValueError(
                    'cannot reindex or align along dimension %r without an '
                    'index because its size %r is different from the size of '
                    'the new index %r' % (dim, existing_size, new_size))
    for name, var in variables.items():
        if name not in indexers:
            key = tuple(
                slice(None) if d in unchanged_dims
                else int_indexers.get(d, slice(None))
                for d in var.dims)
            needs_masking = any(d in masked_dims for d in var.dims)
            if needs_masking:
                # some target labels are missing: index with NaN-fill
                new_var = var._getitem_with_mask(key)
            elif all(is_full_slice(k) for k in key):
                # no reindexing necessary
                # here we need to manually deal with copying data, since
                # we neither created a new ndarray nor used fancy indexing
                new_var = var.copy(deep=copy)
            else:
                new_var = var[key]
            reindexed[name] = new_var
    return reindexed, new_indexes
|
def artist_commentary_list(self, text_matches=None, post_id=None,
                           post_tags_match=None, original_present=None,
                           translated_present=None):
    """List artist commentaries matching the given search filters.

    Parameters:
        text_matches (str): text the commentary must match.
        post_id (int): id of the commentary's post.
        post_tags_match (str): the commentary's post's tags match the
            given terms; meta-tags not supported.
        original_present (str): can be: yes, no.
        translated_present (str): can be: yes, no.
    """
    search_filters = {
        'search[text_matches]': text_matches,
        'search[post_id]': post_id,
        'search[post_tags_match]': post_tags_match,
        'search[original_present]': original_present,
        'search[translated_present]': translated_present,
    }
    return self._get('artist_commentaries.json', search_filters)
|
def stratHeun(f, G, y0, tspan, dW=None):
    """Use the Stratonovich Heun algorithm to integrate the Stratonovich
    equation dy = f(y,t) dt + G(y,t) \\circ dW(t)

    where y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
        f: callable(y, t) returning (d,) array; deterministic part
        G: callable(y, t) returning (d, m) array; noise coefficients
        y0: array of shape (d,); initial state vector y(t==0)
        tspan (array): equally spaced time points to solve for y;
            tspan[0] is the initial time for the initial state y0.
        dW: optional array of shape (len(tspan)-1, d) giving a specific
            realization of the independent Wiener increments. Generated
            randomly if not provided.

    Returns:
        y: array of shape (len(tspan), len(y0)), first row is y0.

    Raises:
        SDEValueError

    See also: Rumelin (1982); Mannella (2002); Burrage, Burrage & Tian (2004).
    """
    (d, m, f, G, y0, tspan, dW, __) = _check_args(f, G, y0, tspan, dW, None)
    num_steps = len(tspan)
    dt = (tspan[-1] - tspan[0]) / (num_steps - 1)
    # allocate space for the result, matching the dtype of the initial state
    y = np.zeros((num_steps, d), dtype=type(y0[0]))
    if dW is None:
        # pre-generate increments for m independent Wiener processes
        dW = deltaW(num_steps - 1, m, dt)
    y[0] = y0
    for n in range(num_steps - 1):
        t_now = tspan[n]
        t_next = tspan[n + 1]
        y_now = y[n]
        dW_n = dW[n, :]
        drift_now = f(y_now, t_now)
        diffusion_now = G(y_now, t_now)
        # Euler predictor step
        y_pred = y_now + drift_now * dt + diffusion_now.dot(dW_n)
        drift_pred = f(y_pred, t_next)
        diffusion_pred = G(y_pred, t_next)
        # Heun corrector: trapezoidal average of drift and diffusion
        y[n + 1] = (y_now
                    + 0.5 * (drift_now + drift_pred) * dt
                    + 0.5 * (diffusion_now + diffusion_pred).dot(dW_n))
    return y
|
def value(self):
    """Return the node value if this is a leaf node, otherwise self.

    A leaf node is a node whose ``node`` dict has a ``'value'`` key. If the
    stored value exposes a ``validate()`` method it is validated before being
    returned.
    """
    if 'value' not in self.node:
        # not a leaf: hand back the node itself
        return self
    leaf = self.node['value']
    if hasattr(leaf, 'validate'):
        leaf.validate()
    return leaf
|
def register_token(self, *args, **kwargs):
    """Register token.

    Accepts (via ``kwargs``):
        - token_name [string]
        - contract_address [hex string]
        - blockchain [string] token's blockchain (QTUMTEST, ETH)

    Returns dictionary with following fields:
        - success [Bool]
    """
    client = HTTPClient(self.withdraw_server_address + self.withdraw_endpoint)
    # NOTE(review): ``check_sig`` is neither a parameter nor a local here --
    # presumably it is a module-level flag; if it is not defined at module
    # scope this raises NameError. TODO confirm.
    if check_sig:
        # sign the payload before sending when signature checking is enabled
        return client.request('register_token', self.signature_validator.sign(kwargs))
    else:
        return client.request('register_token', kwargs)
|
def _mp_run_check(tasks, results, options):
    """Worker-process helper for multiprocessing with DistReport.

    Pulls ``(index, change)`` pairs from the ``tasks`` queue until a ``None``
    sentinel is seen, checks each change, and puts ``(index, squashed)``
    summaries on the ``results`` queue.
    """
    try:
        for index, change in iter(tasks.get, None):
            # this is the part that takes up all of our time and produces
            # side-effects like writing out files for all report formats
            change.check()
            # rather than serializing the completed change (which could be
            # rather large now that it's been realized), send back only the
            # squashed overview and throw away the used bits
            overview = squash(change, options=options)
            change.clear()
            results.put((index, overview))
    except KeyboardInterrupt:
        # prevent a billion lines of backtrace from hitting the user in the
        # face when they Ctrl-C the parent
        return
|
async def _set_persistent_menu(self):
    """Define the persistent menu for all pages."""
    page = self.settings()
    if 'menu' not in page:
        # nothing configured for this page
        return
    await self._send_to_messenger_profile(page, {'persistent_menu': page['menu'], })
    logger.info('Set menu for page %s', page['page_id'])
|
def arches(self):
    """Return a list of architectures for this task.

    :returns: a list of arch strings (eg ["ppc64le", "x86_64"]). The list
        is empty if this task has no arches associated with it.
    """
    # image tasks carry their arch list as the third positional parameter
    if self.method == 'image':
        return self.params[2]
    return [self.arch] if self.arch else []
|
def right_click_zijderveld(self, event):
    """Toggle between zoom and pan effects for the zijderveld on right click.

    Parameters:
        event: the wx.MouseEvent that triggered the call of this function

    Alters:
        zijderveld_setting, toolbar1 setting
    """
    # ignore if this is actually a left click / double click
    if event.LeftIsDown() or event.ButtonDClick():
        return
    if self.zijderveld_setting == "Zoom":
        self.zijderveld_setting = "Pan"
        action, call_args = self.toolbar1.pan, ('off',)
    elif self.zijderveld_setting == "Pan":
        self.zijderveld_setting = "Zoom"
        action, call_args = self.toolbar1.zoom, ()
    else:
        return
    try:
        action(*call_args)
    except TypeError:
        # some matplotlib toolbar versions take no argument
        pass
|
def get_map_data(self):
    """Return a serializable data set describing the map location."""
    marker = self.map_marker_description or self.map_center_description
    # Python's line-splitting is more cross-OS compatible, so we feed
    # a pre-built array to the front-end
    description_lines = [line for line in self.map_description.splitlines() if line]
    return {
        'containerSelector': '#' + self.get_map_element_id(),
        'center': self.map_center_description,
        'marker': marker,
        'zoom': self.map_zoom,
        'href': self.get_map_href(),
        'key': getattr(settings, 'GOOGLE_MAPS_API_KEY', ''),
        'description': description_lines,
    }
|
def load_yaml_by_path(cls, path, log_debug=False):
    """Load a yaml file that is at the given path.

    If ``path`` is not a string, it is assumed to be a file-like object.

    :param path: filesystem path or readable file-like object
    :param log_debug: log parse errors at DEBUG instead of WARNING
    :return: the parsed document as a dict ({} for an empty document), or
        None when the yaml cannot be parsed
    """
    try:
        if isinstance(path, six.string_types):
            # use a context manager so the file handle is always closed
            # (previously the handle was leaked)
            with open(path, 'r') as fp:
                return yaml.load(fp, Loader=Loader) or {}
        else:
            return yaml.load(path, Loader=Loader) or {}
    except (yaml.scanner.ScannerError, yaml.parser.ParserError) as e:
        log_level = logging.DEBUG if log_debug else logging.WARNING
        logger.log(log_level, 'Yaml error in {path} (line {ln}, column {col}): {err}'.format(
            path=path, ln=e.problem_mark.line, col=e.problem_mark.column, err=e.problem))
        return None
|
def create_job_id(self, data):
    """Create a new job id and reference (refs/aetros/job/<id>) by creating a
    new commit with empty tree. That root commit is the actual job id. A
    reference is then created to the newest (head) commit of this commit
    history. The reference will always be updated once a new commit is added.

    :param data: job metadata, serialized to ``aetros/job.json``
    :return: the new job id (also stored on ``self.job_id``)
    """
    self.add_file('aetros/job.json', simplejson.dumps(data, indent=4))
    tree_id = self.write_tree()
    # the hash of this root commit IS the job id; identical input data
    # produces an identical commit hash
    self.job_id = self.command_exec(['commit-tree', '-m', "JOB_CREATED", tree_id])[0].decode('utf-8').strip()
    out, code, err = self.command_exec(['show-ref', self.ref_head], allowed_to_fail=True)
    if not code:
        # show-ref succeeded, so the ref already exists: the exact same
        # experiment values were committed before
        self.logger.warning("Generated job id already exists, because exact same experiment values given. Ref " + self.ref_head)
    self.command_exec(['update-ref', self.ref_head, self.job_id])
    # make sure we have checkedout all files we have added until now. Important for simple models, so we have the
    # actual model.py and dataset scripts.
    if not os.path.exists(self.work_tree):
        os.makedirs(self.work_tree)
    # updates index and working tree
    # '--', '.' is important to not update HEAD
    self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
    # every caller needs to make sure to call git.push
    return self.job_id
|
def csl_styles(**kwargs):
    '''Get list of styles from https://github.com/citation-style-language/styles

    :param kwargs: any additional arguments will be passed on to `requests.get`
    :return: list, of CSL styles

    Usage::

        from habanero import cn
        cn.csl_styles()
    '''
    base = "https://api.github.com/repos/citation-style-language/styles"
    # find the latest commit so we can list the tree at that revision
    tt = requests.get(base + '/commits?per_page=1', **kwargs)
    tt.raise_for_status()
    check_json(tt)
    commres = tt.json()
    sha = commres[0]['sha']
    sty = requests.get(base + "/git/trees/" + sha, **kwargs)
    sty.raise_for_status()
    check_json(sty)
    res = sty.json()
    files = [z['path'] for z in res['tree']]
    # select files with the ".csl" extension and strip it. The previous
    # regex ".csl" was unanchored and unescaped: it matched any character
    # followed by "csl" anywhere in the name, and re.sub could strip a
    # match from the middle of a filename.
    suffix = '.csl'
    return [f[:-len(suffix)] for f in files if f.endswith(suffix)]
|
def get_inner_text(text, entities):
    """Get the inner text that's surrounded by the given entities.

    For instance: text = 'hey!', entity = MessageEntityBold(2, 2) -> 'y!'.

    :param text: the original text.
    :param entities: the entity or entities that must be matched.
    :return: a list with the text surrounded by each entity.
    """
    # work in surrogate space so offsets match Telegram's UTF-16 lengths
    text = add_surrogate(text)
    return [
        del_surrogate(text[entity.offset:entity.offset + entity.length])
        for entity in entities
    ]
|
def isPe64(self):
    """Determine if the current L{PE} instance is a PE64 file.

    @rtype: bool
    @return: C{True} if the current L{PE} instance is a PE64 file,
        C{False} otherwise.
    """
    return self.ntHeaders.optionalHeader.magic.value == consts.PE64
|
def mk_kwargs(cls, kwargs):
    """Pop recognized arguments from a keyword dict.

    Mutates ``kwargs``: recognized keys are removed from it and returned in
    a new dict.
    """
    recognized = ('row_factory', 'body', 'parent')
    return {key: kwargs.pop(key) for key in recognized if key in kwargs}
|
def get_kde_home_dir():
    """Return KDE home directory or None if not found.

    Honours $KDEHOME when set; otherwise probes ~/.kde (kde3) and
    ~/.kde4 (kde4), preferring whichever config was modified most
    recently when ``kde4-config`` is installed.
    """
    if os.environ.get("KDEHOME"):
        kde_home = os.path.abspath(os.environ["KDEHOME"])
    else:
        home = os.environ.get("HOME")
        if not home:  # $HOME is not set
            return
        kde3_home = os.path.join(home, ".kde")
        kde4_home = os.path.join(home, ".kde4")
        if fileutil.find_executable("kde4-config"):  # kde4
            kde3_file = kde_home_to_config(kde3_home)
            kde4_file = kde_home_to_config(kde4_home)
            if os.path.exists(kde4_file) and os.path.exists(kde3_file):
                # both generations present: pick the most recently modified
                if fileutil.get_mtime(kde4_file) >= fileutil.get_mtime(kde3_file):
                    kde_home = kde4_home
                else:
                    kde_home = kde3_home
            else:
                # NOTE(review): this falls back to kde4_home even when only
                # the kde3 config exists (final exists() check then returns
                # None despite a usable ~/.kde) -- confirm this is intended.
                kde_home = kde4_home
        else:  # kde3
            kde_home = kde3_home
    return kde_home if os.path.exists(kde_home) else None
|
def __find_smallest(self):
    """Find the smallest uncovered value in the matrix.

    Returns ``sys.maxsize`` when every cell is covered.
    """
    uncovered_values = (
        self.C[row][col]
        for row in range(self.n)
        for col in range(self.n)
        if not self.row_covered[row] and not self.col_covered[col]
    )
    return min(uncovered_values, default=sys.maxsize)
|
def get_next_pid(self, namespace=None, count=None):
    """Request next available pid or pids from Fedora, optionally in a
    specified namespace. Calls :meth:`ApiFacade.getNextPID`.

    .. deprecated:: 0.14
        Mint pids for new objects with
        :func:`eulfedora.models.DigitalObject.get_default_pid`
        instead, or call :meth:`ApiFacade.getNextPID` directly.

    :param namespace: (optional) get the next pid in the specified pid
        namespace; otherwise, Fedora will return the next pid in the
        configured default namespace.
    :param count: (optional) get the specified number of pids; by default,
        returns 1 pid
    :rtype: string or list of strings
    """
    # this method should no longer be needed - default pid logic moved to DigitalObject
    warnings.warn("""get_next_pid() method is deprecated; you should mint new pids via DigitalObject or ApiFacade.getNextPID() instead.""", DeprecationWarning)
    kwargs = {}
    # explicit namespace wins; otherwise fall back to the configured default
    pid_namespace = namespace or self.default_pidspace
    if pid_namespace:
        kwargs['namespace'] = pid_namespace
    if count:
        kwargs['numPIDs'] = count
    response = self.api.getNextPID(**kwargs)
    nextpids = parse_xml_object(NewPids, response.content, response.url)
    return nextpids.pids[0] if count is None else nextpids.pids
|
def getiddfile(versionid):
    """Find the Energy+.idd file of the E+ installation matching *versionid*.

    The version string is padded with zeros to three components
    ("8" -> "8-0-0", "8.1" -> "8-1-0") before locating the install.
    """
    parts = versionid.split('.')
    while len(parts) < 3:
        parts.append('0')
    ver_str = '-'.join(parts)
    eplus_exe, _ = eppy.runner.run_functions.install_paths(ver_str)
    eplusfolder = os.path.dirname(eplus_exe)
    return '{}/Energy+.idd'.format(eplusfolder)
|
def images(self):
    """Return the listing's image URLs.

    :return: list of image src strings, or None when the gallery cannot be
        found or parsed.
    """
    try:
        gallery = self._ad_page_content.find("ul", {"class": "smi-gallery-list"})
    except Exception as e:
        if self._debug:
            logging.error("Error getting images. Error message: " + e.args[0])
        return
    if gallery is None:
        return
    found = []
    for item in gallery.find_all('li'):
        src = item.find('img')['src']
        if src:
            found.append(src)
    return found
|
def DefineStdSpectraForUnits():
    """Define ``StdSpectrum`` attribute for all the supported
    :ref:`pysynphot-flux-units`.

    This is automatically done on module import. The attribute stores the
    source spectrum necessary for normalization in the corresponding flux
    unit.

    For ``photlam``, ``photnu``, ``flam``, ``fnu``, Jy, and mJy, the
    spectrum is flat in the respective units with flux value of 1.

    For counts and ``obmag``, it is flat in the unit of counts with flux
    value of :math:`1/N`, where :math:`N` is the size of the default
    wavelength set (see `~pysynphot.refs`).

    For ``abmag`` and ``stmag``, it is flat in the respective units with
    flux value of 0 mag. That is equivalent to
    :math:`3.63 \\times 10^{-20}` ``fnu`` and
    :math:`3.63 \\times 10^{-9}` ``flam``, respectively.

    For ``vegamag``, it is simply :ref:`pysynphot-vega-spec`.
    """
    # Linear flux-density units
    units.Flam.StdSpectrum = FlatSpectrum(1, fluxunits='flam')
    units.Fnu.StdSpectrum = FlatSpectrum(1, fluxunits='fnu')
    units.Photlam.StdSpectrum = FlatSpectrum(1, fluxunits='photlam')
    units.Photnu.StdSpectrum = FlatSpectrum(1, fluxunits='photnu')
    units.Jy.StdSpectrum = FlatSpectrum(1, fluxunits='jy')
    units.mJy.StdSpectrum = FlatSpectrum(1, fluxunits='mjy')
    # Non-density units: scale by 1/N so the total over the default
    # wavelength set is 1
    scale = 1.0 / _default_waveset.size
    units.Counts.StdSpectrum = FlatSpectrum(1, fluxunits='counts') * scale
    units.OBMag.StdSpectrum = FlatSpectrum(1, fluxunits='counts') * scale
    # Magnitude flux-density units: 0 mag expressed in fnu / flam
    units.ABMag.StdSpectrum = FlatSpectrum(3.63e-20, fluxunits='fnu')
    units.STMag.StdSpectrum = FlatSpectrum(3.63e-9, fluxunits='flam')
    units.VegaMag.StdSpectrum = Vega
|
def decrypt(self, token, key, cek=None):
    """Decrypt a JWT (JWE).

    :param token: the JWT, either raw or an already-unpacked ``JWEnc``
    :param key: key used to unwrap the content encryption key
    :param cek: optional ephemeral cipher key; when given, the RSA
        key-unwrap step is skipped
    :raises NotSupportedAlgorithm: for an unsupported ``alg`` or ``enc``
        header value
    :return: the decrypted message (inflated when ``zip: DEF`` is set)
    """
    if not isinstance(token, JWEnc):
        jwe = JWEnc().unpack(token)
    else:
        jwe = token
    self.jwt = jwe.encrypted_key()
    jek = jwe.encrypted_key()
    _decrypt = RSAEncrypter(self.with_digest).decrypt
    _alg = jwe.headers["alg"]
    # recover the content encryption key (CEK) per the "alg" header,
    # unless the caller already supplied one
    if cek:
        pass
    elif _alg == "RSA-OAEP":
        cek = _decrypt(jek, key, 'pkcs1_oaep_padding')
    elif _alg == "RSA-OAEP-256":
        cek = _decrypt(jek, key, 'pkcs1_oaep_256_padding')
    elif _alg == "RSA1_5":
        cek = _decrypt(jek, key)
    else:
        raise NotSupportedAlgorithm(_alg)
    self["cek"] = cek
    enc = jwe.headers["enc"]
    if enc not in SUPPORTED["enc"]:
        raise NotSupportedAlgorithm(enc)
    # the base64 protected header doubles as additional authenticated data
    auth_data = jwe.b64_protected_header()
    msg = self._decrypt(enc, cek, jwe.ciphertext(), auth_data=auth_data, iv=jwe.initialization_vector(), tag=jwe.authentication_tag())
    # optional DEFLATE decompression per the "zip" header
    if "zip" in jwe.headers and jwe.headers["zip"] == "DEF":
        msg = zlib.decompress(msg)
    return msg
|
def open_telemetry_logs(logpath_telem, logpath_telem_raw):
    '''Open the telemetry log files and start the background log writer.

    Relies on the module-level ``opts`` and ``mpstate`` globals.
    '''
    # append when continuing an earlier session, otherwise truncate
    if opts.append_log or opts.continue_mode:
        mode = 'a'
    else:
        mode = 'w'
    # NOTE(review): the handles are intentionally kept open for the session
    # lifetime (stored on mpstate); they are not closed here.
    mpstate.logfile = open(logpath_telem, mode=mode)
    mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)
    print("Log Directory: %s" % mpstate.status.logdir)
    print("Telemetry log: %s" % logpath_telem)
    # use a separate thread for writing to the logfile to prevent
    # delays during disk writes (important as delays can be long if camera
    # app is running)
    t = threading.Thread(target=log_writer, name='log_writer')
    t.daemon = True
    t.start()
|
def resolvePrefix(self, prefix, default=Namespace.default):
    """Resolve the specified prefix to a namespace. The I{nsprefixes} is
    searched. If not found, it walks up the tree until either resolved or
    the top of the tree is reached. Searching up the tree provides for
    inherited mappings.

    @param prefix: A namespace prefix to resolve.
    @type prefix: basestring
    @param default: An optional value to be returned when the prefix
        cannot be resolved.
    @type default: (I{prefix}, I{URI})
    @return: The namespace that is mapped to I{prefix} in this context.
    @rtype: (I{prefix}, I{URI})
    """
    n = self
    while n is not None:
        if prefix in n.nsprefixes:
            return (prefix, n.nsprefixes[prefix])
        # NOTE(review): checks self.specialprefixes (not n.specialprefixes)
        # on every iteration -- presumably special prefixes are identical on
        # all nodes so the result is the same; confirm before "fixing".
        if prefix in self.specialprefixes:
            return (prefix, self.specialprefixes[prefix])
        n = n.parent
    return default
|
def get_thumbsdir(self, path):
    """Return the thumbnails directory for the given source image path.

    ``self.thumbsdir`` may be a callable, in which case the directory is
    derived from the source path on the fly.
    """
    if callable(self.thumbsdir):
        return self.thumbsdir(path)
    return self.thumbsdir
|
def proccesser_markdown(lowstate_item, config, **kwargs):
    '''Takes low state data and returns a dict of proccessed data
    that is by default used in a jinja template when rendering a markdown highstate_doc.

    This `lowstate_item_markdown` given a lowstate item, returns a dict like:

    .. code-block:: yaml

        vars: # the raw lowstate_item that was proccessed
        id: # the 'id' of the state.
        id_full: # combo of the state type and id "state: id"
        state: # name of the salt state module
        function: # name of the state function
        name: # value of 'name:' passed to the salt state module
        state_function: # the state name and function name
        markdown: # text data to describe a state
            requisites: # requisite like [watch_in, require_in]
            details: # state name, parameters and other details like file contents
    '''
    # TODO: switch or ... ext call.
    s = lowstate_item
    state_function = '{0}.{1}'.format(s['state'], s['fun'])
    id_full = '{0}: {1}'.format(s['state'], s['__id__'])

    def _first_pair(req):
        # Requisite dicts have a single {state: id} entry. list(...)[0]
        # replaces the Python-2-only ``req.items()[0]`` (dict views are not
        # indexable on Python 3).
        return list(req.items())[0]

    def _requisite_section(heading, key):
        # Render one requisite section ("watch", "require", ...) as text.
        section = ''
        if s.get(key):
            section += heading
            for w in s.get(key, []):
                req_state, req_id = _first_pair(w)
                section += _format_markdown_requisite(req_state, req_id)
            section += '\n'
        return section

    # TODO: use salt defined STATE_REQUISITE_IN_KEYWORDS
    requisites = ''
    requisites += _requisite_section('run or update after changes in:\n', 'watch')
    requisites += _requisite_section('after changes, run or update:\n', 'watch_in')
    requisites += _requisite_section('require:\n', 'require')
    requisites += _requisite_section('required in:\n', 'require_in')

    details = ''
    if state_function == 'highstate_doc.note':
        if 'contents' in s:
            details += '\n{0}\n'.format(s['contents'])
        if 'source' in s:
            text = __salt__['cp.get_file_str'](s['source'])
            if text:
                details += '\n{0}\n'.format(text)
            else:
                details += '\n{0}\n'.format('ERROR: opening {0}'.format(s['source']))
    if state_function == 'pkg.installed':
        pkgs = s.get('pkgs', s.get('name'))
        details += '\n```\ninstall: {0}\n```\n'.format(pkgs)
    if state_function == 'file.recurse':
        details += '''recurse copy of files\n'''
        y = _state_data_to_yaml_string(s)
        if y:
            details += '```\n{0}\n```\n'.format(y)
        if '!doc_recurse' in id_full:
            findfiles = __salt__['file.find'](path=s.get('name'), type='f')
            if len(findfiles) < 10 or '!doc_recurse_force' in id_full:
                for f in findfiles:
                    details += _format_markdown_system_file(f, config)
            else:
                details += ''' > Skipping because more than 10 files to display.\n'''
                details += ''' > HINT: to force include !doc_recurse_force in state id.\n'''
        else:
            details += ''' > For more details review logs and Salt state files.\n\n'''
            details += ''' > HINT: for improved docs use multiple file.managed states or file.archive, git.latest. etc.\n'''
            details += ''' > HINT: to force doc to show all files in path add !doc_recurse .\n'''
    if state_function == 'file.blockreplace':
        if s.get('content'):
            details += 'ensure block of content is in file\n```\n{0}\n```\n'.format(_md_fix(s['content']))
        if s.get('source'):
            text = '** source: ' + s.get('source')
            details += 'ensure block of content is in file\n```\n{0}\n```\n'.format(_md_fix(text))
    if state_function == 'file.managed':
        details += _format_markdown_system_file(s['name'], config)
    # if no state doc is created use default state as yaml
    if not details:
        y = _state_data_to_yaml_string(s)
        if y:
            details += '```\n{0}```\n'.format(y)
    # ``str.decode`` does not exist on Python 3. Decode only when the
    # accumulated text is actually bytes (Python 2 str), which preserves the
    # original str->unicode conversion there while working on Python 3.
    if isinstance(requisites, bytes):
        requisites = requisites.decode('utf-8')
    if isinstance(details, bytes):
        details = details.decode('utf-8')
    r = {'vars': lowstate_item, 'state': s['state'], 'name': s['name'],
         'function': s['fun'], 'id': s['__id__'], 'id_full': id_full,
         'state_function': state_function,
         'markdown': {'requisites': requisites, 'details': details}}
    return r
|
def parse_tokens(self, tokens):
    """Parse a sequence of tokens.

    Returns a tuple of (parsed tokens, unparsed tokens, suggestions).
    Note: ``tokens`` is mutated (reversed/consumed) during parsing.
    """
    if len(tokens) == 1:
        return list(), tokens, {"kubectl": self.ast.help}
    tokens.reverse()
    parsed, unparsed, suggestions = self.treewalk(self.ast, parsed=list(), unparsed=tokens)
    if suggestions or not unparsed:
        return parsed, unparsed, suggestions
    # TODO: @vogxn: This is hack until we include expected value types for
    # each option and argument. Whenever we receive no suggestions but are
    # left with unparsed tokens we pop the value and walk the tree again
    # without values
    logger.debug("unparsed tokens remain, possible value encountered")
    unparsed.pop()
    parsed.reverse()
    unparsed.extend(parsed)
    logger.debug("resuming treewalk with tokens: %s", unparsed)
    return self.treewalk(self.ast, parsed=list(), unparsed=unparsed)
|
def read(self, files=None):
    """Read settings from given config files.

    @raises: LinkCheckerError on syntax errors in the config file(s)
    """
    cfiles = list(files) if files is not None else []
    if not cfiles:
        # fall back to the per-user configuration file, when present
        userconf = get_user_config()
        if os.path.isfile(userconf):
            cfiles.append(userconf)
    # filter invalid files, warning about each one skipped
    readable_files = []
    for cfile in cfiles:
        if not os.path.isfile(cfile):
            log.warn(LOG_CHECK, _("Configuration file %r does not exist."), cfile)
        elif not fileutil.is_readable(cfile):
            log.warn(LOG_CHECK, _("Configuration file %r is not readable."), cfile)
        else:
            readable_files.append(cfile)
    log.debug(LOG_CHECK, "reading configuration from %s", readable_files)
    confparse.LCConfigParser(self).read(readable_files)
|
def reverse_char(self, hints):
    """Return a SQLAlchemy query of rows whose columns contain the hints.

    Parameters
    ----------
    hints : str or list of str
        strings to lookup

    Returns
    -------
    :class:`sqlalchemy.orm.query.Query`:
        reverse matches
    """
    if isinstance(hints, string_types):
        hints = [hints]
    Unihan = self.sql.base.classes.Unihan
    # match any column containing any of the hints
    clauses = [
        column.contains(hint)
        for column in Unihan.__table__.columns
        for hint in hints
    ]
    return self.sql.session.query(Unihan).filter(or_(*clauses))
|
def equalize_adaptive_clahe(image, ntiles=8, clip_limit=0.01):
    """Return contrast limited adaptive histogram equalized image.

    The return value is normalised to the range 0 to 1.

    :param image: numpy array or :class:`jicimagelib.image.Image` of dtype float
    :param ntiles: number of tile regions
    :param clip_limit: clipping limit in range 0 to 1,
        higher values give more contrast
    """
    # Convert input for skimage.
    skimage_float_im = normalise(image)
    # NOTE(review): np.all() tests that every element is non-zero, which is
    # not literally "no variation"; presumably normalise() maps a constant
    # image to all-truthy values so this guard fires for flat input --
    # confirm against normalise()'s contract.
    if np.all(skimage_float_im):
        raise(RuntimeError("Cannot equalise when there is no variation."))
    normalised = skimage.exposure.equalize_adapthist(skimage_float_im, ntiles_x=ntiles, ntiles_y=ntiles, clip_limit=clip_limit)
    # equalize_adapthist output is expected to span the full [0, 1] range
    assert np.max(normalised) == 1.0
    assert np.min(normalised) == 0.0
    return normalised
|
def command(self, cmd, progress_hook=None, *args, **kwargs):
    """Execute a model command.

    :param cmd: Name of the command, optionally followed by simulation
        names (whitespace separated).
    :param progress_hook: A function to which progress updates are passed.
    """
    cmds = cmd.split(None, 1)
    # split commands and simulations
    sim_names = cmds[1:]
    # simulations
    if not sim_names:
        # no simulations named: apply the command to every registered one.
        # NOTE(review): iterkeys() is Python-2-only on plain dicts;
        # presumably ``reg`` is a custom registry providing it -- confirm
        # on Python 3.
        sim_names = self.cmd_layer.reg.iterkeys()
    for sim_name in sim_names:
        # NOTE(review): ``cmd`` here is the full original string (including
        # any simulation names), not ``cmds[0]`` -- confirm getattr is meant
        # to receive the unsplit command.
        sim_cmd = getattr(self.cmd_layer.reg[sim_name], cmd)
        sim_cmd(self, progress_hook=progress_hook, *args, **kwargs)
|
def _trna_annotation ( data ) :
    """Quantify tRNAs for one sample with tDRmapper.

    :param data: bcbio sample dict; supplies the tRNA reference file, the
        sample name, the work directory and the cleaned FASTQ path.
    :return: the per-sample ``trna`` work directory (created if needed).
        Returned without running when the tRNA reference or the TdrMapper
        script is missing, or when the output already exists.
    """
    trna_ref = op . join ( dd . get_srna_trna_file ( data ) )
    name = dd . get_sample_name ( data )
    work_dir = utils . safe_makedir ( os . path . join ( dd . get_work_dir ( data ) , "trna" , name ) )
    in_file = op . basename ( data [ "clean_fastq" ] )
    # TdrMapper is expected to live next to the python executable
    tdrmapper = os . path . join ( os . path . dirname ( sys . executable ) , "TdrMappingScripts.pl" )
    perl_export = utils . get_perl_exports ( )
    if not file_exists ( trna_ref ) or not file_exists ( tdrmapper ) :
        logger . info ( "There is no tRNA annotation to run TdrMapper." )
        return work_dir
    out_file = op . join ( work_dir , in_file + ".hq_cs.mapped" )
    if not file_exists ( out_file ) :
        # run inside a transactional temp dir so partial outputs are discarded
        with tx_tmpdir ( data ) as txdir :
            with utils . chdir ( txdir ) :
                # tDRmapper writes next to its input, so symlink the FASTQ in
                utils . symlink_plus ( data [ "clean_fastq" ] , op . join ( txdir , in_file ) )
                cmd = ( "{perl_export} && perl {tdrmapper} {trna_ref} {in_file}" ) . format ( ** locals ( ) )
                do . run ( cmd , "tRNA for %s" % name )
                # keep every "*mapped*" result file produced in the temp dir
                for filename in glob . glob ( "*mapped*" ) :
                    shutil . move ( filename , work_dir )
    return work_dir
|
def activate_(self, n_buffer, image_dimensions, shmem_name):
    """Create the shared-memory client now that the segment info is known.

    :param n_buffer: size of the shared-memory ring buffer.
    :param image_dimensions: (width, height) of the frames.
    :param shmem_name: name of the shared-memory segment to attach to.
    """
    self.active = True
    self.image_dimensions = image_dimensions
    width = image_dimensions[0]
    height = image_dimensions[1]
    # the client times out if nothing has been received in 1000 milliseconds
    self.client = ShmemRGBClient(
        name=shmem_name,
        n_ringbuffer=n_buffer,
        width=width,
        height=height,
        mstimeout=1000,
        verbose=False,
    )
    self.postActivate_()
|
def show_minimum_needs_configuration(self):
    """Open the minimum-needs manager dialog (modal)."""
    # Imported here only so that it happens AFTER i18n is set up.
    from safe.gui.tools.minimum_needs.needs_manager_dialog import (
        NeedsManagerDialog)
    manager = NeedsManagerDialog(
        parent=self.iface.mainWindow(),
        dock=self.dock_widget,
    )
    manager.exec_()
|
def reassign_ids ( doc , verbose = False ) :
    """Assign new IDs to all rows in all LSC tables in doc so that there
    are no collisions when the LIGO_LW elements are merged.

    :param doc: XML document tree; its child LIGO_LW elements are updated
        in place.
    :param verbose: when True, print progress to stderr (Python 2 print
        syntax -- this module is Python 2 only).
    :return: ``doc`` itself, for chaining.
    """
    # Can ' t simply run reassign _ ids ( ) on doc because we need to
    # construct a fresh old - - > new mapping within each LIGO _ LW block .
    for n , elem in enumerate ( doc . childNodes ) :
        if verbose :
            # trailing comma + \r: rewrite the same status line in place
            print >> sys . stderr , "reassigning row IDs: %.1f%%\r" % ( 100.0 * ( n + 1 ) / len ( doc . childNodes ) ) ,
        if elem . tagName == ligolw . LIGO_LW . tagName :
            table . reassign_ids ( elem )
    if verbose :
        print >> sys . stderr , "reassigning row IDs: 100.0%"
    return doc
|
def add_unique_runid(testcase, run_id=None):
    """Append a run id to the test description.

    The run id makes descriptions unique between imports and forces
    Polarion to update every testcase every time.

    :param testcase: mutable mapping holding the testcase record; its
        ``"description"`` entry is rewritten in place.
    :param run_id: identifier embedded in the description; when falsy, the
        ``id()`` of this function is used instead.
    """
    description = testcase.get("description") or ""
    marker = run_id or id(add_unique_runid)
    testcase["description"] = '{}<br id="{}"/>'.format(description, marker)
|
def read_handle(url, cache=None, mode="rb"):
    """Read from any URL with a file handle.

    Use this to get a handle to a file rather than eagerly load the data:

        with read_handle(url) as handle:
            result = something.load(handle)
            result.do_something()

    The handle is closed when execution leaves the ``with`` block -- even
    when the block raises (the original skipped close() on that path).

    Args:
        url: a URL including scheme, or a local path.
        cache: True to cache, None to decide automatically (remote
            resources are cached), 'purge' to drop any cached copy first.
        mode: file mode passed through to the underlying opener.

    Yields:
        A file handle to the specified resource if it could be reached.
    """
    scheme = urlparse(url).scheme
    if cache == 'purge':
        _purge_cached(url)
        cache = None
    if _is_remote(scheme) and cache is None:
        cache = True
        log.debug("Cache not specified, enabling because resource is remote.")
    if cache:
        handle = _read_and_cache(url, mode=mode)
    else:
        if scheme in ("http", "https"):
            handle = _handle_web_url(url, mode=mode)
        elif scheme == "gs":
            # BUGFIX: was `scheme in ("gs")` -- a substring test against the
            # string "gs", which also matched the empty scheme of local paths.
            handle = _handle_gfile(url, mode=mode)
        else:
            handle = open(url, mode=mode)
    try:
        yield handle
    finally:
        # close on every exit path, including exceptions in the with-body
        handle.close()
|
def redo(self):
    """Re-sync the change recorded in this trigger log.

    Creates a ``NEW`` live trigger log from the data in this archived
    trigger log and sets the state of this archived instance to
    ``REQUEUED``.

    .. seealso:: :meth:`.TriggerLog.redo`

    Returns:
        The :class:`.TriggerLog` instance created from the data of this
        archived log.
    """
    live_log = self._to_live_trigger_log(state=TRIGGER_LOG_STATE['NEW'])
    # force_insert guarantees a fresh row rather than an update
    live_log.save(force_insert=True)
    self.state = TRIGGER_LOG_STATE['REQUEUED']
    self.save(update_fields=['state'])
    return live_log
|
def capture_termination_signal ( please_stop ) :
    """WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN

    Spawns a background thread that polls the EC2 instance metadata
    endpoint for a spot-termination notice and fires ``please_stop`` when
    one is found.
    """
    def worker ( please_stop ) :
        # whether the *previous* poll also failed; a single transient
        # error is deliberately not reported
        seen_problem = False
        while not please_stop :
            # minutes since process startup, used only in the warning below
            request_time = ( time . time ( ) - timer . START ) / 60
            # MINUTES
            try :
                # metadata endpoint answers non-404 only when a spot
                # termination has been scheduled for this instance
                response = requests . get ( "http://169.254.169.254/latest/meta-data/spot/termination-time" )
                seen_problem = False
                if response . status_code not in [ 400 , 404 ] :
                    Log . alert ( "Shutdown AWS Spot Node {{name}} {{type}}" , name = machine_metadata . name , type = machine_metadata . aws_instance_type )
                    please_stop . go ( )
            except Exception as e :
                e = Except . wrap ( e )
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e :
                    # endpoint unreachable: presumably not a spot node, stop polling
                    Log . note ( "AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)" )
                    return
                elif seen_problem : # IGNORE THE FIRST PROBLEM
                    Log . warning ( "AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)" , time = request_time , cause = e )
                seen_problem = True
                # longer back-off after a failed poll
                ( Till ( seconds = 61 ) | please_stop ) . wait ( )
            # regular polling interval
            ( Till ( seconds = 11 ) | please_stop ) . wait ( )
    Thread . run ( "listen for termination" , worker )
|
def _request_login ( self , method , ** kwargs ) :
    """Send a treq HTTP POST request to /ssllogin (Twisted generator body).

    :param method: treq method to use, for example "treq.post" or
        "treq_kerberos.post".
    :param kwargs: kwargs to pass to treq or treq_kerberos, for example
        "auth" or "agent".
    :returns: deferred that when fired returns a dict from sslLogin
    :raises KojiLoginException: on an HTTP status code above 200
    """
    url = self . url + '/ssllogin'
    # Build the XML - RPC HTTP request body by hand and send it with
    # treq .
    factory = KojiQueryFactory ( path = None , host = None , method = 'sslLogin' )
    payload = factory . payload
    try :
        response = yield method ( url , data = payload , ** kwargs )
    except ResponseFailed as e :
        # re-raise the first underlying connection failure
        failure = e . reasons [ 0 ]
        failure . raiseException ( )
    # NOTE(review): only codes strictly above 200 are treated as errors;
    # confirm 1xx responses cannot occur here.
    if response . code > 200 :
        raise KojiLoginException ( 'HTTP %d error' % response . code )
    # Process the XML - RPC response content from treq .
    content = yield response . content ( )
    if hasattr ( xmlrpc , 'loads' ) : # Python 2:
        result = xmlrpc . loads ( content ) [ 0 ] [ 0 ]
    else :
        # Python 3: loads lives in xmlrpc.client
        result = xmlrpc . client . loads ( content ) [ 0 ] [ 0 ]
    defer . returnValue ( result )
|
def get_control_connection_host(self):
    """Return host metadata for the current control connection.

    Returns None when there is no live connection or no endpoint.
    """
    connection = self.control_connection._connection
    if not connection:
        return None
    endpoint = connection.endpoint
    if not endpoint:
        return None
    return self.metadata.get_host(endpoint)
|
def drawFrom ( self , cumsum , r ) :
    """Draw an index from a cumulative sum.

    NOTE(review): although the original docstring calls ``cumsum`` an
    array, the code treats it as a *string* that is split and passed to
    ``eval`` -- presumably of the form ``"<expr>"`` or ``"<expr> <row>"``.
    Confirm the caller's format.  ``eval`` on externally supplied strings
    is a code-execution hazard; consider a safer encoding.

    Parameters:
        cumsum: string encoding of the cumulative sum (see note above).
        r: threshold value (e.g. a uniform random draw).

    Returns:
        int: index of the first cumulative-sum element >= r.
    """
    # "expr idx" -> evaluate expr then select row idx; "expr" -> whole array
    a = cumsum . rsplit ( )
    if len ( a ) > 1 :
        b = eval ( a [ 0 ] ) [ int ( a [ 1 ] ) ]
    else :
        b = eval ( a [ 0 ] )
    # first position where the cumulative sum reaches r
    return np . nonzero ( b >= r ) [ 0 ] [ 0 ]
|
def get_ir_reciprocal_mesh ( mesh , cell , is_shift = None , is_time_reversal = True , symprec = 1e-5 , is_dense = False ) :
    """Return k-points mesh and k-point map to the irreducible k-points.

    The symmetry is searched from the input cell.

    Parameters
    ----------
    mesh : array_like
        Uniform sampling mesh numbers.
        dtype='intc', shape=(3,)
    cell : spglib cell tuple
        Crystal structure.
    is_shift : array_like, optional
        [0, 0, 0] gives Gamma center mesh and value 1 gives half mesh shift.
        Default is None which equals to [0, 0, 0].
        dtype='intc', shape=(3,)
    is_time_reversal : bool, optional
        Whether time reversal symmetry is included or not. Default is True.
    symprec : float, optional
        Symmetry tolerance in distance. Default is 1e-5.
    is_dense : bool, optional
        grid_mapping_table is returned with dtype='uintp' if True. Otherwise
        its dtype='intc'. Default is False.

    Returns
    -------
    grid_mapping_table : ndarray
        Grid point mapping table to ir-grid-points.
        dtype='intc' or 'uintp', shape=(prod(mesh),)
    grid_address : ndarray
        Address of all grid points.
        dtype='intc', shape=(prod(mesh), 3)

    Returns None when the cell cannot be expanded or the C routine fails.
    """
    _set_no_error ( )
    lattice , positions , numbers , _ = _expand_cell ( cell )
    if lattice is None :
        return None
    # mapping-table dtype depends on is_dense (see docstring)
    if is_dense :
        dtype = 'uintp'
    else :
        dtype = 'intc'
    # output buffers filled in-place by the C extension
    grid_mapping_table = np . zeros ( np . prod ( mesh ) , dtype = dtype )
    grid_address = np . zeros ( ( np . prod ( mesh ) , 3 ) , dtype = 'intc' )
    if is_shift is None :
        is_shift = [ 0 , 0 , 0 ]
    # the C routine returns the number of ir-points (> 0) on success
    if spg . ir_reciprocal_mesh ( grid_address , grid_mapping_table , np . array ( mesh , dtype = 'intc' ) , np . array ( is_shift , dtype = 'intc' ) , is_time_reversal * 1 , lattice , positions , numbers , symprec ) > 0 :
        return grid_mapping_table , grid_address
    else :
        return None
|
def collect(basepath, exclude=None, processPlugins=True):
    """Collect all the packages and data files under ``basepath``.

    Walks the folder structure looking for python packages/modules and
    data files, and optionally rewrites ``__plugins__.py`` table-of-contents
    files for plugin packages.

    :param basepath: root folder to scan.
    :param exclude: file extensions skipped when gathering data files;
        defaults to ['.py', '.pyc', '.pyo', '.css', '.exe'].
    :param processPlugins: when True, ``__plugins__.py`` files are detected
        and rewritten with the discovered plugin packages.
    :return: ([<str> pkg, ..], [(<str> path, <str> relpath), ..]) data
    """
    if exclude is None:
        exclude = ['.py', '.pyc', '.pyo', '.css', '.exe']
    imports = []
    datas = []
    # walk the folder structure looking for all packages and data files
    basename = os.path.basename(basepath)
    basepath = os.path.abspath(basepath)
    baselen = len(basepath) - len(basename)
    plugfiles = []
    for root, folders, files in os.walk(basepath):
        # skip version-control internals
        if '.svn' in root or '.git' in root:
            continue
        # mark the plugins file for load
        plugdata = None
        if processPlugins and '__plugins__.py' in files:
            filename = os.path.join(root, '__plugins__.py')
            package = projex.packageFromPath(filename) + '.__plugins__'
            pkgpath = projex.packageRootPath(filename)
            if pkgpath not in sys.path:
                sys.path.insert(0, pkgpath)
            # import the plugins module to read its __recurse__ flag
            __import__(package)
            pkg = sys.modules[package]
            recurse = getattr(pkg, '__recurse__', False)
            plugdata = {'recurse': recurse, 'packages': [], 'path': root}
            plugfiles.append(plugdata)
        # look for an ancestor plugins file flagged as recursive
        else:
            for data in plugfiles:
                if data['recurse'] and root.startswith(data['path']):
                    plugdata = data
                    break
        if plugdata is not None:
            packages = plugdata['packages']
            # include package plugins
            for folder in folders:
                pkgpath = os.path.join(root, folder, '__init__.py')
                if os.path.exists(pkgpath):
                    packages.append(projex.packageFromPath(pkgpath))
        for file_ in files:
            module, ext = os.path.splitext(file_)
            # look for python modules
            if ext == '.py':
                package_path = projex.packageFromPath(os.path.join(root, file_))
                if not package_path:
                    continue
                if module != '__init__':
                    package_path += '.' + module
                imports.append(package_path)
                # test to see if this is a plugin file
                if plugdata is not None and module not in ('__init__', '__plugins__'):
                    plugdata['packages'].append(package_path)
            # look for data
            elif ext not in exclude:
                src = os.path.join(root, file_)
                targ = os.path.join(root[baselen:])
                datas.append((src, targ))
    # save the plugin information
    for plugdata in plugfiles:
        fname = os.path.join(plugdata['path'], '__plugins__.py')
        packages = plugdata['packages']
        plugs = ',\n'.join(map(lambda x: "r'{0}'".format(x), packages))
        data = ['__recurse__ = {0}'.format(plugdata['recurse']),
                '__toc__ = [{0}]'.format(plugs)]
        # write the data to the system; "with" guarantees the handle is
        # closed even if the write fails (the original leaked it on error)
        with open(fname, 'w') as f:
            f.write('\n'.join(data))
    return imports, datas
|
def convertToFree ( stream , length_limit = True ) :
    """Convert stream from fixed source form to free source form.

    :param stream: iterable of fixed-form Fortran source lines.
    :param length_limit: forwarded to FortranLine; presumably enforces the
        fixed-form column limit -- confirm against FortranLine.
    :yields: converted free-form lines as strings.
    """
    # lines held back until we know whether the next line continues them
    linestack = [ ]
    for line in stream :
        convline = FortranLine ( line , length_limit )
        if convline . is_regular :
            if convline . isContinuation and linestack :
                # mark the pending statement as continued before flushing
                linestack [ 0 ] . continueLine ( )
            for l in linestack :
                yield str ( l )
            linestack = [ ]
        linestack . append ( convline )
    # flush whatever is still pending at end of input
    for l in linestack :
        yield str ( l )
|
def determine_version(self, request, api_version=None):
    """Resolve the API version from the default, URL path, header and query param.

    :param request: request object exposing ``path``, ``get_header`` and
        ``get_param``.
    :param api_version: default version; ``False`` is treated as unset.
    :raises ValueError: when the different sources name conflicting versions.
    :return: the resolved version, or None when nothing was requested.
    """
    if api_version is False:
        api_version = None
    # a version embedded in the URL path (e.g. "/v2/...") overrides the default
    for candidate in self.versions:
        if candidate and "v{0}".format(candidate) in request.path:
            api_version = candidate
            break
    requested = set()
    if api_version is not None:
        requested.add(api_version)
    header_version = request.get_header("X-API-VERSION")
    if header_version:
        requested.add(header_version)
    param_version = request.get_param('api_version')
    if param_version is not None:
        requested.add(param_version)
    if len(requested) > 1:
        raise ValueError('You are requesting conflicting versions')
    return next(iter(requested or (None,)))
|
def cell_type_specificity(ax):
    '''Plot the intranetwork connectivity matrix T_yX as a pcolormesh.

    Zero entries are masked out and rendered as the colormap's "bad"
    colour.  A slim colorbar axis is appended to the right of ``ax``.
    '''
    # mask zero entries so they render as background cells
    masked_array = np.ma.array(params.T_yX, mask=params.T_yX == 0)
    # BUGFIX: `cmap` was referenced below but its definition had been
    # commented out along with the old imshow call, giving a NameError.
    # Restored from the commented-out lines.
    cmap = plt.get_cmap('hot', 20)
    cmap.set_bad('k', 0.5)
    im = ax.pcolormesh(masked_array, cmap=cmap, vmin=0)
    ax.axis(ax.axis('tight'))
    ax.invert_yaxis()
    ax.xaxis.set_ticks_position('top')
    # centre ticks on each matrix cell
    ax.set_xticks(np.arange(9) + 0.5)
    ax.set_yticks(np.arange(16) + 0.5)
    ax.set_xticklabels(params.X, rotation=270)
    ax.set_yticklabels(params.y)
    ax.xaxis.set_label_position('top')
    ax.set_xlabel(r'$X$', labelpad=-1, fontsize=8)
    ax.set_ylabel(r'$y$', labelpad=0, rotation=0, fontsize=8)
    # carve out a slim colorbar axis immediately to the right of ax
    rect = np.array(ax.get_position().bounds)
    rect[0] += rect[2] + 0.01
    rect[2] = 0.01
    fig = plt.gcf()
    cax = fig.add_axes(rect)
    cbar = plt.colorbar(im, cax=cax)
    cbar.set_label(r'$\mathcal{T}_{yX}$', labelpad=0)
|
def get_argument_values ( type_def : Union [ GraphQLField , GraphQLDirective ] , node : Union [ FieldNode , DirectiveNode ] , variable_values : Dict [ str , Any ] = None , ) -> Dict [ str , Any ] :
    """Get coerced argument values based on provided definitions and nodes.

    Prepares a dict of argument values given a list of argument definitions
    and list of argument AST nodes.

    :raises GraphQLError: when a required (non-null) argument is missing or
        explicitly null, or when a literal value fails coercion.
    """
    coerced_values : Dict [ str , Any ] = { }
    arg_defs = type_def . args
    arg_nodes = node . arguments
    if not arg_defs or arg_nodes is None :
        return coerced_values
    # AST argument nodes indexed by argument name
    arg_node_map = { arg . name . value : arg for arg in arg_nodes }
    for name , arg_def in arg_defs . items ( ) :
        arg_type = arg_def . type
        argument_node = cast ( ArgumentNode , arg_node_map . get ( name ) )
        variable_values = cast ( Dict [ str , Any ] , variable_values )
        if argument_node and isinstance ( argument_node . value , VariableNode ) :
            # argument supplied via a variable; consult the runtime values
            variable_name = argument_node . value . name . value
            has_value = variable_values and variable_name in variable_values
            is_null = has_value and variable_values [ variable_name ] is None
        else :
            has_value = argument_node is not None
            is_null = has_value and isinstance ( argument_node . value , NullValueNode )
        if not has_value and arg_def . default_value is not INVALID : # If no argument was provided where the definition has a default value ,
            # use the default value .
            coerced_values [ name ] = arg_def . default_value
        elif ( not has_value or is_null ) and is_non_null_type ( arg_type ) : # If no argument or a null value was provided to an argument with a non - null
            # type ( required ) , produce a field error .
            if is_null :
                raise GraphQLError ( f"Argument '{name}' of non-null type" f" '{arg_type}' must not be null." , argument_node . value , )
            elif argument_node and isinstance ( argument_node . value , VariableNode ) :
                raise GraphQLError ( f"Argument '{name}' of required type" f" '{arg_type}' was provided the variable" f" '${variable_name}'" " which was not provided a runtime value." , argument_node . value , )
            else :
                raise GraphQLError ( f"Argument '{name}' of required type '{arg_type}'" " was not provided." , node , )
        elif has_value :
            if isinstance ( argument_node . value , NullValueNode ) : # If the explicit value ` None ` was provided , an entry in the coerced
                # values must exist as the value ` None ` .
                coerced_values [ name ] = None
            elif isinstance ( argument_node . value , VariableNode ) :
                variable_name = argument_node . value . name . value
                # Note : This Does no further checking that this variable is correct .
                # This assumes that this query has been validated and the variable
                # usage here is of the correct type .
                coerced_values [ name ] = variable_values [ variable_name ]
            else :
                value_node = argument_node . value
                coerced_value = value_from_ast ( value_node , arg_type , variable_values )
                if coerced_value is INVALID : # Note : ` values _ of _ correct _ type ` validation should catch this before
                    # execution . This is a runtime check to ensure execution does not
                    # continue with an invalid argument value .
                    raise GraphQLError ( f"Argument '{name}'" f" has invalid value {print_ast(value_node)}." , argument_node . value , )
                coerced_values [ name ] = coerced_value
    return coerced_values
|
def _parse_CHANLIMIT ( value ) :
    """Parse a CHANLIMIT feature value into {channel-prefix: limit}.

    Each comma-separated pair is "<prefixes>:<limit>"; the limit is fanned
    out to every prefix character in the group.

    >>> res = FeatureSet._parse_CHANLIMIT('ibe:250,xyz:100')
    >>> len(res)
    6
    >>> res['x']
    100
    >>> res['i'] == res['b'] == res['e'] == 250
    True
    """
    pairs = map ( string_int_pair , value . split ( ',' ) )
    return dict ( ( target , number ) for target_keys , number in pairs for target in target_keys )
|
def from_dict(cls, d):
    """Reconstruct a SimplestChemenvStrategy from its as_dict() representation.

    :param d: dict representation produced by the ``as_dict`` method.
    :return: a new strategy instance built from the stored cutoffs.
    """
    return cls(
        distance_cutoff=d["distance_cutoff"],
        angle_cutoff=d["angle_cutoff"],
        additional_condition=d["additional_condition"],
        continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"],
        symmetry_measure_type=d["symmetry_measure_type"],
    )
|
def XOR(classical_reg1, classical_reg2):
    """Produce an exclusive-OR instruction.

    :param classical_reg1: The first classical register, which gets modified.
    :param classical_reg2: The second classical register or immediate value.
    :return: A ClassicalExclusiveOr instance.
    """
    target, source = unpack_reg_val_pair(classical_reg1, classical_reg2)
    return ClassicalExclusiveOr(target, source)
|
def main ( ) :
    '''monica helps you order food from the timeline

    CLI entry point: dispatches the docopt-parsed arguments to the matching
    sub-command handler.  Python 2 only (uses print statements).
    '''
    arguments = docopt ( __doc__ , version = __version__ )
    # NOTE(review): `flag` is not defined in this function -- presumably a
    # module-level global guarding first-run configuration; confirm.
    if arguments [ 'configure' ] and flag :
        configure ( )
    if arguments [ 'cuisine' ] :
        if arguments [ 'list' ] :
            cuisine ( 'list' )
        else :
            cuisine ( arguments [ '<cuisine-id>' ] )
    elif arguments [ 'surprise' ] :
        surprise ( )
    elif arguments [ 'reviews' ] :
        reviews ( arguments [ '<restaurant-id>' ] )
    elif arguments [ 'search' ] :
        search ( arguments [ 'QUERY' ] )
    elif arguments [ 'budget' ] :
        try :
            money = arguments [ '<budget>' ]
            money = float ( money )
            budget ( money )
        # NOTE(review): bare except also swallows KeyboardInterrupt;
        # narrowing to ValueError would be safer.
        except :
            print 'Budget should be a number!'
    elif arguments [ 'restaurant' ] :
        restaurant ( arguments [ '<restaurant-id>' ] )
    else :
        print ( __doc__ )
|
def register_optionables(self, optionables):
    """Register the given Optionable subclasses.

    :param optionables: iterable of
        :class:`pants.option.optionable.Optionable` subclasses to register.
    :raises TypeError: when ``optionables`` is not iterable or contains
        entries that are not Optionable subclasses.
    """
    if not isinstance(optionables, Iterable):
        raise TypeError('The optionables must be an iterable, given {}'.format(optionables))
    optionables = tuple(optionables)
    if not optionables:
        # nothing to register
        return
    bad = [item for item in optionables
           if not isinstance(item, type) or not issubclass(item, Optionable)]
    if bad:
        raise TypeError('The following items from the given optionables are not Optionable '
                        'subclasses:\n\t{}'.format('\n\t'.join(str(i) for i in bad)))
    self._optionables.update(optionables)
|
def hexstr(text):
    '''Normalize a string to valid lowercase hex.

    Accepts strings prefixed by '0x' or '0X' and removes the prefix.

    Args:
        text (str): String to normalize.

    Raises:
        s_exc.BadTypeValu: if nothing remains after stripping, or the
            remainder is not valid hex.

    Returns:
        str: Normalized hex string.
    '''
    text = text.strip().lower()
    # drop an optional 0x prefix ('0X' is already covered by lower() above)
    if text.startswith(('0x', '0X')):
        text = text[2:]
    if not text:
        raise s_exc.BadTypeValu(valu=text, name='hexstr',
                                mesg='No string left after stripping')
    try:
        # uhex validates width and characters in C without using regex
        s_common.uhex(text)
    except (binascii.Error, ValueError) as e:
        raise s_exc.BadTypeValu(valu=text, name='hexstr', mesg=str(e))
    return text
|
def create_application(self, team_id, name, url=None):
    """Create an application under a given team.

    :param team_id: Team identifier.
    :param name: The name of the new application being created.
    :param url: Optional URL of where the application is located.
    """
    payload = {'name': name}
    if url:
        payload['url'] = url
    endpoint = 'rest/teams/' + str(team_id) + '/applications/new'
    return self._request('POST', endpoint, payload)
|
def ExpandGroups(path):
    """Perform group expansion on a given path.

    For example, given path ``foo/{bar,baz}/{quux,norf}`` this method will
    yield ``foo/bar/quux``, ``foo/bar/norf``, ``foo/baz/quux``,
    ``foo/baz/norf``.

    Args:
        path: A path to expand.

    Yields:
        Paths that can be obtained from given path by expanding groups.
    """
    precondition.AssertType(path, Text)
    segments = []
    cursor = 0
    for match in PATH_GROUP_REGEX.finditer(path):
        # literal text between the previous group and this one
        segments.append([path[cursor:match.start()]])
        # the comma-separated alternatives inside the braces
        segments.append(match.group("alts").split(","))
        cursor = match.end()
    segments.append([path[cursor:]])
    for combo in itertools.product(*segments):
        yield "".join(combo)
|
def tempfilename(**kwargs):
    """Reserve a temporary file name for future use.

    Creates (and immediately closes) a NamedTemporaryFile with
    ``delete=False`` and yields its name.  If an exception propagates out
    of the ``with`` body, the reserved file is removed before re-raising.

    :param kwargs: forwarded to :class:`tempfile.NamedTemporaryFile`
        (``delete`` is forced to False).
    """
    kwargs.update(delete=False)
    # Create outside the try block: if creation itself fails there is no
    # file to clean up.  (The original wrapped creation in the try and
    # could hit a NameError on `f` inside the except handler.)
    f = NamedTemporaryFile(**kwargs)
    f.close()
    try:
        yield f.name
    except Exception:
        # ensure we clean up after ourselves
        if os.path.exists(f.name):
            os.unlink(f.name)
        raise
|
def _is_pingable(ip):
    """Check whether an IP address is reachable by pinging.

    Uses linux utils to execute the ping (ICMP ECHO) command, sending 5
    packets with an interval of 0.2 seconds and a timeout of 1 second.
    A RuntimeError from the command implies unreachability.

    :param ip: IP to check.
    :return: bool - True or False depending on pingability.
    """
    command = ['ping', '-c', '5', '-W', '1', '-i', '0.2', ip]
    try:
        linux_utils.execute(command, check_exit_code=True)
    except RuntimeError:
        LOG.warning("Cannot ping ip address: %s", ip)
        return False
    return True
|
def assign_properties(thing):
    """Fill an object's details/property columns from the current request.

    Reads the optional "details" parameter plus "property1".."property5"
    from the request and assigns the matching attributes of ``thing``,
    then commits the session.
    """
    details = request_parameter(parameter="details", optional=True)
    if details:
        setattr(thing, "details", loads(details))
    for index in range(1, 6):
        name = "property" + str(index)
        value = request_parameter(parameter=name, optional=True)
        if value:
            setattr(thing, name, value)
    session.commit()
|
def complete(self):
    """Whether the credentials are valid and two-legged or carry a token."""
    if not self.valid:
        return self.valid
    return self.access_token or self.refresh_token or self.type == 2
|
def calcinds(data, threshold, ignoret=None):
    """Find indexes for data above (or below) given threshold.

    :param data: dict with equal-length 'time' and 'snrs' sequences.
    :param threshold: non-negative -> keep entries with snr > threshold;
        negative -> keep entries with snr < threshold.
    :param ignoret: optional list of (t0, t1) ranges; candidates whose
        rounded time falls inside any range are dropped.
    :return: list of surviving indexes.
    """
    inds = []
    for i in range(len(data['time'])):
        snr = data['snrs'][i]
        time = data['time'][i]
        # one selection rule for both threshold signs (the original
        # duplicated the whole ignoret-filter block for each sign)
        if (threshold >= 0 and snr > threshold) or (threshold < 0 and snr < threshold):
            if ignoret:
                t_int = np.round(time).astype(int)
                overlaps = [t0 for (t0, t1) in ignoret if t_int in range(t0, t1)]
                # the original debug line referenced t0/t1 after the
                # comprehension, which is a NameError on Python 3
                logger.debug('{} {}'.format(t_int, overlaps))
                if not overlaps:
                    inds.append(i)
            else:
                inds.append(i)
    return inds
|
def tobam_cl ( data , out_file , is_paired = False ) :
    """Prepare command line for producing de-duplicated sorted output.

    - If no deduplication, sort and prepare a BAM file.
    - If paired, then use samblaster and prepare discordant outputs.
    - If unpaired, use biobambam's bammarkduplicates

    Context-manager generator: yields one (command line, transactional
    output path) tuple; file_transaction commits the output when the
    caller's block exits cleanly.
    """
    do_dedup = _check_dedup ( data )
    umi_consensus = dd . get_umi_consensus ( data )
    with file_transaction ( data , out_file ) as tx_out_file :
        if not do_dedup :
            # plain sort, no duplicate marking
            yield ( sam_to_sortbam_cl ( data , tx_out_file ) , tx_out_file )
        elif umi_consensus :
            # UMI-aware grouping takes precedence over samblaster/biobambam
            yield ( _sam_to_grouped_umi_cl ( data , umi_consensus , tx_out_file ) , tx_out_file )
        elif is_paired and _need_sr_disc_reads ( data ) and not _too_many_contigs ( dd . get_ref_file ( data ) ) :
            # samblaster path: also produce split-read and discordant BAMs
            sr_file = "%s-sr.bam" % os . path . splitext ( out_file ) [ 0 ]
            disc_file = "%s-disc.bam" % os . path . splitext ( out_file ) [ 0 ]
            with file_transaction ( data , sr_file ) as tx_sr_file :
                with file_transaction ( data , disc_file ) as tx_disc_file :
                    yield ( samblaster_dedup_sort ( data , tx_out_file , tx_sr_file , tx_disc_file ) , tx_out_file )
        else :
            yield ( _biobambam_dedup_sort ( data , tx_out_file ) , tx_out_file )
|
def igetattr ( self , name , context = None , class_context = True ) :
    """Infer the possible values of the given variable.

    :param name: The name of the variable to infer.
    :type name: str
    :param context: inference context; copied and its ``lookupname`` set.
    :param class_context: whether the lookup is made from the class itself
        rather than an instance.
    :returns: The inferred possible values.
    :rtype: iterable(NodeNG or Uninferable)
    :raises InferenceError: when the attribute cannot be inferred and the
        class has no dynamic attribute handling.
    """
    # set lookup name since this is necessary to infer on import nodes for
    # instance
    context = contextmod . copy_context ( context )
    context . lookupname = name
    try :
        attr = self . getattr ( name , context , class_context = class_context ) [ 0 ]
        for inferred in bases . _infer_stmts ( [ attr ] , context , frame = self ) : # yield Uninferable object instead of descriptors when necessary
            if not isinstance ( inferred , node_classes . Const ) and isinstance ( inferred , bases . Instance ) :
                try :
                    # instances defining __get__ are descriptors and cannot
                    # be resolved statically
                    inferred . _proxied . getattr ( "__get__" , context )
                except exceptions . AttributeInferenceError :
                    yield inferred
                else :
                    yield util . Uninferable
            else :
                yield function_to_method ( inferred , self )
    except exceptions . AttributeInferenceError as error :
        if not name . startswith ( "__" ) and self . has_dynamic_getattr ( context ) : # class handle some dynamic attributes , return a Uninferable object
            yield util . Uninferable
        else :
            raise exceptions . InferenceError ( error . message , target = self , attribute = name , context = context )
|
def email(self, to, msg):
    """Quickly send an email from the default (stored) address.

    Delegates to :py:meth:`gmail_email`.

    **stored credential name: GMAIL_EMAIL**

    :param string to: The email address to send the email to.
    :param msg: The content of the email. See :py:meth:`gmail_email`.
    """
    logging.debug('Emailing someone')
    sender = self._credentials['GMAIL_EMAIL']
    return self.gmail_email(sender, to, msg)
|
def build_subtree_strut(self, result, *args, **kwargs):
    """Build a nested tree structure from a flat, path-annotated result set.

    Each row must expose ``Resource`` (with a ``resource_id``) and ``path``
    (slash-separated integer ids, ending with the row's own position).

    :param result: iterable of rows.
    :return: dictionary of the form
        {"node": Resource|None, "children": {node_id: <same shape>}}
    """
    rows = list(result)
    root = {"node": None, "children": OrderedDict()}
    if not rows:
        return root
    for row in rows:
        entry = {"node": row.Resource, "children": OrderedDict()}
        id_path = [int(part) for part in row.path.split("/")]
        # walk down to the parent entry; the last element is the row itself
        parent = root
        for ancestor_id in id_path[:-1]:
            parent = parent["children"][ancestor_id]
        parent["children"][entry["node"].resource_id] = entry
    return root
|
def read_hdf5_timeseries(h5f, path=None, start=None, end=None, **kwargs):
    """Read a `TimeSeries` from HDF5, optionally cropped to [start, end)."""
    kwargs.setdefault('array_type', TimeSeries)
    data = read_hdf5_array(h5f, path=path, **kwargs)
    if start is None and end is None:
        return data
    # crop only when at least one bound was supplied
    return data.crop(start, end)
|
def evaluate_model_single_recording_preloaded(preprocessing_queue, feature_list, model, output_semantics, recording, recording_id=None):
    """Evaluate a model for a single recording, after everything has been loaded.

    Parameters
    ----------
    preprocessing_queue : list
        All preprocessing objects to apply.
    feature_list : list
        All feature objects to extract.
    model : dict
        Neural network model.
    output_semantics : list
        Defines what each output means.
    recording : string in JSON format
        The handwritten recording.
    recording_id : int or None
        For debugging purposes.
    """
    handwriting = handwritten_data.HandwrittenData(recording, raw_data_id=recording_id)
    handwriting.preprocessing(preprocessing_queue)
    features = handwriting.feature_extraction(feature_list)
    import nntoolkit.evaluate
    outputs = nntoolkit.evaluate.get_model_output(model, [features])
    return nntoolkit.evaluate.get_results(outputs, output_semantics)
|
def output(self, mode='file', forced=False, context=None):
    """The general output method; override in subclasses for custom behaviour.

    Joins the filtered input and, when compression is enabled or forced,
    runs it through the output filter chain before handing it to the
    mode-specific handler; otherwise the joined content is returned as-is.
    """
    content = '\n'.join(self.filter_input(forced, context=context))
    if not content:
        return ''
    if not (settings.COMPRESS_ENABLED or forced):
        return content
    compressed = self.filter_output(content)
    return self.handle_output(mode, compressed, forced)
|
def verify_telesign_callback_signature(api_key, signature, json_str):
    """Verify that a callback was made by TeleSign and not a malicious client.

    Recomputes the HMAC-SHA256 of the POST body with the account api_key and
    compares it against the supplied Authorization header value.

    :param api_key: the TeleSign API api_key associated with your account
        (base64-encoded).
    :param signature: the TeleSign Authorization header value supplied in
        the callback, as a string.
    :param json_str: the POST body text, that is, the JSON string sent by
        TeleSign describing the transaction status.
    :return: True when the signature matches, False otherwise.
    """
    # local import keeps the module's top-level import surface unchanged
    from hmac import compare_digest
    expected = b64encode(HMAC(b64decode(api_key), json_str.encode("utf-8"), sha256).digest()).decode("utf-8")
    # compare_digest is constant-time and length-safe; the original
    # hand-rolled loop leaked the signature length via its early return.
    # Compare as bytes so non-ASCII input cannot raise TypeError.
    return compare_digest(signature.encode("utf-8"), expected.encode("utf-8"))
|
def arping(net, timeout=2, cache=0, verbose=None, **kargs):
    """Send ARP who-has requests to determine which hosts are up
arping(net, [cache=0,] [iface=conf.iface,] [verbose=conf.verb]) -> None
Set cache=True if you want arping to modify internal ARP-Cache"""
    if verbose is None:
        verbose = conf.verb
    # Broadcast a who-has for every address in ``net``; the BPF filter
    # keeps only ARP is-at replies (arp[7] == 2).
    answered, unanswered = srp(
        Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=net),
        verbose=verbose,
        filter="arp and arp[7] = 2",
        timeout=timeout,
        iface_hint=net,
        **kargs
    )
    answered = ARPingResult(answered.res)
    if cache and answered is not None:
        # Record each replier's MAC with a timestamp in the ARP cache.
        for pair in answered:
            conf.netcache.arp_cache[pair[1].psrc] = (pair[1].hwsrc, time.time())
    if verbose:
        answered.show()
    return answered, unanswered
|
def calc_file_md5(filepath, chunk_size=None):
    """Calculate a file's md5 checksum.

    Reads the file in chunks (default 256 KiB) so arbitrarily large files
    can be hashed without loading them fully into memory.

    :param filepath: path of the file to hash
    :param chunk_size: IO chunk size in bytes; defaults to 256 KiB
    :return: hex digest string of the file's MD5
    """
    if chunk_size is None:
        chunk_size = 256 * 1024
    md5sum = hashlib.md5()
    # Open read-only binary: the original 'r+b' mode required write
    # permission and would fail on read-only files.
    with io.open(filepath, 'rb') as f:
        # iter() with a b'' sentinel stops cleanly at EOF.
        for datachunk in iter(lambda: f.read(chunk_size), b''):
            md5sum.update(datachunk)
    return md5sum.hexdigest()
|
def evpn_next_hop_unchanged(self, **kwargs):
    """Configure next hop unchanged for an EVPN neighbor.

    You probably don't want this method. You probably want to configure
    an EVPN neighbor using `BGP.neighbor`. That will configure next-hop
    unchanged automatically.

    Args:
        ip_addr (str): IP Address of BGP neighbor.
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric.
        delete (bool): Deletes the neighbor if `delete` is ``True``.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        None

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.203', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.bgp.local_asn(local_as='65535',
        ...     rbridge_id='225')
        ...     output = dev.bgp.neighbor(ip_addr='10.10.10.10',
        ...     remote_as='65535', rbridge_id='225')
        ...     output = dev.bgp.evpn_next_hop_unchanged(rbridge_id='225',
        ...     ip_addr='10.10.10.10')
        ...     output = dev.bgp.evpn_next_hop_unchanged(rbridge_id='225',
        ...     ip_addr='10.10.10.10', get=True)
        ...     output = dev.bgp.evpn_next_hop_unchanged(rbridge_id='225',
        ...     ip_addr='10.10.10.10', delete=True)
    """
    callback = kwargs.pop('callback', self._callback)
    # rbridge_id defaults to '1' when not supplied; ip_addr is required.
    args = dict(rbridge_id=kwargs.pop('rbridge_id', '1'), evpn_neighbor_ipv4_address=kwargs.pop('ip_addr'))
    # Auto-generated XML builder on the rbridge binding (long attribute
    # name split across adjacent string literals).
    next_hop_unchanged = getattr(self._rbridge, 'rbridge_id_router_router_bgp_address_' 'family_l2vpn_evpn_neighbor_evpn_' 'neighbor_ipv4_next_hop_unchanged')
    config = next_hop_unchanged(**args)
    if kwargs.pop('delete', False):
        # Mark the XML node for deletion instead of creation.
        config.find('.//*next-hop-unchanged').set('operation', 'delete')
    if kwargs.pop('get', False):
        return callback(config, handler='get_config')
    return callback(config)
|
def make_dropdown_widget(cls, description='Description', options=('Label 1', 'Label 2'), value='Label 1', file_path=None, layout=None, handler=None):
    """Return a Dropdown widget with the specified `handler`.

    :param description: label shown next to the dropdown
    :param options: selectable labels (default two placeholder labels)
    :param value: initially selected option
    :param file_path: if given, stored on the widget as ``dd.file_path``
    :param layout: optional ipywidgets Layout; a fresh one is created when None
    :param handler: if given, observed on the widget's ``value`` trait
    """
    # Build a fresh Layout per call: the old ``layout=Layout()`` default was
    # a single shared instance, so mutating one widget's layout leaked into
    # every other widget built with the default.
    if layout is None:
        layout = Layout()
    dd = widgets.Dropdown(description=description, options=options, value=value, layout=layout)
    if file_path is not None:
        dd.file_path = file_path
    if handler is not None:
        # Only react to value changes, not every trait event.
        dd.observe(handler, names=['value'])
    return dd
|
def __get_timemachine(self):
    """Return a TimeMachine for the object on which this action was
    performed, positioned at the time of this action.

    The TimeMachine is built lazily on first use and cached; subsequent
    calls reuse it and only re-seek to ``self.id``.
    """
    if not self.__timemachine:
        # Lazily create and cache the machine for this action's object.
        self.__timemachine = TimeMachine(self.object_uid, step=self.id, )
    return self.__timemachine.at(self.id)
|
def write_line(self, fix=True):
    """Output line containing values to console and csv file.

    Only committed values are written to the csv file.

    :param bool fix: to commit measurement values
    """
    cells = []
    csv_values = []
    for m in self.values():
        cells.append(m.render_value(fix=fix))
        # Multi-metrics contribute one csv column per sub-metric.
        if isinstance(m, MultiMetric):
            for sub in m.values():
                csv_values.append(sub.to_csv())
        else:
            csv_values.append(m.to_csv())
        if fix:
            # Committing clears the metric for the next interval.
            m.reset()
    if fix and self._csvfile:
        self._write_csv_row(csv_values)
    # Gray "|" separator between rendered cells.
    c = _ansi["gray"] + "|" + _ansi["reset"]
    # Start a new console line only after a committed (fixed) line;
    # otherwise redraw the current line in place with a carriage return.
    if self._last_line_fixed:
        stdout.write("\n\r")
    else:
        stdout.write("\r")
    if not fix:
        # Uncommitted values are rendered in reverse video.
        stdout.write(_ansi["reverse"])
    stdout.write(c.join(cells))
    stdout.flush()
    self._last_line_fixed = fix
|
def consume(self, service_agreement_id, did, service_definition_id, consumer_account, destination, index=None):
    """Consume the asset data.

    Using the service endpoint defined in the ddo's service pointed to by
    service_definition_id.  The consumer's permissions are checked
    implicitly by the secret-store during decryption of the contentUrls.
    The service endpoint is expected to also verify the consumer's
    permissions to consume this asset.
    This method downloads and saves the asset datafiles to disk.

    :param service_agreement_id: str
    :param did: DID, str
    :param service_definition_id: identifier of the service inside the asset DDO, str
    :param consumer_account: Account instance of the consumer
    :param destination: str path
    :param index: Index of the document that is going to be downloaded, int
    :return: str path to saved files
    :raises AssertionError: if ``index`` is not a non-negative integer
    """
    ddo = self.resolve(did)
    if index is not None:
        # Validate explicitly instead of with ``assert``: asserts are
        # stripped when Python runs with -O, and the old form passed the
        # (None) return value of ``logger.error`` as the assertion message.
        if not isinstance(index, int):
            logger.error('index has to be an integer.')
            raise AssertionError('index has to be an integer.')
        if index < 0:
            logger.error('index has to be 0 or a positive integer.')
            raise AssertionError('index has to be 0 or a positive integer.')
    return self._asset_consumer.download(
        service_agreement_id, service_definition_id, ddo, consumer_account,
        destination, BrizoProvider.get_brizo(),
        self._get_secret_store(consumer_account), index)
|
def parse_insert_metrics(fn):
    """Parse the output from Picard's CollectInsertSizeMetrics and return as
    pandas Series.

    Parameters
    ----------
    fn : str of filename or file handle
        Filename of the Picard output you want to parse.

    Returns
    -------
    metrics : pandas.Series
        Insert size metrics.
    hist : pandas.Series
        Insert size histogram.
    """
    with open(fn) as f:
        rows = [line.strip().split('\t') for line in f.readlines()]
    header = rows[6]
    values = rows[7]
    # Picard may omit trailing fields; pad with NaN so lengths match.
    values.extend([np.nan] * (len(header) - len(values)))

    def _coerce(v):
        # Strings become int, then float, otherwise stay strings.
        if not isinstance(v, str):
            return v
        for cast in (int, float):
            try:
                return cast(v)
            except ValueError:
                pass
        return v

    metrics = pd.Series([_coerce(v) for v in values], index=header)
    # Histogram section: two tab-separated columns (size, count).
    hist_rows = np.array(rows[11:-1])
    hist = pd.Series(hist_rows[:, 1], index=[int(float(x)) for x in hist_rows[:, 0]])
    hist = pd.to_numeric(hist)
    return metrics, hist
|
def summary(self, prn=None, lfilter=None):
    """prints a summary of each SndRcv packet pair
prn:     function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(), r.summary())
lfilter: truth function to apply to each packet pair to decide whether it will be displayed"""
    for sent, received in self.res:
        # Skip pairs rejected by the filter, when one was given.
        if lfilter is not None and not lfilter(sent, received):
            continue
        line = self._elt2sum((sent, received)) if prn is None else prn(sent, received)
        print(line)
|
def checkForSave(self):
    """Checks to see if the current document has been modified and should
    be saved.

    :return     <bool>
    """
    # An unmodified document never needs saving.
    if not self.isModified():
        return True
    buttons = QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel
    prompt = 'Would you like to save your changes to %s?' % self.windowTitle()
    choice = QMessageBox.question(None, 'Save Changes', prompt, buttons)
    if choice == QMessageBox.Cancel:
        return False
    if choice == QMessageBox.Yes:
        return self.save()
    # "No": proceed without saving.
    return True
|
def _phiforce(self, R, phi=0., t=0.):
    r"""NAME:
       _phiforce
    PURPOSE:
       evaluate the azimuthal force
    INPUT:
       R - radius
       phi - azimuth
       t - time
    OUTPUT:
       F_phi(R(,\phi,t))
    HISTORY:
       2016-06-02 - Written - Bovy (UofT)
    """
    # Delegate to the wrapped potential evaluated at z=0, in internal
    # units (use_physical=False skips unit conversion).
    return self._Pot.phiforce(R, 0., phi=phi, t=t, use_physical=False)
|
def inspect(self):
    """Inspect access attempt, used for captcha flow.

    :return:
    """
    # Most recent failed, captcha-enabled, unexpired attempt for this IP.
    last_attempt = self.get_last_failed_access_attempt(ip_address=self.ip, captcha_enabled=True, captcha_passed=False, is_expired=False)
    if last_attempt is None and not self.request.user.is_authenticated():
        # create a new entry
        user_access = self._FailedAccessAttemptModel(ip_address=self.ip, username=self.username, captcha_enabled=True, captcha_passed=False, is_expired=False)
    elif last_attempt:
        user_access = last_attempt
    # NOTE(review): when ``last_attempt`` is None and the user is already
    # authenticated, ``user_access`` is never bound; the branches below
    # happen not to reference it in that case — confirm this is intentional.
    if self.request.method == 'POST':
        if not self.request.user.is_authenticated():
            # Record the failed attempt and lock out past the threshold.
            user_access.user_agent = self.request.META.get('HTTP_USER_AGENT', '<unknown user agent>')[:255]
            user_access.username = self.username
            user_access.failed_attempts += 1
            user_access.params_get = self.request.GET
            user_access.params_post = self.request.POST
            if user_access.failed_attempts >= self.max_failed_attempts:
                user_access.is_locked = True
            user_access.save()
        elif self.request.user.is_authenticated() and last_attempt:
            # Successful login: expire the outstanding failed-attempt record.
            last_attempt.is_expired = True
            last_attempt.save()
|
def export_curves_stats(self, aids, key):
    """Extract loss-curve statistics for the given asset ids.

    :param aids: asset indices used to slice the stored dataset
    :param key: 'stats' for all statistics, or the name of a single one
    :returns: a dictionary stat name -> record of dtype loss_curve_dt
    :raises KeyError: if the datastore contains no loss curves
    """
    oq = self.dstore['oqparam']
    stats = oq.hazard_stats().items()  # pairs (name, func)
    stat2idx = {stat[0]: s for s, stat in enumerate(stats)}
    # classical_risk stores 'loss_curves-stats', event_based_risk stores
    # 'curves-stats'; the extraction logic is identical, so the two
    # previously-duplicated branches are folded into one loop.
    for dskey in ('loss_curves-stats', 'curves-stats'):
        if dskey in self.dstore:
            data = self.dstore[dskey][aids]  # shape (A, S)
            if key == 'stats':
                return {stat[0]: data[:, s] for s, stat in enumerate(stats)}
            # a specific statistic
            return {key: data[:, stat2idx[key]]}
    raise KeyError('no loss curves in %s' % self.dstore)
|
def from_e164(text, origin=public_enum_domain):
    """Convert an E.164 number in textual form into a Name object whose
    value is the ENUM domain name for that number.

    @param text: an E.164 number in textual form.
    @type text: str
    @param origin: The domain in which the number should be constructed.
    The default is e164.arpa.
    @type: dns.name.Name object or None
    @rtype: dns.name.Name object"""
    # ENUM names are the digits reversed, one label per digit.
    digits = [ch for ch in text if ch.isdigit()]
    return dns.name.from_text('.'.join(reversed(digits)), origin=origin)
|
def crop_to_bounds(self, val):
    """Return the given value cropped to be within the hard bounds
    for this parameter.

    If a numeric value is passed in, check it is within the hard
    bounds. If it is larger than the high bound, return the high
    bound. If it's smaller, return the low bound. In either case, the
    returned value could be None. If a non-numeric value is passed
    in, set to be the default value (which could be None). In no
    case is an exception raised; all values are accepted.
    """
    # Out-of-range values are silently cropped; it may be appropriate
    # to emit a warning in such cases instead.
    if _is_number(val):
        if self.bounds is None:
            return val
        low, high = self.bounds
        if low is not None and val < low:
            return low
        if high is not None and val > high:
            return high
        return val
    if self.allow_None and val is None:
        return val
    # Non-numeric value sent in: revert to the default value.
    return self.default
|
def capture_event(self, event, hint=None, scope=None):
    # type: (Dict[str, Any], Any, Scope) -> Optional[str]
    """Captures an event.

    This takes the ready made event and an optional hint and scope. The
    hint is internally used to further customize the representation of
    the error. When provided it's a dictionary of optional information
    such as exception info.

    If the transport is not set nothing happens, otherwise the return
    value of this function will be the ID of the captured event.
    """
    if self.transport is None:
        return None
    hint = {} if hint is None else hint
    event_id = event.get("event_id")
    if event_id is None:
        # Assign a fresh id so callers can reference the event later.
        event_id = uuid.uuid4().hex
        event["event_id"] = event_id
    if not self._should_capture(event, hint, scope):
        return None
    prepared = self._prepare_event(event, hint, scope)  # type: ignore
    if prepared is None:
        return None
    self.transport.capture_event(prepared)
    return event_id
|
def get_interfaces(self, socket_connection=None):
    """Returns the a list of Interface objects the service implements."""
    # Open (and later close) our own connection unless one was supplied.
    close_socket = not socket_connection
    if close_socket:
        socket_connection = self.open_connection()
    # noinspection PyUnresolvedReferences
    _service = self.handler(self._interfaces["org.varlink.service"], socket_connection)
    self.info = _service.GetInfo()
    if close_socket:
        socket_connection.close()
    return self.info['interfaces']
|
def get_properties(self):
    """Return the properties of this Configuration object.

    The dictionary returned is a private copy for the caller and may be
    changed without influencing the stored configuration.

    If called just after the configuration is created and before update
    has been called, this method returns None.

    :return: A private copy of the properties for the caller or None.
             These properties must not contain the "service.bundleLocation"
             property. The value of this property may be obtained from the
             get_bundle_location() method.
    """
    with self.__lock:
        if self.__deleted:
            raise ValueError("{0} has been deleted".format(self.__pid))
        if not self.__updated:
            # Fresh configuration: nothing to report yet
            return None
        # Hand out a copy, without the reserved bundle-location key
        props = self.__properties.copy()
        props.pop(services.CONFIG_PROP_BUNDLE_LOCATION, None)
        return props
|
def pkill(pattern, user=None, signal=15, full=False):
    '''Kill processes matching a pattern.

    .. code-block:: bash

        salt '*' ps.pkill pattern [user=username] [signal=signal_number] \\
                [full=(true|false)]

    pattern
        Pattern to search for in the process list.

    user
        Limit matches to the given username. Default: All users.

    signal
        Signal to send to the process(es). See manpage entry for kill
        for possible values. Default: 15 (SIGTERM).

    full
        A boolean value indicating whether only the name of the command or
        the full command line should be matched against the pattern.

    **Examples:**

    Send SIGHUP to all httpd processes on all 'www' minions:

    .. code-block:: bash

        salt 'www.*' ps.pkill httpd signal=1

    Send SIGKILL to all bash processes owned by user 'tom':

    .. code-block:: bash

        salt '*' ps.pkill bash signal=9 user=tom
    '''
    killed = []
    for proc in psutil.process_iter():
        # Match the full command line or just the executable name.
        if full:
            matched = pattern in ' '.join(_get_proc_cmdline(proc))
        else:
            matched = pattern in _get_proc_name(proc)
        if not matched:
            continue
        if user is not None and user != _get_proc_username(proc):
            continue
        try:
            proc.send_signal(signal)
            killed.append(_get_proc_pid(proc))
        except psutil.NoSuchProcess:
            # Process exited between enumeration and signalling.
            pass
    if not killed:
        return None
    return {'killed': killed}
|
def from_hdf(cls, fp, template_hash, root=None, load_to_memory=True, load_now=False):
    """Load a compressed waveform from the given hdf file handler.

    The waveform is retrieved from
    ``fp['[{root}/]compressed_waveforms/{template_hash}/{param}']``,
    where ``param`` is the ``sample_points``, ``amplitude``, and ``phase``.

    Parameters
    ----------
    fp : h5py.File
        An open hdf file to read the compressed waveform from.
    template_hash : {hash, int, str}
        The id of the waveform.
    root : {None, str}
        Retrieve the ``compressed_waveforms`` group from the given string.
        If ``None``, ``compressed_waveforms`` is assumed to be at the top
        level.
    load_to_memory : {True, bool}
        Set the ``load_to_memory`` attribute to the given value in the
        returned instance.
    load_now : {False, bool}
        Immediately load ``sample_points``/``amplitude``/``phase`` to memory.

    Returns
    -------
    CompressedWaveform
        An instance of this class with parameters loaded from the hdf file.
    """
    prefix = '' if root is None else '%s/' % (root)
    fp_group = fp['%scompressed_waveforms/%s' % (prefix, str(template_hash))]
    sample_points = fp_group['sample_points']
    amp = fp_group['amplitude']
    phase = fp_group['phase']
    if load_now:
        # Pull the full datasets into memory immediately.
        sample_points = sample_points[:]
        amp = amp[:]
        phase = phase[:]
    return cls(sample_points, amp, phase,
               interpolation=fp_group.attrs['interpolation'],
               tolerance=fp_group.attrs['tolerance'],
               mismatch=fp_group.attrs['mismatch'],
               precision=fp_group.attrs['precision'],
               load_to_memory=load_to_memory)
|
def replace(self, refobj, reference, taskfileinfo):
    """Replace the given reference with the given taskfileinfo.

    :param refobj: the refobj that is linked to the reference
    :param reference: the reference object. E.g. in Maya a reference node
    :param taskfileinfo: the taskfileinfo that will replace the old entity
    :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: None
    :rtype: None
    :raises: None
    """
    jbfile = JB_File(taskfileinfo)
    filepath = jbfile.get_fullpath()
    # Point the existing reference node at the new file.
    cmds.file(filepath, loadReference=reference)
    # query the actual new namespace
    ns = cmds.referenceQuery(reference, namespace=True)
    # get the content
    content = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True)
    # get the scene node
    scenenode = self.get_scenenode(content)
    # Re-link the reftrack entry to the freshly loaded scene node.
    self.get_refobjinter().connect_reftrack_scenenode(refobj, scenenode)
|
def create(self, **kwargs):
    """Creates a new statement matching the keyword arguments specified.
    Returns the created statement.
    """
    Statement = self.get_model('statement')
    # De-duplicate tags before storing.
    if 'tags' in kwargs:
        kwargs['tags'] = list(set(kwargs['tags']))
    # Pre-compute the searchable bigram strings when not supplied.
    if 'search_text' not in kwargs:
        kwargs['search_text'] = self.tagger.get_bigram_pair_string(kwargs['text'])
    if 'search_in_response_to' not in kwargs and kwargs.get('in_response_to'):
        kwargs['search_in_response_to'] = self.tagger.get_bigram_pair_string(kwargs['in_response_to'])
    kwargs['id'] = self.statements.insert_one(kwargs).inserted_id
    return Statement(**kwargs)
|
def get_last_components_by_type(component_types, topic_id, db_conn=None):
    """For each component type of a topic, get the last one.

    :param component_types: iterable of component type names
    :param topic_id: id of the topic the components belong to
    :param db_conn: optional database connection; defaults to the
        flask request-bound connection
    :return: list of component ids, one per requested type
    :raises dci_exc.DCIException: (412) when a type has no matching
        exported component, or when two types resolve to the same component
    """
    db_conn = db_conn or flask.g.db_conn
    schedule_components_ids = []
    for ct in component_types:
        # Only active, export-controlled components of this type/topic.
        where_clause = sql.and_(models.COMPONENTS.c.type == ct, models.COMPONENTS.c.topic_id == topic_id, models.COMPONENTS.c.export_control == True, models.COMPONENTS.c.state == 'active')
        # noqa
        # Newest first: the first fetched row is the latest component.
        query = (sql.select([models.COMPONENTS.c.id]).where(where_clause).order_by(sql.desc(models.COMPONENTS.c.created_at)))
        cmpt_id = db_conn.execute(query).fetchone()
        if cmpt_id is None:
            msg = 'Component of type "%s" not found or not exported.' % ct
            raise dci_exc.DCIException(msg, status_code=412)
        cmpt_id = cmpt_id[0]
        if cmpt_id in schedule_components_ids:
            # The same component satisfying two types means the request
            # was malformed.
            msg = ('Component types %s malformed: type %s duplicated.' % (component_types, ct))
            raise dci_exc.DCIException(msg, status_code=412)
        schedule_components_ids.append(cmpt_id)
    return schedule_components_ids
|
def assert_or_raise(stmt: bool, exception: Exception, *exception_args, **exception_kwargs) -> None:
    """Raise *exception* (constructed with the given args) when *stmt* is false."""
    if stmt:
        return
    raise exception(*exception_args, **exception_kwargs)
|
def LSLS(self, params):
    """LSLS [Ra,] Ra, Rc
    LSLS [Ra,] Rb, #imm5

    Logical shift left Rb by Rc or imm5 and store the result in Ra.
    imm5 is [0, 31].
    In the register shift, the first two operands must be the same register.
    Ra, Rb, and Rc must be low registers.
    If Ra is omitted, then it is assumed to be Rb.
    """
    # This instruction allows for an optional destination register
    # If it is omitted, then it is assumed to be Rb
    # As defined in http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0662b/index.html
    try:
        Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
    except iarm.exceptions.ParsingError:
        # Two-operand form: the destination defaults to the first operand.
        Rb, Rc = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
        Ra = Rb
    if self.is_register(Rc):
        # LSLS Ra, Ra, Rb  (register-shift form)
        self.check_arguments(low_registers=(Ra, Rc))
        self.match_first_two_parameters(Ra, Rb)

        def LSLS_func():
            # Set the C flag, or the last shifted out bit
            if (self.register[Rc] < self._bit_width) and (self.register[Ra] & (1 << (self._bit_width - self.register[Rc]))):
                self.set_APSR_flag_to_value('C', 1)
            else:
                self.set_APSR_flag_to_value('C', 0)
            self.register[Ra] = self.register[Ra] << self.register[Rc]
            self.set_NZ_flags(self.register[Ra])
    else:
        # LSLS Ra, Rb, #imm5  (immediate-shift form)
        self.check_arguments(low_registers=(Ra, Rb), imm5=(Rc,))
        shift_amount = self.check_immediate(Rc)

        def LSLS_func():
            # Set the C flag, or the last shifted out bit
            if (shift_amount < self._bit_width) and (self.register[Rb] & (1 << (self._bit_width - shift_amount))):
                self.set_APSR_flag_to_value('C', 1)
            else:
                self.set_APSR_flag_to_value('C', 0)
            self.register[Ra] = self.register[Rb] << shift_amount
            self.set_NZ_flags(self.register[Ra])
    # The executable closure is returned for the simulator to run later.
    return LSLS_func
|
def bus_factor(self, ignore_globs=None, include_globs=None, by='projectd'):
    """An experimental heuristic for the truck factor of a repository,
    calculated from the current distribution of blame in the repository's
    primary branch.  The factor is the fewest number of contributors whose
    contributions make up at least 50% of the codebase's LOC.

    :param ignore_globs: (optional, default=None) a list of globs to ignore; default of None excludes nothing.
    :param include_globs: (optional, default=None) a list of globs to include; default of None includes everything.
    :param by: 'projectd' (aggregate) or 'repository' (per repo); 'file' is not implemented.
    :return: pandas.DataFrame with the grouping key and a 'bus factor' column.
    """
    if by == 'file':
        raise NotImplementedError('File-wise bus factor')
    if by == 'projectd':
        blame = self.blame(ignore_globs=ignore_globs, include_globs=include_globs, by='repository')
        blame = blame.sort_values(by=['loc'], ascending=False)
        total = blame['loc'].sum()
        cumulative = 0
        tc = 0
        # Count contributors (rows, largest first) until their cumulative
        # LOC reaches half the total.  Iterating the values positionally
        # replaces the removed label-ambiguous ``DataFrame.ix`` accessor.
        for loc in blame['loc'].values:
            cumulative += loc
            tc += 1
            if cumulative >= total / 2:
                break
        return pd.DataFrame([['projectd', tc]], columns=['projectd', 'bus factor'])
    if by == 'repository':
        df = pd.DataFrame(columns=['repository', 'bus factor'])
        for repo in self.repos:
            try:
                # BUGFIX: previously passed ``ignore_globs=include_globs``,
                # silently ignoring the caller's ignore list.
                # ``DataFrame.append`` is deprecated/removed; use concat.
                df = pd.concat([df, repo.bus_factor(ignore_globs=ignore_globs, include_globs=include_globs, by=by)])
            except GitCommandError:
                print('Warning! Repo: %s couldn\'t be inspected' % (repo, ))
        # BUGFIX: the reset_index() result was previously discarded.
        return df.reset_index(drop=True)
|
def crc(self):
    """A zlib.crc32 or zlib.adler32 checksum of the current data.

    Returns
    -------
    crc : int, checksum from zlib.crc32 or zlib.adler32
    """
    # Serve the cached checksum when the data hasn't changed.
    if not self._modified_c and hasattr(self, '_hashed_crc'):
        return self._hashed_crc
    if self.flags['C_CONTIGUOUS']:
        self._hashed_crc = crc32(self)
    else:
        # We may have sliced a contiguous array into a non-contiguous
        # view, e.g. tracked_array(np.random.random(10))[::-1]; the
        # checksum needs a contiguous buffer.
        self._hashed_crc = crc32(np.ascontiguousarray(self))
    self._modified_c = False
    return self._hashed_crc
|
def with_mfa(self, mfa_token):
    """Set the MFA token for the next request.

    `mfa_token`s are only good for one request. Use this method to chain
    into the protected action you want to perform.

    Note: Only useful for Application authentication.

    Usage:
        account.with_mfa(application.totp.now()).pay(...)

    Args:
        mfa_token (str/function, optional): TOTP token for the Application
            OR a callable/function which will generate such a token when
            called.

    Returns:
        self
    """
    # ``callable()`` has been back in the language since Python 3.2, so
    # the old ``hasattr(x, '__call__')`` workaround is no longer needed.
    if callable(mfa_token):
        self.context.mfa_token = mfa_token()
    else:
        self.context.mfa_token = mfa_token
    return self
|
def load_family_details(self, pheno_covar):
    """Load family data updating the pheno_covar with family ids found.

    :param pheno_covar: Phenotype/covariate object
    :return: None
    """
    mask_components = []
    # 1s indicate an individual is to be masked out
    # ``with`` guarantees the handle is closed; the original leaked it.
    # Also avoids shadowing the builtins ``file``/``format``.
    with open(self.fam_details) as fam_file:
        fam_file.readline()  # header line (unused)
        fam_file.readline()  # format line (unused)
        self.file_index = 0
        for line in fam_file:
            words = line.strip().split()
            indid = ":".join(words[0:2])
            if DataParser.valid_indid(indid):
                mask_components.append(0)
                sex = int(words[5])
                pheno = float(words[6])
                pheno_covar.add_subject(indid, sex, pheno)
            else:
                mask_components.append(1)
    mask_components = numpy.array(mask_components)
    # Two mask columns (one per allele) per individual.
    self.ind_mask = numpy.zeros(len(mask_components) * 2, dtype=numpy.int8).reshape(-1, 2)
    self.ind_mask[0:, 0] = mask_components
    self.ind_mask[0:, 1] = mask_components
    self.ind_count = self.ind_mask.shape[0]
    pheno_covar.freeze_subjects()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.