signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def weighted(self):
    """Create a weighted (WCNF) copy of this formula.

    Every clause of the CNF formula becomes a *soft* clause of weight
    ``1`` in the new :class:`WCNF`; the set of hard clauses is empty.

    :return: an object of class :class:`WCNF`.

    Example:

    .. code-block:: python

        >>> from pysat.formula import CNF
        >>> cnf = CNF(from_clauses=[[-1, 2], [3, 4]])
        >>> wcnf = cnf.weighted()
        >>> print(wcnf.hard)
        []
        >>> print(wcnf.soft)
        [[-1, 2], [3, 4]]
        >>> print(wcnf.wght)
        [1, 1]
    """
    wcnf = WCNF()
    wcnf.nv = self.nv
    wcnf.hard = []
    # deep-copy so later mutation of the WCNF does not alter this CNF
    wcnf.soft = copy.deepcopy(self.clauses)
    wcnf.wght = [1 for cl in wcnf.soft]
    # BUG FIX: the top weight belongs to the new WCNF object; the original
    # assigned it to ``self`` (the source CNF), leaving ``wcnf.topw`` at
    # its default and mutating the source formula as a side effect.
    wcnf.topw = len(wcnf.wght) + 1
    wcnf.comments = self.comments[:]
    return wcnf
def _lsb_release_info ( self ) :
"""Get the information items from the lsb _ release command output .
Returns :
A dictionary containing all information items .""" | if not self . include_lsb :
return { }
with open ( os . devnull , 'w' ) as devnull :
try :
cmd = ( 'lsb_release' , '-a' )
stdout = subprocess . check_output ( cmd , stderr = devnull )
except OSError : # Command not found
return { }
content = stdout . decode ( sys . getfilesystemencoding ( ) ) . splitlines ( )
return self . _parse_lsb_release_content ( content ) |
def do_chunked_gzip(infh, outfh, filename):
    """A memory-friendly way of compressing the data.

    Reads ``infh`` in fixed-size chunks and streams them through gzip into
    ``outfh``, drawing a progress bar on stdout as bytes are compressed.
    """
    import gzip
    # Write gzip data into the caller-supplied file object; the archive
    # member name is 'rawlogs' regardless of the source file name.
    gzfh = gzip.GzipFile('rawlogs', mode='wb', fileobj=outfh)
    if infh.closed:
        # Source was closed by an earlier stage -- reopen it by name.
        infh = open(infh.name, 'r')
    else:
        infh.seek(0)
    readsize = 0
    sys.stdout.write('Gzipping {0}: '.format(filename))
    if os.stat(infh.name).st_size:
        infh.seek(0)
        progressbar = ProgressBar(sys.stdout, os.stat(infh.name).st_size, "bytes gzipped")
    while True:
        chunk = infh.read(GZIP_CHUNK_SIZE)
        if not chunk:
            break
        if sys.version_info[0] >= 3:
            # Python 3 text reads give str; gzip needs bytes.
            # noinspection PyArgumentList
            gzfh.write(bytes(chunk, "utf-8"))
        else:
            gzfh.write(chunk)
        readsize += len(chunk)
        # NOTE(review): ``progressbar`` is only defined when the source file
        # is non-empty.  An empty file produces no chunks so this line is
        # never reached in that case, but the coupling is fragile -- confirm
        # before refactoring.
        progressbar.redraw(readsize)
    gzfh.close()
def no_counterpart_found(string, options, rc_so_far):
    """Take the action determined by ``options.else_action``.

    Unless told to raise an exception, this function returns the errno
    that is supposed to be returned in this case.

    :param string: The lookup string.
    :param options: ArgumentParser result (or equivalent) providing
        ``options.else_action``, ``options.else_errno`` and
        ``options.no_newline``.
    :param rc_so_far: Return code accumulated so far; the caller adopts
        the returned ``options.else_errno``.
    :raises KeyError: if ``options.else_action == "exception"``.
    :raises ValueError: if ``options.else_action`` is not one of
        ``"passthrough"``, ``"exception"`` or ``"error"``.
    """
    logger.debug("options.else_action: %s", options.else_action)
    if options.else_action == "passthrough":
        format_list = [string]
        output_fd = sys.stdout
    elif options.else_action == "exception":
        raise KeyError("No counterpart found for: %s" % (string))
    elif options.else_action == "error":
        format_list = ["# No counterpart found for: %s" % (string)]
        output_fd = sys.stderr
    else:
        # BUG FIX: an unrecognized action previously fell through and
        # crashed with a NameError on ``format_list``; fail explicitly.
        raise ValueError("Unknown else_action: %r" % (options.else_action,))
    if not options.no_newline:
        format_list.append("\n")
    output_fd.write("".join(format_list))
    return options.else_errno
def count_in_date(x='date_time', filter_dict=None, model=DEFAULT_MODEL, app=DEFAULT_APP, sort=True, limit=100000):
    """Count records per date bin for a date(-time) field.

    Bins the rows of ``model`` by the date part of field ``x`` and returns
    two parallel lists: the date bins and the per-bin record counts.

    >>> from django.db import transaction
    >>> transaction.rollback()
    >>> x, y = count_in_date(x='date', filter_dict={'model__icontains': 'LC5'}, limit=5, sort=1)
    >>> len(x) == len(y) == 5
    True
    >>> y[1] >= y[0]
    True
    """
    # Normalize sort into an ORDER BY prefix ('' or '-'), or None for no sort.
    sort = sort_prefix(sort)
    model = get_model(model, app)
    filter_dict = filter_dict or {}
    # Fuzzy-match the requested field name against the model's real fields.
    x = fuzzy.extractOne(str(x), model._meta.get_all_field_names())[0]
    objects = model.objects.filter(**filter_dict)
    # SQL date() truncation provides the counting bin.
    objects = objects.extra({'date_bin_for_counting': 'date(%s)' % x})
    objects = objects.values('date_bin_for_counting')
    objects = objects.annotate(count_of_records_per_date_bin=djmodels.Count('pk'))
    # FIXME: this duplicates the dict-of-lists sort below
    if sort is not None:
        objects = objects.order_by(sort + 'date_bin_for_counting')
    objects = objects.all()
    if limit:
        objects = objects[:int(limit)]
    # Transpose the list of row-dicts into a dict of column-lists.
    objects = util.sod_transposed(objects)
    if sort is not None:
        objects = sorted_dict_of_lists(objects, field_names=['count_of_records_per_date_bin', 'date_bin_for_counting'], reverse=bool(sort))
    return objects['date_bin_for_counting'], objects['count_of_records_per_date_bin']
def remove_event_handler(self, handler, event_name):
    """Detach ``handler`` from the handlers registered for ``event_name``.

    Args:
        handler (callable): the callable event handler to remove.
        event_name: the event the handler is attached to.

    Raises:
        ValueError: if ``event_name`` is unknown or ``handler`` is not
            among its registered handlers.
    """
    if event_name not in self._event_handlers:
        raise ValueError("Input event name '{}' does not exist".format(event_name))
    registered = self._event_handlers[event_name]
    kept = [entry for entry in registered if entry[0] != handler]
    if len(kept) == len(registered):
        raise ValueError("Input handler '{}' is not found among registered event handlers".format(handler))
    self._event_handlers[event_name] = kept
def compile_fund(workbook, sheet, row, col):
    """Compile funding entries.

    Reads four stacked rows (agency / grant / principal investigator /
    country) column by column, starting one column to the right of ``col``,
    and keeps adding entries until a fully empty column is found.

    :param obj workbook:
    :param str sheet:
    :param int row:
    :param int col:
    :return list of dict: l
    """
    logger_excel.info("enter compile_fund")
    l = []
    temp_sheet = workbook.sheet_by_name(sheet)
    while col < temp_sheet.ncols:
        col += 1
        try:
            # Make a dictionary for this funding entry.
            _curr = {'agency': temp_sheet.cell_value(row, col),
                     'grant': temp_sheet.cell_value(row + 1, col),
                     "principalInvestigator": temp_sheet.cell_value(row + 2, col),
                     "country": temp_sheet.cell_value(row + 3, col)}
            # Gather the four cells so we can test for an all-empty column.
            _exist = [temp_sheet.cell_value(row, col), temp_sheet.cell_value(row + 1, col),
                      temp_sheet.cell_value(row + 2, col), temp_sheet.cell_value(row + 3, col)]
            # Remove all empty items from the list
            _exist = [i for i in _exist if i]
            # If all four cells are empty, don't continue. Quit funding and
            # return what we have.
            if not _exist:
                return l
            # We have funding data. Add this funding block to the growing list.
            l.append(_curr)
        except IndexError as e:
            logger_excel.debug("compile_fund: IndexError: sheet:{} row:{} col:{}, {}".format(sheet, row, col, e))
    logger_excel.info("exit compile_fund")
    return l
def tar_archive(context):
    """Archive specified path to a tar archive.

    Args:
        context: dictionary-like. context is mandatory.
            context['tar']['archive'] must exist. It's a dictionary.
            keys are the paths to archive.
            values are the destination output paths.

    Example:
        tar:
            archive:
                - in: path/to/dir
                  out: path/to/destination.tar.xs
                - in: another/my.file
                  out: ./my.tar.xs

        This will archive directory path/to/dir to path/to/destination.tar.xs,
        and also archive file another/my.file to ./my.tar.xs
    """
    logger.debug("start")
    # Compression mode for tarfile.open (e.g. 'w:gz') derived from context.
    mode = get_file_mode_for_writing(context)
    for item in context['tar']['archive']:
        # value is the destination tar. Allow string interpolation.
        destination = context.get_formatted_string(item['out'])
        # key is the source to archive
        source = context.get_formatted_string(item['in'])
        with tarfile.open(destination, mode) as archive_me:
            logger.debug(f"Archiving '{source}' to '{destination}'")
            # arcname='.' stores contents relative to the archive root.
            archive_me.add(source, arcname='.')
            logger.info(f"Archived '{source}' to '{destination}'")
    logger.debug("end")
def asym(scatterer, h_pol=True):
    """Asymmetry parameter for the current setup, with polarization.

    Args:
        scatterer: a Scatterer instance.
        h_pol: If True (default), use horizontal polarization.
            If False, use vertical polarization.

    Returns:
        The asymmetry parameter.
    """
    if scatterer.psd_integrator is not None:
        # PSD-integrated case: delegate to the precomputed integrator.
        return scatterer.psd_integrator.get_angular_integrated(scatterer.psd, scatterer.get_geometry(), "asym")
    # Save geometry so it can be restored after the integrand mutates it.
    old_geom = scatterer.get_geometry()
    cos_t0 = np.cos(scatterer.thet0 * deg_to_rad)
    sin_t0 = np.sin(scatterer.thet0 * deg_to_rad)
    p0 = scatterer.phi0 * deg_to_rad
    def integrand(thet, phi):
        # Scattered intensity weighted by cos(Theta)*sin(theta), where
        # Theta is the scattering angle relative to the incident beam.
        (scatterer.phi, scatterer.thet) = (phi * rad_to_deg, thet * rad_to_deg)
        cos_T_sin_t = 0.5 * (np.sin(2 * thet) * cos_t0 + (1 - np.cos(2 * thet)) * sin_t0 * np.cos(p0 - phi))
        I = sca_intensity(scatterer, h_pol)
        return I * cos_T_sin_t
    try:
        cos_int = dblquad(integrand, 0.0, 2 * np.pi, lambda x: 0.0, lambda x: np.pi)[0]
    finally:
        # Always restore the geometry mutated inside integrand().
        scatterer.set_geometry(old_geom)
    # Normalize by the scattering cross section.
    return cos_int / sca_xsect(scatterer, h_pol)
def gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter,
                                 tol, weighting='local', Cpt_params=None):
    """Use GMRES to smooth T by solving A T = 0, subject to nullspace and
    sparsity constraints.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        SPD sparse NxN matrix
        Should be at least nonsymmetric or indefinite
    T : bsr_matrix
        Tentative prolongator, a NxM sparse matrix (M < N).
        This is initial guess for the equation A T = 0.
        Assumed that T B_c = B_f
    B : array
        Near-nullspace modes for coarse grid, i.e., B_c.
        Has shape (M, k) where k is the number of coarse candidate vectors.
    BtBinv : array
        3 dimensional array such that,
        BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
        to the neighborhood (in the matrix graph) of dof of i.
    Sparsity_Pattern : csr_matrix, bsr_matrix
        Sparse NxM matrix
        This is the sparsity pattern constraint to enforce on the
        eventual prolongator
    maxiter : int
        maximum number of iterations
    tol : float
        residual tolerance for A T = 0
    weighting : string
        'block', 'diagonal' or 'local' construction of the diagonal
        preconditioning
    Cpt_params : tuple
        Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
        the standard SA prolongation smoothing is carried out. If True, then
        dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
        the injection matrix for the Cpts, (2) I_F: an identity matrix
        for only the F-points (i.e. I, but with zero rows and columns for
        C-points) and I_C: the C-point analogue to I_F.

    Returns
    -------
    T : bsr_matrix
        Smoothed prolongator using GMRES to solve A T = 0,
        subject to the constraints, T B_c = B_f, and T has no nonzero
        outside of the sparsity pattern in Sparsity_Pattern.

    See Also
    --------
    The principal calling routine,
    pyamg.aggregation.smooth.energy_prolongation_smoother
    """
    # For non-SPD system, apply GMRES with Diagonal Preconditioning
    # Preallocate space for new search directions
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    AV = sparse.bsr_matrix((uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr), shape=(Sparsity_Pattern.shape))
    # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
    xtype = sparse.sputils.upcast(A.dtype, T.dtype, B.dtype)
    Q = []
    # Givens Rotations
    V = []
    # Krylov Space
    # Upper Hessenberg matrix, converted to upper tri with Givens Rots
    H = np.zeros((maxiter + 1, maxiter + 1), dtype=xtype)
    # GMRES will be run with diagonal preconditioning
    if weighting == 'diagonal':
        Dinv = get_diagonal(A, norm_eq=False, inv=True)
    elif weighting == 'block':
        Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
        Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]), np.arange(Dinv.shape[0] + 1)), shape=A.shape)
    elif weighting == 'local':
        # Based on Gershgorin estimate: row sums of |A| bound the spectrum.
        D = np.abs(A) * np.ones((A.shape[0], 1), dtype=A.dtype)
        Dinv = np.zeros_like(D)
        Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
    else:
        raise ValueError('weighting value is invalid')
    # Calculate initial residual
    # Equivalent to R = -A*T;  R = R.multiply(Sparsity_Pattern)
    # with the added constraint that R has an explicit 0 wherever
    # R is 0 and Sparsity_Pattern is not
    uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
    R = sparse.bsr_matrix((uones, Sparsity_Pattern.indices, Sparsity_Pattern.indptr), shape=(Sparsity_Pattern.shape))
    pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices, np.ravel(A.data), T.indptr, T.indices, np.ravel(T.data), R.indptr, R.indices, np.ravel(R.data), int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]), A.blocksize[0], A.blocksize[1], T.blocksize[1])
    R.data *= -1.0
    # Apply diagonal preconditioner
    if weighting == 'local' or weighting == 'diagonal':
        R = scale_rows(R, Dinv)
    else:
        R = Dinv * R
    # Enforce R*B = 0 (keep the residual in the constraint nullspace)
    Satisfy_Constraints(R, B, BtBinv)
    if R.nnz == 0:
        print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
Returning tentative prolongator\n")
        return T
    # This is the RHS vector for the problem in the Krylov Space
    normr = np.sqrt((R.data.conjugate() * R.data).sum())
    g = np.zeros((maxiter + 1,), dtype=xtype)
    g[0] = normr
    # First Krylov vector: V[0] = r/normr
    if normr > 0.0:
        V.append((1.0 / normr) * R)
    i = -1
    while i < maxiter - 1 and normr > tol:
        i = i + 1
        # Calculate new search direction
        # Equivalent to: AV = A*V;  AV = AV.multiply(Sparsity_Pattern)
        # with the added constraint that explicit zeros are in AP wherever
        # AP = 0 and Sparsity_Pattern does not
        AV.data[:] = 0.0
        pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices, np.ravel(A.data), V[i].indptr, V[i].indices, np.ravel(V[i].data), AV.indptr, AV.indices, np.ravel(AV.data), int(T.shape[0] / T.blocksize[0]), int(T.shape[1] / T.blocksize[1]), A.blocksize[0], A.blocksize[1], T.blocksize[1])
        if weighting == 'local' or weighting == 'diagonal':
            AV = scale_rows(AV, Dinv)
        else:
            AV = Dinv * AV
        # Enforce AV*B = 0
        Satisfy_Constraints(AV, B, BtBinv)
        V.append(AV.copy())
        # Modified Gram-Schmidt orthogonalization against previous vectors
        for j in range(i + 1):
            # Frobenius inner-product
            H[j, i] = (V[j].conjugate().multiply(V[i + 1])).sum()
            V[i + 1] = V[i + 1] - H[j, i] * V[j]
        # Frobenius Norm
        H[i + 1, i] = np.sqrt((V[i + 1].data.conjugate() * V[i + 1].data).sum())
        # Check for breakdown (happy breakdown: exact solution reached)
        if H[i + 1, i] != 0.0:
            V[i + 1] = (1.0 / H[i + 1, i]) * V[i + 1]
        # Apply previous Givens rotations to H
        if i > 0:
            apply_givens(Q, H[:, i], i)
        # Calculate and apply next complex-valued Givens Rotation
        if H[i + 1, i] != 0:
            h1 = H[i, i]
            h2 = H[i + 1, i]
            h1_mag = np.abs(h1)
            h2_mag = np.abs(h2)
            # Choose the larger magnitude as divisor for numerical stability
            if h1_mag < h2_mag:
                mu = h1 / h2
                tau = np.conjugate(mu) / np.abs(mu)
            else:
                mu = h2 / h1
                tau = mu / np.abs(mu)
            denom = np.sqrt(h1_mag ** 2 + h2_mag ** 2)
            c = h1_mag / denom
            s = h2_mag * tau / denom
            Qblock = np.array([[c, np.conjugate(s)], [-s, c]], dtype=xtype)
            Q.append(Qblock)
            # Apply Givens Rotation to g,
            # the RHS for the linear system in the Krylov Subspace.
            g[i:i + 2] = sp.dot(Qblock, g[i:i + 2])
            # Apply effect of Givens Rotation to H
            H[i, i] = sp.dot(Qblock[0, :], H[i:i + 2, i])
            H[i + 1, i] = 0.0
        normr = np.abs(g[i + 1])
    # End while loop
    # Find best update to x in Krylov Space, V. Solve (i x i) system.
    if i != -1:
        y = la.solve(H[0:i + 1, 0:i + 1], g[0:i + 1])
        for j in range(i + 1):
            T = T + y[j] * V[j]
    # Ensure identity at C-pts
    if Cpt_params[0]:
        T = Cpt_params[1]['I_F'] * T + Cpt_params[1]['P_I']
    return T
def push_results(self, results, scheduler_name):
    """Send a HTTP request to the satellite (POST /put_results).

    Send actions results to the satellite.

    :param results: Results list to send
    :type results: list
    :param scheduler_name: Scheduler name
    :type scheduler_name: uuid
    :return: True on success, False on failure
    :rtype: bool
    """
    logger.debug("Pushing %d results", len(results))
    payload = {'results': results, 'from': scheduler_name}
    return self.con.post('put_results', payload, wait=True)
def obfn_g(self, Y):
    r"""Compute :math:`g(\mathbf{y}) = g_0(\mathbf{y}_0) +
    g_1(\mathbf{y}_1)` component of ADMM objective function."""
    g0_term = self.obfn_g0(self.obfn_g0var())
    g1_term = self.obfn_g1(self.obfn_g1var())
    return g0_term + g1_term
def ranked_attributes(self):
    """Returns the matrix of ranked attributes from the last run.

    :return: the Numpy matrix, or None if unavailable
    :rtype: ndarray
    """
    matrix = javabridge.call(self.jobject, "rankedAttributes", "()[[D")
    if matrix is None:
        return None
    return typeconv.double_matrix_to_ndarray(matrix)
def read(self, **keys):
    """Read data from this HDU.

    By default, all data are read.  Send columns= and rows= to select
    subsets of the data.  Table data are read into a recarray; use
    read_column() to get a single column as an ordinary array.  You can
    alternatively use slice notation:

        fits = fitsio.FITS(filename)
        fits[ext][:]
        fits[ext][2:5]
        fits[ext][200:235:2]
        fits[ext][rows]
        fits[ext][cols][rows]

    parameters
    ----------
    columns: optional
        An optional set of columns to read from table HDUs.  Default is to
        read all.  Can be string or number.  If a sequence, a recarray
        is always returned.  If a scalar, an ordinary array is returned.
    rows: optional
        An optional list of rows to read from table HDUS.  Default is to
        read all.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.  Can
        be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
    """
    columns = keys.get('columns', None)
    rows = keys.get('rows', None)
    # Column selection takes precedence; strip only the consumed key so
    # the remaining keywords (including 'rows') pass through untouched.
    if columns is not None:
        keys.pop('columns', None)
        data = self.read_columns(columns, **keys)
    elif rows is not None:
        keys.pop('rows', None)
        data = self.read_rows(rows, **keys)
    else:
        data = self._read_all(**keys)
    return data
def get(self, collection_id, content=None, **kwargs):
    """Syntactic sugar to make it easier to get fine-grained access
    to the parts of a file without composing a PhyloSchema object.

    Possible invocations include:
        w.get('pg_10')
        w.get('pg_10', 'trees')
        w.get('pg_10', 'trees', format='nexus')
        w.get('pg_10', tree_id='tree3')
    see:
    """
    assert COLLECTION_ID_PATTERN.match(collection_id)
    response = self.get_collection(collection_id)
    # Unwrap the 'data' payload when the service returns an envelope dict.
    if isinstance(response, dict) and 'data' in response:
        return response['data']
    return response
def start_user_session(self, username, domain, resource, **kwargs):
    """Method to add a user session for debugging.

    Accepted parameters are the same as to the constructor of
    :py:class:`~xmpp_backends.base.UserSession`.

    :raises UserNotFound: if no stored data exists for the user.
    """
    # Fill sensible defaults for any session attribute not supplied.
    kwargs.setdefault('uptime', pytz.utc.localize(datetime.utcnow()))
    kwargs.setdefault('priority', 0)
    kwargs.setdefault('status', 'online')
    kwargs.setdefault('status_text', '')
    kwargs.setdefault('connection_type', CONNECTION_XMPP)
    kwargs.setdefault('encrypted', True)
    kwargs.setdefault('compressed', False)
    kwargs.setdefault('ip_address', '127.0.0.1')
    if six.PY2 and isinstance(kwargs['ip_address'], str):
        # ipaddress constructor does not eat str in py2 :-/
        kwargs['ip_address'] = kwargs['ip_address'].decode('utf-8')
    if isinstance(kwargs['ip_address'], six.string_types):
        kwargs['ip_address'] = ipaddress.ip_address(kwargs['ip_address'])
    user = '%s@%s' % (username, domain)
    session = UserSession(self, username, domain, resource, **kwargs)
    data = self.module.get(user)
    if data is None:
        raise UserNotFound(username, domain, resource)
    data.setdefault('sessions', set())
    if isinstance(data['sessions'], list):
        # Cast old data to set
        data['sessions'] = set(data['sessions'])
    data['sessions'].add(session)
    self.module.set(user, data)
    # Also record the session in the backend-wide session index.
    all_sessions = self.module.get('all_sessions', set())
    all_sessions.add(session)
    self.module.set('all_sessions', all_sessions)
def wait(self, status=None, locked=None, wait_interval=None, wait_time=None):
    """Poll the server periodically until the droplet has reached some final
    state.  If ``status`` is non-`None`, ``wait`` will wait for the
    droplet's ``status`` field to equal the given value.  If ``locked`` is
    non-`None`, `wait` will wait for the droplet's ``locked`` field to
    equal (the truth value of) the given value.  Exactly one of ``status``
    and ``locked`` must be non-`None`.

    If ``wait_time`` is exceeded, a `WaitTimeoutError` (containing the
    droplet's most recently fetched state) is raised.

    If a `KeyboardInterrupt` is caught, the droplet's most recently fetched
    state is returned immediately without waiting for completion.

    .. versionchanged:: 0.2.0
        Raises `WaitTimeoutError` on timeout

    .. versionchanged:: 0.2.0
        ``locked`` parameter added

    .. versionchanged:: 0.2.0
        No longer waits for latest action to complete

    :param status: When non-`None`, the desired value for the ``status``
        field of the droplet, which should be one of
        `Droplet.STATUS_ACTIVE`, `Droplet.STATUS_ARCHIVE`,
        `Droplet.STATUS_NEW`, and `Droplet.STATUS_OFF`.  (For the sake of
        forwards-compatibility, any other value is accepted as well.)
    :type status: string or `None`
    :param locked: When non-`None`, the desired value for the ``locked``
        field of the droplet
    :type locked: `bool` or `None`
    :param number wait_interval: how many seconds to sleep between
        requests; defaults to the `doapi` object's
        :attr:`~doapi.wait_interval` if not specified or `None`
    :param number wait_time: the total number of seconds after which the
        method will raise an error if the droplet has not yet completed, or
        a negative number to wait indefinitely; defaults to the `doapi`
        object's :attr:`~doapi.wait_time` if not specified or `None`
    :return: the droplet's final state
    :rtype: Droplet
    :raises TypeError: if both or neither of ``status`` & ``locked`` are
        defined
    :raises DOAPIError: if the API endpoint replies with an error
    :raises WaitTimeoutError: if ``wait_time`` is exceeded
    """
    # Delegate to the manager's bulk waiter and take the single result.
    manager = self.doapi_manager
    waiter = manager.wait_droplets([self], status, locked, wait_interval, wait_time)
    return next(waiter)
def GetPattern(self):
    """Return a tuple of Stop objects, in the order visited."""
    return tuple(stop_time.stop for stop_time in self.GetStopTimes())
def strip_required_prefix(string, prefix):
    """>>> strip_required_prefix('abcdef', 'abc')
    'def'
    >>> strip_required_prefix('abcdef', '123')
    Traceback (most recent call last):
    AssertionError: String starts with 'abc', not '123'
    """
    if not string.startswith(prefix):
        raise AssertionError('String starts with %r, not %r' % (string[:len(prefix)], prefix))
    return string[len(prefix):]
def read_feature(self, dataset, fid):
    """Retrieve (read) a feature in a dataset.

    Parameters
    ----------
    dataset : str
        The dataset id.
    fid : str
        The feature id.

    Returns
    -------
    request.Response
        The response contains a GeoJSON representation of the feature.
    """
    template = URITemplate(self.baseuri + '/{owner}/{did}/features/{fid}')
    uri = template.expand(owner=self.username, did=dataset, fid=fid)
    return self.session.get(uri)
def set_white(self, brightness, colourtemp):
    """Set white coloured theme of an rgb bulb.

    Args:
        brightness (int): Value for the brightness (25-255).
        colourtemp (int): Value for the colour temperature (0-255).
    """
    if brightness < 25 or brightness > 255:
        raise ValueError("The brightness needs to be between 25 and 255.")
    if colourtemp < 0 or colourtemp > 255:
        raise ValueError("The colour temperature needs to be between 0 and 255.")
    dps = {self.DPS_INDEX_MODE: self.DPS_MODE_WHITE,
           self.DPS_INDEX_BRIGHTNESS: brightness,
           self.DPS_INDEX_COLOURTEMP: colourtemp}
    payload = self.generate_payload(SET, dps)
    return self._send_receive(payload)
def update_instrument_config(instrument, measured_center) -> Tuple[Point, float]:
    """Update config and pose tree with instrument's x and y offsets
    and tip length based on delta between probe center and measured_center,
    persist updated config and return it.

    NOTE(review): the annotated return type ``Tuple[Point, float]`` does not
    match the returned ``robot.config`` -- confirm which is intended.
    """
    from copy import deepcopy
    from opentrons.trackers.pose_tracker import update
    robot = instrument.robot
    config = robot.config
    instrument_offset = deepcopy(config.instrument_offset)
    dx, dy, dz = array(measured_center) - config.tip_probe.center
    log.debug("This is measured probe center dx {}".format(Point(dx, dy, dz)))
    # any Z offset will adjust the tip length, so instruments have Z = 0 offset
    old_x, old_y, _ = instrument_offset[instrument.mount][instrument.type]
    instrument_offset[instrument.mount][instrument.type] = (old_x - dx, old_y - dy, 0.0)
    tip_length = deepcopy(config.tip_length)
    tip_length[instrument.name] = tip_length[instrument.name] + dz
    config = config._replace(instrument_offset=instrument_offset)._replace(tip_length=tip_length)
    robot.config = config
    log.debug("Updating config for {} instrument".format(instrument.mount))
    # Persist to disk so the calibration survives restarts.
    robot_configs.save_robot_settings(config)
    # Shift the instrument pose by the measured XY delta (Z handled via tip length).
    new_coordinates = change_base(robot.poses, src=instrument, dst=instrument.instrument_mover) - Point(dx, dy, 0.0)
    robot.poses = update(robot.poses, instrument, new_coordinates)
    return robot.config
def _connect_sudo(spec):
    """Return ContextService arguments for sudo as a become method."""
    connection_kwargs = {
        'username': spec.become_user(),
        'password': spec.become_pass(),
        'python_path': spec.python_path(),
        'sudo_path': spec.become_exe(),
        'connect_timeout': spec.timeout(),
        'sudo_args': spec.sudo_args(),
        'remote_name': get_remote_name(spec),
    }
    return {'method': 'sudo', 'enable_lru': True, 'kwargs': connection_kwargs}
def hwvtep_activate_hwvtep(self, **kwargs):
    """Activate the hwvtep.

    Args:
        name (str): overlay_gateway name
        callback (function): A function executed upon completion of the
            method.

    Returns:
        Return value of `callback`.

    Raises:
        None
    """
    gateway_name = kwargs.pop('name')
    # Resolve the generated configuration builder on the tunnels binding.
    activate = getattr(self._brocade_tunnels, 'overlay_gateway_activate')
    config = activate(name=gateway_name)
    return self._callback(config)
def revoke(self, auth, codetype, code, defer=False):
    """Given an activation code, the associated entity is revoked after
    which the activation code can no longer be used.

    Args:
        auth: Takes the owner's cik
        codetype: The type of code to revoke (client | share)
        code: Code specified by <codetype> (cik | share-activation-code)
    """
    arguments = [codetype, code]
    return self._call('revoke', auth, arguments, defer)
def register(self, func, singleton=False, threadlocal=False, name=None):
    """Register a dependency function and return it unchanged."""
    func._giveme_singleton = singleton
    func._giveme_threadlocal = threadlocal
    key = func.__name__ if name is None else name
    self._registered[key] = func
    return func
def rsa_base64_sign_str(self, plain, b64=True):
    """RSA-sign ``plain`` (via an SHA digest) and optionally base64-encode
    the signature.

    :param plain: the message to sign (str or bytes).
    :param b64: when True (default), return the signature base64-encoded.
    :return: the signature as str.
    """
    with open(self.key_file) as fp:
        key_ = RSA.importKey(fp.read())
    # Normalize to bytes before hashing (py2/py3 compatible).
    plain = helper.to_bytes(plain)
    h = SHA.new(plain)
    cipher = pkcs.new(key_).sign(h)
    cip = base64.b64encode(cipher) if b64 else cipher
    return helper.to_str(cip)
def _compute_anom_data_using_window(self):
    """Compute anomaly scores using a lagging window.

    Each point is scored against the window of the preceding
    ``lag_window_size`` values (or all preceding values early in the
    series); scores are normalized by the series-wide standard deviation
    when it is non-zero.
    """
    anom_scores = {}
    values = self.time_series.values
    stdev = numpy.std(values)
    for i, (timestamp, value) in enumerate(self.time_series_items):
        if i < self.lag_window_size:
            # Not enough history yet: use everything up to this point.
            anom_score = self._compute_anom_score(values[:i + 1], value)
        else:
            anom_score = self._compute_anom_score(values[i - self.lag_window_size: i + 1], value)
        if stdev:
            anom_scores[timestamp] = anom_score / stdev
        else:
            # Constant series: leave the raw score un-normalized.
            anom_scores[timestamp] = anom_score
    self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
def relpath(path, start=None):
    """Return a relative file path to path either from the
    current directory or from an optional start directory.

    For storage objects, "path" and "start" are relative to
    storage root.

    "/" are not stripped on storage objects path. The ending slash is
    required on some storage to signify that target is a directory.

    Equivalent to "os.path.relpath".

    Args:
        path (path-like object): Path or URL.
        start (path-like object): Relative from this optional directory.
            Default to "os.curdir" for local files.

    Returns:
        str: Relative path.
    """
    relative = get_instance(path).relpath(path)
    if not start:
        return relative
    # Storage relative path: replace "\" by "/" for Windows.
    return os_path_relpath(relative, start=start).replace('\\', '/')
def is_compliant(self, path):
    """Checks if the directory is compliant.

    Used to determine if the path specified and all of its children
    directories are in compliance with the check itself.

    :param path: the directory path to check
    :returns: True if the directory tree is compliant, otherwise False.
    :raises ValueError: if ``path`` is not a directory.
    """
    if not os.path.isdir(path):
        log('Path specified %s is not a directory.' % path, level=ERROR)
        raise ValueError("%s is not a directory." % path)
    if not self.recursive:
        return super(DirectoryPermissionAudit, self).is_compliant(path)
    compliant = True
    for root, dirs, _ in os.walk(path):
        # NOTE(review): only leaf directories (those with no subdirectories)
        # are checked here; intermediate directories are skipped -- confirm
        # this is intentional before changing.
        if len(dirs) > 0:
            continue
        if not super(DirectoryPermissionAudit, self).is_compliant(root):
            compliant = False
            continue
    return compliant
def update_object(self, form, obj):
    """Saves the new value to the target object."""
    target_field = form.cleaned_data['name']
    new_value = form.cleaned_data['value']
    setattr(obj, target_field, new_value)
    if CAN_UPDATE_FIELDS:
        # Persist only the single changed column when supported.
        obj.save(update_fields=[target_field])
    else:
        obj.save()
    payload = json.dumps({'status': 'success', })
    return HttpResponse(payload, content_type="application/json")
def getMemoryStats(self):
    """Return JVM Memory Stats for Apache Tomcat Server.

    @return: Dictionary of memory utilization stats.
    """
    if self._statusxml is None:
        # Lazily fetch the status XML on first use.
        self.initStats()
    mem_node = self._statusxml.find('jvm/memory')
    if mem_node is None:
        return {}
    return {key: util.parse_value(val) for (key, val) in mem_node.items()}
def recurseforumcontents(parser, token):
    """Iterates over the content nodes and renders the contained forum block for each node."""
    tag_args = token.contents.split()
    # Second token of the tag is the template variable holding the contents.
    contents_variable = template.Variable(tag_args[1])
    inner_nodes = parser.parse(('endrecurseforumcontents',))
    parser.delete_first_token()
    return RecurseTreeForumVisibilityContentNode(inner_nodes, contents_variable)
def attitude_target_send(self, time_boot_ms, type_mask, q, body_roll_rate, body_pitch_rate, body_yaw_rate, thrust, force_mavlink1=False):
    '''Reports the current commanded attitude of the vehicle as specified
    by the autopilot; should mirror a SET_ATTITUDE_TARGET message when the
    vehicle is controlled that way.

    time_boot_ms     : Timestamp in milliseconds since system boot (uint32_t)
    type_mask        : Bitmask of inputs to ignore: bit 1 body roll rate,
                       bit 2 body pitch rate, bit 3 body yaw rate,
                       bits 4-7 reserved, bit 8 attitude (uint8_t)
    q                : Attitude quaternion (w, x, y, z; 1,0,0,0 = no rotation) (float)
    body_roll_rate   : Body roll rate, rad/s (float)
    body_pitch_rate  : Body pitch rate, rad/s (float)
    body_yaw_rate    : Body yaw rate, rad/s (float)
    thrust           : Collective thrust, 0..1 (-1..1 for reverse-capable vehicles) (float)
    '''
    encoded = self.attitude_target_encode(
        time_boot_ms, type_mask, q,
        body_roll_rate, body_pitch_rate, body_yaw_rate, thrust)
    return self.send(encoded, force_mavlink1=force_mavlink1)
def get_last(self, table=None):
    """Return the last entry (highest ROWID) of *table*.

    :param table: table name; defaults to ``self.main_table``.
    :returns: the last row, or ``None`` if the table is empty.
    """
    if table is None:
        table = self.main_table
    # NOTE: the table name cannot be bound as a SQL parameter, so it is
    # interpolated directly; it must come from a trusted source.
    statement = 'SELECT * FROM "%s" ORDER BY ROWID DESC LIMIT 1;' % table
    return self.own_cursor.execute(statement).fetchone()
def as_boxes(self, solid=False):
    """A rough Trimesh representation of the voxels with a box
    for each filled voxel.

    Parameters
    ------------
    solid : bool, if True return boxes for sparse_solid

    Returns
    ------------
    mesh : Trimesh object made up of one box per filled cell.
    """
    filled = self.sparse_solid if solid else self.sparse_surface
    # Convert voxel indices to their center points, then box each center.
    centers = indices_to_points(indices=filled,
                                pitch=self.pitch,
                                origin=self.origin)
    return multibox(centers=centers, pitch=self.pitch)
def read(self, payloadType, elsClient):
    """Fetches the latest data for this entity from api.elsevier.com.
    Returns True if successful; else, False."""
    if elsClient:
        self._client = elsClient
    elif not self.client:
        raise ValueError('''Entity object not currently bound to elsClient instance. Call .read() with elsClient argument or set .client attribute.''')
    try:
        api_response = self.client.exec_request(self.uri)
        payload = api_response[payloadType]
        # A list payload holds multiple records; keep only the first one.
        self._data = payload[0] if isinstance(payload, list) else payload
        ## TODO: check if URI is the same, if necessary update and log warning.
        logger.info("Data loaded for " + self.uri)
        return True
    except (requests.HTTPError, requests.RequestException) as e:
        for elm in e.args:
            logger.warning(elm)
        return False
def layout_deck(self, i):
    """Stack the cards, starting at my deck's foundation, and proceeding
    by ``card_pos_hint``.

    :param i: index of the deck to lay out.
    """
    def get_dragidx(cards):
        # Index of the card currently being dragged, or None if no card
        # in this deck is being dragged.
        j = 0
        for card in cards:
            if card.dragging:
                return j
            j += 1
    # Put a None in the card list in place of the card you're
    # hovering over, if you're dragging another card. This will
    # result in an empty space where the card will go if you drop
    # it now.
    cards = list(self.decks[i])
    dragidx = get_dragidx(cards)
    if dragidx is not None:
        del cards[dragidx]
    if self.insertion_deck == i and self.insertion_card is not None:
        insdx = self.insertion_card
        if dragidx is not None and insdx > dragidx:
            # Account for the dragged card having been removed above.
            insdx -= 1
        cards.insert(insdx, None)
    if self.direction == 'descending':
        cards.reverse()
    # Work out the initial pos_hint for this deck
    (phx, phy) = get_pos_hint(self.starting_pos_hint, *self.card_size_hint)
    phx += self.deck_x_hint_step * i + self.deck_x_hint_offsets[i]
    phy += self.deck_y_hint_step * i + self.deck_y_hint_offsets[i]
    (w, h) = self.size
    (x, y) = self.pos
    # start assigning pos and size to cards
    found = self._get_foundation(i)
    if found in self.children:
        # Re-add the foundation so it sits below the cards in z-order.
        self.remove_widget(found)
    self.add_widget(found)
    for card in cards:
        if card is not None:
            if card in self.children:
                self.remove_widget(card)
            (shw, shh) = self.card_size_hint
            card.pos = (x + phx * w, y + phy * h)
            card.size = (w * shw, h * shh)
            self.add_widget(card)
        # Advance the hint even for a None placeholder, leaving a gap.
        phx += self.card_x_hint_step
        phy += self.card_y_hint_step
def _set_group(self, v, load=False):
    """Setter method for group, mapped from YANG variable /snmp_server/group (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_group is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_group() directly.
    """
    # NOTE: this method is machine-generated from the YANG model; the
    # YANGDynClass arguments mirror the model's extensions verbatim.
    if hasattr(v, "_utype"):
        # Unwrap a previously wrapped value back to its underlying type.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("group_name group_version", group.group, yang_name="group", rest_name="group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-name group-version', extensions={u'tailf-common': {u'info': u'group\tDefine a User Security Model group', u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'sort-priority': u'26', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'snmpgroup'}}), is_container='list', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'group\tDefine a User Security Model group', u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'sort-priority': u'26', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'snmpgroup'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({'error-string': """group must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("group_name group_version",group.group, yang_name="group", rest_name="group", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='group-name group-version', extensions={u'tailf-common': {u'info': u'group\tDefine a User Security Model group', u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'sort-priority': u'26', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'snmpgroup'}}), is_container='list', yang_name="group", rest_name="group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'group\tDefine a User Security Model group', u'cli-suppress-key-sort': None, u'cli-suppress-mode': None, u'sort-priority': u'26', u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-suppress-key-abbreviation': None, u'callpoint': u'snmpgroup'}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)""", })
    self.__group = t
    if hasattr(self, '_set'):
        self._set()
def _get_program_dir(name, config):
    """Retrieve directory for a program (local installs/java jars)."""
    if config is None:
        raise ValueError("Could not find directory in config for %s" % name)
    if isinstance(config, six.string_types):
        # A bare string is itself the directory.
        return config
    if "dir" in config:
        return expand_path(config["dir"])
    raise ValueError("Could not find directory in config for %s" % name)
def _delete_request ( self , url , headers , data = None ) :
"""Issue a DELETE request to the specified endpoint with the data provided .
: param url : str
: pararm headers : dict
: param data : dict""" | return self . _session . delete ( url , headers = headers , data = data ) |
def connect(self, coro):
    """Connect the coroutine `coro` to the signal. The coroutine must
    return a true value, unless it wants to be disconnected from the
    signal.

    .. note::

       This is different from the return value convention with
       :attr:`AdHocSignal.STRONG` and :attr:`AdHocSignal.WEAK`.

    :meth:`connect` returns a token which can be used with
    :meth:`disconnect` to disconnect the coroutine.
    """
    self.logger.debug("connecting %r", coro)
    token = self._connect(coro)
    return token
def generate_random_nhs_number() -> int:
    """Returns a random valid NHS number, as an ``int``."""
    while True:
        # First digit 1-9 so the number never starts with zero.
        digits = [random.randint(1, 9)]
        digits += [random.randint(0, 9) for _ in range(8)]
        # ... length now 9
        check_digit = nhs_check_digit(digits)
        # A check digit of 10 means the 9-digit prefix is invalid; retry.
        if check_digit != 10:
            break
    digits.append(check_digit)
    return int("".join(str(d) for d in digits))
def check_undelivered(to=None):
    """Sends a notification email if any undelivered dispatches.

    Returns undelivered (failed) dispatches count.

    :param str|unicode to: Recipient address. If not set Django ADMINS setting is used.
    :rtype: int
    """
    failed_count = Dispatch.objects.filter(dispatch_status=Dispatch.DISPATCH_STATUS_FAILED).count()
    if failed_count:
        # Imported lazily to avoid import cycles at module load time.
        from sitemessage.shortcuts import schedule_email
        from sitemessage.messages.email import EmailTextMessage
        if to is None:
            # ADMINS is a sequence of (name, address) pairs.
            admins = settings.ADMINS
            if admins:
                to = list(dict(admins).values())
        if to:
            # Dedicated high priority so only this alert is flushed below.
            priority = 999
            register_message_types(EmailTextMessage)
            schedule_email('You have %s undelivered dispatch(es) at %s' % (failed_count, get_site_url()), subject='[SITEMESSAGE] Undelivered dispatches', to=to, priority=priority)
            send_scheduled_messages(priority=priority)
    return failed_count
def load_trace(path, *args, **kwargs):
    """Read a packet trace file, return a :class:`wltrace.common.WlTrace` object.

    This function first reads the file's magic (first ``MAGIC_LEN`` bytes),
    automatically determines the file type, and calls the appropriate
    handler to process the file.

    Args:
        path (str): the file's path to be loaded.

    Returns:
        ``WlTrace`` object.

    Raises:
        ValueError: if the file magic matches no registered handler.
    """
    with open(path, 'rb') as f:
        magic = f.read(MAGIC_LEN)
    try:
        handler = FILE_TYPE_HANDLER[magic]
    except KeyError:
        # ValueError is more precise than the previous bare Exception for
        # bad input, and remains backward-compatible for callers catching
        # Exception.
        raise ValueError('Unknown file magic: %s' % (binascii.hexlify(magic)))
    # The handler re-opens the file by path, so it is safe to have closed it.
    return handler(path, *args, **kwargs)
def to_base64(self, skip=()):
    """Serialize this object to JSON and return it base64-encoded.

    (The previous docstring claimed "Construct from base64-encoded JSON",
    but this method encodes *to* base64; it does not decode.)

    :param skip: field names to omit from the JSON serialization.
    :returns: base64-encoded bytes of the UTF-8 JSON representation.
    """
    return base64.b64encode(ensure_bytes(self.to_json(skip=skip), encoding='utf-8', ))
def update_history(self) -> None:
    """Update messaging history on disk.

    :returns: None
    """
    self.log.debug(f"Saving history. History is: \n{self.history}")
    jsons = []
    for item in self.history:
        # NOTE(review): item.__dict__ is the live attribute dict, so the
        # "output_records" assignment below also mutates the history item
        # itself — confirm this side effect is intended (a dict() copy
        # would avoid it).
        json_item = item.__dict__
        # Convert sub-entries into JSON as well.
        json_item["output_records"] = self._parse_output_records(item)
        jsons.append(json_item)
    # Ensure the file exists before rewriting it.
    if not path.isfile(self.history_filename):
        open(self.history_filename, "a+").close()
    with open(self.history_filename, "w") as f:
        json.dump(jsons, f, default=lambda x: x.__dict__.copy(), sort_keys=True, indent=4)
        f.write("\n")
def _kl_divergence ( self , summary_freq , doc_freq ) :
"""Note : Could import scipy . stats and use scipy . stats . entropy ( doc _ freq , summary _ freq )
but this gives equivalent value without the import""" | sum_val = 0
for w in summary_freq :
frequency = doc_freq . get ( w )
if frequency : # missing or zero = no frequency
sum_val += frequency * math . log ( frequency / summary_freq [ w ] )
return sum_val |
def _legislator_objects(self):
    '''A cache of dereferenced legislator objects.'''
    ids = []
    for key in ('yes', 'no', 'other'):
        ids.extend(vote['leg_id'] for vote in self[key + '_votes'])
    legislators = list(db.legislators.find({'_all_ids': {'$in': ids}}))
    id_cache = {}
    for legislator in legislators:
        # Handy to keep a reference to the vote on each legislator.
        legislator.vote = self
        for _id in legislator['_all_ids']:
            id_cache[_id] = legislator
    return id_cache
def append_data(self, name, initial_content, size, readonly=False, sort="unknown"):  # pylint:disable=unused-argument
    """Append a new data entry into the binary with specific name, content, and size.

    :param str name: Name of the data entry. Will be used as the label.
    :param bytes initial_content: The initial content of the data entry.
    :param int size: Size of the data entry.
    :param bool readonly: If the data entry belongs to the readonly region.
    :param str sort: Type of the data.
    :return: None
    """
    section_name = ".rodata" if readonly else '.data'
    if initial_content is None:
        initial_content = b""
    # Pad the content with NUL bytes up to the requested size.
    padded = initial_content.ljust(size, b"\x00")
    entry = Data(self, memory_data=None, section_name=section_name,
                 name=name, initial_content=padded, size=size, sort=sort)
    if readonly:
        self.extra_rodata.append(entry)
    else:
        self.extra_data.append(entry)
def client_details(self, *args):
    """Display known details about a given client"""
    self.log(_('Client details:', lang='de'))
    target = self._clients[args[0]]
    self.log('UUID:', target.uuid, 'IP:', target.ip, 'Name:', target.name,
             'User:', self._users[target.useruuid], pretty=True)
def NOAJS_metric(bpmn_graph):
    """Returns the value of the NOAJS metric (Number of Activities, joins
    and splits) for the BPMNDiagramGraph instance.

    :param bpmn_graph: an instance of BpmnDiagramGraph representing BPMN model.
    """
    # The metric is simply the sum of all activities and all gateways.
    return all_activities_count(bpmn_graph) + all_gateways_count(bpmn_graph)
def upload_part_copy(Bucket=None, CopySource=None, CopySourceIfMatch=None, CopySourceIfModifiedSince=None, CopySourceIfNoneMatch=None, CopySourceIfUnmodifiedSince=None, CopySourceRange=None, Key=None, PartNumber=None, UploadId=None, SSECustomerAlgorithm=None, SSECustomerKey=None, SSECustomerKeyMD5=None, CopySourceSSECustomerAlgorithm=None, CopySourceSSECustomerKey=None, CopySourceSSECustomerKeyMD5=None, RequestPayer=None):
    """Uploads a part by copying data from an existing object as data source.

    NOTE: auto-generated documentation stub mirroring the AWS S3
    ``UploadPartCopy`` API; the body is intentionally empty.

    Required parameters: ``Bucket``, ``Key``, ``UploadId`` (the multipart
    upload being written to), ``PartNumber`` (integer, 1..10000) and
    ``CopySource`` — either the string ``'{bucket}/{key}'`` (optionally
    ``'...?versionId={id}'``) or, preferred, the dict
    ``{'Bucket': ..., 'Key': ..., 'VersionId': ...}`` (``VersionId`` optional).

    Conditional-copy parameters: ``CopySourceIfMatch`` /
    ``CopySourceIfNoneMatch`` (ETag comparison), ``CopySourceIfModifiedSince``
    / ``CopySourceIfUnmodifiedSince`` (datetime), and ``CopySourceRange``
    (``bytes=first-last``, zero-based inclusive; a range may only be copied
    when the source object is larger than 5 GB).

    Encryption parameters: ``SSECustomerAlgorithm`` (e.g. ``AES256``),
    ``SSECustomerKey`` and ``SSECustomerKeyMD5`` describe the
    customer-provided key for the destination (must match the key given at
    multipart-upload initiation); the ``CopySourceSSECustomer*`` equivalents
    decrypt the source object. Keys are used then discarded, never stored.

    ``RequestPayer='requester'`` confirms the requester accepts the request
    charges for requester-pays buckets.

    :rtype: dict
    :return: ``{'CopySourceVersionId': str, 'CopyPartResult': {'ETag': str,
        'LastModified': datetime}, 'ServerSideEncryption': 'AES256'|'aws:kms',
        'SSECustomerAlgorithm': str, 'SSECustomerKeyMD5': str,
        'SSEKMSKeyId': str, 'RequestCharged': 'requester'}``

    See also: AWS API Documentation for S3 UploadPartCopy.
    """
    pass
def set_location(self, uri, size, checksum, storage_class=None):
    """Set only URI location of for object.

    Useful to link files on externally controlled storage. If a file
    instance has already been set, this methods raises an
    ``FileInstanceAlreadySetError`` exception.

    NOTE(review): no explicit "already set" check is visible in this body —
    presumably the ``self.file`` assignment goes through a setter that
    raises ``FileInstanceAlreadySetError``; confirm.

    :param uri: Full URI to object (which can be interpreted by the storage
        interface).
    :param size: Size of file.
    :param checksum: Checksum of file.
    :param storage_class: Storage class where file is stored ()
    :returns: self, to allow call chaining.
    """
    self.file = FileInstance()
    self.file.set_uri(uri, size, checksum, storage_class=storage_class)
    # Register the new instance with the current database session; the
    # caller is responsible for committing.
    db.session.add(self.file)
    return self
def delete(python_data: LdapObject, database: Optional[Database] = None) -> None:
    """Delete a LdapObject from the database."""
    dn = python_data.get_as_single('dn')
    # A missing DN means the object was never persisted; refuse to proceed.
    assert dn is not None
    resolved = get_database(database)
    resolved.connection.delete(dn)
def listItem(node):
    """An item in a list"""
    item = nodes.list_item()
    # Render the node's markdown children into the list item.
    for child in MarkDown(node):
        item += child
    return item
def get_releasetype(self, ):
    """Return the currently selected releasetype

    :returns: the selected releasetype, or None if nothing is checked
    :rtype: str
    :raises: None
    """
    for releasetype, button in self._releasetype_button_mapping.items():
        if button.isChecked():
            return releasetype
    return None
def check_hierarchy(rdf, break_cycles, keep_related, mark_top_concepts, eliminate_redundancy):
    """Check for, and optionally fix, problems in the skos:broader hierarchy
    using a recursive depth first search algorithm.

    (Parameter names below corrected to match the actual signature.)

    :param Graph rdf: An rdflib.graph.Graph object.
    :param bool break_cycles: Break detected cycles.
    :param bool keep_related: If False, remove skos:related relations
        overlapping with skos:broaderTransitive.
    :param bool mark_top_concepts: Mark top-level concepts.
    :param bool eliminate_redundancy: Remove skos:broader between two
        concepts otherwise connected by skos:broaderTransitive.
    """
    starttime = time.time()
    if check.hierarchy_cycles(rdf, break_cycles):
        logging.info("Some concepts not reached in initial cycle detection. " "Re-checking for loose concepts.")
    setup_top_concepts(rdf, mark_top_concepts)
    # keep_related is inverted: the check removes disjoint relations.
    check.disjoint_relations(rdf, not keep_related)
    check.hierarchical_redundancy(rdf, eliminate_redundancy)
    endtime = time.time()
    logging.debug("check_hierarchy took %f seconds", (endtime - starttime))
def initialize(cls):
    """Initialize the TLS/SSL platform to prepare it for
    making AMQP requests. This only needs to happen once."""
    if not cls.initialized:
        _logger.debug("Initializing platform.")
        c_uamqp.platform_init()
        # Flag so subsequent calls become no-ops.
        cls.initialized = True
    else:
        _logger.debug("Platform already initialized.")
def id_by_index(index, resources):
    """Helper method to fetch the id or address of a resource by its index

    Args:
        index (integer): The index of the target resource
        resources (list of objects): The resources to be paginated

    Returns:
        str: The address or header_signature of the resource,
            returns an empty string if not found
    """
    if not 0 <= index < len(resources):
        return ''
    resource = resources[index]
    # Transactions/batches carry header_signature; state entries an address.
    try:
        return resource.header_signature
    except AttributeError:
        return resource.address
def visit_ImportFrom(self, node):
    """Register imported modules and usage symbols.

    Handles ``from pkg.mod import name [as alias]``: records the top-level
    package in ``self.imports`` and maps each (possibly aliased) name to its
    full dotted path in ``self.symbols``.

    :param node: an ``ast.ImportFrom`` node.
    :returns: None (children are not visited further).
    """
    if node.module is None:
        # Relative import such as ``from . import x``: ast sets module to
        # None, which previously crashed with an AttributeError.
        module_path = ()
    else:
        module_path = tuple(node.module.split('.'))
        self.imports.add(module_path[0])
    for alias in node.names:
        path = module_path + (alias.name, )
        self.symbols[alias.asname or alias.name] = path
    self.update = True
    return None
def slice_time(begin, end=None, duration=datetime.timedelta(days=2)):
    """
    :param begin: datetime
    :param end: datetime
    :param duration: timedelta
    :return: a generator for a set of timeslices of the given duration
    """
    def current_limit():
        # Never slice past "now" (or past `end`, when one is given).
        now_ms = unix_time(datetime.datetime.now()) * 1000
        return now_ms if not end else min(now_ms, int(unix_time(end) * 1000))

    step_ms = int(duration.total_seconds() * 1000)
    window_start = int(unix_time(begin) * 1000)
    window_end = window_start + step_ms
    limit = current_limit()
    while window_end < limit:
        yield TimeSlice(window_start, window_end)
        window_start = window_end
        window_end += step_ms
    # Refresh the limit: time has passed while the caller consumed slices.
    yield TimeSlice(window_start, current_limit())
def extend(self, cli_api, command_prefix="", sub_command="", **kwargs):
    """Extends this CLI api with the commands present in the provided cli_api object"""
    if sub_command and command_prefix:
        raise ValueError('It is not currently supported to provide both a command_prefix and sub_command')
    if sub_command:
        # Mount the whole API under a single sub-command name.
        self.commands[sub_command] = cli_api
        return
    for name, command in cli_api.commands.items():
        self.commands["{}{}".format(command_prefix, name)] = command
def estimategaps(args):
    """%prog estimategaps input.bed

    Estimate sizes of inter-scaffold gaps. The AGP file generated by path()
    command has unknown gap sizes with a generic number of Ns (often 100 Ns).
    The AGP file `input.chr.agp` will be modified in-place.
    """
    p = OptionParser(estimategaps.__doc__)
    p.add_option("--minsize", default=100, type="int", help="Minimum gap size")
    p.add_option("--maxsize", default=500000, type="int", help="Maximum gap size")
    p.add_option("--links", default=10, type="int", help="Only use linkage grounds with matchings more than")
    p.set_verbose(help="Print details for each gap calculation")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    inputbed, = args
    # Derive companion file names from the input bed prefix.
    pf = inputbed.rsplit(".", 1)[0]
    agpfile = pf + ".chr.agp"
    bedfile = pf + ".lifted.bed"
    cc = Map(bedfile, scaffold_info=True)
    agp = AGP(agpfile)
    minsize, maxsize = opts.minsize, opts.maxsize
    links = opts.links
    verbose = opts.verbose
    outagpfile = pf + ".estimategaps.agp"
    fw = must_open(outagpfile, "w")
    for ob, components in agp.iter_object():
        components = list(components)
        s = Scaffold(ob, cc)
        mlg_counts = s.mlg_counts
        gaps = [x for x in components if x.is_gap]
        gapsizes = [None] * len(gaps)
        # master
        for mlg, count in mlg_counts.items():
            # Skip linkage groups with too few matchings to be reliable.
            if count < links:
                continue
            g = GapEstimator(cc, agp, ob, mlg)
            g.compute_all_gaps(minsize=minsize, maxsize=maxsize, verbose=verbose)
            # Merge evidence from this mlg into master
            assert len(g.gapsizes) == len(gaps)
            for i, gs in enumerate(gapsizes):
                gg = g.gapsizes[i]
                if gs is None:
                    gapsizes[i] = gg
                elif gg:
                    # Keep the smallest (most conservative) estimate.
                    gapsizes[i] = min(gs, gg)
        print(gapsizes)
        # Modify AGP
        i = 0
        for x in components:
            if x.is_gap:
                # Fall back to minsize when no estimate was obtained.
                x.gap_length = gapsizes[i] or minsize
                x.component_type = 'U' if x.gap_length == 100 else 'N'
                i += 1
            print(x, file=fw)
    fw.close()
    reindex([outagpfile, "--inplace"])
def plot_confidence(self, lower=2.5, upper=97.5, plot_limits=None, fixed_inputs=None, resolution=None, plot_raw=False, apply_link=False, visible_dims=None, which_data_ycols='all', label='gp confidence', predict_kw=None, **kwargs):
    """Plot the confidence interval between the percentiles lower and upper.
    E.g. the 95% confidence interval is $2.5, 97.5$.

    Note: Only implemented for one dimension!

    You can deactivate the legend for this one plot by supplying None to label.
    Give the Y_metadata in the predict_kw if you need it.

    :param float lower: the lower percentile to plot
    :param float upper: the upper percentile to plot
    :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
    :type plot_limits: np.array
    :param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input dimension i should be set to value v.
    :type fixed_inputs: a list of tuples
    :param int resolution: The resolution of the prediction [default:200]
    :param bool plot_raw: plot the latent function (usually denoted f) only?
    :param bool apply_link: whether to apply the link function of the GP to the raw prediction.
    :param array-like visible_dims: which columns of the input X (!) to plot (array-like or list of ints)
    :param array-like which_data_ycols: which columns of the output y (!) to plot (array-like or list of ints)
    :param dict predict_kw: the keyword arguments for the prediction. If you want to plot a specific kernel give dict(kern=<specific kernel>) in here
    """
    canvas, kwargs = pl().new_canvas(**kwargs)
    ycols = get_which_data_ycols(self, which_data_ycols)
    # Only the inputs are needed here; outputs are handled by the helpers.
    X = get_x_y_var(self)[0]
    helper_data = helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution)
    # helper_data[2] is the grid of prediction inputs.
    helper_prediction = helper_predict_with_model(self, helper_data[2], plot_raw, apply_link, (lower, upper), ycols, predict_kw)
    plots = _plot_confidence(self, canvas, helper_data, helper_prediction, label, **kwargs)
    # A None label suppresses the legend for this plot.
    return pl().add_to_canvas(canvas, plots, legend=label is not None)
def bfd(items, targets, **kwargs):
    """Best-Fit Decreasing.

    Sort the items by weight, heaviest first, then delegate to the
    best-fit packer. Complexity O(n^2).
    """
    # Pair each item with its weight, then order by weight descending.
    weighted = sorted(
        zip(items, weight(items, **kwargs)),
        key=operator.itemgetter(1),
        reverse=True,
    )
    ordered_items = [pair[0] for pair in weighted]
    return bf(ordered_items, targets, **kwargs)
def key_diff(key_bundle, key_defs):
    """Compute the changes needed to bring a KeyBundle in line with a spec.

    Compares the keys currently held by *key_bundle* against the key
    definitions in *key_defs* and returns a dictionary describing the
    difference.

    :param key_bundle: The original KeyBundle
    :param key_defs: A set of key definitions
    :return: A dictionary with the possible keys 'add' and 'del'. The values
        are lists of :py:class:`cryptojwt.jwk.JWK` instances: 'del' holds
        keys present in the bundle but not wanted by any definition; 'add'
        holds keys required by leftover definitions but missing from the
        bundle.
    """
    keys = key_bundle.get()
    diff = {}
    # My own sorted copy -- so the caller's key_defs list is left untouched.
    key_defs = order_key_defs(key_defs)[:]
    used = []
    for key in keys:
        match = False
        for kd in key_defs:
            # A definition matches only if use, kty, (for EC) crv, and an
            # optionally pinned kid all agree with the existing key.
            if key.use not in kd['use']:
                continue
            if key.kty != kd['type']:
                continue
            if key.kty == 'EC':
                # special test only for EC keys
                if key.crv != kd['crv']:
                    continue
            try:
                _kid = kd['kid']
            except KeyError:
                # Definition does not pin a kid; any kid is acceptable.
                pass
            else:
                if key.kid != _kid:
                    continue
            match = True
            used.append(kd)
            # Each definition can satisfy at most one existing key; removing
            # it here is safe because we break out of the loop immediately.
            key_defs.remove(kd)
            break
        if not match:
            # No definition wants this key -> schedule it for deletion.
            try:
                diff['del'].append(key)
            except KeyError:
                diff['del'] = [key]
    if key_defs:
        # Definitions left unmatched describe keys that must be created.
        _kb = build_key_bundle(key_defs)
        diff['add'] = _kb.keys()
    return diff
def _match_cubes(ccube_clean, ccube_dirty, bexpcube_clean, bexpcube_dirty, hpx_order):
    """Match the HEALPIX scheme and order of all the input cubes.

    All cubes are regraded to *hpx_order* (preserving counts) and the
    dirty/exposure cubes are converted to the same nest/ring scheme as
    ``ccube_clean``.

    :return: dict with keys 'ccube_clean', 'ccube_dirty', 'bexpcube_clean'
        and 'bexpcube_dirty' holding the aligned cubes.
    """
    def _to_order(cube):
        # Regrade to the target order; preserve_counts keeps totals intact.
        if hpx_order == cube.hpx.order:
            return cube
        return cube.ud_grade(hpx_order, preserve_counts=True)

    def _match_scheme(cube):
        # Convert to the nest/ring scheme of the (original) clean cube.
        if cube.hpx.nest != ccube_clean.hpx.nest:
            return cube.swap_scheme()
        return cube

    ccube_clean_at_order = _to_order(ccube_clean)
    ccube_dirty_at_order = _match_scheme(_to_order(ccube_dirty))
    bexpcube_clean_at_order = _match_scheme(_to_order(bexpcube_clean))
    bexpcube_dirty_at_order = _match_scheme(_to_order(bexpcube_dirty))

    return dict(ccube_clean=ccube_clean_at_order,
                ccube_dirty=ccube_dirty_at_order,
                bexpcube_clean=bexpcube_clean_at_order,
                bexpcube_dirty=bexpcube_dirty_at_order)
def create_event(self, register=False):
    """Create an asyncio.Event inside the emulation loop.

    This method exists as a convenience to create an Event object that is
    associated with the correct EventLoop(). If you pass register=True,
    then the event will be registered as an event that must be set for the
    EmulationLoop to be considered idle, i.e. wait_idle() will block until
    this event is set.

    An example use is an event signaling that a tile has completed
    restarting itself: the reset() rpc cannot block until the tile has
    initialized (it may need to send its own rpcs as part of
    initialization), but we still want wait_idle() to block until the
    reset process is complete. The tile sets its self.initialized Event
    when it has finished rebooting and registers that event.

    Args:
        register (bool): Whether to register the event so that wait_idle
            blocks until it is set.

    Returns:
        asyncio.Event: The Event object.
    """
    try:
        # Python < 3.10: bind the event to the emulation loop explicitly.
        event = asyncio.Event(loop=self._loop)
    except TypeError:
        # BUG FIX: the ``loop`` argument was removed in Python 3.10; the
        # event now binds to the running loop lazily on first use.
        event = asyncio.Event()
    if register:
        self._events.add(event)
    return event
def delete(path, version=-1, recursive=False, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
    '''Delete a znode.

    path
        path to znode
    version
        only delete if version matches (Default: -1, always matches)
    recursive
        also delete child znodes (Default: False)
    profile
        Configured Zookeeper profile to authenticate with (Default: None)
    hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
    scheme
        Scheme to authenticate with (Default: 'digest')
    username
        Username to authenticate (Default: None)
    password
        Password to authenticate (Default: None)
    default_acl
        Default acls to assign if a node is created in this connection (Default: None)

    CLI Example:

    .. code-block:: bash

        salt minion1 zookeeper.delete /test/name profile=prod
    '''
    zk_conn = _get_zk_conn(
        profile=profile,
        hosts=hosts,
        scheme=scheme,
        username=username,
        password=password,
        default_acl=default_acl,
    )
    return zk_conn.delete(path, version, recursive)
def unicode(self, b, encoding=None):
    """Convert a byte string to unicode, using string_encoding and decode_errors.

    Arguments:
      b: a byte string.
      encoding: the name of an encoding. Defaults to the string_encoding
        attribute for this instance.

    Raises:
      TypeError: Because this method calls Python's built-in unicode()
        function, this method raises the following exception if the
        given string is already unicode:
          TypeError: decoding Unicode is not supported
    """
    if encoding is None:
        encoding = self.string_encoding
    # TODO: Wrap UnicodeDecodeErrors with a message about setting
    # the string_encoding and decode_errors attributes.
    # NOTE(review): relies on the Python 2 ``unicode`` builtin (removed in
    # Python 3) -- confirm this module is Python 2 only.
    return unicode(b, encoding, self.decode_errors)
def setCheckedRecords(self, records, column=0, parent=None):
    """Set the checked items based on the inputed list of records.

    Walks the top-level items (or, when *parent* is given, that item's
    children); any item whose ``record()`` is found in *records* is checked
    in *column* and its subtree is processed recursively.

    :param records: [<orb.Table>, ..]
    :param column: int column to set the check state on
    :param parent: <QTreeWidgetItem> || None
    """
    # Collect the items at the current level. The original implementation
    # duplicated the loop body for the top-level and child cases.
    if parent is None:
        items = [self.topLevelItem(i) for i in range(self.topLevelItemCount())]
    else:
        items = [parent.child(c) for c in range(parent.childCount())]

    for item in items:
        try:
            has_record = item.record() in records
        except AttributeError:
            # Item type does not expose record(); treat as no match.
            has_record = False
        if has_record:
            item.setCheckState(column, Qt.Checked)
            # Descend only into matched items (mirrors original behavior).
            self.setCheckedRecords(records, column, item)
def get_tree(ident_hash, baked=False):
    """Return a tree structure of the Collection identified by *ident_hash*.

    :raises NotFound: when no tree exists for the given ident_hash.
    """
    doc_id, doc_version = get_id_n_version(ident_hash)
    sql = _get_sql('get-tree.sql')
    params = dict(id=doc_id, version=doc_version, baked=baked)
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute(sql, params)
            row = cursor.fetchone()
            # No row at all, or a row holding NULL, both mean "not found".
            if row is None or row[0] is None:
                raise NotFound(ident_hash)
            return row[0]
def __collapse_stranded(s, proc_strands, names=False, verbose=False):
    """Collapse sorted genomic intervals on the given strand(s) into their union.

    Given a list of genomic intervals with chromosome, start, end and strand
    fields, collapse those intervals whose strand is in *proc_strands* into a
    set of non-overlapping intervals. Intervals on other strands are ignored.
    Intervals must be sorted by chromosome and then start coordinate.

    :note: O(n) time, O(n) space.
    :return: list of intervals that define the collapsed regions. These are
        all new objects; no existing object from *s* is returned or altered.
        Returned regions all have score 0 and name "X" unless *names* is True.
    :param s: list of genomic regions to collapse.
    :param proc_strands: set of acceptable strands; must be one of
        set("+"), set("-") or set(["+", "-"]).
    :param names: if True, accumulate region names (';'-joined). If False,
        all output regions have name "X".
    :param verbose: accepted for interface compatibility; not referenced in
        this implementation.
    :raise GenomicIntervalError: if the input regions are not correctly
        sorted (chromosome then start) or *proc_strands* is unrecognised.
    """
    def get_first_matching_index(s, proc_strands):
        # Index of the first interval on an acceptable strand, else None.
        for i in range(0, len(s)):
            if s[i].strand in proc_strands:
                return i
        return None

    if proc_strands not in [set("+"), set("-"), set(["+", "-"])]:
        raise GenomicIntervalError("failed collapsing intervals on strands '" + "," . join(proc_strands) + "''; unrecognised " + "strand symbols")
    first_index = get_first_matching_index(s, proc_strands)
    if first_index is None:
        # No interval on an acceptable strand; nothing to collapse.
        return []
    res = []
    # Seed the running region with a copy of the first matching interval.
    current = copy.copy(s[first_index])
    # Output strand is '+' unless collapsing only the '-' strand.
    current.strand = '+' if (proc_strands == set("+") or proc_strands == set(["+", "-"])) else '-'
    current.score = 0
    # NOTE(review): set(name) over a string yields a set of its characters,
    # not {name} -- confirm interval names here are iterables of name parts.
    current.name = "X" if not names else set(s[first_index].name)
    for i in range(first_index + 1, len(s)):
        if s[i].strand not in proc_strands:
            continue
        # make sure things are sorted..
        if (s[i].chrom < s[i - 1].chrom) or (s[i].chrom == s[i - 1].chrom and s[i].start < s[i - 1].start):
            raise GenomicIntervalError("collapsing regions failed. saw this " + "region: " + str(s[i - 1]) + " before this " + "one: " + str(s[i]))
        # because of sorting order, we know that nothing else exists with
        # start less than s[i] which we haven't already seen.
        if s[i].start > current.end or s[i].chrom != current.chrom:
            # Gap or new chromosome: emit the running region, start a new one.
            if names:
                current.name = ";".join(current.name)
            res.append(current)
            current = copy.copy(s[i])
            current.strand = '+' if (proc_strands == set("+") or proc_strands == set(["+", "-"])) else '-'
            current.score = 0
            current.name = "X" if not names else set(s[i].name)
        else:
            # Overlap or adjacency: extend the running region.
            current.end = max(s[i].end, current.end)
            if names:
                current.name.add(s[i].name)
    # don't forget the last one...
    if names:
        current.name = ";".join(current.name)
    res.append(current)
    return res
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings):
    """Bring a switch's ACL configuration in line with the expected state.

    Steps:
      1. read current ACL rules and interface bindings from the switch,
      2. adjust the expected bindings for the switch's LAG configuration,
      3. compute the commands that synchronize ACLs and bindings,
      4. push the combined command list to the switch.
    """
    current_acls, current_bindings = self._get_dynamic_acl_info(switch_ip)
    expected_bindings = self.adjust_bindings_for_lag(switch_ip, expected_bindings)
    sync_cmds = []
    sync_cmds.extend(self.get_sync_acl_cmds(current_acls, expected_acls))
    sync_cmds.extend(self.get_sync_binding_cmds(current_bindings, expected_bindings))
    self.run_openstack_sg_cmds(sync_cmds, self._switches.get(switch_ip))
def perform_experiment(self, engine_list):
    """Perform nearest-neighbour experiments for every engine in *engine_list*.

    For each engine, all of self.vectors are indexed and then each query in
    self.query_indices is answered. The returned list contains one
    (avg_distance_ratio, avg_result_size, avg_search_time) tuple per engine,
    averaged over all query vectors; avg_search_time is relative to the
    average exact search time (self.exact_search_time_per_vector).
    """
    results = []
    num_queries = float(len(self.query_indices))
    # BUG FIX: use enumerate instead of engine_list.index(engine) -- the
    # latter is O(n) per iteration and wrong for duplicate engine objects.
    for engine_idx, engine in enumerate(engine_list):
        print('Engine %d / %d' % (engine_idx, len(engine_list)))
        # Clean storage so earlier engines don't pollute this run.
        engine.clean_all_buckets()
        # Accumulators averaged over the query set below.
        avg_distance_ratio = 0.0
        avg_result_size = 0.0
        avg_search_time = 0.0
        # Index all vectors (columns of self.vectors) into the engine.
        for index in range(self.vectors.shape[1]):
            engine.store_vector(self.vectors[:, index], 'data_%d' % index)
        for index in self.query_indices:
            # Time the approximate search only.
            search_time_start = time.time()
            nearest = engine.neighbours(self.vectors[:, index])
            search_time = time.time() - search_time_start
            # Average normalized distance beyond the radius of the real N
            # closest neighbours; 0.0 means everything is within the radius.
            distance_ratio = 0.0
            for n in nearest:
                if n[2] > self.nearest_radius[index]:
                    d = (n[2] - self.nearest_radius[index])
                    # Normalize: 1.0 == one full real-neighbour radius away.
                    d /= self.nearest_radius[index]
                    distance_ratio += d
            # BUG FIX: guard against an empty result set; the original
            # divided by len(nearest) unconditionally (ZeroDivisionError).
            if nearest:
                distance_ratio /= len(nearest)
            avg_distance_ratio += distance_ratio
            avg_result_size += len(nearest)
            avg_search_time += search_time
        # Normalize accumulators over the query set.
        avg_distance_ratio /= num_queries
        avg_result_size /= num_queries
        avg_search_time = avg_search_time / num_queries
        # Express search time relative to exact search.
        avg_search_time /= self.exact_search_time_per_vector
        print('  distance_ratio=%f, result_size=%f, time=%f' % (avg_distance_ratio, avg_result_size, avg_search_time))
        results.append((avg_distance_ratio, avg_result_size, avg_search_time))
    return results
def get_metric_statistics(Namespace=None, MetricName=None, Dimensions=None, StartTime=None, EndTime=None, Period=None, Statistics=None, ExtendedStatistics=None, Unit=None):
    """Gets statistics for the specified CloudWatch metric.

    The maximum number of data points returned from a single call is 1,440;
    requesting more returns an error. To reduce the number of data points,
    narrow the time range, split the request across adjacent ranges, or
    increase the period (minimum 60 seconds). Data points are not returned
    in chronological order. CloudWatch aggregates data points based on the
    specified period, and needs raw data points to compute percentile
    statistics.

    See also: AWS API Documentation (CloudWatch GetMetricStatistics).

    :type Namespace: string
    :param Namespace: [REQUIRED] The namespace of the metric.
    :type MetricName: string
    :param MetricName: [REQUIRED] The name of the metric.
    :type Dimensions: list
    :param Dimensions: The dimensions, as dicts with keys:
        Name (string) -- [REQUIRED] the name of the dimension.
        Value (string) -- [REQUIRED] the dimension value.
        If the metric has multiple dimensions you must include a value for
        each; CloudWatch treats each unique combination as a separate
        metric, and you must use the same dimensions the metric was
        published with.
    :type StartTime: datetime
    :param StartTime: [REQUIRED] First data point to return (inclusive),
        ISO 8601 UTC (e.g. 2016-10-03T23:00:00Z). CloudWatch rounds it
        down: to the minute (< 15 days ago), to 5 minutes (15-63 days ago),
        or to the hour (> 63 days ago).
    :type EndTime: datetime
    :param EndTime: [REQUIRED] Last data point to return (exclusive),
        ISO 8601 UTC.
    :type Period: integer
    :param Period: [REQUIRED] Granularity in seconds; a multiple of 60,
        default 60. For start times 15-63 days ago use a multiple of 300;
        older than 63 days, a multiple of 3600.
    :type Statistics: list
    :param Statistics: Non-percentile statistics, each one of
        'SampleCount' | 'Average' | 'Sum' | 'Minimum' | 'Maximum'.
    :type ExtendedStatistics: list
    :param ExtendedStatistics: Percentile statistics, between p0.0 and p100.
    :type Unit: string
    :param Unit: The unit for the metric (e.g. 'Seconds', 'Bytes',
        'Percent', 'Count', 'Bytes/Second', ..., or 'None'). Omitting it
        returns all units.
    :rtype: dict
    :return: {
        'Label': 'string',
        'Datapoints': [
            {
                'Timestamp': datetime(2015, 1, 1),
                'SampleCount': 123.0,
                'Average': 123.0,
                'Sum': 123.0,
                'Minimum': 123.0,
                'Maximum': 123.0,
                'Unit': 'Seconds' | ... | 'None',
                'ExtendedStatistics': {'string': 123.0},
            },
        ],
    }
    """
    # Auto-generated documentation stub (boto3-style); the real call is
    # dispatched by the client machinery, so there is no body here.
    pass
def recv(self, topic, payload, qos):
    """Receive a MQTT message.

    Call this method when a message arrives from the MQTT broker; it is
    parsed and, if valid, queued for the logic handler.
    """
    message = self._parse_mqtt_to_message(topic, payload, qos)
    if message is None:
        # Not a message we understand; drop it silently.
        return
    _LOGGER.debug('Receiving %s', message)
    self.add_job(self.logic, message)
def make_limited_stream(stream, limit):
    """Return *stream* wrapped as a :class:`LimitedStream` if it is not one.

    :raises TypeError: when the stream is unlimited and no limit is given.
    """
    if isinstance(stream, LimitedStream):
        # Already limited; hand it back untouched.
        return stream
    if limit is None:
        raise TypeError('stream not limited and no limit provided.')
    return LimitedStream(stream, limit)
def _recv_internal(self, timeout=None):
    """Read one message from the Kvaser device.

    :param timeout: seconds to block waiting for a frame, or None to block
        indefinitely (mapped to canlib's 0xFFFFFFFF ms sentinel).
    :return: tuple ``(msg, filtered)`` where *msg* is a Message or None
        (on non-OK status) and *filtered* reports whether filtering has
        already been applied (self._is_filtered).
    """
    # Output parameters filled in by canReadWait.
    arb_id = ctypes.c_long(0)
    data = ctypes.create_string_buffer(64)
    dlc = ctypes.c_uint(0)
    flags = ctypes.c_uint(0)
    timestamp = ctypes.c_ulong(0)
    if timeout is None:
        # Set infinite timeout
        # http://www.kvaser.com/canlib-webhelp/group___c_a_n.html#ga2edd785a87cc16b49ece8969cad71e5b
        timeout = 0xFFFFFFFF
    else:
        # canlib expects milliseconds.
        timeout = int(timeout * 1000)
    # log.log(9, 'Reading for %d ms on handle: %s' % (timeout, self._read_handle))
    status = canReadWait(self._read_handle, ctypes.byref(arb_id), ctypes.byref(data), ctypes.byref(dlc), ctypes.byref(flags), ctypes.byref(timestamp), timeout  # This is an X ms blocking read
                         )
    if status == canstat.canOK:
        # log.debug('read complete -> status OK')
        data_array = data.raw
        flags = flags.value
        # Decode canlib flag bits into Message attributes.
        is_extended = bool(flags & canstat.canMSG_EXT)
        is_remote_frame = bool(flags & canstat.canMSG_RTR)
        is_error_frame = bool(flags & canstat.canMSG_ERROR_FRAME)
        is_fd = bool(flags & canstat.canFDMSG_FDF)
        bitrate_switch = bool(flags & canstat.canFDMSG_BRS)
        error_state_indicator = bool(flags & canstat.canFDMSG_ESI)
        # Scale the device timestamp and shift it into the bus time base.
        msg_timestamp = timestamp.value * TIMESTAMP_FACTOR
        rx_msg = Message(arbitration_id=arb_id.value, data=data_array[:dlc.value], dlc=dlc.value, is_extended_id=is_extended, is_error_frame=is_error_frame, is_remote_frame=is_remote_frame, is_fd=is_fd, bitrate_switch=bitrate_switch, error_state_indicator=error_state_indicator, channel=self.channel, timestamp=msg_timestamp + self._timestamp_offset)
        # log.debug('Got message: %s' % rx_msg)
        return rx_msg, self._is_filtered
    else:
        # log.debug('read complete -> status not okay')
        # Timeout or error status: no message this call.
        return None, self._is_filtered
def load_gene_exp_to_df(inst_path):
    '''Load 10x gene expression data (sparse matrix format) into a DataFrame.

    Reads ``matrix.mtx``, ``genes.tsv`` and ``barcodes.tsv`` from
    *inst_path* (a directory prefix ending in a path separator) and returns
    a Pandas dataframe of genes (rows) by cell barcodes (columns).
    Duplicate gene names get a ``_<k>`` suffix; the ``-<n>`` suffix is
    stripped from barcodes.
    '''
    import pandas as pd
    from scipy import io
    from ast import literal_eval as make_tuple

    # matrix: read sparse MatrixMarket data and densify.
    mat = io.mmread(inst_path + 'matrix.mtx').todense()

    # genes: prefer the gene symbol (2nd column) over the id when present.
    with open(inst_path + 'genes.tsv', 'r') as f:
        gene_lines = f.readlines()
    ini_genes = []
    for inst_line in gene_lines:
        fields = inst_line.strip().split()
        ini_genes.append(fields[1] if len(fields) > 1 else fields[0])

    # Add a unique numeric suffix only to duplicated gene names.
    gene_name_count = pd.Series(ini_genes).value_counts()
    duplicate_genes = set(gene_name_count[gene_name_count > 1].index.tolist())
    dup_index = {}
    genes = []
    for gene in ini_genes:
        if gene in duplicate_genes:
            # Running per-name counter: first duplicate is _1, then _2, ...
            dup_index[gene] = dup_index.get(gene, 0) + 1
            genes.append(gene + '_' + str(dup_index[gene]))
        else:
            genes.append(gene)

    # barcodes: strip the trailing '-<n>' lane suffix if present.
    with open(inst_path + 'barcodes.tsv', 'r') as f:
        barcode_lines = f.readlines()
    cell_barcodes = []
    for inst_bc in barcode_lines:
        barcode = inst_bc.strip().split('\t')[0]
        if '-' in barcode:
            barcode = barcode.split('-')[0]
        cell_barcodes.append(barcode)

    # Best-effort: labels serialized as Python tuples are parsed back.
    # Plain strings raise ValueError/SyntaxError and are kept as-is.
    try:
        cell_barcodes = [make_tuple(x) for x in cell_barcodes]
    except (ValueError, SyntaxError):
        pass
    try:
        genes = [make_tuple(x) for x in genes]
    except (ValueError, SyntaxError):
        pass

    # make dataframe: genes as rows, barcodes as columns.
    return pd.DataFrame(mat, index=genes, columns=cell_barcodes)
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS[imt]
    # intensity on a reference soil is used for both mean
    # and stddev calculations.
    ln_y_ref = self._get_ln_y_ref(rup, dists, C)
    # exp1 and exp2 are parts of eq. 7; vs30 is capped at 1130 m/s.
    exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360))
    exp2 = np.exp(C['phi3'] * (1130 - 360))
    # v1 is the period dependent site term. The Vs30 above which, the
    # amplification is constant
    v1 = self._get_v1(imt)
    mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2, v1)
    # NOTE(review): convert_to_LHC presumably shifts the mean to the
    # larger-horizontal component convention -- confirm against its docs.
    mean += convert_to_LHC(imt)
    stddevs = self._get_stddevs(sites, rup, C, stddev_types, ln_y_ref, exp1, exp2)
    return mean, stddevs
def delete_workspace_config(namespace, workspace, cnamespace, config):
    """Delete a method configuration in a workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        cnamespace (str): Method configuration namespace
        config (str): Method configuration name

    Swagger:
        https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
    """
    uri = "/".join(["workspaces", namespace, workspace, "method_configs", cnamespace, config])
    return __delete(uri)
def read_epw(self):
    """Section 2 - Read the EPW weather file and populate climate attributes.

    Sets:
        self.climateDataPath: path to the source .epw file
        self.newPathName: destination path for the morphed EPW file
        self._header: EPW header rows (lines 1-8)
        self.epwinput: per-timestep weather rows (line 9 onward)
        self.lat, self.lon, self.GMT: site latitude, longitude, GMT offset
        self.nSoil: number of ground-temperature depths
        self.Tsoil: nSoil x 12 matrix of soil temperatures (K)
        self.depth_soil: nSoil x 1 matrix of soil depths (m)

    Raises:
        Exception: when the epw file cannot be read.
    """
    # Make dir path to epw file
    self.climateDataPath = os.path.join(self.epwDir, self.epwFileName)
    # Open epw file and feed csv data to climate_data
    try:
        climate_data = utilities.read_csv(self.climateDataPath)
    except Exception as e:
        # BUG FIX: `e.message` is Python 2 only (AttributeError on Python 3);
        # format(e) renders the message portably.
        raise Exception("Failed to read epw file! {}".format(e))
    # Read header lines (1 to 8) from EPW and ensure TMY2 format.
    self._header = climate_data[0:8]
    # Read weather data from EPW for each time step in weather file (lines 8-end).
    self.epwinput = climate_data[8:]
    # Read Lat, Long, GMT (line 1 of EPW).
    self.lat = float(self._header[0][6])
    self.lon = float(self._header[0][7])
    self.GMT = float(self._header[0][8])
    # Read in soil temperature data (assumes this is always there).
    # ref: http://bigladdersoftware.com/epx/docs/8-2/auxiliary-programs/epw-csv-format-inout.html
    soilData = self._header[3]
    # Number of ground temperature depths
    self.nSoil = int(soilData[1])
    # nSoil x 12 matrix for soil temperature (K)
    self.Tsoil = utilities.zeros(self.nSoil, 12)
    # nSoil x 1 matrix for soil depth (m)
    self.depth_soil = utilities.zeros(self.nSoil, 1)
    # Read monthly data for each layer of soil from EPW file; each layer
    # occupies a 16-field stride in the header row.
    for i in range(self.nSoil):
        # get soil depth for each nSoil
        self.depth_soil[i][0] = float(soilData[2 + (i * 16)])
        # 12 months of soil T for specific depth, converted C -> K
        for j in range(12):
            self.Tsoil[i][j] = float(soilData[6 + (i * 16) + j]) + 273.15
    # Set new directory path for the morphed EPW file
    self.newPathName = os.path.join(self.destinationDir, self.destinationFileName)
def traverse_postorder(self, leaves=True, internal=True):
    '''Generator performing a postorder traversal starting at this ``Node``.

    Args:
        ``leaves`` (``bool``): yield leaf nodes when ``True``
        ``internal`` (``bool``): yield internal nodes when ``True``
    '''
    # Two-stack iterative postorder: the first stack drives a reversed
    # preorder walk, the second replays it in postorder.
    visit_stack = deque([self])
    output_stack = deque()
    while visit_stack:
        node = visit_stack.pop()
        output_stack.append(node)
        visit_stack.extend(node.children)
    while output_stack:
        node = output_stack.pop()
        wanted = leaves if node.is_leaf() else internal
        if wanted:
            yield node
def getErrorResponse(self, errorCode, errorDescr):
    """Record an error state on this external-method object.

    Stores the error code and description, marks the response flag "yes",
    and returns the object itself so the call can be chained.
    """
    self.errorCode, self.errorDescr = errorCode, errorDescr
    self.response = "yes"
    return self
def adjacency_plot_und(A, coor, tube=False):
    '''Plot an undirected, unweighted adjacency matrix as a 3D network.

    The matlab original produces a formatted isometric plot that
    matplotlib cannot reproduce, so this instead renders the network in
    mayavi, placing nodes at the given spatial coordinates. It is a less
    featureful version of the 3D brain in cvu, the connectome
    visualization utility.

    Note that unlike other bctpy functions, this function depends on mayavi.

    Parameters
    ----------
    A : NxN np.ndarray
        adjacency matrix
    coor : Nx3 np.ndarray
        vector of node coordinates
    tube : bool
        plots using cylindrical tubes for higher resolution image. If True,
        plots cylindrical tube sources. If False, plots line sources.
        Default value is False.

    Returns
    -------
    fig : Instance(Scene)
        handle to a mayavi figure.

    Notes
    -----
    To display the output interactively, call::

        fig = adjacency_plot_und(A, coor)
        from mayavi import mlab
        mlab.show()

    Thresholding the matrix is strongly recommended. It is recommended
    that the input matrix have fewer than 5000 total connections in order
    to achieve reasonable performance and noncluttered visualization.
    '''
    from mayavi import mlab

    n = len(A)

    # Flat indices of the strict upper triangle, i.e. each undirected edge
    # exactly once. (The original also computed an unused
    # ``nr_edges = (n * n - 1) // 2`` for a commented-out loop; the value
    # was never read and the formula was wrong -- the number of undirected
    # edges is n * (n - 1) // 2 -- so both have been removed.)
    ixes, = np.where(np.triu(np.ones((n, n)), 1).flat)
    adjdat = A.flat[ixes]

    # starts[i] holds the coordinate of the first endpoint of edge i;
    # vecs[i] the displacement vector to the second endpoint.
    A_r = np.tile(coor, (n, 1, 1))
    starts = np.reshape(A_r, (n * n, 3))[ixes, :]
    vecs = np.reshape(A_r - np.transpose(A_r, (1, 0, 2)), (n * n, 3))[ixes, :]

    # plotting
    fig = mlab.figure()
    nodesource = mlab.pipeline.scalar_scatter(coor[:, 0], coor[:, 1], coor[:, 2], figure=fig)
    nodes = mlab.pipeline.glyph(nodesource, scale_mode='none', scale_factor=3., mode='sphere', figure=fig)
    nodes.glyph.color_mode = 'color_by_scalar'
    vectorsrc = mlab.pipeline.vector_scatter(starts[:, 0], starts[:, 1], starts[:, 2], vecs[:, 0], vecs[:, 1], vecs[:, 2], figure=fig)
    vectorsrc.mlab_source.dataset.point_data.scalars = adjdat
    # Hide (near-)zero-weight entries; the upper bound is the matrix maximum.
    thres = mlab.pipeline.threshold(vectorsrc, low=0.0001, up=np.max(A), figure=fig)
    vectors = mlab.pipeline.vectors(thres, colormap='YlOrRd', scale_mode='vector', figure=fig)
    vectors.glyph.glyph.clamping = False
    vectors.glyph.glyph.color_mode = 'color_by_scalar'
    vectors.glyph.color_mode = 'color_by_scalar'
    vectors.glyph.glyph_source.glyph_position = 'head'
    vectors.actor.property.opacity = .7
    if tube:
        vectors.glyph.glyph_source.glyph_source = (vectors.glyph.glyph_source.glyph_dict['cylinder_source'])
        vectors.glyph.glyph_source.glyph_source.radius = 0.015
    else:
        vectors.glyph.glyph_source.glyph_source.glyph_type = 'dash'
    return fig
def _zscore ( a ) :
"""Calculating z - score of data on the first axis .
If the numbers in any column are all equal , scipy . stats . zscore
will return NaN for this column . We shall correct them all to
be zeros .
Parameters
a : numpy array
Returns
zscore : numpy array
The z - scores of input " a " , with any columns including non - finite
numbers replaced by all zeros .""" | assert a . ndim > 1 , 'a must have more than one dimensions'
zscore = scipy . stats . zscore ( a , axis = 0 )
zscore [ : , np . logical_not ( np . all ( np . isfinite ( zscore ) , axis = 0 ) ) ] = 0
return zscore |
def _load_sequences_to_strain(self, strain_id, force_rerun=False):
    """Load a strain GEM-PRO (with functional genes already defined),
    attach protein sequences from the strain genome, and save it as a new
    pickled GEM-PRO.

    :param strain_id: strain identifier; used as a key into
        ``self.strain_infodict`` and as a column of
        ``self.df_orthology_matrix``
    :param force_rerun: if True, rebuild the output pickle even when it
        already exists
    :return: tuple of ``(strain_id, path_to_with-sequences_pickle)``
    """
    gp_seqs_path = op.join(self.model_dir, '{}_gp_withseqs.pckl'.format(strain_id))
    # Skip all of the expensive work when the output pickle already exists
    # (unless the caller forces a rerun).
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=gp_seqs_path):
        gp_noseqs = ssbio.io.load_pickle(self.strain_infodict[strain_id]['gp_noseqs_path'])
        # Lazy FASTA index over the strain genome; keyed by record id.
        strain_sequences = SeqIO.index(self.strain_infodict[strain_id]['genome_path'], 'fasta')
        for strain_gene in gp_noseqs.functional_genes:
            # Pull the gene ID of the strain from the orthology matrix
            strain_gene_key = self.df_orthology_matrix.at[strain_gene.id, strain_id]
            # Load into the strain GEM-PRO
            new_id = '{}_{}'.format(strain_gene.id, strain_id)
            if strain_gene.protein.sequences.has_id(new_id):
                # Sequence already attached (e.g. from a previous run).
                continue
            strain_gene.protein.load_manual_sequence(seq=strain_sequences[strain_gene_key], ident=new_id, set_as_representative=True)
        gp_noseqs.save_pickle(outfile=gp_seqs_path)
    return strain_id, gp_seqs_path
def analytics(account=None, *args, **kwargs):
    """Simple Google Analytics integration.

    First looks for an ``account`` parameter. If not supplied, uses the
    Django ``GOOGLE_ANALYTICS_ACCOUNT`` setting. If the account is not set
    anywhere, raises ``TemplateSyntaxError``.

    :param account:
        Google Analytics account id to be used.
    :return: context dict with the ``account`` and extra keyword ``params``.
    """
    if not account:
        try:
            account = settings.GOOGLE_ANALYTICS_ACCOUNT
        except AttributeError:
            # A missing Django setting raises AttributeError; the original
            # bare `except:` also swallowed unrelated errors (even
            # KeyboardInterrupt), masking real bugs.
            raise template.TemplateSyntaxError ( "Analytics account could not found either " "in tag parameters or settings" )
    return { 'account' : account , 'params' : kwargs }
def _to_solr ( self , data ) :
'''Sends data to a Solr instance .''' | return self . _dest . index_json ( self . _dest_coll , json . dumps ( data , sort_keys = True ) ) |
def trifurcate_base(cls, newick):
    """Return ``newick`` rewritten so that the base is a trifurcation
    (usually indicating an unrooted tree)."""
    tree = cls(newick)
    # De-rooting collapses the bifurcating root into a trifurcation.
    tree._tree.deroot()
    return tree.newick
def likelihood(self, outcomes, modelparams, expparams):
    """Calculates the likelihood function at the states specified
    by modelparams and measurement specified by expparams.

    This is given by the Born rule and is the probability of
    outcomes given the state and measurement operator.

    :param outcomes: measurement outcomes to evaluate
    :param modelparams: model-parameter array; the column selected by
        ``expparams['pauli']`` is treated as a Pauli expectation value
    :param expparams: structured array with (at least) fields ``'pauli'``
        (column index) and ``'vis'`` (visibility, shape ``(n_exp,)``)
    """
    # By calling the superclass implementation, we can consolidate
    # call counting there.
    super ( MultiQubitStatePauliModel , self ) . likelihood ( outcomes , modelparams , expparams )
    # Born rule: Pr(outcome 0) = (1 + <P>) / 2 for the selected Pauli
    # expectation value.
    pr0 = 0.5 * ( 1 + modelparams [ : , expparams [ 'pauli' ] ] )
    # Use the following hack if you don't want to ensure positive weights:
    # clamp into [0, 1] instead of assuming physically valid states.
    pr0 [ pr0 < 0 ] = 0
    pr0 [ pr0 > 1 ] = 1
    # Mix with the maximally mixed distribution according to visibility.
    # Note that expparams [ 'vis' ] has shape ( n_exp , ) .
    pr0 = expparams [ 'vis' ] * pr0 + ( 1 - expparams [ 'vis' ] ) * 0.5
    # Now we concatenate over outcomes.
    return Model . pr0_to_likelihood_array ( outcomes , pr0 )
def create_box(self, orientation=Gtk.Orientation.HORIZONTAL, spacing=0):
    """Create and return a new non-homogeneous ``Gtk.Box``.

    Depending on ``orientation`` the box is laid out either
    HORIZONTAL or VERTICAL.
    """
    box = Gtk.Box(orientation=orientation, spacing=spacing)
    box.set_homogeneous(False)
    return box
def local():
    """Load the local requirements file and register each parsed
    requirement as a new package version."""
    logger.info("Loading requirements from local file.")
    with open(REQUIREMENTS_FILE, 'r') as req_file:
        for requirement in parse(req_file):
            logger.debug("Creating new package: %r", requirement)
            create_package_version(requirement)
def _data_from_response ( self , resp_body , key = None ) :
"""This works for most API responses , but some don ' t structure their
listing responses the same way , so overriding this method allows
subclasses to handle extraction for those outliers .""" | if key :
data = resp_body . get ( key )
else :
data = resp_body . get ( self . plural_response_key , resp_body )
# NOTE ( ja ) : some services , such as keystone returns values as list as
# { " values " : [ . . . ] } unlike other services which just return the
# list .
if isinstance ( data , dict ) :
try :
data = data [ "values" ]
except KeyError :
pass
return data |
def deactivate(self, plugins=None):
    """Deactivates given plugins.

    A given plugin must be activated, otherwise it is ignored and no action
    takes place (no signals are fired, no deactivate functions are called.)

    A deactivated plugin is still loaded and initialised and can be
    reactivated by calling :func:`activate` again. It is also still
    registered in the :class:`.PluginManager` and can be requested via
    :func:`get`.

    :param plugins: List of plugin names
    :type plugins: list of strings
    """
    # Fix: the original declared a mutable default argument (plugins=[]),
    # a shared-state hazard; None is the safe sentinel.
    if plugins is None:
        plugins = []
    self._log.debug("Plugins Deactivation started")
    if not isinstance(plugins, list):
        raise AttributeError("plugins must be a list, not %s" % type(plugins))
    self._log.debug("Plugins to deactivate: %s" % ", ".join(plugins))
    plugins_deactivated = []
    for plugin_name in plugins:
        if not isinstance(plugin_name, str):
            raise AttributeError("plugin name must be a str, not %s" % type(plugin_name))
        if plugin_name not in self._plugins.keys():
            # Unknown names are skipped, not fatal.
            self._log.info("Unknown activated plugin %s" % plugin_name)
            continue
        else:
            self._log.debug("Deactivating plugin %s" % plugin_name)
            if not self._plugins[plugin_name].active:
                self._log.warning("Plugin %s seems to be already deactivated" % plugin_name)
            else:
                try:
                    self._plugins[plugin_name].deactivate()
                except Exception as e:
                    # Wrap in a uniform manager exception, preserving the
                    # original cause for debugging.
                    raise_from(PluginNotDeactivatableException("Plugin %s could not be deactivated" % plugin_name), e)
                else:
                    self._log.debug("Plugin %s deactivated" % plugin_name)
                    plugins_deactivated.append(plugin_name)
    self._log.info("Plugins deactivated: %s" % ", ".join(plugins_deactivated))
def post_event_unpublish(self, id, **data):
    """POST /events/:id/unpublish/

    Unpublishes an event. In order for a free event to be unpublished, it
    must not have any pending or completed orders, even if the event is in
    the past. In order for a paid event to be unpublished, it must not have
    any pending or completed orders, unless the event has been completed
    and paid out. Returns a boolean indicating success or failure of the
    unpublish.
    """
    path = "/events/{0}/unpublish/".format(id)
    return self.post(path, data=data)
def _set_dst_vtep_ip_any(self, v, load=False):
    """Setter method for dst_vtep_ip_any, mapped from YANG variable
    /overlay/access_list/type/vxlan/standard/seq/dst_vtep_ip_any (empty).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_dst_vtep_ip_any is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_dst_vtep_ip_any() directly.
    """
    # Auto-generated (pyangbind-style) setter: validate/coerce the incoming
    # value into the YANG 'empty' leaf type and store it.
    if hasattr(v, "_utype"):
        # presumably a union-typed value: unwrap before validation -- TODO confirm
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name="dst-vtep-ip-any", rest_name="dst-vtep-ip-any", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: any', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({'error-string': """dst_vtep_ip_any must be of a type compatible with empty""", 'defined-type': "empty", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="dst-vtep-ip-any", rest_name="dst-vtep-ip-any", parent=self, choice=(u'choice-dst-vtep-ip', u'case-dst-vtep-ip-any'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'dst vtep ip address: any', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='empty', is_config=True)""", })
    self.__dst_vtep_ip_any = t
    if hasattr(self, '_set'):
        # Notify the parent container that a child value changed.
        self._set()
def sff(args):
    """%prog sff sffiles

    Convert reads formatted as 454 SFF file, and convert to CA frg file.
    Turn --nodedup on if another deduplication mechanism is used (e.g.
    CD-HIT-454). See assembly.sff.deduplicate().
    """
    p = OptionParser(sff.__doc__)
    p.add_option("--prefix", dest="prefix", default=None, help="Output frg filename prefix")
    p.add_option("--nodedup", default=False, action="store_true", help="Do not remove duplicates [default: %default]")
    p.set_size()
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(p.print_help())
    sffiles = args
    # Plate code = last underscore-separated token of each file stem.
    plates = [x.split(".")[0].split("_")[-1] for x in sffiles]
    # A nonzero insert size means the reads are mate pairs.
    mated = (opts.size != 0)
    mean, sv = get_mean_sv(opts.size)
    if len(plates) > 1:
        # Multiple plates: collapse to a shared prefix plus 'X' wildcard.
        plate = plates[0][:-1] + 'X'
    else:
        plate = "_".join(plates)
    if mated:
        # NOTE(review): on Python 3 `opts.size / 1000` is float division
        # (e.g. "Titan0.6Kb" for size=600); this looks written for
        # Python 2 -- confirm whether `//` is intended.
        libname = "Titan{0}Kb-".format(opts.size / 1000) + plate
    else:
        libname = "TitanFrags-" + plate
    if opts.prefix:
        # An explicit prefix overrides the derived library name.
        libname = opts.prefix
    cmd = "sffToCA"
    cmd += " -libraryname {0} -output {0} ".format(libname)
    cmd += " -clear 454 -trim chop "
    if mated:
        cmd += " -linker titanium -insertsize {0} {1} ".format(mean, sv)
    if opts.nodedup:
        cmd += " -nodedup "
    cmd += " ".join(sffiles)
    sh(cmd)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.