signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
    """Parses the ImportDataDirectory and returns a list of ImportDescriptorData.

    :param dataDirectoryEntry: data-directory entry with the RVA
        (``VirtualAddress``) and ``Size`` of the import table.
    :param importSection: section containing the import table; when falsy,
        nothing is parsed and ``None`` is returned.
    :return: list of ImportDescriptorData, or ``None`` when *importSection*
        is falsy.
    """
    if not importSection:
        return
    # Map the import-table bytes out of the section's raw data at the
    # file offset corresponding to the directory's RVA.
    raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
    offset = 0
    import_descriptors = []
    # The descriptor array is terminated by an all-zero entry, detected
    # here via OriginalFirstThunk == 0.
    while True:
        import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
        if import_descriptor.OriginalFirstThunk == 0:
            break
        else:
            # Resolve and validate the DLL name pointer, then read the name.
            nameOffset = to_offset(import_descriptor.Name, importSection)
            checkOffset(nameOffset, importSection)
            dllName = get_str(importSection.raw, nameOffset)
            # Import Name Table (lookup) and Import Address Table are
            # parallel thunk arrays.
            import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
            import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
            import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
        offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
    return import_descriptors
|
def _on_del_route(self, msg):
    """Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
    the message, updating the local table, propagating the message upwards,
    and downwards towards any stream that ever had a message forwarded from
    it towards the disconnecting context.
    """
    # Dead messages carry no usable payload; ignore them.
    if msg.is_dead:
        return
    target_id = int(msg.data)
    registered_stream = self.router.stream_by_id(target_id)
    if registered_stream is None:
        return
    stream = self.router.stream_by_id(msg.auth_id)
    # Only the stream the route is registered against may delete it;
    # anything else is a spoofed or stale DEL_ROUTE.
    if registered_stream != stream:
        LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r', self, target_id, stream, registered_stream)
        return
    context = self.router.context_by_id(target_id, create=False)
    if context:
        # Notify local listeners that the context went away.
        LOG.debug('%r: firing local disconnect for %r', self, context)
        mitogen.core.fire(context, 'disconnect')
    LOG.debug('%r: deleting route to %d via %r', self, target_id, stream)
    routes = self._routes_by_stream.get(stream)
    if routes:
        routes.discard(target_id)
        self.router.del_route(target_id)
        # Propagate upwards unless the message came from our parent, and
        # always downwards to interested child streams.
        if stream.remote_id != mitogen.parent_id:
            self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
        self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
|
def _EccZmaxRperiRap(self, *args, **kwargs):
    """NAME:
       _EccZmaxRperiRap
    PURPOSE:
       evaluate the eccentricity, maximum height above the plane, peri- and apocenter for an isochrone potential
    INPUT:
       Either:
          a) R, vR, vT, z, vz[, phi]:
             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
    OUTPUT:
       (e, zmax, rperi, rap)
    HISTORY:
       2017-12-22 - Written - Bovy (UofT)
    """
    if len(args) == 5:  # R, vR, vT, z, vz  pragma: no cover
        R, vR, vT, z, vz = args
    elif len(args) == 6:  # R, vR, vT, z, vz, phi
        R, vR, vT, z, vz, phi = args
    else:
        # Parse an Orbit instance (or similar) into phase-space coordinates.
        self._parse_eval_args(*args)
        R = self._eval_R
        vR = self._eval_vR
        vT = self._eval_vT
        z = self._eval_z
        vz = self._eval_vz
    if isinstance(R, float):
        # Promote scalars to length-1 arrays so the math below vectorizes.
        R = nu.array([R])
        vR = nu.array([vR])
        vT = nu.array([vT])
        z = nu.array([z])
        vz = nu.array([vz])
    if self._c:  # pragma: no cover
        pass
    else:
        # Angular-momentum components for position (R, 0, z) with velocity
        # (vR, vT, vz); L2 = |L|^2 and E is the total energy per unit mass.
        Lz = R * vT
        Lx = -z * vT
        Ly = z * vR - R * vz
        L2 = Lx * Lx + Ly * Ly + Lz * Lz
        E = self._ip(R, z) + vR ** 2. / 2. + vT ** 2. / 2. + vz ** 2. / 2.
        if self.b == 0:
            # Point-mass (Kepler) limit: closed-form conic-section turning points.
            warnings.warn("zmax for point-mass (b=0) isochrone potential is only approximate, because it assumes that zmax is attained at rap, which is not necessarily the case", galpyWarning)
            a = -self.amp / 2. / E
            me2 = L2 / self.amp / a
            e = nu.sqrt(1. - me2)
            rperi = a * (1. - e)
            rap = a * (1. + e)
        else:
            # General isochrone: turning points from the quadratic roots
            # smin/smax, then mapped back to radii.
            smin = 0.5 * ((2. * E - self.amp / self.b) + nu.sqrt((2. * E - self.amp / self.b) ** 2. + 2. * E * (4. * self.amp / self.b + L2 / self.b ** 2.))) / E
            smax = 2. - self.amp / E / self.b - smin
            rperi = smin * nu.sqrt(1. - 2. / smin) * self.b
            rap = smax * nu.sqrt(1. - 2. / smax) * self.b
    # e, zmax (approximated as attained at rap), rperi, rap
    return ((rap - rperi) / (rap + rperi), rap * nu.sqrt(1. - Lz ** 2. / L2), rperi, rap)
|
def makeCubiccFunc(self, mNrm, cNrm):
    '''Makes a cubic spline interpolation of the unconstrained consumption
    function for this period.

    Parameters
    ----------
    mNrm : np.array
        Corresponding market resource points for interpolation.
    cNrm : np.array
        Consumption points for interpolation.

    Returns
    -------
    cFuncUnc : CubicInterp
        The unconstrained consumption function for this period.
    '''
    # End-of-period marginal marginal value: discounted, growth-adjusted
    # expectation of next period's v'' over the permanent shocks.
    EndOfPrdvPP = self.DiscFacEff * self.Rfree * self.Rfree * self.PermGroFac ** (-self.CRRA - 1.0) * np.sum(self.PermShkVals_temp ** (-self.CRRA - 1.0) * self.vPPfuncNext(self.mNrmNext) * self.ShkPrbs_temp, axis=0)
    # Slope dc/da at each gridpoint (skipping the lower-bound point),
    # which determines the marginal propensity to consume there.
    dcda = EndOfPrdvPP / self.uPP(np.array(cNrm[1:]))
    MPC = dcda / (dcda + 1.)
    # Prepend the MPC at the lower bound of market resources.
    MPC = np.insert(MPC, 0, self.MPCmaxNow)
    # Cubic interpolation with linear extrapolation governed by the
    # limiting MPC and human wealth.
    cFuncNowUnc = CubicInterp(mNrm, cNrm, MPC, self.MPCminNow * self.hNrmNow, self.MPCminNow)
    return cFuncNowUnc
|
def get_widths_mean_var(self, estimation):
    """Extract the estimated variance of the widths' mean.

    Parameters
    ----------
    estimation : 1D array
        Either prior or posterior estimation vector.

    Returns
    -------
    widths_mean_var : 2D array, in shape [K, 1]
        Estimation on variance of widths' mean.
    """
    # The widths'-mean variances occupy the tail of the estimation vector,
    # starting at the fourth mapping offset.
    start = self.map_offset[3]
    return estimation[start:].reshape(self.K, 1)
|
def int_global_to_local(self, index, axis=0):
    """Calculate local index from global index for integer input.

    :param index: global index as integer
    :param axis: current axis to process
    :return: the local index, or None when the global index falls inside
        a halo region / outside this rank's slice
    """
    # TODO(review): original author asked whether strict '>' would be
    # sufficient here instead of '>=' — verify.
    upper = self.__mask[axis].stop - self.__halos[1][axis]
    lower = self.__mask[axis].start + self.__halos[0][axis]
    if index >= upper or index < lower:
        return None
    return index - self.__mask[axis].start
|
def addCallbacks(self, callback, errback=None, callbackArgs=None, callbackKeywords=None, errbackArgs=None, errbackKeywords=None):
    """Add a pair of callbacks that will be run in the context of an eliot
    action.

    @return: C{self}
    @rtype: L{DeferredContext}
    @raises AlreadyFinished: L{DeferredContext.addActionFinish} has been
        called. This indicates a programmer error.
    """
    if self._finishAdded:
        raise AlreadyFinished()
    # A missing errback falls through unchanged.
    effectiveErrback = _passthrough if errback is None else errback

    def wrappedCallback(*a, **kw):
        # Run the callback inside the eliot action's context.
        return self._action.run(callback, *a, **kw)

    def wrappedErrback(*a, **kw):
        return self._action.run(effectiveErrback, *a, **kw)

    self.result.addCallbacks(wrappedCallback, wrappedErrback, callbackArgs, callbackKeywords, errbackArgs, errbackKeywords)
    return self
|
def from_payload(type_code, payload, connection):
    """Create a lob instance from a payload.

    Depending on lob type a BLOB, CLOB, or NCLOB instance will be returned
    (or None when the header marks a SQL NULL). This function is usually
    called from types.*LobType.from_resultset().

    :param type_code: lob type code, key into LOB_TYPE_CODE_MAP.
    :param payload: buffer positioned at a lob header.
    :param connection: connection the lob belongs to.
    :return: lob instance, or None for a NULL lob.
    """
    lob_header = ReadLobHeader(payload)
    if lob_header.isnull():
        lob = None
    else:
        data = payload.read(lob_header.chunk_length)
        _LobClass = LOB_TYPE_CODE_MAP[type_code]
        lob = _LobClass.from_payload(data, lob_header, connection)
    # FIX: use lazy %-style logging arguments instead of eager '%'
    # formatting, so the repr is only computed when DEBUG is enabled.
    logger.debug('Lob Header %r', lob)
    return lob
|
def full_clean(self, *args, **kwargs):
    """Apply fixups that need to happen before per-field validation occurs.

    Sets the page's title and slug from the exception date before
    delegating to the parent implementation.
    """
    # Fall back to the title-cased slug name when no explicit name is set.
    name = getattr(self, 'name', self.slugName.title())
    self.title = "{} for {}".format(name, dateFormat(self.except_date))
    self.slug = "{}-{}".format(self.except_date, self.slugName)
    super().full_clean(*args, **kwargs)
|
def parseruninfo(self):
    """Extracts the flowcell ID, as well as the instrument name from
    RunInfo.xml. If this file is not provided, NA values are substituted.
    """
    # Check if the RunInfo.xml file is provided, otherwise yield N/A.
    try:
        runinfo = ElementTree.ElementTree(file=self.runinfo)
        # Get the run id and number from the run element's attributes;
        # stop descending once an element without them is found.
        for elem in runinfo.iter():
            for run in elem:
                try:
                    self.runid = run.attrib['Id']
                    self.runnumber = run.attrib['Number']
                except KeyError:
                    break
        # Pull the text from flowcell and instrument values using the
        # .iter(tag="X") function.
        for elem in runinfo.iter(tag="Flowcell"):
            self.flowcell = elem.text
        for elem in runinfo.iter(tag="Instrument"):
            self.instrument = elem.text
    except IOError:
        # Missing file: keep the default ('NA') attribute values.
        pass
    # Extract run statistics from either GenerateRunStatistics.xml or
    # indexingQC.txt.
    self.parserunstats()
|
def populate(self, struct):
    """Generates the list tree.

    struct: if a list/set/tuple is given, a flat list is generated
    <*l><li>v1</li><li>v2</li>...</*l>
    If the list type is 'Dl' a flat list without definitions is generated
    <*l><dt>v1</dt><dt>v2</dt>...</*l>
    If the given struct is a dict, keys containing lists/tuples/sets/dicts
    will be transformed in nested lists, and so on recursively, using dict
    keys as list items, and dict values as sublists. If type is 'Dl' each
    value will be transformed in definition (or list of definitions)
    except others dict. In that case, it will be transformed in <dfn> tags.

    >>> struct = {'ele1': None, 'ele2': ['sub21', 'sub22'], 'ele3': {'sub31': None, 'sub32': None, '_typ': 'Ol'}}
    >>> TempyList(struct=struct)
    <ul>
        <li>ele1</li>
        <li>ele2
            <ul>
                <li>sub21</li>
                <li>sub22</li>
            </ul>
        </li>
        <li>ele3
            <ol>
                <li>sub31</li>
                <li>sub32</li>
            </ol>
        </li>
    </ul>
    """
    if struct is None:
        # Nothing to add; returning self keeps the API chainable.
        # (Maybe raise? Empty the list?)
        return self
    if isinstance(struct, (list, set, tuple)):
        # Normalize flat sequences into a dict with None values.
        struct = dict(zip_longest(struct, [None]))
    if not isinstance(struct, dict):
        raise WidgetDataError(self, "List Input not managed, expected (dict, list), got %s" % type(struct), )
    else:
        # Dispatch on the concrete list type: definition lists get
        # dt/dd handling, everything else gets li handling.
        if self._typ == Dl:
            self.__process_dl_struct(struct)
        else:
            self.__process_li_struct(struct)
    return self
|
def QA_data_futuremin_resample(min_data, type_='5min'):
    """Resample futures minute bars into a larger period.

    future:
        vol ==> trade
        amount X

    :param min_data: minute-bar DataFrame (datetime-indexed) containing at
        least the columns listed in CONVERSION, including 'tradetime'.
    :param type_: target resample frequency, e.g. '5min'.
    :return: resampled DataFrame indexed by ['datetime', 'code'].
    """
    # BUG FIX: the original assigned to 'min_data.tradeime' (typo), which
    # only created a throwaway attribute and never converted the
    # 'tradetime' column to datetimes.
    min_data['tradetime'] = pd.to_datetime(min_data['tradetime'])
    CONVERSION = {'code': 'first', 'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'trade': 'sum', 'tradetime': 'last', 'date': 'last'}
    # Right-closed bars, with the label shifted by the bar width.
    resx = min_data.resample(type_, closed='right', loffset=type_).apply(CONVERSION)
    return resx.dropna().reset_index().set_index(['datetime', 'code'])
|
def start(**kwargs: Any) -> None:
    """Start the web server and block until it stops.

    Runs until ``Ctrl-c`` is pressed or, if auto-shutdown is enabled, until
    all browser windows are closed. Accepts the same keyword arguments as
    :func:`start_server`; all of them are passed straight through.
    """
    start_server(**kwargs)
    loop = asyncio.get_event_loop()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-c: shut the server down cleanly.
        stop_server()
|
def adjust_name_for_printing(name):
    """Sanitize *name* so it can be printed and used as a variable name.

    Returns '' when *name* is None; raises NameError when the sanitized
    result is still not a valid identifier.
    """
    if name is None:
        return ''
    original = name
    # Order matters: '**' must be translated before the single '*'.
    substitutions = (
        (" ", "_"), (".", "_"), ("-", "_m_"),
        ("+", "_p_"), ("!", "_I_"),
        ("**", "_xx_"), ("*", "_x_"),
        ("/", "_l_"), ("@", "_at_"),
        ("(", "_of_"), (")", ""),
    )
    for old, new in substitutions:
        name = name.replace(old, new)
    if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:
        raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(original, name))
    return name
|
def prepare_list_of_files(kernel_name, kernel_file_list, params, grid, threads, block_size_names):
    """prepare the kernel string along with any additional files

    The first file in the list is allowed to include or read in the others.
    The files beyond the first are considered additional files that may also
    contain tunable parameters.

    For each file beyond the first this function creates a temporary file with
    preprocessors statements inserted. Occurences of the original filenames in
    the first file are replaced with their temporary counterparts.

    :param kernel_file_list: A list of filenames. The first file in the list is
        allowed to read or include the other files in the list. All files may
        will have access to the tunable parameters.
    :type kernel_file_list: list(string)

    :param params: A dictionary with the tunable parameters for this particular
        instance.
    :type params: dict()

    :param grid: The grid dimensions for this instance. The grid dimensions are
        also inserted into the code as if they are tunable parameters for
        convenience.
    :type grid: tuple()

    :returns: (name, kernel_string, temp_files) where temp_files maps each
        original additional filename to its temporary replacement.
    """
    temp_files = dict()
    kernel_string = get_kernel_string(kernel_file_list[0], params)
    name, kernel_string = prepare_kernel_string(kernel_name, kernel_string, params, grid, threads, block_size_names)
    if len(kernel_file_list) > 1:
        for f in kernel_file_list[1:]:
            # generate temp filename with the same extension
            temp_file = get_temp_filename(suffix="." + f.split(".")[-1])
            temp_files[f] = temp_file
            # add preprocessor statements to the additional file
            _, temp_file_string = prepare_kernel_string(kernel_name, get_kernel_string(f, params), params, grid, threads, block_size_names)
            write_file(temp_file, temp_file_string)
            # replace occurences of the additional file's name in the first
            # kernel_string with the name of the temp file
            kernel_string = kernel_string.replace(f, temp_file)
    return name, kernel_string, temp_files
|
def pool_memcached_connections(func):
    """Function decorator to pool memcached connections.

    Use this to wrap functions that might make multiple calls to memcached.
    This will cause a single memcached client to be shared for all
    connections.
    """
    if isgeneratorfunction(func):
        # Generator functions need a generator wrapper so the pooled
        # client stays open across every yielded item.
        def wrapper(*args, **kwargs):
            with memcached_client():
                for item in func(*args, **kwargs):
                    yield item
    else:
        def wrapper(*args, **kwargs):
            with memcached_client():
                return func(*args, **kwargs)
    return update_wrapper(wrapper, func)
|
def bellman_ford(G, seeds, maxiter=None):
    """Bellman-Ford iteration.

    Parameters
    ----------
    G : sparse matrix
        Graph with real (non-complex) edge weights.
    seeds : array_like of int
        Indices of seed nodes, which start at distance 0.
    maxiter : int, optional
        Maximum number of sweeps; iterate until convergence when None.

    Returns
    -------
    distances : array
        Distance from each node to its nearest seed.
    nearest_seed : array
        Index of the nearest seed for each node (-1 if unreachable).

    References
    ----------
    CLR
    """
    G = asgraph(G)
    N = G.shape[0]
    if maxiter is not None and maxiter < 0:
        raise ValueError('maxiter must be positive')
    if G.dtype == complex:
        # FIX: the original string used a backslash continuation that
        # embedded a run of indentation spaces into the message.
        raise ValueError('Bellman-Ford algorithm only defined for real weights')
    seeds = np.asarray(seeds, dtype='intc')
    distances = np.empty(N, dtype=G.dtype)
    distances[:] = max_value(G.dtype)
    distances[seeds] = 0
    nearest_seed = np.empty(N, dtype='intc')
    nearest_seed[:] = -1
    nearest_seed[seeds] = seeds
    old_distances = np.empty_like(distances)
    it = 0  # renamed from 'iter', which shadowed the builtin
    while maxiter is None or it < maxiter:
        old_distances[:] = distances
        amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances, nearest_seed)
        # Converged: a full sweep changed nothing.
        if (old_distances == distances).all():
            break
        # BUG FIX: the counter was never incremented, so maxiter had no
        # effect and non-converging inputs looped forever.
        it += 1
    return (distances, nearest_seed)
|
def _deleteObject(self, xref):
    """Delete an object given its xref.

    :param xref: cross-reference number of the PDF object to delete.
    :raises ValueError: if the document has already been closed.
    """
    if self.isClosed:
        raise ValueError("operation illegal for closed doc")
    # Delegate to the underlying MuPDF binding.
    return _fitz.Document__deleteObject(self, xref)
|
def _sd_decode ( self , msg ) :
"""SD : Description text ."""
|
desc_ch1 = msg [ 9 ]
show_on_keypad = ord ( desc_ch1 ) >= 0x80
if show_on_keypad :
desc_ch1 = chr ( ord ( desc_ch1 ) & 0x7f )
return { 'desc_type' : int ( msg [ 4 : 6 ] ) , 'unit' : int ( msg [ 6 : 9 ] ) - 1 , 'desc' : ( desc_ch1 + msg [ 10 : 25 ] ) . rstrip ( ) , 'show_on_keypad' : show_on_keypad }
|
def failMeasurement(self, measurementId, deviceName, failureReason=None):
    """Fails the measurement session.

    :param measurementId: the measurement name.
    :param deviceName: the device name.
    :param failureReason: why it failed.
    :return: True if the failure was recorded, False when there is no
        handler for the given measurement/device.
    """
    am, handler = self.getDataHandler(measurementId, deviceName)
    # No handler registered: nothing to fail.
    if handler is None:
        return False
    am.updateDeviceStatus(deviceName, RecordStatus.FAILED, reason=failureReason)
    handler.stop(measurementId)
    return True
|
def _getMonitorInfo(self):
    """Returns info about the attached monitors, in device order.

    [0] is always the primary monitor. Each entry is a dict with the
    monitor handle ('hmon'), its bounding 'rect' as (left, top, right,
    bottom) and the device 'name'.
    """
    monitors = []
    CCHDEVICENAME = 32  # length of the szDevice field in MONITORINFOEX

    def _MonitorEnumProcCallback(hMonitor, hdcMonitor, lprcMonitor, dwData):
        # MONITORINFOEX layout matching GetMonitorInfoW (wide-char variant).
        class MONITORINFOEX(ctypes.Structure):
            _fields_ = [("cbSize", ctypes.wintypes.DWORD), ("rcMonitor", ctypes.wintypes.RECT), ("rcWork", ctypes.wintypes.RECT), ("dwFlags", ctypes.wintypes.DWORD), ("szDevice", ctypes.wintypes.WCHAR * CCHDEVICENAME)]
        lpmi = MONITORINFOEX()
        # cbSize must be pre-set for GetMonitorInfoW to fill the struct.
        lpmi.cbSize = ctypes.sizeof(MONITORINFOEX)
        self._user32.GetMonitorInfoW(hMonitor, ctypes.byref(lpmi))
        # hdc = self._gdi32.CreateDCA(ctypes.c_char_p(lpmi.szDevice), 0, 0, 0)
        monitors.append({"hmon": hMonitor,  # "hdc": hdc,
                         "rect": (lprcMonitor.contents.left, lprcMonitor.contents.top, lprcMonitor.contents.right, lprcMonitor.contents.bottom), "name": lpmi.szDevice})
        return True  # keep enumerating

    MonitorEnumProc = ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(ctypes.wintypes.RECT), ctypes.c_int)
    callback = MonitorEnumProc(_MonitorEnumProcCallback)
    if self._user32.EnumDisplayMonitors(0, 0, callback, 0) == 0:
        raise WindowsError("Unable to enumerate monitors")
    # Clever magic to make the screen with origin of (0,0) [the primary
    # monitor] the first in the list; remaining monitors sort by name.
    monitors.sort(key=lambda x: (not (x["rect"][0] == 0 and x["rect"][1] == 0), x["name"]))
    return monitors
|
def _parse_dependencies(string):
    """Parse a parenthesized dependency list and sort it into buildable
    and given dependencies.

    Returns a (buildable, given, remainder) tuple, where *remainder* is
    the input with the parsed '(...)' section stripped off.
    """
    contents = _get_contents_between(string, '(', ')')
    dependencies = contents.split(',')
    _check_parameters(dependencies, ('?',))
    buildable = []
    given = []
    for dep in dependencies:
        # A leading '?' marks a dependency that is given rather than built.
        if dep[0] == '?':
            given.append(dep[1:])
        else:
            buildable.append(dep)
    remainder = string[string.index(')') + 1:]
    return buildable, given, remainder
|
def read(cls, dstore):
    """:param dstore: a DataStore instance
    :returns: a :class:`CompositeRiskModel` instance
    """
    oqparam = dstore['oqparam']
    tmap = (dstore['taxonomy_mapping'] if 'taxonomy_mapping' in dstore else {})
    crm = dstore.getitem('risk_model')
    # building dictionaries riskid -> loss_type -> risk_func
    fragdict, vulndict, consdict, retrodict = (AccumDict(), AccumDict(), AccumDict(), AccumDict())
    fragdict.limit_states = crm.attrs['limit_states']
    for quoted_id, rm in crm.items():
        # Keys are stored URL-quoted in the datastore.
        riskid = unquote_plus(quoted_id)
        fragdict[riskid] = {}
        vulndict[riskid] = {}
        consdict[riskid] = {}
        retrodict[riskid] = {}
        for lt_kind in rm:
            # Dataset names have the form '<loss_type>-<kind>'.
            lt, kind = lt_kind.rsplit('-', 1)
            rf = dstore['risk_model/%s/%s' % (quoted_id, lt_kind)]
            if kind == 'consequence':
                consdict[riskid][lt, kind] = rf
            elif kind == 'fragility':  # rf is a FragilityFunctionList
                try:
                    rf = rf.build(fragdict.limit_states, oqparam.continuous_fragility_discretization, oqparam.steps_per_interval)
                except ValueError as err:
                    # Prefix the risk id so the failing model is identifiable.
                    raise ValueError('%s: %s' % (riskid, err))
                fragdict[riskid][lt, kind] = rf
            else:  # rf is a vulnerability function
                rf.init()
                if lt.endswith('_retrofitted'):
                    # strip '_retrofitted', since len('_retrofitted') == 12
                    retrodict[riskid][lt[:-12], kind] = rf
                else:
                    vulndict[riskid][lt, kind] = rf
    return CompositeRiskModel(oqparam, tmap, fragdict, vulndict, consdict, retrodict)
|
def route(self, uri, methods=frozenset({"GET"}), host=None, strict_slashes=None, stream=False, version=None, name=None, ):
    """Decorate a function to be registered as a route.

    :param uri: path of the URL
    :param methods: list or tuple of methods allowed
    :param host:
    :param strict_slashes:
    :param stream:
    :param version:
    :param name: user defined route name for url_for
    :return: decorated function
    """
    # Fix case where the user did not prefix the URL with a /
    # and will probably get confused as to why it's not working
    if not uri.startswith("/"):
        uri = "/" + uri
    if stream:
        # Any streaming route marks the whole app as request-streaming.
        self.is_request_stream = True
    if strict_slashes is None:
        # Fall back to the app-level strict_slashes setting.
        strict_slashes = self.strict_slashes

    def response(handler):
        # Every handler must accept at least the `request` parameter.
        args = list(signature(handler).parameters.keys())
        if not args:
            raise ValueError("Required parameter `request` missing " "in the {0}() route?".format(handler.__name__))
        if stream:
            handler.is_stream = stream
        self.router.add(uri=uri, methods=methods, handler=handler, host=host, strict_slashes=strict_slashes, version=version, name=name, )
        return handler
    return response
|
def distinct(self, field=None):
    """Mark the query as DISTINCT.

    With no *field* this produces ``select distinct *``; with a field name
    such as ``'name'`` it produces ``select distinct(name)``.
    Returns self so calls can be chained.
    """
    if field is not None:
        self.distinct_field = field
    else:
        # Record a bare 'distinct' function call on the query.
        self.funcs.append(('distinct', (), {}))
    return self
|
def list_path_traversal(path):
    '''Returns a full list of directories leading up to, and including, a path.

    So list_path_traversal('/path/to/salt') would return:
        ['/', '/path', '/path/to', '/path/to/salt']
    in that order.

    This routine has been tested on Windows systems as well.
    list_path_traversal('c:\\path\\to\\salt') on Windows would return:
        ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
    '''
    traversal = [path]
    head, tail = os.path.split(path)
    if not tail:
        # Paths with trailing separators split into an empty tail;
        # restart the traversal from the head instead.
        traversal = [head]
        head, tail = os.path.split(head)
    # Walk upwards until the head stops changing (root reached).
    while head != traversal[0]:
        traversal.insert(0, head)
        head, tail = os.path.split(head)
    return traversal
|
def main():
    """Fetches hek data and makes thematic maps as requested."""
    args = get_args()
    config = Config(args.config)
    # Load dates: either a file of dates (first whitespace-separated token
    # per line) or a single date string on the command line.
    if os.path.isfile(args.dates):
        with open(args.dates) as f:
            dates = [dateparser.parse(line.split(" ")[0]) for line in f.readlines()]
    else:  # assume it's a date
        dates = [dateparser.parse(args.dates)]
    if args.verbose:
        print("Dates are:")
        for date in dates:
            print(date)
    for date in dates:
        if args.verbose:
            print('Processing {}'.format(date))
        suvi_data = Fetcher(date, ['suvi-l2-ci195'], suvi_composite_path=config.suvi_composite_path).fetch(multithread=False)['suvi-l2-ci195']
        # Skip dates for which no SUVI composite could be fetched.
        if suvi_data[0] is not None:
            config.expert = 'HEK'
            responses = query_hek(date)
            thmap = make_thmap(suvi_data, responses, config)
            # Write the thematic map out as a FITS file named by timestamp.
            Outgest(os.path.join(args.output, "thmap_hek_{}.fits".format(date.strftime("%Y%m%d%H%M%S"))), thmap, {"c195": suvi_data[0], "suvi-l2-ci195": suvi_data[0]}, args.config).save()
|
def raw_sensor_count(self):
    """Returns the raw integer ADC count from the sensor.

    Note: must be divided depending on the max. sensor resolution
    to get floating point celsius.

    :returns: the raw value from the sensor ADC
    :rtype: int
    :raises NoSensorFoundError: if the sensor could not be found
    :raises SensorNotReadyError: if the sensor is not ready yet
    """
    # Two's-complement reading; the MSB comes after the LSB!
    hex_bytes = self.raw_sensor_strings[1].split()
    # Reassemble MSB+LSB and parse the 16-bit hex value.
    raw = int(hex_bytes[1] + hex_bytes[0], 16)
    # Sign bit clear: value is positive and needs no processing;
    # otherwise convert from 16-bit two's complement.
    return raw if raw >> 15 == 0 else raw - (1 << 16)
|
def p_UnionType(p):
    """UnionType : "(" UnionMemberType or UnionMemberType UnionMemberTypes ")" """
    # NOTE(review): in ply the docstring above IS the grammar production —
    # do not edit it without intentionally changing the parser. The exact
    # quoting/spacing was reconstructed from garbled input; verify upstream.
    # A union contains at least two member types plus any further members.
    t = [p[2]] + [p[4]] + p[5]
    p[0] = model.UnionType(t=t)
|
def map_dataarray(self, func, x, y, **kwargs):
    """Apply a plotting function to a 2d facet's subset of the data.

    This is more convenient and less general than ``FacetGrid.map``

    Parameters
    ----------
    func : callable
        A plotting function with the same signature as a 2d xarray
        plotting method such as `xarray.plot.imshow`
    x, y : string
        Names of the coordinates to plot on x, y axes
    kwargs :
        additional keyword arguments to func

    Returns
    -------
    self : FacetGrid object
    """
    if kwargs.get('cbar_ax', None) is not None:
        raise ValueError('cbar_ax not supported by FacetGrid.')
    cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(func, kwargs, self.data.values)
    self._cmap_extend = cmap_params.get('extend')
    # Order is important
    func_kwargs = kwargs.copy()
    func_kwargs.update(cmap_params)
    # Colorbar and labels are added once for the grid, not per axes.
    func_kwargs.update({'add_colorbar': False, 'add_labels': False})
    # Get x, y labels for the first subplot
    x, y = _infer_xy_labels(darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, imshow=func.__name__ == 'imshow', rgb=kwargs.get('rgb', None))
    for d, ax in zip(self.name_dicts.flat, self.axes.flat):
        # None is the sentinel value for an empty facet slot.
        if d is not None:
            subset = self.data.loc[d]
            mappable = func(subset, x=x, y=y, ax=ax, **func_kwargs)
            self._mappables.append(mappable)
    # NOTE(review): duplicate of the assignment above; kept as-is.
    self._cmap_extend = cmap_params.get('extend')
    self._finalize_grid(x, y)
    if kwargs.get('add_colorbar', True):
        self.add_colorbar(**cbar_kwargs)
    return self
|
def _activate(self):
    """Activates the stream."""
    streamer = self.streamer
    if six.callable(streamer):
        # A callable streamer is invoked to create a fresh stream.
        self.stream_ = streamer(*self.args, **self.kwargs)
    else:
        # Otherwise the streamer is assumed iterable and used directly.
        self.stream_ = iter(streamer)
|
def svalue(self, value):
    """Change of serialized value.

    Nonify the parsed value as well, so it can be re-derived from the new
    serialized form.

    :param str value: serialized value to use.
    """
    if value is not None:  # if value is not None
        # Invalidate the cached parsed value and any previous parse error.
        self._value = None
        self._error = None
    # NOTE(review): indentation reconstructed from flattened input — the
    # serialized value is assumed to always be stored, even when None;
    # confirm against the upstream source.
    self._svalue = value
|
def count(self):
    """Explicit count of the number of items.

    For lazy or distributed data, will force a computation.
    """
    mode = self.mode
    if mode == 'spark':
        # Distributed: count via the underlying RDD.
        return self.tordd().count()
    elif mode == 'local':
        # Local: total number of elements in the backing array.
        return prod(self.values.values.shape)
|
def process_account(account_info):
    """Scan all buckets in an account and schedule processing."""
    log = logging.getLogger('salactus.bucket-iterator')
    log.info("processing account %s", account_info)
    session = get_session(account_info)
    client = session.client('s3', config=s3config)
    buckets = client.list_buckets()['Buckets']
    # Record the account config and each bucket's creation date.
    connection.hset('bucket-accounts', account_info['name'], json.dumps(account_info))
    for b in buckets:
        connection.hset('bucket-ages', bucket_id(account_info, b['Name']), b['CreationDate'].isoformat())
    # Apply the optional bucket allow-list and deny-list from the config.
    account_buckets = account_info.pop('buckets', None)
    buckets = [n['Name'] for n in buckets if not account_buckets or n['Name'] in account_buckets]
    account_not_buckets = account_info.pop('not-buckets', None)
    buckets = [n for n in buckets if not account_not_buckets or n not in account_not_buckets]
    log.info("processing %d buckets in account %s", len(buckets), account_info['name'])
    # Fan out the work in batches of 50 buckets per invocation.
    for bucket_set in chunks(buckets, 50):
        invoke(process_bucket_set, account_info, bucket_set)
|
def client_cookie_jar(self):
    """Return internal cookie jar that must be used as HTTP-request cookies.

    see :class:`.WHTTPCookieJar`

    :return: WHTTPCookieJar
    """
    jar = WHTTPCookieJar()
    headers = self.get_headers('Cookie')
    if headers is None:
        headers = tuple()
    # Each Cookie header line may carry several cookies.
    for header_text in headers:
        for cookie in WHTTPCookieJar.import_header_text(header_text):
            jar.add_cookie(cookie)
    # Hand out a read-only view of the jar.
    return jar.ro()
|
def cnst_A(self, X, Xf=None):
    r"""Compute :math:`A \mathbf{x}` component of ADMM problem
    constraint. In this case :math:`A \mathbf{x} = (G_r^T \;\;
    G_c^T \;\; H)^T \mathbf{x}`.
    """
    # Compute the DFT of X on demand when the caller did not supply it.
    freq = sl.rfftn(X, axes=self.axes) if Xf is None else Xf
    return sl.irfftn(self.GAf * freq[..., np.newaxis], self.axsz, axes=self.axes)
|
def bethe_lattice(energy, hopping):
    """Density of states of the Bethe lattice in infinite dimensions."""
    # Clip to the band edges [-2t, 2t] so the square root stays real
    # (the DOS is zero outside the band).
    clipped = np.asarray(energy).clip(-2 * hopping, 2 * hopping)
    return np.sqrt(4 * hopping ** 2 - clipped ** 2) / (2 * np.pi * hopping ** 2)
|
def to_naf(self):
    """Converts the object from KAF to NAF in place (no-op otherwise)."""
    if self.type != 'KAF':
        return
    self.type = 'NAF'
    # NAF word-form nodes use 'id' where KAF used 'wid'.
    for wf_node in self.__get_wf_nodes():
        wf_node.set('id', wf_node.get('wid'))
        del wf_node.attrib['wid']
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'text' ) and self . text is not None :
_dict [ 'text' ] = self . text
if hasattr ( self , 'score' ) and self . score is not None :
_dict [ 'score' ] = self . score
return _dict
|
def trocar_codigo_de_ativacao(self, novo_codigo_ativacao, opcao=constantes.CODIGO_ATIVACAO_REGULAR, codigo_emergencia=None):
    """Function ``TrocarCodigoDeAtivacao`` per the SAT ER spec, item 6.1.15.
    Changes the activation code of the SAT equipment.

    :param str novo_codigo_ativacao: The new activation code chosen by the
        taxpayer.

    :param int opcao: Indicates whether the currently configured activation
        code should be used (a regular code, defined by the taxpayer) or an
        emergency code. Must be the value of one of the constants
        :attr:`satcomum.constantes.CODIGO_ATIVACAO_REGULAR` (default) or
        :attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA`.
        No validation is performed before the function is actually
        invoked. However, if the chosen option is
        ``CODIGO_ATIVACAO_EMERGENCIA``, then the ``codigo_emergencia``
        argument will be checked and must evaluate to true.

    :param str codigo_emergencia: The emergency activation code, which is
        defined by the manufacturer of the SAT equipment. This code should
        be used when the user has lost the regular activation code and
        needs to define a new one. Note that the ``opcao`` argument must be
        given as :attr:`satcomum.constantes.CODIGO_ATIVACAO_EMERGENCIA` for
        this emergency code to be considered.

    :return: Returns the SAT function response *verbatim*.
    :rtype: string

    :raises ValueError: If the new activation code evaluates to false
        (for example, an empty string), or if the emergency code evaluates
        to false when the emergency activation code option was chosen.

    .. warning::

        The arguments of the ``TrocarCodigoDeAtivacao`` function require
        the new activation code to be specified twice (two arguments with
        the same content, as confirmation). This method will simply pass
        the ``novo_codigo_ativacao`` argument twice to the SAT function,
        keeping the activation-code confirmation out of the scope of this
        API.
    """
    if not novo_codigo_ativacao:
        raise ValueError('Novo codigo de ativacao invalido: {!r}'.format(novo_codigo_ativacao))
    codigo_ativacao = self._codigo_ativacao
    if opcao == constantes.CODIGO_ATIVACAO_EMERGENCIA:
        # The emergency code replaces the configured code, but only when
        # actually provided.
        if codigo_emergencia:
            codigo_ativacao = codigo_emergencia
        else:
            raise ValueError('Codigo de ativacao de emergencia invalido: ' '{!r} (opcao={!r})'.format(codigo_emergencia, opcao))
    # The new code is passed twice, as confirmation (see warning above).
    return self.invocar__TrocarCodigoDeAtivacao(self.gerar_numero_sessao(), codigo_ativacao, opcao, novo_codigo_ativacao, novo_codigo_ativacao)
|
def actualize_source_type(self, sources, prop_set):
    """Helper for 'actualize_sources'.

    Actualizes each source with the scanner appropriate for its type
    (if any) and returns the actualized virtual targets.
    """
    assert is_iterable_typed(sources, VirtualTarget)
    assert isinstance(prop_set, property_set.PropertySet)
    actualized = []
    for source in sources:
        # FIXME: what's this?
        # if isinstance(source, str):
        #     source = self.manager_.get_object(source)
        source_type = source.type()
        # Only typed sources get a scanner; untyped ones are actualized bare.
        scanner = b2.build.type.get_scanner(source_type, prop_set) if source_type else None
        actualized.append(source.actualize(scanner))
    return actualized
|
def next_block(self):
    """Produce the next compressed block.

    Starts by trying to overshoot the desired compressed block size, then
    reduces the input byte count one by one until the compressed output
    fits the target block size ``self.bs``.  (This could probably be
    improved.)

    Returns ``(crc32, size, data)`` where ``size`` is the number of input
    bytes consumed and ``data`` the compressed output, or ``None`` when the
    input is exhausted (or when no input size yields output within the
    target block size).
    """
    assert self.pos <= self.input_len
    if self.pos == self.input_len:
        # All input consumed.
        return None
    # Overshoot: grow the trial input size until the compressed output
    # reaches at least self.bs bytes, or the request gets clamped.
    i = self.START_OVERSHOOT
    while True:
        try_size = int(self.bs * i)
        size = self.check_request_size(try_size)
        c, d = self.compress_next_chunk(size)
        if size != try_size:
            # check_request_size clamped the request (presumably end of
            # input) -- nothing larger is available, so stop growing.
            break
        if len(d) < self.bs:
            i += self.OVERSHOOT_INCREASE
        else:
            break
    # Reduce by one byte until we hit the target
    while True:
        if len(d) <= self.bs:
            # Target met: commit the compressor state and advance.
            self.c = c
            # self.c = self.factory()
            crc32 = zlib.crc32(self.get_input(size), 0xffffffff) & 0xffffffff
            self.pos += size
            self.compressed_bytes += len(d)
            return crc32, size, d
        size -= 1
        if size == 0:
            # Even a single input byte compresses above the target size.
            return None
        c, d = self.compress_next_chunk(size)
|
def bitwise_xor(bs0: str, bs1: str) -> str:
    """Calculate the bitwise XOR of two equal-length bit strings.

    :param bs0: String of 0's and 1's representing a number in binary representation
    :param bs1: String of 0's and 1's representing a number in binary representation
    :return: String of 0's and 1's (zero-padded to the input length)
        representing the XOR between bs0 and bs1
    :raises ValueError: If the bit strings differ in length (or contain
        characters other than 0/1, via ``int(..., 2)``).
    """
    if len(bs0) != len(bs1):
        raise ValueError("Bit strings are not of equal length")
    n_bits = len(bs0)
    # Use the native ^ operator and a zero-padded binary format spec
    # instead of operator.xor plus a module-level format constant.
    return format(int(bs0, 2) ^ int(bs1, 2), "0{}b".format(n_bits))
|
def saxon6(self, elem, **params):
    """Use Saxon6 to process the element.

    If the XSLT has a filename (fn), use that. Otherwise, make temp.

    :param elem: the XML element to transform.
    :param params: extra XSLT parameters, passed as key=value pairs on the
        saxon command line (values go through %r, so strings are repr-quoted).
    :return: an etree document when the stylesheet's xsl:output method is
        'xml' (or there is no xsl:output element); otherwise the output
        file's content decoded as UTF-8 text.
    :raises RuntimeError: with saxon's (HTML-unescaped) output on failure.
    """
    # Allow overriding the JVM binary via the 'java' environment variable.
    java = os.environ.get('java') or 'java'
    saxon6path = os.path.join(JARS, 'saxon.jar')
    # saxon 6.5.5, included with jing and trang
    with tempfile.TemporaryDirectory() as tempdir:
        if self.fn is None:
            # In-memory stylesheet: serialize it to a temp file for saxon.
            xslfn = os.path.join(tempdir, "xslt.xsl")
            self.write(fn=xslfn)
        else:
            xslfn = self.fn
        srcfn = os.path.join(tempdir, "src.xml")
        outfn = os.path.join(tempdir, "out.xml")
        XML(fn=srcfn, root=elem).write()
        cmd = [java, '-jar', saxon6path, '-o', outfn, srcfn, xslfn] + ["%s=%r" % (key, params[key]) for key in params.keys()]
        log.debug("saxon6: %r " % cmd)
        try:
            subprocess.check_output(cmd)
        except subprocess.CalledProcessError as e:
            # Surface saxon's own error text instead of the generic
            # CalledProcessError; suppress exception chaining.
            error = html.unescape(str(e.output, 'UTF-8'))
            raise RuntimeError(error).with_traceback(sys.exc_info()[2]) from None
        # Return a parsed tree for XML output, raw text for text/html output.
        if self.find(self.root, "xsl:output") is None or self.find(self.root, "xsl:output").get('method') == 'xml':
            return etree.parse(outfn)
        else:
            return open(outfn, 'rb').read().decode('utf-8')
|
def unixjoin(*args):
    """Like os.path.join, but uses forward slashes on win32.

    If any component is absolute, joining restarts from the last absolute
    component, mirroring os.path.join semantics.
    """
    last_abs = None
    for index, component in enumerate(args):
        if isabs(component):
            last_abs = index
    if last_abs is None:
        return '/'.join(args)
    return '/'.join(args[last_abs:])
|
async def serviceViewChanger(self, limit) -> int:
    """Service the view_changer's inBox, outBox and action queues.

    :return: the number of messages successfully serviced
    """
    # Nothing is serviced until the node is ready.
    if not self.isReady():
        return 0
    serviced_out = self.serviceViewChangerOutBox(limit)
    serviced_in = await self.serviceViewChangerInbox(limit)
    return serviced_in + serviced_out
|
def compute_avg_of_tuples(input_tuples):
    """Calculate position-wise averages across a tuple of tuples.

    Parameters:
        input_tuples: A collection of equal-length tuples of numbers.

    Returns:
        A list with the average of the values at each position.

    Example:
        compute_avg_of_tuples(((10, 10, 10, 12), (30, 45, 56, 45), (81, 80, 39, 32), (1, 2, 3, 4)))
        -> [30.5, 34.25, 27.0, 23.25]

        compute_avg_of_tuples(((1, 1, (-5)), (30, (-15), 56), (81, (-60), (-39)), ((-10), 2, 3)))
        -> [25.5, (-18.0), 3.75]
    """
    averages = []
    # zip(*...) transposes the rows into per-position columns.
    for column in zip(*input_tuples):
        averages.append(sum(column) / len(column))
    return averages
|
def set_number_of_annotation_signals(self, number_of_annotations):
    """Set the number of annotation signals (default is 1).

    Optional; call only after opening a file in write mode and before the
    first sample write.  Normally the default suffices -- increase it only
    when you need more annotations than the recording has seconds.
    The value is clamped to the range [1, 64].

    Parameters
    ----------
    number_of_annotations : integer
        Sets the number of annotation signals
    """
    clamped = int(number_of_annotations)
    if clamped > 64:
        clamped = 64
    elif clamped < 1:
        clamped = 1
    self.number_of_annotations = clamped
    self.update_header()
|
def unzip(self, overwrite: bool = False):
    """Flatten a MIZ file into the temp dir.

    Args:
        overwrite: allow overwriting existing files

    Raises:
        FileExistsError: if content was already extracted and ``overwrite`` is False.
        BadZipFile: if the file at ``self.miz_path`` is not a valid zip archive.
        FileNotFoundError: if a mandatory member is missing after extraction.
    """
    # Refuse to clobber a previous extraction unless explicitly allowed.
    if self.zip_content and not overwrite:
        raise FileExistsError(str(self.temp_dir))
    LOGGER.debug('unzipping miz to temp dir')
    try:
        with ZipFile(str(self.miz_path)) as zip_file:
            LOGGER.debug('reading infolist')
            self.zip_content = [f.filename for f in zip_file.infolist()]
            self._extract_files_from_zip(zip_file)
    except BadZipFile:
        # Re-raise with the offending path as the message.
        raise BadZipFile(str(self.miz_path))
    except:  # noqa: E722
        # Log unexpected errors with context, then propagate unchanged.
        LOGGER.exception('error while unzipping miz file: %s', self.miz_path)
        raise
    LOGGER.debug('checking miz content')
    # noinspection PyTypeChecker
    # Every valid MIZ archive must contain these members.
    for miz_item in ['mission', 'options', 'warehouses', 'l10n/DEFAULT/dictionary', 'l10n/DEFAULT/mapResource']:
        if not Path(self.temp_dir.joinpath(miz_item)).exists():
            LOGGER.error('missing file in miz: %s', miz_item)
            raise FileNotFoundError(miz_item)
    self._check_extracted_content()
    LOGGER.debug('all files have been found, miz successfully unzipped')
|
def assertDateTimesFuture(self, sequence, strict=True, msg=None):
    '''Fail if any elements in ``sequence`` are not in the future.

    If the min element is a datetime, "future" is defined as
    anything after ``datetime.now()``; if the min element is a date,
    "future" is defined as anything after ``date.today()``.

    If ``strict=True``, fail unless all elements in ``sequence``
    are strictly greater than ``date.today()``
    (or ``datetime.now()``). If ``strict=False``, fail unless all
    elements in ``sequence`` are greater than or equal to
    ``date.today()`` (or ``datetime.now()``).

    Parameters
    ----------
    sequence : iterable
    strict : bool
    msg : str
        If not provided, the :mod:`marbles.mixins` or
        :mod:`unittest` standard message will be used.

    Raises
    ------
    TypeError
        If ``sequence`` is not iterable.
    TypeError
        If min element in ``sequence`` is not a datetime or date
        object.
    '''
    # collections.Iterable was removed in Python 3.10; collections.abc
    # has been the correct home for the ABCs since Python 3.3.
    if not isinstance(sequence, collections.abc.Iterable):
        raise TypeError('First argument is not iterable')

    # Cannot compare datetime to date, so if dates are provided use
    # date.today(); if datetimes are provided use datetime.today().
    # datetime is checked first because it is a subclass of date.
    smallest = min(sequence)
    if isinstance(smallest, datetime):
        target = datetime.today()
    elif isinstance(smallest, date):
        target = date.today()
    else:
        raise TypeError('Expected iterable of datetime or date objects')

    self.assertDateTimesAfter(sequence, target, strict=strict, msg=msg)
|
def add_data(self, *args):
    """Add data to signer.

    Each argument is converted to binary form and appended to the
    internal data buffer.
    """
    self._data.extend(to_binary(chunk) for chunk in args)
|
def _clean_record(rec):
    """Remove secondary files from record fields, which are currently not supported.

    To be removed later when secondaryFiles added to records.
    """
    if not workflow.is_cwl_record(rec):
        return rec

    def _strip_secondary(node):
        # Non-dicts pass through untouched.
        if not isinstance(node, dict):
            return node
        if "fields" in node:
            # Replace the fields list (in place) with copies stripped of
            # their secondaryFiles entries.
            cleaned = []
            for field in node["fields"]:
                field = utils.deepish_copy(field)
                field.pop("secondaryFiles", None)
                cleaned.append(field)
            node["fields"] = cleaned
            return node
        # No fields at this level: rebuild the mapping, cleaning each value.
        return {key: _strip_secondary(value) for key, value in node.items()}

    return _strip_secondary(rec)
|
def merge_result(res):
    """Merge all items in `res` into a list.

    This command is used when sending a command to multiple nodes
    and the result from each node should be merged into a single
    (de-duplicated) list.
    """
    if not isinstance(res, dict):
        raise ValueError('Value should be of dict type')
    merged = set()
    for values in res.values():
        merged.update(values)
    return list(merged)
|
def breadcrumb_raw(context, label, viewname, *args, **kwargs):
    """Same as breadcrumb but label is not translated."""
    # Escape the label before recording the breadcrumb; the tag itself
    # renders nothing.
    safe_label = escape(label)
    append_breadcrumb(context, safe_label, viewname, args, kwargs)
    return ''
|
def _get_ansible_playbook(self, playbook, **kwargs):
    """Get an instance of AnsiblePlaybook and return it.

    :param playbook: A string containing an absolute path to a
        provisioner's playbook.
    :param kwargs: An optional keyword arguments.
    :return: object
    """
    playbook_instance = ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)
    return playbook_instance
|
def maximum(lhs, rhs):
    """Returns element-wise maximum of the input arrays with broadcasting.

    Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.

    .. note::

        If the corresponding dimensions of two arrays have the same size or one of them has size 1,
        then the arrays are broadcastable to a common shape.

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be compared.
    rhs : scalar or mxnet.ndarray.array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they must be
        broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise maximum of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> z = mx.nd.arange(2).reshape((1,2))
    >>> x.asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> y.asnumpy()
    array([[ 0.],
           [ 1.]], dtype=float32)
    >>> z.asnumpy()
    array([[ 0.,  1.]], dtype=float32)
    >>> mx.nd.maximum(x, 2).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 2.,  2.,  2.]], dtype=float32)
    >>> mx.nd.maximum(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.maximum(y, z).asnumpy()
    array([[ 0.,  1.],
           [ 1.,  1.]], dtype=float32)
    """
    # pylint: disable=no-member, protected-access
    # _ufunc_helper dispatches on operand kinds: broadcast op for two
    # NDArrays, the Python lambda when both are scalars, _maximum_scalar
    # when one operand is a scalar; the trailing None is presumably the
    # reversed-scalar op, unneeded since maximum is commutative -- confirm
    # against _ufunc_helper's signature.
    return _ufunc_helper(lhs, rhs, op.broadcast_maximum, lambda x, y: x if x > y else y, _internal._maximum_scalar, None)
|
def create(self, edgeList=None, excludeEdges=None, networkName=None, nodeList=None, source=None, verbose=False):
    """Create a new network from a list of nodes and edges in an existing source network.

    The SUID of the network and view are returned.

    :param edgeList (string, optional): Specifies a list of edges. The keywords
        all, selected, or unselected select edges by their selection state.
        The pattern COLUMN:VALUE matches rows containing that column value
        (the NAME column by default); multiple COLUMN:VALUE pairs may be
        given as COLUMN1:VALUE1,COLUMN2:VALUE2,...
    :param excludeEdges (string, optional): Unless this is set to true, edges
        that connect nodes in the nodeList are implicitly included.
    :param networkName (string, optional):
    :param nodeList (string, optional): Specifies a list of nodes; same
        keyword and COLUMN:VALUE syntax as ``edgeList``.
    :param source (string, optional): Specifies a network by name, or by SUID
        if the prefix SUID: is used. The keyword CURRENT, or a blank value,
        selects the current network.
    :param verbose: print more
    :returns: {network, view}
    """
    resolved_source = check_network(self, source, verbose=verbose)
    request_params = set_param(["edgeList", "excludeEdges", "networkName", "nodeList", "source"], [edgeList, excludeEdges, networkName, nodeList, resolved_source])
    return api(url=self.__url + "/create", PARAMS=request_params, method="POST", verbose=verbose)
|
def reqs_txt(self):
    """Export a requirements file in txt format.

    Writes one "galaxy_name,version" line per reported role to
    ``self.out_file`` if set, otherwise prints the lines to stdout.
    """
    lines = []
    for role in sorted(self.report["roles"]):
        name = utils.normalize_role(role, self.config)
        galaxy_name = "{0}.{1}".format(self.config["scm_user"], name)
        version_path = os.path.join(self.roles_path, role, "VERSION")
        version = utils.get_version(version_path)
        lines.append("{0},{1}\n".format(galaxy_name, version))
    # join instead of repeated += (which is quadratic).
    role_lines = "".join(lines)
    if self.out_file:
        utils.string_to_file(self.out_file, role_lines)
    else:
        # BUG FIX: `print role_lines` was Python 2 syntax -- a SyntaxError
        # on Python 3, which the rest of this codebase targets.
        print(role_lines)
|
def _get_function_matches(attributes_a, attributes_b, filter_set_a=None, filter_set_b=None):
    """Pair up functions whose attributes mutually match best.

    :param attributes_a: A dict of functions to their attributes
    :param attributes_b: A dict of functions to their attributes

    The following parameters are optional.

    :param filter_set_a: A set to limit attributes_a to the functions in this set.
    :param filter_set_b: A set to limit attributes_b to the functions in this set.
    :returns: A list of tuples of matching objects.
    """
    def _restrict(attributes, allowed):
        # With no filter, keep everything (as a shallow copy).
        if allowed is None:
            return dict(attributes)
        return {func: attrs for func, attrs in attributes.items() if func in allowed}

    filtered_attributes_a = _restrict(attributes_a, filter_set_a)
    filtered_attributes_b = _restrict(attributes_b, filter_set_b)

    # get closest
    closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
    closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)

    # A match (x, y) is good iff x is the unique closest to y and y is the
    # unique closest to x.
    matches = []
    for func_a, candidates in closest_a.items():
        if len(candidates) != 1:
            continue
        func_b = candidates[0]
        if len(closest_b[func_b]) == 1 and closest_b[func_b][0] == func_a:
            matches.append((func_a, func_b))
    return matches
|
def DecryptPrivateKey(self, encrypted_private_key):
    """Decrypt the provided ciphertext with the initialized master key (AES-CBC).

    Args:
        encrypted_private_key (byte string): the ciphertext to be decrypted.

    Returns:
        bytes: the decrypted plaintext.  (The original docstring incorrectly
        said "ciphertext".)
    """
    # A fresh cipher object per call: AES-CBC with the stored key and IV.
    aes = AES.new(self._master_key, AES.MODE_CBC, self._iv)
    # NOTE(review): no unpadding is performed here -- the result may still
    # carry padding bytes; presumably the caller strips them.
    return aes.decrypt(encrypted_private_key)
|
def _enqueue_fs_event ( self , event ) :
"""Watchman filesystem event handler for BUILD / requirements . txt updates . Called via a thread ."""
|
self . _logger . info ( 'enqueuing {} changes for subscription {}' . format ( len ( event [ 'files' ] ) , event [ 'subscription' ] ) )
self . _event_queue . put ( event )
|
def autodoc_tuple2doc(module):
    """Include tuples as `CLASSES` of `ControlParameters` and `RUN_METHODS`
    of `Models` into the respective docstring.

    For each member of `module` that carries one of the tuple attributes
    named in `_name2descr`, a reference list is appended to the member's
    docstring (once per member, tracked via `_loggedtuples`).
    """
    modulename = module.__name__
    for membername, member in inspect.getmembers(module):
        # _name2descr maps tuple attribute names to the heading text used
        # for the generated docstring section.
        for tuplename, descr in _name2descr.items():
            tuple_ = getattr(member, tuplename, None)
            if tuple_:
                logstring = f'{modulename}.{membername}.{tuplename}'
                # Guard against appending the same section twice when this
                # function runs over multiple modules.
                if logstring not in _loggedtuples:
                    _loggedtuples.add(logstring)
                    lst = [f'\n\n\n {descr}:']
                    # CLASSES entries are documented as functions, all
                    # others as classes -- presumably matching how Sphinx
                    # indexes them; confirm against the generated docs.
                    if tuplename == 'CLASSES':
                        type_ = 'func'
                    else:
                        type_ = 'class'
                    for cls in tuple_:
                        lst.append(f' * ' f':{type_}:`{cls.__module__}.{cls.__name__}`' f' {objecttools.description(cls)}')
                    doc = getattr(member, '__doc__')
                    if doc is None:
                        doc = ''
                    member.__doc__ = doc + '\n'.join(l for l in lst)
|
def _request_commit(self, three_pc_key: Tuple[int, int], recipients: List[str] = None) -> bool:
    """Request commit.

    Delegates to ``_request_three_phase_msg`` with the COMMIT message type,
    tracking the request in ``self.requested_commits``.

    :param three_pc_key: (int, int) key identifying the three-phase batch
        -- presumably (viewNo, ppSeqNo); confirm against
        ``_request_three_phase_msg``.
    :param recipients: optional list of node names to ask; ``None`` lets
        the callee choose.
    :return: whatever ``_request_three_phase_msg`` reports (whether the
        request was issued).
    """
    return self._request_three_phase_msg(three_pc_key, self.requested_commits, COMMIT, recipients)
|
def heatmaps_to_keypoints(maps, rois):
    """Extract predicted keypoint locations from heatmaps. Output has shape
    (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
    for each keypoint.

    NOTE(review): as written this returns two arrays -- xy_preds transposed
    to (#rois, #keypoints, 3) with rows (x, y, 1), plus end_scores of shape
    (#rois, #keypoints); the docstring above appears to predate that --
    confirm against callers.
    """
    # This function converts a discrete image coordinate in a HEATMAP_SIZE x
    # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
    # consistency with keypoints_to_heatmap_labels by using the conversion from
    # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
    # continuous coordinate.
    # rois rows are assumed to be (x1, y1, x2, y2) boxes -- TODO confirm.
    offset_x = rois[:, 0]
    offset_y = rois[:, 1]
    widths = rois[:, 2] - rois[:, 0]
    heights = rois[:, 3] - rois[:, 1]
    # Guard against degenerate (zero-sized) boxes.
    widths = np.maximum(widths, 1)
    heights = np.maximum(heights, 1)
    widths_ceil = np.ceil(widths)
    heights_ceil = np.ceil(heights)
    # NCHW to NHWC for use with OpenCV
    maps = np.transpose(maps, [0, 2, 3, 1])
    min_size = 0
    # cfg.KRCNN.INFERENCE_MIN_SIZE
    num_keypoints = maps.shape[3]
    xy_preds = np.zeros((len(rois), 3, num_keypoints), dtype=np.float32)
    end_scores = np.zeros((len(rois), num_keypoints), dtype=np.float32)
    for i in range(len(rois)):
        if min_size > 0:
            roi_map_width = int(np.maximum(widths_ceil[i], min_size))
            roi_map_height = int(np.maximum(heights_ceil[i], min_size))
        else:
            roi_map_width = widths_ceil[i]
            roi_map_height = heights_ceil[i]
        # Scale factors mapping resized-map pixels back to image pixels.
        width_correction = widths[i] / roi_map_width
        height_correction = heights[i] / roi_map_height
        # Upsample the heatmap to the ROI's size before taking the argmax.
        roi_map = cv2.resize(maps[i], (roi_map_width, roi_map_height), interpolation=cv2.INTER_CUBIC)
        # Bring back to CHW
        roi_map = np.transpose(roi_map, [2, 0, 1])
        # roi_map_probs = scores_to_probs(roi_map.copy())
        w = roi_map.shape[2]
        # Flattened argmax per keypoint channel, then unravel to (x, y).
        pos = roi_map.reshape(num_keypoints, -1).argmax(axis=1)
        x_int = pos % w
        y_int = (pos - x_int) // w
        # assert (roi_map_probs[k, y_int, x_int] ==
        #         roi_map_probs[k, :, :].max())
        # Heckbert continuous-coordinate conversion, then scale to image.
        x = (x_int + 0.5) * width_correction
        y = (y_int + 0.5) * height_correction
        xy_preds[i, 0, :] = x + offset_x[i]
        xy_preds[i, 1, :] = y + offset_y[i]
        xy_preds[i, 2, :] = 1
        end_scores[i, :] = roi_map[np.arange(num_keypoints), y_int, x_int]
    return np.transpose(xy_preds, [0, 2, 1]), end_scores
|
def replace_payment_token_by_id(cls, payment_token_id, payment_token, **kwargs):
    """Replace PaymentToken

    Replace all attributes of PaymentToken
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_payment_token_by_id(payment_token_id, payment_token, async=True)
    >>> result = thread.get()

    :param async bool
    :param str payment_token_id: ID of paymentToken to replace (required)
    :param PaymentToken payment_token: Attributes of paymentToken to replace (required)
    :return: PaymentToken
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand back the request thread directly.
        return cls._replace_payment_token_by_id_with_http_info(payment_token_id, payment_token, **kwargs)
    # Synchronous: return the response data.
    return cls._replace_payment_token_by_id_with_http_info(payment_token_id, payment_token, **kwargs)
|
def launch_server():
    """Launch the django development server at 127.0.0.1:8000.

    Runs ``manage.py runserver --nostatic`` from the directory containing
    this file, restoring the caller's working directory afterwards.
    Blocks until the server process exits.
    """
    original_dir = os.getcwd()
    manage_dir = os.path.dirname(os.path.abspath(__file__))
    # Fixes: removed the unused `run` flag and a leftover debug print,
    # and restore the cwd even if runserver is interrupted.
    os.chdir(manage_dir)
    try:
        # os.system blocks until the server is stopped (e.g. Ctrl-C).
        os.system('python manage.py runserver --nostatic')
    finally:
        os.chdir(original_dir)
|
def read(filename):
    """Read an unstructured mesh with added data.

    :param filename: The file to read from.
    :type filename: str
    :returns: a tuple ``(mesh, point_data, cell_data, field_data)`` where
        ``mesh`` is a MeshTetra or MeshTri built from the used nodes only.
    :raises RuntimeError: if the file contains neither tetra nor triangle cells.
    """
    mesh = meshio.read(filename)
    # Prefer tetrahedral cells, then triangles; _sanitize makes sure to
    # include the used nodes only.
    for cell_key, mesh_factory in (("tetra", MeshTetra), ("triangle", MeshTri)):
        if cell_key in mesh.cells:
            points, cells = _sanitize(mesh.points, mesh.cells[cell_key])
            return (mesh_factory(points, cells), mesh.point_data, mesh.cell_data, mesh.field_data)
    raise RuntimeError("Unknown mesh type.")
|
def contains_python_files_or_subdirs(folder):
    """Check (recursively) whether the directory contains .py or .pyc files.

    :param folder: path of the directory tree to inspect.
    :return: True if any file anywhere under ``folder`` ends in .py or .pyc.
    """
    # os.walk is already recursive, so a single pass covers every subdir.
    # The original additionally re-walked each subdirectory *name* (not its
    # path), which scanned unrelated cwd-relative directories.
    for _root, _dirs, files in os.walk(folder):
        if any(filename.endswith(('.py', '.pyc')) for filename in files):
            return True
    return False
|
def image_update(call=None, kwargs=None):
    '''Replaces the image template contents.

    .. versionadded:: 2016.3.0

    image_id
        The ID of the image to update. Can be used instead of ``image_name``.

    image_name
        The name of the image to update. Can be used instead of ``image_id``.

    path
        The path to a file containing the template of the image. Syntax within the
        file can be the usual attribute=value or XML. Can be used instead of ``data``.

    data
        Contains the template of the image. Syntax can be the usual attribute=value
        or XML. Can be used instead of ``path``.

    update_type
        There are two ways to update an image: ``replace`` the whole template
        or ``merge`` the new template with the existing one.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f image_update opennebula image_id=0 file=/path/to/image_update_file.txt update_type=replace
        salt-cloud -f image_update opennebula image_name="Ubuntu 14.04" update_type=merge \\
            data='NAME="Ubuntu Dev" PATH="/home/one_user/images/ubuntu_desktop.img" \\
            DESCRIPTION="Ubuntu 14.04 for development."'
    '''
    if call != 'function':
        # BUG FIX: the message previously named 'image_allocate'.
        raise SaltCloudSystemExit('The image_update function must be called with -f or --function.')

    if kwargs is None:
        kwargs = {}

    image_id = kwargs.get('image_id', None)
    image_name = kwargs.get('image_name', None)
    path = kwargs.get('path', None)
    data = kwargs.get('data', None)
    update_type = kwargs.get('update_type', None)
    update_args = ['replace', 'merge']

    if update_type is None:
        raise SaltCloudSystemExit('The image_update function requires an \'update_type\' to be provided.')

    # OpenNebula encodes replace as 0 and merge as 1.
    if update_type == update_args[0]:
        update_number = 0
    elif update_type == update_args[1]:
        update_number = 1
    else:
        raise SaltCloudSystemExit('The update_type argument must be either {0} or {1}.'.format(update_args[0], update_args[1]))

    # image_id wins over image_name; one of the two is required.
    if image_id:
        if image_name:
            log.warning('Both the \'image_id\' and \'image_name\' arguments were provided. ' '\'image_id\' will take precedence.')
    elif image_name:
        image_id = get_image_id(kwargs={'name': image_name})
    else:
        raise SaltCloudSystemExit('The image_update function requires either an \'image_id\' or an ' '\'image_name\' to be provided.')

    # data wins over path; one of the two is required.
    if data:
        if path:
            log.warning('Both the \'data\' and \'path\' arguments were provided. ' '\'data\' will take precedence.')
    elif path:
        with salt.utils.files.fopen(path, mode='r') as rfh:
            data = rfh.read()
    else:
        raise SaltCloudSystemExit('The image_update function requires either \'data\' or a file \'path\' ' 'to be provided.')

    server, user, password = _get_xml_rpc()
    auth = ':'.join([user, password])
    response = server.one.image.update(auth, int(image_id), data, int(update_number))

    ret = {
        'action': 'image.update',
        'updated': response[0],
        'image_id': response[1],
        'error_code': response[2],
    }
    return ret
|
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage):
    """Internal usage only.

    Validates an RGB24 buffer (string/bytes, numpy array, or nested
    sequence of rows), infers width/height when not supplied, and
    dispatches to the raw or jpeg encoder.
    """
    if not is_seq(rgb24):
        raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple " "or bytearray) as first argument")
    is_str = is_pure_str(rgb24)
    if is_str:
        # A flat string carries no shape information of its own.
        if not width or not height:
            raise ValueError("When giving a string as data, you must also " "supply width and height")
    if np and isinstance(rgb24, np.ndarray):
        if rgb24.ndim != 3:
            if not width or not height:
                raise ValueError("When giving a non 2D numpy array, width and " "height must be supplied")
            if rgb24.nbytes / 3 != width * height:
                raise ValueError("numpy array size mismatch")
        else:
            if rgb24.itemsize != 1:
                raise TypeError("Expected numpy array with itemsize == 1")
        if not rgb24.flags.c_contiguous:
            raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported")
        if not rgb24.flags.aligned:
            raise TypeError("Currently, only contiguous, aligned numpy arrays " "are supported")
    if not is_str and (not width or not height):
        # Infer dimensions from a sequence of rows.
        height = len(rgb24)
        if height < 1:
            raise IndexError("Expected sequence with at least one row")
        row0 = rgb24[0]
        if not is_seq(row0):
            raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or " "bytearray) inside a sequence")
        width = len(row0)
        if is_pure_str(row0) or type(row0) == bytearray:
            # Byte rows pack 3 bytes per pixel. BUG FIX: use integer
            # division -- plain '/' yields a float width on Python 3.
            width //= 3
    if format == _ImageFormat.RawImage:
        self._encode_rgb24(rgb24, width, height)
    elif format == _ImageFormat.JpegImage:
        self._encode_jpeg_rgb24(rgb24, width, height, quality)
|
async def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE):
    """Return an iterator to yield chunks of chunk_size bytes from the raw
    stream."""
    # Prime the first read, then loop until the stream reports EOF
    # by returning an empty bytes object.
    chunk = await self.read(chunk_size)
    while chunk != b"":
        await yield_(chunk)
        chunk = await self.read(chunk_size)
|
def _set_ldp_ecmp(self, v, load=False):
    """Setter method for ldp_ecmp, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_ecmp (uint32)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_ldp_ecmp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ldp_ecmp() directly.

    NOTE: pyangbind auto-generated setter -- do not hand-edit the
    YANGDynClass construction; regenerate from the YANG model instead.
    """
    # Unwrap values that carry their original union type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/coerce against the YANG-derived restricted uint32 type
        # (effective range 1..16, default 1).
        t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="ldp-ecmp", rest_name="load-sharing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Number of load-sharing paths: range 1-16, default is 1', u'cli-full-no': None, u'alt-name': u'load-sharing'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """ldp_ecmp must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..16']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="ldp-ecmp", rest_name="load-sharing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Number of load-sharing paths: range 1-16, default is 1', u'cli-full-no': None, u'alt-name': u'load-sharing'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""", })
    self.__ldp_ecmp = t
    if hasattr(self, '_set'):
        self._set()
|
def valid_ips(self):
    """
    :return:
        A list of unicode strings of valid IP addresses for the certificate
    """
    # Lazily computed from the subjectAltName extension and cached.
    if self._valid_ips is None:
        san = self.subject_alt_name_value
        if san:
            self._valid_ips = [entry.native for entry in san if entry.name == 'ip_address']
        else:
            self._valid_ips = []
    return self._valid_ips
|
def _formatOntologyTerm ( self , element , element_type ) :
"""Formats the ontology terms for query"""
|
elementClause = None
if isinstance ( element , dict ) and element . get ( 'terms' ) :
elements = [ ]
for _term in element [ 'terms' ] :
if _term . get ( 'id' ) :
elements . append ( '?{} = <{}> ' . format ( element_type , _term [ 'id' ] ) )
else :
elements . append ( '?{} = <{}> ' . format ( element_type , self . _toNamespaceURL ( _term [ 'term' ] ) ) )
elementClause = "({})" . format ( " || " . join ( elements ) )
return elementClause
|
def kls_name(self):
    """Determine python name for group."""
    # A group without a named parent gets a plain Test<name> class name.
    if not self.parent or not self.parent.name:
        return 'Test{0}'.format(self.name)
    # Otherwise nest under the parent's name, stripping its Test prefix
    # so the prefix appears only once.
    parent_name = self.parent.kls_name
    if parent_name.startswith('Test'):
        parent_name = parent_name[4:]
    return 'Test{0}_{1}'.format(parent_name, self.name)
|
def flush ( self ) :
    """Write self to .pyftpsync-meta.json.

    Depending on state this either skips the write (dry-run or unmodified),
    removes a now-empty meta file, or serializes ``self.dir`` as JSON and
    writes it to ``self.filename`` in the target's current directory.
    The ``modified_list`` / ``modified_sync`` flags are cleared on exit
    regardless of which branch ran.
    """
    # We DO write meta files even on read - only targets , but not in dry - run mode
    # if self . target . readonly :
    # write ( " DirMetadata . flush ( % s ) : read - only ; nothing to do " % self . target )
    # return
    assert self . path == self . target . cur_dir
    if self . target . dry_run : # write ( " DirMetadata . flush ( % s ) : dry - run ; nothing to do " % self . target )
        pass
    elif self . was_read and len ( self . list ) == 0 and len ( self . peer_sync ) == 0 :
        # The previously-read meta file no longer tracks anything: drop it.
        write ( "Remove empty meta data file: {}" . format ( self . target ) )
        self . target . remove_file ( self . filename )
    elif not self . modified_list and not self . modified_sync : # write ( " DirMetadata . flush ( % s ) : unmodified ; nothing to do " % self . target )
        pass
    else :
        # Stamp bookkeeping/version fields before serializing.
        self . dir [ "_disclaimer" ] = "Generated by https://github.com/mar10/pyftpsync"
        self . dir [ "_time_str" ] = pretty_stamp ( time . time ( ) )
        self . dir [ "_file_version" ] = self . VERSION
        self . dir [ "_version" ] = __version__
        self . dir [ "_time" ] = time . mktime ( time . gmtime ( ) )
        # We always save utf - 8 encoded .
        # ` ensure _ ascii ` would escape all bytes > 127 as ` \ x12 ` or ` \ u1234 ` ,
        # which makes it hard to read , so we set it to false .
        # ` sort _ keys ` converts binary keys to unicode using utf - 8 , so we
        # must make sure that we don ' t pass cp1225 or other encoded data .
        data = self . dir
        opts = { "indent" : 4 , "sort_keys" : True , "ensure_ascii" : False }
        if compat . PY2 : # The ` encoding ` arg defaults to utf - 8 on Py2 and was removed in Py3
            # opts [ " encoding " ] = " utf - 8"
            # Python 2 has problems with mixed keys ( str / unicode )
            data = decode_dict_keys ( data , "utf-8" )
        if not self . PRETTY :
            # Compact encoding (no indentation, tight separators).
            opts [ "indent" ] = None
            opts [ "separators" ] = ( "," , ":" )
        s = json . dumps ( data , ** opts )
        self . target . write_text ( self . filename , s )
        if self . target . synchronizer :
            self . target . synchronizer . _inc_stat ( "meta_bytes_written" , len ( s ) )
    # Mark as clean no matter which branch ran above.
    self . modified_list = False
    self . modified_sync = False
|
def setRules(self, rules):
    """Sets all the rules for this builder.

    :param rules | [<XQueryRule>, ..] or {term: <XQueryRule>, ..}
    :return <bool> True when the rules were accepted, False otherwise.
    """
    accepted = False
    if type(rules) in (list, tuple):
        # Key each rule by its term.
        self._rules = dict([(rule.term(), rule) for rule in rules])
        accepted = True
    elif type(rules) == dict:
        self._rules = rules.copy()
        accepted = True
    if accepted:
        self.updateRules()
    return accepted
|
def combine_neb_plots ( neb_analyses , arranged_neb_analyses = False , reverse_plot = False ) :
    """Stitch several NEB segments into a single combined NEBAnalysis.

    neb_analyses: a list of NEBAnalysis objects
    arranged_neb_analyses: The code connects two end points with the
    smallest-energy difference. If all end points have very close energies, it's
    likely to result in an inaccurate connection. Manually arrange neb_analyses
    if the combined plot is not as expected compared with all individual plots.
    E.g., if there are two NEBAnalysis objects to combine, arrange in such a
    way that the end-point energy of the first NEBAnalysis object is the
    start-point energy of the second NEBAnalysis object.
    Note that the barrier labeled in y-axis in the combined plot might be
    different from that in the individual plot due to the reference energy used.
    reverse_plot: reverse the plot or percolation direction.
    return: a NEBAnalysis object
    """
    x = StructureMatcher ( )
    for neb_index in range ( len ( neb_analyses ) ) :
        if neb_index == 0 :
            # Seed the running (combined) arrays with the first segment.
            neb1 = neb_analyses [ neb_index ]
            neb1_energies = list ( neb1 . energies )
            neb1_structures = neb1 . structures
            neb1_forces = neb1 . forces
            neb1_r = neb1 . r
            continue
        neb2 = neb_analyses [ neb_index ]
        neb2_energies = list ( neb2 . energies )
        # Sanity check: the new segment must share an end-point structure
        # with the running path, otherwise the two cannot be connected.
        matching = 0
        for neb1_s in [ neb1_structures [ 0 ] , neb1_structures [ - 1 ] ] :
            if x . fit ( neb1_s , neb2 . structures [ 0 ] ) or x . fit ( neb1_s , neb2 . structures [ - 1 ] ) :
                matching += 1
                break
        if matching == 0 :
            raise ValueError ( "no matched structures for connection!" )
        neb1_start_e , neb1_end_e = neb1_energies [ 0 ] , neb1_energies [ - 1 ]
        neb2_start_e , neb2_end_e = neb2_energies [ 0 ] , neb2_energies [ - 1 ]
        # Pick the pair of end points with the smallest energy difference
        # as the junction (unless the caller pre-arranged the segments).
        min_e_diff = min ( ( [ abs ( neb1_start_e - neb2_start_e ) , abs ( neb1_start_e - neb2_end_e ) , abs ( neb1_end_e - neb2_start_e ) , abs ( neb1_end_e - neb2_end_e ) ] ) )
        if arranged_neb_analyses :
            # Caller guarantees end-of-neb1 joins start-of-neb2; average the
            # shared image's energy at the junction.
            neb1_energies = neb1_energies [ 0 : len ( neb1_energies ) - 1 ] + [ ( neb1_energies [ - 1 ] + neb2_energies [ 0 ] ) / 2 ] + neb2_energies [ 1 : ]
            neb1_structures = neb1_structures + neb2 . structures [ 1 : ]
            neb1_forces = list ( neb1_forces ) + list ( neb2 . forces ) [ 1 : ]
            neb1_r = list ( neb1_r ) + [ i + neb1_r [ - 1 ] for i in list ( neb2 . r ) [ 1 : ] ]
        elif abs ( neb1_start_e - neb2_start_e ) == min_e_diff :
            # start-start junction: reverse neb1 and prepend it to neb2.
            neb1_energies = list ( reversed ( neb1_energies [ 1 : ] ) ) + neb2_energies
            neb1_structures = list ( reversed ( ( neb1_structures [ 1 : ] ) ) ) + neb2 . structures
            neb1_forces = list ( reversed ( list ( neb1_forces ) [ 1 : ] ) ) + list ( neb2 . forces )
            neb1_r = list ( reversed ( [ i * - 1 - neb1_r [ - 1 ] * - 1 for i in list ( neb1_r ) [ 1 : ] ] ) ) + [ i + neb1_r [ - 1 ] for i in list ( neb2 . r ) ]
        elif abs ( neb1_start_e - neb2_end_e ) == min_e_diff :
            # end(neb2)-start(neb1) junction: put neb2 in front of neb1.
            neb1_energies = neb2_energies + neb1_energies [ 1 : ]
            neb1_structures = neb2 . structures + neb1_structures [ 1 : ]
            neb1_forces = list ( neb2 . forces ) + list ( neb1_forces ) [ 1 : ]
            neb1_r = [ i for i in list ( neb2 . r ) ] + [ i + list ( neb2 . r ) [ - 1 ] for i in list ( neb1_r ) [ 1 : ] ]
        elif abs ( neb1_end_e - neb2_start_e ) == min_e_diff :
            # end-start junction: simply append neb2 after neb1.
            neb1_energies = neb1_energies + neb2_energies [ 1 : ]
            neb1_structures = neb1_structures + neb2 . structures [ 1 : ]
            neb1_forces = list ( neb1_forces ) + list ( neb2 . forces ) [ 1 : ]
            neb1_r = [ i for i in list ( neb1_r ) ] + [ i + neb1_r [ - 1 ] for i in list ( neb2 . r ) [ 1 : ] ]
        else :
            # end-end junction: append a reversed neb2 after neb1.
            neb1_energies = neb1_energies + list ( reversed ( neb2_energies ) ) [ 1 : ]
            neb1_structures = neb1_structures + list ( reversed ( ( neb2 . structures ) ) ) [ 1 : ]
            neb1_forces = list ( neb1_forces ) + list ( reversed ( list ( neb2 . forces ) ) ) [ 1 : ]
            neb1_r = list ( neb1_r ) + list ( reversed ( [ i * - 1 - list ( neb2 . r ) [ - 1 ] * - 1 + list ( neb1_r ) [ - 1 ] for i in list ( neb2 . r ) [ : - 1 ] ] ) )
    if reverse_plot :
        # Mirror the reaction coordinate and reverse all parallel arrays.
        na = NEBAnalysis ( list ( reversed ( [ i * - 1 - neb1_r [ - 1 ] * - 1 for i in list ( neb1_r ) ] ) ) , list ( reversed ( neb1_energies ) ) , list ( reversed ( neb1_forces ) ) , list ( reversed ( neb1_structures ) ) )
    else :
        na = NEBAnalysis ( neb1_r , neb1_energies , neb1_forces , neb1_structures )
    return na
|
def with_params(self, params):
    """Create a new request with added query parameters.

    Parameters
    params : Mapping
        the query parameters to add
    """
    merged = _merge_maps(self.params, params)
    return self.replace(params=merged)
|
def _structmap(self):
    """Returns structMap element for all files."""
    # TODO: add ability for multiple structMaps; TYPE/ID/LABEL are currently
    # hardcoded to the Archivematica defaults.
    structmap = etree.Element(
        utils.lxmlns("mets") + "structMap",
        TYPE="physical",
        ID="structMap_1",
        LABEL="Archivematica default",
    )
    serialized = (item.serialize_structmap(recurse=True) for item in self._root_elements)
    for child in serialized:
        if child is not None:
            structmap.append(child)
    return structmap
|
def _getOutputElegant(self, **kws):
    """Get results from elegant output according to the given keywords.

    Available keywords:
    - 'file': sdds filename, e.g. ``file=test.sig``
    - 'data': tuple of field names, e.g. ``data=('s', 'Sx')``
    - 'dump': h5 file name; if given, data is also dumped to HDF5 format
    """
    script_name = "sddsprintdata.sh"
    fields = kws['data']
    sdds_path = os.path.expanduser(os.path.join(self.sim_path, kws['file']))
    extracter = datautils.DataExtracter(sdds_path, *fields)
    extracter.setDataScript(script_name)
    extracter.setDataPath(self.sim_path)
    if 'dump' in kws:
        extracter.setH5file(kws['dump'])
        extracter.extractData().dump()
    return extracter.extractData().getH5Data()
|
def load_config():
    """Load a config file. This function looks for a config (*.ini) file in the
    following order::

        (1) ./*.ini
        (2) ~/.config/hydra/
        (3) /etc/hydra
        (4) /path/to/hydra_base/*.ini

    (1) will override (2) will override (3) will override (4). Parameters not
    defined in (1) will be taken from (2). Parameters not defined in (2) will
    be taken from (3). (3) is the config folder that will be checked out from
    the svn repository. (2) Will be provided as soon as an installable
    distribution is available. (1) will usually be written individually by
    every user.

    A file named by the ``HYDRA_CONFIG`` environment variable, if set and
    existing, is read last and therefore overrides everything else.

    :returns: the populated ``ConfigParser`` instance (also stored in the
        module-global ``CONFIG``).
    """
    global localfiles
    global localfile
    global repofile
    global repofiles
    global userfile
    global userfiles
    global sysfile
    global sysfiles
    global CONFIG
    logging.basicConfig(level='INFO')
    config = ConfigParser.ConfigParser(allow_no_value=True)
    modulepath = os.path.dirname(os.path.abspath(__file__))
    localfile = os.path.join(os.getcwd(), 'hydra.ini')
    localfiles = glob.glob(localfile)
    repofile = os.path.join(modulepath, 'hydra.ini')
    repofiles = glob.glob(repofile)
    if os.name == 'nt':
        import winpaths
        userfile = os.path.join(os.path.expanduser('~'), 'AppData', 'Local', 'hydra.ini')
        userfiles = glob.glob(userfile)
        sysfile = os.path.join(winpaths.get_common_documents(), 'Hydra', 'hydra.ini')
        sysfiles = glob.glob(sysfile)
    else:
        userfile = os.path.join(os.path.expanduser('~'), '.hydra', 'hydra.ini')
        userfiles = glob.glob(userfile)
        # NOTE(review): this is a *relative* path ('etc/hydra/hydra.ini') even
        # though the docstring says /etc/hydra -- confirm intended behavior
        # before changing it.
        sysfile = os.path.join('etc', 'hydra', 'hydra.ini')
        sysfiles = glob.glob(sysfile)
    # Later reads override earlier ones, so read in increasing precedence:
    # repo -> system -> user -> local.
    for ini_file in repofiles:
        logging.debug("Repofile: %s" % ini_file)
        config.read(ini_file)
    for ini_file in sysfiles:
        logging.debug("Sysfile: %s" % ini_file)
        config.read(ini_file)
    for ini_file in userfiles:
        logging.debug("Userfile: %s" % ini_file)
        config.read(ini_file)
    for ini_file in localfiles:
        logging.info("Localfile: %s" % ini_file)
        config.read(ini_file)
    env_value = os.environ.get('HYDRA_CONFIG')
    if env_value is not None:
        if os.path.exists(env_value):
            # BUG FIX: previously this read ``ini_file`` (the stale loop
            # variable, possibly undefined) instead of the file named by
            # HYDRA_CONFIG.
            config.read(env_value)
        else:
            logging.warning('HYDRA_CONFIG set as %s but file does not exist', env_value)
    if os.name == 'nt':
        set_windows_env_variables(config)
    # Fall back to environment variables when the options are absent from
    # every ini file read above.
    try:
        home_dir = config.get('DEFAULT', 'home_dir')
    except Exception:
        home_dir = os.environ.get('HYDRA_HOME_DIR', '~')
    config.set('DEFAULT', 'home_dir', os.path.expanduser(home_dir))
    try:
        hydra_base = config.get('DEFAULT', 'hydra_base_dir')
    except Exception:
        hydra_base = os.environ.get('HYDRA_BASE_DIR', modulepath)
    config.set('DEFAULT', 'hydra_base_dir', os.path.expanduser(hydra_base))
    read_values_from_environment(config, 'mysqld', 'server_name')
    CONFIG = config
    return config
|
def get_gen_info ( network , level = 'mvlv' , fluctuating = False ) :
    """Gets all the installed generators with some additional information.

    Parameters
    network : :class:`~.grid.network.Network`
        Network object holding the grid data.
    level : :obj:`str`
        Defines which generators are returned. Possible options are:

        * 'mv'
          Only generators connected to the MV grid are returned.
        * 'lv'
          Only generators connected to the LV grids are returned.
        * 'mvlv'
          All generators connected to the MV grid and LV grids are returned.

        Default: 'mvlv'.
    fluctuating : :obj:`bool`
        If True only returns fluctuating generators. Default: False.

    Returns
    :pandas:`pandas.DataFrame<dataframe>`
        Dataframe with all generators connected to the specified voltage
        level. Index of the dataframe are the generator objects of type
        :class:`~.grid.components.Generator`. Columns of the dataframe are:

        * 'gen_repr'
          The representative of the generator as :obj:`str`.
        * 'type'
          The generator type, e.g. 'solar' or 'wind' as :obj:`str`.
        * 'voltage_level'
          The voltage level the generator is connected to as :obj:`str`. Can
          either be 'mv' or 'lv'.
        * 'nominal_capacity'
          The nominal capacity of the generator as :obj:`float`.
        * 'weather_cell_id'
          The id of the weather cell the generator is located in as :obj:`int`
          (only applies to fluctuating generators).
    """
    # The lists below are built strictly in parallel (same length / order)
    # and zipped into one DataFrame at the end.
    gens_w_id = [ ]
    if 'mv' in level :
        # Collect MV-grid generators and their attributes.
        gens = network . mv_grid . generators
        gens_voltage_level = [ 'mv' ] * len ( gens )
        gens_type = [ gen . type for gen in gens ]
        gens_rating = [ gen . nominal_capacity for gen in gens ]
        for gen in gens :
            try :
                gens_w_id . append ( gen . weather_cell_id )
            except AttributeError :
                # Non-fluctuating generators have no weather cell.
                gens_w_id . append ( np . nan )
        gens_grid = [ network . mv_grid ] * len ( gens )
    else :
        gens = [ ]
        gens_voltage_level = [ ]
        gens_type = [ ]
        gens_rating = [ ]
        gens_grid = [ ]
    if 'lv' in level :
        # Extend the same parallel lists with all LV-grid generators.
        for lv_grid in network . mv_grid . lv_grids :
            gens_lv = lv_grid . generators
            gens . extend ( gens_lv )
            gens_voltage_level . extend ( [ 'lv' ] * len ( gens_lv ) )
            gens_type . extend ( [ gen . type for gen in gens_lv ] )
            gens_rating . extend ( [ gen . nominal_capacity for gen in gens_lv ] )
            for gen in gens_lv :
                try :
                    gens_w_id . append ( gen . weather_cell_id )
                except AttributeError :
                    gens_w_id . append ( np . nan )
            gens_grid . extend ( [ lv_grid ] * len ( gens_lv ) )
    gen_df = pd . DataFrame ( { 'gen_repr' : list ( map ( lambda x : repr ( x ) , gens ) ) , 'generator' : gens , 'type' : gens_type , 'voltage_level' : gens_voltage_level , 'nominal_capacity' : gens_rating , 'weather_cell_id' : gens_w_id , 'grid' : gens_grid } )
    gen_df . set_index ( 'generator' , inplace = True , drop = True )
    # filter fluctuating generators
    if fluctuating :
        gen_df = gen_df . loc [ ( gen_df . type == 'solar' ) | ( gen_df . type == 'wind' ) ]
    return gen_df
|
async def listTaskGroup(self, *args, **kwargs):
    """List Task Group

    List tasks sharing the same ``taskGroupId``.

    As a task-group may contain an unbounded number of tasks, this end-point
    may return a ``continuationToken``. To continue listing tasks you must
    call ``listTaskGroup`` again with the ``continuationToken`` as the
    query-string option ``continuationToken``.

    By default this end-point will try to return up to 1000 members in one
    request. But it **may return less**, even if more tasks are available.
    It may also return a ``continuationToken`` even though there are no more
    results. However, you can only be sure to have seen all results if you
    keep calling ``listTaskGroup`` with the last ``continuationToken`` until
    you get a result without a ``continuationToken``.

    If you are not interested in listing all the members at once, you may
    use the query-string option ``limit`` to return fewer.

    This method gives output: ``v1/list-task-group-response.json#``

    This method is ``stable``
    """
    endpoint_info = self.funcinfo["listTaskGroup"]
    return await self._makeApiCall(endpoint_info, *args, **kwargs)
|
def runCommandSplits(splits, silent=False, shell=False):
    """Run a shell command given the command's parsed command line.

    :param splits: argument list (or command string when ``shell`` is True)
    :param silent: when True, discard the child's stdout and stderr
    :param shell: passed through to ``subprocess.check_call``
    :raises Exception: when the command executable cannot be found
    """
    try:
        if silent:
            with open(os.devnull, 'w') as sink:
                subprocess.check_call(splits, stdout=sink, stderr=sink, shell=shell)
        else:
            subprocess.check_call(splits, shell=shell)
    except OSError as exception:
        if exception.errno != 2:
            raise
        # errno 2 == ENOENT: the executable itself was not found.
        raise Exception("Can't find command while trying to run {}".format(splits))
|
def date_range(start_date, end_date, increment, period):
    """Generate `date` objects between `start_date` and `end_date` in `increment`
    `period` intervals.

    ``period`` is a ``relativedelta`` keyword such as ``'days'`` or
    ``'months'``; the range is inclusive of ``end_date`` when it falls
    exactly on a step.
    """
    step = relativedelta.relativedelta(**{period: increment})
    current = start_date
    while current <= end_date:
        yield current
        current = current + step
|
def asin(x, context=None):
    """Return the inverse sine of ``x``.

    The mathematically exact result lies in the range [-π/2, π/2]. However,
    note that as a result of rounding to the current context, it's possible
    for the actual value to lie just outside this range.
    """
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_asin,
        (operand,),
        context,
    )
|
def close(self):
    """Close the underlying connection.

    Marks the object closed, cancels the receive task (if any) and closes
    the connection (if any).
    """
    self._closed = True
    task = self.receive_task
    if task:
        task.cancel()
    conn = self.connection
    if conn:
        conn.close()
|
def remove_alt_text_language(self, language_type):
    """Removes the specified alt_text.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_alt_texts_metadata()
    if metadata.is_read_only():
        raise NoAccess()
    self.remove_field_by_language('altTexts', language_type)
|
def _compute_p_value ( self , term_i ) :
    """compute the p-value of the desired feature

    Arguments
    term_i : int
        term to select from the data

    Returns
    p_value : float

    Notes
    Wood 2006, section 4.8.5:
    The p-values, calculated in this manner, behave correctly for un-penalized
    models, or models with known smoothing parameters, but when smoothing
    parameters have been estimated, the p-values are typically lower than they
    should be, meaning that the tests reject the null too readily.

    In practical terms, if these p-values suggest that a term is not needed in
    a model, then this is probably true, but if a term is deemed 'significant'
    it is important to be aware that this significance may be overstated.

    based on equations from Wood 2006 section 4.8.5 page 191
    and errata https://people.maths.bris.ac.uk/~sw15190/igam/iGAMerrata-12.pdf
    the errata shows a correction for the f-statistic.
    """
    if not self . _is_fitted :
        raise AttributeError ( 'GAM has not been fitted. Call fit first.' )
    # Coefficients and their covariance sub-matrix for this term only.
    idxs = self . terms . get_coef_indices ( term_i )
    cov = self . statistics_ [ 'cov' ] [ idxs ] [ : , idxs ]
    coef = self . coef_ [ idxs ]
    # center non - intercept term functions
    if isinstance ( self . terms [ term_i ] , SplineTerm ) :
        coef -= coef . mean ( )
    # Pseudo-inverse also yields the effective rank used as the test's
    # degrees of freedom.
    inv_cov , rank = sp . linalg . pinv ( cov , return_rank = True )
    # Wald-type statistic: coef' * cov^-1 * coef.
    score = coef . T . dot ( inv_cov ) . dot ( coef )
    # compute p - values
    if self . distribution . _known_scale : # for known scale use chi - squared statistic
        return 1 - sp . stats . chi2 . cdf ( x = score , df = rank )
    else : # if scale has been estimated , prefer to use f - statisitc
        score = score / rank
        return 1 - sp . stats . f . cdf ( score , rank , self . statistics_ [ 'n_samples' ] - self . statistics_ [ 'edof' ] )
|
def get_hashes_from_search(self, query, page=None):
    """Get the scan results for a file.

    Even if you do not have a Private Mass API key that you can use, you can
    still automate VirusTotal Intelligence searches pretty much in the same
    way that the searching for files api call works.

    :param query: a VirusTotal Intelligence search string in accordance with
        the file search documentation.
        <https://www.virustotal.com/intelligence/help/file-search/>
    :param page: the next_page property of the results of a previously issued
        query to this API. This parameter should not be provided if it is the
        very first query to the API, i.e. if we are retrieving the first page
        of results.
    :returns: a ``(next_page, response)`` tuple on success, or a dict with an
        ``error`` key when the HTTP request fails.

    apikey: the API key associated to a VirusTotal Community account with
    VirusTotal Intelligence privileges.
    """
    params = {'query': query, 'apikey': self.api_key, 'page': page}
    try:
        response = requests.get(self.base + 'search/programmatic/', params=params, proxies=self.proxies)
    except requests.RequestException as e:
        # BUG FIX: RequestException has no ``.message`` attribute on
        # Python 3; str(e) works on both Python 2 and 3.
        return dict(error=str(e))
    return response.json()['next_page'], response
|
def list_packages(self, **kwargs):
    """List active packages.

    :returns: List of active packages.
    """
    request_args = {'mask': kwargs.get('mask', PACKAGE_MASK)}
    if 'filter' in kwargs:
        request_args['filter'] = kwargs['filter']
    all_packages = self.package_svc.getAllObjects(**request_args)
    return [pkg for pkg in all_packages if pkg['isActive']]
|
def add_async_sender(self, partition=None, operation=None, send_timeout=60, keep_alive=30, auto_reconnect=True, loop=None):
    """Add an async sender to the client to send ~azure.eventhub.common.EventData
    object to an EventHub.

    :param partition: Optionally specify a particular partition to send to.
        If omitted, the events will be distributed to available partitions via
        round-robin.
    :type partition: str
    :operation: An optional operation to be appended to the hostname in the
        target URL. The value must start with `/` character.
    :type operation: str
    :param send_timeout: The timeout in seconds for an individual event to be
        sent from the time that it is queued. Default value is 60 seconds.
        If set to 0, there will be no timeout.
    :type send_timeout: int
    :param keep_alive: The time interval in seconds between pinging the
        connection to keep it alive during periods of inactivity. The default
        value is 30 seconds. If set to `None`, the connection will not be
        pinged.
    :type keep_alive: int
    :param auto_reconnect: Whether to automatically reconnect the sender if a
        retryable error occurs. Default value is `True`.
    :type auto_reconnect: bool
    :rtype: ~azure.eventhub.async_ops.sender_async.SenderAsync
    """
    target = "amqps://{}{}".format(self.address.hostname, self.address.path)
    if operation:
        target += operation
    sender = AsyncSender(
        self,
        target,
        partition=partition,
        send_timeout=send_timeout,
        keep_alive=keep_alive,
        auto_reconnect=auto_reconnect,
        loop=loop,
    )
    self.clients.append(sender)
    return sender
|
def _counts_at_position ( positions , orig_reader , cmp_reader ) :
"""Combine orignal and new qualities at each position , generating counts ."""
|
pos_counts = collections . defaultdict ( lambda : collections . defaultdict ( lambda : collections . defaultdict ( int ) ) )
for orig_parts in orig_reader :
cmp_parts = next ( cmp_reader )
for pos in positions :
try :
pos_counts [ pos ] [ int ( orig_parts [ pos + 1 ] ) ] [ int ( cmp_parts [ pos + 1 ] ) ] += 1
except IndexError :
pass
for pos , count_dict in pos_counts . iteritems ( ) :
for orig_val , cmp_dict in count_dict . iteritems ( ) :
for cmp_val , count in cmp_dict . iteritems ( ) :
yield pos + 1 , orig_val , cmp_val , count
|
def interpolate(dataset, points, sharpness=2, radius=1.0, dimensions=(101, 101, 101), pass_cell_arrays=True, pass_point_arrays=True):
    """Interpolate values onto this mesh from the point data of a given
    :class:`vtki.PolyData` object (typically a point cloud).

    This uses a gaussian interpolation kernel. Use the ``sharpness`` and
    ``radius`` parameters to adjust this kernel.

    Parameters
    points : vtki.PolyData
        The points whose values will be interpolated onto this mesh.
    sharpness : float
        Set/Get the sharpness (i.e., falloff) of the Gaussian. By default
        Sharpness=2. As the sharpness increases the effects of distant points
        are reduced.
    radius : float
        Specify the radius within which the basis points must lie.
    dimensions : tuple(int)
        When interpolating the points, they are first interpolated on to a
        :class:`vtki.UniformGrid` with the same spatial extent --
        ``dimensions`` is number of points along each axis for that grid.
    pass_cell_arrays : bool, optional
        Preserve source mesh's original cell data arrays
    pass_point_arrays : bool, optional
        Preserve source mesh's original point data arrays
    """
    # Build a uniform grid that spans the dataset's bounding box.
    extents = np.array(dataset.bounds)
    dims = np.array(dimensions)
    grid = vtki.UniformGrid()
    grid.dimensions = dims
    grid.spacing = (extents[1::2] - extents[:-1:2]) / (dims - 1)
    grid.origin = extents[::2]
    # Gaussian kernel controls how neighbouring points are weighted.
    kernel = vtk.vtkGaussianKernel()
    kernel.SetSharpness(sharpness)
    kernel.SetRadius(radius)
    interpolator = vtk.vtkPointInterpolator()
    interpolator.SetInputData(grid)
    interpolator.SetSourceData(points)
    interpolator.SetKernel(kernel)
    interpolator.Update()
    # Sample the interpolated grid back onto the original dataset.
    return dataset.sample(
        interpolator.GetOutput(),
        pass_cell_arrays=pass_cell_arrays,
        pass_point_arrays=pass_point_arrays,
    )
|
def xstep ( self ) :
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`.

    Solves the linear system in the DFT domain via conjugate gradient;
    the CG iteration count is recorded in ``self.cgit``.
    """
    self . cgit = None
    # Residual Y - U feeds the right-hand side of the linear system.
    self . YU [ : ] = self . Y - self . U
    # RHS in the frequency domain (ZSf presumably holds the data term
    # Z^H S f -- confirm against the class definition).
    b = self . ZSf + self . rho * sl . rfftn ( self . YU , None , self . cri . axisN )
    # CG solve of the multi-diagonal block system, warm-started from the
    # previous self.Xf; tolerance/iterations come from the 'CG' options.
    self . Xf [ : ] , cgit = sl . solvemdbi_cg ( self . Zf , self . rho , b , self . cri . axisM , self . cri . axisK , self . opt [ 'CG' , 'StopTol' ] , self . opt [ 'CG' , 'MaxIter' ] , self . Xf )
    self . cgit = cgit
    # Back-transform the solution to the spatial domain.
    self . X = sl . irfftn ( self . Xf , self . cri . Nv , self . cri . axisN )
    self . xstep_check ( b )
|
def _before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    """Intercept low-level cursor execute() events before execution.

    If executemany is True, this is an executemany call, else an execute call.

    Note: If enabled tracing both SQLAlchemy and the database it connected,
    the communication between SQLAlchemy and the database will also be
    traced. To avoid the verbose spans, you can just trace SQLAlchemy.

    See: http://docs.sqlalchemy.org/en/latest/core/events.html#sqlalchemy.
    events.ConnectionEvents.before_cursor_execute
    """
    # Find out the func name
    query_func = 'executemany' if executemany else 'execute'
    tracer = execution_context.get_opencensus_tracer()
    span = tracer.start_span()
    span.name = '{}.query'.format(MODULE_NAME)
    span.span_kind = span_module.SpanKind.CLIENT
    # Attach statement, parameters and function name to the current span.
    span_attributes = (
        ('{}.query'.format(MODULE_NAME), statement),
        ('{}.query.parameters'.format(MODULE_NAME), str(parameters)),
        ('{}.cursor.method.name'.format(MODULE_NAME), query_func),
    )
    for attr_key, attr_value in span_attributes:
        tracer.add_attribute_to_current_span(attr_key, attr_value)
|
def make_ring_filelist(self, sourcekeys, rings, galprop_run):
    """Make a list of all the template files for a merged component.

    Parameters
    sourcekeys : list-like of str
        The names of the components to merge
    rings : list-like of int
        The indices of the rings to merge
    galprop_run : str
        String identifying the galprop parameters
    """
    # One file per (sourcekey, ring) pair, in sourcekey-major order.
    return [
        self.make_ring_filename(sourcekey, ring, galprop_run)
        for sourcekey in sourcekeys
        for ring in rings
    ]
|
def find_module(self, fullname, path=None):
    """Return self when fullname starts with root_name and the
    target module is one vendored through this importer.
    """
    head, _sep, target = fullname.partition(self.root_name + '.')
    if head:
        # fullname did not begin with "<root_name>.", so it is not ours.
        return None
    for vendored in self.vendored_names:
        if target.startswith(vendored):
            return self
    return None
|
def run_inline_script ( host , name = None , port = 22 , timeout = 900 , username = 'root' , key_filename = None , inline_script = None , ssh_timeout = 15 , display_ssh_output = True , parallel = False , sudo_password = None , sudo = False , password = None , tty = None , opts = None , tmp_dir = '/tmp/.saltcloud-inline_script' , ** kwargs ) :
    '''Run the inline script commands, one by one.

    Waits for the host's SSH port and for password/key auth to become
    available, then executes each command in ``inline_script`` via
    ``root_cmd``, logging any output. Returns True when finished.

    :**kwargs: catch all other things we may get but don't actually need/use
    '''
    gateway = None
    if 'gateway' in kwargs :
        gateway = kwargs [ 'gateway' ]
    starttime = time . mktime ( time . localtime ( ) )
    log . debug ( 'Deploying %s at %s' , host , starttime )
    known_hosts_file = kwargs . get ( 'known_hosts_file' , '/dev/null' )
    # Block until the SSH port is reachable (possibly through a gateway).
    if wait_for_port ( host = host , port = port , gateway = gateway ) :
        log . debug ( 'SSH port %s on %s is available' , port , host )
        newtimeout = timeout - ( time . mktime ( time . localtime ( ) ) - starttime )
        # Then block until authentication succeeds.
        if wait_for_passwd ( host , port = port , username = username , password = password , key_filename = key_filename , ssh_timeout = ssh_timeout , display_ssh_output = display_ssh_output , gateway = gateway , known_hosts_file = known_hosts_file ) :
            log . debug ( 'Logging into %s:%s as %s' , host , port , username )
            newtimeout = timeout - ( time . mktime ( time . localtime ( ) ) - starttime )
            # Common kwargs for every subsequent root_cmd invocation.
            ssh_kwargs = { 'hostname' : host , 'port' : port , 'username' : username , 'timeout' : ssh_timeout , 'display_ssh_output' : display_ssh_output , 'sudo_password' : sudo_password , 'sftp' : opts . get ( 'use_sftp' , False ) }
            ssh_kwargs . update ( __ssh_gateway_config_dict ( gateway ) )
            if key_filename :
                log . debug ( 'Using %s as the key_filename' , key_filename )
                ssh_kwargs [ 'key_filename' ] = key_filename
            elif password and 'has_ssh_agent' in kwargs and kwargs [ 'has_ssh_agent' ] is False :
                ssh_kwargs [ 'password' ] = password
            # TODO : write some tests ? ? ?
            # TODO : check edge cases ( e . g . ssh gateways , salt deploy disabled , etc . )
            # Only run the script if the temp dir test succeeds and commands
            # were actually provided.
            if root_cmd ( 'test -e \\"{0}\\"' . format ( tmp_dir ) , tty , sudo , allow_failure = True , ** ssh_kwargs ) and inline_script :
                log . debug ( 'Found inline script to execute.' )
                for cmd_line in inline_script :
                    log . info ( 'Executing inline command: %s' , cmd_line )
                    ret = root_cmd ( 'sh -c "( {0} )"' . format ( cmd_line ) , tty , sudo , allow_failure = True , ** ssh_kwargs )
                    if ret :
                        log . info ( '[%s] Output: %s' , cmd_line , ret )
    # TODO : ensure we send the correct return value
    return True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.