| signature | implementation |
|---|---|
def FDMT(data, f_min, f_max, maxDT, dataType):
    """Compute the Fast Dispersion Measure Transform (FDMT) of visibility data.

    Input: visibility array of shape (nints, nbl, nchan, npol).
    f_min, f_max are the base-band begin and end frequencies, in MHz.
    maxDT is the maximal delay (in time bins) of the maximal dispersion;
    appears in the paper as N_{\\Delta}.  A typical input is maxDT = N_f.
    dataType is a valid numpy dtype; recommended: int32 or int64.

    Output: the dispersion measure transform of the input matrix, with the
    dT axis first and the (exhausted) channel axis removed.
    For details, see algorithm 1 in Zackay & Ofek (2014).
    """
    nint, nbl, nchan, npol = data.shape
    niters = int(np.log2(nchan))
    # Both the channel count and the integration count must be powers of two
    # for the iterative halving scheme to work.
    powers_of_two = 2 ** np.arange(30)
    assert nchan in powers_of_two and nint in powers_of_two, "Input dimensions must be a power of 2"
    logger.info('Input data dimensions: {0}'.format(data.shape))

    state = FDMT_initialization(data, f_min, f_max, maxDT, dataType)
    logger.info('Iterating {0} times to calculate to maxDT of {1}'.format(niters, maxDT))
    for i_t in range(1, niters + 1):
        state = FDMT_iteration(state, maxDT, nchan, f_min, f_max, i_t, dataType)

    nint, dT, nbl, nchan, npol = state.shape
    assert nchan == 1, 'Channel axis should have length 1 after all FDMT iterations.'
    # Move the dT axis to the front and drop the now-singleton channel axis.
    return np.rollaxis(state[:, :, :, 0, :], 1)
def _get_dvs_infrastructure_traffic_resources(dvs_name, dvs_infra_traffic_ress):
    '''Returns a list of dict representations of the DVS infrastructure traffic
    resource

    dvs_name
        The name of the DVS

    dvs_infra_traffic_ress
        The DVS infrastructure traffic resources'''
    log.trace('Building the dicts of the DVS \'%s\' infrastructure '
              'traffic resources', dvs_name)
    res_dicts = []
    for res in dvs_infra_traffic_ress:
        alloc = res.allocationInfo
        entry = {'key': res.key,
                 'limit': alloc.limit,
                 'reservation': alloc.reservation}
        # Share settings are optional on the allocation info.
        if alloc.shares:
            entry['num_shares'] = alloc.shares.shares
            entry['share_level'] = alloc.shares.level
        res_dicts.append(entry)
    return res_dicts
def arch_to_personality(arch):
    """Determine the process personality corresponding to the architecture.

    :param arch: architecture name as ``str`` or ``bytes``
    :return: the personality value reported by the ``_lxc`` extension
    """
    if isinstance(arch, bytes):
        # The original code called the Python-2-only ``unicode`` builtin,
        # which raises NameError on Python 3.  ``bytes.decode()`` performs
        # the same bytes -> text conversion on both versions.
        arch = arch.decode()
    return _lxc.arch_to_personality(arch)
def _parseAtNamespace(self, src):
    """Parse zero or more CSS ``@namespace`` at-rules from *src*.

    Grammar::

        namespace
          : @namespace S* [IDENT S*]? [STRING|URI] S* ';' S*

    Each parsed rule is reported to ``self.cssBuilder.atNamespace``.
    Returns the remaining, unconsumed source text.
    """
    # Skip whitespace and CDO/CDC tokens before the first at-rule.
    src = self._parseSCDOCDC(src)
    while isAtRuleIdent(src, 'namespace'):
        ctxsrc = src  # keep the full rule text for error reporting
        src = stripAtRuleIdent(src)
        # Try the prefix-less form first: "@namespace <uri>;"
        namespace, src = self._getStringOrURI(src)
        if namespace is None:
            # Prefixed form: "@namespace <ident> <uri>;"
            nsPrefix, src = self._getIdent(src)
            if nsPrefix is None:
                raise self.ParseError('@namespace expected an identifier or a URI', src, ctxsrc)
            namespace, src = self._getStringOrURI(src.lstrip())
            if namespace is None:
                raise self.ParseError('@namespace expected a URI', src, ctxsrc)
        else:
            nsPrefix = None
        src = src.lstrip()
        # The rule must be terminated by a semicolon.
        if src[:1] != ';':
            raise self.ParseError('@namespace expected a terminating \';\'', src, ctxsrc)
        src = src[1:].lstrip()
        self.cssBuilder.atNamespace(nsPrefix, namespace)
        # Allow further whitespace/CDO/CDC before any following rule.
        src = self._parseSCDOCDC(src)
    return src
def service_desks(self):
    """Get a list of ServiceDesk Resources from the server visible to the
    current authenticated user.

    :rtype: List[ServiceDesk]
    """
    url = self._options['server'] + '/rest/servicedeskapi/servicedesk'
    # The servicedesk API requires explicit opt-in while experimental.
    headers = {'X-ExperimentalApi': 'opt-in'}
    r_json = json_loads(self._session.get(url, headers=headers))
    return [
        ServiceDesk(self._options, self._session, raw_project_json)
        for raw_project_json in r_json['values']
    ]
def _setable_set_ ( name , self , func ) :
"Used to set the attribute a single time using the given function ." | setattr ( self . _attr_data_ , name , func ( ) )
if hasattr ( self . _attr_func_ , name ) :
delattr ( self . _attr_func_ , name )
setattr ( type ( self ) , name , property ( functools . partial ( self . _simple_get_ , name ) ) ) |
def generate(cls):
    """Generates a random :class:`~SigningKey` object.

    :rtype: :class:`~SigningKey`
    """
    # A fresh random seed of the required length yields a new signing key.
    seed = libnacl.randombytes(libnacl.crypto_sign_SEEDBYTES)
    return cls(seed, encoder=encoding.RawEncoder)
def rule_metric_names(self):
    """Returns the rule metric names of the association rules.

    Only available if the wrapped object implements AssociationRulesProducer;
    otherwise None is returned.

    :return: the metric names
    :rtype: list
    """
    if not self.check_type(self.jobject, "weka.associations.AssociationRulesProducer"):
        return None
    raw_names = javabridge.call(
        self.jobject, "getRuleMetricNames", "()[Ljava/lang/String;")
    return string_array_to_list(raw_names)
def plotF0(fromTuple, toTuple, mergeTupleList, fnFullPath):
    '''Plots the original data in a graph above the plot of the dtw'ed data

    fromTuple/toTuple are (timeList, valueList) pairs for the source and
    target contours; mergeTupleList holds one (timeList, valueList) pair per
    morph iteration.  The figure is written to fnFullPath at 300 dpi.
    '''
    _matplotlibCheck()
    # NOTE: the old ``plt.hold(True)`` call was dropped -- it was removed in
    # matplotlib 3.0 and axes hold all artists by default.
    fig, (ax0) = plt.subplots(nrows=1)
    # Old data
    ax0.plot(fromTuple[0], fromTuple[1], color='red', linewidth=2, label="From")
    ax0.plot(toTuple[0], toTuple[1], color='blue', linewidth=2, label="To")
    ax0.set_title("Plot of F0 Morph")
    plt.ylabel('Pitch (hz)')
    plt.xlabel('Time (s)')
    # Merge data: each iteration is drawn in a progressively darker red.
    if mergeTupleList:  # guard: an empty list would divide by zero below
        colorValue = 0
        colorStep = 255.0 / len(mergeTupleList)
        for timeList, valueList in mergeTupleList:
            colorValue += colorStep
            hexValue = "#%02x0000" % int(255 - colorValue)
            if int(colorValue) == 255:
                # Only the last (darkest) iteration gets a legend entry.
                ax0.plot(timeList, valueList, color=hexValue, linewidth=1,
                         label="Merged line, final iteration")
            else:
                ax0.plot(timeList, valueList, color=hexValue, linewidth=1)
    plt.legend(loc=1, borderaxespad=0.)
    plt.savefig(fnFullPath, dpi=300, bbox_inches='tight')
    plt.close(fig)
def next(self):
    """Returns the next input from this input reader, a record.

    Returns:
      The next input from this input reader in the form of a record read from
      an LevelDB file.

    Raises:
      StopIteration: The ordered set records has been exhausted.
    """
    while True:
        # Lazily open the next file handle from the parent reader.
        if not hasattr(self, "_cur_handle") or self._cur_handle is None:
            # If there are no more files, StopIteration is raised here
            self._cur_handle = super(_GoogleCloudStorageRecordInputReader, self).next()
        # Wrap the raw handle in a record reader, created once per file.
        if not hasattr(self, "_record_reader") or self._record_reader is None:
            self._record_reader = records.RecordsReader(self._cur_handle)
        try:
            start_time = time.time()
            content = self._record_reader.read()
            # Report I/O counters to the mapreduce context, if one is active.
            ctx = context.get()
            if ctx:
                operation.counters.Increment(
                    COUNTER_IO_READ_BYTES, len(content))(ctx)
                operation.counters.Increment(
                    COUNTER_IO_READ_MSEC, int((time.time() - start_time) * 1000))(ctx)
            return content
        except EOFError:
            # Current file exhausted; reset both so the loop advances to the
            # next file (or lets the parent raise StopIteration).
            self._cur_handle = None
            self._record_reader = None
def change_engine_password(self, password):
    """Change Engine password for engines on allowed list.

    :param str password: password for engine level
    :raises ModificationFailed: failed setting password on engine
    :return: None
    """
    request_params = {'password': password}
    self.make_request(
        ModificationFailed,
        method='update',
        resource='change_engine_password',
        params=request_params)
def resolve(self, cfg, addr, func_addr, block, jumpkind):
    """
    Resolves jump tables.

    :param cfg:              A CFG instance.
    :param int addr:         IRSB address.
    :param int func_addr:    The function address.
    :param pyvex.IRSB block: The IRSB.  NOTE(review): this parameter is
                             immediately shadowed by the re-lifted blocks
                             inside the slicing loop and is never read.
    :param jumpkind:         The jump kind.  NOTE(review): unused here.
    :return: A bool indicating whether the indirect jump is resolved
             successfully, and a list of resolved targets
    :rtype: tuple
    """
    project = self.project  # short-hand
    self._max_targets = cfg._indirect_jump_target_limit

    # Perform a backward slicing from the jump target
    b = Blade(cfg.graph, addr, -1, cfg=cfg, project=project,
              ignore_sp=False, ignore_bp=False, max_level=3,
              base_state=self.base_state)

    stmt_loc = (addr, DEFAULT_STATEMENT)
    if stmt_loc not in b.slice:
        return False, None

    load_stmt_loc, load_stmt, load_size = None, None, None
    stmts_to_remove = [stmt_loc]
    stmts_adding_base_addr = []  # type: list[JumpTargetBaseAddr]
    # All temporary variables that hold indirect addresses loaded out of the
    # memory.  Obviously, load_stmt.tmp must be here.  If there are additional
    # data-transferring statements between the Load statement and the
    # base-address-adding statement, all_addr_holders will have more than one
    # temporary variable.  Example:
    #   IRSB 0x4c64c4
    #     + 06 | t12 = LDle:I32(t7)
    #     + 07 | t11 = 32Sto64(t12)
    #     + 10 | t2 = Add64(0x0000571df0, t11)
    #   all_addr_holders == {(0x4c64c4, 11): AddressTransferringTypes.SignedExtension32to64,
    #                        (0x4c64c4, 12): AddressTransferringTypes.Assignment}
    all_addr_holders = OrderedDict()

    # Walk the slice backwards from the jump until the memory load that
    # fetches the jump target is found.
    while True:
        preds = list(b.slice.predecessors(stmt_loc))
        if len(preds) != 1:
            # The slice forks here; give up.
            return False, None
        block_addr, stmt_idx = stmt_loc = preds[0]
        block = project.factory.block(block_addr, backup_state=self.base_state).vex
        stmt = block.statements[stmt_idx]
        if isinstance(stmt, (pyvex.IRStmt.WrTmp, pyvex.IRStmt.Put)):
            if isinstance(stmt.data, (pyvex.IRExpr.Get, pyvex.IRExpr.RdTmp)):
                # data transferring
                stmts_to_remove.append(stmt_loc)
                if isinstance(stmt, pyvex.IRStmt.WrTmp):
                    all_addr_holders[(stmt_loc[0], stmt.tmp)] = AddressTransferringTypes.Assignment
                continue
            elif isinstance(stmt.data, pyvex.IRExpr.ITE):
                # data transferring, e.g.
                #   t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
                #   t44 = ITE(t43, t16, 0x0000c844)
                stmts_to_remove.append(stmt_loc)
                if isinstance(stmt, pyvex.IRStmt.WrTmp):
                    all_addr_holders[(stmt_loc[0], stmt.tmp)] = AddressTransferringTypes.Assignment
                continue
            elif isinstance(stmt.data, pyvex.IRExpr.Unop):
                if stmt.data.op == 'Iop_32Sto64':
                    # data transferring with conversion, e.g. t11 = 32Sto64(t12)
                    stmts_to_remove.append(stmt_loc)
                    if isinstance(stmt, pyvex.IRStmt.WrTmp):
                        all_addr_holders[(stmt_loc[0], stmt.tmp)] = \
                            AddressTransferringTypes.SignedExtension32to64
                    continue
                elif stmt.data.op == 'Iop_64to32':
                    # data transferring with conversion, e.g. t24 = 64to32(t21)
                    stmts_to_remove.append(stmt_loc)
                    if isinstance(stmt, pyvex.IRStmt.WrTmp):
                        all_addr_holders[(stmt_loc[0], stmt.tmp)] = \
                            AddressTransferringTypes.Truncation64to32
                    continue
                elif stmt.data.op == 'Iop_32Uto64':
                    # data transferring with conversion, e.g. t21 = 32Uto64(t22)
                    stmts_to_remove.append(stmt_loc)
                    if isinstance(stmt, pyvex.IRStmt.WrTmp):
                        all_addr_holders[(stmt_loc[0], stmt.tmp)] = \
                            AddressTransferringTypes.UnsignedExtension32to64
                    continue
            elif isinstance(stmt.data, pyvex.IRExpr.Binop) and stmt.data.op.startswith('Iop_Add'):
                # GitHub issue #1289, a S390X binary:
                #   jump_label = &jump_table + *(jump_table[index])
                # Special case: a base address is added to the loaded offset
                # before jumping to it.
                if isinstance(stmt.data.args[0], pyvex.IRExpr.Const) and \
                        isinstance(stmt.data.args[1], pyvex.IRExpr.RdTmp):
                    # const + tmp
                    stmts_adding_base_addr.append(
                        JumpTargetBaseAddr(stmt_loc, stmt, stmt.data.args[1].tmp,
                                           base_addr=stmt.data.args[0].con.value))
                    stmts_to_remove.append(stmt_loc)
                elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and \
                        isinstance(stmt.data.args[1], pyvex.IRExpr.Const):
                    # tmp + const
                    stmts_adding_base_addr.append(
                        JumpTargetBaseAddr(stmt_loc, stmt, stmt.data.args[0].tmp,
                                           base_addr=stmt.data.args[1].con.value))
                    stmts_to_remove.append(stmt_loc)
                elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and \
                        isinstance(stmt.data.args[1], pyvex.IRExpr.RdTmp):
                    # tmp + tmp: one of the tmps must be holding a concrete
                    # value at this point
                    stmts_adding_base_addr.append(
                        JumpTargetBaseAddr(stmt_loc, stmt, stmt.data.args[0].tmp,
                                           tmp_1=stmt.data.args[1].tmp))
                    stmts_to_remove.append(stmt_loc)
                else:
                    # not supported
                    pass
                continue
            elif isinstance(stmt.data, pyvex.IRExpr.Load):
                # Got it!
                load_stmt, load_stmt_loc, load_size = \
                    stmt, stmt_loc, block.tyenv.sizeof(stmt.tmp) // self.project.arch.byte_width
                stmts_to_remove.append(stmt_loc)
        elif isinstance(stmt, pyvex.IRStmt.LoadG):
            # Got it!  This is how an ARM jump table is translated to VEX:
            #   t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
            load_stmt, load_stmt_loc, load_size = \
                stmt, stmt_loc, block.tyenv.sizeof(stmt.dst) // self.project.arch.byte_width
            stmts_to_remove.append(stmt_loc)
        break

    if load_stmt_loc is None:
        # the load statement is not found
        return False, None

    # If we're just reading a constant, don't bother with the rest of this mess!
    if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
        if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
            # It's directly loading from a constant address, e.g.
            #   ldr r0, =main+1
            #   blx r0
            # It's not a jump table, but we resolve it anyway.
            jump_target_addr = load_stmt.data.addr.con.value
            jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
            if jump_target is None:
                l.info("Constant indirect jump at %#08x points outside of loaded memory to %#08x", addr, jump_target_addr)
                return False, None
            l.info("Resolved constant indirect jump from %#08x to %#08x", addr, jump_target_addr)
            ij = cfg.indirect_jumps[addr]
            ij.jumptable = False
            ij.resolved_targets = set([jump_target])
            return True, [jump_target]
    elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
        if type(load_stmt.addr) is pyvex.IRExpr.Const:
            # It's directly loading from a constant address, e.g.
            #   4352c  SUB   R1, R11, #0x1000
            #   43530  LDRHI R3, =loc_45450
            #   43540  MOV   PC, R3
            # It's not a jump table, but we resolve it anyway.  Note that this
            # block has two branches: one goes to 45450, the other one goes to
            # whatever the original value of R3 is.  Some intensive data-flow
            # analysis is required in this case.
            jump_target_addr = load_stmt.addr.con.value
            jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
            l.info("Resolved constant indirect jump from %#08x to %#08x", addr, jump_target_addr)
            ij = cfg.indirect_jumps[addr]
            ij.jumptable = False
            ij.resolved_targets = set([jump_target])
            return True, [jump_target]

    # Well, we have a real jumptable to resolve!

    # NOTE(review): the two branches below duplicate the constant-address
    # handling just above; since both conditions were already tested (and
    # returned) there, these branches appear unreachable.  Kept verbatim for
    # this documentation-only pass -- confirm before removing.
    if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
        if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
            # It's directly loading from a constant address, e.g.
            #   ldr r0, =main+1
            #   blx r0
            # It's not a jump table, but we resolve it anyway.
            jump_target_addr = load_stmt.data.addr.con.value
            jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
            if not jump_target:
                # ...except this constant looks like a jumpout!
                l.info("Constant indirect jump directed out of the binary at #%08x", addr)
                return False, []
            l.info("Resolved constant indirect jump from %#08x to %#08x", addr, jump_target_addr)
            ij = cfg.indirect_jumps[addr]
            ij.jumptable = False
            ij.resolved_targets = set([jump_target])
            return True, [jump_target]
    elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
        if type(load_stmt.addr) is pyvex.IRExpr.Const:
            # It's directly loading from a constant address (see the ARM
            # example above).  It's not a jump table, but we resolve it anyway.
            jump_target_addr = load_stmt.addr.con.value
            jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
            l.info("Resolved constant indirect jump from %#08x to %#08x", addr, jump_target_addr)
            ij = cfg.indirect_jumps[addr]
            ij.jumptable = False
            ij.resolved_targets = set([jump_target])
            return True, [jump_target]

    # Skip all statements before the load statement.  We want to leave the
    # final loaded value as symbolic, so we can get the full range of
    # possibilities.
    b.slice.remove_nodes_from(stmts_to_remove)

    # Debugging output
    if l.level == logging.DEBUG:
        self._dbg_repr_slice(b)

    # Get all sources
    sources = [n_ for n_ in b.slice.nodes() if b.slice.in_degree(n_) == 0]

    # Create the annotated CFG
    annotatedcfg = AnnotatedCFG(project, None, detect_loops=False)
    annotatedcfg.from_digraph(b.slice)

    # pylint:disable=too-many-nested-blocks
    for src_irsb, _ in sources:
        # Use slicecutor to execute each one, and get the address
        # We simply give up if any exception occurs on the way
        start_state = self._initial_state(src_irsb)
        # Keep IP symbolic to avoid unnecessary concretization
        start_state.options.add(o.KEEP_IP_SYMBOLIC)
        start_state.options.add(o.NO_IP_CONCRETIZATION)
        # be quiet!!!!!
        start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS)
        start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_MEMORY)

        # any read from an uninitialized segment should be unconstrained
        if self._bss_regions:
            bss_memory_read_bp = BP(when=BP_BEFORE, enabled=True, action=self._bss_memory_read_hook)
            start_state.inspect.add_breakpoint('mem_read', bss_memory_read_bp)

        # FIXME: this is a hack: for certain architectures, we do not
        # initialize the base pointer, since the jump table on those
        # architectures may use the bp register to store value
        if not self.project.arch.name in {'S390X'}:
            start_state.regs.bp = start_state.arch.initial_sp + 0x2000

        self._cached_memread_addrs.clear()
        init_registers_on_demand_bp = BP(when=BP_BEFORE, enabled=True, action=self._init_registers_on_demand)
        start_state.inspect.add_breakpoint('mem_read', init_registers_on_demand_bp)

        # Create the slicecutor
        simgr = self.project.factory.simulation_manager(start_state, resilience=True)
        slicecutor = Slicecutor(annotatedcfg, force_taking_exit=True)
        simgr.use_technique(slicecutor)
        simgr.use_technique(Explorer(find=load_stmt_loc[0]))

        # Run it!
        try:
            simgr.run()
        except KeyError as ex:
            # This is because the program slice is incomplete.
            # Blade will support more IRExprs and IRStmts
            l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
            continue

        # Get the jumping targets
        for r in simgr.found:
            try:
                whitelist = annotatedcfg.get_whitelisted_statements(r.addr)
                last_stmt = annotatedcfg.get_last_statement_index(r.addr)
                succ = project.factory.successors(r, whitelist=whitelist, last_stmt=last_stmt)
            except (AngrError, SimError):
                # oops there are errors
                l.warning('Cannot get jump successor states from a path that has reached the target. Skip it.')
                continue
            all_states = succ.flat_successors + succ.unconstrained_successors
            if not all_states:
                l.warning("Slicecutor failed to execute the program slice. No output state is available.")
                continue

            # Just take the first state
            state = all_states[0]
            # Clear the cache to save some memory (and avoid confusion when
            # debugging).
            self._cached_memread_addrs.clear()

            # Parse the memory load statement and get the memory address of
            # where the jump table is stored
            jumptable_addr = self._parse_load_statement(load_stmt, state)
            if jumptable_addr is None:
                continue

            # sanity check and necessary pre-processing
            if stmts_adding_base_addr:
                # Making sure we are only dealing with one operation here
                assert len(stmts_adding_base_addr) == 1
                jump_base_addr = stmts_adding_base_addr[0]
                if jump_base_addr.base_addr_available:
                    addr_holders = {(jump_base_addr.stmt_loc[0], jump_base_addr.tmp)}
                else:
                    addr_holders = {(jump_base_addr.stmt_loc[0], jump_base_addr.tmp),
                                    (jump_base_addr.stmt_loc[0], jump_base_addr.tmp_1)}
                if len(set(all_addr_holders.keys()).intersection(addr_holders)) != 1:
                    # For some reason it's trying to add a base address onto a
                    # different temporary variable that we are not aware of.
                    # Skip.
                    continue
                if not jump_base_addr.base_addr_available:
                    # We need to decide which tmp is the address holder and
                    # which tmp holds the base address.
                    addr_holder = next(iter(set(all_addr_holders.keys()).intersection(addr_holders)))
                    if jump_base_addr.tmp_1 == addr_holder[1]:
                        # swap the two tmps
                        jump_base_addr.tmp, jump_base_addr.tmp_1 = \
                            jump_base_addr.tmp_1, jump_base_addr.tmp
                    # Load the concrete base address
                    jump_base_addr.base_addr = state.solver.eval(state.scratch.temps[jump_base_addr.tmp_1])

            all_targets = []
            total_cases = jumptable_addr._model_vsa.cardinality

            if total_cases > self._max_targets:
                # We resolved too many targets for this indirect jump.
                # Something might have gone wrong.
                l.debug("%d targets are resolved for the indirect jump at %#x. It may not be a jump table. Try the "
                        "next source, if there is any.", total_cases, addr)
                continue
                # Or alternatively, we can ask user, which is meh...
                #   jump_base_addr = int(raw_input("please give me the jump base addr: "), 16)
                #   total_cases = int(raw_input("please give me the total cases: "))
                #   jump_target = state.solver.SI(bits=64, lower_bound=jump_base_addr,
                #       upper_bound=jump_base_addr + (total_cases - 1) * 8, stride=8)

            jump_table = []

            min_jumptable_addr = state.solver.min(jumptable_addr)
            max_jumptable_addr = state.solver.max(jumptable_addr)

            # Both the min jump target and the max jump target should be
            # within a mapped memory region, i.e., we shouldn't be jumping to
            # the stack or somewhere unmapped.
            if (not project.loader.find_segment_containing(min_jumptable_addr) or
                    not project.loader.find_segment_containing(max_jumptable_addr)):
                if (not project.loader.find_section_containing(min_jumptable_addr) or
                        not project.loader.find_section_containing(max_jumptable_addr)):
                    l.debug("Jump table %#x might have jump targets outside mapped memory regions. "
                            "Continue to resolve it from the next data source.", addr)
                    continue

            # Load the jump table from memory
            for idx, a in enumerate(state.solver.eval_upto(jumptable_addr, total_cases)):
                if idx % 100 == 0 and idx != 0:
                    l.debug("%d targets have been resolved for the indirect jump at %#x...", idx, addr)
                target = cfg._fast_memory_load_pointer(a, size=load_size)
                all_targets.append(target)

            # Adjust entries inside the jump table
            if stmts_adding_base_addr:
                stmt_adding_base_addr = stmts_adding_base_addr[0]
                base_addr = stmt_adding_base_addr.base_addr
                # Undo any size-conversion operations applied after the load,
                # most recent first.
                conversion_ops = list(reversed(list(v for v in all_addr_holders.values()
                                                    if v is not AddressTransferringTypes.Assignment)))
                if conversion_ops:
                    invert_conversion_ops = []
                    for conversion_op in conversion_ops:
                        if conversion_op is AddressTransferringTypes.SignedExtension32to64:
                            lam = lambda a: (a | 0xffffffff00000000) if a >= 0x80000000 else a
                        elif conversion_op is AddressTransferringTypes.UnsignedExtension32to64:
                            lam = lambda a: a
                        elif conversion_op is AddressTransferringTypes.Truncation64to32:
                            lam = lambda a: a & 0xffffffff
                        else:
                            raise NotImplementedError("Unsupported conversion operation.")
                        invert_conversion_ops.append(lam)
                    all_targets_copy = all_targets
                    all_targets = []
                    for target_ in all_targets_copy:
                        for lam in invert_conversion_ops:
                            target_ = lam(target_)
                        all_targets.append(target_)
                # Add the base address to every entry, wrapping at the
                # architecture's word width.
                mask = (2 ** self.project.arch.bits) - 1
                all_targets = [(target + base_addr) & mask for target in all_targets]

            # Finally... all targets are ready
            illegal_target_found = False
            for target in all_targets:
                # If the total number of targets is suspicious (it usually
                # implies a failure in applying the constraints), check if all
                # jump targets are legal.
                if len(all_targets) in {0x100, 0x10000} and not self._is_jumptarget_legal(target):
                    l.info("Jump target %#x is probably illegal. Try to resolve indirect jump at %#x from the next "
                           "source.", target, addr)
                    illegal_target_found = True
                    break
                jump_table.append(target)
            if illegal_target_found:
                continue

            l.info("Resolved %d targets from %#x.", len(all_targets), addr)

            # write to the IndirectJump object in CFG
            ij = cfg.indirect_jumps[addr]
            if total_cases > 1:
                # It can be considered a jump table only if there are more
                # than one jump target.
                ij.jumptable = True
                ij.jumptable_addr = state.solver.min(jumptable_addr)
                ij.resolved_targets = set(jump_table)
                ij.jumptable_entries = jump_table
            else:
                ij.jumptable = False
                ij.resolved_targets = set(jump_table)

            return True, all_targets

    l.info("Could not resolve indirect jump %#x in funtion %#x.", addr, func_addr)
    return False, None
def segment_intersection1(start0, end0, start1, end1, s):
    """Image for :func:`.segment_intersection` docstring."""
    if NO_IMAGES:
        return
    # Build both line segments as degree-1 Bezier curves.
    first = bezier.Curve.from_nodes(stack1d(start0, end0))
    second = bezier.Curve.from_nodes(stack1d(start1, end1))
    ax = first.plot(2)
    second.plot(256, ax=ax)
    # Mark the intersection point at parameter ``s`` along the first segment.
    (x_val,), (y_val,) = first.evaluate(s)
    ax.plot([x_val], [y_val], color="black", marker="o")
    ax.axis("scaled")
    save_image(ax.figure, "segment_intersection1.png")
def normalized(self):
    """Returns the unit quaternion corresponding to the same rotation
    as this one."""
    # Divide every component by the magnitude to obtain a unit quaternion.
    m = self.magnitude()
    return Quaternion(self.x / m, self.y / m, self.z / m, self.w / m)
def u2ver(self):
    """Get the major/minor version of the urllib2 lib.

    @return: The urllib2 version as <major>.<minor>, or 0 on failure.
    @rtype: float
    """
    try:
        # Keep only the first two version components.  The old code kept the
        # whole remainder after the first dot, so a three-part version such
        # as "2.7.1" produced float("2.7.1"), raised ValueError, and wrongly
        # fell through to the failure value 0.
        part = u2.__version__.split('.')[:2]
        return float('.'.join(part))
    except Exception as e:
        # Best-effort: version detection must never break the caller.
        log.exception(e)
        return 0
def remove_trailing_string(content, trailing):
    """Strip trailing component `trailing` from `content` if it exists.

    Used when generating names from view classes.
    """
    # Only strip a proper suffix: a string equal to its own suffix is kept.
    is_proper_suffix = content.endswith(trailing) and content != trailing
    if not is_proper_suffix:
        return content
    return content[:-len(trailing)]
def load_log(args):
    """Load a `logging.Logger` object.

    Arguments
    ---------
    args : `argparse.Namespace` object
        Namespace containing required settings:
        {`args.debug`, `args.verbose`, and `args.log_filename`}.

    Returns
    -------
    log : `logging.Logger` object
    """
    from astrocats.catalog.utils import logger
    # Determine verbosity; None means "use the default stream level".
    if args.debug:
        stream_level = logger.DEBUG
    elif args.verbose:
        stream_level = logger.INFO
    else:
        stream_level = None
    # Create the log and stash the flags on it for later introspection.
    log = logger.get_logger(stream_level=stream_level, tofile=args.log_filename)
    log._verbose = args.verbose
    log._debug = args.debug
    return log
def _GetDirectory(self):
    """Retrieves a directory.

    Returns:
      TARDirectory: a directory or None if not available.
    """
    # Only directory entries yield a directory object.
    if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
        return TARDirectory(self._file_system, self.path_spec)
    return None
def get_tc_arguments(parser):
    """Append test case arguments to parser.

    :param parser: ArgumentParser to extend in place.
    :return: ArgumentParser (the same object, for chaining).
    """
    group2 = parser.add_argument_group('Test case arguments')
    # --- logging / reporting -------------------------------------------
    group2.add_argument('--log',
                        default=os.path.abspath("./log"),
                        help='Store logs to specific path. Filename will be '
                             '<path>/<testcase>_D<dutNumber>.log')
    group2.add_argument('-s', '--silent',
                        action='store_true', dest='silent', default=False,
                        help='Silent mode, only prints results')
    group2.add_argument('-v', "--verbose",
                        dest='verbose', default=0,
                        help="increase output verbosity, max 2 times.",
                        action="count")
    group2.add_argument('-w',
                        action='store_true', dest='cloud', default=False,
                        help='Store results to a cloud service.')
    group2.add_argument('--with_logs',
                        action='store_true', dest='with_logs', default=False,
                        help="Store bench.log to cloud db after run.")
    # --- DUT selection / configuration ---------------------------------
    group2.add_argument('--reset',
                        dest='reset', action='store', nargs='?', const=True,
                        help='reset device before executing test cases')
    group2.add_argument('--iface',
                        help="Used NW sniffer interface name")
    group2.add_argument('--bin',
                        help="Used specific binary for DUTs, when process is in use. "
                             "NOTE: Does not affect duts which specify their own binaries.")
    group2.add_argument('--tc_cfg',
                        help='Testcase Configuration file')
    group2.add_argument('--type',
                        help='Overrides DUT type.',
                        choices=['hardware', 'process', "serial", "mbed"])
    group2.add_argument('--platform_name',
                        help='Overrides used platform. Must be found in allowed_platforms in '
                             'dut configuration if allowed_platforms is defined and non-empty.',
                        default=None)
    group2.add_argument('--putty',
                        dest='putty', action='store_true', default=False,
                        help='Open putty after TC executed')
    # --- phase skipping -------------------------------------------------
    group2.add_argument('--skip_setup',
                        action='store_true', default=False,
                        help='Skip TC setUp phase')
    group2.add_argument('--skip_case',
                        action='store_true', default=False,
                        help='Skip TC body phase')
    group2.add_argument('--skip_teardown',
                        action='store_true', default=False,
                        help='Skip TC tearDown phase')
    # --- valgrind -------------------------------------------------------
    group2.add_argument('--valgrind',
                        action='store_true', default=False,
                        help='Analyse nodes with valgrind (linux only)')
    group2.add_argument('--valgrind_tool',
                        help='Valgrind tool to use.',
                        choices=['memcheck', 'callgrind', 'massif'])
    group2.add_argument('--valgrind_extra_params',
                        default='',
                        help='Additional command line parameters to valgrind.')
    # only one of the --valgrind_text or --valgrind_console is allowed
    valgrind_group = parser.add_mutually_exclusive_group()
    valgrind_group.add_argument('--valgrind_text',
                                action='store_true', default=False,
                                help='Output as Text. Default: xml format')
    valgrind_group.add_argument('--valgrind_console',
                                dest='valgrind_console', action='store_true', default=False,
                                help='Output as Text to console. Default: xml format')
    group2.add_argument('--valgrind_track_origins',
                        action='store_true', default=False,
                        help='Show origins of undefined values. Default: false; '
                             'Used only if the Valgrind tool is memcheck')
    # --- sniffer / multi-DUT -------------------------------------------
    group2.add_argument('--use_sniffer',
                        dest='use_sniffer', action='store_true', default=False,
                        help='Use Sniffer')
    group2.add_argument('--my_duts',
                        dest="my_duts",
                        help='Use only some of duts. e.g. --my_duts 1,3')
    group2.add_argument('--pause_ext',
                        action='store_true', dest="pause_when_external_dut", default=False,
                        help='Pause when external device command happens')
    # only one of the --gdb, --gdbs or --vgdb is allowed
    gdb_group = parser.add_mutually_exclusive_group()
    gdb_group.add_argument('--gdb', type=int,
                           help='Run specific process node with gdb (debugger). e.g. --gdb 1')
    gdb_group.add_argument('--gdbs', type=int,
                           help='Run specific process node with gdbserver '
                                '(debugger). e.g. --gdbs 1')
    gdb_group.add_argument('--vgdb', type=int,
                           help='Run specific process node with vgdb '
                                '(debugger under valgrind). e.g. --vgdb 1')
    group2.add_argument('--gdbs-port',
                        dest='gdbs_port', type=int, default=2345,
                        help='select gdbs port')
    group2.add_argument('--pre-cmds',
                        dest='pre_cmds',
                        help='Send extra commands right after DUT connection')
    group2.add_argument('--post-cmds',
                        dest='post_cmds',
                        help='Send extra commands right before terminating dut connection.')
    # --- serial port tuning ---------------------------------------------
    group2.add_argument('--baudrate',
                        dest='baudrate', type=int,
                        help='Use user defined serial baudrate (when serial device is in use)')
    group2.add_argument('--serial_timeout',
                        type=float, default=0.01,
                        help='User defined serial timeout (default 0.01)')
    group2.add_argument('--serial_xonxoff',
                        action='store_true', default=False,
                        help='Use software flow control')
    group2.add_argument('--serial_rtscts',
                        action='store_true', default=False,
                        help='Use Hardware flow control')
    # NOTE(review): "chunck" typo below is in the user-visible help string;
    # left untouched here because help text is runtime behavior.
    group2.add_argument('--serial_ch_size',
                        type=int, default=-1,
                        help='use chunck mode with size N when writing to serial port. '
                             '(default N=-1: '
                             'use pre-defined mode, N=0: normal, N<0: chunk-mode with size N')
    group2.add_argument('--serial_ch_delay',
                        dest='ch_mode_ch_delay', type=float,
                        help='User defined delay between characters. '
                             'Used only when serial_ch_size>0. (default 0.01)')
    group2.add_argument('--nobuf',
                        help="Do not use stdio buffers in node process.")
    group2.add_argument('--kill_putty',
                        action='store_true',
                        help='Kill old putty/kitty processes')
    # only one of the --forceflash, --forceflash_once is allowed
    forceflash_group = parser.add_mutually_exclusive_group()
    forceflash_group.add_argument('--forceflash',
                                  action='store_true', default=False,
                                  help='Force flashing of hardware device if binary is given. '
                                       'Defaults to False')
    forceflash_group.add_argument('--forceflash_once',
                                  action='store_true', default=False,
                                  help='Force flashing of hardware device if '
                                       'binary is given, but only once. Defaults to False')
    forceflash_group.add_argument("--skip_flash",
                                  default=False, action="store_true",
                                  help="Skip flashing hardware devices during this run.")
    group2.add_argument('--interface',
                        dest='interface', default='eth0',
                        help='Network interface used in tests, unless the testcase specifies '
                             'which one to use. Defaults to eth0')
    return parser
def main(pos, bobj=None):
    """Look up and print the city nearest to a position.

    :param pos: A dictionary with {'latitude': 8.12, 'longitude': 42.6}
    :param bobj: An object which has a 'get' method and returns a dictionary.
    """
    nearest, dist = get_city(pos, bobj)
    # Re-read the full record for the matched line number.
    city = get_city_from_file(nearest['linenr'])
    print("The city '%s' is about %0.2fkm away from your location %s"
          % (city['asciiname'], dist / 1000.0, str(pos)))
    for key, value in sorted(city.items()):
        print("%s: %s" % (key, value))
def get_size(self):
    """Return the terminal size as a ``(cols, lines)`` tuple.

    See doc in Term class.
    """
    # setupterm() (re)initializes terminfo so tigetnum reflects the
    # current terminal.
    self.curses.setupterm()
    return self.curses.tigetnum('cols'), self.curses.tigetnum('lines')
def get_feature_state_for_scope(self, feature_id, user_scope, scope_name, scope_value):
    """GetFeatureStateForScope.

    [Preview API] Get the state of the specified feature for the given named scope.
    :param str feature_id: Contribution id of the feature
    :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
    :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team")
    :param str scope_value: Value of the scope (e.g. the project or team id)
    :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>`
    """
    # Only include route parameters that were actually supplied.
    route_values = {}
    if feature_id is not None:
        route_values['featureId'] = self._serialize.url('feature_id', feature_id, 'str')
    if user_scope is not None:
        route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
    if scope_name is not None:
        route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str')
    if scope_value is not None:
        route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str')
    response = self._send(http_method='GET',
                          location_id='dd291e43-aa9f-4cee-8465-a93c78e414a4',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('ContributedFeatureState', response)
def ned(simulated_array, observed_array, replace_nan=None, replace_inf=None,
        remove_neg=False, remove_zero=False):
    """Compute the normalized Euclidean distance between simulated and observed data.

    **Range** 0 <= NED < inf, smaller is better. Sometimes referred to as the
    squared L2-norm.

    Parameters
    ----------
    simulated_array: one dimensional ndarray
        An array of simulated data from the time series.
    observed_array: one dimensional ndarray
        An array of observed data from the time series.
    replace_nan: float, optional
        If given, value to replace NaN values with in both arrays. If None,
        the i-th values of both arrays are removed wherever either holds NaN.
    replace_inf: float, optional
        If given, value to replace Inf values with in both arrays. If None,
        the i-th values of both arrays are removed wherever either holds Inf.
    remove_neg: boolean, optional
        If True, remove the i-th values of both arrays wherever either is
        negative.
    remove_zero: boolean, optional
        If True, remove the i-th values of both arrays wherever either is zero.

    Returns
    -------
    float
        The normalized Euclidean distance value.

    Examples
    --------
    >>> import HydroErr as he
    >>> import numpy as np
    >>> sim = np.array([5, 7, 9, 2, 4.5, 6.7])
    >>> obs = np.array([4.7, 6, 10, 2.5, 4, 7])
    >>> he.ned(sim, obs)
    0.2872053604165771

    References
    ----------
    - Kennard, M. J., Mackay, S. J., Pusey, B. J., Olden, J. D., & Marsh, N.
      (2010). Quantifying uncertainty in estimation of hydrologic metrics for
      ecohydrological studies. River Research and Applications, 26(2), 137-156.
    """
    # Checking and cleaning the data (NaN/Inf/neg/zero policy above).
    simulated_array, observed_array = treat_values(
        simulated_array, observed_array, replace_nan=replace_nan,
        replace_inf=replace_inf, remove_neg=remove_neg,
        remove_zero=remove_zero)
    # Scale each series by its own mean before taking the L2 distance.
    a = observed_array / np.mean(observed_array)
    b = simulated_array / np.mean(simulated_array)
    return np.linalg.norm(a - b)
def append_variables(self, samples_like, sort_labels=True):
    """Create a new sampleset with the given variables and values added.

    Not defined for empty sample sets. Note that when `samples_like` is a
    :obj:`.SampleSet`, the data vectors and info are ignored.

    Args:
        samples_like:
            Samples to add to the sample set. Should either be a single
            sample or should match the length of the sample set. See
            :func:`.as_samples` for what is allowed to be `samples_like`.
        sort_labels (bool, optional, default=True):
            If true, returned :attr:`.SampleSet.variables` will be in
            sorted-order. Note that mixed types are not sortable in which
            case the given order will be maintained.

    Returns:
        :obj:`.SampleSet`: A new sample set with the variables/values added.
    """
    samples, labels = as_samples(samples_like)
    num_samples = len(self)
    # we don't handle multiple values
    if samples.shape[0] == num_samples:
        # we don't need to do anything, it's already the correct shape
        pass
    elif samples.shape[0] == 1 and num_samples:
        # broadcast a single sample across every row of the sample set
        samples = np.repeat(samples, num_samples, axis=0)
    else:
        msg = ("mismatched shape. The samples to append should either be "
               "a single sample or should match the length of the sample "
               "set. Empty sample sets cannot be appended to.")
        raise ValueError(msg)
    # append requires the new variables to be unique
    variables = self.variables
    if any(v in variables for v in labels):
        msg = "Appended samples cannot contain variables in sample set"
        raise ValueError(msg)
    new_variables = list(variables) + labels
    # column-wise concatenation: existing record columns first, then the new ones
    new_samples = np.hstack((self.record.sample, samples))
    return type(self).from_samples((new_samples, new_variables),
                                   self.vartype,
                                   info=copy.deepcopy(self.info),  # make a copy
                                   sort_labels=sort_labels,
                                   **self.data_vectors)
def swd_read32(self, offset):
    """Gets a unit of ``32`` bits from the input buffer.

    Args:
      self (JLink): the ``JLink`` instance
      offset (int): the offset (in bits) from which to start reading

    Returns:
      The integer read from the input buffer, reinterpreted as an
      unsigned 32-bit value.
    """
    raw = self._dll.JLINK_SWD_GetU32(offset)
    # c_uint32 wraps the raw DLL return into the unsigned 32-bit range.
    return ctypes.c_uint32(raw).value
def _update_project(self, request, data):
    """Update project info.

    Returns the updated project on success, or None when the update fails
    (a Conflict sets ``self.failure_message``; other errors are reported via
    ``exceptions.handle``).
    """
    domain_id = identity.get_domain_id_for_operation(request)
    try:
        project_id = data['project_id']
        # add extra information (only supported by keystone v3+)
        if keystone.VERSIONS.active >= 3:
            EXTRA_INFO = getattr(settings, 'PROJECT_TABLE_EXTRA_INFO', {})
            kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
        else:
            kwargs = {}
        return api.keystone.tenant_update(
            request,
            project_id,
            name=data['name'],
            description=data['description'],
            enabled=data['enabled'],
            domain=domain_id,
            **kwargs)
    except exceptions.Conflict:
        # Duplicate project name: surface a user-facing message, no reraise.
        msg = _('Project name "%s" is already used.') % data['name']
        self.failure_message = msg
        return
    except Exception as e:
        LOG.debug('Project update failed: %s', e)
        exceptions.handle(request, ignore=True)
        return
def post(self):
    """API endpoint to push transactions to the Federation.

    Return:
        A ``dict`` containing the data about the transaction.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('mode', type=parameters.valid_mode,
                        default='broadcast_tx_async')
    args = parser.parse_args()
    mode = str(args['mode'])
    pool = current_app.config['bigchain_pool']
    # `force` will try to format the body of the POST request even if the
    # `content-type` header is not set to `application/json`
    tx = request.get_json(force=True)
    try:
        tx_obj = Transaction.from_dict(tx)
    except SchemaValidationError as e:
        return make_error(
            400,
            message='Invalid transaction schema: {}'.format(e.__cause__.message))
    except ValidationError as e:
        return make_error(
            400,
            'Invalid transaction ({}): {}'.format(type(e).__name__, e))
    with pool() as bigchain:
        try:
            bigchain.validate_transaction(tx_obj)
        except ValidationError as e:
            return make_error(
                400,
                'Invalid transaction ({}): {}'.format(type(e).__name__, e))
        else:
            status_code, message = bigchain.write_transaction(tx_obj, mode)
            # 202 Accepted: echo the transaction back to the caller;
            # anything else is surfaced as an error payload.
            if status_code == 202:
                response = jsonify(tx)
                response.status_code = 202
                return response
            else:
                return make_error(status_code, message)
def is_uniform(keys, axis=semantics.axis_default):
    """Return True if all keys have equal multiplicity."""
    # Delegate to the index machinery; `uniform` is precomputed there.
    index = as_index(keys, axis)
    return index.uniform
def main():
    """Provide the program's entry point when directly executed.

    Runs the OAuth2 authorization-code flow for the requested scopes and
    prints the resulting refresh token. Returns 0 on success, 1 on error.
    """
    if len(sys.argv) < 2:
        print("Usage: {} SCOPE...".format(sys.argv[0]))
        return 1
    authenticator = prawcore.TrustedAuthenticator(
        prawcore.Requestor("prawcore_refresh_token_example"),
        os.environ["PRAWCORE_CLIENT_ID"],
        os.environ["PRAWCORE_CLIENT_SECRET"],
        os.environ["PRAWCORE_REDIRECT_URI"],
    )
    # Random state guards against CSRF on the redirect.
    state = str(random.randint(0, 65000))
    url = authenticator.authorize_url("permanent", sys.argv[1:], state)
    print(url)
    client = receive_connection()
    data = client.recv(1024).decode("utf-8")
    # Extract the query string from the HTTP request line: "GET /path?query HTTP/1.1"
    param_tokens = data.split(" ", 2)[1].split("?", 1)[1].split("&")
    params = {key: value for (key, value) in
              [token.split("=") for token in param_tokens]}
    if state != params["state"]:
        send_message(
            client,
            "State mismatch. Expected: {} Received: {}".format(
                state, params["state"]),
        )
        return 1
    elif "error" in params:
        send_message(client, params["error"])
        return 1
    authorizer = prawcore.Authorizer(authenticator)
    authorizer.authorize(params["code"])
    send_message(client, "Refresh token: {}".format(authorizer.refresh_token))
    return 0
def calc_fwhm_moffat(self, arr1d, medv=None, moffat_fn=None):
    """FWHM calculation on a 1D array by using least square fitting of
    a Moffat function on the data. `arr1d` is a 1D array cut in either
    X or Y direction on the object.

    `medv` is the background level to subtract (median of `arr1d` when
    None); `moffat_fn` defaults to `self.moffat`. Returns a Bunch with
    the fitted parameters and the derived FWHM.
    """
    if moffat_fn is None:
        moffat_fn = self.moffat
    N = len(arr1d)
    X = np.array(list(range(N)))
    Y = arr1d
    # Fitting works more reliably if we do the following
    # a. subtract sky background
    if medv is None:
        medv = get_median(Y)
    Y = Y - medv
    maxv = Y.max()
    # b. clamp to 0..max (of the sky subtracted field)
    Y = Y.clip(0, maxv)
    # Fit a moffat: initial guess [mu, width, power, amplitude]
    p0 = [0, N - 1, 2, maxv]
    # Distance to the target function
    errfunc = lambda p, x, y: moffat_fn(x, p) - y  # noqa
    # Least square fit to the gaussian
    with self.lock:
        # NOTE: without this mutex, optimize.leastsq causes a fatal error
        # sometimes -- it appears not to be thread safe.
        # The error is:
        #   "SystemError: null argument to internal routine"
        #   "Fatal Python error: GC object already tracked"
        p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))
    if not success:
        raise IQCalcError("FWHM moffat fitting failed")
    mu, width, power, maxv = p1
    width = np.abs(width)
    self.logger.debug("mu=%f width=%f power=%f maxv=%f" % (mu, width, power, maxv))
    # Analytic FWHM of a Moffat profile from its width and power.
    fwhm = 2.0 * width * np.sqrt(2.0 ** (1.0 / power) - 1.0)
    # some routines choke on numpy values and need "pure" Python floats
    # e.g. when marshalling through a remote procedure interface
    fwhm = float(fwhm)
    mu = float(mu)
    width = float(width)
    power = float(power)
    maxv = float(maxv)
    res = Bunch.Bunch(fwhm=fwhm, mu=mu, width=width, power=power,
                      maxv=maxv, fit_fn=moffat_fn,
                      fit_args=[mu, width, power, maxv])
    return res
def _close_open_date_ranges ( self , record ) :
"""If a date range is missing the start or end date , close it by copying the
date from the existing value .""" | date_ranges = ( ( 'beginDate' , 'endDate' ) , )
for begin , end in date_ranges :
if begin in record and end in record :
return
elif begin in record :
record [ end ] = record [ begin ]
elif end in record :
record [ begin ] = record [ end ] |
def file_md5(file_name):
    '''Generate an MD5 hash of the specified file.

    @file_name - The file to hash.

    Returns an MD5 hex digest string.
    '''
    digest = hashlib.md5()
    # Read in multiples of the digest block size to keep memory bounded.
    chunk_size = 128 * digest.block_size
    with open(file_name, 'rb') as handle:
        while True:
            chunk = handle.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
async def AddPendingResources(self, addcharmwithauthorization, entity, resources):
    '''Register resources that are pending for an entity.

    addcharmwithauthorization : AddCharmWithAuthorization
    entity : Entity
    resources : typing.Sequence[~CharmResource]
    Returns -> typing.Union[_ForwardRef('ErrorResult'), typing.Sequence[str]]
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='Resources',
               request='AddPendingResources',
               version=1,
               params=_params)
    _params['AddCharmWithAuthorization'] = addcharmwithauthorization
    _params['Entity'] = entity
    _params['Resources'] = resources
    reply = await self.rpc(msg)
    return reply
def check(self, item_id):
    """Check if an analysis is complete.

    :type item_id: int
    :param item_id: task_id to check.
    :rtype: bool
    :return: Boolean indicating if a report is done or not.
    """
    response = self._request("tasks/view/{id}".format(id=item_id))
    if response.status_code == 404:
        # probably an unknown task id
        return False
    try:
        content = json.loads(response.content.decode('utf-8'))
        status = content['task']["status"]
        # Either terminal state counts as "done".
        if status == 'completed' or status == "reported":
            return True
    except ValueError as e:
        # Body was not valid JSON -- treat as a sandbox API failure.
        raise sandboxapi.SandboxError(e)
    return False
def find_atoms_within_distance(atoms, cutoff_distance, point):
    """Returns atoms within the distance from the point.

    Parameters
    ----------
    atoms : [ampal.atom]
        A list of `ampal.atoms`.
    cutoff_distance : float
        Maximum distance from point.
    point : (float, float, float)
        Reference point, 3D coordinate.

    Returns
    -------
    filtered_atoms : [ampal.atoms]
        `atoms` list filtered by distance.
    """
    filtered_atoms = []
    for atom in atoms:
        if distance(atom, point) <= cutoff_distance:
            filtered_atoms.append(atom)
    return filtered_atoms
def pp_event(seq):
    """Returns pretty representation of an Event or keypress."""
    if isinstance(seq, Event):
        return str(seq)
    # Get the original sequence back if seq is a pretty name already;
    # curses names are consulted before curtsies names.
    for table in (CURSES_NAMES, CURTSIES_NAMES):
        reverse = dict((v, k) for k, v in table.items())
        if seq in reverse:
            seq = reverse[seq]
            break
    pretty = curtsies_name(seq)
    if pretty != seq:
        return pretty
    # Fall back to the repr, stripped of quotes (and a legacy 'u' prefix).
    return repr(seq).lstrip('u')[1:-1]
def optimize(population, toolbox, ngen, archive=None, stats=None,
             verbose=False, history=None):
    """Optimize a population of individuals.

    :param population: initial population of individuals
    :param toolbox: evolutionary operators used to evaluate and vary individuals
    :param ngen: number of generations to run
    :param archive: optional archive updated each generation
    :param stats: optional statistics object; its fields extend the logbook header
    :param verbose: whether recording prints progress
    :param history: optional history object updated with each population
    :return: tuple of (final population, logbook, history)
    """
    start = time.time()
    if history is not None:
        history.update(population)
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals', 'cpu_time'] + (stats.fields if stats else [])
    # Evaluate and record generation 0 before the loop starts.
    render_fitness(population, toolbox, history)
    record_information(population, stats, start, archive, logbook, verbose)
    for gen in range(1, ngen + 1):
        offspring = generate_next_population(population, toolbox)
        render_fitness(offspring, toolbox, history)
        # Generational replacement: offspring fully replace the parents.
        population = offspring
        record_information(population, stats, start, archive, logbook, verbose)
    return population, logbook, history
def get(self, value):
    """Returns the VRF configuration as a resource dict.

    Args:
        value (string): The vrf name to retrieve from the
            running configuration.

    Returns:
        A Python dict object containing the VRF attributes as
        key/value pairs, or None when the VRF is not configured.
    """
    config = self.get_block('vrf definition %s' % value)
    if not config:
        return None
    response = dict(vrf_name=value)
    response.update(self._parse_rd(config))
    response.update(self._parse_description(config))
    # Routing is enabled unless the explicit "no ... routing" line exists.
    response['ipv4_routing'] = not self.get_block('no ip routing vrf %s' % value)
    response['ipv6_routing'] = not self.get_block('no ipv6 unicast-routing vrf %s' % value)
    return response
def modsplit(s):
    """Split an importable path into (module, attribute).

    Accepts either "package.module:attr" or "package.module.attr" and
    returns the pair (module_path, attribute_name).

    Raises ValueError when the string has no attribute part or has more
    than one ':' separator.
    """
    if ':' in s:
        parts = s.split(':')
        if len(parts) != 2:
            # f-prefix added: the original plain string never interpolated s.
            raise ValueError(f"Syntax error: {s}")
        return parts[0], parts[1]
    else:
        parts = s.split('.')
        if len(parts) < 2:
            raise ValueError(f"Syntax error: {s}")
        return '.'.join(parts[:-1]), parts[-1]
def updateTitle(self):
    """Updates the title of this widget according to how many parameters are
    currently in the model."""
    title = 'Auto Parameters ({})'.format(self.paramList.model().rowCount())
    # Notify listeners and update our own window title in lockstep.
    self.titleChange.emit(title)
    self.setWindowTitle(title)
def _plot_methods(self):
    """A dictionary with mappings from plot method name to its summary."""
    ret = {}
    # Only consider public attributes; private/dunder names are skipped.
    for attr in filter(lambda s: not s.startswith("_"), dir(self)):
        obj = getattr(self, attr)
        if isinstance(obj, PlotterInterface):
            ret[attr] = obj._summary
    return ret
def get_process_by_its_id(self, process_type_id, expand=None):
    """GetProcessByItsId.

    [Preview API] Get a single process of a specified ID.
    :param str process_type_id: ID of the process to retrieve.
    :param str expand: optional expansion of the returned process info.
    :rtype: :class:`<ProcessInfo> <azure.devops.v5_0.work_item_tracking_process.models.ProcessInfo>`
    """
    route_values = {}
    if process_type_id is not None:
        route_values['processTypeId'] = self._serialize.url('process_type_id', process_type_id, 'str')
    query_parameters = {}
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    response = self._send(http_method='GET',
                          location_id='02cc6a73-5cfb-427d-8c8e-b49fb086e8af',
                          version='5.0-preview.2',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('ProcessInfo', response)
def _get_relationship_data(self):
    """Get useful data for relationship management.

    Returns a 4-tuple: (relationship_field, model_relationship_field,
    related_type_, related_id_field). Raises RelationNotFound when the URL's
    trailing segment does not name a relationship on the schema.
    """
    # Last URL segment names the relationship; dashes map to underscores.
    relationship_field = request.path.split('/')[-1].replace('-', '_')
    if relationship_field not in get_relationships(self.schema):
        raise RelationNotFound("{} has no attribute {}".format(
            self.schema.__name__, relationship_field))
    related_type_ = self.schema._declared_fields[relationship_field].type_
    related_id_field = self.schema._declared_fields[relationship_field].id_field
    model_relationship_field = get_model_field(self.schema, relationship_field)
    return relationship_field, model_relationship_field, related_type_, related_id_field
def camelCaseToDashName(camelCase):
    '''camelCaseToDashName - Convert a camel case name to a dash-name
        (like paddingTop to padding-top)

    @param camelCase <str> - A camel-case string

    @return <str> - A dash-name
    '''
    pieces = []
    for ch in camelCase:
        # Each uppercase letter becomes a dash followed by its lowercase form.
        pieces.append('-' + ch.lower() if ch.isupper() else ch)
    return ''.join(pieces)
def generic_method_not_allowed(*args):
    """Creates a Lambda Service Generic MethodNotAllowed Response.

    Parameters
    ----------
    args : list
        List of arguments Flask passes to the method (unused).

    Returns
    -------
    Flask.Response
        A response object representing the GenericMethodNotAllowed error.
    """
    # exception_tuple is (headers-key, HTTP status code).
    exception_tuple = LambdaErrorResponses.MethodNotAllowedException
    return BaseLocalService.service_response(
        LambdaErrorResponses._construct_error_response_body(
            LambdaErrorResponses.LOCAL_SERVICE_ERROR, "MethodNotAllowedException"),
        LambdaErrorResponses._construct_headers(exception_tuple[0]),
        exception_tuple[1])
def listobs(vis):
    """Textually describe the contents of a measurement set.

    vis (str)
      The path to the dataset.

    Returns
      A generator of lines of human-readable output.

    Errors can only be detected by looking at the output. Example::

        from pwkit.environments.casa import tasks
        for line in tasks.listobs('mydataset.ms'):
            print(line)
    """
    def inner_list(sink):
        try:
            ms = util.tools.ms()
            ms.open(vis)
            ms.summary(verbose=True)
            ms.close()
        except Exception as e:
            # Format as text first, then encode: bytes %-formatting cannot
            # interpolate an arbitrary exception object (no __bytes__, PEP
            # 461), so the original ``b'...' % e`` raised TypeError and the
            # failure message was never posted.
            sink.post(('listobs failed: %s' % e).encode('utf-8'),
                      priority=b'SEVERE')

    for line in util.forkandlog(inner_list):
        # Each log line is: date \t priority \t origin \t message
        info = line.rstrip().split('\t', 3)
        if len(info) > 3:
            yield info[3]
        else:
            yield ''
def check(mod):
    """Check the parsed ASDL tree for correctness.

    Return True if success. For failure, the errors are printed out and
    False is returned.
    """
    checker = Check()
    checker.visit(mod)
    # Any referenced type must be defined in the module or be a builtin.
    for type_name, users in checker.types.items():
        if type_name in mod.types or type_name in builtin_types:
            continue
        checker.errors += 1
        print('Undefined type {}, used in {}'.format(type_name, ", ".join(users)))
    return not checker.errors
def _copy(self, other, copy_func):
    """Copies the contents of another ParsableOctetString object to itself.

    :param other:
        Another instance of the same class

    :param copy_func:
        A reference of copy.copy() or copy.deepcopy() to use when copying
        lists, dicts and objects
    """
    super(ParsableOctetString, self)._copy(other, copy_func)
    # Raw bytes are shared; only the parsed structure is copied with copy_func.
    self._bytes = other._bytes
    self._parsed = copy_func(other._parsed)
def _CheckIsDevice(self, file_entry):
    """Checks the is_device find specification.

    Args:
      file_entry (FileEntry): file entry.

    Returns:
      bool: True if the file entry matches the find specification, False if not.
    """
    # Short-circuit when this specification does not look for devices at all.
    if definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types:
        return False
    return file_entry.IsDevice()
def execCommand(g, command, timeout=10):
    """Executes a command by sending it to the rack server.

    Arguments:
      g : hcam_drivers.globals.Container
         the Container object of application globals
      command : (string)
         the command (see below)
      timeout : (int)
         seconds to wait for the HTTP response

    Possible commands are:
      start   : starts a run
      stop    : stops a run
      abort   : aborts a run
      online  : bring ESO control server online and power up hardware
      off     : put ESO control server in idle state and power down
      standby : server can communicate, but child processes disabled
      reset   : resets the NGC controller front end

    Returns True/False according to whether the command succeeded or not.
    """
    if not g.cpars['hcam_server_on']:
        g.clog.warn('execCommand: servers are not active')
        return False
    try:
        url = g.cpars['hipercam_server'] + command
        g.clog.info('execCommand, command = "' + command + '"')
        response = urllib.request.urlopen(url, timeout=timeout)
        rs = ReadServer(response.read(), status_msg=False)
        g.rlog.info('Server response =\n' + rs.resp())
        if rs.ok:
            g.clog.info('Response from server was OK')
            return True
        else:
            g.clog.warn('Response from server was not OK')
            g.clog.warn('Reason: ' + rs.err)
            return False
    except urllib.error.URLError as err:
        # Network failure is reported but not raised: callers only see False.
        g.clog.warn('execCommand failed')
        g.clog.warn(str(err))
        return False
def raise_api_exceptions(function, *args, **kwargs):
    """Raise client side exception(s) when present in the API response.

    Returned data is not modified.
    """
    try:
        result = function(*args, **kwargs)
    except errors.HTTPException as exc:
        # Anything other than a 400 is an unhandled HTTP error.
        if exc._raw.status_code != 400:  # pylint: disable=W0212
            raise
        try:
            # Attempt to convert v1 errors into older format (for now).
            data = exc._raw.json()  # pylint: disable=W0212
            assert len(data) == 2
            result = {'errors': [(data['reason'], data['explanation'], '')]}
        except Exception:
            raise exc
    # Only dict responses can carry embedded error information.
    if not isinstance(result, dict):
        return result
    if result.get('error') == 304:
        # Not modified exception
        raise errors.NotModified(result)
    if result.get('errors'):
        collected = []
        for error_type, msg, value in result['errors']:
            if error_type in errors.ERROR_MAPPING:
                if error_type == 'RATELIMIT':
                    args[0].evict(args[1])
                error_class = errors.ERROR_MAPPING[error_type]
            else:
                error_class = errors.APIException
            collected.append(error_class(error_type, msg, value, result))
        raise collected[0] if len(collected) == 1 else errors.ExceptionList(collected)
    return result
def subject_sequence_retriever(fasta_handle, b6_handle, e_value,
                               *args, **kwargs):
    """Return FASTA entries for subject sequences from BLAST hits.

    Stores B6/M8 entries with E-values below the e_value cutoff, then
    iterates through the FASTA file; when an entry matches the subject of
    a B6/M8 entry, the aligned region is extracted and yielded as a FASTA
    entry whose description carries the E-value.

    Args:
        fasta_handle (file): FASTA file handle, can technically be any
            iterable that returns FASTA "lines"
        b6_handle (file): B6/M8 file handle, can technically be any
            iterable that returns B6/M8 "lines"
        e_value (float): max E-value of entries to return
        *args: variable length argument list for b6_iter
        **kwargs: arbitrary keyword arguments for b6_iter

    Yields:
        FastaEntry: one entry per matching alignment
    """
    import copy  # local stdlib import: needed for per-alignment copies

    filtered_b6 = defaultdict(list)
    for entry in b6_evalue_filter(b6_handle, e_value, *args, **kwargs):
        filtered_b6[entry.subject].append(
            (entry.subject_start, entry.subject_end, entry._evalue_str))
    for fastaEntry in fasta_iter(fasta_handle):
        if fastaEntry.id not in filtered_b6:
            continue
        for alignment in filtered_b6[fastaEntry.id]:
            # BUG FIX: the original mutated fastaEntry in place, so a
            # subject with multiple hits sliced the already-sliced sequence
            # and kept appending ' E-value: ...' to the same description.
            # Work on a shallow copy per alignment and always slice the
            # pristine sequence instead.
            out_entry = copy.copy(fastaEntry)
            # B6/M8 coordinates are 1-indexed; convert to 0-indexed.
            start = alignment[0] - 1
            end = alignment[1] - 1
            # Get subject sequence; a reversed slice means the alignment is
            # on the other strand, so return it reversed.
            if start < end:
                subject_sequence = fastaEntry.sequence[start:end]
            elif start > end:
                subject_sequence = fastaEntry.sequence[end:start][::-1]
            else:
                subject_sequence = fastaEntry.sequence[start]
            out_entry.sequence = subject_sequence
            # Add E-value to FASTA/Q header
            if out_entry.description == '':
                out_entry.description = 'E-value: '
            else:
                out_entry.description += ' E-value: '
            out_entry.description += alignment[2]
            yield out_entry
def _download(self, dstFile):
    """Download this resource from its URL to the given file object.

    Retries on HTTP 400 responses and verifies the payload's MD5 digest
    against ``self.contentHash`` before writing.

    :type dstFile: io.BytesIO | io.FileIO
    """
    retry_on_400 = lambda e: isinstance(e, HTTPError) and e.code == 400
    for attempt in retry(predicate=retry_on_400):
        with attempt:
            with closing(urlopen(self.url)) as content:
                payload = content.read()
            # Integrity check: the downloaded bytes must match the
            # advertised content hash.
            assert hashlib.md5(payload).hexdigest() == self.contentHash
            dstFile.write(payload)
def get_preparation_cmd(user, permissions, path):
    """Generate the command lines for adjusting a volume's ownership and
    permission flags. Yields nothing if there is nothing to adjust.

    :param user: User to set ownership for on the path via ``chown``.
    :type user: unicode | str | int | dockermap.functional.AbstractLazyObject
    :param permissions: Permission flags to set via ``chmod``.
    :type permissions: unicode | str | dockermap.functional.AbstractLazyObject
    :param path: Path to adjust permissions on.
    :type path: unicode | str
    :return: Iterator over resulting command strings.
    :rtype: collections.Iterable[unicode | str]
    """
    # Resolve both values up front (lazy objects are evaluated here), then
    # gate each command on the truthiness of the *raw* argument, exactly as
    # before.
    resolved_user = resolve_value(user)
    resolved_permissions = resolve_value(permissions)
    if user:
        yield chown(resolved_user, path)
    if permissions:
        yield chmod(resolved_permissions, path)
def validate(self, model=None, context=None):
    """Validate model and return validation result object.

    Runs state, simple-property, nested-entity and collection validation
    in that order and merges each partial result into one Result.

    :param model: object or dict
    :param context: object, dict or None
    :return: shiftschema.result.Result
    """
    # Result is injected with the schema's translator settings.
    result = Result(translator=self.translator, locale=self.locale)
    # Order matters and mirrors the original: state, properties, nested
    # entities, then collections.
    validators = (
        self.validate_state,
        self.validate_properties,
        self.validate_entities,
        self.validate_collections,
    )
    for run_validation in validators:
        result.merge(run_validation(model, context=context))
    return result
def _final_frame_length(header, final_frame_bytes):
    """Calculate the length of a final ciphertext frame, given a complete
    header and the number of bytes of ciphertext in the final frame.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param int final_frame_bytes: Bytes of ciphertext in the final frame
    :rtype: int
    """
    # Sum of the fixed-width fields and the variable-length parts:
    return (
        4                            # sequence number end marker
        + 4                          # sequence number
        + header.algorithm.iv_len    # IV
        + 4                          # encrypted content length
        + final_frame_bytes          # encrypted content
        + header.algorithm.auth_len  # authentication tag
    )
def sample(self, initial_pos, num_samples, stepsize=None, return_type='dataframe'):
    """Draw samples using the No-U-Turn Sampler.

    Parameters
    ----------
    initial_pos : 1d array-like
        Values of the parameter position, the starting state of the
        Markov chain.
    num_samples : int
        Number of samples to be generated.
    stepsize : float, defaults to None
        Stepsize for proposing new position/momentum values in
        simulate_dynamics; chosen automatically when None.
    return_type : str ('dataframe' | 'recarray')
        Return type for samples. Defaults to 'dataframe'.

    Returns
    -------
    pandas.DataFrame or numpy.recarray depending on return_type.
    """
    initial_pos = _check_1d_array_object(initial_pos, 'initial_pos')
    _check_length_equal(initial_pos, self.model.variables,
                        'initial_pos', 'model.variables')
    if stepsize is None:
        stepsize = self._find_reasonable_stepsize(initial_pos)
    # One float field per model variable; the recarray view lets callers
    # address columns by variable name.
    record_dtype = [(var_name, 'float') for var_name in self.model.variables]
    samples = np.zeros(num_samples, dtype=record_dtype).view(np.recarray)
    samples[0] = tuple(initial_pos)
    position = initial_pos
    # Generate the remaining samples one NUTS transition at a time.
    for index in range(1, num_samples):
        position = self._sample(position, stepsize)
        samples[index] = tuple(position)
    return _return_samples(return_type, samples)
def get_sqla_query(  # sqla
        self,
        groupby, metrics, granularity,
        from_dttm, to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=15,
        timeseries_limit_metric=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        orderby=None,
        extras=None,
        columns=None,
        order_desc=True,
        prequeries=None,
        is_prequery=False,
):
    """Querying any sqla table from this common interface

    Builds a SQLAlchemy SELECT for this table from chart/query parameters
    (groupby, metrics, time range, filters, ...) and returns a SqlaQuery
    wrapper holding the query plus the labels expected in the result frame.
    """
    # Kwargs handed to the Jinja template processor so templated SQL
    # fragments (WHERE/HAVING/virtual-table SQL) can reference them.
    template_kwargs = {
        'from_dttm': from_dttm,
        'groupby': groupby,
        'metrics': metrics,
        'row_limit': row_limit,
        'to_dttm': to_dttm,
        'filter': filter,
        'columns': {col.column_name: col for col in self.columns},
    }
    template_kwargs.update(self.template_params_dict)
    template_processor = self.get_template_processor(**template_kwargs)
    db_engine_spec = self.database.db_engine_spec

    orderby = orderby or []

    # For backward compatibility
    if granularity not in self.dttm_cols:
        granularity = self.main_dttm_col

    # Database spec supports join-free timeslot grouping
    time_groupby_inline = db_engine_spec.time_groupby_inline

    # Lookup maps: column name -> column object, metric name -> metric.
    cols = {col.column_name: col for col in self.columns}
    metrics_dict = {m.metric_name: m for m in self.metrics}

    if not granularity and is_timeseries:
        raise Exception(_(
            'Datetime column not provided as part table configuration '
            'and is required by this type of chart'))
    if not groupby and not metrics and not columns:
        raise Exception(_('Empty query?'))

    # Resolve each requested metric to a SQLA expression: ad-hoc metrics
    # are built on the fly, saved metrics are looked up by name.
    metrics_exprs = []
    for m in metrics:
        if utils.is_adhoc_metric(m):
            metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
        elif m in metrics_dict:
            metrics_exprs.append(metrics_dict.get(m).get_sqla_col())
        else:
            raise Exception(_("Metric '{}' is not valid".format(m)))
    if metrics_exprs:
        main_metric_expr = metrics_exprs[0]
    else:
        # No metrics requested: fall back to COUNT(*) labeled 'ccount'.
        # NOTE(review): `label` is only defined on this fallback path;
        # reconstructed indentation assumes the compatibility call belongs
        # inside this else branch — confirm against upstream.
        main_metric_expr, label = literal_column('COUNT(*)'), 'ccount'
        main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)

    select_exprs = []
    groupby_exprs_sans_timestamp = OrderedDict()

    if groupby:
        select_exprs = []
        for s in groupby:
            if s in cols:
                outer = cols[s].get_sqla_col()
            else:
                # Free-form groupby expression: wrap and label it.
                outer = literal_column(f'({s})')
                outer = self.make_sqla_column_compatible(outer, s)
            groupby_exprs_sans_timestamp[outer.name] = outer
            select_exprs.append(outer)
    elif columns:
        for s in columns:
            select_exprs.append(
                cols[s].get_sqla_col() if s in cols else
                self.make_sqla_column_compatible(literal_column(s)))
        # Plain column listing (non-aggregate query): metrics do not apply.
        metrics_exprs = []

    groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
    if granularity:
        dttm_col = cols[granularity]
        time_grain = extras.get('time_grain_sqla')
        time_filters = []

        if is_timeseries:
            # Truncated/derived timestamp becomes part of SELECT + GROUP BY.
            timestamp = dttm_col.get_timestamp_expression(time_grain)
            select_exprs += [timestamp]
            groupby_exprs_with_timestamp[timestamp.name] = timestamp

        # Use main dttm column to support index with secondary dttm columns
        if db_engine_spec.time_secondary_columns and \
                self.main_dttm_col in self.dttm_cols and \
                self.main_dttm_col != dttm_col.column_name:
            time_filters.append(cols[self.main_dttm_col].get_time_filter(from_dttm, to_dttm))
        time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))

    select_exprs += metrics_exprs
    # Labels the result DataFrame is expected to carry, in SELECT order.
    labels_expected = [c._df_label_expected for c in select_exprs]
    select_exprs = db_engine_spec.make_select_compatible(
        groupby_exprs_with_timestamp.values(), select_exprs)
    qry = sa.select(select_exprs)

    tbl = self.get_from_clause(template_processor)

    if not columns:
        qry = qry.group_by(*groupby_exprs_with_timestamp.values())

    where_clause_and = []
    having_clause_and = []
    for flt in filter:
        # Skip malformed filters missing a column or operator.
        if not all([flt.get(s) for s in ['col', 'op']]):
            continue
        col = flt['col']
        op = flt['op']
        col_obj = cols.get(col)
        if col_obj:
            is_list_target = op in ('in', 'not in')
            eq = self.filter_values_handler(
                flt.get('val'),
                target_column_is_numeric=col_obj.is_num,
                is_list_target=is_list_target)
            if op in ('in', 'not in'):
                cond = col_obj.get_sqla_col().in_(eq)
                # '<NULL>' is the UI's marker for SQL NULL membership.
                if '<NULL>' in eq:
                    cond = or_(cond, col_obj.get_sqla_col() == None)  # noqa
                if op == 'not in':
                    cond = ~cond
                where_clause_and.append(cond)
            else:
                if col_obj.is_num:
                    eq = utils.string_to_num(flt['val'])
                if op == '==':
                    where_clause_and.append(col_obj.get_sqla_col() == eq)
                elif op == '!=':
                    where_clause_and.append(col_obj.get_sqla_col() != eq)
                elif op == '>':
                    where_clause_and.append(col_obj.get_sqla_col() > eq)
                elif op == '<':
                    where_clause_and.append(col_obj.get_sqla_col() < eq)
                elif op == '>=':
                    where_clause_and.append(col_obj.get_sqla_col() >= eq)
                elif op == '<=':
                    where_clause_and.append(col_obj.get_sqla_col() <= eq)
                elif op == 'LIKE':
                    where_clause_and.append(col_obj.get_sqla_col().like(eq))
                elif op == 'IS NULL':
                    where_clause_and.append(col_obj.get_sqla_col() == None)  # noqa
                elif op == 'IS NOT NULL':
                    where_clause_and.append(col_obj.get_sqla_col() != None)  # noqa
    if extras:
        # Free-form WHERE/HAVING snippets run through the template engine.
        where = extras.get('where')
        if where:
            where = template_processor.process_template(where)
            where_clause_and += [sa.text('({})'.format(where))]
        having = extras.get('having')
        if having:
            having = template_processor.process_template(having)
            having_clause_and += [sa.text('({})'.format(having))]
    if granularity:
        qry = qry.where(and_(*(time_filters + where_clause_and)))
    else:
        qry = qry.where(and_(*where_clause_and))
    qry = qry.having(and_(*having_clause_and))

    # Default ordering: by the main metric, direction from order_desc.
    if not orderby and not columns:
        orderby = [(main_metric_expr, not order_desc)]

    for col, ascending in orderby:
        direction = asc if ascending else desc
        if utils.is_adhoc_metric(col):
            col = self.adhoc_metric_to_sqla(col, cols)
        qry = qry.order_by(direction(col))

    if row_limit:
        qry = qry.limit(row_limit)

    # Series-limit handling: restrict a timeseries to the top N groups.
    if is_timeseries and timeseries_limit and groupby and not time_groupby_inline:
        if self.database.db_engine_spec.inner_joins:
            # some sql dialects require for order by expressions
            # to also be in the select clause -- others, e.g. vertica,
            # require a unique inner alias
            inner_main_metric_expr = self.make_sqla_column_compatible(
                main_metric_expr, 'mme_inner__')
            inner_groupby_exprs = []
            inner_select_exprs = []
            for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                inner = self.make_sqla_column_compatible(gby_obj, gby_name + '__')
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)

            inner_select_exprs += [inner_main_metric_expr]
            subq = select(inner_select_exprs).select_from(tbl)
            inner_time_filter = dttm_col.get_time_filter(
                inner_from_dttm or from_dttm,
                inner_to_dttm or to_dttm,
            )
            subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
            subq = subq.group_by(*inner_groupby_exprs)

            ob = inner_main_metric_expr
            if timeseries_limit_metric:
                ob = self._get_timeseries_orderby(
                    timeseries_limit_metric,
                    metrics_dict,
                    cols,
                )
            direction = desc if order_desc else asc
            subq = subq.order_by(direction(ob))
            subq = subq.limit(timeseries_limit)

            on_clause = []
            for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                # in this case the column name, not the alias, needs to be
                # conditionally mutated, as it refers to the column alias in
                # the inner query
                col_name = db_engine_spec.make_label_compatible(gby_name + '__')
                on_clause.append(gby_obj == column(col_name))

            tbl = tbl.join(subq.alias(), and_(*on_clause))
        else:
            if timeseries_limit_metric:
                orderby = [(
                    self._get_timeseries_orderby(
                        timeseries_limit_metric,
                        metrics_dict,
                        cols,
                    ),
                    False,
                )]

            # run subquery to get top groups
            subquery_obj = {
                'prequeries': prequeries,
                'is_prequery': True,
                'is_timeseries': False,
                'row_limit': timeseries_limit,
                'groupby': groupby,
                'metrics': metrics,
                'granularity': granularity,
                'from_dttm': inner_from_dttm or from_dttm,
                'to_dttm': inner_to_dttm or to_dttm,
                'filter': filter,
                'orderby': orderby,
                'extras': extras,
                'columns': columns,
                'order_desc': True,
            }
            result = self.query(subquery_obj)
            dimensions = [
                c for c in result.df.columns
                if c not in metrics and c in groupby_exprs_sans_timestamp
            ]
            top_groups = self._get_top_groups(
                result.df, dimensions, groupby_exprs_sans_timestamp)
            qry = qry.where(top_groups)

    return SqlaQuery(sqla_query=qry.select_from(tbl),
                     labels_expected=labels_expected)
def __get_function_by_pattern(self, pattern):
    """Return the first driver function whose name *contains* ``pattern``.

    :param pattern: partial function name (ex. key_pair)
    :return: matching driver function (ex. list_key_pairs)
    :raises AttributeError: if no driver function name contains ``pattern``
    """
    matches = [name for name in dir(self.driver) if pattern in name]
    # Guard clause first: no candidate at all is an error.
    if not matches:
        raise AttributeError(
            "No function name contains `{0}` in class `{1}`"
            .format(pattern, self.__class__.__name__))
    if len(matches) > 1:
        log.warn("Several functions match pattern `%s`: %r -- using first one!",
                 pattern, matches)
    return getattr(self.driver, matches[0])
def do_publish(broker, cmd, f, when, res, meta, *args, **kwargs):
    """Implement the publish so it can be called outside the decorator.

    :param broker: broker exposing ``publish(topic=..., message=...)`` and ``logger``
    :param cmd: command factory whose keyword defaults seed the payload
    :param f: the wrapped function whose call arguments are captured
    :param when: phase marker, 'before' or 'after'
    :param res: return value of ``f``; attached to the message when ``when == 'after'``
    :param meta: optional metadata added to the command payload
    """
    publish_command = functools.partial(broker.publish, topic=command_types.COMMAND)
    call_args = _get_args(f, args, kwargs)
    if when == 'before':
        broker.logger.info("{}: {}".format(
            f.__qualname__,
            {k: v for k, v in call_args.items() if str(k) != 'self'}))
    # Compute the argspec once (the original re-inspected cmd three times).
    cmd_spec = inspect.getfullargspec(cmd)
    # Map trailing args onto their defaults to build the baseline payload.
    command_args = dict(zip(reversed(cmd_spec.args), reversed(cmd_spec.defaults or [])))
    # TODO(artyom, 20170927): we are doing this to be able to use
    # the decorator in Instrument class methods, in which case
    # self is effectively an instrument.
    # To narrow the scope of this hack, we are checking if the
    # command is expecting instrument first.
    if 'instrument' in cmd_spec.args:
        # We are also checking if call arguments have 'self' and
        # don't have instruments specified, in which case
        # instruments should take precedence.
        if 'self' in call_args and 'instrument' not in call_args:
            call_args['instrument'] = call_args['self']
    command_args.update({key: call_args[key]
                         for key in (set(cmd_spec.args) & call_args.keys())})
    if meta:
        command_args['meta'] = meta
    payload = cmd(**command_args)
    message = {**payload, '$': when}
    if when == 'after':
        message['return'] = res
    # BUG FIX: the original published a freshly rebuilt ``{**payload, '$': when}``
    # here, silently discarding the 'return' value attached just above for
    # the 'after' phase. Publish the assembled message instead.
    publish_command(message=message)
def decode_union(self, data_type, obj):
    """Decode a serialized union into ``data_type.definition(tag, val)``.

    The data_type argument must be a Union.
    See json_compat_obj_decode() for argument descriptions.
    """
    val = None
    definition = data_type.definition
    if isinstance(obj, six.string_types):
        # Handles the shorthand format where the union is serialized as
        # only the string of the tag.
        tag = obj
        if definition._is_tag_present(tag, self.caller_permissions):
            val_data_type = definition._get_val_data_type(tag, self.caller_permissions)
            if not isinstance(val_data_type, (bv.Void, bv.Nullable)):
                raise bv.ValidationError(
                    "expected object for '%s', got symbol" % tag)
            if tag == definition._catch_all:
                raise bv.ValidationError(
                    "unexpected use of the catch-all tag '%s'" % tag)
        elif not self.strict and definition._catch_all:
            # Lenient mode: unknown tags collapse onto the catch-all.
            tag = definition._catch_all
        else:
            raise bv.ValidationError("unknown tag '%s'" % tag)
    elif isinstance(obj, dict):
        tag, val = self.decode_union_dict(data_type, obj)
    else:
        raise bv.ValidationError(
            "expected string or object, got %s" % bv.generic_type_name(obj))
    return definition(tag, val)
def enrich_json_objects_by_object_type(request, value):
    """Take the given value and run enrichment by object_type.

    Args:
        request (django.http.request.HttpRequest): request which is
            currently processed
        value (dict | list | django.db.models.Model): in case of a Model
            object (or a list of them), to_json is invoked

    Returns:
        dict | list
    """
    overall_start = time()
    # Normalize the input into plain JSON-compatible data first.
    if isinstance(value, list):
        json = [item.to_json() if hasattr(item, "to_json") else item for item in value]
    elif isinstance(value, dict):
        json = value
    else:
        json = value.to_json()
    objects, nested = _collect_json_objects(json, by='object_type')
    for enricher_info in _get_OBJECT_TYPE_ENRICHER_ORDER():
        wanted_types = enricher_info['object_types']
        if len(wanted_types) > 0:
            targets = flatten([objects.get(object_type, []) for object_type in wanted_types])
            has_nested = any([nested.get(object_type, False) for object_type in wanted_types])
        else:
            # No declared types: the enricher applies to everything.
            targets = flatten(objects.values())
            has_nested = any(nested.values())
        if len(targets) > 0:
            step_start = time()
            enricher_info['enricher'](request, targets, has_nested)
            LOGGER.debug('enrichment "{}" took {} seconds'.format(
                enricher_info['enricher_name'], time() - step_start))
            if not enricher_info['pure']:
                # if the enricher modified object types we must collect
                # objects again
                objects, nested = _collect_json_objects(json, by='object_type')
    LOGGER.debug(
        'The whole enrichment of json objects by their object_type took {} seconds.'
        .format(time() - overall_start))
    return json
def _set_ciphers(self):
    """Set up the allowed ciphers.

    By default this matches the set in util.ssl_.DEFAULT_CIPHERS, at least
    as supported by macOS. This is done custom and doesn't allow changing
    at this time, mostly because parsing OpenSSL cipher strings is going
    to be a freaking nightmare.
    """
    suite_count = len(CIPHER_SUITES)
    # Build a C array of cipher suite ids for the Security framework call.
    cipher_array = (Security.SSLCipherSuite * suite_count)(*CIPHER_SUITES)
    status = Security.SSLSetEnabledCiphers(self.context, cipher_array, suite_count)
    _assert_no_error(status)
def generate(engine, database, models, **kwargs):
    """Generate the migrations by introspecting the db."""
    # Validate inputs first, then build and run the generator in one go.
    validate_args(engine, database, models)
    Generator(engine, database, models).run()
def binary_search(a, k):
    """Binary search in a list of objects ordered by their ``.key``.

    Returns the largest index i for which a[i].key <= k, or -1 when every
    key exceeds k (like C++ upper_bound(k) - 1).
    """
    lo, hi = 0, len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        # Keys <= k push the lower bound right; larger keys shrink the
        # upper bound (same partition as the original, condition flipped).
        if a[mid].key <= k:
            lo = mid + 1
        else:
            hi = mid
    return lo - 1
def apply_upgrade(self, upgrade):
    """Apply an upgrade and register that it was successful.

    An upgrade may raise RuntimeError if an unrecoverable error happens.

    :param upgrade: A single upgrade (object exposing ``name`` and
        ``do_upgrade()``)
    :raises RuntimeError: if the upgrade fails; each error from the
        original exception is logged first.
    """
    self._setup_log_prefix(plugin_id=upgrade.name)
    try:  # Nested due to Python 2.4
        try:
            upgrade.do_upgrade()
            self.register_success(upgrade)
        except RuntimeError as e:
            msg = ["Upgrade error(s):"]
            for m in e.args:
                msg.append("  (-) %s" % m)
            logger = self.get_logger()
            logger.error("\n".join(msg))
            # BUG FIX: `upgrade` is an object (see `upgrade.name` and
            # `upgrade.do_upgrade()` above); the original indexed it as
            # `upgrade['id']`, which would raise TypeError here and mask
            # the real failure message.
            raise RuntimeError(
                "Upgrade '%s' failed. Your installation is in an"
                " inconsistent state. Please manually review the upgrade "
                "and resolve inconsistencies." % upgrade.name)
    finally:
        self._teardown_log_prefix()
def is_function_or_method(obj):
    """Check if an object is a function or method.

    Args:
        obj: The Python object in question.

    Returns:
        True if the object is a function or method.
    """
    # Plain functions and bound/unbound methods short-circuit here, so
    # is_cython is only consulted for everything else (same evaluation
    # order as the original `or` chain).
    if inspect.isfunction(obj) or inspect.ismethod(obj):
        return True
    return is_cython(obj)
def list_vms(self, allow_clone=False):
    """Get the VirtualBox VM list.

    Yields to ``self.execute`` for each VBoxManage call and returns a list
    of ``{"vmname": ..., "ram": ...}`` dicts.
    """
    vbox_vms = []
    listing = yield from self.execute("list", ["vms"])
    for line in listing:
        # Broken output (perhaps a carriage return in VM name)
        if len(line) == 0 or line[0] != '"' or line[-1:] != "}":
            continue
        vmname, _ = line.rsplit(' ', 1)
        vmname = vmname.strip('"')
        if vmname == "<inaccessible>":
            continue  # ignore inaccessible VMs
        extra_data = yield from self.execute("getextradata", [vmname, "GNS3/Clone"])
        is_clone = len(extra_data) > 0 and extra_data[0].strip() == "Value: yes"
        if is_clone and not allow_clone:
            continue
        # get the amount of RAM from the machine-readable VM info
        info_results = yield from self.execute("showvminfo", [vmname, "--machinereadable"])
        ram = 0
        for info in info_results:
            try:
                name, value = info.split('=', 1)
            except ValueError:
                continue
            if name.strip() == "memory":
                try:
                    ram = int(value.strip())
                except ValueError:
                    continue
                break
        vbox_vms.append({"vmname": vmname, "ram": ram})
    return vbox_vms
def get_data_disk_size(vm_, swap, linode_id):
    '''Return the size of the data disk in MB.

    .. versionadded:: 2016.3.0
    '''
    # Total disk on the Linode, minus the configured root disk and swap,
    # leaves the data disk.
    total_hd = get_linode(kwargs={'linode_id': linode_id})['TOTALHD']
    root_size = config.get_cloud_config_value(
        'disk_size', vm_, __opts__, default=total_hd - swap)
    return total_hd - root_size - swap
def create_manifest_from_s3_files(self):
    """Build the manifest from the files currently in the site bucket.

    BUG FIX: the original re-created ``files`` inside the key loop and
    called ``_set_manifest_data`` once per S3 object, so each call saw at
    most a single key (and an empty list for the manifest file itself).
    Collect every non-manifest key first, then write the manifest once.
    NOTE(review): assumes ``_set_manifest_data`` replaces rather than
    appends manifest content — confirm against its implementation.

    :return: None
    """
    files = [
        obj['Key']
        for obj in self.s3.list_objects(Bucket=self.sitename)['Contents']
        if obj['Key'] not in [self.manifest_file]
    ]
    self._set_manifest_data(files)
def scroll_from_element(self, on_element, xoffset, yoffset):
    """Touch and scroll starting at on_element, moving by xoffset and yoffset.

    :Args:
     - on_element: The element where scroll starts.
     - xoffset: X offset to scroll to.
     - yoffset: Y offset to scroll to.
    """
    def perform_scroll():
        # Deferred like the original lambda: on_element.id and the driver
        # call are evaluated only when the action chain is performed.
        self._driver.execute(Command.TOUCH_SCROLL, {
            'element': on_element.id,
            'xoffset': int(xoffset),
            'yoffset': int(yoffset)})
    self._actions.append(perform_scroll)
    return self
def distances_indices_sorted(self, points, sign=False):
    """Compute signed plane-to-point distances and a closest-first ordering.

    Positive distances are on the side of the plane normal, negative on the
    other side.

    :param points: Points for which distances are computed
    :param sign: Whether to add sign information in the indices sorting the
        points distances
    :return: (distances, indices); when ``sign`` is True the indices are
        (index, sign) tuples.
    """
    distances = [np.dot(self.normal_vector, point) + self.d for point in points]
    # Order indices by absolute distance, i.e. closest to the plane first.
    order = sorted(range(len(distances)), key=lambda idx: np.abs(distances[idx]))
    if sign:
        order = [(idx, int(np.sign(distances[idx]))) for idx in order]
    return distances, order
def set_widget(self, canvas_w):
    """Register the widget that will be used for the display.

    Call this method with the widget before rendering.
    """
    # Log first (same message as before), then remember the canvas widget.
    self.logger.debug("set widget canvas_w=%s" % canvas_w)
    self.pgcanvas = canvas_w
def disconnect(self, receipt=None, headers=None, **keyword_headers):
    """Disconnect from the server.

    :param str receipt: the receipt to use (once the server acknowledges
        that receipt, we're officially disconnected; optional - if not
        specified a unique receipt id will be generated)
    :param dict headers: a map of any additional headers the broker requires
    :param keyword_headers: any additional headers the broker requires
    """
    # Nothing to do when the transport is already down.
    if not self.transport.is_connected():
        log.debug('Not sending disconnect, already disconnected')
        return
    merged_headers = utils.merge_headers([headers, keyword_headers])
    receipt_id = receipt or utils.get_uuid()
    merged_headers[HDR_RECEIPT] = receipt_id
    # Track the receipt so the acknowledgement marks us disconnected.
    self.set_receipt(receipt_id, CMD_DISCONNECT)
    self.send_frame(CMD_DISCONNECT, merged_headers)
def check(self, value, major):
    """Check whether the value is between the minimum and maximum.

    Raises ValueError if it is not. A bound of None disables that side of
    the check.
    """
    minimum, maximum = self._min, self._max
    if minimum is not None and value < minimum:
        raise ValueError("Integer %d is lower than minimum %d." % (value, minimum))
    if maximum is not None and value > maximum:
        raise ValueError("Integer %d is higher than maximum %d." % (value, maximum))
def parse_addr(text):
    """Parse a 1-to-3-part address spec.

    '8080' -> (None, '', 8080); 'host:8080' -> (None, 'host', 8080);
    'proto:host:8080' -> ('proto', 'host', 8080).
    """
    if text:
        parts = text.split(':')
        # Dispatch on the number of colon-separated pieces; the port is
        # always the last piece and is converted to int.
        if len(parts) == 3:
            return parts[0], parts[1], int(parts[2])
        if len(parts) == 2:
            return None, parts[0], int(parts[1])
        if len(parts) == 1:
            return None, '', int(parts[0])
    return None, None, None
def flip_ctrlpts_u(ctrlpts, size_u, size_v):
    """Flip a list of 1-dimensional control points from u-row order to
    v-row order.

    **u-row order**: each row corresponds to a list of u values
    **v-row order**: each row corresponds to a list of v values

    :param ctrlpts: control points in u-row order
    :type ctrlpts: list, tuple
    :param size_u: size in u-direction
    :type size_u: int
    :param size_v: size in v-direction
    :type size_v: int
    :return: control points in v-row order
    :rtype: list
    """
    # Transpose the (size_u x size_v) grid while coercing every coordinate
    # to float, exactly as the original nested loops did.
    return [
        [float(coord) for coord in ctrlpts[u + (v * size_u)]]
        for u in range(size_u)
        for v in range(size_v)
    ]
def get_slice(im, center, size, pad=0):
    r"""Given a ``center`` location and ``size`` of a feature, returns the
    slice objects into ``im`` that bound the feature but do not extend
    beyond the image boundaries.

    Parameters
    ----------
    im : ND-image
        The image of the porous media
    center : array_like
        The coordinates of the center of the feature of interest
    size : array_like or scalar
        The size of the feature in each direction. If a scalar is supplied,
        this implies the same size in all directions.
    pad : scalar or array_like
        The amount to pad onto each side of the slice. The default is 0. A
        scalar value will increase the slice size equally in all directions,
        while an array the same shape as ``im.shape`` can be passed to pad
        a specified amount in each direction.

    Returns
    -------
    slices : list
        A list of slice objects, each indexing into one dimension of the
        image.
    """
    # FIX: the original used sp.ones/sp.array/sp.amax/sp.amin — NumPy alias
    # functions that were deprecated in SciPy 1.10 and removed in 1.12.
    # Use NumPy directly; behavior is identical.
    import numpy as np
    p = np.ones(shape=im.ndim, dtype=int) * np.array(pad)
    s = np.ones(shape=im.ndim, dtype=int) * np.array(size)
    slc = []
    for dim in range(im.ndim):
        # Clamp each bound to the image extent in this dimension.
        lower_im = np.amax((center[dim] - s[dim] - p[dim], 0))
        upper_im = np.amin((center[dim] + s[dim] + 1 + p[dim], im.shape[dim]))
        slc.append(slice(lower_im, upper_im))
    return slc
def nodes_of_class(self, klass, skip_klass=None):
    """Yield this node and every descendant that is an instance of *klass*.

    :param klass: The type of node to search for.
    :type klass: builtins.type
    :param skip_klass: A node type whose occurrences (and their entire
        subtrees) are skipped. This is useful to ignore subclasses of
        :attr:`klass`.
    :type skip_klass: builtins.type
    :returns: The nodes of the given type.
    :rtype: iterable(NodeNG)
    """
    if isinstance(self, klass):
        yield self
    for child in self.get_children():
        if skip_klass is not None and isinstance(child, skip_klass):
            continue
        yield from child.nodes_of_class(klass, skip_klass)
def _encode(self, sources: mx.nd.NDArray, source_length: int) -> Tuple[List[ModelState], mx.nd.NDArray]:
    """Run every model's encoder on the source and collect the results.

    :param sources: Source ids. Shape: (batch_size, bucket_key, num_factors).
    :param source_length: Bucket key.
    :return: One ModelState per model, plus the estimated reference length
        based on ratios averaged over the models that predicted one.
    """
    states = []
    predicted_ratios = []
    for model in self.models:
        state, ratio = model.run_encoder(sources, source_length)
        states.append(state)
        if ratio is not None:
            predicted_ratios.append(ratio)
    # num_seq takes both batch_size and beam_size into account
    num_seq = states[0].states[0].shape[0]
    if self.constant_length_ratio > 0.0:
        # a constant ratio overrides whatever the models predicted
        length_ratios = mx.nd.full(val=self.constant_length_ratio, shape=(num_seq, 1), ctx=self.context)
    elif predicted_ratios:
        # average over the models that were actually able to predict a ratio
        length_ratios = mx.nd.mean(mx.nd.stack(*predicted_ratios, axis=1), axis=1)
    else:
        length_ratios = mx.nd.zeros((num_seq, 1), ctx=self.context)
    encoded_source_length = self.models[0].encoder.get_encoded_seq_len(source_length)
    return states, length_ratios * encoded_source_length
def keep_vertices(self, indices_to_keep, ret_kept_faces=False):
    '''Keep the given vertices and discard the others, and any faces to which
    they may belong.

    Updates ``self.v`` (and ``self.vn`` / ``self.vc`` when they are
    per-vertex arrays), remaps ``self.f`` to the new vertex numbering,
    filters ``self.segm`` to the surviving faces, and recomputes landmarks
    when present.

    :param indices_to_keep: vertex indices (into ``self.v``) to retain.
    :param ret_kept_faces: if `True`, return the original indices of the kept
        faces. Otherwise return `self` for chaining.
        NOTE(review): when ``self.v`` is None this method returns ``None``
        regardless of ``ret_kept_faces``, which breaks the documented
        chaining contract -- confirm whether callers rely on that.
    '''
    import numpy as np
    if self.v is None:
        return
    indices_to_keep = np.array(indices_to_keep, dtype=np.uint32)
    initial_num_verts = self.v.shape[0]
    if self.f is not None:
        initial_num_faces = self.f.shape[0]
        # Boolean mask over faces: True only for faces all of whose vertices survive.
        f_indices_to_keep = self.all_faces_with_verts(indices_to_keep, as_boolean=True)
    # Why do we test this? Don't know. But we do need to test it before we
    # mutate self.v. (vn/vc are only updated when they are per-vertex arrays
    # of the original length, so capture that before self.v shrinks.)
    vn_should_update = self.vn is not None and self.vn.shape[0] == initial_num_verts
    vc_should_update = self.vc is not None and self.vc.shape[0] == initial_num_verts
    self.v = self.v[indices_to_keep]
    if vn_should_update:
        self.vn = self.vn[indices_to_keep]
    if vc_should_update:
        self.vc = self.vc[indices_to_keep]
    if self.f is not None:
        # Old-to-new index maps used to renumber faces after vertex removal.
        v_old_to_new = np.zeros(initial_num_verts, dtype=int)
        f_old_to_new = np.zeros(initial_num_faces, dtype=int)
        v_old_to_new[indices_to_keep] = np.arange(len(indices_to_keep), dtype=int)
        self.f = v_old_to_new[self.f[f_indices_to_keep]]
        f_old_to_new[f_indices_to_keep] = np.arange(self.f.shape[0], dtype=int)
    else:
        # Make the code below work, in case there is somehow degenerate
        # segm even though there are no faces.
        f_indices_to_keep = []
    if self.segm is not None:
        new_segm = {}
        for segm_name, segm_faces in self.segm.items():
            faces = np.array(segm_faces, dtype=int)
            # Keep only segment faces that survived, renumbered to the new face ids.
            valid_faces = faces[f_indices_to_keep[faces]]
            if len(valid_faces):
                new_segm[segm_name] = f_old_to_new[valid_faces]
        self.segm = new_segm if new_segm else None
    if hasattr(self, '_raw_landmarks') and self._raw_landmarks is not None:
        self.recompute_landmarks()
    return np.nonzero(f_indices_to_keep)[0] if ret_kept_faces else self
def is_done(self):
    """True if the game is over (e.g. the last two moves were Pass) or the
    position's move count has reached the configured maximum game length."""
    game_over = self.position.is_game_over()
    return game_over or self.position.n >= FLAGS.max_game_length
def delete_feature(self, dataset, fid):
    """Remove a single feature from a dataset.

    Parameters
    ----------
    dataset : str
        The dataset id.
    fid : str
        The feature id.

    Returns
    -------
    The response of the DELETE request (carries the HTTP status code).
    """
    template = URITemplate(self.baseuri + '/{owner}/{did}/features/{fid}')
    uri = template.expand(owner=self.username, did=dataset, fid=fid)
    return self.session.delete(uri)
def daterange(start_date, end_date):
    """Yield each date from ``start_date`` (inclusive) up to ``end_date``
    (exclusive), one day at a time.

    Args:
        start_date (date): starting date.
        end_date (date): ending date.

    Yields:
        date: a date for each day within the range.
    """
    total_days = (end_date - start_date).days
    for offset in range(total_days):
        yield start_date + timedelta(days=offset)
def get_endpoints():
    """Fetch all endpoints known on the Ariane server.

    Builds the request according to the active driver (messaging vs. REST),
    calls the mapping requester, and converts the JSON payload into
    :class:`Endpoint` objects.

    :return: list of Endpoint on success; None when the call failed or the
        server answered 404.
    :raises ArianeMappingOverloadError: when the server reports an overload
        (rc 500 with the overload marker in the error message).
    """
    LOGGER.debug("EndpointService.get_endpoints")
    params = SessionService.complete_transactional_req(None)
    if MappingService.driver_type != DriverFactory.DRIVER_REST:
        # Messaging driver: the operation name travels inside the properties.
        if params is None:
            params = {'OPERATION': 'getEndpoints'}
        else:
            params['OPERATION'] = 'getEndpoints'
        args = {'properties': params}
    else:
        # REST driver: plain GET, with transactional parameters when present.
        args = {'http_operation': 'GET', 'operation_path': ''}
        if params is not None:
            args['parameters'] = params
    response = EndpointService.requester.call(args)
    if MappingService.driver_type != DriverFactory.DRIVER_REST:
        response = response.get()
    ret = None
    if response.rc == 0:
        ret = [Endpoint.json_2_endpoint(endpoint)
               for endpoint in response.response_content['endpoints']]
    elif response.rc != 404:
        # Fixed copy-paste defect: this message used to say "getting nodes".
        err_msg = 'EndpointService.get_endpoints - Problem while getting endpoints. ' \
                  'Reason: ' + str(response.response_content) + ' - ' + \
                  str(response.error_message) + " (" + str(response.rc) + ")"
        LOGGER.warning(err_msg)
        if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
            raise ArianeMappingOverloadError("EndpointService.get_endpoints",
                                             ArianeMappingOverloadError.ERROR_MSG)
    return ret
def minion_config(opts, vm_):
    '''Build a minion's configuration for the provided options and VM.

    Starts from a small set of sane defaults (rather than a copy of the
    default minion opts, some of which are Null), then layers on VM,
    profile and global settings in that order of precedence.
    '''
    minion = {
        'master': 'salt',
        'log_level': 'info',
        'hash_type': 'sha256',
    }
    minion['id'] = vm_['name']
    master_finger = salt.config.get_cloud_config_value('master_finger', vm_, opts)
    if master_finger is not None:
        minion['master_finger'] = master_finger
    # Merge ANY defined minion settings, in the following order:
    # 1. VM config  2. Profile config  3. Global configuration
    minion.update(salt.config.get_cloud_config_value(
        'minion', vm_, opts, default={}, search_global=True))
    make_master = salt.config.get_cloud_config_value('make_master', vm_, opts)
    if 'master' not in minion and make_master is not True:
        raise SaltCloudConfigError('A master setting was not defined in the minion\'s configuration.')
    # Merge ANY defined grains settings, same precedence as above.
    grains = salt.config.get_cloud_config_value(
        'grains', vm_, opts, default={}, search_global=True)
    minion.setdefault('grains', {}).update(grains)
    return minion
def _split_params(tag_prefix, tag_suffix):
    """Split the comma-separated ``tag_suffix`` (without its trailing ``)``)
    and map each piece through ``_maybe_int``.

    :param tag_prefix: type name preceding the parenthesised parameter list
        (used only in the error message).
    :param tag_suffix: the parameter list, expected to end with ``)``.
    :returns: list of parameters, converted to int where possible.
    :raises ValueError: if ``tag_suffix`` does not end with ``)``.
    """
    if tag_suffix[-1:] != ')':
        # Modernized from the Python 2 ``raise ValueError, msg`` statement,
        # which is a SyntaxError on Python 3.
        raise ValueError("unbalanced parenthesis in type %s%s" % (tag_prefix, tag_suffix))
    # Return an explicit list: Python 2's map() returned a list, while
    # Python 3's returns a lazy iterator.
    return [_maybe_int(part) for part in tag_suffix[:-1].split(',')]
def convert(values, source_measure_or_unit_abbreviation, target_measure_or_unit_abbreviation):
    """Convert a value or a list of values from one unit to another.

    The two units must represent the same physical dimension.

    :param values: a float or a list of floats to convert.
    :param source_measure_or_unit_abbreviation: source unit specification.
    :param target_measure_or_unit_abbreviation: target unit specification.
    :returns: a float when ``values`` is a float, or a list of floats when
        it is a list.
    :raises HydraError: if the two units belong to different dimensions, or
        if ``values`` is neither a float nor a list.
    """
    source_dimension = get_dimension_by_unit_measure_or_abbreviation(source_measure_or_unit_abbreviation)
    target_dimension = get_dimension_by_unit_measure_or_abbreviation(target_measure_or_unit_abbreviation)
    # BUG FIX: a dimension mismatch previously fell through and silently
    # returned None, while the "dimensions are not consistent" error was
    # raised for unsupported value types instead. The mismatch now raises
    # up front, and the type check gets its own message.
    if source_dimension != target_dimension:
        raise HydraError("Unit conversion: dimensions are not consistent.")
    source = JSONObject({})
    target = JSONObject({})
    source.unit_abbreviation, source.factor = _parse_unit(source_measure_or_unit_abbreviation)
    target.unit_abbreviation, target.factor = _parse_unit(target_measure_or_unit_abbreviation)
    source.unit_data = get_unit_by_abbreviation(source.unit_abbreviation)
    target.unit_data = get_unit_by_abbreviation(target.unit_abbreviation)
    source.conv_factor = JSONObject({'lf': source.unit_data.lf, 'cf': source.unit_data.cf})
    target.conv_factor = JSONObject({'lf': target.unit_data.lf, 'cf': target.unit_data.cf})

    def _convert_one(value):
        # Linear conversion: scale by the ratio of linear factors, shift by
        # the difference of constant factors, then undo the target factor.
        return (source.conv_factor.lf / target.conv_factor.lf * (source.factor * value)
                + (source.conv_factor.cf - target.conv_factor.cf) / target.conv_factor.lf) / target.factor

    if isinstance(values, float):
        # A float in => a float out
        return _convert_one(values)
    elif isinstance(values, list):
        # A list of floats in => a list of floats out
        return [_convert_one(value) for value in values]
    raise HydraError("Unit conversion: values must be a float or a list of floats.")
def _hook_variable_gradient_stats ( self , var , name , log_track ) :
"""Logs a Variable ' s gradient ' s distribution statistics next time backward ( )
is called on it .""" | if not isinstance ( var , torch . autograd . Variable ) :
cls = type ( var )
raise TypeError ( 'Expected torch.Variable, not {}.{}' . format ( cls . __module__ , cls . __name__ ) )
handle = self . _hook_handles . get ( name )
if handle is not None and self . _torch_hook_handle_is_valid ( handle ) :
raise ValueError ( 'A hook has already been set under name "{}"' . format ( name ) )
def _callback ( grad , log_track ) :
if not log_track_update ( log_track ) :
return
self . log_tensor_stats ( grad . data , name )
handle = var . register_hook ( lambda grad : _callback ( grad , log_track ) )
self . _hook_handles [ name ] = handle
return handle |
def visit_Assign(self, node):
    """Implement assignment walker.

    Parse class properties defined via the property() function: class-level
    assignments are recorded in the callables database before the walk
    continues over the node's children.

    :param node: the assignment AST node being visited.
    """
    # [[[cog
    # cog.out("print(pcolor('Enter assign visitor', 'magenta'))")
    # [[[end]]]
    # Class-level assignment may also be a class attribute that is not
    # a managed attribute, record it anyway, no harm in doing so as it
    # is not attached to a callable
    if self._in_class(node):
        # Full dotted name of the property, resolved via the indent stack.
        element_full_name = self._pop_indent_stack(node, "prop")
        # (filename, line number) uniquely identifies this definition.
        code_id = (self._fname, node.lineno)
        self._processed_line = node.lineno
        self._callables_db[element_full_name] = {
            "name": element_full_name,
            "type": "prop",
            "code_id": code_id,
            "last_lineno": None,
        }
        self._reverse_callables_db[code_id] = element_full_name
        # [[[cog
        # code = """
        # print(
        #     pcolor(
        #         'Visiting property {0} @ {1}'.format(
        #             element_full_name, code_id[1]
        #         'green'
        # cog.out(code)
        # [[[end]]]
    # Get property actions
    self.generic_visit(node)
def rm(self, index):
    """Handle the 'r' command.

    :index: Index of the item to remove; silently ignored when no item
        exists at that index.
    """
    if not self.model.exists(index):
        return
    self.model.remove(index)
def spht(ssphere, nmax=None, mmax=None):
    """Transform a ScalarPatternUniform *ssphere* into a set of scalar
    spherical harmonic coefficients stored in a ScalarCoefs.

    Example::

        >>> p = spherepy.random_patt_uniform(6, 8)
        >>> c = spherepy.spht(p)
        >>> spherepy.pretty_coefs(c)

    Args:
        ssphere (ScalarPatternUniform): The pattern to be transformed.
        nmax (int, optional): The maximum number of *n* values required.
            Defaults to ``ssphere.nrows - 2`` when omitted.
            NOTE(review): the historical docstring said "number of rows
            minus one" -- confirm which is intended.
        mmax (int, optional): The maximum number of *m* values required.
            Defaults to ``ncols/2 - 1`` when both are omitted, or to
            ``nmax`` when only *nmax* is given.

    Returns:
        ScalarCoefs: The coefficients of the scalar spherical harmonic
        transform.

    Raises:
        ValueError: If *nmax* / *mmax* are too large, *mmax* > *nmax*, or
            the pattern has an odd number of columns.
    """
    # Fixed idiom: ``is None`` instead of ``== None`` (identity test).
    if nmax is None:
        nmax = ssphere.nrows - 2
        mmax = int(ssphere.ncols / 2) - 1
    elif mmax is None:
        mmax = nmax
    if mmax > nmax:
        raise ValueError(err_msg['nmax_g_mmax'])
    if nmax >= ssphere.nrows - 1:
        raise ValueError(err_msg['nmax_too_lrg'])
    if mmax >= ssphere.ncols / 2:
        raise ValueError(err_msg['mmax_too_lrg'])
    dnrows = ssphere._dsphere.shape[0]
    ncols = ssphere._dsphere.shape[1]
    if np.mod(ncols, 2) == 1:
        raise ValueError(err_msg['ncols_even'])
    # Forward 2D FFT, normalized by the number of samples.
    fdata = np.fft.fft2(ssphere._dsphere) / (dnrows * ncols)
    ops.fix_even_row_data_fc(fdata)
    # Zero-pad two extra rows, then apply the sine weighting in Fourier space.
    fdata_extended = np.zeros([dnrows + 2, ncols], dtype=np.complex128)
    ops.pad_rows_fdata(fdata, fdata_extended)
    ops.sin_fc(fdata_extended)
    # Number of coefficients in the triangular (n, m) storage scheme.
    N = nmax + 1
    NC = N + mmax * (2 * N - mmax - 1)
    sc = np.zeros(NC, dtype=np.complex128)
    # Use the C extension when available, otherwise the pure-Python path.
    if use_cext:
        csphi.fc_to_sc(fdata_extended, sc, nmax, mmax)
    else:
        sc = pysphi.fc_to_sc(fdata_extended, nmax, mmax)
    return ScalarCoefs(sc, nmax, mmax)
def find_nth_digit(n):
    """Return the nth digit (1-indexed) of the infinite digit sequence
    123456789101112131415...

    Strategy:
      1. find the digit-length of the number containing the nth digit,
      2. find the actual number the nth digit belongs to,
      3. pick the digit out of that number.

    :param n: 1-based position in the digit sequence (n >= 1).
    :returns: the digit as an int.
    """
    length = 1   # digit-length of the numbers in the current bucket
    count = 9    # how many numbers have this length (9, 90, 900, ...)
    start = 1    # first number with this length (1, 10, 100, ...)
    while n > length * count:
        n -= length * count
        length += 1
        count *= 10
        start *= 10
    # BUG FIX: floor division. Plain ``/`` is float division on Python 3,
    # which made ``str(start)`` look like e.g. '10.0' and return the wrong
    # digit (or '.').
    start += (n - 1) // length
    s = str(start)
    return int(s[(n - 1) % length])
def init_prov_graph(self):
    """Initialize the PROV graph with everything known at the start of the
    recording: repository provenance obtained via git2prov, plus the basic
    agent / entity / activity triples."""
    try:
        # Ask git2prov for the repository's provenance in PROV-O form.
        repo_prov = check_output(
            ['node_modules/git2prov/bin/git2prov',
             'https://github.com/{}/{}/'.format(self.user, self.repo),
             'PROV-O']).decode("utf-8")
        # Drop everything before the first '@' (leading non-Turtle output).
        repo_prov = repo_prov[repo_prov.find('@'):]
        glogger.debug('Ingesting Git2PROV output into RDF graph')
        with open('temp.prov.ttl', 'w') as temp_prov:
            temp_prov.write(repo_prov)
        self.prov_g.parse('temp.prov.ttl', format='turtle')
    except Exception as e:
        glogger.error(e)
        glogger.error("Couldn't parse Git2PROV graph, continuing without repo PROV")
    # Core typing triples.
    self.prov_g.add((self.agent, RDF.type, self.prov.Agent))
    self.prov_g.add((self.entity_d, RDF.type, self.prov.Entity))
    self.prov_g.add((self.activity, RDF.type, self.prov.Activity))
    # entity_d: generated by the activity, attributed to the agent.
    self.prov_g.add((self.entity_d, self.prov.wasGeneratedBy, self.activity))
    self.prov_g.add((self.entity_d, self.prov.wasAttributedTo, self.agent))
    # later: entity_d generated-at-time (once the end time is known)
    # activity: associated with the agent, started now.
    self.prov_g.add((self.activity, self.prov.wasAssociatedWith, self.agent))
    self.prov_g.add((self.activity, self.prov.startedAtTime, Literal(datetime.now())))
def _init_user_stub ( self , ** stub_kwargs ) :
"""Initializes the user stub using nosegae config magic""" | # do a little dance to keep the same kwargs for multiple tests in the same class
# because the user stub will barf if you pass these items into it
# stub = user _ service _ stub . UserServiceStub ( * * stub _ kw _ args )
# TypeError : _ _ init _ _ ( ) got an unexpected keyword argument ' USER _ IS _ ADMIN '
task_args = stub_kwargs . copy ( )
self . testbed . setup_env ( overwrite = True , USER_ID = task_args . pop ( 'USER_ID' , 'testuser' ) , USER_EMAIL = task_args . pop ( 'USER_EMAIL' , 'testuser@example.org' ) , USER_IS_ADMIN = task_args . pop ( 'USER_IS_ADMIN' , '1' ) )
self . testbed . init_user_stub ( ** task_args ) |
def get_memory_usage(self):
    """Return statistics about the virtual memory used by this holder.

    :returns: Memory usage data
    :rtype: dict

    Example:

    >>> holder.get_memory_usage()
    >>> 'nb_arrays': 12,           # values stored for 12 different periods
    >>> 'nb_cells_by_array': 100,  # 100 entities (e.g. persons) in the simulation
    >>> 'cell_size': 8,            # each value takes 8B of memory
    >>> 'dtype': dtype('float64')  # each value is a float64
    >>> 'total_nb_bytes': 10400    # the holder uses 10.4kB of virtual memory
    >>> 'nb_requests': 24          # the variable has been computed 24 times
    >>> 'nb_requests_by_array': 2  # each stored array requested twice on average
    """
    usage = {
        'nb_cells_by_array': self.population.count,
        'dtype': self.variable.dtype,
    }
    usage.update(self._memory_storage.get_memory_usage())
    if self.simulation.trace:
        stats = self.simulation.tracer.usage_stats[self.variable.name]
        nb_arrays = usage['nb_arrays']
        per_array = stats['nb_requests'] / float(nb_arrays) if nb_arrays > 0 else np.nan
        usage.update({
            'nb_requests': stats['nb_requests'],
            'nb_requests_by_array': per_array,
        })
    return usage
def elements(self):
    """Return a list of the elements which are not None.

    Collects every entry of ``ct`` whose second item is a
    ``datapoint.Element.Element`` instance.
    NOTE(review): ``ct`` is a module-level name, not an attribute of
    ``self`` -- confirm this is intentional.
    """
    return [entry[1] for entry in ct
            if isinstance(entry[1], datapoint.Element.Element)]
def initialize(self, init=None, ctx=None, default_init=initializer.Uniform(), force_reinit=False):
    """Initializes parameter and gradient arrays. Only used for :py:class:`NDArray` API.

    Parameters
    ----------
    init : Initializer
        The initializer to use. Overrides :py:meth:`Parameter.init` and default_init.
    ctx : Context or list of Context, defaults to :py:meth:`context.current_context()`.
        Initialize Parameter on given context. If ctx is a list of Context, a
        copy will be made for each context.

        .. note::
            Copies are independent arrays. User is responsible for keeping
            their values consistent when updating.
            Normally :py:class:`gluon.Trainer` does this for you.

    default_init : Initializer
        Default initializer is used when both :py:func:`init`
        and :py:meth:`Parameter.init` are ``None``.
    force_reinit : bool, default False
        Whether to force re-initialization if parameter is already initialized.

    Examples
    --------
    >>> weight = mx.gluon.Parameter('weight', shape=(2, 2))
    >>> weight.initialize(ctx=mx.cpu(0))
    >>> weight.data()
    [[-0.01068833  0.01729892]
     [ 0.02042518 -0.01618656]]
    <NDArray 2x2 @cpu(0)>
    >>> weight.grad()
    [[ 0.  0.]
     [ 0.  0.]]
    <NDArray 2x2 @cpu(0)>
    >>> weight.initialize(ctx=[mx.gpu(0), mx.gpu(1)])
    >>> weight.data(mx.gpu(0))
    [[-0.00873779 -0.02834515]
     [ 0.05484822 -0.06206018]]
    <NDArray 2x2 @gpu(0)>
    >>> weight.data(mx.gpu(1))
    [[-0.00873779 -0.02834515]
     [ 0.05484822 -0.06206018]]
    <NDArray 2x2 @gpu(1)>
    """
    if self._data is not None and not force_reinit:
        warnings.warn("Parameter '%s' is already initialized, ignoring. "
                      "Set force_reinit=True to re-initialize." % self.name,
                      stacklevel=2)
        return
    # Drop any previously held arrays before (re-)initializing.
    self._data = self._grad = None
    if ctx is None:
        ctx = [context.current_context()]
    elif isinstance(ctx, Context):
        ctx = [ctx]
    if init is None:
        init = self.init if self.init is not None else default_init
    if not self.shape or np.prod(self.shape) <= 0:
        if not self._allow_deferred_init:
            raise ValueError("Cannot initialize Parameter '%s' because it has "
                             "invalid shape: %s." % (self.name, str(self.shape)))
        # Shape unknown: stash the arguments and defer until shape inference.
        self._deferred_init = (init, ctx, default_init, None)
        return
    self._deferred_init = (init, ctx, default_init, None)
    self._finish_deferred_init()
def get_node_attributes(self, node):
    """Given a node, get a dictionary with copies of that node's attributes.

    :param node: reference to the node to retrieve the attributes of.
    :returns: dict -- a (shallow) copy of each attribute of the specified node.
    :raises: ValueError -- No such node exists.
    """
    if not self.has_node(node):
        raise ValueError("No such node exists.")
    return {attr_name: copy.copy(attr_value)
            for attr_name, attr_value in self._node_attributes[node].items()}
def check_version(self, timeout=2, strict=False, topics=[]):
    """Attempt to guess the broker version.

    Note: This is a blocking call.

    Probes the broker with a sequence of requests only newer brokers
    understand, from newest to oldest, and infers the version from which
    one succeeds.

    :param timeout: seconds to spend probing before giving up.
    :param strict: when True, additionally assert that each failure matches
        the expected failure mode for that probed version.
    :param topics: topics to include in the MetadataRequest probes.
        NOTE(review): mutable default argument; appears safe here only
        because the list is never mutated -- confirm.

    Returns: version tuple, i.e. (0, 10), (0, 9), (0, 8, 2), ...
    """
    timeout_at = time.time() + timeout
    log.info('Probing node %s broker version', self.node_id)
    # Monkeypatch some connection configurations to avoid timeouts
    override_config = {
        'request_timeout_ms': timeout * 1000,
        'max_in_flight_requests_per_connection': 5
    }
    stashed = {}
    for key in override_config:
        stashed[key] = self.config[key]
        self.config[key] = override_config[key]
    # kafka kills the connection when it doesn't recognize an API request
    # so we can send a test request and then follow immediately with a
    # vanilla MetadataRequest. If the server did not recognize the first
    # request, both will be failed with a ConnectionError that wraps
    # socket.error (32, 54, or 104)
    from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
    from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest
    test_cases = [
        # All cases starting from 0.10 will be based on ApiVersionResponse
        ((0, 10), ApiVersionRequest[0]()),
        ((0, 9), ListGroupsRequest[0]()),
        ((0, 8, 2), GroupCoordinatorRequest[0]('kafka-python-default-group')),
        ((0, 8, 1), OffsetFetchRequest[0]('kafka-python-default-group', [])),
        ((0, 8, 0), MetadataRequest[0](topics)),
    ]
    for version, request in test_cases:
        if not self.connect_blocking(timeout_at - time.time()):
            raise Errors.NodeNotReadyError()
        f = self.send(request)
        # HACK: sleeping to wait for socket to send bytes
        time.sleep(0.1)
        # when broker receives an unrecognized request API
        # it abruptly closes our socket.
        # so we attempt to send a second request immediately
        # that we believe it will definitely recognize (metadata)
        # the attempt to write to a disconnected socket should
        # immediately fail and allow us to infer that the prior
        # request was unrecognized
        mr = self.send(MetadataRequest[0](topics))
        # Drain responses for both in-flight futures before judging the probe.
        selector = self.config['selector']()
        selector.register(self._sock, selectors.EVENT_READ)
        while not (f.is_done and mr.is_done):
            selector.select(1)
            for response, future in self.recv():
                future.success(response)
        selector.close()
        if f.succeeded():
            if isinstance(request, ApiVersionRequest[0]):
                # Starting from 0.10 kafka broker we determine version
                # by looking at ApiVersionResponse
                api_versions = self._handle_api_version_response(f.value)
                version = self._infer_broker_version_from_api_versions(api_versions)
            log.info('Broker version identifed as %s', '.'.join(map(str, version)))
            log.info('Set configuration api_version=%s to skip auto'
                     ' check_version requests on startup', version)
            break
        # Only enable strict checking to verify that we understand failure
        # modes. For most users, the fact that the request failed should be
        # enough to rule out a particular broker version.
        if strict:
            # If the socket flush hack did not work (which should force the
            # connection to close and fail all pending requests), then we
            # get a basic Request Timeout. This is not ideal, but we'll deal
            if isinstance(f.exception, Errors.RequestTimedOutError):
                pass
            # 0.9 brokers do not close the socket on unrecognized api
            # requests (bug...). In this case we expect to see a correlation
            # id mismatch
            elif (isinstance(f.exception, Errors.CorrelationIdError) and
                  version == (0, 10)):
                pass
            elif six.PY2:
                assert isinstance(f.exception.args[0], socket.error)
                assert f.exception.args[0].errno in (32, 54, 104)
            else:
                assert isinstance(f.exception.args[0], ConnectionError)
        log.info("Broker is not v%s -- it did not recognize %s",
                 version, request.__class__.__name__)
    else:
        # No probe succeeded: every candidate version was ruled out.
        raise Errors.UnrecognizedBrokerVersion()
    # Restore the connection configuration values stashed above.
    for key in stashed:
        self.config[key] = stashed[key]
    return version
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.