idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
26,100
def unique(seq):
    """Return the distinct elements of *seq*, preserving first-seen order.

    Works even when elements are unhashable (dicts, sets, ...) because
    membership is tested with ``==`` against a plain list rather than a set.
    """
    seen = []
    for item in seq:
        if item not in seen:
            seen.append(item)
    return seen
Return the unique elements of a collection even if those elements are unhashable and unsortable like dicts and sets
26,101
def learn(self, matches, recall):
    """Search for a good set of blocking predicates for the training pairs.

    *matches* are known duplicate pairs; *recall* is the fraction of them
    the chosen predicates must still cover.  Returns the final predicate set.
    """
    compound_length = 2

    dupe_cover = Cover(self.blocker.predicates, matches)
    dupe_cover.dominators(cost=self.total_cover)
    dupe_cover.compound(compound_length)

    comparison_count = self.comparisons(dupe_cover, compound_length)
    dupe_cover.dominators(cost=comparison_count, comparison=True)

    coverable_dupes = set.union(*viewvalues(dupe_cover))
    uncoverable_dupes = [pair for i, pair in enumerate(matches)
                         if i not in coverable_dupes]

    # epsilon: how many training pairs we are allowed to leave uncovered.
    epsilon = int((1.0 - recall) * len(matches))
    if len(uncoverable_dupes) > epsilon:
        logger.warning(OUT_OF_PREDICATES_WARNING)
        logger.debug(uncoverable_dupes)
        epsilon = 0
    else:
        epsilon -= len(uncoverable_dupes)

    for pred in dupe_cover:
        pred.count = comparison_count[pred]

    searcher = BranchBound(len(coverable_dupes) - epsilon, 2500)
    final_predicates = searcher.search(dupe_cover)

    logger.info('Final predicate set:')
    for predicate in final_predicates:
        logger.info(predicate)
    return final_predicates
Takes in a set of training pairs and predicates and tries to find a good set of blocking rules .
26,102
def unindex(self, data, field):
    """Remove the given records from the indices attached to *field*."""
    indices = extractIndices(self.index_fields[field])

    for doc in data:
        if doc:
            for _, index, preprocess in indices:
                index.unindex(preprocess(doc))

    # Rebuild search state and re-point predicates at their indices.
    for index_type, index, _ in indices:
        index._index.initSearch()
        for predicate in self.index_fields[field][index_type]:
            logger.debug("Canopy: %s", str(predicate))
            predicate.index = index
Remove index of a given set of data
26,103
def save(self, *args, **kwargs):
    """Render this snippet to highlighted HTML with Pygments before saving.

    Also enforces a cap of 100 stored snippets by deleting the oldest one.
    """
    lexer = get_lexer_by_name(self.language)
    linenos = 'table' if self.linenos else False
    options = {'title': self.title} if self.title else {}
    formatter = HtmlFormatter(style=self.style, linenos=linenos,
                              full=True, **options)
    self.highlighted = highlight(self.code, lexer, formatter)
    super(Snippet, self).save(*args, **kwargs)

    snippets = Snippet.objects.all()
    if len(snippets) > 100:
        snippets[0].delete()
Use the pygments library to create a highlighted HTML representation of the code snippet .
26,104
def scan(self):
    """Probe every 7-bit I2C address and return the list that responded.

    An OSError from read_byte means no device acknowledged that address.
    """
    found = []
    for addr in range(0x80):
        try:
            self._i2c_bus.read_byte(addr)
        except OSError:
            continue
        found.append(addr)
    return found
Try to read a byte from each address ; if you get an OSError it means the device isn't there
26,105
def final():
    """atexit hook: tear down PulseIn helper processes and message queues."""
    if DEBUG:
        print("Cleaning up message queues", queues)
        print("Cleaning up processes", procs)
    for q in queues:
        q.remove()
    for proc in procs:
        proc.terminate()
In case the program is cancelled or quit we need to clean up the PulseIn helper process and also the message queue this is called at exit to do so
26,106
def _wait_receive_msg ( self , timeout = 0.25 , type = 2 ) : stamp = time . monotonic ( ) while ( time . monotonic ( ) - stamp ) < timeout : try : message = self . _mq . receive ( block = False , type = 2 ) return message except sysv_ipc . BusyError : time . sleep ( 0.001 ) raise RuntimeError ( "Timed out waiting for PulseIn message" )
Internal helper that will wait for new messages of a given type and throw an exception on timeout
26,107
def deinit(self):
    """Stop the helper process and release the message queue for reuse."""
    self._process.terminate()
    procs.remove(self._process)
    self._mq.remove()
    queues.remove(self._mq)
Deinitialises the PulseIn and releases any hardware and software resources for reuse .
26,108
def resume(self, trigger_duration=0):
    """Resume pulse capture, optionally sending a trigger pulse first."""
    if trigger_duration != 0:
        self._mq.send("t%d" % trigger_duration, True, type=1)
    else:
        self._mq.send("r", True, type=1)
    self._paused = False
Resumes pulse capture after an optional trigger pulse .
26,109
def pause(self):
    """Suspend pulse capture until resume() is called."""
    self._mq.send("p", True, type=1)
    self._paused = True
Pause pulse capture
26,110
def popleft(self):
    """Pop and return the oldest captured pulse length.

    Raises:
        IndexError: when no pulses are buffered (helper replies -1).
    """
    self._mq.send("^", True, type=1)
    message = self._wait_receive_msg()
    value = int(message[0].decode('utf-8'))
    if value == -1:
        raise IndexError("pop from empty list")
    return value
Removes and returns the oldest read pulse .
26,111
def _validate_layers ( self , proposal ) : self . _layer_ids = [ l . model_id for l in proposal . value ] if len ( set ( self . _layer_ids ) ) != len ( self . _layer_ids ) : raise LayerException ( 'duplicate layer detected, only use each layer once' ) return proposal . value
Validate layers list .
26,112
def on_hover(self, callback, remove=False):
    """Register (or, with remove=True, unregister) a hover callback.

    The callback receives an unpacked set of keyword arguments.
    """
    self._hover_callbacks.register_callback(callback, remove=remove)
The hover callback takes an unpacked set of keyword arguments .
26,113
def _validate_controls ( self , proposal ) : self . _control_ids = [ c . model_id for c in proposal . value ] if len ( set ( self . _control_ids ) ) != len ( self . _control_ids ) : raise ControlException ( 'duplicate control detected, only use each control once' ) return proposal . value
Validate controls list .
26,114
def read_core_register(self, reg):
    """Read a CPU register, unpacking floating-point values to Python floats."""
    index = register_name_to_index(reg)
    value = self.read_core_register_raw(index)
    if is_single_float_register(index):
        value = conversion.u32_to_float32(value)
    elif is_double_float_register(index):
        value = conversion.u64_to_float64(value)
    return value
read CPU register Unpack floating point register values
26,115
def write_core_register(self, reg, data):
    """Write a CPU register, packing float values into raw words first."""
    index = register_name_to_index(reg)
    # Deliberately an exact type check (matches original): only genuine
    # Python floats are converted, ints are written as-is.
    if is_single_float_register(index) and type(data) is float:
        data = conversion.float32_to_u32(data)
    elif is_double_float_register(index) and type(data) is float:
        data = conversion.float64_to_u64(data)
    self.write_core_register_raw(index, data)
write a CPU register . Will need to pack floating point register values before writing .
26,116
def connect(self, protocol=None):
    """Initialize the DAP IO pins for JTAG or SWD (default if unspecified)."""
    if protocol is not None:
        port = self.PORT_MAP[protocol]
    else:
        port = DAPAccess.PORT.DEFAULT
    try:
        self._link.connect(port)
    except DAPAccess.Error as exc:
        six.raise_from(self._convert_exception(exc), exc)
    # Record which wire protocol the probe actually selected.
    actualMode = self._link.get_swj_mode()
    self._protocol = self.PORT_MAP[actualMode]
    self._invalidate_cached_registers()
Initialize DAP IO pins for JTAG or SWD
26,117
def set_clock(self, frequency):
    """Set the JTAG/SWD clock frequency in Hz."""
    try:
        self._link.set_clock(frequency)
    except DAPAccess.Error as exc:
        six.raise_from(self._convert_exception(exc), exc)
Set the frequency for JTAG and SWD in Hz
26,118
def reset(self):
    """Reset the target, dropping any cached register values first."""
    try:
        self._invalidate_cached_registers()
        self._link.reset()
    except DAPAccess.Error as exc:
        six.raise_from(self._convert_exception(exc), exc)
Reset the target
26,119
def assert_reset(self, asserted):
    """Assert or de-assert the target reset line."""
    try:
        self._invalidate_cached_registers()
        self._link.assert_reset(asserted)
    except DAPAccess.Error as exc:
        six.raise_from(self._convert_exception(exc), exc)
Assert or de - assert target reset line
26,120
def _get_text ( node , tag , default = None ) : try : return node . find ( tag ) . text except AttributeError : return default
Get the text for the provided tag from the provided node
26,121
def init(self):
    """Cortex-M debug initialization; the bus must already be accessible.

    A delegate may take over the whole start-up by returning a truthy value
    from 'will_start_debug_core'.
    """
    if not self.call_delegate('will_start_debug_core', core=self):
        if self.halt_on_connect:
            self.halt()
        self._read_core_type()
        self._check_for_fpu()
        self.build_target_xml()
        self.sw_bp.init()
    self.call_delegate('did_start_debug_core', core=self)
Cortex M initialization . The bus must be accessible when this method is called .
26,122
def write_memory(self, addr, value, transfer_size=32):
    """Write a single memory location (word-sized transfer by default)."""
    self.ap.write_memory(addr, value, transfer_size)
write a memory location . By default the transfer size is a word
26,123
def read_memory(self, addr, transfer_size=32, now=True):
    """Read one memory location (word by default), filtered through the
    breakpoint manager.

    With now=False, returns a callback that yields the filtered value later.
    """
    result = self.ap.read_memory(addr, transfer_size, now)

    def deferred_read():
        return self.bp_manager.filter_memory(addr, transfer_size, result())

    if now:
        return self.bp_manager.filter_memory(addr, transfer_size, result)
    return deferred_read
read a memory location . By default a word will be read
26,124
def read_memory_block8(self, addr, size):
    """Read *size* unaligned bytes starting at *addr*; returns byte values
    filtered through the breakpoint manager."""
    raw = self.ap.read_memory_block8(addr, size)
    return self.bp_manager.filter_memory_unaligned_8(addr, size, raw)
read a block of unaligned bytes in memory . Returns an array of byte values
26,125
def read_memory_block32(self, addr, size):
    """Read *size* aligned 32-bit words starting at *addr*; returns word
    values filtered through the breakpoint manager."""
    raw = self.ap.read_memory_block32(addr, size)
    return self.bp_manager.filter_memory_aligned_32(addr, size, raw)
read a block of aligned words in memory . Returns an array of word values
26,126
def halt(self):
    """Halt the core via DHCSR, emitting pre/post halt notifications."""
    self.notify(Notification(event=Target.EVENT_PRE_HALT, source=self,
                             data=Target.HALT_REASON_USER))
    self.write_memory(CortexM.DHCSR,
                      CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)
    self.flush()
    self.notify(Notification(event=Target.EVENT_POST_HALT, source=self,
                             data=Target.HALT_REASON_USER))
halt the core
26,127
def step(self, disable_interrupts=True, start=0, end=0):
    """Single-step one instruction, preserving the prior interrupt mask.

    When start != end, keep stepping while the PC stays inside
    [start, end) and no debug event (DWT trap / breakpoint) fires.
    """
    dhcsr = self.read_memory(CortexM.DHCSR)
    if not (dhcsr & (CortexM.C_STEP | CortexM.C_HALT)):
        logging.error('cannot step: target not halted')
        return

    self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self,
                             data=Target.RUN_TYPE_STEP))
    self.clear_debug_cause_bits()

    # Remember whether interrupts were already masked so we can restore it.
    interrupts_masked = (CortexM.C_MASKINTS & dhcsr) != 0
    if not interrupts_masked and disable_interrupts:
        self.write_memory(CortexM.DHCSR,
                          CortexM.DBGKEY | CortexM.C_DEBUGEN |
                          CortexM.C_HALT | CortexM.C_MASKINTS)

    while True:
        if disable_interrupts or interrupts_masked:
            self.write_memory(CortexM.DHCSR,
                              CortexM.DBGKEY | CortexM.C_DEBUGEN |
                              CortexM.C_MASKINTS | CortexM.C_STEP)
        else:
            self.write_memory(CortexM.DHCSR,
                              CortexM.DBGKEY | CortexM.C_DEBUGEN |
                              CortexM.C_STEP)

        # Busy-wait for the core to halt again after the step.
        while not self.read_memory(CortexM.DHCSR) & CortexM.C_HALT:
            pass

        if start == end:
            break
        program_counter = self.read_core_register(CORE_REGISTER['pc'])
        if program_counter < start or end <= program_counter:
            break
        if self.read_memory(CortexM.DFSR) & (CortexM.DFSR_DWTTRAP |
                                             CortexM.DFSR_BKPT):
            break

    # Restore the original interrupt mask state.
    if not interrupts_masked and disable_interrupts:
        self.write_memory(CortexM.DHCSR,
                          CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)

    self.flush()
    self._run_token += 1
    self.notify(Notification(event=Target.EVENT_POST_RUN, source=self,
                             data=Target.RUN_TYPE_STEP))
perform an instruction level step . This function preserves the previous interrupt mask state
26,128
def reset_and_halt(self, reset_type=None):
    """Reset and stop the core on the reset handler.

    A delegate may implement the reset catch itself ('set_reset_catch'
    returning truthy); otherwise vector catch on core reset is used.
    """
    delegateResult = self.call_delegate('set_reset_catch', core=self,
                                        reset_type=reset_type)
    if not delegateResult:
        self.halt()
    demcr = self.read_memory(CortexM.DEMCR)
    if not delegateResult:
        # Catch the core as soon as it comes out of reset.
        self.write_memory(CortexM.DEMCR, demcr | CortexM.DEMCR_VC_CORERESET)

    self.reset(reset_type)

    # Wait (bounded) for the target to leave the reset/running state.
    with timeout.Timeout(2.0) as t_o:
        while t_o.check():
            if self.get_state() not in (Target.TARGET_RESET,
                                        Target.TARGET_RUNNING):
                break
            sleep(0.01)

    # Ensure the Thumb bit is set in XPSR; Cortex-M only executes Thumb.
    xpsr = self.read_core_register('xpsr')
    if xpsr & self.XPSR_THUMB == 0:
        self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)

    self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type)
    # Restore DEMCR to its pre-reset value.
    self.write_memory(CortexM.DEMCR, demcr)
perform a reset and stop the core on the reset handler
26,129
def resume(self):
    """Resume execution; does nothing unless the target is halted."""
    if self.get_state() != Target.TARGET_HALTED:
        logging.debug('cannot resume: target not halted')
        return
    self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self,
                             data=Target.RUN_TYPE_RESUME))
    self._run_token += 1
    self.clear_debug_cause_bits()
    self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN)
    self.flush()
    self.notify(Notification(event=Target.EVENT_POST_RUN, source=self,
                             data=Target.RUN_TYPE_RESUME))
resume the execution
26,130
def read_core_registers_raw(self, reg_list):
    """Read one or more core registers, returning raw integer values.

    Double-precision FP registers are read as two single words and
    recombined; CFBP and PSR sub-registers are extracted from their
    parent register values.
    """
    reg_list = [register_name_to_index(reg) for reg in reg_list]
    for reg in reg_list:
        if reg not in CORE_REGISTER.values():
            raise ValueError("unknown reg: %d" % reg)
        elif is_fpu_register(reg) and (not self.has_fpu):
            raise ValueError("attempt to read FPU register without FPU")

    # Handle doubles by recursively reading their two component singles.
    doubles = [reg for reg in reg_list if is_double_float_register(reg)]
    hasDoubles = len(doubles) > 0
    if hasDoubles:
        originalRegList = reg_list
        reg_list = [reg for reg in reg_list
                    if not is_double_float_register(reg)]
        singleRegList = []
        for reg in doubles:
            singleRegList += (-reg, -reg + 1)
        singleValues = self.read_core_registers_raw(singleRegList)

    # Kick off all transfers first, then read the results (pipelined).
    dhcsr_cb_list = []
    reg_cb_list = []
    for reg in reg_list:
        if is_cfbp_subregister(reg):
            reg = CORE_REGISTER['cfbp']
        elif is_psr_subregister(reg):
            reg = CORE_REGISTER['xpsr']
        self.write_memory(CortexM.DCRSR, reg)
        dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
        reg_cb = self.read_memory(CortexM.DCRDR, now=False)
        dhcsr_cb_list.append(dhcsr_cb)
        reg_cb_list.append(reg_cb)

    reg_vals = []
    for reg, reg_cb, dhcsr_cb in zip(reg_list, reg_cb_list, dhcsr_cb_list):
        dhcsr_val = dhcsr_cb()
        assert dhcsr_val & CortexM.S_REGRDY
        val = reg_cb()
        # Extract sub-register fields from their parent value.
        if is_cfbp_subregister(reg):
            val = (val >> ((-reg - 1) * 8)) & 0xff
        elif is_psr_subregister(reg):
            val &= sysm_to_psr_mask(reg)
        reg_vals.append(val)

    # Merge double values back into the caller's original ordering.
    if hasDoubles:
        results = []
        for reg in originalRegList:
            if is_double_float_register(reg):
                doubleIndex = doubles.index(reg)
                singleLow = singleValues[doubleIndex * 2]
                singleHigh = singleValues[doubleIndex * 2 + 1]
                results.append((singleHigh << 32) | singleLow)
            else:
                results.append(reg_vals[reg_list.index(reg)])
        reg_vals = results

    return reg_vals
Read one or more core registers
26,131
def write_core_registers_raw(self, reg_list, data_list):
    """Write one or more core registers from raw integer values.

    Doubles are split into two single writes; CFBP and PSR sub-registers
    are read-modify-written into their parent register.
    """
    assert len(reg_list) == len(data_list)
    reg_list = [register_name_to_index(reg) for reg in reg_list]
    for reg in reg_list:
        if reg not in CORE_REGISTER.values():
            raise ValueError("unknown reg: %d" % reg)
        elif is_fpu_register(reg) and (not self.has_fpu):
            raise ValueError("attempt to write FPU register without FPU")

    # First pass: expand doubles, and fetch current parent values for any
    # sub-register writes so we can merge fields into them.
    cfbpValue = None
    xpsrValue = None
    reg_data_list = []
    for reg, data in zip(reg_list, data_list):
        if is_double_float_register(reg):
            singleLow = data & 0xffffffff
            singleHigh = (data >> 32) & 0xffffffff
            reg_data_list += [(-reg, singleLow), (-reg + 1, singleHigh)]
        elif is_cfbp_subregister(reg) and cfbpValue is None:
            cfbpValue = self.read_core_register_raw(CORE_REGISTER['cfbp'])
        elif is_psr_subregister(reg) and xpsrValue is None:
            xpsrValue = self.read_core_register_raw(CORE_REGISTER['xpsr'])
        else:
            reg_data_list.append((reg, data))

    dhcsr_cb_list = []
    for reg, data in reg_data_list:
        if is_cfbp_subregister(reg):
            # Merge the byte field into the cached CFBP value.
            shift = (-reg - 1) * 8
            mask = 0xffffffff ^ (0xff << shift)
            data = (cfbpValue & mask) | ((data & 0xff) << shift)
            cfbpValue = data
            reg = CORE_REGISTER['cfbp']
        elif is_psr_subregister(reg):
            mask = sysm_to_psr_mask(reg)
            data = (xpsrValue & (0xffffffff ^ mask)) | (data & mask)
            xpsrValue = data
            reg = CORE_REGISTER['xpsr']

        self.write_memory(CortexM.DCRDR, data)
        self.write_memory(CortexM.DCRSR, reg | CortexM.DCRSR_REGWnR)
        dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
        dhcsr_cb_list.append(dhcsr_cb)

    # Confirm every write completed.
    for dhcsr_cb in dhcsr_cb_list:
        dhcsr_val = dhcsr_cb()
        assert dhcsr_val & CortexM.S_REGRDY
Write one or more core registers
26,132
def set_watchpoint(self, addr, size, type):
    """Set a hardware watchpoint; delegates to the DWT unit."""
    return self.dwt.set_watchpoint(addr, size, type)
set a hardware watchpoint
26,133
def remove_watchpoint(self, addr, size, type):
    """Remove a hardware watchpoint; delegates to the DWT unit."""
    return self.dwt.remove_watchpoint(addr, size, type)
remove a hardware watchpoint
26,134
def init(self):
    """Download and initialize the flash algorithm in RAM.

    NOTE(review): this routine erases a sector and then re-runs the whole
    connect/init sequence a second time — presumably a device-specific
    recovery quirk; confirm against the target's flashing docs.
    """
    self.target.halt()
    self.target.reset_and_halt()
    result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True)
    if result != 0:
        logging.error('init error: %i', result)

    self.erase_sector(0x01000000)
    time.sleep(.5)
    self.target.dp.reset()
    time.sleep(1.3)
    self.target.dp.init()
    self.target.dp.power_up_debug()

    self.target.halt()
    self.target.reset_and_halt()
    result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True)
    if result != 0:
        logging.error('init error: %i', result)
Download the flash algorithm in RAM
26,135
def _check_type ( value , expected_type ) : if not isinstance ( value , expected_type ) : raise TypeError ( "Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}" . format ( value = value , expected_type = expected_type , actual_type = type ( value ) , ) ) return value
Perform type checking on the provided value
26,136
def _get_cached_mounted_points():
    """Collect mbed-looking drive letters from the Windows MountedDevices
    registry key; returns a list of {mount_point, volume_string} dicts."""
    result = []
    try:
        mounted_devices_key = winreg.OpenKey(
            winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\MountedDevices")
        for v in _iter_vals(mounted_devices_key):
            if "DosDevices" not in v[0]:
                continue
            volume_string = v[1].decode("utf-16le", "ignore")
            if not _is_mbed_volume(volume_string):
                continue
            # Entry names look like \DosDevices\X: — capture the drive.
            mount_point_match = re.match(".*\\\\(.:)$", v[0])
            if not mount_point_match:
                logger.debug("Invalid disk pattern for entry %s, skipping", v[0])
                continue
            result.append({
                "mount_point": mount_point_match.group(1),
                "volume_string": volume_string,
            })
    except OSError:
        logger.error('Failed to open "MountedDevices" in registry')
    return result
! Get the volumes present on the system
26,137
def _vid_pid_path_to_usb_info ( vid_pid_path ) : result = { "vendor_id" : None , "product_id" : None } for component in vid_pid_path . split ( "&" ) : component_part = component . lower ( ) . split ( "_" ) if len ( component_part ) != 2 : logger . debug ( "Unexpected VID/PID string structure %s" , component ) break if component_part [ 0 ] == "vid" : result [ "vendor_id" ] = component_part [ 1 ] elif component_part [ 0 ] == "pid" : result [ "product_id" ] = component_part [ 1 ] return result
! Provide the vendor ID and product ID of a device based on its entry in the registry
26,138
def _iter_keys_as_str(key):
    """Yield the name (string) of each registry subkey of *key*."""
    subkey_count = winreg.QueryInfoKey(key)[0]
    for index in range(subkey_count):
        yield winreg.EnumKey(key, index)
! Iterate over subkeys of a key returning subkey as string
26,139
def _iter_keys(key):
    """Yield each registry subkey of *key*, opened as a key handle."""
    subkey_count = winreg.QueryInfoKey(key)[0]
    for index in range(subkey_count):
        yield winreg.OpenKey(key, winreg.EnumKey(key, index))
! Iterate over subkeys of a key
26,140
def _iter_vals(key):
    """Yield each value tuple stored under registry key *key*."""
    value_count = winreg.QueryInfoKey(key)[1]
    for index in range(value_count):
        yield winreg.EnumValue(key, index)
! Iterate over values of a key
26,141
def mount_point_ready(self, path):
    """Return True if listing *path* with `dir` succeeds, i.e. the mount
    point is ready for file operations."""
    stdout, stderr, retcode = self._run_cli_process("dir %s" % path)
    return retcode == 0
! Check if a mount point is ready for file operations
26,142
def _create_algo_bin ( self , ro_rw_zi ) : sect_ro , sect_rw , sect_zi = ro_rw_zi algo_size = sect_ro . length + sect_rw . length + sect_zi . length algo_data = bytearray ( algo_size ) for section in ( sect_ro , sect_rw ) : start = section . start size = section . length data = section . data assert len ( data ) == size algo_data [ start : start + size ] = data return algo_data
Create a binary blob of the flash algo which can execute from ram
26,143
def _sector_and_sz_itr ( self , elf , data_start ) : for entry_start in itertools . count ( data_start , self . FLASH_SECTORS_STRUCT_SIZE ) : data = elf . read ( entry_start , self . FLASH_SECTORS_STRUCT_SIZE ) size , start = struct . unpack ( self . FLASH_SECTORS_STRUCT , data ) start_and_size = start , size if start_and_size == ( self . SECTOR_END , self . SECTOR_END ) : return yield start_and_size
Iterator which returns starting address and sector size
26,144
def _prune ( current , keys ) : pruned_current = { k : current [ k ] for k in keys if k in current } pruned_children = list ( filter ( None , [ _prune ( c , keys ) for c in current . get ( "IORegistryEntryChildren" , [ ] ) ] ) ) keep_current = any ( k in current for k in keys ) or pruned_children if keep_current : if pruned_children : pruned_current [ "IORegistryEntryChildren" ] = pruned_children return pruned_current else : return { }
Reduce the amount of data we have to sift through to only include the specified keys and children that contain the specified keys
26,145
def get_register_context(self):
    """Build GDB's register dump reply: every register as little-endian hex."""
    logging.debug("GDB getting register context")
    resp = b''
    reg_num_list = [reg.reg_num for reg in self._register_list]
    vals = self._context.read_core_registers_raw(reg_num_list)
    for reg, regValue in zip(self._register_list, vals):
        if reg.bitsize == 64:
            resp += six.b(conversion.u64_to_hex16le(regValue))
        else:
            resp += six.b(conversion.u32_to_hex8le(regValue))
        logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
    return resp
return hexadecimal dump of registers as expected by GDB
26,146
def set_register_context(self, data):
    """Parse GDB's hex register dump and write all registers at once."""
    logging.debug("GDB setting register context")
    reg_num_list = []
    reg_data_list = []
    for reg in self._register_list:
        # Consume 16 hex chars for 64-bit registers, 8 for 32-bit.
        if reg.bitsize == 64:
            regValue = conversion.hex16_to_u64be(data)
            data = data[16:]
        else:
            regValue = conversion.hex8_to_u32be(data)
            data = data[8:]
        reg_num_list.append(reg.reg_num)
        reg_data_list.append(regValue)
        logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
    self._context.write_core_registers_raw(reg_num_list, reg_data_list)
Set registers from GDB hexadecimal string .
26,147
def set_register(self, reg, data):
    """Write one register from a GDB hex string; *reg* is the index of the
    register in the target XML previously sent to GDB."""
    if reg < 0:
        return
    if reg < len(self._register_list):
        regName = self._register_list[reg].name
        regBits = self._register_list[reg].bitsize
        if regBits == 64:
            value = conversion.hex16_to_u64be(data)
        else:
            value = conversion.hex8_to_u32be(data)
        logging.debug("GDB: write reg %s: 0x%X", regName, value)
        self._context.write_core_register_raw(regName, value)
Set single register from GDB hexadecimal string . reg parameter is the index of register in targetXML sent to GDB .
26,148
def _get_interfaces():
    """Return all connected USB devices, preferring the v2 backend for any
    device reported by both backends."""
    v1_interfaces = INTERFACE[USB_BACKEND].get_all_connected_interfaces()
    v2_interfaces = INTERFACE[USB_BACKEND_V2].get_all_connected_interfaces()
    duplicates = [v1 for v1 in v1_interfaces for v2 in v2_interfaces
                  if _get_unique_id(v1) == _get_unique_id(v2)]
    for dev in duplicates:
        v1_interfaces.remove(dev)
    return v1_interfaces + v2_interfaces
Get the connected USB devices
26,149
def add_response(self, data):
    """Store device response bytes as little-endian 32-bit words."""
    assert len(data) == self._size_bytes
    words = []
    for i in range(0, self._size_bytes, 4):
        word = (data[i] |
                (data[i + 1] << 8) |
                (data[i + 2] << 16) |
                (data[i + 3] << 24))
        words.append(word)
    self._result = words
Add data read from the remote device to this object .
26,150
def get_result(self):
    """Block until this transfer completes, then return its result.

    Raises the transfer's recorded error, if any.
    """
    while self._result is None:
        if len(self.daplink._commands_to_read) > 0:
            self.daplink._read_packet()
        else:
            # Nothing in flight: the pending command must hold our data.
            assert not self.daplink._crnt_cmd.get_empty()
            self.daplink.flush()
    if self._error is not None:
        raise self._error
    assert self._result is not None
    return self._result
Get the result of this transfer .
26,151
def _get_free_words ( self , blockAllowed , isRead ) : if blockAllowed : send = self . _size - 5 - 4 * self . _write_count recv = self . _size - 4 - 4 * self . _read_count if isRead : return recv // 4 else : return send // 4 else : send = self . _size - 3 - 1 * self . _read_count - 5 * self . _write_count recv = self . _size - 3 - 4 * self . _read_count if isRead : return min ( send , recv // 4 ) else : return send // 5
Return the number of words free in the transmit packet
26,152
def add(self, count, request, data, dap_index):
    """Queue a single or block register transfer onto this command.

    Mixing different request types disables the block-transfer
    optimization for the whole command.
    """
    assert self._data_encoded is False
    if self._dap_index is None:
        self._dap_index = dap_index
    assert self._dap_index == dap_index

    if self._block_request is None:
        self._block_request = request
    elif request != self._block_request:
        self._block_allowed = False
    assert not self._block_allowed or self._block_request == request

    if request & READ:
        self._read_count += count
    else:
        self._write_count += count
    self._data.append((count, request, data))

    if LOG_PACKET_BUILDS:
        self._logger.debug(
            "add(%d, %02x:%s) -> [wc=%d, rc=%d, ba=%d]" %
            (count, request, 'r' if (request & READ) else 'w',
             self._write_count, self._read_count, self._block_allowed))
Add a single or block register transfer operation to this command
26,153
def decode_data(self, data):
    """Decode the response for this (encoded, non-empty) command, using
    the block or non-block layout as appropriate."""
    assert self.get_empty() is False
    assert self._data_encoded is True
    if self._block_allowed:
        return self._decode_transfer_block_data(data)
    return self._decode_transfer_data(data)
Decode the response data
26,154
def get_connected_devices():
    """Return a DAPAccessCMSISDAP object for every connected board,
    logging and skipping interfaces that fail to identify."""
    all_daplinks = []
    all_interfaces = _get_interfaces()
    for interface in all_interfaces:
        try:
            all_daplinks.append(DAPAccessCMSISDAP(None, interface=interface))
        except DAPAccessIntf.TransferError:
            logger = logging.getLogger(__name__)
            logger.error('Failed to get unique id',
                         exc_info=session.Session.get_current().log_tracebacks)
    return all_daplinks
Return an array of all mbed boards connected
26,155
def set_deferred_transfer(self, enable):
    """Enable or disable transfer buffering; pending work is flushed when
    turning deferral off."""
    if self._deferred_transfer and not enable:
        self.flush()
    self._deferred_transfer = enable
Allow transfers to be delayed and buffered
26,156
def _init_deferred_buffers(self):
    """(Re)create all deferred-transfer bookkeeping structures."""
    # Transfers awaiting their response data.
    self._transfer_list = collections.deque()
    # Command currently being assembled.
    self._crnt_cmd = _Command(self._packet_size)
    # Commands already sent, whose responses are still outstanding.
    self._commands_to_read = collections.deque()
    # Decoded response bytes not yet claimed by a transfer.
    self._command_response_buf = bytearray()
Initialize or reinitalize all the deferred transfer buffers
26,157
def _read_packet(self):
    """Read one packet from the interface, decode it, and hand completed
    chunks of data to the waiting transfers in FIFO order."""
    cmd = self._commands_to_read.popleft()
    try:
        raw_data = self._interface.read()
        raw_data = bytearray(raw_data)
        decoded_data = cmd.decode_data(raw_data)
    except Exception as exception:
        self._abort_all_transfers(exception)
        raise

    decoded_data = bytearray(decoded_data)
    self._command_response_buf.extend(decoded_data)

    # Dispatch buffered bytes to transfers until one is only partially
    # satisfied (its data spans into a future packet).
    pos = 0
    while True:
        size_left = len(self._command_response_buf) - pos
        if size_left == 0:
            break
        transfer = self._transfer_list[0]
        size = transfer.get_data_size()
        if size > size_left:
            break
        self._transfer_list.popleft()
        data = self._command_response_buf[pos:pos + size]
        pos += size
        transfer.add_response(data)

    if pos > 0:
        self._command_response_buf = self._command_response_buf[pos:]
Reads and decodes a single packet
26,158
def _send_packet(self):
    """Encode and transmit the current command, first draining a response
    if the interface's packet pipeline is full."""
    cmd = self._crnt_cmd
    if cmd.get_empty():
        return

    max_packets = self._interface.get_packet_count()
    if len(self._commands_to_read) >= max_packets:
        self._read_packet()

    data = cmd.encode_data()
    try:
        self._interface.write(list(data))
    except Exception as exception:
        self._abort_all_transfers(exception)
        raise
    self._commands_to_read.append(cmd)
    self._crnt_cmd = _Command(self._packet_size)
Send a single packet to the interface
26,159
def _write(self, dap_index, transfer_count, transfer_request, transfer_data):
    """Queue one or more register transfers, splitting them across as many
    packets as needed; returns a _Transfer for reads, else None."""
    assert dap_index == 0
    assert isinstance(transfer_count, six.integer_types)
    assert isinstance(transfer_request, six.integer_types)
    assert transfer_data is None or len(transfer_data) > 0

    # Reads get a transfer object the caller can later collect.
    transfer = None
    if transfer_request & READ:
        transfer = _Transfer(self, dap_index, transfer_count,
                             transfer_request, transfer_data)
        self._transfer_list.append(transfer)

    cmd = self._crnt_cmd
    is_read = transfer_request & READ
    size_to_transfer = transfer_count
    trans_data_pos = 0
    while size_to_transfer > 0:
        size = cmd.get_request_space(size_to_transfer, transfer_request,
                                     dap_index)
        if size == 0:
            # Current packet has no room at all — send it and start fresh.
            if LOG_PACKET_BUILDS:
                self._logger.debug("_write: send packet [size==0]")
            self._send_packet()
            cmd = self._crnt_cmd
            continue

        if transfer_data is None:
            data = None
        else:
            data = transfer_data[trans_data_pos:trans_data_pos + size]
        cmd.add(size, transfer_request, data, dap_index)
        size_to_transfer -= size
        trans_data_pos += size

        if cmd.get_full():
            if LOG_PACKET_BUILDS:
                self._logger.debug("_write: send packet [full]")
            self._send_packet()
            cmd = self._crnt_cmd

    if not self._deferred_transfer:
        self.flush()
    return transfer
Write one or more commands
26,160
def _jtag_to_swd ( self ) : data = [ 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff ] self . _protocol . swj_sequence ( data ) data = [ 0x9e , 0xe7 ] self . _protocol . swj_sequence ( data ) data = [ 0xff , 0xff , 0xff , 0xff , 0xff , 0xff , 0xff ] self . _protocol . swj_sequence ( data ) data = [ 0x00 ] self . _protocol . swj_sequence ( data )
Send the sequence to switch the target from JTAG to SWD
26,161
def _abort_all_transfers(self, exception):
    """Fail every outstanding transfer with *exception* and reset buffers.

    On a TransferError, the responses for packets already sent are drained
    so host and device stay in sync.
    """
    pending_reads = len(self._commands_to_read)
    for transfer in self._transfer_list:
        transfer.add_error(exception)
    self._init_deferred_buffers()
    if isinstance(exception, DAPAccessIntf.TransferError):
        for _ in range(pending_reads):
            self._interface.read()
Abort any ongoing transfers and clear all buffers
26,162
def resetn(self):
    """Reset the core via the nRST pin; the core is running afterwards."""
    logging.debug("target_nrf51.reset: enable reset pin")
    self.write_memory(RESET, RESET_ENABLE)
    logging.debug("target_nrf51.reset: trigger nRST pin")
    self.reset()
reset a core . After a call to this function the core is running
26,163
def list_mbeds(self):
    """Return a detail dict for each candidate device with a mount point."""
    platform_count = {}
    candidates = list(self.find_candidates())
    result = []
    for device in candidates:
        if not device.get("mount_point", None):
            continue
        device["target_id"] = device["target_id_usb_id"]
        self._update_device_from_fs(device)
        result.append(device)
    return result
List details of connected devices
26,164
def _update_device_from_fs(self, device):
    """Enrich *device* from files on its mount point; if the mount point
    is not accessible, mark the device unmounted (mount_point = None)."""
    try:
        entries = listdir(device["mount_point"])
        lowered = [entry.lower() for entry in entries]
        if self.MBED_HTM_NAME.lower() in lowered:
            self._update_device_from_htm(device)
    except (OSError, IOError) as e:
        logger.warning(
            'Marking device with mount point "%s" as unmounted due to the '
            "following error: %s",
            device["mount_point"],
            e,
        )
        device["mount_point"] = None
Updates the device information based on files from its mount_point
26,165
def _read_htm_ids ( self , mount_point ) : result = { } target_id = None for line in self . _htm_lines ( mount_point ) : target_id = target_id or self . _target_id_from_htm ( line ) return target_id , result
! Function scans mbed . htm to get information about TargetID .
26,166
def _target_id_from_htm ( self , line ) : m = re . search ( "\\?code=([a-fA-F0-9]+)" , line ) if m : result = m . groups ( ) [ 0 ] return result m = re . search ( "\\?auth=([a-fA-F0-9]+)" , line ) if m : result = m . groups ( ) [ 0 ] return result return None
! Extract Target id from htm line .
26,167
def _run_cli_process ( cmd , shell = True ) : from subprocess import Popen , PIPE p = Popen ( cmd , shell = shell , stdout = PIPE , stderr = PIPE ) _stdout , _stderr = p . communicate ( ) return _stdout , _stderr , p . returncode
! Run a command as a subprocess and return its stdout, stderr, and return code.
26,168
def create_mbed_detector(**kwargs):
    """Factory: return the host-OS-specific detector instance, or None.

    The OS-specific module is imported lazily so only the relevant
    backend is loaded.
    """
    system = platform.system()
    if system == "Darwin":
        from .darwin import StlinkDetectDarwin
        return StlinkDetectDarwin(**kwargs)
    if system == "Linux":
        from .linux import StlinkDetectLinuxGeneric
        return StlinkDetectLinuxGeneric(**kwargs)
    if system == "Windows":
        from .windows import StlinkDetectWindows
        return StlinkDetectWindows(**kwargs)
    return None
! Factory used to create host OS specific mbed - lstools object
26,169
def _locked ( func ) : def _locking ( self , * args , ** kwargs ) : try : self . lock ( ) return func ( self , * args , ** kwargs ) finally : self . unlock ( ) return _locking
! Decorator to automatically lock an AccessPort method .
26,170
def _dev_by_id(self, device_type):
    """Return a USBID -> device-path dict for one /dev device class.

    Logs an error and returns an empty dict when the by-id directory does
    not exist (e.g. non-udev distributions).
    """
    by_id_dir = os.path.join("/dev", device_type, "by-id")
    if not os.path.isdir(by_id_dir):
        logger.error(
            "Could not get %s devices by id. "
            "This could be because your Linux distribution "
            "does not use udev, or does not create /dev/%s/by-id "
            "symlinks. Please submit an issue to github.com/"
            "armmbed/mbed-ls.",
            device_type,
            device_type,
        )
        return {}
    links = [os.path.join(by_id_dir, name) for name in os.listdir(by_id_dir)]
    return dict(self._hex_ids(links))
! Get a dict USBID - > device for a device class
26,171
def _hex_ids(self, dev_list):
    """Yield (usbid, resolved-path) pairs for device paths matching self.nlp.

    ``self.nlp`` is a compiled regex with a named 'usbid' group; paths
    that do not match are silently skipped.
    """
    for dl in dev_list:
        match = self.nlp.search(dl)
        if match:
            # _readlink resolves the by-id symlink to the real device node.
            yield match.group("usbid"), _readlink(dl)
! Build a USBID map for a device list
26,172
def _get_unicode(data, force=False):
    """Coerce *data* toward a text object.

    Bytes are decoded as UTF-8 and None becomes ''. Any other value is
    stringified only when *force* is true; otherwise it is passed through
    unchanged.
    """
    if isinstance(data, binary_type):
        return data.decode('utf-8')
    if data is None:
        return ''
    if not force:
        return data
    # Python 2 needs ``unicode``; Python 3's str is already text.
    return unicode(data) if PY2 else str(data)
Try to return a text (unicode) object from the given data.
26,173
def make_lines(data, precision=None):
    """Render *data* as InfluxDB line-protocol text, one line per point.

    ``data`` carries optional top-level 'measurement' and 'tags' defaults
    plus a 'points' list; each point may override the measurement and add
    its own tags, fields, and an optional 'time'.
    """
    lines = []
    static_tags = data.get('tags')
    for point in data['points']:
        elements = []
        # Point-level measurement wins over the batch-level default.
        measurement = _escape_tag(
            _get_unicode(point.get('measurement', data.get('measurement'))))
        key_values = [measurement]
        if static_tags:
            # Merge batch tags with point tags; point tags take precedence.
            tags = dict(static_tags)
            tags.update(point.get('tags') or {})
        else:
            tags = point.get('tags') or {}
        # Tags are emitted sorted by key (canonical line-protocol form).
        for tag_key, tag_value in sorted(iteritems(tags)):
            key = _escape_tag(tag_key)
            value = _escape_tag_value(tag_value)
            # Empty keys or values are dropped rather than emitted.
            if key != '' and value != '':
                key_values.append(key + "=" + value)
        elements.append(','.join(key_values))
        field_values = []
        for field_key, field_value in sorted(iteritems(point['fields'])):
            key = _escape_tag(field_key)
            value = _escape_value(field_value)
            if key != '' and value != '':
                field_values.append(key + "=" + value)
        elements.append(','.join(field_values))
        if 'time' in point:
            # Timestamp converted to the requested precision, as an integer string.
            timestamp = _get_unicode(
                str(int(_convert_timestamp(point['time'], precision))))
            elements.append(timestamp)
        line = ' '.join(elements)
        lines.append(line)
    return '\n'.join(lines) + '\n'
Extract points from given dict .
26,174
def main(host='localhost', port=8086):
    """Demo: connect to InfluxDB, write a DataFrame, query it, then clean up.

    :param host: InfluxDB host to connect to
    :param port: InfluxDB HTTP port
    """
    user = 'root'
    password = 'root'
    dbname = 'demo'
    # 'json' sends dict-of-points; 'line' would send pre-built
    # line-protocol strings.
    protocol = 'json'
    client = DataFrameClient(host, port, user, password, dbname)
    print("Create pandas DataFrame")
    df = pd.DataFrame(data=list(range(30)),
                      index=pd.date_range(start='2014-11-16',
                                          periods=30, freq='H'),
                      columns=['0'])
    print("Create database: " + dbname)
    client.create_database(dbname)
    print("Write DataFrame")
    client.write_points(df, 'demo', protocol=protocol)
    print("Write DataFrame with Tags")
    client.write_points(df, 'demo', {'k1': 'v1', 'k2': 'v2'},
                        protocol=protocol)
    print("Read DataFrame")
    client.query("select * from demo")
    print("Delete database: " + dbname)
    client.drop_database(dbname)
Instantiate the connection to the InfluxDB client .
26,175
def _parse_dsn(dsn):
    """Parse a data source name into InfluxDBClient constructor kwargs.

    Accepted schemes: ``influxdb://``, ``udp+influxdb://``,
    ``https+influxdb://``. Returns a dict with 'hosts' (a list of
    ``(host, port)`` tuples), 'username', 'password', and optionally
    'database', 'use_udp', or 'ssl'.

    :raises ValueError: on an unknown scheme or modifier
    """
    conn_params = urlparse(dsn)
    init_args = {}
    # A modifier like 'udp' or 'https' may be prefixed with '+'.
    scheme_info = conn_params.scheme.split('+')
    if len(scheme_info) == 1:
        scheme = scheme_info[0]
        modifier = None
    else:
        modifier, scheme = scheme_info
    if scheme != 'influxdb':
        raise ValueError('Unknown scheme "{0}".'.format(scheme))
    if modifier:
        if modifier == 'udp':
            init_args['use_udp'] = True
        elif modifier == 'https':
            init_args['ssl'] = True
        else:
            raise ValueError('Unknown modifier "{0}".'.format(modifier))
    # Multiple hosts may be comma-separated in the netloc.
    netlocs = conn_params.netloc.split(',')
    init_args['hosts'] = []
    for netloc in netlocs:
        parsed = _parse_netloc(netloc)
        init_args['hosts'].append((parsed['host'], int(parsed['port'])))
    # NOTE(review): credentials come from the *last* netloc parsed in the
    # loop above -- confirm this is intentional for multi-host DSNs.
    init_args['username'] = parsed['username']
    init_args['password'] = parsed['password']
    if conn_params.path and len(conn_params.path) > 1:
        # Strip the leading '/' from the path to get the database name.
        init_args['database'] = conn_params.path[1:]
    return init_args
Parse data source name .
26,176
def from_dsn(cls, dsn, **kwargs):
    r"""Build a client instance from a data source name (DSN).

    Only the first host in a multi-host DSN is used; the rest are
    ignored. Extra keyword arguments override values parsed from the DSN.

    :param dsn: data source name string
    :returns: a new client instance
    """
    init_args = _parse_dsn(dsn)
    # The client connects to a single host; take the first from the DSN.
    host, port = init_args.pop('hosts')[0]
    init_args['host'] = host
    init_args['port'] = port
    # Explicit kwargs win over DSN-derived settings.
    init_args.update(kwargs)
    return cls(**init_args)
Generate an instance of InfluxDBClient from the given data source name.
26,177
def switch_user(self, username, password):
    """Switch the credentials used for subsequent requests."""
    self._username, self._password = username, password
Change the client's username and password.
26,178
def request(self, url, method='GET', params=None, data=None,
            expected_response_code=200, headers=None):
    """Make an HTTP request to the InfluxDB API, retrying on network errors.

    :param url: path relative to the base URL (e.g. 'query', 'write')
    :param method: HTTP verb
    :param params: query-string parameters
    :param data: request body; dicts/lists are JSON-encoded first
    :param expected_response_code: status code treated as success
    :param headers: request headers (defaults to the client's headers)
    :returns: the response object on success
    :raises InfluxDBServerError: on a 5xx response
    :raises InfluxDBClientError: on any other unexpected status code
    """
    url = "{0}/{1}".format(self._baseurl, url)
    if headers is None:
        headers = self._headers
    if params is None:
        params = {}
    if isinstance(data, (dict, list)):
        data = json.dumps(data)
    # Retry on connection-level failures. NOTE(review): when
    # self._retries == 0 this loop retries indefinitely -- presumably
    # "0 means retry forever"; confirm against the client documentation.
    retry = True
    _try = 0
    while retry:
        try:
            response = self._session.request(
                method=method,
                url=url,
                auth=(self._username, self._password),
                params=params,
                data=data,
                headers=headers,
                proxies=self._proxies,
                verify=self._verify_ssl,
                timeout=self._timeout
            )
            break
        except (requests.exceptions.ConnectionError,
                requests.exceptions.HTTPError,
                requests.exceptions.Timeout):
            _try += 1
            if self._retries != 0:
                retry = _try < self._retries
            # Randomized exponential backoff, applied to POSTs (writes) only.
            if method == "POST":
                time.sleep((2 ** _try) * random.random() / 100.0)
            if not retry:
                raise
    if 500 <= response.status_code < 600:
        raise InfluxDBServerError(response.content)
    elif response.status_code == expected_response_code:
        return response
    else:
        raise InfluxDBClientError(response.content, response.status_code)
Make a HTTP request to the InfluxDB API .
26,179
def write(self, data, params=None, expected_response_code=204,
          protocol='json'):
    """Write *data* to the InfluxDB /write endpoint.

    :param data: dict of points ('json' protocol) or line-protocol
        string(s) ('line' protocol)
    :param params: request parameters; 'precision' is honored when
        serializing timestamps
    :param expected_response_code: HTTP status treated as success
    :param protocol: 'json' or 'line'
    :returns: True on success
    """
    # Copy so the shared default header dict is not permanently mutated
    # (the original assigned Content-Type into self._headers itself).
    headers = dict(self._headers)
    headers['Content-Type'] = 'application/octet-stream'
    precision = params.get('precision') if params else None
    if protocol == 'json':
        data = make_lines(data, precision).encode('utf-8')
    elif protocol == 'line':
        if isinstance(data, str):
            data = [data]
        data = ('\n'.join(data) + '\n').encode('utf-8')
    self.request(url="write",
                 method='POST',
                 params=params,
                 data=data,
                 expected_response_code=expected_response_code,
                 headers=headers)
    return True
Write data to InfluxDB .
26,180
def query(self, query, params=None, bind_params=None, epoch=None,
          expected_response_code=200, database=None, raise_errors=True,
          chunked=False, chunk_size=0, method="GET"):
    """Send a query to InfluxDB and return the parsed result set(s).

    :param query: the InfluxQL query string
    :param params: extra request parameters; may contain a JSON-encoded
        'params' entry that *bind_params* is merged into
    :param bind_params: bind parameters for the query
    :param epoch: timestamp precision for returned times
    :param expected_response_code: status code treated as success
    :param database: database to run against (defaults to the client's)
    :param raise_errors: whether ResultSet raises on per-series errors
    :param chunked: request a chunked (streamed) response
    :param chunk_size: points per chunk when *chunked* is set
    :param method: HTTP verb; SELECT ... INTO is forced to POST
    :returns: a single ResultSet, or a list when the server returns
        several results; chunked responses are read via
        self._read_chunked_response
    """
    if params is None:
        params = {}
    if bind_params is not None:
        # Merge bind parameters into any caller-provided 'params' JSON.
        params_dict = json.loads(params.get('params', '{}'))
        params_dict.update(bind_params)
        params['params'] = json.dumps(params_dict)
    params['q'] = query
    params['db'] = database or self._database
    if epoch is not None:
        params['epoch'] = epoch
    if chunked:
        params['chunked'] = 'true'
        if chunk_size > 0:
            params['chunk_size'] = chunk_size
    # SELECT ... INTO writes data, so it must be sent as a POST.
    if query.lower().startswith("select ") and " into " in query.lower():
        method = "POST"
    response = self.request(
        url="query",
        method=method,
        params=params,
        data=None,
        expected_response_code=expected_response_code
    )
    if chunked:
        return self._read_chunked_response(response)
    data = response.json()
    results = [
        ResultSet(result, raise_errors=raise_errors)
        for result in data.get('results', [])
    ]
    # Unwrap a single result for caller convenience.
    if len(results) == 1:
        return results[0]
    return results
Send a query to InfluxDB .
26,181
def ping(self):
    """Check connectivity and return the reported InfluxDB version string."""
    headers = self.request(url="ping",
                           method='GET',
                           expected_response_code=204).headers
    return headers['X-Influxdb-Version']
Check connectivity to InfluxDB .
26,182
def create_retention_policy(self, name, duration, replication,
                            database=None, default=False,
                            shard_duration="0s"):
    """Create a retention policy on *database* (or the client's database).

    :param name: policy name
    :param duration: how long data is kept (e.g. '4w')
    :param replication: replication factor
    :param database: target database; defaults to the current one
    :param default: make this the default policy when True
    :param shard_duration: shard group duration; '0s' lets the server pick
    """
    statement = ("CREATE RETENTION POLICY {0} ON {1} "
                 "DURATION {2} REPLICATION {3} SHARD DURATION {4}").format(
        quote_ident(name),
        quote_ident(database or self._database),
        duration,
        replication,
        shard_duration)
    if default is True:
        statement += " DEFAULT"
    self.query(statement, method="POST")
Create a retention policy for a database .
26,183
def alter_retention_policy(self, name, database=None,
                           duration=None, replication=None,
                           default=None, shard_duration=None):
    """Modify an existing retention policy.

    Only clauses whose arguments are provided are emitted; supply at
    least one of duration/replication/shard_duration/default.

    Fix: the original passed ``shard_duration`` as an extra argument to a
    two-placeholder ``.format()`` call, where it was silently ignored.
    """
    query_string = "ALTER RETENTION POLICY {0} ON {1}".format(
        quote_ident(name), quote_ident(database or self._database))
    if duration:
        query_string += " DURATION {0}".format(duration)
    if shard_duration:
        query_string += " SHARD DURATION {0}".format(shard_duration)
    if replication:
        query_string += " REPLICATION {0}".format(replication)
    if default is True:
        query_string += " DEFAULT"
    self.query(query_string, method="POST")
Modify an existing retention policy for a database .
26,184
def drop_retention_policy(self, name, database=None):
    """Drop the retention policy *name* from *database* (or the current one)."""
    target_db = quote_ident(database or self._database)
    statement = "DROP RETENTION POLICY {0} ON {1}".format(
        quote_ident(name), target_db)
    self.query(statement, method="POST")
Drop an existing retention policy for a database .
26,185
def get_list_retention_policies(self, database=None):
    """List retention policies for *database* (or the client's database).

    :raises InfluxDBClientError: when no database can be determined
    :returns: list of retention-policy dicts
    """
    target_db = database or self._database
    if not target_db:
        raise InfluxDBClientError(
            "get_list_retention_policies() requires a database as a "
            "parameter or the client to be using a database")
    rsp = self.query(
        "SHOW RETENTION POLICIES ON {0}".format(quote_ident(target_db)))
    return list(rsp.get_points())
Get the list of retention policies for a database .
26,186
def create_user(self, username, password, admin=False):
    """Create a new InfluxDB user; optionally grant all privileges."""
    statement = "CREATE USER {0} WITH PASSWORD {1}".format(
        quote_ident(username), quote_literal(password))
    if admin:
        statement += ' WITH ALL PRIVILEGES'
    self.query(statement, method="POST")
Create a new user in InfluxDB .
26,187
def drop_user(self, username):
    """Drop *username* from InfluxDB.

    Fix: the original erroneously passed ``method="POST"`` as a keyword
    argument to ``str.format``, where it was silently ignored.
    """
    text = "DROP USER {0}".format(quote_ident(username))
    self.query(text, method="POST")
Drop a user from InfluxDB .
26,188
def set_user_password(self, username, password):
    """Change an existing user's password."""
    statement = "SET PASSWORD FOR {0} = {1}".format(
        quote_ident(username), quote_literal(password))
    self.query(statement)
Change the password of an existing user .
26,189
def delete_series(self, database=None, measurement=None, tags=None):
    """Delete series, optionally filtered by measurement and exact tag values.

    :param database: database to delete from; defaults to the client's
    :param measurement: restrict deletion to this measurement
    :param tags: dict of tag key/value pairs that must all match
    """
    database = database or self._database
    parts = ['DROP SERIES']
    if measurement:
        parts.append('FROM {0}'.format(quote_ident(measurement)))
    if tags:
        conditions = ["{0}={1}".format(quote_ident(k), quote_literal(v))
                      for k, v in tags.items()]
        parts.append('WHERE ' + ' AND '.join(conditions))
    self.query(' '.join(parts), database=database, method="POST")
Delete series from a database .
26,190
def get_list_privileges(self, username):
    """Return the list of privileges granted to *username*."""
    statement = "SHOW GRANTS FOR {0}".format(quote_ident(username))
    return list(self.query(statement).get_points())
Get the list of all privileges granted to given user .
26,191
def get_list_continuous_queries(self):
    """Return continuous queries grouped per database.

    :returns: a list of one-entry dicts mapping each database name to its
        continuous-query points
    """
    result_set = self.query("SHOW CONTINUOUS QUERIES")
    return [{series_key[0]: list(points)}
            for series_key, points in result_set.items()]
Get the list of continuous queries in InfluxDB .
26,192
def create_continuous_query(self, name, select, database=None,
                            resample_opts=None):
    """Create a continuous query running *select* on a database.

    :param name: continuous query name
    :param select: the SELECT statement the query executes
    :param database: target database; defaults to the client's
    :param resample_opts: optional RESAMPLE clause options
    """
    resample = ' RESAMPLE ' + resample_opts if resample_opts else ''
    statement = "CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END".format(
        quote_ident(name),
        quote_ident(database or self._database),
        resample,
        select)
    self.query(statement)
Create a continuous query for a database.
26,193
def drop_continuous_query(self, name, database=None):
    """Drop the continuous query *name* from a database."""
    statement = "DROP CONTINUOUS QUERY {0} ON {1}".format(
        quote_ident(name), quote_ident(database or self._database))
    self.query(statement)
Drop an existing continuous query for a database .
26,194
def send_packet(self, packet, protocol='json', time_precision=None):
    """Send one UDP packet of points to InfluxDB.

    :param packet: dict of points ('json') or list of line-protocol
        strings ('line')
    :param protocol: 'json' or 'line'
    :param time_precision: timestamp precision passed to the serializer
    :raises ValueError: if *protocol* is not recognized (the original
        raised an obscure NameError because ``data`` was never bound)
    """
    if protocol == 'json':
        data = make_lines(packet, time_precision).encode('utf-8')
    elif protocol == 'line':
        data = ('\n'.join(packet) + '\n').encode('utf-8')
    else:
        raise ValueError("unknown protocol: {0!r}".format(protocol))
    self.udp_socket.sendto(data, (self._host, self._udp_port))
Send an UDP packet .
26,195
def close(self):
    """Close the underlying HTTP session, if one exists."""
    session = self._session
    if isinstance(session, requests.Session):
        session.close()
Close http session .
26,196
def loads(s):
    """Yield the sequence of JSON values encoded back-to-back in string *s*."""
    decoder = json.JSONDecoder()
    while s:
        s = s.strip()
        # raw_decode returns the value plus the offset just past it.
        obj, end = decoder.raw_decode(s)
        if not end:
            raise ValueError('no JSON object found at %i' % end)
        yield obj
        s = s[end:]
Generate a sequence of JSON values from a string .
26,197
def commit(cls, client=None):
    """Write all buffered datapoints via *client*, then clear the buffer.

    Falls back to the class-level client when none is supplied.

    :returns: whatever ``client.write_points`` returns
    """
    client = client or cls._client
    outcome = client.write_points(cls._json_body_())
    cls._reset_()
    return outcome
Commit everything from datapoints via the client .
26,198
def _json_body_(cls):
    """Return the buffered datapoints as a JSON-serializable list of series."""
    return [
        {
            'name': series_name,
            'columns': cls._fields,
            # One row per point, in declared field order.
            'points': [[getattr(point, k) for k in cls._fields]
                       for point in data],
        }
        for series_name, data in six.iteritems(cls._datapoints)
    ]
Return JSON body of the datapoints .
26,199
def query(self, query, params=None, bind_params=None, epoch=None,
          expected_response_code=200, database=None, raise_errors=True,
          chunked=False, chunk_size=0, method="GET", dropna=True):
    """Query InfluxDB and return results as pandas DataFrames.

    Parameters mirror the base client's query; *dropna* controls whether
    all-NaN values are dropped when building the frames.

    :returns: for SELECT queries, the DataFrame mapping produced by
        self._to_dataframe (an empty dict when there are no results);
        other statements return the raw result set(s)
    """
    query_args = dict(params=params,
                      bind_params=bind_params,
                      epoch=epoch,
                      expected_response_code=expected_response_code,
                      raise_errors=raise_errors,
                      chunked=chunked,
                      database=database,
                      method=method,
                      chunk_size=chunk_size)
    # Delegate the HTTP work to the plain client, then post-process.
    results = super(DataFrameClient, self).query(query, **query_args)
    if query.strip().upper().startswith("SELECT"):
        if len(results) > 0:
            return self._to_dataframe(results, dropna)
        else:
            return {}
    else:
        return results
Query data into a DataFrame .