idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
21,800
async def stop ( self ) : self . _logger . info ( "Stopping all servers" ) for server in self . servers : await server . stop ( ) self . _logger . info ( "Stopping all device adapters" ) await self . device_manager . stop ( )
Stop the gateway manager and synchronously wait for it to stop .
62
13
21,801
def main ( argv = None ) : if argv is None : argv = sys . argv [ 1 : ] parser = build_args ( ) args = parser . parse_args ( args = argv ) recipe_name , _ext = os . path . splitext ( os . path . basename ( args . recipe ) ) rm = RecipeManager ( ) rm . add_recipe_folder ( os . path . dirname ( args . recipe ) , whitelist = [ os . path . basename ( args . recipe ) ] ) recipe = rm . get_recipe ( recipe_name ) if args . archive is not None : print ( "Archiving recipe into %s" % args . archive ) recipe . archive ( args . archive ) return 0 if args . info : print ( recipe ) return 0 variables = load_variables ( args . define , args . config ) success = 0 start_time = time . time ( ) if args . loop is None : try : recipe . run ( variables ) success += 1 except IOTileException as exc : print ( "Error running recipe: %s" % str ( exc ) ) return 1 else : while True : value = input ( "Enter value for loop variable %s (return to stop): " % args . loop ) if value == '' : break local_vars = dict ( * * variables ) local_vars [ args . loop ] = value try : recipe . run ( local_vars ) success += 1 except IOTileException as exc : print ( "--> ERROR processing loop variable %s: %s" % ( value , str ( exc ) ) ) end_time = time . time ( ) total_time = end_time - start_time if success == 0 : per_time = 0.0 else : per_time = total_time / success print ( "Performed %d runs in %.1f seconds (%.1f seconds / run)" % ( success , total_time , per_time ) ) return 0
Main entry point for iotile - ship recipe runner .
427
12
21,802
def subst_dict ( target , source ) : dict = { } if target : def get_tgt_subst_proxy ( thing ) : try : subst_proxy = thing . get_subst_proxy ( ) except AttributeError : subst_proxy = thing # probably a string, just return it return subst_proxy tnl = NLWrapper ( target , get_tgt_subst_proxy ) dict [ 'TARGETS' ] = Targets_or_Sources ( tnl ) dict [ 'TARGET' ] = Target_or_Source ( tnl ) # This is a total cheat, but hopefully this dictionary goes # away soon anyway. We just let these expand to $TARGETS # because that's "good enough" for the use of ToolSurrogates # (see test/ToolSurrogate.py) to generate documentation. dict [ 'CHANGED_TARGETS' ] = '$TARGETS' dict [ 'UNCHANGED_TARGETS' ] = '$TARGETS' else : dict [ 'TARGETS' ] = NullNodesList dict [ 'TARGET' ] = NullNodesList if source : def get_src_subst_proxy ( node ) : try : rfile = node . rfile except AttributeError : pass else : node = rfile ( ) try : return node . get_subst_proxy ( ) except AttributeError : return node # probably a String, just return it snl = NLWrapper ( source , get_src_subst_proxy ) dict [ 'SOURCES' ] = Targets_or_Sources ( snl ) dict [ 'SOURCE' ] = Target_or_Source ( snl ) # This is a total cheat, but hopefully this dictionary goes # away soon anyway. We just let these expand to $TARGETS # because that's "good enough" for the use of ToolSurrogates # (see test/ToolSurrogate.py) to generate documentation. dict [ 'CHANGED_SOURCES' ] = '$SOURCES' dict [ 'UNCHANGED_SOURCES' ] = '$SOURCES' else : dict [ 'SOURCES' ] = NullNodesList dict [ 'SOURCE' ] = NullNodesList return dict
Create a dictionary for substitution of special construction variables .
499
10
21,803
def escape ( self , escape_func , quote_func = quote_spaces ) : if self . is_literal ( ) : return escape_func ( self . data ) elif ' ' in self . data or '\t' in self . data : return quote_func ( self . data ) else : return self . data
Escape the string with the supplied function . The function is expected to take an arbitrary string then return it with all special characters escaped and ready for passing to the command interpreter .
71
35
21,804
def indent_list ( inlist , level ) : indent = ' ' * level joinstr = '\n' + indent retval = joinstr . join ( inlist ) return indent + retval
Join a list of strings one per line with level spaces before each one
42
14
21,805
def generate ( env ) : fortran . generate ( env ) for dialect in [ 'F77' , 'F90' , 'FORTRAN' , 'F95' , 'F03' , 'F08' ] : env [ '%s' % dialect ] = 'gfortran' env [ 'SH%s' % dialect ] = '$%s' % dialect if env [ 'PLATFORM' ] in [ 'cygwin' , 'win32' ] : env [ 'SH%sFLAGS' % dialect ] = SCons . Util . CLVar ( '$%sFLAGS' % dialect ) else : env [ 'SH%sFLAGS' % dialect ] = SCons . Util . CLVar ( '$%sFLAGS -fPIC' % dialect ) env [ 'INC%sPREFIX' % dialect ] = "-I" env [ 'INC%sSUFFIX' % dialect ] = ""
Add Builders and construction variables for gfortran to an Environment .
208
14
21,806
def _extract_device_uuid ( cls , slug ) : if len ( slug ) != 22 : raise ArgumentError ( "Invalid device slug" , slug = slug ) hexdigits = slug [ 3 : ] hexdigits = hexdigits . replace ( '-' , '' ) try : rawbytes = binascii . unhexlify ( hexdigits ) words = struct . unpack ( ">LL" , rawbytes ) return ( words [ 0 ] << 32 ) | ( words [ 1 ] ) except ValueError as exc : raise ArgumentError ( "Could not convert device slug to hex integer" , slug = slug , error = str ( exc ) )
Turn a string slug into a UUID
144
8
21,807
def start ( self ) : self . _prepare ( ) self . _disconnector = tornado . ioloop . PeriodicCallback ( self . _disconnect_hanging_devices , 1000 , self . _loop ) self . _disconnector . start ( )
Start this gateway agent .
58
5
21,808
def stop ( self ) : if self . _disconnector : self . _disconnector . stop ( ) self . client . disconnect ( )
Stop this gateway agent .
31
5
21,809
def _validate_connection ( self , action , uuid , key ) : if uuid not in self . _connections : self . _logger . warn ( "Received message for device with no connection 0x%X" , uuid ) return None data = self . _connections [ uuid ] if key != data [ 'key' ] : self . _logger . warn ( "Received message for device with incorrect key, uuid=0x%X" , uuid ) return None return data [ 'connection_id' ]
Validate that a message received for a device has the right key
118
13
21,810
def _publish_status ( self , slug , data ) : status_topic = self . topics . prefix + 'devices/{}/data/status' . format ( slug ) self . _logger . debug ( "Publishing status message: (topic=%s) (message=%s)" , status_topic , str ( data ) ) self . client . publish ( status_topic , data )
Publish a status message for a device
87
8
21,811
def _publish_response ( self , slug , message ) : resp_topic = self . topics . gateway_topic ( slug , 'data/response' ) self . _logger . debug ( "Publishing response message: (topic=%s) (message=%s)" , resp_topic , message ) self . client . publish ( resp_topic , message )
Publish a response message for a device
79
8
21,812
def _on_action ( self , sequence , topic , message ) : try : slug = None parts = topic . split ( '/' ) slug = parts [ - 3 ] uuid = self . _extract_device_uuid ( slug ) except Exception as exc : self . _logger . warn ( "Error parsing slug in action handler (slug=%s, topic=%s)" , slug , topic ) return if messages . DisconnectCommand . matches ( message ) : self . _logger . debug ( "Received disconnect command for device 0x%X" , uuid ) key = message [ 'key' ] client = message [ 'client' ] self . _loop . add_callback ( self . _disconnect_from_device , uuid , key , client ) elif messages . OpenInterfaceCommand . matches ( message ) or messages . CloseInterfaceCommand . matches ( message ) : self . _logger . debug ( "Received %s command for device 0x%X" , message [ 'operation' ] , uuid ) key = message [ 'key' ] client = message [ 'client' ] oper = message [ 'operation' ] if oper == 'open_interface' : self . _loop . add_callback ( self . _open_interface , client , uuid , message [ 'interface' ] , key ) else : self . _loop . add_callback ( self . _close_interface , client , uuid , message [ 'interface' ] , key ) elif messages . RPCCommand . matches ( message ) : rpc_msg = messages . RPCCommand . verify ( message ) client = rpc_msg [ 'client' ] address = rpc_msg [ 'address' ] rpc = rpc_msg [ 'rpc_id' ] payload = rpc_msg [ 'payload' ] key = rpc_msg [ 'key' ] timeout = rpc_msg [ 'timeout' ] self . _loop . add_callback ( self . _send_rpc , client , uuid , address , rpc , payload , timeout , key ) elif messages . ScriptCommand . matches ( message ) : script_msg = messages . ScriptCommand . verify ( message ) key = script_msg [ 'key' ] client = script_msg [ 'client' ] script = script_msg [ 'script' ] self . _loop . add_callback ( self . _send_script , client , uuid , script , key , ( script_msg [ 'fragment_index' ] , script_msg [ 'fragment_count' ] ) ) else : self . _logger . error ( "Unsupported message received (topic=%s) (message=%s)" , topic , str ( message ) )
Process a command action that we received on behalf of a device .
596
13
21,813
def _on_connect ( self , sequence , topic , message ) : try : slug = None parts = topic . split ( '/' ) slug = parts [ - 3 ] uuid = self . _extract_device_uuid ( slug ) except Exception : self . _logger . exception ( "Error parsing slug from connection request (slug=%s, topic=%s)" , slug , topic ) return if messages . ConnectCommand . matches ( message ) : key = message [ 'key' ] client = message [ 'client' ] self . _loop . add_callback ( self . _connect_to_device , uuid , key , client ) else : self . _logger . warn ( "Unknown message received on connect topic=%s, message=%s" , topic , message )
Process a request to connect to an IOTile device
171
11
21,814
def _send_rpc ( self , client , uuid , address , rpc , payload , timeout , key ) : conn_id = self . _validate_connection ( 'send_rpc' , uuid , key ) if conn_id is None : return conn_data = self . _connections [ uuid ] conn_data [ 'last_touch' ] = monotonic ( ) slug = self . _build_device_slug ( uuid ) try : resp = yield self . _manager . send_rpc ( conn_id , address , rpc >> 8 , rpc & 0xFF , bytes ( payload ) , timeout ) except Exception as exc : self . _logger . error ( "Error in manager send rpc: %s" % str ( exc ) ) resp = { 'success' : False , 'reason' : "Internal error: %s" % str ( exc ) } payload = { 'client' : client , 'type' : 'response' , 'operation' : 'rpc' } payload [ 'success' ] = resp [ 'success' ] if resp [ 'success' ] is False : payload [ 'failure_reason' ] = resp [ 'reason' ] else : payload [ 'status' ] = resp [ 'status' ] payload [ 'payload' ] = binascii . hexlify ( resp [ 'payload' ] ) self . _publish_response ( slug , payload )
Send an RPC to a connected device
315
7
21,815
def _send_script ( self , client , uuid , chunk , key , chunk_status ) : conn_id = self . _validate_connection ( 'send_script' , uuid , key ) if conn_id is None : return conn_data = self . _connections [ uuid ] conn_data [ 'last_touch' ] = monotonic ( ) slug = self . _build_device_slug ( uuid ) # Check and see if we have the entire script or if we need to accumulate it index , count = chunk_status if index == 0 : conn_data [ 'script' ] = bytes ( ) conn_data [ 'script' ] += chunk # If there is more than one chunk and we aren't on the last one, wait until we receive them # all before sending them on to the device as a unit if index != count - 1 : return # Initialize our progress throttling system in case we need to throttle progress reports conn_data [ 'last_progress' ] = None try : resp = yield self . _manager . send_script ( conn_id , conn_data [ 'script' ] , lambda x , y : self . _notify_progress_async ( uuid , client , x , y ) ) yield None # Make sure we give time for any progress notifications that may have been queued to flush out conn_data [ 'script' ] = bytes ( ) except Exception as exc : self . _logger . exception ( "Error in manager send_script" ) resp = { 'success' : False , 'reason' : "Internal error: %s" % str ( exc ) } payload = { 'client' : client , 'type' : 'response' , 'operation' : 'send_script' , 'success' : resp [ 'success' ] } if resp [ 'success' ] is False : payload [ 'failure_reason' ] = resp [ 'reason' ] self . _publish_response ( slug , payload )
Send a script to the connected device .
428
8
21,816
def _open_interface ( self , client , uuid , iface , key ) : conn_id = self . _validate_connection ( 'open_interface' , uuid , key ) if conn_id is None : return conn_data = self . _connections [ uuid ] conn_data [ 'last_touch' ] = monotonic ( ) slug = self . _build_device_slug ( uuid ) try : resp = yield self . _manager . open_interface ( conn_id , iface ) except Exception as exc : self . _logger . exception ( "Error in manager open interface" ) resp = { 'success' : False , 'reason' : "Internal error: %s" % str ( exc ) } message = { 'type' : 'response' , 'operation' : 'open_interface' , 'client' : client } message [ 'success' ] = resp [ 'success' ] if not message [ 'success' ] : message [ 'failure_reason' ] = resp [ 'reason' ] self . _publish_response ( slug , message )
Open an interface on a connected device .
239
8
21,817
def _disconnect_hanging_devices ( self ) : now = monotonic ( ) for uuid , data in self . _connections . items ( ) : if ( now - data [ 'last_touch' ] ) > self . client_timeout : self . _logger . info ( "Disconnect inactive client %s from device 0x%X" , data [ 'client' ] , uuid ) self . _loop . add_callback ( self . _disconnect_from_device , uuid , data [ 'key' ] , data [ 'client' ] , unsolicited = True )
Periodic callback that checks for devices that haven't been used and disconnects them .
130
18
21,818
def _disconnect_from_device ( self , uuid , key , client , unsolicited = False ) : conn_id = self . _validate_connection ( 'disconnect' , uuid , key ) if conn_id is None : return conn_data = self . _connections [ uuid ] slug = self . _build_device_slug ( uuid ) message = { 'client' : client , 'type' : 'response' , 'operation' : 'disconnect' } self . client . reset_sequence ( self . topics . gateway_topic ( slug , 'control/connect' ) ) self . client . reset_sequence ( self . topics . gateway_topic ( slug , 'control/action' ) ) try : resp = yield self . _manager . disconnect ( conn_id ) except Exception as exc : self . _logger . exception ( "Error in manager disconnect" ) resp = { 'success' : False , 'reason' : "Internal error: %s" % str ( exc ) } # Remove any monitors that we registered for this device self . _manager . remove_monitor ( conn_data [ 'report_monitor' ] ) self . _manager . remove_monitor ( conn_data [ 'trace_monitor' ] ) if resp [ 'success' ] : del self . _connections [ uuid ] message [ 'success' ] = True else : message [ 'success' ] = False message [ 'failure_reason' ] = resp [ 'reason' ] self . _logger . info ( "Client %s disconnected from device 0x%X" , client , uuid ) # Send a response for all requested disconnects and if we tried to disconnect the client # on our own and succeeded, send an unsolicited notification to that effect if unsolicited and resp [ 'success' ] : self . _publish_response ( slug , { 'client' : client , 'type' : 'notification' , 'operation' : 'disconnect' } ) elif not unsolicited : self . _publish_response ( slug , message )
Disconnect from a device that we have previously connected to .
445
12
21,819
def _notify_report ( self , device_uuid , event_name , report ) : if device_uuid not in self . _connections : self . _logger . debug ( "Dropping report for device without an active connection, uuid=0x%X" , device_uuid ) return slug = self . _build_device_slug ( device_uuid ) streaming_topic = self . topics . prefix + 'devices/{}/data/streaming' . format ( slug ) data = { 'type' : 'notification' , 'operation' : 'report' } ser = report . serialize ( ) data [ 'received_time' ] = ser [ 'received_time' ] . strftime ( "%Y%m%dT%H:%M:%S.%fZ" ) . encode ( ) data [ 'report_origin' ] = ser [ 'origin' ] data [ 'report_format' ] = ser [ 'report_format' ] data [ 'report' ] = binascii . hexlify ( ser [ 'encoded_report' ] ) data [ 'fragment_count' ] = 1 data [ 'fragment_index' ] = 0 self . _logger . debug ( "Publishing report: (topic=%s)" , streaming_topic ) self . client . publish ( streaming_topic , data )
Notify that a report has been received from a device .
303
12
21,820
def _notify_trace ( self , device_uuid , event_name , trace ) : if device_uuid not in self . _connections : self . _logger . debug ( "Dropping trace data for device without an active connection, uuid=0x%X" , device_uuid ) return conn_data = self . _connections [ device_uuid ] last_trace = conn_data [ 'last_trace' ] now = monotonic ( ) conn_data [ 'trace_accum' ] += bytes ( trace ) # If we're throttling tracing data, we need to see if we should accumulate this trace or # send it now. We acculumate if we've last sent tracing data less than self.throttle_trace seconds ago if last_trace is not None and ( now - last_trace ) < self . throttle_trace : if not conn_data [ 'trace_scheduled' ] : self . _loop . call_later ( self . throttle_trace - ( now - last_trace ) , self . _send_accum_trace , device_uuid ) conn_data [ 'trace_scheduled' ] = True self . _logger . debug ( "Deferring trace data due to throttling uuid=0x%X" , device_uuid ) else : self . _send_accum_trace ( device_uuid )
Notify that we have received tracing data from a device .
305
12
21,821
def _send_accum_trace ( self , device_uuid ) : if device_uuid not in self . _connections : self . _logger . debug ( "Dropping trace data for device without an active connection, uuid=0x%X" , device_uuid ) return conn_data = self . _connections [ device_uuid ] trace = conn_data [ 'trace_accum' ] if len ( trace ) > 0 : slug = self . _build_device_slug ( device_uuid ) tracing_topic = self . topics . prefix + 'devices/{}/data/tracing' . format ( slug ) data = { 'type' : 'notification' , 'operation' : 'trace' } data [ 'trace' ] = binascii . hexlify ( trace ) data [ 'trace_origin' ] = device_uuid self . _logger . debug ( 'Publishing trace: (topic=%s)' , tracing_topic ) self . client . publish ( tracing_topic , data ) conn_data [ 'trace_scheduled' ] = False conn_data [ 'last_trace' ] = monotonic ( ) conn_data [ 'trace_accum' ] = bytes ( )
Send whatever accumulated tracing data we have for the device .
276
11
21,822
def _on_scan_request ( self , sequence , topic , message ) : if messages . ProbeCommand . matches ( message ) : self . _logger . debug ( "Received probe message on topic %s, message=%s" , topic , message ) self . _loop . add_callback ( self . _publish_scan_response , message [ 'client' ] ) else : self . _logger . warn ( "Invalid message received on topic %s, message=%s" , topic , message )
Process a request for scanning information
111
6
21,823
def _publish_scan_response ( self , client ) : devices = self . _manager . scanned_devices converted_devs = [ ] for uuid , info in devices . items ( ) : slug = self . _build_device_slug ( uuid ) message = { } message [ 'uuid' ] = uuid if uuid in self . _connections : message [ 'user_connected' ] = True elif 'user_connected' in info : message [ 'user_connected' ] = info [ 'user_connected' ] else : message [ 'user_connected' ] = False message [ 'connection_string' ] = slug message [ 'signal_strength' ] = info [ 'signal_strength' ] converted_devs . append ( { x : y for x , y in message . items ( ) } ) message [ 'type' ] = 'notification' message [ 'operation' ] = 'advertisement' self . client . publish ( self . topics . gateway_topic ( slug , 'data/advertisement' ) , message ) probe_message = { } probe_message [ 'type' ] = 'response' probe_message [ 'client' ] = client probe_message [ 'success' ] = True probe_message [ 'devices' ] = converted_devs self . client . publish ( self . topics . status , probe_message )
Publish a scan response message
296
6
21,824
def _versioned_lib_suffix ( env , suffix , version ) : Verbose = False if Verbose : print ( "_versioned_lib_suffix: suffix={:r}" . format ( suffix ) ) print ( "_versioned_lib_suffix: version={:r}" . format ( version ) ) if not suffix . endswith ( version ) : suffix = suffix + '.' + version if Verbose : print ( "_versioned_lib_suffix: return suffix={:r}" . format ( suffix ) ) return suffix
For suffix = . so and version = 0 . 1 . 2 it returns . so . 0 . 1 . 2
117
23
21,825
def _setup_versioned_lib_variables ( env , * * kw ) : tool = None try : tool = kw [ 'tool' ] except KeyError : pass use_soname = False try : use_soname = kw [ 'use_soname' ] except KeyError : pass # The $_SHLIBVERSIONFLAGS define extra commandline flags used when # building VERSIONED shared libraries. It's always set, but used only # when VERSIONED library is built (see __SHLIBVERSIONFLAGS in SCons/Defaults.py). if use_soname : # If the linker uses SONAME, then we need this little automata if tool == 'sunlink' : env [ '_SHLIBVERSIONFLAGS' ] = '$SHLIBVERSIONFLAGS -h $_SHLIBSONAME' env [ '_LDMODULEVERSIONFLAGS' ] = '$LDMODULEVERSIONFLAGS -h $_LDMODULESONAME' else : env [ '_SHLIBVERSIONFLAGS' ] = '$SHLIBVERSIONFLAGS -Wl,-soname=$_SHLIBSONAME' env [ '_LDMODULEVERSIONFLAGS' ] = '$LDMODULEVERSIONFLAGS -Wl,-soname=$_LDMODULESONAME' env [ '_SHLIBSONAME' ] = '${ShLibSonameGenerator(__env__,TARGET)}' env [ '_LDMODULESONAME' ] = '${LdModSonameGenerator(__env__,TARGET)}' env [ 'ShLibSonameGenerator' ] = SCons . Tool . ShLibSonameGenerator env [ 'LdModSonameGenerator' ] = SCons . Tool . LdModSonameGenerator else : env [ '_SHLIBVERSIONFLAGS' ] = '$SHLIBVERSIONFLAGS' env [ '_LDMODULEVERSIONFLAGS' ] = '$LDMODULEVERSIONFLAGS' # LDOMDULVERSIONFLAGS should always default to $SHLIBVERSIONFLAGS env [ 'LDMODULEVERSIONFLAGS' ] = '$SHLIBVERSIONFLAGS'
Setup all variables required by the versioning machinery
487
9
21,826
def main ( argv = None , loop = SharedLoop , max_time = None ) : should_raise = argv is not None if argv is None : argv = sys . argv [ 1 : ] parser = build_parser ( ) cmd_args = parser . parse_args ( argv ) configure_logging ( cmd_args . verbose ) logger = logging . getLogger ( __name__ ) try : args = { } if cmd_args . config is not None : try : with open ( cmd_args . config , "r" ) as conf : args = json . load ( conf ) except IOError as exc : raise ScriptError ( "Could not open config file %s due to %s" % ( cmd_args . config , str ( exc ) ) , 2 ) except ValueError as exc : raise ScriptError ( "Could not parse JSON from config file %s due to %s" % ( cmd_args . config , str ( exc ) ) , 3 ) except TypeError as exc : raise ScriptError ( "You must pass the path to a json config file" , 4 ) logger . critical ( "Starting gateway" ) gateway = IOTileGateway ( args , loop = loop ) loop . run_coroutine ( gateway . start ( ) ) logger . critical ( "Gateway running" ) # Run forever until we receive a ctrl-c # (allow quitting early after max_time seconds for testing) loop . wait_for_interrupt ( max_time = max_time ) loop . run_coroutine ( gateway . stop ( ) ) except ScriptError as exc : if should_raise : raise exc logger . fatal ( "Quitting due to error: %s" , exc . msg ) return exc . code except Exception as exc : # pylint: disable=W0703 if should_raise : raise exc logger . exception ( "Fatal error running gateway" ) return 1 return 0
Main entry point for iotile - gateway .
411
10
21,827
def copy ( self ) : return _TimeAnchor ( self . reading_id , self . uptime , self . utc , self . is_break , self . exact )
Return a copy of this _TimeAnchor .
39
11
21,828
def anchor_stream ( self , stream_id , converter = "rtc" ) : if isinstance ( converter , str ) : converter = self . _known_converters . get ( converter ) if converter is None : raise ArgumentError ( "Unknown anchor converter string: %s" % converter , known_converters = list ( self . _known_converters ) ) self . _anchor_streams [ stream_id ] = converter
Mark a stream as containing anchor points .
97
8
21,829
def id_range ( self ) : if len ( self . _anchor_points ) == 0 : return ( 0 , 0 ) return ( self . _anchor_points [ 0 ] . reading_id , self . _anchor_points [ - 1 ] . reading_id )
Get the range of anchor reading_ids .
61
10
21,830
def _convert_epoch_anchor ( cls , reading ) : delta = datetime . timedelta ( seconds = reading . value ) return cls . _EpochReference + delta
Convert a reading containing an epoch timestamp to datetime .
41
12
21,831
def add_point ( self , reading_id , uptime = None , utc = None , is_break = False ) : if reading_id == 0 : return if uptime is None and utc is None : return if uptime is not None and uptime & ( 1 << 31 ) : if utc is not None : return uptime &= ~ ( 1 << 31 ) utc = self . convert_rtc ( uptime ) uptime = None anchor = _TimeAnchor ( reading_id , uptime , utc , is_break , exact = utc is not None ) if anchor in self . _anchor_points : return self . _anchor_points . add ( anchor ) self . _prepared = False
Add a time point that could be used as a UTC reference .
160
13
21,832
def add_reading ( self , reading ) : is_break = False utc = None if reading . stream in self . _break_streams : is_break = True if reading . stream in self . _anchor_streams : utc = self . _anchor_streams [ reading . stream ] ( reading ) self . add_point ( reading . reading_id , reading . raw_time , utc , is_break = is_break )
Add an IOTileReading .
99
7
21,833
def add_report ( self , report , ignore_errors = False ) : if not isinstance ( report , SignedListReport ) : if ignore_errors : return raise ArgumentError ( "You can only add SignedListReports to a UTCAssigner" , report = report ) for reading in report . visible_readings : self . add_reading ( reading ) self . add_point ( report . report_id , report . sent_timestamp , report . received_time )
Add all anchors from a report .
101
7
21,834
def assign_utc ( self , reading_id , uptime = None , prefer = "before" ) : if prefer not in ( "before" , "after" ) : raise ArgumentError ( "Invalid prefer parameter: {}, must be 'before' or 'after'" . format ( prefer ) ) if len ( self . _anchor_points ) == 0 : return None if reading_id > self . _anchor_points [ - 1 ] . reading_id : return None i = self . _anchor_points . bisect_key_left ( reading_id ) found_id = False crossed_break = False exact = True last = self . _anchor_points [ i ] . copy ( ) if uptime is not None : last . uptime = uptime if last . reading_id == reading_id : found_id = True if last . utc is not None : return UTCAssignment ( reading_id , last . utc , found_id , exact , crossed_break ) left_assign = self . _fix_left ( reading_id , last , i , found_id ) if left_assign is not None and left_assign . exact : return left_assign right_assign = self . _fix_right ( reading_id , last , i , found_id ) if right_assign is not None and right_assign . exact : return right_assign return self . _pick_best_fix ( left_assign , right_assign , prefer )
Assign a utc datetime to a reading id .
328
12
21,835
def ensure_prepared ( self ) : if self . _prepared : return exact_count = 0 fixed_count = 0 inexact_count = 0 self . _logger . debug ( "Preparing UTCAssigner (%d total anchors)" , len ( self . _anchor_points ) ) for curr in self . _anchor_points : if not curr . exact : assignment = self . assign_utc ( curr . reading_id , curr . uptime ) if assignment is not None and assignment . exact : curr . utc = assignment . utc curr . exact = True fixed_count += 1 else : inexact_count += 1 else : exact_count += 1 self . _logger . debug ( "Prepared UTCAssigner with %d reference points, " "%d exact anchors and %d inexact anchors" , exact_count , fixed_count , inexact_count ) self . _prepared = True
Calculate and cache UTC values for all exactly known anchor points .
206
14
21,836
def fix_report ( self , report , errors = "drop" , prefer = "before" ) : if not isinstance ( report , SignedListReport ) : raise ArgumentError ( "Report must be a SignedListReport" , report = report ) if errors not in ( 'drop' , ) : raise ArgumentError ( "Unknown errors handler: {}, supported=['drop']" . format ( errors ) ) self . ensure_prepared ( ) fixed_readings = [ ] dropped_readings = 0 for reading in report . visible_readings : assignment = self . assign_utc ( reading . reading_id , reading . raw_time , prefer = prefer ) if assignment is None : dropped_readings += 1 continue fixed_reading = IOTileReading ( assignment . rtc_value , reading . stream , reading . value , reading_time = assignment . utc , reading_id = reading . reading_id ) fixed_readings . append ( fixed_reading ) fixed_report = SignedListReport . FromReadings ( report . origin , fixed_readings , report_id = report . report_id , selector = report . streamer_selector , streamer = report . origin_streamer , sent_timestamp = report . sent_timestamp ) fixed_report . received_time = report . received_time if dropped_readings > 0 : self . _logger . warning ( "Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X" , dropped_readings , len ( report . visible_readings ) , report . report_id , report . origin ) return fixed_report
Perform utc assignment on all readings in a report .
360
12
21,837
def _fix_left ( self , reading_id , last , start , found_id ) : accum_delta = 0 exact = True crossed_break = False if start == 0 : return None for curr in self . _anchor_points . islice ( None , start - 1 , reverse = True ) : if curr . uptime is None or last . uptime is None : exact = False elif curr . is_break or last . uptime < curr . uptime : exact = False crossed_break = True else : accum_delta += last . uptime - curr . uptime if curr . utc is not None : time_delta = datetime . timedelta ( seconds = accum_delta ) return UTCAssignment ( reading_id , curr . utc + time_delta , found_id , exact , crossed_break ) last = curr return None
Fix a reading by looking for the nearest anchor point before it .
197
13
21,838
def sconsign_dir ( node ) : if not node . _sconsign : import SCons . SConsign node . _sconsign = SCons . SConsign . ForDirectory ( node ) return node . _sconsign
Return the . sconsign file info for this directory creating it first if necessary .
52
17
21,839
def __get_base_path ( self ) : entry = self . get ( ) return SCons . Subst . SpecialAttrWrapper ( SCons . Util . splitext ( entry . get_path ( ) ) [ 0 ] , entry . name + "_base" )
Return the file's directory and file name with the suffix stripped .
60
13
21,840
def __get_windows_path ( self ) : if OS_SEP == '\\' : return self else : entry = self . get ( ) r = entry . get_path ( ) . replace ( OS_SEP , '\\' ) return SCons . Subst . SpecialAttrWrapper ( r , entry . name + "_windows" )
Return the path with \ as the path separator regardless of platform .
75
14
21,841
def must_be_same ( self , klass ) : if isinstance ( self , klass ) or klass is Entry : return raise TypeError ( "Tried to lookup %s '%s' as a %s." % ( self . __class__ . __name__ , self . get_internal_path ( ) , klass . __name__ ) )
This node which already existed is being looked up as the specified klass . Raise an exception if it isn't .
79
23
21,842
def srcnode ( self ) : srcdir_list = self . dir . srcdir_list ( ) if srcdir_list : srcnode = srcdir_list [ 0 ] . Entry ( self . name ) srcnode . must_be_same ( self . __class__ ) return srcnode return self
If this node is in a build path return the node corresponding to its source file . Otherwise return ourself .
65
22
21,843
def get_path ( self , dir = None ) : if not dir : dir = self . fs . getcwd ( ) if self == dir : return '.' path_elems = self . get_path_elements ( ) pathname = '' try : i = path_elems . index ( dir ) except ValueError : for p in path_elems [ : - 1 ] : pathname += p . dirname else : for p in path_elems [ i + 1 : - 1 ] : pathname += p . dirname return pathname + path_elems [ - 1 ] . name
Return path relative to the current working directory of the Node . FS . Base object that owns us .
130
20
21,844
def set_src_builder ( self , builder ) : self . sbuilder = builder if not self . has_builder ( ) : self . builder_set ( builder )
Set the source code builder for this node .
36
9
21,845
def src_builder ( self ) : try : scb = self . sbuilder except AttributeError : scb = self . dir . src_builder ( ) self . sbuilder = scb return scb
Fetch the source code builder for this node .
44
10
21,846
def Rfindalldirs ( self , pathlist ) : try : memo_dict = self . _memo [ 'Rfindalldirs' ] except KeyError : memo_dict = { } self . _memo [ 'Rfindalldirs' ] = memo_dict else : try : return memo_dict [ pathlist ] except KeyError : pass create_dir_relative_to_self = self . Dir result = [ ] for path in pathlist : if isinstance ( path , SCons . Node . Node ) : result . append ( path ) else : dir = create_dir_relative_to_self ( path ) result . extend ( dir . get_all_rdirs ( ) ) memo_dict [ pathlist ] = result return result
Return all of the directories for a given path list including corresponding backing directories in any repositories .
163
18
21,847
def RDirs ( self , pathlist ) : cwd = self . cwd or self . fs . _cwd return cwd . Rfindalldirs ( pathlist )
Search for a list of directories in the Repository list .
38
12
21,848
def rfile ( self ) : self . __class__ = File self . _morph ( ) self . clear ( ) return File . rfile ( self )
We re a generic Entry but the caller is actually looking for a File at this point so morph into one .
33
22
21,849
def get_text_contents ( self ) : try : self = self . disambiguate ( must_exist = 1 ) except SCons . Errors . UserError : # There was nothing on disk with which to disambiguate # this entry. Leave it as an Entry, but return a null # string so calls to get_text_contents() in emitters and # the like (e.g. in qt.py) don't have to disambiguate by # hand or catch the exception. return '' else : return self . get_text_contents ( )
Fetch the decoded text contents of a Unicode encoded Entry .
125
13
21,850
def must_be_same ( self , klass ) : if self . __class__ is not klass : self . __class__ = klass self . _morph ( ) self . clear ( )
Called to make sure a Node is a Dir . Since we re an Entry we can morph into one .
43
22
21,851
def chdir ( self , dir , change_os_dir = 0 ) : curr = self . _cwd try : if dir is not None : self . _cwd = dir if change_os_dir : os . chdir ( dir . get_abspath ( ) ) except OSError : self . _cwd = curr raise
Change the current working directory for lookups . If change_os_dir is true we will also change the real cwd to match .
77
28
21,852
def get_root ( self , drive ) : drive = _my_normcase ( drive ) try : return self . Root [ drive ] except KeyError : root = RootDir ( drive , self ) self . Root [ drive ] = root if not drive : self . Root [ self . defaultDrive ] = root elif drive == self . defaultDrive : self . Root [ '' ] = root return root
Returns the root directory for the specified drive creating it if necessary .
84
13
21,853
def _lookup ( self , p , directory , fsclass , create = 1 ) : if isinstance ( p , Base ) : # It's already a Node.FS object. Make sure it's the right # class and return. p . must_be_same ( fsclass ) return p # str(p) in case it's something like a proxy object p = str ( p ) if not os_sep_is_slash : p = p . replace ( OS_SEP , '/' ) if p [ 0 : 1 ] == '#' : # There was an initial '#', so we strip it and override # whatever directory they may have specified with the # top-level SConstruct directory. p = p [ 1 : ] directory = self . Top # There might be a drive letter following the # '#'. Although it is not described in the SCons man page, # the regression test suite explicitly tests for that # syntax. It seems to mean the following thing: # # Assuming the the SCons top dir is in C:/xxx/yyy, # '#X:/toto' means X:/xxx/yyy/toto. # # i.e. it assumes that the X: drive has a directory # structure similar to the one found on drive C:. if do_splitdrive : drive , p = _my_splitdrive ( p ) if drive : root = self . get_root ( drive ) else : root = directory . root else : root = directory . root # We can only strip trailing after splitting the drive # since the drive might the UNC '//' prefix. p = p . strip ( '/' ) needs_normpath = needs_normpath_match ( p ) # The path is relative to the top-level SCons directory. if p in ( '' , '.' ) : p = directory . get_labspath ( ) else : p = directory . get_labspath ( ) + '/' + p else : if do_splitdrive : drive , p = _my_splitdrive ( p ) if drive and not p : # This causes a naked drive letter to be treated # as a synonym for the root directory on that # drive. p = '/' else : drive = '' # We can only strip trailing '/' since the drive might the # UNC '//' prefix. if p != '/' : p = p . rstrip ( '/' ) needs_normpath = needs_normpath_match ( p ) if p [ 0 : 1 ] == '/' : # Absolute path root = self . get_root ( drive ) else : # This is a relative lookup or to the current directory # (the path name is not absolute). 
Add the string to the # appropriate directory lookup path, after which the whole # thing gets normalized. if directory : if not isinstance ( directory , Dir ) : directory = self . Dir ( directory ) else : directory = self . _cwd if p in ( '' , '.' ) : p = directory . get_labspath ( ) else : p = directory . get_labspath ( ) + '/' + p if drive : root = self . get_root ( drive ) else : root = directory . root if needs_normpath is not None : # Normalize a pathname. Will return the same result for # equivalent paths. # # We take advantage of the fact that we have an absolute # path here for sure. In addition, we know that the # components of lookup path are separated by slashes at # this point. Because of this, this code is about 2X # faster than calling os.path.normpath() followed by # replacing os.sep with '/' again. ins = p . split ( '/' ) [ 1 : ] outs = [ ] for d in ins : if d == '..' : try : outs . pop ( ) except IndexError : pass elif d not in ( '' , '.' ) : outs . append ( d ) p = '/' + '/' . join ( outs ) return root . _lookup_abs ( p , fsclass , create )
The generic entry point for Node lookup with user - supplied data .
868
13
21,854
def VariantDir ( self , variant_dir , src_dir , duplicate = 1 ) : if not isinstance ( src_dir , SCons . Node . Node ) : src_dir = self . Dir ( src_dir ) if not isinstance ( variant_dir , SCons . Node . Node ) : variant_dir = self . Dir ( variant_dir ) if src_dir . is_under ( variant_dir ) : raise SCons . Errors . UserError ( "Source directory cannot be under variant directory." ) if variant_dir . srcdir : if variant_dir . srcdir == src_dir : return # We already did this. raise SCons . Errors . UserError ( "'%s' already has a source directory: '%s'." % ( variant_dir , variant_dir . srcdir ) ) variant_dir . link ( src_dir , duplicate )
Link the supplied variant directory to the source directory for purposes of building files .
186
15
21,855
def Repository ( self , * dirs ) : for d in dirs : if not isinstance ( d , SCons . Node . Node ) : d = self . Dir ( d ) self . Top . addRepository ( d )
Specify Repository directories to search .
50
8
21,856
def variant_dir_target_climb ( self , orig , dir , tail ) : targets = [ ] message = None fmt = "building associated VariantDir targets: %s" start_dir = dir while dir : for bd in dir . variant_dirs : if start_dir . is_under ( bd ) : # If already in the build-dir location, don't reflect return [ orig ] , fmt % str ( orig ) p = os . path . join ( bd . _path , * tail ) targets . append ( self . Entry ( p ) ) tail = [ dir . name ] + tail dir = dir . up ( ) if targets : message = fmt % ' ' . join ( map ( str , targets ) ) return targets , message
Create targets in corresponding variant directories
161
6
21,857
def Dir ( self , name , create = True ) : return self . fs . Dir ( name , self , create )
Looks up or creates a directory node named name relative to this directory .
25
14
21,858
def link ( self , srcdir , duplicate ) : self . srcdir = srcdir self . duplicate = duplicate self . __clearRepositoryCache ( duplicate ) srcdir . variant_dirs . append ( self )
Set this directory as the variant directory for the supplied source directory .
45
13
21,859
def getRepositories ( self ) : if self . srcdir and not self . duplicate : return self . srcdir . get_all_rdirs ( ) + self . repositories return self . repositories
Returns a list of repositories for this directory .
41
9
21,860
def rel_path ( self , other ) : # This complicated and expensive method, which constructs relative # paths between arbitrary Node.FS objects, is no longer used # by SCons itself. It was introduced to store dependency paths # in .sconsign files relative to the target, but that ended up # being significantly inefficient. # # We're continuing to support the method because some SConstruct # files out there started using it when it was available, and # we're all about backwards compatibility.. try : memo_dict = self . _memo [ 'rel_path' ] except KeyError : memo_dict = { } self . _memo [ 'rel_path' ] = memo_dict else : try : return memo_dict [ other ] except KeyError : pass if self is other : result = '.' elif not other in self . _path_elements : try : other_dir = other . get_dir ( ) except AttributeError : result = str ( other ) else : if other_dir is None : result = other . name else : dir_rel_path = self . rel_path ( other_dir ) if dir_rel_path == '.' : result = other . name else : result = dir_rel_path + OS_SEP + other . name else : i = self . _path_elements . index ( other ) + 1 path_elems = [ '..' ] * ( len ( self . _path_elements ) - i ) + [ n . name for n in other . _path_elements [ i : ] ] result = OS_SEP . join ( path_elems ) memo_dict [ other ] = result return result
Return a path to other relative to this directory .
359
10
21,861
def get_found_includes ( self , env , scanner , path ) : if not scanner : return [ ] # Clear cached info for this Dir. If we already visited this # directory on our walk down the tree (because we didn't know at # that point it was being used as the source for another Node) # then we may have calculated build signature before realizing # we had to scan the disk. Now that we have to, though, we need # to invalidate the old calculated signature so that any node # dependent on our directory structure gets one that includes # info about everything on disk. self . clear ( ) return scanner ( self , env , path )
Return this directory s implicit dependencies .
136
7
21,862
def build ( self , * * kw ) : global MkdirBuilder if self . builder is not MkdirBuilder : SCons . Node . Node . build ( self , * * kw )
A null builder for directories .
41
6
21,863
def _create ( self ) : listDirs = [ ] parent = self while parent : if parent . exists ( ) : break listDirs . append ( parent ) p = parent . up ( ) if p is None : # Don't use while: - else: for this condition because # if so, then parent is None and has no .path attribute. raise SCons . Errors . StopError ( parent . _path ) parent = p listDirs . reverse ( ) for dirnode in listDirs : try : # Don't call dirnode.build(), call the base Node method # directly because we definitely *must* create this # directory. The dirnode.build() method will suppress # the build if it's the default builder. SCons . Node . Node . build ( dirnode ) dirnode . get_executor ( ) . nullify ( ) # The build() action may or may not have actually # created the directory, depending on whether the -n # option was used or not. Delete the _exists and # _rexists attributes so they can be reevaluated. dirnode . clear ( ) except OSError : pass
Create this directory silently and without worrying about whether the builder is the default or not .
242
17
21,864
def is_up_to_date ( self ) : if self . builder is not MkdirBuilder and not self . exists ( ) : return 0 up_to_date = SCons . Node . up_to_date for kid in self . children ( ) : if kid . get_state ( ) > up_to_date : return 0 return 1
If any child is not up - to - date then this directory isn t either .
75
17
21,865
def get_timestamp ( self ) : stamp = 0 for kid in self . children ( ) : if kid . get_timestamp ( ) > stamp : stamp = kid . get_timestamp ( ) return stamp
Return the latest timestamp from among our children
45
8
21,866
def walk ( self , func , arg ) : entries = self . entries names = list ( entries . keys ( ) ) names . remove ( '.' ) names . remove ( '..' ) func ( arg , self , names ) for dirname in [ n for n in names if isinstance ( entries [ n ] , Dir ) ] : entries [ dirname ] . walk ( func , arg )
Walk this directory tree by calling the specified function for each directory in the tree .
83
16
21,867
def _glob1 ( self , pattern , ondisk = True , source = False , strings = False ) : search_dir_list = self . get_all_rdirs ( ) for srcdir in self . srcdir_list ( ) : search_dir_list . extend ( srcdir . get_all_rdirs ( ) ) selfEntry = self . Entry names = [ ] for dir in search_dir_list : # We use the .name attribute from the Node because the keys of # the dir.entries dictionary are normalized (that is, all upper # case) on case-insensitive systems like Windows. node_names = [ v . name for k , v in dir . entries . items ( ) if k not in ( '.' , '..' ) ] names . extend ( node_names ) if not strings : # Make sure the working directory (self) actually has # entries for all Nodes in repositories or variant dirs. for name in node_names : selfEntry ( name ) if ondisk : try : disk_names = os . listdir ( dir . _abspath ) except os . error : continue names . extend ( disk_names ) if not strings : # We're going to return corresponding Nodes in # the local directory, so we need to make sure # those Nodes exist. We only want to create # Nodes for the entries that will match the # specified pattern, though, which means we # need to filter the list here, even though # the overall list will also be filtered later, # after we exit this loop. if pattern [ 0 ] != '.' : disk_names = [ x for x in disk_names if x [ 0 ] != '.' ] disk_names = fnmatch . filter ( disk_names , pattern ) dirEntry = dir . Entry for name in disk_names : # Add './' before disk filename so that '#' at # beginning of filename isn't interpreted. name = './' + name node = dirEntry ( name ) . disambiguate ( ) n = selfEntry ( name ) if n . __class__ != node . __class__ : n . __class__ = node . __class__ n . _morph ( ) names = set ( names ) if pattern [ 0 ] != '.' : names = [ x for x in names if x [ 0 ] != '.' ] names = fnmatch . filter ( names , pattern ) if strings : return names return [ self . entries [ _my_normcase ( n ) ] for n in names ]
Globs for and returns a list of entry names matching a single pattern in this directory .
538
18
21,868
def convert_to_sconsign ( self ) : if os_sep_is_slash : node_to_str = str else : def node_to_str ( n ) : try : s = n . get_internal_path ( ) except AttributeError : s = str ( n ) else : s = s . replace ( OS_SEP , '/' ) return s for attr in [ 'bsources' , 'bdepends' , 'bimplicit' ] : try : val = getattr ( self , attr ) except AttributeError : pass else : setattr ( self , attr , list ( map ( node_to_str , val ) ) )
Converts this FileBuildInfo object for writing to a . sconsign file
149
16
21,869
def prepare_dependencies ( self ) : attrs = [ ( 'bsources' , 'bsourcesigs' ) , ( 'bdepends' , 'bdependsigs' ) , ( 'bimplicit' , 'bimplicitsigs' ) , ] for ( nattr , sattr ) in attrs : try : strings = getattr ( self , nattr ) nodeinfos = getattr ( self , sattr ) except AttributeError : continue if strings is None or nodeinfos is None : continue nodes = [ ] for s , ni in zip ( strings , nodeinfos ) : if not isinstance ( s , SCons . Node . Node ) : s = ni . str_to_node ( s ) nodes . append ( s ) setattr ( self , nattr , nodes )
Prepares a FileBuildInfo object for explaining what changed
173
11
21,870
def Dir ( self , name , create = True ) : return self . dir . Dir ( name , create = create )
Create a directory node named name relative to the directory of this file .
25
14
21,871
def _morph ( self ) : self . scanner_paths = { } if not hasattr ( self , '_local' ) : self . _local = 0 if not hasattr ( self , 'released_target_info' ) : self . released_target_info = False self . store_info = 1 self . _func_exists = 4 self . _func_get_contents = 3 # Initialize this Node's decider function to decide_source() because # every file is a source file until it has a Builder attached... self . changed_since_last_build = 4 # If there was already a Builder set on this entry, then # we need to make sure we call the target-decider function, # not the source-decider. Reaching in and doing this by hand # is a little bogus. We'd prefer to handle this by adding # an Entry.builder_set() method that disambiguates like the # other methods, but that starts running into problems with the # fragile way we initialize Dir Nodes with their Mkdir builders, # yet still allow them to be overridden by the user. Since it's # not clear right now how to fix that, stick with what works # until it becomes clear... if self . has_builder ( ) : self . changed_since_last_build = 5
Turn a file system node into a File object .
283
10
21,872
def get_text_contents ( self ) : contents = self . get_contents ( ) # The behavior of various decode() methods and functions # w.r.t. the initial BOM bytes is different for different # encodings and/or Python versions. ('utf-8' does not strip # them, but has a 'utf-8-sig' which does; 'utf-16' seems to # strip them; etc.) Just sidestep all the complication by # explicitly stripping the BOM before we decode(). if contents [ : len ( codecs . BOM_UTF8 ) ] == codecs . BOM_UTF8 : return contents [ len ( codecs . BOM_UTF8 ) : ] . decode ( 'utf-8' ) if contents [ : len ( codecs . BOM_UTF16_LE ) ] == codecs . BOM_UTF16_LE : return contents [ len ( codecs . BOM_UTF16_LE ) : ] . decode ( 'utf-16-le' ) if contents [ : len ( codecs . BOM_UTF16_BE ) ] == codecs . BOM_UTF16_BE : return contents [ len ( codecs . BOM_UTF16_BE ) : ] . decode ( 'utf-16-be' ) try : return contents . decode ( 'utf-8' ) except UnicodeDecodeError as e : try : return contents . decode ( 'latin-1' ) except UnicodeDecodeError as e : return contents . decode ( 'utf-8' , error = 'backslashreplace' )
This attempts to figure out what the encoding of the text is based upon the BOM bytes and then decodes the contents so that it s a valid python string .
345
33
21,873
def get_content_hash ( self ) : if not self . rexists ( ) : return SCons . Util . MD5signature ( '' ) fname = self . rfile ( ) . get_abspath ( ) try : cs = SCons . Util . MD5filesignature ( fname , chunksize = SCons . Node . FS . File . md5_chunksize * 1024 ) except EnvironmentError as e : if not e . filename : e . filename = fname raise return cs
Compute and return the MD5 hash for this file .
112
12
21,874
def get_found_includes ( self , env , scanner , path ) : memo_key = ( id ( env ) , id ( scanner ) , path ) try : memo_dict = self . _memo [ 'get_found_includes' ] except KeyError : memo_dict = { } self . _memo [ 'get_found_includes' ] = memo_dict else : try : return memo_dict [ memo_key ] except KeyError : pass if scanner : result = [ n . disambiguate ( ) for n in scanner ( self , env , path ) ] else : result = [ ] memo_dict [ memo_key ] = result return result
Return the included implicit dependencies in this file . Cache results so we only scan the file once per path regardless of how many times this information is requested .
143
30
21,875
def push_to_cache ( self ) : # This should get called before the Nodes' .built() method is # called, which would clear the build signature if the file has # a source scanner. # # We have to clear the local memoized values *before* we push # the node to cache so that the memoization of the self.exists() # return value doesn't interfere. if self . nocache : return self . clear_memoized_values ( ) if self . exists ( ) : self . get_build_env ( ) . get_CacheDir ( ) . push ( self )
Try to push the node into a cache
130
8
21,876
def retrieve_from_cache ( self ) : if self . nocache : return None if not self . is_derived ( ) : return None return self . get_build_env ( ) . get_CacheDir ( ) . retrieve ( self )
Try to retrieve the node s content from a cache
53
10
21,877
def release_target_info ( self ) : if ( self . released_target_info or SCons . Node . interactive ) : return if not hasattr ( self . attributes , 'keep_targetinfo' ) : # Cache some required values, before releasing # stuff like env, executor and builder... self . changed ( allowcache = True ) self . get_contents_sig ( ) self . get_build_env ( ) # Now purge unneeded stuff to free memory... self . executor = None self . _memo . pop ( 'rfile' , None ) self . prerequisites = None # Cleanup lists, but only if they're empty if not len ( self . ignore_set ) : self . ignore_set = None if not len ( self . implicit_set ) : self . implicit_set = None if not len ( self . depends_set ) : self . depends_set = None if not len ( self . ignore ) : self . ignore = None if not len ( self . depends ) : self . depends = None # Mark this node as done, we only have to release # the memory once... self . released_target_info = True
Called just after this node has been marked up - to - date or was built completely .
249
19
21,878
def has_src_builder ( self ) : try : scb = self . sbuilder except AttributeError : scb = self . sbuilder = self . find_src_builder ( ) return scb is not None
Return whether this Node has a source builder or not .
47
11
21,879
def alter_targets ( self ) : if self . is_derived ( ) : return [ ] , None return self . fs . variant_dir_target_climb ( self , self . dir , [ self . name ] )
Return any corresponding targets in a variant directory .
50
9
21,880
def prepare ( self ) : SCons . Node . Node . prepare ( self ) if self . get_state ( ) != SCons . Node . up_to_date : if self . exists ( ) : if self . is_derived ( ) and not self . precious : self . _rmv_existing ( ) else : try : self . _createDir ( ) except SCons . Errors . StopError as drive : raise SCons . Errors . StopError ( "No drive `{}' for target `{}'." . format ( drive , self ) )
Prepare for this file to be created .
119
9
21,881
def remove ( self ) : if self . exists ( ) or self . islink ( ) : self . fs . unlink ( self . get_internal_path ( ) ) return 1 return None
Remove this file .
41
4
21,882
def get_max_drift_csig ( self ) : old = self . get_stored_info ( ) mtime = self . get_timestamp ( ) max_drift = self . fs . max_drift if max_drift > 0 : if ( time . time ( ) - mtime ) > max_drift : try : n = old . ninfo if n . timestamp and n . csig and n . timestamp == mtime : return n . csig except AttributeError : pass elif max_drift == 0 : try : return old . ninfo . csig except AttributeError : pass return None
Returns the content signature currently stored for this node if it s been unmodified longer than the max_drift value or the max_drift value is 0 . Returns None otherwise .
138
37
21,883
def built ( self ) : SCons . Node . Node . built ( self ) if ( not SCons . Node . interactive and not hasattr ( self . attributes , 'keep_targetinfo' ) ) : # Ensure that the build infos get computed and cached... SCons . Node . store_info_map [ self . store_info ] ( self ) # ... then release some more variables. self . _specific_sources = False self . _labspath = None self . _save_str ( ) self . cwd = None self . scanner_paths = None
Called just after this File node is successfully built .
123
11
21,884
def changed ( self , node = None , allowcache = False ) : if node is None : try : return self . _memo [ 'changed' ] except KeyError : pass has_changed = SCons . Node . Node . changed ( self , node ) if allowcache : self . _memo [ 'changed' ] = has_changed return has_changed
Returns if the node is up - to - date with respect to the BuildInfo stored last time it was built .
77
23
21,885
def get_cachedir_csig ( self ) : try : return self . cachedir_csig except AttributeError : pass cachedir , cachefile = self . get_build_env ( ) . get_CacheDir ( ) . cachepath ( self ) if not self . exists ( ) and cachefile and os . path . exists ( cachefile ) : self . cachedir_csig = SCons . Util . MD5filesignature ( cachefile , SCons . Node . FS . File . md5_chunksize * 1024 ) else : self . cachedir_csig = self . get_csig ( ) return self . cachedir_csig
Fetch a Node s content signature for purposes of computing another Node s cachesig .
145
17
21,886
def get_contents_sig ( self ) : try : return self . contentsig except AttributeError : pass executor = self . get_executor ( ) result = self . contentsig = SCons . Util . MD5signature ( executor . get_contents ( ) ) return result
A helper method for get_cachedir_bsig .
66
13
21,887
def get_cachedir_bsig ( self ) : try : return self . cachesig except AttributeError : pass # Collect signatures for all children children = self . children ( ) sigs = [ n . get_cachedir_csig ( ) for n in children ] # Append this node's signature... sigs . append ( self . get_contents_sig ( ) ) # ...and it's path sigs . append ( self . get_internal_path ( ) ) # Merge this all into a single signature result = self . cachesig = SCons . Util . MD5collect ( sigs ) return result
Return the signature for a cached file including its children .
136
11
21,888
def find_file ( self , filename , paths , verbose = None ) : memo_key = self . _find_file_key ( filename , paths ) try : memo_dict = self . _memo [ 'find_file' ] except KeyError : memo_dict = { } self . _memo [ 'find_file' ] = memo_dict else : try : return memo_dict [ memo_key ] except KeyError : pass if verbose and not callable ( verbose ) : if not SCons . Util . is_String ( verbose ) : verbose = "find_file" _verbose = u' %s: ' % verbose verbose = lambda s : sys . stdout . write ( _verbose + s ) filedir , filename = os . path . split ( filename ) if filedir : self . default_filedir = filedir paths = [ _f for _f in map ( self . filedir_lookup , paths ) if _f ] result = None for dir in paths : if verbose : verbose ( "looking for '%s' in '%s' ...\n" % ( filename , dir ) ) node , d = dir . srcdir_find_file ( filename ) if node : if verbose : verbose ( "... FOUND '%s' in '%s'\n" % ( filename , d ) ) result = node break memo_dict [ memo_key ] = result return result
Find a node corresponding to either a derived file or a file that exists already .
317
16
21,889
def run ( self , resources ) : hwman = resources [ 'connection' ] updater = hwman . hwman . app ( name = 'device_updater' ) updater . run_script ( self . _script , no_reboot = self . _no_reboot )
Actually send the trub script .
66
7
21,890
def process_gatt_service ( services , event ) : length = len ( event . payload ) - 5 handle , start , end , uuid = unpack ( '<BHH%ds' % length , event . payload ) uuid = process_uuid ( uuid ) services [ uuid ] = { 'uuid_raw' : uuid , 'start_handle' : start , 'end_handle' : end }
Process a BGAPI event containing a GATT service description and add it to a dictionary
93
17
21,891
def handle_to_uuid ( handle , services ) : for service in services . values ( ) : for char_uuid , char_def in service [ 'characteristics' ] . items ( ) : if char_def [ 'handle' ] == handle : return char_uuid raise ValueError ( "Handle not found in GATT table" )
Find the corresponding UUID for an attribute handle
75
9
21,892
def _validator ( key , val , env ) : if not env [ key ] in ( True , False ) : raise SCons . Errors . UserError ( 'Invalid value for boolean option %s: %s' % ( key , env [ key ] ) )
Validates the given value to be either 0 or 1 . This is usable as validator for SCons Variables .
56
24
21,893
def FromDictionary ( cls , msg_dict ) : level = msg_dict . get ( 'level' ) msg = msg_dict . get ( 'message' ) now = msg_dict . get ( 'now_time' ) created = msg_dict . get ( 'created_time' ) count = msg_dict . get ( 'count' , 1 ) msg_id = msg_dict . get ( 'id' , 0 ) new_msg = ServiceMessage ( level , msg , msg_id , created , now ) if count > 1 : new_msg . count = count return new_msg
Create from a dictionary with kv pairs .
130
9
21,894
def to_dict ( self ) : msg_dict = { } msg_dict [ 'level' ] = self . level msg_dict [ 'message' ] = self . message msg_dict [ 'now_time' ] = monotonic ( ) msg_dict [ 'created_time' ] = self . created msg_dict [ 'id' ] = self . id msg_dict [ 'count' ] = self . count return msg_dict
Create a dictionary with the information in this message .
96
10
21,895
def get_message ( self , message_id ) : for message in self . messages : if message . id == message_id : return message raise ArgumentError ( "Message ID not found" , message_id = message_id )
Get a message by its persistent id .
49
8
21,896
def post_message ( self , level , message , count = 1 , timestamp = None , now_reference = None ) : if len ( self . messages ) > 0 and self . messages [ - 1 ] . message == message : self . messages [ - 1 ] . count += 1 else : msg_object = ServiceMessage ( level , message , self . _last_message_id , timestamp , now_reference ) msg_object . count = count self . messages . append ( msg_object ) self . _last_message_id += 1 return self . messages [ - 1 ]
Post a new message for service .
122
7
21,897
def set_headline ( self , level , message , timestamp = None , now_reference = None ) : if self . headline is not None and self . headline . message == message : self . headline . created = monotonic ( ) self . headline . count += 1 return msg_object = ServiceMessage ( level , message , self . _last_message_id , timestamp , now_reference ) self . headline = msg_object self . _last_message_id += 1
Set the persistent headline message for this service .
101
9
21,898
def generate_doxygen_file ( output_path , iotile ) : mapping = { } mapping [ 'short_name' ] = iotile . short_name mapping [ 'full_name' ] = iotile . full_name mapping [ 'authors' ] = iotile . authors mapping [ 'version' ] = iotile . version render_template ( 'doxygen.txt.tpl' , mapping , out_path = output_path )
Fill in our default doxygen template file with info from an IOTile
103
16
21,899
def pull ( name , version , force = False ) : chain = DependencyResolverChain ( ) ver = SemanticVersionRange . FromString ( version ) chain . pull_release ( name , ver , force = force )
Pull a released IOTile component into the current working directory
47
12