idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
22,100
def pull_release(self, name, version, destfolder=".", force=False):
    """Download and unpack a released iotile component by name and version range.

    Raises ExternalError when the output directory already exists and
    force is False; raises ArgumentError when no resolver can satisfy
    the name/version combination.
    """
    slug = name.replace('/', '_')
    target_dir = os.path.join(destfolder, slug)

    dep_info = {
        'name': name,
        'unique_id': slug,
        'required_version': version,
        'required_version_string': str(version)
    }

    if os.path.exists(target_dir):
        if not force:
            raise ExternalError("Output directory exists and force was not specified, aborting",
                                output_directory=target_dir)

        shutil.rmtree(target_dir)

    outcome = self.update_dependency(None, dep_info, target_dir)
    if outcome != "installed":
        raise ArgumentError("Could not find component to satisfy name/version combination")
Download and unpack a released iotile component by name and version range
185
15
22,101
def update_dependency(self, tile, depinfo, destdir=None):
    """Attempt to install or update a dependency to the latest version.

    Returns one of "installed", "updated", "already installed" or
    "not found" describing what action was taken.
    """
    if destdir is None:
        destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])

    was_installed = os.path.exists(destdir)
    is_installed = was_installed

    for _priority, rule in self.rules:
        if not self._check_rule(rule, depinfo):
            continue

        resolver = self._find_resolver(rule)

        if is_installed:
            installed_tile = IOTile(destdir)

            # If the installed copy is stale, remove it so it can be
            # reinstalled below; otherwise this rule has nothing to do.
            depstatus = self._check_dep(depinfo, installed_tile, resolver)
            if depstatus is False:
                shutil.rmtree(destdir)
                is_installed = False
            else:
                continue

        # Now try to resolve this dependency with the latest version
        result = resolver.resolve(depinfo, destdir)
        if not result['found'] and result.get('stop', False):
            return 'not found'

        if not result['found']:
            continue

        settings = {'resolver': resolver.__class__.__name__, 'factory_args': rule[2]}
        if 'settings' in result:
            settings['settings'] = result['settings']

        self._save_depsettings(destdir, settings)

        if was_installed:
            return "updated"
        return "installed"

    if is_installed:
        return "already installed"
    return "not found"
Attempt to install or update a dependency to the latest version .
352
12
22,102
def _check_dep(self, depinfo, deptile, resolver):
    """Check if a dependency tile is up to date.

    Returns False when no settings could be loaded, None when the
    dependency was originally resolved by a different resolver (so
    freshness cannot be checked), otherwise the resolver's verdict.
    """
    try:
        settings = self._load_depsettings(deptile)
    except IOError:
        return False

    # A different resolver produced this copy, so we cannot tell
    # whether it is up to date.
    if settings['resolver'] != resolver.__class__.__name__:
        return None

    resolver_settings = settings.get('settings', {})
    return resolver.check(depinfo, deptile, resolver_settings)
Check if a dependency tile is up to date
130
9
22,103
def _log_future_exception ( future , logger ) : if not future . done ( ) : return try : future . result ( ) except : #pylint:disable=bare-except;This is a background logging helper logger . warning ( "Exception in ignored future: %s" , future , exc_info = True )
Log any exception raised by future .
71
7
22,104
def create_subtask(self, cor, name=None, stop_timeout=1.0):
    """Create and add a subtask from a coroutine.

    Raises InternalError if this task has already been stopped.
    Returns the newly created BackgroundTask.
    """
    if self.stopped:
        raise InternalError("Cannot add a subtask to a parent that is already stopped")

    child = BackgroundTask(cor, name, loop=self._loop, stop_timeout=stop_timeout)
    self.add_subtask(child)
    return child
Create and add a subtask from a coroutine .
86
11
22,105
def add_subtask(self, subtask):
    """Link a subtask to this parent task.

    Raises InternalError if this task is stopped, ArgumentError if the
    subtask is not a BackgroundTask or runs in a different loop.
    """
    if self.stopped:
        raise InternalError("Cannot add a subtask to a parent that is already stopped")

    if not isinstance(subtask, BackgroundTask):
        raise ArgumentError("Subtasks must inherit from BackgroundTask, task={}".format(subtask))

    #pylint:disable=protected-access;It is the same class as us so is equivalent to self access.
    if subtask._loop != self._loop:
        raise ArgumentError("Subtasks must run in the same BackgroundEventLoop as their parent",
                            subtask=subtask, parent=self)

    self.subtasks.append(subtask)
Link a subtask to this parent task .
151
9
22,106
async def stop(self):
    """Stop this task and wait until it and all its subtasks end."""
    if self.stopped:
        return

    self._logger.debug("Stopping task %s", self.name)

    if self._finalizer is not None:
        try:
            outcome = self._finalizer(self)
            if inspect.isawaitable(outcome):
                await outcome
        except:  #pylint:disable=bare-except;We need to make sure we always wait for the task
            self._logger.exception("Error running finalizer for task %s", self.name)
    elif self.task is not None:
        self.task.cancel()

    pending = []
    if self.task is not None:
        pending.append(self.task)
    pending.extend(child.task for child in self.subtasks)

    gathered = asyncio.gather(*pending, return_exceptions=True)

    results = []
    try:
        results = await asyncio.wait_for(gathered, timeout=self._stop_timeout)
    except asyncio.TimeoutError as err:
        # See https://github.com/python/asyncio/issues/253#issuecomment-120138132
        # This prevents a nuisance log error message: gathered is guaranteed
        # to be cancelled but not awaited when wait_for() times out.
        try:
            results = await gathered
        except asyncio.CancelledError:
            pass

        # See https://mail.python.org/pipermail/python-3000/2008-May/013740.html
        # for why we need to explicitly name the error here
        raise err
    finally:
        self.stopped = True

        for item in results:
            if isinstance(item, Exception) and not isinstance(item, asyncio.CancelledError):
                self._logger.error(item)

        if self in self._loop.tasks:
            self._loop.tasks.remove(self)
Stop this task and wait until it and all its subtasks end .
400
14
22,107
def stop_threadsafe(self):
    """Stop this task from another thread and wait for it to finish.

    Raises TimeoutExpiredError when the task does not stop within its
    configured stop timeout.
    """
    if self.stopped:
        return

    try:
        self._loop.run_coroutine(self.stop())
    except asyncio.TimeoutError:
        raise TimeoutExpiredError("Timeout stopping task {} with {} subtasks".format(
            self.name, len(self.subtasks)))
Stop this task from another thread and wait for it to finish .
74
13
22,108
def start(self, aug='EventLoopThread'):
    """Ensure the background loop is running.

    Creates the event loop and a daemon thread for it on first call;
    later calls are no-ops. Raises LoopStoppingError while stopping.
    """
    if self.stopping:
        raise LoopStoppingError("Cannot perform action while loop is stopping.")

    if self.loop:
        return

    self._logger.debug("Starting event loop")
    self.loop = asyncio.new_event_loop()
    self.thread = threading.Thread(target=self._loop_thread_main, name=aug, daemon=True)
    self.thread.start()
Ensure the background loop is running .
105
8
22,109
def wait_for_interrupt(self, check_interval=1.0, max_time=None):
    """Run the event loop until we receive a ctrl-c interrupt or max_time passes."""
    self.start()

    interval = max(check_interval, 0.01)
    elapsed = 0

    try:
        while max_time is None or elapsed < max_time:
            try:
                time.sleep(interval)
            except IOError:
                pass  # IOError happens when the sleep is interrupted by a signal handler

            elapsed += interval
    except KeyboardInterrupt:
        pass
Run the event loop until we receive a ctrl - c interrupt or max_time passes .
97
19
22,110
def stop(self):
    """Synchronously stop the background loop from outside.

    Must not be called from inside the loop thread itself; doing so
    would deadlock and raises InternalError instead.
    """
    if not self.loop:
        return

    if self.inside_loop():
        raise InternalError("BackgroundEventLoop.stop() called from inside event loop; "
                            "would have deadlocked.")

    try:
        self.run_coroutine(self._stop_internal())
        self.thread.join()
    except:
        self._logger.exception("Error stopping BackgroundEventLoop")
        raise
    finally:
        # Reset state so the loop could in principle be restarted.
        self.thread = None
        self.loop = None
        self.tasks = set()
Synchronously stop the background loop from outside .
110
10
22,111
async def _stop_internal(self):
    """Cleanly stop the event loop after shutting down all tasks."""
    # Make sure we only try to stop once
    if self.stopping is True:
        return

    self.stopping = True

    awaitables = [task.stop() for task in self.tasks]
    results = await asyncio.gather(*awaitables, return_exceptions=True)
    for task, result in zip(self.tasks, results):
        if isinstance(result, Exception):
            self._logger.error("Error stopping task %s: %s", task.name, repr(result))

    # It is important to defer this call by one loop cycle so that this
    # coroutine is finalized and anyone blocking on it resumes execution.
    self.loop.call_soon(self.loop.stop)
Cleanly stop the event loop after shutting down all tasks .
162
12
22,112
def _loop_thread_main(self):
    """Main background thread running the event loop."""
    asyncio.set_event_loop(self.loop)
    self._loop_check.inside_loop = True

    try:
        self._logger.debug("Starting loop in background thread")
        self.loop.run_forever()
        self._logger.debug("Finished loop in background thread")
    except:  # pylint:disable=bare-except;This is a background worker thread.
        self._logger.exception("Exception raised from event loop thread")
    finally:
        self.loop.close()
Main background thread running the event loop .
127
8
22,113
def add_task(self, cor, name=None, finalizer=None, stop_timeout=1.0, parent=None):
    """Schedule a task to run on the background event loop.

    When parent is given it must already be registered; the new task is
    attached as its subtask, otherwise it becomes a primary task.
    Returns the created BackgroundTask.
    """
    if self.stopping:
        raise LoopStoppingError("Cannot add task because loop is stopping")

    # Ensure the loop exists and is started
    self.start()

    if parent is not None and parent not in self.tasks:
        raise ArgumentError("Designated parent task {} is not registered".format(parent))

    task = BackgroundTask(cor, name, finalizer, stop_timeout, loop=self)

    if parent is None:
        self.tasks.add(task)
        self._logger.debug("Added primary task %s", task.name)
    else:
        parent.add_subtask(task)
        self._logger.debug("Added subtask %s to parent %s", task.name, parent.name)

    return task
Schedule a task to run on the background event loop .
193
12
22,114
def run_coroutine(self, cor, *args, **kwargs):
    """Run a coroutine to completion and return its result.

    Must be called from outside the loop thread; blocking on the result
    from inside the loop would deadlock and raises InternalError.
    """
    if self.stopping:
        raise LoopStoppingError("Could not launch coroutine because loop is shutting down: %s" % cor)

    self.start()

    cor = _instaniate_coroutine(cor, args, kwargs)

    if self.inside_loop():
        raise InternalError("BackgroundEventLoop.run_coroutine called from inside event loop, "
                            "would have deadlocked.")

    future = self.launch_coroutine(cor)
    return future.result()
Run a coroutine to completion and return its result .
125
11
22,115
def log_coroutine(self, cor, *args, **kwargs):
    """Run a coroutine, logging any exception it raises.

    Safe to call from inside or outside the loop thread; the coroutine
    is scheduled thread-safely when called from outside.
    """
    if self.stopping:
        raise LoopStoppingError("Could not launch coroutine because loop is shutting down: %s" % cor)

    self.start()

    cor = _instaniate_coroutine(cor, args, kwargs)

    def _run_and_log():
        # Attach a done callback so any exception is logged, not lost.
        task = self.loop.create_task(cor)
        task.add_done_callback(lambda x: _log_future_exception(x, self._logger))

    if self.inside_loop():
        _run_and_log()
    else:
        self.loop.call_soon_threadsafe(_run_and_log)
Run a coroutine logging any exception raised .
159
9
22,116
def link_cloud(self, username=None, password=None, device_id=None):
    """Create and store a token for interacting with the IOTile Cloud API.

    Prompts interactively for any credential not supplied. When
    device_id is given, additionally impersonates that device.
    Raises ArgumentError when the login is rejected.
    """
    reg = ComponentRegistry()
    domain = self.get('cloud:server')

    if username is None:
        username = input("Please enter your IOTile.cloud email: ")

    if password is None:
        password = getpass.getpass("Please enter your IOTile.cloud password: ")

    cloud = Api(domain=domain)
    ok_resp = cloud.login(email=username, password=password)
    if not ok_resp:
        raise ArgumentError("Could not login to iotile.cloud as user %s" % username)

    reg.set_config('arch:cloud_user', cloud.username)
    reg.set_config('arch:cloud_token', cloud.token)
    reg.set_config('arch:cloud_token_type', cloud.token_type)

    if device_id is not None:
        cloud = IOTileCloud()
        cloud.impersonate_device(device_id)
Create and store a token for interacting with the IOTile Cloud API .
241
15
22,117
def _load_file(self):
    """Load all entries from the json backing file.

    Returns an empty dict when the backing file does not exist yet.
    """
    if not os.path.exists(self.file):
        return {}

    with open(self.file, "r") as infile:
        return json.load(infile)
Load all entries from json backing file
51
7
22,118
def _save_file(self, data):
    """Attempt to atomically save the file by writing then moving into position.

    On Windows os.rename cannot replace an existing file, so the data
    is written directly there (non-atomically).
    """
    if platform.system() == 'Windows':
        with open(self.file, "w") as outfile:
            json.dump(data, outfile)
        return

    temp_path = self.file + '.new'
    with open(temp_path, "w") as outfile:
        json.dump(data, outfile)

    os.rename(os.path.realpath(temp_path), os.path.realpath(self.file))
Attempt to atomically save file by saving and then moving into position
108
13
22,119
def remove(self, key):
    """Remove a key from the data store.

    Raises KeyError when the key is not present.
    """
    entries = self._load_file()
    del entries[key]
    self._save_file(entries)
Remove a key from the data store
32
7
22,120
def set(self, key, value):
    """Set the value of a key, creating it if necessary."""
    entries = self._load_file()
    entries[key] = value
    self._save_file(entries)
Set the value of a key
35
6
22,121
def trigger_chain(self):
    """Return a NodeInput tuple for creating a node."""
    stream = self.allocator.attach_stream(self.trigger_stream)
    return (stream, self.trigger_cond)
Return a NodeInput tuple for creating a node .
39
10
22,122
def generate(env):
    """Add Builders and construction variables for C compilers to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Register actions and emitters for every recognized C suffix.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    add_common_cc_variables(env)

    if 'CC' not in env:
        env['CC'] = env.Detect(compilers) or compilers[0]
    env['CFLAGS'] = SCons.Util.CLVar('')
    env['CCCOM'] = '$CC -o $TARGET -c $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
    env['SHCC'] = '$CC'
    env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
    env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'

    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''
    env['SHOBJSUFFIX'] = '.os'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0

    env['CFILESUFFIX'] = '.c'
Add Builders and construction variables for C compilers to an Environment .
400
14
22,123
def process_mock_rpc(input_string):
    """Process a mock RPC argument of the form slot:rpc_id=value.

    Prints a diagnostic and exits the process with status 1 on any
    parse failure. Returns a (slot, rpc_id, value) tuple on success.
    """
    spec, equals, value = input_string.partition(u'=')

    if len(equals) == 0:
        print("Could not parse mock RPC argument: {}".format(input_string))
        sys.exit(1)

    try:
        value = int(value.strip(), 0)
    except ValueError as exc:
        print("Could not parse mock RPC value: {}".format(str(exc)))
        sys.exit(1)

    slot, part, rpc_id = spec.partition(u":")
    if len(part) == 0:
        print("Could not parse mock RPC slot/rpc definition: {}".format(spec))
        sys.exit(1)

    try:
        slot = SlotIdentifier.FromString(slot)
    except ArgumentError as exc:
        print("Could not parse slot id in mock RPC definition: {}".format(exc.msg))
        sys.exit(1)

    try:
        rpc_id = int(rpc_id, 0)
    except ValueError as exc:
        print("Could not parse mock RPC number: {}".format(str(exc)))
        sys.exit(1)

    return slot, rpc_id, value
Process a mock RPC argument .
269
6
22,124
def watch_printer(watch, value):
    """Print a watched value together with its raw timestamp."""
    timestamp = value.raw_time
    reading = value.value
    print("({: 8} s) {}: {}".format(timestamp, watch, reading))
Print a watched value .
40
5
22,125
def main(argv=None):
    """Main entry point for the iotile sensorgraph simulator.

    Returns 0 on success. NOTE(review): if build_args() or parse_args()
    raises, the finally block references `executor` before assignment —
    preserved from the original; confirm before changing.
    """
    if argv is None:
        argv = sys.argv[1:]

    try:
        executor = None

        parser = build_args()
        args = parser.parse_args(args=argv)

        model = DeviceModel()

        parser = SensorGraphFileParser()
        parser.parse_file(args.sensor_graph)
        parser.compile(model)

        if not args.disable_optimizer:
            opt = SensorGraphOptimizer()
            opt.optimize(parser.sensor_graph, model=model)

        graph = parser.sensor_graph
        sim = SensorGraphSimulator(graph)

        for stop in args.stop:
            sim.stop_condition(stop)

        for watch in args.watch:
            watch_sel = DataStreamSelector.FromString(watch)
            graph.sensor_log.watch(watch_sel, watch_printer)

        # If we are semihosting, create the appropriate executor connected to the device
        if args.semihost_device is not None:
            executor = SemihostedRPCExecutor(args.port, args.semihost_device)
            sim.rpc_executor = executor

        for mock in args.mock_rpc:
            slot, rpc_id, value = process_mock_rpc(mock)
            sim.rpc_executor.mock(slot, rpc_id, value)

        for stim in args.stimulus:
            sim.stimulus(stim)

        graph.load_constants()

        if args.trace is not None:
            sim.record_trace()

        try:
            if args.connected:
                sim.step(user_connected, 8)

            sim.run(accelerated=not args.realtime)
        except KeyboardInterrupt:
            pass

        if args.trace is not None:
            sim.trace.save(args.trace)
    finally:
        if executor is not None:
            executor.hw.close()

    return 0
Main entry point for iotile sensorgraph simulator .
406
12
22,126
def _verify_tile_versions(self, hw):
    """Verify that the tiles have the correct firmware versions.

    Raises ArgumentError on the first tile whose reported version does
    not match the expected one.
    """
    for tile, expected in self._tile_versions.items():
        actual = str(hw.get(tile).tile_version())
        if expected != actual:
            raise ArgumentError("Tile has incorrect firmware", tile=tile,
                                expected_version=expected,
                                actual_version=actual)
Verify that the tiles have the correct versions
105
9
22,127
def _verify_realtime_streams(self, hw):
    """Check that the realtime streams are being produced.

    Collects reports for ~2 seconds and requires at least 2 reports per
    expected stream, raising ArgumentError otherwise.
    """
    print("--> Testing realtime data (takes 2 seconds)")
    time.sleep(2.1)

    reports = list(hw.iter_reports())

    seen = {key: 0 for key in self._realtime_streams}
    for report in reports:
        stream_value = report.visible_readings[0].stream
        if seen.get(stream_value) is not None:
            seen[stream_value] += 1

    for stream in seen:
        if seen[stream] < 2:
            raise ArgumentError("Realtime Stream not pushing any reports",
                                stream=hex(stream), reports_seen=seen[stream])
Check that the realtime streams are being produced
174
9
22,128
def _update_pot_file(target, source, env):
    """Action function for the POTUpdate builder.

    Runs xgettext in the target's directory and rewrites the POT file
    only when its messages (ignoring POT-Creation-Date) changed.
    """
    import re
    import os
    import SCons.Action

    nop = lambda target, source, env: 0

    # Save scons cwd and os cwd (NOTE: they may be different. After the job,
    # we revert each one to its original state).
    save_cwd = env.fs.getcwd()
    save_os_cwd = os.getcwd()
    chdir = target[0].dir
    chdir_str = repr(chdir.get_abspath())

    # Print chdir message (employ SCons.Action.Action for that. It knows
    # better than me how to do this correctly).
    env.Execute(SCons.Action.Action(nop, "Entering " + chdir_str))

    # Go to target's directory and do our job
    env.fs.chdir(chdir, 1)
    try:
        cmd = _CmdRunner('$XGETTEXTCOM', '$XGETTEXTCOMSTR')
        action = SCons.Action.Action(cmd, strfunction=cmd.strfunction)
        status = action([target[0]], source, env)
    except:
        # Something went wrong: announce we are leaving, restore both
        # working dirs to their previous state and re-throw.
        env.Execute(SCons.Action.Action(nop, "Leaving " + chdir_str))
        env.fs.chdir(save_cwd, 0)
        os.chdir(save_os_cwd)
        raise

    # Print chdir message and revert working dirs to previous state.
    env.Execute(SCons.Action.Action(nop, "Leaving " + chdir_str))
    env.fs.chdir(save_cwd, 0)
    os.chdir(save_os_cwd)

    # If the command was not successful, return the error code.
    if status:
        return status

    new_content = cmd.out
    if not new_content:
        # When xgettext finds no internationalized messages, no *.pot is
        # created (we don't want to bother translators with empty POT files).
        needs_update = False
        explain = "no internationalized messages encountered"
    elif target[0].exists():
        # If the file already exists, it's left unaltered unless its messages
        # are outdated (w.r.t. those recovered by xgettext from sources).
        old_content = target[0].get_text_contents()
        re_cdate = re.compile(r'^"POT-Creation-Date: .*"$[\r\n]?', re.M)
        old_content_nocdate = re.sub(re_cdate, "", old_content)
        new_content_nocdate = re.sub(re_cdate, "", new_content)
        if old_content_nocdate == new_content_nocdate:
            needs_update = False
            explain = "messages in file found to be up-to-date"
        else:
            needs_update = True
            explain = "messages in file were outdated"
    else:
        # No POT file found, create new one
        needs_update = True
        explain = "new file"

    if needs_update:
        msg = "Writing " + repr(str(target[0])) + " (" + explain + ")"
        env.Execute(SCons.Action.Action(nop, msg))
        with open(str(target[0]), "w") as f:
            f.write(new_content)
        return 0

    msg = "Not writing " + repr(str(target[0])) + " (" + explain + ")"
    env.Execute(SCons.Action.Action(nop, msg))
    return 0
Action function for POTUpdate builder
879
7
22,129
def _scan_xgettext_from_files(target, source, env, files=None, path=None):
    """Parse POTFILES.in-like files and register the listed files as dependencies of target."""
    import re
    import SCons.Util
    import SCons.Node.FS

    if files is None:
        return 0
    if not SCons.Util.is_List(files):
        files = [files]

    if path is None:
        if 'XGETTEXTPATH' in env:
            path = env['XGETTEXTPATH']
        else:
            path = []
    if not SCons.Util.is_List(path):
        path = [path]
    path = SCons.Util.flatten(path)

    dirs = ()
    for p in path:
        if not isinstance(p, SCons.Node.FS.Base):
            if SCons.Util.is_String(p):
                p = env.subst(p, source=source, target=target)
            p = env.arg2nodes(p, env.fs.Dir)
        dirs += tuple(p)
    # cwd is the default search path (when no path is defined by user)
    if not dirs:
        dirs = (env.fs.getcwd(),)

    # Parse 'POTFILE.in' files.
    re_comment = re.compile(r'^#[^\n\r]*$\r?\n?', re.M)
    re_emptyln = re.compile(r'^[ \t\r]*$\r?\n?', re.M)
    re_trailws = re.compile(r'[ \t\r]+$')
    for f in files:
        # Find files in search path $XGETTEXTPATH
        if isinstance(f, SCons.Node.FS.Base) and f.rexists():
            contents = f.get_text_contents()
            contents = re_comment.sub("", contents)
            contents = re_emptyln.sub("", contents)
            contents = re_trailws.sub("", contents)
            depnames = contents.splitlines()
            for depname in depnames:
                depfile = SCons.Node.FS.find_file(depname, dirs)
                if not depfile:
                    depfile = env.arg2nodes(depname, dirs[0].File)
                env.Depends(target, depfile)
    return 0
Parses a POTFILES.in-like file and returns a list of extracted file names.
521
20
22,130
def _pot_update_emitter(target, source, env):
    """Emitter function for the POTUpdate builder.

    Registers $XGETTEXTFROM files as dependencies of the POT target and
    scans them for the source files they list.
    """
    from SCons.Tool.GettextCommon import _POTargetFactory
    import SCons.Util
    import SCons.Node.FS

    if 'XGETTEXTFROM' in env:
        xfrom = env['XGETTEXTFROM']
    else:
        return target, source

    if not SCons.Util.is_List(xfrom):
        xfrom = [xfrom]
    xfrom = SCons.Util.flatten(xfrom)

    # Prepare list of 'POTFILE.in' files.
    files = []
    for xf in xfrom:
        if not isinstance(xf, SCons.Node.FS.Base):
            if SCons.Util.is_String(xf):
                # Interpolate variables in strings
                xf = env.subst(xf, source=source, target=target)
            xf = env.arg2nodes(xf)
        files.extend(xf)

    if files:
        env.Depends(target, files)
        _scan_xgettext_from_files(target, source, env, files)

    return target, source
Emitter function for POTUpdate builder
251
8
22,131
def _POTUpdateBuilder(env, **kw):
    """Create the POTUpdate builder object."""
    import SCons.Action
    from SCons.Tool.GettextCommon import _POTargetFactory

    kw['action'] = SCons.Action.Action(_update_pot_file, None)
    kw['suffix'] = '$POTSUFFIX'
    kw['target_factory'] = _POTargetFactory(env, alias='$POTUPDATE_ALIAS').File
    kw['emitter'] = _pot_update_emitter
    return _POTBuilder(**kw)
Creates POTUpdate builder object
136
7
22,132
def generate(env, **kw):
    """Generate the xgettext tool."""
    import SCons.Util
    from SCons.Tool.GettextCommon import RPaths, _detect_xgettext

    try:
        env['XGETTEXT'] = _detect_xgettext(env)
    except:
        env['XGETTEXT'] = 'xgettext'

    # NOTE: sources="$SOURCES" would work as well. However, we use the
    # following construction to convert absolute paths provided by scons
    # onto paths relative to current working dir. Note, that scons expands
    # $SOURCE(S) to absolute paths for sources outside of the current
    # subtree (e.g. in "../"). With source=$SOURCE those absolute paths
    # would be written to the resultant *.pot file (and its derived *.po
    # files) as references to lines in source code. Such references would
    # be correct (e.g. in poedit) only on the machine on which the *.pot
    # was generated and of no use on other hosts.
    sources = '$( ${_concat( "", SOURCES, "", __env__, XgettextRPaths, TARGET' \
              + ', SOURCES)} $)'

    # NOTE: the output from the $XGETTEXTCOM command must go to stdout, not
    # to a file. This is required by the POTUpdate builder's action.
    xgettextcom = '$XGETTEXT $XGETTEXTFLAGS $_XGETTEXTPATHFLAGS' \
                  + ' $_XGETTEXTFROMFLAGS -o - ' + sources

    xgettextpathflags = '$( ${_concat( XGETTEXTPATHPREFIX, XGETTEXTPATH' \
                        + ', XGETTEXTPATHSUFFIX, __env__, RDirs, TARGET, SOURCES)} $)'
    xgettextfromflags = '$( ${_concat( XGETTEXTFROMPREFIX, XGETTEXTFROM' \
                        + ', XGETTEXTFROMSUFFIX, __env__, target=TARGET, source=SOURCES)} $)'

    env.SetDefault(
        _XGETTEXTDOMAIN='${TARGET.filebase}',
        XGETTEXTFLAGS=[],
        XGETTEXTCOM=xgettextcom,
        XGETTEXTCOMSTR='',
        XGETTEXTPATH=[],
        XGETTEXTPATHPREFIX='-D',
        XGETTEXTPATHSUFFIX='',
        XGETTEXTFROM=None,
        XGETTEXTFROMPREFIX='-f',
        XGETTEXTFROMSUFFIX='',
        _XGETTEXTPATHFLAGS=xgettextpathflags,
        _XGETTEXTFROMFLAGS=xgettextfromflags,
        POTSUFFIX=['.pot'],
        POTUPDATE_ALIAS='pot-update',
        XgettextRPaths=RPaths(env)
    )
    env.Append(BUILDERS={'_POTUpdateBuilder': _POTUpdateBuilder(env)})
    env.AddMethod(_POTUpdateBuilderWrapper, 'POTUpdate')
    env.AlwaysBuild(env.Alias('$POTUPDATE_ALIAS'))
Generate xgettext tool
752
6
22,133
def generate(env):
    """Add Builders and construction variables for gcc to an Environment."""
    if 'CC' not in env:
        env['CC'] = env.Detect(compilers) or compilers[0]

    cc.generate(env)

    # Windows-ish platforms do not need (or support) -fPIC for shared objects.
    if env['PLATFORM'] in ['cygwin', 'win32']:
        env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
    else:
        env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -fPIC')

    # determine compiler version
    version = detect_version(env, env['CC'])
    if version:
        env['CCVERSION'] = version
Add Builders and construction variables for gcc to an Environment .
152
12
22,134
def detect_version(env, cc):
    """Return the version of the GNU compiler, or None if it is not a GNU compiler."""
    cc = env.subst(cc)
    if not cc:
        return None

    version = None
    pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['--version'],
                                 stdin='devnull',
                                 stderr='devnull',
                                 stdout=subprocess.PIPE)

    # -dumpversion was added in GCC 3.0. As long as we're supporting GCC
    # versions older than that, we should use --version and a regular
    # expression to extract the version number.
    line = SCons.Util.to_str(pipe.stdout.readline())
    match = re.search(r'[0-9]+(\.[0-9]+)+', line)
    if match:
        version = match.group(0)

    # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout
    # buffer, so keep reading to let the child process actually terminate.
    while SCons.Util.to_str(pipe.stdout.readline()):
        pass

    ret = pipe.wait()
    if ret != 0:
        return None

    return version
Return the version of the GNU compiler or None if it is not a GNU compiler .
307
17
22,135
def is_dos_short_file_name(file):
    """Examine if the given file name is in the DOS 8.3 form."""
    base, extension = os.path.splitext(file)

    # The extension includes the leading dot, so a valid one is absent
    # or between 1 and 3 characters after the dot.
    has_proper_ext = len(extension) == 0 or (2 <= len(extension) <= 4)
    has_proper_base = file.isupper() and len(base) <= 8

    return has_proper_ext and has_proper_base
Examine if the given file is in the 8.3 form.
84
14
22,136
def create_feature_dict(files):
    """Collect files into a feature hierarchy.

    X_MSI_FEATURE and doc FileTags can be used to collect files into a
    hierarchy; files carrying neither tag land under the 'default' key.
    Returns a dict mapping feature name -> list of files.
    """
    # Renamed from 'dict'/'file' locals, which shadowed the builtins.
    feature_map = {}

    def add_to_dict(feature, entry):
        # A feature tag may be a single name or a list of names.
        if not SCons.Util.is_List(feature):
            feature = [feature]

        for name in feature:
            feature_map.setdefault(name, []).append(entry)

    for entry in files:
        if hasattr(entry, 'PACKAGING_X_MSI_FEATURE'):
            add_to_dict(entry.PACKAGING_X_MSI_FEATURE, entry)
        elif hasattr(entry, 'PACKAGING_DOC'):
            add_to_dict('PACKAGING_DOC', entry)
        else:
            add_to_dict('default', entry)

    return feature_map
X_MSI_FEATURE and doc FileTag s can be used to collect files in a hierarchy . This function collects the files into this hierarchy .
175
31
22,137
def generate_guids(root):
    """Generate globally unique identifiers for the parts of the XML which need them.

    Ids are derived from the MD5 hash of each node's XML subtree, so
    they are stable for identical content across rebuilds.
    """
    from hashlib import md5

    # specify which tags need a guid and in which attribute this should be stored.
    needs_id = {'Product': 'Id',
                'Package': 'Id',
                'Component': 'Guid',
                }

    # Find all XML nodes matching the key, hash their subtree, format the
    # hash as a GUID-like string and store it in the chosen attribute.
    for key, attribute in needs_id.items():
        for node in root.getElementsByTagName(key):
            # md5 requires bytes; node.toxml() returns str on Python 3,
            # so encode it (the original passed str and raised TypeError).
            digest = md5(node.toxml().encode('utf-8')).hexdigest()
            guid_str = '%s-%s-%s-%s-%s' % (digest[:8], digest[8:12],
                                           digest[12:16], digest[16:20],
                                           digest[20:])
            node.attributes[attribute] = guid_str
Generates globally unique identifiers for the parts of the XML which need them.
220
14
22,138
def create_default_directory_layout(root, NAME, VERSION, VENDOR, filename_set):
    """Create the wix default target directory layout and return the innermost directory.

    Builds TARGETDIR/ProgramFilesFolder/<vendor>/<name>-<version> and
    hangs it under the document's Product element.
    """
    doc = Document()

    d1 = doc.createElement('Directory')
    d1.attributes['Id'] = 'TARGETDIR'
    d1.attributes['Name'] = 'SourceDir'

    d2 = doc.createElement('Directory')
    d2.attributes['Id'] = 'ProgramFilesFolder'
    d2.attributes['Name'] = 'PFiles'

    d3 = doc.createElement('Directory')
    d3.attributes['Id'] = 'VENDOR_folder'
    d3.attributes['Name'] = escape(gen_dos_short_file_name(VENDOR, filename_set))
    d3.attributes['LongName'] = escape(VENDOR)

    project_folder = "%s-%s" % (NAME, VERSION)
    d4 = doc.createElement('Directory')
    d4.attributes['Id'] = 'MY_DEFAULT_FOLDER'
    d4.attributes['Name'] = escape(gen_dos_short_file_name(project_folder, filename_set))
    d4.attributes['LongName'] = escape(project_folder)

    d1.childNodes.append(d2)
    d2.childNodes.append(d3)
    d3.childNodes.append(d4)

    root.getElementsByTagName('Product')[0].childNodes.append(d1)

    return d4
Create the wix default target directory layout and return the innermost directory .
351
15
22,139
def build_wxsfile_file_section ( root , files , NAME , VERSION , VENDOR , filename_set , id_set ) : root = create_default_directory_layout ( root , NAME , VERSION , VENDOR , filename_set ) components = create_feature_dict ( files ) factory = Document ( ) def get_directory ( node , dir ) : """ Returns the node under the given node representing the directory. Returns the component node if dir is None or empty. """ if dir == '' or not dir : return node Directory = node dir_parts = dir . split ( os . path . sep ) # to make sure that our directory ids are unique, the parent folders are # consecutively added to upper_dir upper_dir = '' # walk down the xml tree finding parts of the directory dir_parts = [ d for d in dir_parts if d != '' ] for d in dir_parts [ : ] : already_created = [ c for c in Directory . childNodes if c . nodeName == 'Directory' and c . attributes [ 'LongName' ] . value == escape ( d ) ] if already_created != [ ] : Directory = already_created [ 0 ] dir_parts . remove ( d ) upper_dir += d else : break for d in dir_parts : nDirectory = factory . createElement ( 'Directory' ) nDirectory . attributes [ 'LongName' ] = escape ( d ) nDirectory . attributes [ 'Name' ] = escape ( gen_dos_short_file_name ( d , filename_set ) ) upper_dir += d nDirectory . attributes [ 'Id' ] = convert_to_id ( upper_dir , id_set ) Directory . childNodes . append ( nDirectory ) Directory = nDirectory return Directory for file in files : drive , path = os . path . splitdrive ( file . PACKAGING_INSTALL_LOCATION ) filename = os . path . basename ( path ) dirname = os . path . dirname ( path ) h = { # tagname : default value 'PACKAGING_X_MSI_VITAL' : 'yes' , 'PACKAGING_X_MSI_FILEID' : convert_to_id ( filename , id_set ) , 'PACKAGING_X_MSI_LONGNAME' : filename , 'PACKAGING_X_MSI_SHORTNAME' : gen_dos_short_file_name ( filename , filename_set ) , 'PACKAGING_X_MSI_SOURCE' : file . get_path ( ) , } # fill in the default tags given above. for k , v in [ ( k , v ) for ( k , v ) in h . 
items ( ) if not hasattr ( file , k ) ] : setattr ( file , k , v ) File = factory . createElement ( 'File' ) File . attributes [ 'LongName' ] = escape ( file . PACKAGING_X_MSI_LONGNAME ) File . attributes [ 'Name' ] = escape ( file . PACKAGING_X_MSI_SHORTNAME ) File . attributes [ 'Source' ] = escape ( file . PACKAGING_X_MSI_SOURCE ) File . attributes [ 'Id' ] = escape ( file . PACKAGING_X_MSI_FILEID ) File . attributes [ 'Vital' ] = escape ( file . PACKAGING_X_MSI_VITAL ) # create the <Component> Tag under which this file should appear Component = factory . createElement ( 'Component' ) Component . attributes [ 'DiskId' ] = '1' Component . attributes [ 'Id' ] = convert_to_id ( filename , id_set ) # hang the component node under the root node and the file node # under the component node. Directory = get_directory ( root , dirname ) Directory . childNodes . append ( Component ) Component . childNodes . append ( File )
Builds the Component sections of the wxs file with their included files .
865
15
22,140
def build_wxsfile_default_gui ( root ) : factory = Document ( ) Product = root . getElementsByTagName ( 'Product' ) [ 0 ] UIRef = factory . createElement ( 'UIRef' ) UIRef . attributes [ 'Id' ] = 'WixUI_Mondo' Product . childNodes . append ( UIRef ) UIRef = factory . createElement ( 'UIRef' ) UIRef . attributes [ 'Id' ] = 'WixUI_ErrorProgressText' Product . childNodes . append ( UIRef )
This function adds a default GUI to the wxs file
130
11
22,141
def build_license_file ( directory , spec ) : name , text = '' , '' try : name = spec [ 'LICENSE' ] text = spec [ 'X_MSI_LICENSE_TEXT' ] except KeyError : pass # ignore this as X_MSI_LICENSE_TEXT is optional if name != '' or text != '' : file = open ( os . path . join ( directory . get_path ( ) , 'License.rtf' ) , 'w' ) file . write ( '{\\rtf' ) if text != '' : file . write ( text . replace ( '\n' , '\\par ' ) ) else : file . write ( name + '\\par\\par' ) file . write ( '}' ) file . close ( )
Creates a License . rtf file with the content of X_MSI_LICENSE_TEXT in the given directory
171
26
22,142
def build_wxsfile_header_section ( root , spec ) : # Create the needed DOM nodes and add them at the correct position in the tree. factory = Document ( ) Product = factory . createElement ( 'Product' ) Package = factory . createElement ( 'Package' ) root . childNodes . append ( Product ) Product . childNodes . append ( Package ) # set "mandatory" default values if 'X_MSI_LANGUAGE' not in spec : spec [ 'X_MSI_LANGUAGE' ] = '1033' # select english # mandatory sections, will throw a KeyError if the tag is not available Product . attributes [ 'Name' ] = escape ( spec [ 'NAME' ] ) Product . attributes [ 'Version' ] = escape ( spec [ 'VERSION' ] ) Product . attributes [ 'Manufacturer' ] = escape ( spec [ 'VENDOR' ] ) Product . attributes [ 'Language' ] = escape ( spec [ 'X_MSI_LANGUAGE' ] ) Package . attributes [ 'Description' ] = escape ( spec [ 'SUMMARY' ] ) # now the optional tags, for which we avoid the KeyErrror exception if 'DESCRIPTION' in spec : Package . attributes [ 'Comments' ] = escape ( spec [ 'DESCRIPTION' ] ) if 'X_MSI_UPGRADE_CODE' in spec : Package . attributes [ 'X_MSI_UPGRADE_CODE' ] = escape ( spec [ 'X_MSI_UPGRADE_CODE' ] ) # We hardcode the media tag as our current model cannot handle it. Media = factory . createElement ( 'Media' ) Media . attributes [ 'Id' ] = '1' Media . attributes [ 'Cabinet' ] = 'default.cab' Media . attributes [ 'EmbedCab' ] = 'yes' root . getElementsByTagName ( 'Product' ) [ 0 ] . childNodes . append ( Media )
Adds the xml file node which define the package meta - data .
441
13
22,143
def generate ( env ) : path , cxx , shcxx , version = get_cppc ( env ) if path : cxx = os . path . join ( path , cxx ) shcxx = os . path . join ( path , shcxx ) cplusplus . generate ( env ) env [ 'CXX' ] = cxx env [ 'SHCXX' ] = shcxx env [ 'CXXVERSION' ] = version env [ 'SHCXXFLAGS' ] = SCons . Util . CLVar ( '$CXXFLAGS -KPIC' ) env [ 'SHOBJPREFIX' ] = 'so_' env [ 'SHOBJSUFFIX' ] = '.o'
Add Builders and construction variables for SunPRO C ++ .
159
12
22,144
def FromReadings ( cls , uuid , readings , events , report_id = IOTileReading . InvalidReadingID , selector = 0xFFFF , streamer = 0x100 , sent_timestamp = 0 , received_time = None ) : lowest_id = IOTileReading . InvalidReadingID highest_id = IOTileReading . InvalidReadingID for item in itertools . chain ( iter ( readings ) , iter ( events ) ) : if item . reading_id == IOTileReading . InvalidReadingID : continue if lowest_id == IOTileReading . InvalidReadingID or item . reading_id < lowest_id : lowest_id = item . reading_id if highest_id == IOTileReading . InvalidReadingID or item . reading_id > highest_id : highest_id = item . reading_id reading_list = [ x . asdict ( ) for x in readings ] event_list = [ x . asdict ( ) for x in events ] report_dict = { "format" : cls . FORMAT_TAG , "device" : uuid , "streamer_index" : streamer , "streamer_selector" : selector , "incremental_id" : report_id , "lowest_id" : lowest_id , "highest_id" : highest_id , "device_sent_timestamp" : sent_timestamp , "events" : event_list , "data" : reading_list } encoded = msgpack . packb ( report_dict , default = _encode_datetime , use_bin_type = True ) return FlexibleDictionaryReport ( encoded , signed = False , encrypted = False , received_time = received_time )
Create a flexible dictionary report from a list of readings and events .
374
13
22,145
def decode ( self ) : report_dict = msgpack . unpackb ( self . raw_report , raw = False ) events = [ IOTileEvent . FromDict ( x ) for x in report_dict . get ( 'events' , [ ] ) ] readings = [ IOTileReading . FromDict ( x ) for x in report_dict . get ( 'data' , [ ] ) ] if 'device' not in report_dict : raise DataError ( "Invalid encoded FlexibleDictionaryReport that did not " "have a device key set with the device uuid" ) self . origin = report_dict [ 'device' ] self . report_id = report_dict . get ( "incremental_id" , IOTileReading . InvalidReadingID ) self . sent_timestamp = report_dict . get ( "device_sent_timestamp" , 0 ) self . origin_streamer = report_dict . get ( "streamer_index" ) self . streamer_selector = report_dict . get ( "streamer_selector" ) self . lowest_id = report_dict . get ( 'lowest_id' ) self . highest_id = report_dict . get ( 'highest_id' ) return readings , events
Decode this report from a msgpack encoded binary blob .
275
12
22,146
def _callable_contents ( obj ) : try : # Test if obj is a method. return _function_contents ( obj . __func__ ) except AttributeError : try : # Test if obj is a callable object. return _function_contents ( obj . __call__ . __func__ ) except AttributeError : try : # Test if obj is a code object. return _code_contents ( obj ) except AttributeError : # Test if obj is a function object. return _function_contents ( obj )
Return the signature contents of a callable Python object .
116
11
22,147
def _object_contents ( obj ) : try : # Test if obj is a method. return _function_contents ( obj . __func__ ) except AttributeError : try : # Test if obj is a callable object. return _function_contents ( obj . __call__ . __func__ ) except AttributeError : try : # Test if obj is a code object. return _code_contents ( obj ) except AttributeError : try : # Test if obj is a function object. return _function_contents ( obj ) except AttributeError as ae : # Should be a pickle-able Python object. try : return _object_instance_content ( obj ) # pickling an Action instance or object doesn't yield a stable # content as instance property may be dumped in different orders # return pickle.dumps(obj, ACTION_SIGNATURE_PICKLE_PROTOCOL) except ( pickle . PicklingError , TypeError , AttributeError ) as ex : # This is weird, but it seems that nested classes # are unpickable. The Python docs say it should # always be a PicklingError, but some Python # versions seem to return TypeError. Just do # the best we can. return bytearray ( repr ( obj ) , 'utf-8' )
Return the signature contents of any Python object .
282
9
22,148
def _code_contents ( code , docstring = None ) : # contents = [] # The code contents depends on the number of local variables # but not their actual names. contents = bytearray ( "{}, {}" . format ( code . co_argcount , len ( code . co_varnames ) ) , 'utf-8' ) contents . extend ( b", " ) contents . extend ( bytearray ( str ( len ( code . co_cellvars ) ) , 'utf-8' ) ) contents . extend ( b", " ) contents . extend ( bytearray ( str ( len ( code . co_freevars ) ) , 'utf-8' ) ) # The code contents depends on any constants accessed by the # function. Note that we have to call _object_contents on each # constants because the code object of nested functions can # show-up among the constants. z = [ _object_contents ( cc ) for cc in code . co_consts [ 1 : ] ] contents . extend ( b',(' ) contents . extend ( bytearray ( ',' , 'utf-8' ) . join ( z ) ) contents . extend ( b')' ) # The code contents depends on the variable names used to # accessed global variable, as changing the variable name changes # the variable actually accessed and therefore changes the # function result. z = [ bytearray ( _object_contents ( cc ) ) for cc in code . co_names ] contents . extend ( b',(' ) contents . extend ( bytearray ( ',' , 'utf-8' ) . join ( z ) ) contents . extend ( b')' ) # The code contents depends on its actual code!!! contents . extend ( b',(' ) contents . extend ( code . co_code ) contents . extend ( b')' ) return contents
Return the signature contents of a code object .
401
9
22,149
def _object_instance_content ( obj ) : retval = bytearray ( ) if obj is None : return b'N.' if isinstance ( obj , SCons . Util . BaseStringTypes ) : return SCons . Util . to_bytes ( obj ) inst_class = obj . __class__ inst_class_name = bytearray ( obj . __class__ . __name__ , 'utf-8' ) inst_class_module = bytearray ( obj . __class__ . __module__ , 'utf-8' ) inst_class_hierarchy = bytearray ( repr ( inspect . getclasstree ( [ obj . __class__ , ] ) ) , 'utf-8' ) # print("ICH:%s : %s"%(inst_class_hierarchy, repr(obj))) properties = [ ( p , getattr ( obj , p , "None" ) ) for p in dir ( obj ) if not ( p [ : 2 ] == '__' or inspect . ismethod ( getattr ( obj , p ) ) or inspect . isbuiltin ( getattr ( obj , p ) ) ) ] properties . sort ( ) properties_str = ',' . join ( [ "%s=%s" % ( p [ 0 ] , p [ 1 ] ) for p in properties ] ) properties_bytes = bytearray ( properties_str , 'utf-8' ) methods = [ p for p in dir ( obj ) if inspect . ismethod ( getattr ( obj , p ) ) ] methods . sort ( ) method_contents = [ ] for m in methods : # print("Method:%s"%m) v = _function_contents ( getattr ( obj , m ) ) # print("[%s->]V:%s [%s]"%(m,v,type(v))) method_contents . append ( v ) retval = bytearray ( b'{' ) retval . extend ( inst_class_name ) retval . extend ( b":" ) retval . extend ( inst_class_module ) retval . extend ( b'}[[' ) retval . extend ( inst_class_hierarchy ) retval . extend ( b']]{{' ) retval . extend ( bytearray ( b"," ) . join ( method_contents ) ) retval . extend ( b"}}{{{" ) retval . extend ( properties_bytes ) retval . extend ( b'}}}' ) return retval
Returns consistant content for a action class or an instance thereof
552
12
22,150
def _do_create_keywords ( args , kw ) : v = kw . get ( 'varlist' , ( ) ) # prevent varlist="FOO" from being interpreted as ['F', 'O', 'O'] if is_String ( v ) : v = ( v , ) kw [ 'varlist' ] = tuple ( v ) if args : # turn positional args into equivalent keywords cmdstrfunc = args [ 0 ] if cmdstrfunc is None or is_String ( cmdstrfunc ) : kw [ 'cmdstr' ] = cmdstrfunc elif callable ( cmdstrfunc ) : kw [ 'strfunction' ] = cmdstrfunc else : raise SCons . Errors . UserError ( 'Invalid command display variable type. ' 'You must either pass a string or a callback which ' 'accepts (target, source, env) as parameters.' ) if len ( args ) > 1 : kw [ 'varlist' ] = tuple ( SCons . Util . flatten ( args [ 1 : ] ) ) + kw [ 'varlist' ] if kw . get ( 'strfunction' , _null ) is not _null and kw . get ( 'cmdstr' , _null ) is not _null : raise SCons . Errors . UserError ( 'Cannot have both strfunction and cmdstr args to Action()' )
This converts any arguments after the action argument into their equivalent keywords and adds them to the kw argument .
297
21
22,151
def _do_create_list_action ( act , kw ) : acts = [ ] for a in act : aa = _do_create_action ( a , kw ) if aa is not None : acts . append ( aa ) if not acts : return ListAction ( [ ] ) elif len ( acts ) == 1 : return acts [ 0 ] else : return ListAction ( acts )
A factory for list actions . Convert the input list into Actions and then wrap them in a ListAction .
87
21
22,152
def Action ( act , * args , * * kw ) : # Really simple: the _do_create_* routines do the heavy lifting. _do_create_keywords ( args , kw ) if is_List ( act ) : return _do_create_list_action ( act , kw ) return _do_create_action ( act , kw )
A factory for action objects .
80
6
22,153
def _string_from_cmd_list ( cmd_list ) : cl = [ ] for arg in map ( str , cmd_list ) : if ' ' in arg or '\t' in arg : arg = '"' + arg + '"' cl . append ( arg ) return ' ' . join ( cl )
Takes a list of command line arguments and returns a pretty representation for printing .
67
16
22,154
def get_default_ENV ( env ) : global default_ENV try : return env [ 'ENV' ] except KeyError : if not default_ENV : import SCons . Environment # This is a hideously expensive way to get a default shell # environment. What it really should do is run the platform # setup to get the default ENV. Fortunately, it's incredibly # rare for an Environment not to have a shell environment, so # we're not going to worry about it overmuch. default_ENV = SCons . Environment . Environment ( ) [ 'ENV' ] return default_ENV
A fiddlin little function that has an import SCons . Environment which can t be moved to the top level without creating an import loop . Since this import creates a local variable named SCons it blocks access to the global variable so we move it here to prevent complaints about local variables being used uninitialized .
130
62
22,155
def execute ( self , target , source , env , executor = None ) : escape_list = SCons . Subst . escape_list flatten_sequence = SCons . Util . flatten_sequence try : shell = env [ 'SHELL' ] except KeyError : raise SCons . Errors . UserError ( 'Missing SHELL construction variable.' ) try : spawn = env [ 'SPAWN' ] except KeyError : raise SCons . Errors . UserError ( 'Missing SPAWN construction variable.' ) else : if is_String ( spawn ) : spawn = env . subst ( spawn , raw = 1 , conv = lambda x : x ) escape = env . get ( 'ESCAPE' , lambda x : x ) ENV = get_default_ENV ( env ) # Ensure that the ENV values are all strings: for key , value in ENV . items ( ) : if not is_String ( value ) : if is_List ( value ) : # If the value is a list, then we assume it is a # path list, because that's a pretty common list-like # value to stick in an environment variable: value = flatten_sequence ( value ) ENV [ key ] = os . pathsep . join ( map ( str , value ) ) else : # If it isn't a string or a list, then we just coerce # it to a string, which is the proper way to handle # Dir and File instances and will produce something # reasonable for just about everything else: ENV [ key ] = str ( value ) if executor : target = executor . get_all_targets ( ) source = executor . get_all_sources ( ) cmd_list , ignore , silent = self . process ( target , list ( map ( rfile , source ) ) , env , executor ) # Use len() to filter out any "command" that's zero-length. for cmd_line in filter ( len , cmd_list ) : # Escape the command line for the interpreter we are using. cmd_line = escape_list ( cmd_line , escape ) result = spawn ( shell , escape , cmd_line [ 0 ] , cmd_line , ENV ) if not ignore and result : msg = "Error %s" % result return SCons . Errors . BuildError ( errstr = msg , status = result , action = self , command = cmd_line ) return 0
Execute a command action .
517
6
22,156
def get_presig ( self , target , source , env ) : try : return self . gc ( target , source , env ) except AttributeError : return self . funccontents
Return the signature contents of this callable action .
41
10
22,157
def get_presig ( self , target , source , env ) : return b"" . join ( [ bytes ( x . get_contents ( target , source , env ) ) for x in self . list ] )
Return the signature contents of this action list .
46
9
22,158
async def _notify_update ( self , name , change_type , change_info = None , directed_client = None ) : for monitor in self . _monitors : try : result = monitor ( name , change_type , change_info , directed_client = directed_client ) if inspect . isawaitable ( result ) : await result except Exception : # We can't allow any exceptions in a monitor routine to break the server. self . _logger . warning ( "Error calling monitor with update %s" , name , exc_info = True )
Notify updates on a service to anyone who cares .
121
11
22,159
async def update_state ( self , short_name , state ) : if short_name not in self . services : raise ArgumentError ( "Service name is unknown" , short_name = short_name ) if state not in states . KNOWN_STATES : raise ArgumentError ( "Invalid service state" , state = state ) serv = self . services [ short_name ] [ 'state' ] if serv . state == state : return update = { } update [ 'old_status' ] = serv . state update [ 'new_status' ] = state update [ 'new_status_string' ] = states . KNOWN_STATES [ state ] serv . state = state await self . _notify_update ( short_name , 'state_change' , update )
Set the current state of a service .
167
8
22,160
def add_service ( self , name , long_name , preregistered = False , notify = True ) : if name in self . services : raise ArgumentError ( "Could not add service because the long_name is taken" , long_name = long_name ) serv_state = states . ServiceState ( name , long_name , preregistered ) service = { 'state' : serv_state , 'heartbeat_threshold' : 600 } self . services [ name ] = service if notify : return self . _notify_update ( name , 'new_service' , self . service_info ( name ) ) return None
Add a service to the list of tracked services .
134
10
22,161
def service_info ( self , short_name ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) info = { } info [ 'short_name' ] = short_name info [ 'long_name' ] = self . services [ short_name ] [ 'state' ] . long_name info [ 'preregistered' ] = self . services [ short_name ] [ 'state' ] . preregistered return info
Get static information about a service .
108
7
22,162
def service_messages ( self , short_name ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) return list ( self . services [ short_name ] [ 'state' ] . messages )
Get the messages stored for a service .
60
8
22,163
def service_headline ( self , short_name ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) return self . services [ short_name ] [ 'state' ] . headline
Get the headline stored for a service .
57
8
22,164
def service_status ( self , short_name ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) info = { } service = self . services [ short_name ] [ 'state' ] info [ 'heartbeat_age' ] = monotonic ( ) - service . last_heartbeat info [ 'numeric_status' ] = service . state info [ 'string_status' ] = service . string_state return info
Get the current status of a service .
110
8
22,165
async def send_message ( self , name , level , message ) : if name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = name ) msg = self . services [ name ] [ 'state' ] . post_message ( level , message ) await self . _notify_update ( name , 'new_message' , msg . to_dict ( ) )
Post a message for a service .
87
7
22,166
async def set_headline ( self , name , level , message ) : if name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = name ) self . services [ name ] [ 'state' ] . set_headline ( level , message ) headline = self . services [ name ] [ 'state' ] . headline . to_dict ( ) await self . _notify_update ( name , 'new_headline' , headline )
Set the sticky headline for a service .
103
8
22,167
async def send_heartbeat ( self , short_name ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) self . services [ short_name ] [ 'state' ] . heartbeat ( ) await self . _notify_update ( short_name , 'heartbeat' )
Post a heartbeat for a service .
78
7
22,168
def set_agent ( self , short_name , client_id ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) self . agents [ short_name ] = client_id
Register a client id that handlers commands for a service .
56
11
22,169
def clear_agent ( self , short_name , client_id ) : if short_name not in self . services : raise ArgumentError ( "Unknown service name" , short_name = short_name ) if short_name not in self . agents : raise ArgumentError ( "No agent registered for service" , short_name = short_name ) if client_id != self . agents [ short_name ] : raise ArgumentError ( "Client was not registered for service" , short_name = short_name , client_id = client_id , current_client = self . agents [ short_name ] ) del self . agents [ short_name ]
Remove a client id from being the command handler for a service .
139
13
22,170
async def send_rpc_command ( self , short_name , rpc_id , payload , sender_client , timeout = 1.0 ) : rpc_tag = str ( uuid . uuid4 ( ) ) self . rpc_results . declare ( rpc_tag ) if short_name in self . services and short_name in self . agents : agent_tag = self . agents [ short_name ] rpc_message = { 'rpc_id' : rpc_id , 'payload' : payload , 'response_uuid' : rpc_tag } self . in_flight_rpcs [ rpc_tag ] = InFlightRPC ( sender_client , short_name , monotonic ( ) , timeout ) await self . _notify_update ( short_name , 'rpc_command' , rpc_message , directed_client = agent_tag ) else : response = dict ( result = 'service_not_found' , response = b'' ) self . rpc_results . set ( rpc_tag , response ) return rpc_tag
Send an RPC to a service using its registered agent .
242
11
22,171
def send_rpc_response ( self , rpc_tag , result , response ) : if rpc_tag not in self . in_flight_rpcs : raise ArgumentError ( "In flight RPC could not be found, it may have timed out" , rpc_tag = rpc_tag ) del self . in_flight_rpcs [ rpc_tag ] response_message = { 'response' : response , 'result' : result } try : self . rpc_results . set ( rpc_tag , response_message ) except KeyError : self . _logger . warning ( "RPC response came but no one was waiting: response=%s" , response )
Send a response to an RPC .
151
7
22,172
def periodic_service_rpcs ( self ) : to_remove = [ ] now = monotonic ( ) for rpc_tag , rpc in self . in_flight_rpcs . items ( ) : expiry = rpc . sent_timestamp + rpc . timeout if now > expiry : to_remove . append ( rpc_tag ) for tag in to_remove : del self . in_flight_rpcs [ tag ]
Check if any RPC has expired and remove it from the in flight list .
100
15
22,173
def settings_directory ( ) : system = platform . system ( ) basedir = None if system == 'Windows' : if 'APPDATA' in os . environ : basedir = os . environ [ 'APPDATA' ] # If we're not on Windows assume we're on some # kind of posix system or Mac, where the appropriate place would be # ~/.config if basedir is None : basedir = os . path . expanduser ( '~' ) basedir = os . path . join ( basedir , '.config' ) settings_dir = os . path . abspath ( os . path . join ( basedir , 'IOTile-Core' ) ) return settings_dir
Find a per user settings directory that is appropriate for each type of system that we are installed on .
150
20
22,174
def generate ( env ) : global GhostscriptAction # The following try-except block enables us to use the Tool # in standalone mode (without the accompanying pdf.py), # whenever we need an explicit call of gs via the Gs() # Builder ... try : if GhostscriptAction is None : GhostscriptAction = SCons . Action . Action ( '$GSCOM' , '$GSCOMSTR' ) from SCons . Tool import pdf pdf . generate ( env ) bld = env [ 'BUILDERS' ] [ 'PDF' ] bld . add_action ( '.ps' , GhostscriptAction ) except ImportError as e : pass gsbuilder = SCons . Builder . Builder ( action = SCons . Action . Action ( '$GSCOM' , '$GSCOMSTR' ) ) env [ 'BUILDERS' ] [ 'Gs' ] = gsbuilder env [ 'GS' ] = gs env [ 'GSFLAGS' ] = SCons . Util . CLVar ( '-dNOPAUSE -dBATCH -sDEVICE=pdfwrite' ) env [ 'GSCOM' ] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
Add Builders and construction variables for Ghostscript to an Environment .
266
13
22,175
def resource_path ( relative_path = None , expect = None ) : if expect not in ( None , 'file' , 'folder' ) : raise ArgumentError ( "Invalid expect parameter, must be None, 'file' or 'folder'" , expect = expect ) this_dir = os . path . dirname ( __file__ ) _resource_path = os . path . join ( this_dir , '..' , 'config' ) if relative_path is not None : path = os . path . normpath ( relative_path ) _resource_path = os . path . join ( _resource_path , path ) if expect == 'file' and not os . path . isfile ( _resource_path ) : raise DataError ( "Expected resource %s to be a file and it wasn't" % _resource_path ) elif expect == 'folder' and not os . path . isdir ( _resource_path ) : raise DataError ( "Expected resource %s to be a folder and it wasn't" % _resource_path ) return os . path . abspath ( _resource_path )
Return the absolute path to a resource in iotile - build .
240
14
22,176
def unpack ( fmt , arg ) : if isinstance ( arg , bytearray ) and not ( sys . version_info >= ( 2 , 7 , 5 ) ) : return struct . unpack ( fmt , str ( arg ) ) return struct . unpack ( fmt , arg )
A shim around struct . unpack to allow it to work on python 2 . 7 . 3 .
61
21
22,177
async def initialize ( self , timeout = 2.0 ) : if self . initialized . is_set ( ) : raise InternalError ( "initialize called when already initialized" ) self . _emulator . add_task ( 8 , self . _reset_vector ( ) ) await asyncio . wait_for ( self . initialized . wait ( ) , timeout = timeout )
Launch any background tasks associated with this subsystem .
79
9
22,178
def _check_registry_type ( folder = None ) : folder = _registry_folder ( folder ) default_file = os . path . join ( folder , 'registry_type.txt' ) try : with open ( default_file , "r" ) as infile : data = infile . read ( ) data = data . strip ( ) ComponentRegistry . SetBackingStore ( data ) except IOError : pass
Check if the user has placed a registry_type . txt file to choose the registry type
93
19
22,179
def _ensure_package_loaded ( path , component ) : logger = logging . getLogger ( __name__ ) packages = component . find_products ( 'support_package' ) if len ( packages ) == 0 : return None elif len ( packages ) > 1 : raise ExternalError ( "Component had multiple products declared as 'support_package" , products = packages ) if len ( path ) > 2 and ':' in path [ 2 : ] : # Don't flag windows C: type paths path , _ , _ = path . rpartition ( ":" ) package_base = packages [ 0 ] relative_path = os . path . normpath ( os . path . relpath ( path , start = package_base ) ) if relative_path . startswith ( '..' ) : raise ExternalError ( "Component had python product output of support_package" , package = package_base , product = path , relative_path = relative_path ) if not relative_path . endswith ( '.py' ) : raise ExternalError ( "Python product did not end with .py" , path = path ) relative_path = relative_path [ : - 3 ] if os . pathsep in relative_path : raise ExternalError ( "Python support wheels with multiple subpackages not yet supported" , relative_path = relative_path ) support_distro = component . support_distribution if support_distro not in sys . modules : logger . debug ( "Creating dynamic support wheel package: %s" , support_distro ) file , path , desc = imp . find_module ( os . path . basename ( package_base ) , [ os . path . dirname ( package_base ) ] ) imp . load_module ( support_distro , file , path , desc ) return "{}.{}" . format ( support_distro , relative_path )
Ensure that the given module is loaded as a submodule .
400
13
22,180
def _try_load_module ( path , import_name = None ) : logger = logging . getLogger ( __name__ ) obj_name = None if len ( path ) > 2 and ':' in path [ 2 : ] : # Don't flag windows C: type paths path , _ , obj_name = path . rpartition ( ":" ) folder , basename = os . path . split ( path ) if folder == '' : folder = './' if basename == '' or not os . path . exists ( path ) : raise ArgumentError ( "Could not find python module to load extension" , path = path ) basename , ext = os . path . splitext ( basename ) if ext not in ( ".py" , ".pyc" , "" ) : raise ArgumentError ( "Attempted to load module is not a python package or module (.py or .pyc)" , path = path ) if import_name is None : import_name = basename else : logger . debug ( "Importing module as subpackage: %s" , import_name ) try : fileobj = None fileobj , pathname , description = imp . find_module ( basename , [ folder ] ) # Don't load modules twice if basename in sys . modules : mod = sys . modules [ basename ] else : mod = imp . load_module ( import_name , fileobj , pathname , description ) if obj_name is not None : if obj_name not in mod . __dict__ : raise ArgumentError ( "Cannot find named object '%s' inside module '%s'" % ( obj_name , basename ) , path = path ) mod = mod . __dict__ [ obj_name ] return basename , mod finally : if fileobj is not None : fileobj . close ( )
Try to programmatically load a python module by path .
390
11
22,181
def frozen ( self ) : frozen_path = os . path . join ( _registry_folder ( ) , 'frozen_extensions.json' ) return os . path . isfile ( frozen_path )
Return whether we have a cached list of all installed entry_points .
46
14
22,182
def kvstore(self):
    """Lazily load the underlying key-value store backing this registry.

    The store is constructed on first access from the class-level
    BackingType / BackingFileName configuration and then cached on the
    instance.
    """
    store = self._kvstore
    if store is None:
        store = self.BackingType(self.BackingFileName, respect_venv=True)
        self._kvstore = store

    return store
Lazily load the underlying key - value store backing this registry .
52
14
22,183
def plugins(self):
    """Lazily load iotile plugins only on demand.

    On first access, every 'iotile.plugin' extension is invoked; each
    returns an iterable of (name, value) links that are merged into a
    single cached dict.
    """
    if self._plugins is None:
        self._plugins = {}

        for _, plugin_factory in self.load_extensions('iotile.plugin'):
            for link_name, link_value in plugin_factory():
                self._plugins[link_name] = link_value

    return self._plugins
Lazily load iotile plugins only on demand .
67
12
22,184
def load_extensions(self, group, name_filter=None, comp_filter=None, class_filter=None, product_name=None, unique=False):
    """Dynamically load and return extension objects of a given type.

    Extensions are gathered from three sources, in order: products of
    locally-registered components (when *product_name* is given), installed
    entry points in *group*, and extensions registered programmatically via
    register_extension().  Results are filtered by name, class, and the
    __NO_EXTENSION__ marker.

    Args:
        group (str): The entry_point group to search.
        name_filter (str): Optional exact extension name to match.
        comp_filter (str): Optional exact component name to match when
            searching component products.
        class_filter: Optional class that extensions must subclass.
        product_name (str): Optional component product type to scan.
        unique (bool): When True, require exactly one match and return it
            directly instead of a list.

    Returns:
        list of (name, extension) tuples, or a single such tuple when
        *unique* is True.

    Raises:
        ArgumentError: When *unique* is True and there is not exactly one
            match.
    """
    found_extensions = []

    # Source 1: products declared by locally-registered components.
    if product_name is not None:
        for comp in self.iter_components():
            if comp_filter is not None and comp.name != comp_filter:
                continue

            products = comp.find_products(product_name)
            for product in products:
                try:
                    entries = self.load_extension(product, name_filter=name_filter, class_filter=class_filter, component=comp)
                    if len(entries) == 0 and name_filter is None:
                        # Don't warn if we're filtering by name since most extensions won't match
                        self._logger.warn("Found no valid extensions in product %s of component %s", product, comp.path)
                        continue

                    found_extensions.extend(entries)
                except:  # pylint:disable=bare-except;We don't want a broken extension to take down the whole system
                    self._logger.exception("Unable to load extension %s from local component %s at path %s", product_name, comp, product)

    # Source 2: installed entry points in the requested group.
    for entry in self._iter_entrypoint_group(group):
        name = entry.name
        if name_filter is not None and name != name_filter:
            continue

        try:
            ext = entry.load()
        except:  # pylint:disable=bare-except;
            # A failing entry point is skipped, not fatal.
            self._logger.warn("Unable to load %s from %s", entry.name, entry.distro, exc_info=True)
            continue

        found_extensions.extend((name, x) for x in self._filter_subclasses(ext, class_filter))

    # Source 3: extensions registered at runtime via register_extension().
    for (name, ext) in self._registered_extensions.get(group, []):
        if name_filter is not None and name != name_filter:
            continue

        found_extensions.extend((name, x) for x in self._filter_subclasses(ext, class_filter))

    # Drop anything explicitly marked with __NO_EXTENSION__.
    found_extensions = [(name, x) for name, x in found_extensions if self._filter_nonextensions(x)]

    if unique is True:
        if len(found_extensions) > 1:
            raise ArgumentError("Extension %s should have had exactly one instance of class %s, found %d" % (group, class_filter.__name__, len(found_extensions)), classes=found_extensions)
        elif len(found_extensions) == 0:
            raise ArgumentError("Extension %s had no instances of class %s" % (group, class_filter.__name__))

        return found_extensions[0]

    return found_extensions
Dynamically load and return extension objects of a given type .
611
13
22,185
def register_extension(self, group, name, extension):
    """Register an extension.

    When *extension* is a string it is treated as a path and loaded via
    load_extension(); the name discovered during loading replaces the one
    passed in.
    """
    if isinstance(extension, str):
        name, extension = self.load_extension(extension)[0]

    self._registered_extensions.setdefault(group, []).append((name, extension))
Register an extension .
84
4
22,186
def clear_extensions(self, group=None):
    """Clear all previously registered extensions.

    Args:
        group (str): Optional group to clear.  When None, every group is
            wiped class-wide.
    """
    if group is None:
        ComponentRegistry._registered_extensions = {}
    elif group in self._registered_extensions:
        self._registered_extensions[group] = []
Clear all previously registered extensions .
55
6
22,187
def freeze_extensions(self):
    """Freeze the set of extensions into a single file.

    Dumps all currently-discoverable extensions as JSON into
    ``frozen_extensions.json`` inside the registry folder.
    """
    cache_path = os.path.join(_registry_folder(), 'frozen_extensions.json')

    with open(cache_path, "w") as cache_file:
        json.dump(self._dump_extensions(), cache_file)
Freeze the set of extensions into a single file .
69
11
22,188
def unfreeze_extensions(self):
    """Remove a previously frozen list of extensions.

    Raises:
        ExternalError: If no frozen extension list exists.
    """
    cache_path = os.path.join(_registry_folder(), 'frozen_extensions.json')
    if not os.path.isfile(cache_path):
        raise ExternalError("There is no frozen extension list")

    os.remove(cache_path)
    # Reset the class-wide cache so future lookups re-scan entry points.
    ComponentRegistry._frozen_extensions = None
Remove a previously frozen list of extensions .
86
8
22,189
def load_extension(self, path, name_filter=None, class_filter=None, unique=False, component=None):
    """Load a single python module extension.

    Args:
        path (str): Path to the python file to load, optionally with a
            ``:object_name`` suffix.
        name_filter (str): Optional exact module name to match; a
            mismatch returns an empty list.
        class_filter: Optional class that candidates must subclass.
        unique (bool): When True, require exactly one match and return it
            directly.
        component: Optional component whose support package must be
            loaded before the module can be imported.

    Returns:
        list of (name, object) tuples, or a single tuple when *unique*.

    Raises:
        ArgumentError: When *unique* is True and there is not exactly one
            match.
    """
    import_name = None
    if component is not None:
        import_name = _ensure_package_loaded(path, component)

    name, module = _try_load_module(path, import_name=import_name)

    if name_filter is not None and name != name_filter:
        return []

    candidates = [(name, obj) for obj in self._filter_subclasses(module, class_filter)]
    candidates = [(cand_name, obj) for cand_name, obj in candidates if self._filter_nonextensions(obj)]

    if not unique:
        return candidates

    if len(candidates) == 0:
        raise ArgumentError("Extension %s had no instances of class %s" % (path, class_filter.__name__))
    if len(candidates) > 1:
        raise ArgumentError("Extension %s should have had exactly one instance of class %s, found %d" % (path, class_filter.__name__, len(candidates)), classes=candidates)

    return candidates[0]
Load a single python module extension .
253
7
22,190
def _filter_nonextensions ( cls , obj ) : # Not all objects have __dict__ attributes. For example, tuples don't. # and tuples are used in iotile.build for some entry points. if hasattr ( obj , '__dict__' ) and obj . __dict__ . get ( '__NO_EXTENSION__' , False ) is True : return False return True
Remove all classes marked as not extensions .
89
8
22,191
def SetBackingStore(cls, backing):
    """Set the global backing type used by the ComponentRegistry from this point forward.

    This is class-level configuration and must be called before any
    registry instance lazily creates its kvstore.

    Args:
        backing (str): One of 'json', 'sqlite' or 'memory'.

    Raises:
        ArgumentError: If *backing* is not a supported store type.
    """
    if backing not in ('json', 'sqlite', 'memory'):
        # BUGFIX: the message previously omitted 'memory' even though it is
        # an accepted backing store type.
        raise ArgumentError("Unknown backing store type that is not json, sqlite or memory", backing=backing)

    if backing == 'json':
        cls.BackingType = JSONKVStore
        cls.BackingFileName = 'component_registry.json'
    elif backing == 'memory':
        cls.BackingType = InMemoryKVStore
        cls.BackingFileName = None  # in-memory store has no backing file
    else:
        cls.BackingType = SQLiteKVStore
        cls.BackingFileName = 'component_registry.db'
Set the global backing type used by the ComponentRegistry from this point forward
146
15
22,192
def add_component(self, component, temporary=False):
    """Register a component with ComponentRegistry.

    Args:
        component (str): Path to the component folder.
        temporary (bool): When True, record the component only in the
            in-memory overlay rather than the persistent store.
    """
    tile = IOTile(component)
    absolute_path = os.path.normpath(os.path.abspath(component))

    if temporary is True:
        self._component_overlays[tile.name] = absolute_path
    else:
        self.kvstore.set(tile.name, absolute_path)
Register a component with ComponentRegistry .
77
8
22,193
def list_plugins(self):
    """List all of the plugins that have been registered for the iotile program on this computer.

    Returns a shallow copy so callers cannot mutate the cached mapping.
    """
    return dict(self.plugins.items())
List all of the plugins that have been registered for the iotile program on this computer
31
19
22,194
def clear_components(self):
    """Clear all of the registered components."""
    # Drop the class-wide temporary overlay first, then remove every
    # persistently-registered component.
    ComponentRegistry._component_overlays = {}

    for comp_name in self.list_components():
        self.remove_component(comp_name)
Clear all of the registered components
42
6
22,195
def list_components(self):
    """List all of the registered component names.

    Temporary overlay components come first, followed by persistent
    entries from the key-value store (config entries excluded).
    """
    names = list(self._component_overlays)

    for entry in self.kvstore.get_all():
        key = entry[0]
        if not key.startswith('config:'):
            names.append(key)

    return names
List all of the registered component names .
65
8
22,196
def iter_components(self):
    """Iterate over all defined components, yielding one object per registered name."""
    for comp_name in self.list_components():
        yield self.get_component(comp_name)
Iterate over all defined components yielding IOTile objects .
33
12
22,197
def list_config(self):
    """List all of the configuration variables as ``key=value`` strings."""
    prefix = 'config:'
    entries = []

    for item in self.kvstore.get_all():
        key, value = item[0], item[1]
        if key.startswith(prefix):
            entries.append("{0}={1}".format(key[len(prefix):], value))

    return entries
List all of the configuration variables
70
6
22,198
def set_config(self, key, value):
    """Set a persistent config key to a value stored in the registry.

    Config entries share the kvstore with components and are namespaced
    with a 'config:' prefix.
    """
    self.kvstore.set("config:" + key, value)
Set a persistent config key to a value stored in the registry
33
12
22,199
def get_config(self, key, default=MISSING):
    """Get the value of a persistent config key from the registry.

    Args:
        key (str): The config key to look up (stored under a 'config:'
            namespace in the kvstore).
        default: Optional value returned when the key is absent.

    Raises:
        ArgumentError: If the key is missing and no default was given.
    """
    try:
        return self.kvstore.get("config:" + key)
    except KeyError:
        if default is not MISSING:
            return default

        raise ArgumentError("No config value found for key", key=key)
Get the value of a persistent config key from the registry
66
11