idx int64 0 251k | question stringlengths 53 3.53k | target stringlengths 5 1.23k | len_question int64 20 893 | len_target int64 3 238 |
|---|---|---|---|---|
21,900 | def add_callback ( self , name , func ) : if name == 'on_scan' : events = [ 'device_seen' ] def callback ( _conn_string , _conn_id , _name , event ) : func ( self . id , event , event . get ( 'validity_period' , 60 ) ) elif name == 'on_report' : events = [ 'report' , 'broadcast' ] def callback ( _conn_string , conn_id , _name , event ) : func ( conn_id , event ) elif name == 'on_trace' : events = [ 'trace' ] def callback ( _conn_string , conn_id , _name , event ) : func ( conn_id , event ) elif name == 'on_disconnect' : events = [ 'disconnection' ] def callback ( _conn_string , conn_id , _name , _event ) : func ( self . id , conn_id ) else : raise ArgumentError ( "Unknown callback type {}" . format ( name ) ) self . _adapter . register_monitor ( [ None ] , events , callback ) | Add a callback when device events happen . | 246 | 8 |
21,901 | def disconnect_async ( self , conn_id , callback ) : future = self . _loop . launch_coroutine ( self . _adapter . disconnect ( conn_id ) ) future . add_done_callback ( lambda x : self . _callback_future ( conn_id , x , callback ) ) | Asynchronously disconnect from a device . | 67 | 8 |
21,902 | def send_script_async ( self , conn_id , data , progress_callback , callback ) : def monitor_callback ( _conn_string , _conn_id , _event_name , event ) : if event . get ( 'operation' ) != 'script' : return progress_callback ( event . get ( 'finished' ) , event . get ( 'total' ) ) async def _install_monitor ( ) : try : conn_string = self . _adapter . _get_property ( conn_id , 'connection_string' ) return self . _adapter . register_monitor ( [ conn_string ] , [ 'progress' ] , monitor_callback ) except : #pylint:disable=bare-except;This is a legacy shim that must always ensure it doesn't raise. self . _logger . exception ( "Error installing script progress monitor" ) return None monitor_id = self . _loop . run_coroutine ( _install_monitor ( ) ) if monitor_id is None : callback ( conn_id , self . id , False , 'could not install progress monitor' ) return future = self . _loop . launch_coroutine ( self . _adapter . send_script ( conn_id , data ) ) future . add_done_callback ( lambda x : self . _callback_future ( conn_id , x , callback , monitors = [ monitor_id ] ) ) | Asynchronously send a script to the device . | 305 | 10 |
21,903 | def lock ( self , key , client ) : self . key = key self . client = client | Set the key that will be used to ensure messages come from one party | 20 | 14 |
21,904 | def track_change ( self , tile , property_name , value , formatter = None ) : if not self . tracking : return if len ( self . _whitelist ) > 0 and ( tile , property_name ) not in self . _whitelist : return if formatter is None : formatter = str change = StateChange ( monotonic ( ) , tile , property_name , value , formatter ( value ) ) with self . _lock : self . changes . append ( change ) | Record that a change happened on a given tile s property . | 107 | 12 |
21,905 | def dump ( self , out_path , header = True ) : # See https://stackoverflow.com/a/3348664/9739119 for why this is necessary if sys . version_info [ 0 ] < 3 : mode = "wb" else : mode = "w" with open ( out_path , mode ) as outfile : writer = csv . writer ( outfile , quoting = csv . QUOTE_MINIMAL ) if header : writer . writerow ( [ "Timestamp" , "Tile Address" , "Property Name" , "Value" ] ) for entry in self . changes : writer . writerow ( [ entry . time , entry . tile , entry . property , entry . string_value ] ) | Save this list of changes as a csv file at out_path . | 158 | 15 |
21,906 | def generate ( env ) : global PDFTeXAction if PDFTeXAction is None : PDFTeXAction = SCons . Action . Action ( '$PDFTEXCOM' , '$PDFTEXCOMSTR' ) global PDFLaTeXAction if PDFLaTeXAction is None : PDFLaTeXAction = SCons . Action . Action ( "$PDFLATEXCOM" , "$PDFLATEXCOMSTR" ) global PDFTeXLaTeXAction if PDFTeXLaTeXAction is None : PDFTeXLaTeXAction = SCons . Action . Action ( PDFTeXLaTeXFunction , strfunction = SCons . Tool . tex . TeXLaTeXStrFunction ) env . AppendUnique ( LATEXSUFFIXES = SCons . Tool . LaTeXSuffixes ) from . import pdf pdf . generate ( env ) bld = env [ 'BUILDERS' ] [ 'PDF' ] bld . add_action ( '.tex' , PDFTeXLaTeXAction ) bld . add_emitter ( '.tex' , SCons . Tool . tex . tex_pdf_emitter ) # Add the epstopdf builder after the pdftex builder # so pdftex is the default for no source suffix pdf . generate2 ( env ) SCons . Tool . tex . generate_common ( env ) | Add Builders and construction variables for pdftex to an Environment . | 279 | 13 |
21,907 | def stop ( self ) : for tile in self . _tiles . values ( ) : tile . signal_stop ( ) for tile in self . _tiles . values ( ) : tile . wait_stopped ( ) super ( TileBasedVirtualDevice , self ) . stop ( ) | Stop running this virtual device including any worker threads . | 60 | 10 |
21,908 | def SetCacheMode ( mode ) : global cache_mode if mode == "auto" : cache_mode = AUTO elif mode == "force" : cache_mode = FORCE elif mode == "cache" : cache_mode = CACHE else : raise ValueError ( "SCons.SConf.SetCacheMode: Unknown mode " + mode ) | Set the Configure cache mode . mode must be one of auto force or cache . | 77 | 17 |
21,909 | def CreateConfigHBuilder ( env ) : action = SCons . Action . Action ( _createConfigH , _stringConfigH ) sconfigHBld = SCons . Builder . Builder ( action = action ) env . Append ( BUILDERS = { 'SConfigHBuilder' : sconfigHBld } ) for k in list ( _ac_config_hs . keys ( ) ) : env . SConfigHBuilder ( k , env . Value ( _ac_config_hs [ k ] ) ) | Called if necessary just before the building targets phase begins . | 110 | 12 |
21,910 | def CheckHeader ( context , header , include_quotes = '<>' , language = None ) : prog_prefix , hdr_to_check = createIncludesFromHeaders ( header , 1 , include_quotes ) res = SCons . Conftest . CheckHeader ( context , hdr_to_check , prog_prefix , language = language , include_quotes = include_quotes ) context . did_show_result = 1 return not res | A test for a C or C ++ header file . | 100 | 11 |
21,911 | def CheckLib ( context , library = None , symbol = "main" , header = None , language = None , autoadd = 1 ) : if library == [ ] : library = [ None ] if not SCons . Util . is_List ( library ) : library = [ library ] # ToDo: accept path for the library res = SCons . Conftest . CheckLib ( context , library , symbol , header = header , language = language , autoadd = autoadd ) context . did_show_result = 1 return not res | A test for a library . See also CheckLibWithHeader . Note that library may also be None to test whether the given symbol compiles without flags . | 117 | 31 |
21,912 | def CheckProg ( context , prog_name ) : res = SCons . Conftest . CheckProg ( context , prog_name ) context . did_show_result = 1 return res | Simple check if a program exists in the path . Returns the path for the application or None if not found . | 42 | 22 |
21,913 | def display_cached_string ( self , bi ) : if not isinstance ( bi , SConfBuildInfo ) : SCons . Warnings . warn ( SConfWarning , "The stored build information has an unexpected class: %s" % bi . __class__ ) else : self . display ( "The original builder output was:\n" + ( " |" + str ( bi . string ) ) . replace ( "\n" , "\n |" ) ) | Logs the original builder messages given the SConfBuildInfo instance bi . | 99 | 15 |
21,914 | def Define ( self , name , value = None , comment = None ) : lines = [ ] if comment : comment_str = "/* %s */" % comment lines . append ( comment_str ) if value is not None : define_str = "#define %s %s" % ( name , value ) else : define_str = "#define %s" % name lines . append ( define_str ) lines . append ( '' ) self . config_h_text = self . config_h_text + '\n' . join ( lines ) | Define a pre processor symbol name with the optional given value in the current config header . | 119 | 18 |
21,915 | def BuildNodes ( self , nodes ) : if self . logstream is not None : # override stdout / stderr to write in log file oldStdout = sys . stdout sys . stdout = self . logstream oldStderr = sys . stderr sys . stderr = self . logstream # the engine assumes the current path is the SConstruct directory ... old_fs_dir = SConfFS . getcwd ( ) old_os_dir = os . getcwd ( ) SConfFS . chdir ( SConfFS . Top , change_os_dir = 1 ) # Because we take responsibility here for writing out our # own .sconsign info (see SConfBuildTask.execute(), above), # we override the store_info() method with a null place-holder # so we really control how it gets written. for n in nodes : n . store_info = 0 if not hasattr ( n , 'attributes' ) : n . attributes = SCons . Node . Node . Attrs ( ) n . attributes . keep_targetinfo = 1 ret = 1 try : # ToDo: use user options for calc save_max_drift = SConfFS . get_max_drift ( ) SConfFS . set_max_drift ( 0 ) tm = SCons . Taskmaster . Taskmaster ( nodes , SConfBuildTask ) # we don't want to build tests in parallel jobs = SCons . Job . Jobs ( 1 , tm ) jobs . run ( ) for n in nodes : state = n . get_state ( ) if ( state != SCons . Node . executed and state != SCons . Node . up_to_date ) : # the node could not be built. we return 0 in this case ret = 0 finally : SConfFS . set_max_drift ( save_max_drift ) os . chdir ( old_os_dir ) SConfFS . chdir ( old_fs_dir , change_os_dir = 0 ) if self . logstream is not None : # restore stdout / stderr sys . stdout = oldStdout sys . stderr = oldStderr return ret | Tries to build the given nodes immediately . Returns 1 on success 0 on error . | 475 | 17 |
21,916 | def pspawn_wrapper ( self , sh , escape , cmd , args , env ) : return self . pspawn ( sh , escape , cmd , args , env , self . logstream , self . logstream ) | Wrapper function for handling piped spawns . | 45 | 9 |
21,917 | def _startup ( self ) : global _ac_config_logs global sconf_global global SConfFS self . lastEnvFs = self . env . fs self . env . fs = SConfFS self . _createDir ( self . confdir ) self . confdir . up ( ) . add_ignore ( [ self . confdir ] ) if self . logfile is not None and not dryrun : # truncate logfile, if SConf.Configure is called for the first time # in a build if self . logfile in _ac_config_logs : log_mode = "a" else : _ac_config_logs [ self . logfile ] = None log_mode = "w" fp = open ( str ( self . logfile ) , log_mode ) self . logstream = SCons . Util . Unbuffered ( fp ) # logfile may stay in a build directory, so we tell # the build system not to override it with a eventually # existing file with the same name in the source directory self . logfile . dir . add_ignore ( [ self . logfile ] ) tb = traceback . extract_stack ( ) [ - 3 - self . depth ] old_fs_dir = SConfFS . getcwd ( ) SConfFS . chdir ( SConfFS . Top , change_os_dir = 0 ) self . logstream . write ( 'file %s,line %d:\n\tConfigure(confdir = %s)\n' % ( tb [ 0 ] , tb [ 1 ] , str ( self . confdir ) ) ) SConfFS . chdir ( old_fs_dir ) else : self . logstream = None # we use a special builder to create source files from TEXT action = SCons . Action . Action ( _createSource , _stringSource ) sconfSrcBld = SCons . Builder . Builder ( action = action ) self . env . Append ( BUILDERS = { 'SConfSourceBuilder' : sconfSrcBld } ) self . config_h_text = _ac_config_hs . get ( self . config_h , "" ) self . active = 1 # only one SConf instance should be active at a time ... sconf_global = self | Private method . Set up logstream and set the environment variables necessary for a piped build | 501 | 18 |
21,918 | def _shutdown ( self ) : global sconf_global , _ac_config_hs if not self . active : raise SCons . Errors . UserError ( "Finish may be called only once!" ) if self . logstream is not None and not dryrun : self . logstream . write ( "\n" ) self . logstream . close ( ) self . logstream = None # remove the SConfSourceBuilder from the environment blds = self . env [ 'BUILDERS' ] del blds [ 'SConfSourceBuilder' ] self . env . Replace ( BUILDERS = blds ) self . active = 0 sconf_global = None if not self . config_h is None : _ac_config_hs [ self . config_h ] = self . config_h_text self . env . fs = self . lastEnvFs | Private method . Reset to non - piped spawn | 188 | 10 |
21,919 | def Result ( self , res ) : if isinstance ( res , str ) : text = res elif res : text = "yes" else : text = "no" if self . did_show_result == 0 : # Didn't show result yet, do it now. self . Display ( text + "\n" ) self . did_show_result = 1 | Inform about the result of the test . If res is not a string displays yes or no depending on whether res is evaluated as true or false . The result is only displayed when self . did_show_result is not set . | 77 | 47 |
21,920 | def linux_ver_normalize ( vstr ) : # Check for version number like 9.1.026: return 91.026 # XXX needs to be updated for 2011+ versions (like 2011.11.344 which is compiler v12.1.5) m = re . match ( r'([0-9]+)\.([0-9]+)\.([0-9]+)' , vstr ) if m : vmaj , vmin , build = m . groups ( ) return float ( vmaj ) * 10. + float ( vmin ) + float ( build ) / 1000. else : f = float ( vstr ) if is_windows : return f else : if f < 60 : return f * 10.0 else : return f | Normalize a Linux compiler version number . Intel changed from 80 to 9 . 0 in 2005 so we assume if the number is greater than 60 it s an old - style number and otherwise new - style . Always returns an old - style float like 80 or 90 for compatibility with Windows . Shades of Y2K! | 160 | 63 |
21,921 | def parse_node_descriptor ( desc , model ) : try : data = graph_node . parseString ( desc ) except ParseException : raise # TODO: Fix this to properly encapsulate the parse error stream_desc = u' ' . join ( data [ 'node' ] ) stream = DataStream . FromString ( stream_desc ) node = SGNode ( stream , model ) inputs = [ ] if 'input_a' in data : input_a = data [ 'input_a' ] stream_a = DataStreamSelector . FromString ( u' ' . join ( input_a [ 'input_stream' ] ) ) trigger_a = None if 'type' in input_a : trigger_a = InputTrigger ( input_a [ 'type' ] , input_a [ 'op' ] , int ( input_a [ 'reference' ] , 0 ) ) inputs . append ( ( stream_a , trigger_a ) ) if 'input_b' in data : input_a = data [ 'input_b' ] stream_a = DataStreamSelector . FromString ( u' ' . join ( input_a [ 'input_stream' ] ) ) trigger_a = None if 'type' in input_a : trigger_a = InputTrigger ( input_a [ 'type' ] , input_a [ 'op' ] , int ( input_a [ 'reference' ] , 0 ) ) inputs . append ( ( stream_a , trigger_a ) ) if 'combiner' in data and str ( data [ 'combiner' ] ) == u'||' : node . trigger_combiner = SGNode . OrTriggerCombiner else : node . trigger_combiner = SGNode . AndTriggerCombiner processing = data [ 'processor' ] return node , inputs , processing | Parse a string node descriptor . | 395 | 7 |
21,922 | def create_binary_descriptor ( descriptor ) : func_names = { 0 : 'copy_latest_a' , 1 : 'average_a' , 2 : 'copy_all_a' , 3 : 'sum_a' , 4 : 'copy_count_a' , 5 : 'trigger_streamer' , 6 : 'call_rpc' , 7 : 'subtract_afromb' } func_codes = { y : x for x , y in func_names . items ( ) } node , inputs , processing = parse_node_descriptor ( descriptor , DeviceModel ( ) ) func_code = func_codes . get ( processing ) if func_code is None : raise ArgumentError ( "Unknown processing function" , function = processing ) stream_a , trigger_a = inputs [ 0 ] stream_a = stream_a . encode ( ) if len ( inputs ) == 2 : stream_b , trigger_b = inputs [ 1 ] stream_b = stream_b . encode ( ) else : stream_b , trigger_b = 0xFFFF , None if trigger_a is None : trigger_a = TrueTrigger ( ) if trigger_b is None : trigger_b = TrueTrigger ( ) ref_a = 0 if isinstance ( trigger_a , InputTrigger ) : ref_a = trigger_a . reference ref_b = 0 if isinstance ( trigger_b , InputTrigger ) : ref_b = trigger_b . reference trigger_a = _create_binary_trigger ( trigger_a ) trigger_b = _create_binary_trigger ( trigger_b ) combiner = node . trigger_combiner bin_desc = struct . pack ( "<LLHHHBBBB2x" , ref_a , ref_b , node . stream . encode ( ) , stream_a , stream_b , func_code , trigger_a , trigger_b , combiner ) return bin_desc | Convert a string node descriptor into a 20 - byte binary descriptor . | 419 | 14 |
21,923 | def parse_binary_descriptor ( bindata ) : func_names = { 0 : 'copy_latest_a' , 1 : 'average_a' , 2 : 'copy_all_a' , 3 : 'sum_a' , 4 : 'copy_count_a' , 5 : 'trigger_streamer' , 6 : 'call_rpc' , 7 : 'subtract_afromb' } if len ( bindata ) != 20 : raise ArgumentError ( "Invalid binary node descriptor with incorrect size" , size = len ( bindata ) , expected = 20 , bindata = bindata ) a_trig , b_trig , stream_id , a_id , b_id , proc , a_cond , b_cond , trig_combiner = struct . unpack ( "<LLHHHBBBB2x" , bindata ) node_stream = DataStream . FromEncoded ( stream_id ) if a_id == 0xFFFF : raise ArgumentError ( "Invalid binary node descriptor with invalid first input" , input_selector = a_id ) a_selector = DataStreamSelector . FromEncoded ( a_id ) a_trigger = _process_binary_trigger ( a_trig , a_cond ) b_selector = None b_trigger = None if b_id != 0xFFFF : b_selector = DataStreamSelector . FromEncoded ( b_id ) b_trigger = _process_binary_trigger ( b_trig , b_cond ) if trig_combiner == SGNode . AndTriggerCombiner : comb = '&&' elif trig_combiner == SGNode . OrTriggerCombiner : comb = '||' else : raise ArgumentError ( "Invalid trigger combiner in binary node descriptor" , combiner = trig_combiner ) if proc not in func_names : raise ArgumentError ( "Unknown processing function" , function_id = proc , known_functions = func_names ) func_name = func_names [ proc ] # Handle one input nodes if b_selector is None : return '({} {}) => {} using {}' . format ( a_selector , a_trigger , node_stream , func_name ) return '({} {} {} {} {}) => {} using {}' . format ( a_selector , a_trigger , comb , b_selector , b_trigger , node_stream , func_name ) | Convert a binary node descriptor into a string descriptor . | 532 | 11 |
21,924 | def _process_binary_trigger ( trigger_value , condition ) : ops = { 0 : ">" , 1 : "<" , 2 : ">=" , 3 : "<=" , 4 : "==" , 5 : 'always' } sources = { 0 : 'value' , 1 : 'count' } encoded_source = condition & 0b1 encoded_op = condition >> 1 oper = ops . get ( encoded_op , None ) source = sources . get ( encoded_source , None ) if oper is None : raise ArgumentError ( "Unknown operation in binary trigger" , condition = condition , operation = encoded_op , known_ops = ops ) if source is None : raise ArgumentError ( "Unknown value source in binary trigger" , source = source , known_sources = sources ) if oper == 'always' : return TrueTrigger ( ) return InputTrigger ( source , oper , trigger_value ) | Create an InputTrigger object . | 192 | 6 |
21,925 | def _create_binary_trigger ( trigger ) : ops = { 0 : ">" , 1 : "<" , 2 : ">=" , 3 : "<=" , 4 : "==" , 5 : 'always' } op_codes = { y : x for x , y in ops . items ( ) } source = 0 if isinstance ( trigger , TrueTrigger ) : op_code = op_codes [ 'always' ] elif isinstance ( trigger , FalseTrigger ) : raise ArgumentError ( "Cannot express a never trigger in binary descriptor" , trigger = trigger ) else : op_code = op_codes [ trigger . comp_string ] if trigger . use_count : source = 1 return ( op_code << 1 ) | source | Create an 8 - bit binary trigger from an InputTrigger TrueTrigger FalseTrigger . | 158 | 16 |
21,926 | def _try_assign_utc_time ( self , raw_time , time_base ) : # Check if the raw time is encoded UTC since y2k or just uptime if raw_time != IOTileEvent . InvalidRawTime and ( raw_time & ( 1 << 31 ) ) : y2k_offset = self . raw_time ^ ( 1 << 31 ) return self . _Y2KReference + datetime . timedelta ( seconds = y2k_offset ) if time_base is not None : return time_base + datetime . timedelta ( seconds = raw_time ) return None | Try to assign a UTC time to this reading . | 133 | 10 |
21,927 | def asdict ( self ) : timestamp_str = None if self . reading_time is not None : timestamp_str = self . reading_time . isoformat ( ) return { 'stream' : self . stream , 'device_timestamp' : self . raw_time , 'streamer_local_id' : self . reading_id , 'timestamp' : timestamp_str , 'value' : self . value } | Encode the data in this reading into a dictionary . | 91 | 11 |
21,928 | def asdict ( self ) : return { 'stream' : self . stream , 'device_timestamp' : self . raw_time , 'streamer_local_id' : self . reading_id , 'timestamp' : self . reading_time , 'extra_data' : self . summary_data , 'data' : self . raw_data } | Encode the data in this event into a dictionary . | 78 | 11 |
21,929 | def save ( self , path ) : data = self . encode ( ) with open ( path , "wb" ) as out : out . write ( data ) | Save a binary copy of this report | 33 | 7 |
21,930 | def serialize ( self ) : info = { } info [ 'received_time' ] = self . received_time info [ 'encoded_report' ] = bytes ( self . encode ( ) ) # Handle python 2 / python 3 differences report_format = info [ 'encoded_report' ] [ 0 ] if not isinstance ( report_format , int ) : report_format = ord ( report_format ) info [ 'report_format' ] = report_format # Report format is the first byte of the encoded report info [ 'origin' ] = self . origin return info | Turn this report into a dictionary that encodes all information including received timestamp | 124 | 14 |
21,931 | def get_contents ( self ) : childsigs = [ n . get_csig ( ) for n in self . children ( ) ] return '' . join ( childsigs ) | The contents of an alias is the concatenation of the content signatures of all its sources . | 40 | 19 |
21,932 | def generate ( env ) : import SCons . Tool import SCons . Tool . cc static_obj , shared_obj = SCons . Tool . createObjBuilders ( env ) for suffix in CXXSuffixes : static_obj . add_action ( suffix , SCons . Defaults . CXXAction ) shared_obj . add_action ( suffix , SCons . Defaults . ShCXXAction ) static_obj . add_emitter ( suffix , SCons . Defaults . StaticObjectEmitter ) shared_obj . add_emitter ( suffix , SCons . Defaults . SharedObjectEmitter ) SCons . Tool . cc . add_common_cc_variables ( env ) if 'CXX' not in env : env [ 'CXX' ] = env . Detect ( compilers ) or compilers [ 0 ] env [ 'CXXFLAGS' ] = SCons . Util . CLVar ( '' ) env [ 'CXXCOM' ] = '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES' env [ 'SHCXX' ] = '$CXX' env [ 'SHCXXFLAGS' ] = SCons . Util . CLVar ( '$CXXFLAGS' ) env [ 'SHCXXCOM' ] = '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES' env [ 'CPPDEFPREFIX' ] = '-D' env [ 'CPPDEFSUFFIX' ] = '' env [ 'INCPREFIX' ] = '-I' env [ 'INCSUFFIX' ] = '' env [ 'SHOBJSUFFIX' ] = '.os' env [ 'OBJSUFFIX' ] = '.o' env [ 'STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME' ] = 0 env [ 'CXXFILESUFFIX' ] = '.cc' | Add Builders and construction variables for Visual Age C ++ compilers to an Environment . | 451 | 17 |
21,933 | def link_to_storage ( self , sensor_log ) : if self . walker is not None : self . _sensor_log . destroy_walker ( self . walker ) self . walker = None self . walker = sensor_log . create_walker ( self . selector ) self . _sensor_log = sensor_log | Attach this DataStreamer to an underlying SensorLog . | 74 | 10 |
21,934 | def triggered ( self , manual = False ) : if self . walker is None : raise InternalError ( "You can only check if a streamer is triggered if you create it with a SensorLog" ) if not self . automatic and not manual : return False return self . has_data ( ) | Check if this streamer should generate a report . | 62 | 10 |
21,935 | def build_report ( self , device_id , max_size = None , device_uptime = 0 , report_id = None , auth_chain = None ) : if self . walker is None or self . index is None : raise InternalError ( "You can only build a report with a DataStreamer if you create it with a SensorLog and a streamer index" ) if self . requires_signing ( ) and auth_chain is None : raise ArgumentError ( "You must pass an auth chain to sign this report." ) if self . requires_id ( ) and report_id is None : raise ArgumentError ( "You must pass a report_id to serialize this report" ) if self . format == 'individual' : reading = self . walker . pop ( ) highest_id = reading . reading_id if self . report_type == 'telegram' : return StreamerReport ( IndividualReadingReport . FromReadings ( device_id , [ reading ] ) , 1 , highest_id ) elif self . report_type == 'broadcast' : return StreamerReport ( BroadcastReport . FromReadings ( device_id , [ reading ] , device_uptime ) , 1 , highest_id ) elif self . format == 'hashedlist' : max_readings = ( max_size - 20 - 24 ) // 16 if max_readings <= 0 : raise InternalError ( "max_size is too small to hold even a single reading" , max_size = max_size ) readings = [ ] highest_id = 0 try : while len ( readings ) < max_readings : reading = self . walker . pop ( ) readings . append ( reading ) if reading . reading_id > highest_id : highest_id = reading . reading_id except StreamEmptyError : if len ( readings ) == 0 : raise return StreamerReport ( SignedListReport . FromReadings ( device_id , readings , report_id = report_id , selector = self . selector . encode ( ) , streamer = self . index , sent_timestamp = device_uptime ) , len ( readings ) , highest_id ) raise InternalError ( "Streamer report format or type is not supported currently" , report_format = self . format , report_type = self . report_type ) | Build a report with all of the readings in this streamer . | 494 | 13 |
21,936 | def matches ( self , address , name = None ) : if self . controller : return address == 8 return self . address == address | Check if this slot identifier matches the given tile . | 27 | 10 |
21,937 | def FromString ( cls , desc ) : desc = str ( desc ) if desc == u'controller' : return SlotIdentifier ( controller = True ) words = desc . split ( ) if len ( words ) != 2 or words [ 0 ] != u'slot' : raise ArgumentError ( u"Illegal slot identifier" , descriptor = desc ) try : slot_id = int ( words [ 1 ] , 0 ) except ValueError : raise ArgumentError ( u"Could not convert slot identifier to number" , descriptor = desc , number = words [ 1 ] ) return SlotIdentifier ( slot = slot_id ) | Create a slot identifier from a string description . | 130 | 9 |
21,938 | def FromEncoded ( cls , bindata ) : if len ( bindata ) != 8 : raise ArgumentError ( "Invalid binary slot descriptor with invalid length" , length = len ( bindata ) , expected = 8 , data = bindata ) slot , match_op = struct . unpack ( "<B6xB" , bindata ) match_name = cls . KNOWN_MATCH_CODES . get ( match_op ) if match_name is None : raise ArgumentError ( "Unknown match operation specified in binary slot descriptor" , operation = match_op , known_match_ops = cls . KNOWN_MATCH_CODES ) if match_name == 'match_controller' : return SlotIdentifier ( controller = True ) if match_name == 'match_slot' : return SlotIdentifier ( slot = slot ) raise ArgumentError ( "Unsupported match operation in binary slot descriptor" , match_op = match_name ) | Create a slot identifier from an encoded binary descriptor . | 206 | 10 |
21,939 | def encode ( self ) : slot = 0 match_op = self . KNOWN_MATCH_NAMES [ 'match_controller' ] if not self . controller : slot = self . slot match_op = self . KNOWN_MATCH_NAMES [ 'match_slot' ] return struct . pack ( "<B6xB" , slot , match_op ) | Encode this slot identifier into a binary descriptor . | 80 | 10 |
21,940 | def _scons_syntax_error ( e ) : etype , value , tb = sys . exc_info ( ) lines = traceback . format_exception_only ( etype , value ) for line in lines : sys . stderr . write ( line + '\n' ) sys . exit ( 2 ) | Handle syntax errors . Print out a message and show where the error occurred . | 71 | 15 |
21,941 | def find_deepest_user_frame ( tb ) : tb . reverse ( ) # find the deepest traceback frame that is not part # of SCons: for frame in tb : filename = frame [ 0 ] if filename . find ( os . sep + 'SCons' + os . sep ) == - 1 : return frame return tb [ 0 ] | Find the deepest stack frame that is not part of SCons . | 78 | 13 |
21,942 | def _scons_user_error ( e ) : global print_stacktrace etype , value , tb = sys . exc_info ( ) if print_stacktrace : traceback . print_exception ( etype , value , tb ) filename , lineno , routine , dummy = find_deepest_user_frame ( traceback . extract_tb ( tb ) ) sys . stderr . write ( "\nscons: *** %s\n" % value ) sys . stderr . write ( 'File "%s", line %d, in %s\n' % ( filename , lineno , routine ) ) sys . exit ( 2 ) | Handle user errors . Print out a message and a description of the error along with the line number and routine where it occured . The file and line number will be the deepest stack frame that is not part of SCons itself . | 145 | 46 |
21,943 | def _scons_user_warning ( e ) : etype , value , tb = sys . exc_info ( ) filename , lineno , routine , dummy = find_deepest_user_frame ( traceback . extract_tb ( tb ) ) sys . stderr . write ( "\nscons: warning: %s\n" % e ) sys . stderr . write ( 'File "%s", line %d, in %s\n' % ( filename , lineno , routine ) ) | Handle user warnings . Print out a message and a description of the warning along with the line number and routine where it occured . The file and line number will be the deepest stack frame that is not part of SCons itself . | 113 | 46 |
21,944 | def _SConstruct_exists ( dirname = '' , repositories = [ ] , filelist = None ) : if not filelist : filelist = [ 'SConstruct' , 'Sconstruct' , 'sconstruct' ] for file in filelist : sfile = os . path . join ( dirname , file ) if os . path . isfile ( sfile ) : return sfile if not os . path . isabs ( sfile ) : for rep in repositories : if os . path . isfile ( os . path . join ( rep , sfile ) ) : return sfile return None | This function checks that an SConstruct file exists in a directory . If so it returns the path of the file . By default it checks the current directory . | 127 | 31 |
21,945 | def make_ready ( self ) : SCons . Taskmaster . OutOfDateTask . make_ready ( self ) if self . out_of_date and self . options . debug_explain : explanation = self . out_of_date [ 0 ] . explain ( ) if explanation : sys . stdout . write ( "scons: " + explanation ) | Make a task ready for execution | 77 | 6 |
21,946 | def _unpack_version ( tag_data ) : tag = tag_data & ( ( 1 << 20 ) - 1 ) version_data = tag_data >> 20 major = ( version_data >> 6 ) & ( ( 1 << 6 ) - 1 ) minor = ( version_data >> 0 ) & ( ( 1 << 6 ) - 1 ) return ( tag , "{}.{}" . format ( major , minor ) ) | Parse a packed version info struct into tag and major . minor version . | 90 | 15 |
21,947 | def _handle_reset ( self ) : self . _logger . info ( "Resetting controller" ) self . _device . reset_count += 1 super ( ReferenceController , self ) . _handle_reset ( ) # Load in all default values into our config variables before streaming # updated data into them. self . reset_config_variables ( ) | Reset this controller tile . | 75 | 6 |
21,948 | async def _reset_vector ( self ) : # Send ourselves all of our config variable assignments config_rpcs = self . config_database . stream_matching ( 8 , self . name ) for rpc in config_rpcs : await self . _device . emulator . await_rpc ( * rpc ) config_assignments = self . latch_config_variables ( ) self . _logger . info ( "Latched config variables at reset for controller: %s" , config_assignments ) for system in self . _post_config_subsystems : try : system . clear_to_reset ( config_assignments ) await asyncio . wait_for ( system . initialize ( ) , timeout = 2.0 ) except : self . _logger . exception ( "Error initializing %s" , system ) raise self . _logger . info ( "Finished clearing controller to reset condition" ) # Now reset all of the tiles for address , _ in self . _device . iter_tiles ( include_controller = False ) : self . _logger . info ( "Sending reset signal to tile at address %d" , address ) try : await self . _device . emulator . await_rpc ( address , rpcs . RESET ) except TileNotFoundError : pass except : self . _logger . exception ( "Error sending reset signal to tile at address %d" , address ) raise self . initialized . set ( ) | Initialize the controller s subsystems inside the emulation thread . | 319 | 12 |
21,949 | def hardware_version ( self ) : hardware_string = self . hardware_string if not isinstance ( hardware_string , bytes ) : hardware_string = self . hardware_string . encode ( 'utf-8' ) if len ( hardware_string ) > 10 : self . _logger . warn ( "Truncating hardware string that was longer than 10 bytes: %s" , self . hardware_string ) if len ( hardware_string ) < 10 : hardware_string += b'\0' * ( 10 - len ( hardware_string ) ) return [ hardware_string ] | Get a hardware identification string . | 124 | 6 |
21,950 | def controller_info ( self ) : return [ self . _device . iotile_id , _pack_version ( * self . os_info ) , _pack_version ( * self . app_info ) ] | Get the controller UUID app tag and os tag . | 47 | 11 |
21,951 | def load_sgf ( self , sgf_data ) : if '\n' not in sgf_data : with open ( sgf_data , "r" ) as infile : sgf_data = infile . read ( ) model = DeviceModel ( ) parser = SensorGraphFileParser ( ) parser . parse_file ( data = sgf_data ) parser . compile ( model ) opt = SensorGraphOptimizer ( ) opt . optimize ( parser . sensor_graph , model = model ) sensor_graph = parser . sensor_graph self . _logger . info ( "Loading sensor_graph with %d nodes, %d streamers and %d configs" , len ( sensor_graph . nodes ) , len ( sensor_graph . streamers ) , len ( sensor_graph . config_database ) ) # Directly load the sensor_graph into our persisted storage self . sensor_graph . persisted_nodes = sensor_graph . dump_nodes ( ) self . sensor_graph . persisted_streamers = sensor_graph . dump_streamers ( ) self . sensor_graph . persisted_constants = [ ] for stream , value in sorted ( sensor_graph . constant_database . items ( ) , key = lambda x : x [ 0 ] . encode ( ) ) : reading = IOTileReading ( stream . encode ( ) , 0 , value ) self . sensor_graph . persisted_constants . append ( ( stream , reading ) ) self . sensor_graph . persisted_exists = True # Clear all config variables and load in those from this sgf file self . config_database . clear ( ) for slot in sorted ( sensor_graph . config_database , key = lambda x : x . encode ( ) ) : for conf_var , ( conf_type , conf_val ) in sorted ( sensor_graph . config_database [ slot ] . items ( ) ) : self . config_database . add_direct ( slot , conf_var , conf_type , conf_val ) # If we have an app tag and version set program them in app_tag = sensor_graph . metadata_database . get ( 'app_tag' ) app_version = sensor_graph . metadata_database . get ( 'app_version' ) if app_tag is not None : if app_version is None : app_version = "0.0" self . app_info = ( app_tag , app_version ) | Load persist a sensor_graph file . | 529 | 8 |
21,952 | def _parse_file ( self ) : # We need to set the CPU type to pull in the right register definitions # only preprocess the file (-E) and get rid of gcc extensions that aren't # supported in ISO C. args = utilities . build_includes ( self . arch . includes ( ) ) # args.append('-mcpu=%s' % self.arch.property('chip')) args . append ( '-E' ) args . append ( '-D__attribute__(x)=' ) args . append ( '-D__extension__=' ) self . ast = parse_file ( self . filepath , use_cpp = True , cpp_path = 'arm-none-eabi-gcc' , cpp_args = args ) | Preprocess and parse C file into an AST | 166 | 9 |
21,953 | def _clear_queue ( to_clear ) : while not to_clear . empty ( ) : try : to_clear . get ( False ) to_clear . task_done ( ) except queue . Empty : continue | Clear all items from a queue safely . | 46 | 8 |
21,954 | def finish ( self , status , response ) : self . response = binascii . hexlify ( response ) . decode ( 'utf-8' ) self . status = status self . runtime = monotonic ( ) - self . _start_time | Mark the end of a recorded RPC . | 55 | 8 |
21,955 | def serialize ( self ) : return "{},{: <26},{:2d},{:#06x},{:#04x},{:5.0f},{: <40},{: <40},{}" . format ( self . connection , self . start_stamp . isoformat ( ) , self . address , self . rpc_id , self . status , self . runtime * 1000 , self . call , self . response , self . error ) | Convert this recorded RPC into a string . | 101 | 9 |
21,956 | def scan ( self , wait = None ) : min_scan = self . adapter . get_config ( 'minimum_scan_time' , 0.0 ) probe_required = self . adapter . get_config ( 'probe_required' , False ) # Figure out how long and if we need to wait before returning our scan results wait_time = None elapsed = monotonic ( ) - self . _start_time if elapsed < min_scan : wait_time = min_scan - elapsed # If we need to probe for devices rather than letting them just bubble up, start the probe # and then use our min_scan_time to wait for them to arrive via the normal _on_scan event if probe_required : self . _loop . run_coroutine ( self . adapter . probe ( ) ) wait_time = min_scan # If an explicit wait is specified that overrides everything else if wait is not None : wait_time = wait if wait_time is not None : sleep ( wait_time ) to_remove = set ( ) now = monotonic ( ) with self . _scan_lock : for name , value in self . _scanned_devices . items ( ) : if value [ 'expiration_time' ] < now : to_remove . add ( name ) for name in to_remove : del self . _scanned_devices [ name ] devices = sorted ( self . _scanned_devices . values ( ) , key = lambda x : x [ 'uuid' ] ) return devices | Return the devices that have been found for this device adapter . | 325 | 12 |
21,957 | def connect ( self , uuid_value , wait = None ) : if self . connected : raise HardwareError ( "Cannot connect when we are already connected" ) if uuid_value not in self . _scanned_devices : self . scan ( wait = wait ) with self . _scan_lock : if uuid_value not in self . _scanned_devices : raise HardwareError ( "Could not find device to connect to by UUID" , uuid = uuid_value ) connstring = self . _scanned_devices [ uuid_value ] [ 'connection_string' ] self . connect_direct ( connstring ) | Connect to a specific device by its uuid | 138 | 9 |
21,958 | def connect_direct ( self , connection_string , no_rpc = False , force = False ) : if not force and self . connected : raise HardwareError ( "Cannot connect when we are already connected to '%s'" % self . connection_string ) self . _loop . run_coroutine ( self . adapter . connect ( 0 , connection_string ) ) try : if no_rpc : self . _logger . info ( "Not opening RPC interface on device %s" , self . connection_string ) else : self . _loop . run_coroutine ( self . adapter . open_interface ( 0 , 'rpc' ) ) except HardwareError as exc : self . _logger . exception ( "Error opening RPC interface on device %s" , connection_string ) self . _loop . run_coroutine ( self . adapter . disconnect ( 0 ) ) raise exc except Exception as exc : self . _logger . exception ( "Error opening RPC interface on device %s" , connection_string ) self . _loop . run_coroutine ( self . adapter . disconnect ( 0 ) ) raise HardwareError ( "Could not open RPC interface on device due to an exception: %s" % str ( exc ) ) from exc self . connected = True self . connection_string = connection_string self . connection_interrupted = False | Directly connect to a device using its stream specific connection string . | 286 | 13 |
21,959 | def disconnect ( self ) : if not self . connected : raise HardwareError ( "Cannot disconnect when we are not connected" ) # Close the streaming and tracing interfaces when we disconnect self . _reports = None self . _traces = None self . _loop . run_coroutine ( self . adapter . disconnect ( 0 ) ) self . connected = False self . connection_interrupted = False self . connection_string = None | Disconnect from the device that we are currently connected to . | 88 | 12 |
21,960 | def _try_reconnect ( self ) : try : if self . connection_interrupted : self . connect_direct ( self . connection_string , force = True ) self . connection_interrupted = False self . connected = True # Reenable streaming interface if that was open before as well if self . _reports is not None : self . _loop . run_coroutine ( self . adapter . open_interface ( 0 , 'streaming' ) ) # Reenable tracing interface if that was open before as well if self . _traces is not None : self . _loop . run_coroutine ( self . adapter . open_interface ( 0 , 'tracing' ) ) except HardwareError as exc : self . _logger . exception ( "Error reconnecting to device after an unexpected disconnect" ) raise HardwareError ( "Device disconnected unexpectedly and we could not reconnect" , reconnect_error = exc ) from exc | Try to recover an interrupted connection . | 191 | 7 |
21,961 | def send_rpc ( self , address , rpc_id , call_payload , timeout = 3.0 ) : if not self . connected : raise HardwareError ( "Cannot send an RPC if we are not in a connected state" ) if timeout is None : timeout = 3.0 status = - 1 payload = b'' recording = None if self . connection_interrupted : self . _try_reconnect ( ) if self . _record is not None : recording = _RecordedRPC ( self . connection_string , address , rpc_id , call_payload ) recording . start ( ) try : payload = self . _loop . run_coroutine ( self . adapter . send_rpc ( 0 , address , rpc_id , call_payload , timeout ) ) status , payload = pack_rpc_response ( payload , None ) except VALID_RPC_EXCEPTIONS as exc : status , payload = pack_rpc_response ( payload , exc ) if self . _record is not None : recording . finish ( status , payload ) self . _recording . append ( recording ) if self . connection_interrupted : self . _try_reconnect ( ) return unpack_rpc_response ( status , payload , rpc_id , address ) | Send an rpc to our connected device . | 276 | 9 |
21,962 | def send_highspeed ( self , data , progress_callback ) : if not self . connected : raise HardwareError ( "Cannot send a script if we are not in a connected state" ) if isinstance ( data , str ) and not isinstance ( data , bytes ) : raise ArgumentError ( "You must send bytes or bytearray to _send_highspeed" , type = type ( data ) ) if not isinstance ( data , bytes ) : data = bytes ( data ) try : self . _on_progress = progress_callback self . _loop . run_coroutine ( self . adapter . send_script ( 0 , data ) ) finally : self . _on_progress = None | Send a script to a device at highspeed reporting progress . | 149 | 12 |
21,963 | def enable_streaming ( self ) : if not self . connected : raise HardwareError ( "Cannot enable streaming if we are not in a connected state" ) if self . _reports is not None : _clear_queue ( self . _reports ) return self . _reports self . _reports = queue . Queue ( ) self . _loop . run_coroutine ( self . adapter . open_interface ( 0 , 'streaming' ) ) return self . _reports | Open the streaming interface and accumulate reports in a queue . | 100 | 12 |
21,964 | def enable_tracing ( self ) : if not self . connected : raise HardwareError ( "Cannot enable tracing if we are not in a connected state" ) if self . _traces is not None : _clear_queue ( self . _traces ) return self . _traces self . _traces = queue . Queue ( ) self . _loop . run_coroutine ( self . adapter . open_interface ( 0 , 'tracing' ) ) return self . _traces | Open the tracing interface and accumulate traces in a queue . | 105 | 11 |
21,965 | def enable_broadcasting ( self ) : if self . _broadcast_reports is not None : _clear_queue ( self . _broadcast_reports ) return self . _broadcast_reports self . _broadcast_reports = queue . Queue ( ) return self . _broadcast_reports | Begin accumulating broadcast reports received from all devices . | 64 | 9 |
21,966 | def enable_debug ( self ) : if not self . connected : raise HardwareError ( "Cannot enable debug if we are not in a connected state" ) self . _loop . run_coroutine ( self . adapter . open_interface ( 0 , 'debug' ) ) | Open the debug interface on the connected device . | 58 | 9 |
21,967 | def debug_command ( self , cmd , args = None , progress_callback = None ) : if args is None : args = { } try : self . _on_progress = progress_callback return self . _loop . run_coroutine ( self . adapter . debug ( 0 , cmd , args ) ) finally : self . _on_progress = None | Send a debug command to the connected device . | 75 | 9 |
21,968 | def close ( self ) : try : self . _loop . run_coroutine ( self . adapter . stop ( ) ) finally : self . _save_recording ( ) | Close this adapter stream . | 37 | 5 |
21,969 | def _on_scan ( self , info ) : device_id = info [ 'uuid' ] expiration_time = info . get ( 'validity_period' , 60 ) infocopy = deepcopy ( info ) infocopy [ 'expiration_time' ] = monotonic ( ) + expiration_time with self . _scan_lock : self . _scanned_devices [ device_id ] = infocopy | Callback called when a new device is discovered on this CMDStream | 93 | 13 |
21,970 | def _on_disconnect ( self ) : self . _logger . info ( "Connection to device %s was interrupted" , self . connection_string ) self . connection_interrupted = True | Callback when a device is disconnected unexpectedly . | 41 | 8 |
21,971 | def midl_emitter ( target , source , env ) : base , _ = SCons . Util . splitext ( str ( target [ 0 ] ) ) tlb = target [ 0 ] incl = base + '.h' interface = base + '_i.c' targets = [ tlb , incl , interface ] midlcom = env [ 'MIDLCOM' ] if midlcom . find ( '/proxy' ) != - 1 : proxy = base + '_p.c' targets . append ( proxy ) if midlcom . find ( '/dlldata' ) != - 1 : dlldata = base + '_data.c' targets . append ( dlldata ) return ( targets , source ) | Produces a list of outputs from the MIDL compiler | 156 | 11 |
21,972 | def generate ( env ) : env [ 'MIDL' ] = 'MIDL.EXE' env [ 'MIDLFLAGS' ] = SCons . Util . CLVar ( '/nologo' ) env [ 'MIDLCOM' ] = '$MIDL $MIDLFLAGS /tlb ${TARGETS[0]} /h ${TARGETS[1]} /iid ${TARGETS[2]} /proxy ${TARGETS[3]} /dlldata ${TARGETS[4]} $SOURCE 2> NUL' env [ 'BUILDERS' ] [ 'TypeLibrary' ] = midl_builder | Add Builders and construction variables for midl to an Environment . | 144 | 13 |
21,973 | def set_entry ( self , filename , obj ) : self . entries [ filename ] = obj self . dirty = True | Set the entry . | 25 | 4 |
21,974 | def write ( self , sync = 1 ) : if not self . dirty : return self . merge ( ) temp = os . path . join ( self . dir . get_internal_path ( ) , '.scons%d' % os . getpid ( ) ) try : file = open ( temp , 'wb' ) fname = temp except IOError : try : file = open ( self . sconsign , 'wb' ) fname = self . sconsign except IOError : return for key , entry in self . entries . items ( ) : entry . convert_to_sconsign ( ) pickle . dump ( self . entries , file , PICKLE_PROTOCOL ) file . close ( ) if fname != self . sconsign : try : mode = os . stat ( self . sconsign ) [ 0 ] os . chmod ( self . sconsign , 0o666 ) os . unlink ( self . sconsign ) except ( IOError , OSError ) : # Try to carry on in the face of either OSError # (things like permission issues) or IOError (disk # or network issues). If there's a really dangerous # issue, it should get re-raised by the calls below. pass try : os . rename ( fname , self . sconsign ) except OSError : # An OSError failure to rename may indicate something # like the directory has no write permission, but # the .sconsign file itself might still be writable, # so try writing on top of it directly. An IOError # here, or in any of the following calls, would get # raised, indicating something like a potentially # serious disk or network issue. open ( self . sconsign , 'wb' ) . write ( open ( fname , 'rb' ) . read ( ) ) os . chmod ( self . sconsign , mode ) try : os . unlink ( temp ) except ( IOError , OSError ) : pass | Write the . sconsign file to disk . | 431 | 10 |
21,975 | def generate ( env ) : link . generate ( env ) env [ 'LINK' ] = env . Detect ( linkers ) or 'cc' env [ 'SHLINKFLAGS' ] = SCons . Util . CLVar ( '$LINKFLAGS -shared' ) # __RPATH is set to $_RPATH in the platform specification if that # platform supports it. env [ 'RPATHPREFIX' ] = '-rpath ' env [ 'RPATHSUFFIX' ] = '' env [ '_RPATH' ] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}' | Add Builders and construction variables for MIPSPro to an Environment . | 145 | 14 |
21,976 | async def start ( self ) : await self . server . start ( ) self . port = self . server . port | Start the supervisor server . | 25 | 5 |
21,977 | async def prepare_conn ( self , conn ) : client_id = str ( uuid . uuid4 ( ) ) monitor = functools . partial ( self . send_event , client_id ) self . _logger . info ( "New client connection: %s" , client_id ) self . service_manager . add_monitor ( monitor ) self . clients [ client_id ] = dict ( connection = conn , monitor = monitor ) return client_id | Setup a new connection from a client . | 101 | 8 |
21,978 | async def teardown_conn ( self , context ) : client_id = context . user_data self . _logger . info ( "Tearing down client connection: %s" , client_id ) if client_id not in self . clients : self . _logger . warning ( "client_id %s did not exist in teardown_conn" , client_id ) else : del self . clients [ client_id ] | Teardown a connection from a client . | 96 | 9 |
21,979 | async def send_event ( self , client_id , service_name , event_name , event_info , directed_client = None ) : if directed_client is not None and directed_client != client_id : return client_info = self . clients . get ( client_id ) if client_info is None : self . _logger . warning ( "Attempted to send event to invalid client id: %s" , client_id ) return conn = client_info [ 'connection' ] event = dict ( service = service_name ) if event_info is not None : event [ 'payload' ] = event_info self . _logger . debug ( "Sending event: %s" , event ) await self . server . send_event ( conn , event_name , event ) | Send an event to a client . | 173 | 7 |
21,980 | async def send_rpc ( self , msg , _context ) : service = msg . get ( 'name' ) rpc_id = msg . get ( 'rpc_id' ) payload = msg . get ( 'payload' ) timeout = msg . get ( 'timeout' ) response_id = await self . service_manager . send_rpc_command ( service , rpc_id , payload , timeout ) try : result = await self . service_manager . rpc_results . get ( response_id , timeout = timeout ) except asyncio . TimeoutError : self . _logger . warning ( "RPC 0x%04X on service %s timed out after %f seconds" , rpc_id , service , timeout ) result = dict ( result = 'timeout' , response = b'' ) return result | Send an RPC to a service on behalf of a client . | 180 | 12 |
21,981 | async def respond_rpc ( self , msg , _context ) : rpc_id = msg . get ( 'response_uuid' ) result = msg . get ( 'result' ) payload = msg . get ( 'response' ) self . service_manager . send_rpc_response ( rpc_id , result , payload ) | Respond to an RPC previously sent to a service . | 74 | 11 |
21,982 | async def set_agent ( self , msg , context ) : service = msg . get ( 'name' ) client = context . user_data self . service_manager . set_agent ( service , client ) | Mark a client as the RPC agent for a service . | 45 | 11 |
21,983 | async def service_messages ( self , msg , _context ) : msgs = self . service_manager . service_messages ( msg . get ( 'name' ) ) return [ x . to_dict ( ) for x in msgs ] | Get all messages for a service . | 54 | 7 |
21,984 | async def service_headline ( self , msg , _context ) : headline = self . service_manager . service_headline ( msg . get ( 'name' ) ) if headline is not None : headline = headline . to_dict ( ) return headline | Get the headline for a service . | 55 | 7 |
21,985 | def generate ( env ) : static_obj , shared_obj = SCons . Tool . createObjBuilders ( env ) for suffix in ASSuffixes : static_obj . add_action ( suffix , SCons . Defaults . ASAction ) static_obj . add_emitter ( suffix , SCons . Defaults . StaticObjectEmitter ) for suffix in ASPPSuffixes : static_obj . add_action ( suffix , SCons . Defaults . ASPPAction ) static_obj . add_emitter ( suffix , SCons . Defaults . StaticObjectEmitter ) env [ 'AS' ] = 'nasm' env [ 'ASFLAGS' ] = SCons . Util . CLVar ( '' ) env [ 'ASPPFLAGS' ] = '$ASFLAGS' env [ 'ASCOM' ] = '$AS $ASFLAGS -o $TARGET $SOURCES' env [ 'ASPPCOM' ] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o $TARGET $SOURCES' | Add Builders and construction variables for nasm to an Environment . | 247 | 13 |
21,986 | def generate ( env ) : link . generate ( env ) env [ 'SHLINKFLAGS' ] = SCons . Util . CLVar ( '$LINKFLAGS -G' ) env [ 'RPATHPREFIX' ] = '-R' env [ 'RPATHSUFFIX' ] = '' env [ '_RPATH' ] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}' # Support for versioned libraries link . _setup_versioned_lib_variables ( env , tool = 'sunlink' , use_soname = True ) env [ 'LINKCALLBACKS' ] = link . _versioned_lib_callbacks ( ) | Add Builders and construction variables for Forte to an Environment . | 163 | 13 |
21,987 | def _get_short_description ( self ) : if self . description is None : return None lines = [ x for x in self . description . split ( '\n' ) ] if len ( lines ) == 1 : return lines [ 0 ] elif len ( lines ) >= 3 and lines [ 1 ] == '' : return lines [ 0 ] return None | Return the first line of a multiline description | 75 | 10 |
21,988 | def _get_long_description ( self ) : if self . description is None : return None lines = [ x for x in self . description . split ( '\n' ) ] if len ( lines ) == 1 : return None elif len ( lines ) >= 3 and lines [ 1 ] == '' : return '\n' . join ( lines [ 2 : ] ) return self . description | Return the subsequent lines of a multiline description | 83 | 10 |
21,989 | def wrap_lines ( self , text , indent_level , indent_size = 4 ) : indent = ' ' * indent_size * indent_level lines = text . split ( '\n' ) wrapped_lines = [ ] for line in lines : if line == '' : wrapped_lines . append ( line ) else : wrapped_lines . append ( indent + line ) return '\n' . join ( wrapped_lines ) | Indent a multiline string | 91 | 7 |
21,990 | def format_name ( self , name , indent_size = 4 ) : name_block = '' if self . short_desc is None : name_block += name + '\n' else : name_block += name + ': ' + self . short_desc + '\n' if self . long_desc is not None : name_block += self . wrap_lines ( self . long_desc , 1 , indent_size = indent_size ) name_block += '\n' return name_block | Format the name of this verifier | 110 | 7 |
21,991 | def trim_whitespace ( self , text ) : lines = text . split ( '\n' ) new_lines = [ x . lstrip ( ) for x in lines ] return '\n' . join ( new_lines ) | Remove leading whitespace from each line of a multiline string | 51 | 13 |
21,992 | def __extend_targets_sources ( target , source ) : if not SCons . Util . is_List ( target ) : target = [ target ] if not source : source = target [ : ] elif not SCons . Util . is_List ( source ) : source = [ source ] if len ( target ) < len ( source ) : target . extend ( source [ len ( target ) : ] ) return target , source | Prepare the lists of target and source files . | 96 | 10 |
21,993 | def __select_builder ( lxml_builder , libxml2_builder , cmdline_builder ) : if prefer_xsltproc : return cmdline_builder if not has_libxml2 : # At the moment we prefer libxml2 over lxml, the latter can lead # to conflicts when installed together with libxml2. if has_lxml : return lxml_builder else : return cmdline_builder return libxml2_builder | Selects a builder based on which Python modules are present . | 95 | 12 |
21,994 | def __ensure_suffix ( t , suffix ) : tpath = str ( t ) if not tpath . endswith ( suffix ) : return tpath + suffix return t | Ensure that the target t has the given suffix . | 39 | 11 |
21,995 | def __ensure_suffix_stem ( t , suffix ) : tpath = str ( t ) if not tpath . endswith ( suffix ) : stem = tpath tpath += suffix return tpath , stem else : stem , ext = os . path . splitext ( tpath ) return t , stem | Ensure that the target t has the given suffix and return the file s stem . | 68 | 17 |
21,996 | def __create_output_dir ( base_dir ) : root , tail = os . path . split ( base_dir ) dir = None if tail : if base_dir . endswith ( '/' ) : dir = base_dir else : dir = root else : if base_dir . endswith ( '/' ) : dir = base_dir if dir and not os . path . isdir ( dir ) : os . makedirs ( dir ) | Ensure that the output directory base_dir exists . | 98 | 11 |
21,997 | def __detect_cl_tool ( env , chainkey , cdict , cpriority = None ) : if env . get ( chainkey , '' ) == '' : clpath = '' if cpriority is None : cpriority = cdict . keys ( ) for cltool in cpriority : if __debug_tool_location : print ( "DocBook: Looking for %s" % cltool ) clpath = env . WhereIs ( cltool ) if clpath : if __debug_tool_location : print ( "DocBook: Found:%s" % cltool ) env [ chainkey ] = clpath if not env [ chainkey + 'COM' ] : env [ chainkey + 'COM' ] = cdict [ cltool ] break | Helper function picks a command line tool from the list and initializes its environment variables . | 160 | 17 |
21,998 | def _detect ( env ) : global prefer_xsltproc if env . get ( 'DOCBOOK_PREFER_XSLTPROC' , '' ) : prefer_xsltproc = True if ( ( not has_libxml2 and not has_lxml ) or ( prefer_xsltproc ) ) : # Try to find the XSLT processors __detect_cl_tool ( env , 'DOCBOOK_XSLTPROC' , xsltproc_com , xsltproc_com_priority ) __detect_cl_tool ( env , 'DOCBOOK_XMLLINT' , xmllint_com ) __detect_cl_tool ( env , 'DOCBOOK_FOP' , fop_com , [ 'fop' , 'xep' , 'jw' ] ) | Detect all the command line tools that we might need for creating the requested output formats . | 181 | 17 |
21,999 | def __xml_scan ( node , env , path , arg ) : # Does the node exist yet? if not os . path . isfile ( str ( node ) ) : return [ ] if env . get ( 'DOCBOOK_SCANENT' , '' ) : # Use simple pattern matching for system entities..., no support # for recursion yet. contents = node . get_text_contents ( ) return sentity_re . findall ( contents ) xsl_file = os . path . join ( scriptpath , 'utils' , 'xmldepend.xsl' ) if not has_libxml2 or prefer_xsltproc : if has_lxml and not prefer_xsltproc : from lxml import etree xsl_tree = etree . parse ( xsl_file ) doc = etree . parse ( str ( node ) ) result = doc . xslt ( xsl_tree ) depfiles = [ x . strip ( ) for x in str ( result ) . splitlines ( ) if x . strip ( ) != "" and not x . startswith ( "<?xml " ) ] return depfiles else : # Try to call xsltproc xsltproc = env . subst ( "$DOCBOOK_XSLTPROC" ) if xsltproc and xsltproc . endswith ( 'xsltproc' ) : result = env . backtick ( ' ' . join ( [ xsltproc , xsl_file , str ( node ) ] ) ) depfiles = [ x . strip ( ) for x in str ( result ) . splitlines ( ) if x . strip ( ) != "" and not x . startswith ( "<?xml " ) ] return depfiles else : # Use simple pattern matching, there is currently no support # for xi:includes... contents = node . get_text_contents ( ) return include_re . findall ( contents ) styledoc = libxml2 . parseFile ( xsl_file ) style = libxslt . parseStylesheetDoc ( styledoc ) doc = libxml2 . readFile ( str ( node ) , None , libxml2 . XML_PARSE_NOENT ) result = style . applyStylesheet ( doc , None ) depfiles = [ ] for x in str ( result ) . splitlines ( ) : if x . strip ( ) != "" and not x . startswith ( "<?xml " ) : depfiles . extend ( x . strip ( ) . split ( ) ) style . freeStylesheet ( ) doc . freeDoc ( ) result . freeDoc ( ) return depfiles | Simple XML file scanner detecting local images and XIncludes as implicit dependencies . | 564 | 14 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.