idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
22,000
def __xinclude_libxml2(target, source, env):
    """Resolve XIncludes using the libxml2 module.

    Reads source[0] with entity substitution enabled, expands all
    XInclude directives in place, and writes the result to target[0].
    """
    src_path = str(source[0])
    dst_path = str(target[0])

    parsed = libxml2.readFile(src_path, None, libxml2.XML_PARSE_NOENT)
    parsed.xincludeProcessFlags(libxml2.XML_PARSE_NOENT)
    parsed.saveFile(dst_path)
    # libxml2 documents are not garbage collected; free explicitly.
    parsed.freeDoc()
    return None
Resolving XIncludes using the libxml2 module .
87
11
22,001
def __xinclude_lxml(target, source, env):
    """Resolve XIncludes using the lxml module.

    Parses source[0], expands all XInclude directives in place and
    writes the result to target[0].  Serialization failures are
    deliberately ignored (best-effort write), but no longer via a bare
    ``except`` that would also swallow KeyboardInterrupt/SystemExit.
    """
    from lxml import etree

    doc = etree.parse(str(source[0]))
    doc.xinclude()
    try:
        doc.write(str(target[0]), xml_declaration=True,
                  encoding="UTF-8", pretty_print=True)
    except Exception:
        # Best effort: keep the build going even if the write fails.
        pass
    return None
Resolving XIncludes using the lxml module .
82
10
22,002
def DocbookHtml(env, target, source=None, *args, **kw):
    """A pseudo-Builder providing a Docbook toolchain for HTML output."""
    # Normalize targets/sources into parallel lists.
    target, source = __extend_targets_sources(target, source)

    # Select the default XSL stylesheet unless the caller supplied one.
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_HTML', ['html', 'docbook.xsl'])

    # Pick the first available XSLT toolchain (lxml, libxml2 or xsltproc).
    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)

    result = []
    for tgt, src in zip(target, source):
        nodes = builder.__call__(env, __ensure_suffix(tgt, '.html'), src, **kw)
        env.Depends(nodes, kw['DOCBOOK_XSL'])
        result.extend(nodes)

    return result
A pseudo - Builder providing a Docbook toolchain for HTML output .
207
14
22,003
def DocbookMan(env, target, source=None, *args, **kw):
    """A pseudo-Builder providing a Docbook toolchain for Man page output.

    For each source document the refmeta/refnamediv elements are inspected
    to derive the output file name(s) and the manual volume number.  When
    XML parsing fails, a simple regex fallback is used; when nothing can be
    extracted, the source stem (or the given target) is used instead.

    Fixes vs. original: the bare ``except:`` is narrowed to ``Exception``,
    the fallback file read uses a context manager so the handle cannot
    leak, and the source filename is computed only once.
    """
    # Init list of targets/sources
    target, source = __extend_targets_sources(target, source)
    # Init XSL stylesheet
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_MAN', ['manpages', 'docbook.xsl'])
    # Setup builder
    __builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)

    # Create targets
    result = []
    for t, s in zip(target, source):
        volnum = "1"
        outfiles = []
        srcfile = __ensure_suffix(str(s), '.xml')
        if os.path.isfile(srcfile):
            try:
                import xml.dom.minidom
                dom = xml.dom.minidom.parse(srcfile)
                # Extract volume number, default is 1
                for node in dom.getElementsByTagName('refmeta'):
                    for vol in node.getElementsByTagName('manvolnum'):
                        volnum = __get_xml_text(vol)
                # Extract output filenames
                for node in dom.getElementsByTagName('refnamediv'):
                    for ref in node.getElementsByTagName('refname'):
                        outfiles.append(__get_xml_text(ref) + '.' + volnum)
            except Exception:
                # Malformed XML: fall back to simple regex parsing.
                # ``except Exception`` (not a bare except) so that
                # KeyboardInterrupt/SystemExit still propagate.
                with open(srcfile, 'r') as f:
                    content = f.read()
                for m in re_manvolnum.finditer(content):
                    volnum = m.group(1)
                for m in re_refname.finditer(content):
                    outfiles.append(m.group(1) + '.' + volnum)

            if not outfiles:
                # Use stem of the source file
                spath = str(s)
                if not spath.endswith('.xml'):
                    outfiles.append(spath + '.' + volnum)
                else:
                    stem, ext = os.path.splitext(spath)
                    outfiles.append(stem + '.' + volnum)
        else:
            # We have to completely rely on the given target name
            outfiles.append(t)

        __builder.__call__(env, outfiles[0], s, **kw)
        env.Depends(outfiles[0], kw['DOCBOOK_XSL'])
        result.append(outfiles[0])

        if len(outfiles) > 1:
            # Additional refnames become cleanup targets of the first one.
            env.Clean(outfiles[0], outfiles[1:])

    return result
A pseudo - Builder providing a Docbook toolchain for Man page output .
630
15
22,004
def DocbookSlidesPdf(env, target, source=None, *args, **kw):
    """A pseudo-Builder providing a Docbook toolchain for PDF slides output."""
    target, source = __extend_targets_sources(target, source)
    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESPDF', ['slides', 'fo', 'plain.xsl'])
    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)

    result = []
    for tgt, src in zip(target, source):
        # Two-stage pipeline: XML -> FO via XSLT, then FO -> PDF via fop.
        tgt, stem = __ensure_suffix_stem(tgt, '.pdf')
        fo_nodes = builder.__call__(env, stem + '.fo', src, **kw)
        env.Depends(fo_nodes, kw['DOCBOOK_XSL'])
        result.extend(fo_nodes)
        result.extend(__fop_builder.__call__(env, tgt, fo_nodes, **kw))

    return result
A pseudo - Builder providing a Docbook toolchain for PDF slides output .
256
15
22,005
def DocbookSlidesHtml(env, target, source=None, *args, **kw):
    """A pseudo-Builder providing a Docbook toolchain for HTML slides output."""
    # Normalize arguments: when no source is given, the lone target is
    # actually the source and the output defaults to index.html.
    if not SCons.Util.is_List(target):
        target = [target]
    if not source:
        source = target
        target = ['index.html']
    elif not SCons.Util.is_List(source):
        source = [source]

    __init_xsl_stylesheet(kw, env, '$DOCBOOK_DEFAULT_XSL_SLIDESHTML', ['slides', 'html', 'plain.xsl'])

    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)

    # Create the output directory if a base_dir keyword was given.
    base_dir = kw.get('base_dir', '')
    if base_dir:
        __create_output_dir(base_dir)

    # Build the main HTML page.
    nodes = builder.__call__(env, __ensure_suffix(str(target[0]), '.html'), source[0], **kw)
    env.Depends(nodes, kw['DOCBOOK_XSL'])
    result = list(nodes)

    # Register the generated supporting pages for cleanup.
    env.Clean(nodes, [os.path.join(base_dir, 'toc.html')] +
              glob.glob(os.path.join(base_dir, 'foil*.html')))

    return result
A pseudo - Builder providing a Docbook toolchain for HTML slides output .
341
15
22,006
def DocbookXInclude(env, target, source, *args, **kw):
    """A pseudo-Builder for resolving XIncludes in a separate processing step."""
    # Normalize targets/sources into parallel lists.
    target, source = __extend_targets_sources(target, source)

    # Pick the first available XInclude processor.
    builder = __select_builder(__xinclude_lxml_builder, __xinclude_libxml2_builder, __xmllint_builder)

    results = []
    for tgt, src in zip(target, source):
        results.extend(builder.__call__(env, tgt, src, **kw))

    return results
A pseudo - Builder for resolving XIncludes in a separate processing step .
131
14
22,007
def DocbookXslt(env, target, source=None, *args, **kw):
    """A pseudo-Builder applying a simple XSL transformation to the input file."""
    target, source = __extend_targets_sources(target, source)

    # The stylesheet comes from the 'xsl' keyword, defaulting to transform.xsl.
    kw['DOCBOOK_XSL'] = kw.get('xsl', 'transform.xsl')

    builder = __select_builder(__lxml_builder, __libxml2_builder, __xsltproc_builder)

    result = []
    for tgt, src in zip(target, source):
        nodes = builder.__call__(env, tgt, src, **kw)
        env.Depends(nodes, kw['DOCBOOK_XSL'])
        result.extend(nodes)

    return result
A pseudo - Builder applying a simple XSL transformation to the input file .
183
15
22,008
def generate(env):
    """Add Builders and construction variables for docbook to an Environment.

    Sets (only as defaults, so user settings win) the stylesheet names,
    executable paths, flags and command lines used by the Docbook
    pseudo-builders, then detects the installed tools and attaches the
    Docbook* methods to the environment.
    """
    env.SetDefault(
        # Default names for customized XSL stylesheets
        DOCBOOK_DEFAULT_XSL_EPUB='',
        DOCBOOK_DEFAULT_XSL_HTML='',
        DOCBOOK_DEFAULT_XSL_HTMLCHUNKED='',
        DOCBOOK_DEFAULT_XSL_HTMLHELP='',
        DOCBOOK_DEFAULT_XSL_PDF='',
        DOCBOOK_DEFAULT_XSL_MAN='',
        DOCBOOK_DEFAULT_XSL_SLIDESPDF='',
        DOCBOOK_DEFAULT_XSL_SLIDESHTML='',

        # Paths to the detected executables
        DOCBOOK_XSLTPROC='',
        DOCBOOK_XMLLINT='',
        DOCBOOK_FOP='',

        # Additional flags for the text processors
        DOCBOOK_XSLTPROCFLAGS=SCons.Util.CLVar(''),
        DOCBOOK_XMLLINTFLAGS=SCons.Util.CLVar(''),
        DOCBOOK_FOPFLAGS=SCons.Util.CLVar(''),
        DOCBOOK_XSLTPROCPARAMS=SCons.Util.CLVar(''),

        # Default command lines for the detected executables
        DOCBOOK_XSLTPROCCOM=xsltproc_com['xsltproc'],
        DOCBOOK_XMLLINTCOM=xmllint_com['xmllint'],
        DOCBOOK_FOPCOM=fop_com['fop'],

        # Screen output for the text processors
        DOCBOOK_XSLTPROCCOMSTR=None,
        DOCBOOK_XMLLINTCOMSTR=None,
        DOCBOOK_FOPCOMSTR=None,
    )

    # Probe for the external tools (xsltproc, xmllint, fop, ...).
    _detect(env)

    # Attach the pseudo-builders as environment methods.
    env.AddMethod(DocbookEpub, "DocbookEpub")
    env.AddMethod(DocbookHtml, "DocbookHtml")
    env.AddMethod(DocbookHtmlChunked, "DocbookHtmlChunked")
    env.AddMethod(DocbookHtmlhelp, "DocbookHtmlhelp")
    env.AddMethod(DocbookPdf, "DocbookPdf")
    env.AddMethod(DocbookMan, "DocbookMan")
    env.AddMethod(DocbookSlidesPdf, "DocbookSlidesPdf")
    env.AddMethod(DocbookSlidesHtml, "DocbookSlidesHtml")
    env.AddMethod(DocbookXInclude, "DocbookXInclude")
    env.AddMethod(DocbookXslt, "DocbookXslt")
Add Builders and construction variables for docbook to an Environment .
580
13
22,009
def save(self):
    """Update the configuration file on disk with the current contents of
    self.contents.  Previous contents are overwritten.

    Raises:
        InternalError: if the file could not be written.  The original
            IOError is chained (``from e``) so the root cause stays
            visible in tracebacks — this chaining is the fix over the
            original, which discarded the context.
    """
    try:
        with open(self.path, "w") as f:
            f.writelines(self.contents)
    except IOError as e:
        raise InternalError("Could not write RCFile contents",
                            name=self.name, path=self.path,
                            error_message=str(e)) from e
Update the configuration file on disk with the current contents of self . contents . Previous contents are overwritten .
71
21
22,010
async def probe_message(self, _message, context):
    """Handle a probe message.

    The probe is issued on behalf of the client identified by the
    context's user_data.
    """
    await self.probe(context.user_data)
Handle a probe message .
33
5
22,011
async def connect_message(self, message, context):
    """Handle a connect message."""
    client_id = context.user_data
    await self.connect(client_id, message.get('connection_string'))
Handle a connect message .
50
5
22,012
async def disconnect_message(self, message, context):
    """Handle a disconnect message."""
    client_id = context.user_data
    await self.disconnect(client_id, message.get('connection_string'))
Handle a disconnect message .
50
5
22,013
async def open_interface_message(self, message, context):
    """Handle an open_interface message."""
    client_id = context.user_data
    conn_string = message.get('connection_string')
    interface = message.get('interface')

    await self.open_interface(client_id, conn_string, interface)
Handle an open_interface message .
66
7
22,014
async def close_interface_message(self, message, context):
    """Handle a close_interface message."""
    client_id = context.user_data
    conn_string = message.get('connection_string')
    interface = message.get('interface')

    await self.close_interface(client_id, conn_string, interface)
Handle a close_interface message .
66
7
22,015
async def send_rpc_message(self, message, context):
    """Handle a send_rpc message.

    Calls the RPC described by the message on behalf of the client and
    returns a dict with the packed status and a base64-encoded response
    payload.  Expected RPC-level failures are folded into the packed
    status; adapter/server errors propagate; anything else is wrapped in
    a ServerCommandError.
    """
    conn_string = message.get('connection_string')
    rpc_id = message.get('rpc_id')
    address = message.get('address')
    timeout = message.get('timeout')
    payload = message.get('payload')
    client_id = context.user_data

    self._logger.debug("Calling RPC %d:0x%04X with payload %s on %s",
                       address, rpc_id, payload, conn_string)

    response = bytes()
    err = None
    try:
        response = await self.send_rpc(client_id, conn_string, address,
                                       rpc_id, payload, timeout=timeout)
    except VALID_RPC_EXCEPTIONS as internal_err:
        # Expected RPC failures are reported via the packed status word,
        # not raised to the caller.
        err = internal_err
    except (DeviceAdapterError, DeviceServerError):
        # Infrastructure errors are the caller's problem; re-raise as-is.
        raise
    except Exception as internal_err:
        # Anything unexpected is logged with traceback and normalized
        # into a ServerCommandError, chained to the original cause.
        self._logger.warning("Unexpected exception calling RPC %d:0x%04x",
                             address, rpc_id, exc_info=True)
        raise ServerCommandError('send_rpc', str(internal_err)) from internal_err

    # pack_rpc_response encodes either the successful response bytes or
    # the captured RPC-level error into (status, response).
    status, response = pack_rpc_response(response, err)
    return {'status': status, 'payload': base64.b64encode(response)}
Handle a send_rpc message .
307
8
22,016
async def send_script_message(self, message, context):
    """Handle a send_script message.

    Only single-fragment scripts are supported; anything else raises a
    DeviceServerError.
    """
    client_id = context.user_data
    conn_string = message.get('connection_string')
    script = message.get('script')

    if message.get('fragment_count') != 1:
        raise DeviceServerError(client_id, conn_string, 'send_script',
                                'fragmented scripts are not yet supported')

    await self.send_script(client_id, conn_string, script)
Handle a send_script message .
112
7
22,017
async def debug_command_message(self, message, context):
    """Handle a debug message."""
    client_id = context.user_data
    conn_string = message.get('connection_string')
    command = message.get('command')
    args = message.get('args')

    return await self.debug(client_id, conn_string, command, args)
Handle a debug message .
80
5
22,018
async def client_event_handler(self, client_id, event_tuple, user_data):
    """Forward an event on behalf of a client.

    Translates an internal (conn_string, event_name, event) tuple into
    the corresponding websocket notification and sends it.  Unknown
    event names are logged and dropped; a closed connection is logged
    rather than raised.
    """
    #TODO: Support sending disconnection events
    conn_string, event_name, event = event_tuple

    if event_name == 'report':
        # Reports carry binary data that must be base64-encoded for transport.
        report = event.serialize()
        report['encoded_report'] = base64.b64encode(report['encoded_report'])
        msg_payload = dict(connection_string=conn_string, serialized_report=report)
        msg_name = OPERATIONS.NOTIFY_REPORT
    elif event_name == 'trace':
        # Trace events are raw bytes; encode them directly.
        encoded_payload = base64.b64encode(event)
        msg_payload = dict(connection_string=conn_string, payload=encoded_payload)
        msg_name = OPERATIONS.NOTIFY_TRACE
    elif event_name == 'progress':
        msg_payload = dict(connection_string=conn_string,
                           operation=event.get('operation'),
                           done_count=event.get('finished'),
                           total_count=event.get('total'))
        msg_name = OPERATIONS.NOTIFY_PROGRESS
    elif event_name == 'device_seen':
        # The event is already a serializable payload; forward verbatim.
        msg_payload = event
        msg_name = OPERATIONS.NOTIFY_DEVICE_FOUND
    elif event_name == 'broadcast':
        report = event.serialize()
        report['encoded_report'] = base64.b64encode(report['encoded_report'])
        msg_payload = dict(connection_string=conn_string, serialized_report=report)
        msg_name = OPERATIONS.NOTIFY_BROADCAST
    else:
        self._logger.debug("Not forwarding unknown event over websockets: %s", event_tuple)
        return

    try:
        self._logger.debug("Sending event %s: %s", msg_name, msg_payload)
        await self.server.send_event(user_data, msg_name, msg_payload)
    except websockets.exceptions.ConnectionClosed:
        # The client went away; dropping the notification is fine.
        self._logger.debug("Could not send notification because connection was closed for client %s", client_id)
Forward an event on behalf of a client .
500
9
22,019
def generate(env):
    """Add Builders and construction variables for sun f90 compiler to an Environment."""
    add_all_to_env(env)

    # Fall back to a generic 'f90' command when no compiler is detected.
    detected = env.Detect(compilers) or 'f90'
    env['FORTRAN'] = detected
    env['F90'] = detected
    env['SHFORTRAN'] = '$FORTRAN'
    env['SHF90'] = '$F90'
    # Shared objects need position-independent code (-KPIC).
    env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
    env['SHF90FLAGS'] = SCons.Util.CLVar('$F90FLAGS -KPIC')
Add Builders and construction variables for sun f90 compiler to an Environment .
148
15
22,020
def Builder(**kw):
    """A factory for builder objects.

    Normalizes the keyword arguments (action/generator, emitter) into
    the forms BuilderBase expects, then wraps the result in a
    CompositeBuilder when the action was a suffix->command dictionary.

    Raises:
        UserError: if both an action and a generator are supplied, or if
            a string emitter does not name an Environment variable.
    """
    composite = None
    if 'generator' in kw:
        if 'action' in kw:
            raise UserError("You must not specify both an action and a generator.")
        # A generator becomes a CommandGeneratorAction; the raw kw is removed.
        kw['action'] = SCons.Action.CommandGeneratorAction(kw['generator'], {})
        del kw['generator']
    elif 'action' in kw:
        source_ext_match = kw.get('source_ext_match', 1)
        if 'source_ext_match' in kw:
            del kw['source_ext_match']
        if SCons.Util.is_Dict(kw['action']):
            # Dict actions map source suffixes to commands; the composite
            # generator also supplies the src_suffix list.
            composite = DictCmdGenerator(kw['action'], source_ext_match)
            kw['action'] = SCons.Action.CommandGeneratorAction(composite, {})
            kw['src_suffix'] = composite.src_suffixes()
        else:
            kw['action'] = SCons.Action.Action(kw['action'])

    if 'emitter' in kw:
        emitter = kw['emitter']
        if SCons.Util.is_String(emitter):
            # This allows users to pass in an Environment
            # variable reference (like "$FOO") as an emitter.
            # We will look in that Environment variable for
            # a callable to use as the actual emitter.
            var = SCons.Util.get_environment_var(emitter)
            if not var:
                raise UserError("Supplied emitter '%s' does not appear to refer to an Environment variable" % emitter)
            kw['emitter'] = EmitterProxy(var)
        elif SCons.Util.is_Dict(emitter):
            kw['emitter'] = DictEmitter(emitter)
        elif SCons.Util.is_List(emitter):
            kw['emitter'] = ListEmitter(emitter)

    result = BuilderBase(**kw)

    if not composite is None:
        result = CompositeBuilder(result, composite)

    return result
A factory for builder objects .
485
6
22,021
def _node_errors(builder, env, tlist, slist):
    """Validate that the lists of target and source nodes are legal for
    this builder and environment.  Raise errors or issue warnings as
    appropriate.

    Raises:
        UserError: for conflicting builders, conflicting environments
            with different actions, overlapping target lists, or more
            than one source for a single-source builder.
    """
    # First, figure out if there are any errors in the way the targets
    # were specified.
    for t in tlist:
        if t.side_effect:
            raise UserError("Multiple ways to build the same target were specified for: %s" % t)
        if t.has_explicit_builder():
            # Check for errors when the environments are different
            # No error if environments are the same Environment instance
            if (not t.env is None and not t.env is env and
                    # Check OverrideEnvironment case - no error if wrapped Environments
                    # are the same instance, and overrides lists match
                    not (getattr(t.env, '__subject', 0) is getattr(env, '__subject', 1) and
                         getattr(t.env, 'overrides', 0) == getattr(env, 'overrides', 1) and
                         not builder.multi)):
                action = t.builder.action
                t_contents = t.builder.action.get_contents(tlist, slist, t.env)
                contents = builder.action.get_contents(tlist, slist, env)

                # Same action under both environments -> warn; different
                # actions -> hard error.
                if t_contents == contents:
                    msg = "Two different environments were specified for target %s,\n\tbut they appear to have the same action: %s" % (t, action.genstring(tlist, slist, t.env))
                    SCons.Warnings.warn(SCons.Warnings.DuplicateEnvironmentWarning, msg)
                else:
                    try:
                        msg = "Two environments with different actions were specified for the same target: %s\n(action 1: %s)\n(action 2: %s)" % (t, t_contents.decode('utf-8'), contents.decode('utf-8'))
                    except UnicodeDecodeError as e:
                        # Binary action contents: fall back to a message
                        # without the undecodable action text.
                        msg = "Two environments with different actions were specified for the same target: %s" % t
                    raise UserError(msg)
            if builder.multi:
                if t.builder != builder:
                    msg = "Two different builders (%s and %s) were specified for the same target: %s" % (t.builder.get_name(env), builder.get_name(env), t)
                    raise UserError(msg)
                # TODO(batch): list constructed each time!
                if t.get_executor().get_all_targets() != tlist:
                    msg = "Two different target lists have a target in common: %s (from %s and from %s)" % (t, list(map(str, t.get_executor().get_all_targets())), list(map(str, tlist)))
                    raise UserError(msg)
            elif t.sources != slist:
                msg = "Multiple ways to build the same target were specified for: %s (from %s and from %s)" % (t, list(map(str, t.sources)), list(map(str, slist)))
                raise UserError(msg)

    if builder.single_source:
        if len(slist) > 1:
            raise UserError("More than one source given for single-source builder: targets=%s sources=%s" % (list(map(str, tlist)), list(map(str, slist))))
Validate that the lists of target and source nodes are legal for this builder and environment . Raise errors or issue warnings as appropriate .
743
26
22,022
def is_a_Builder(obj):
    """Returns True if the specified obj is one of our Builder classes.

    Accepts BuilderBase and CompositeBuilder instances as well as any
    callable (a user-supplied builder function).  Uses a single
    isinstance() call with a tuple instead of two chained checks.
    """
    return isinstance(obj, (BuilderBase, CompositeBuilder)) or callable(obj)
Returns True if the specified obj is one of our Builder classes .
36
13
22,023
def get_name(self, env):
    """Attempts to get the name of the Builder.

    Looks the builder up in env['BUILDERS'] and returns the key it is
    registered under; failing that, falls back to self.name, and
    finally to the class name.
    """
    try:
        bldrs = env['BUILDERS']
        position = list(bldrs.values()).index(self)
        return list(bldrs.keys())[position]
    except (AttributeError, KeyError, TypeError, ValueError):
        # Not registered (or env is not a real Environment): fall back.
        try:
            return self.name
        except AttributeError:
            return str(self.__class__)
Attempts to get the name of the Builder .
89
9
22,024
def _create_nodes(self, env, target=None, source=None):
    """Create and return lists of target and source nodes.

    Normalizes source suffixes, derives targets from sources when no
    target was given, and runs the builder's emitter (if any), taking
    care to temporarily mark brand-new targets as derived so the
    emitter's str() calls see them correctly.

    Raises:
        UserError: if no target was given and the source node cannot
            produce one.
    """
    src_suf = self.get_src_suffix(env)

    target_factory = env.get_factory(self.target_factory)
    source_factory = env.get_factory(self.source_factory)

    source = self._adjustixes(source, None, src_suf)
    slist = env.arg2nodes(source, source_factory)

    pre = self.get_prefix(env, slist)
    suf = self.get_suffix(env, slist)

    if target is None:
        try:
            t_from_s = slist[0].target_from_source
        except AttributeError:
            raise UserError("Do not know how to create a target from source `%s'" % slist[0])
        except IndexError:
            # No sources at all: no targets either.
            tlist = []
        else:
            splitext = lambda S: self.splitext(S, env)
            tlist = [t_from_s(pre, suf, splitext)]
    else:
        target = self._adjustixes(target, pre, suf, self.ensure_suffix)
        tlist = env.arg2nodes(target, target_factory, target=target, source=source)

    if self.emitter:
        # The emitter is going to do str(node), but because we're
        # being called *from* a builder invocation, the new targets
        # don't yet have a builder set on them and will look like
        # source files.  Fool the emitter's str() calls by setting
        # up a temporary builder on the new targets.
        new_targets = []
        for t in tlist:
            if not t.is_derived():
                t.builder_set(self)
                new_targets.append(t)

        orig_tlist = tlist[:]
        orig_slist = slist[:]

        target, source = self.emitter(target=tlist, source=slist, env=env)

        # Now delete the temporary builders that we attached to any
        # new targets, so that _node_errors() doesn't do weird stuff
        # to them because it thinks they already have builders.
        for t in new_targets:
            if t.builder is self:
                # Only delete the temporary builder if the emitter
                # didn't change it on us.
                t.builder_set(None)

        # Have to call arg2nodes yet again, since it is legal for
        # emitters to spit out strings as well as Node instances.
        tlist = env.arg2nodes(target, target_factory, target=orig_tlist, source=orig_slist)
        slist = env.arg2nodes(source, source_factory, target=orig_tlist, source=orig_slist)

    return tlist, slist
Create and return lists of target and source nodes .
625
10
22,025
def _get_sdict ( self , env ) : sdict = { } for bld in self . get_src_builders ( env ) : for suf in bld . src_suffixes ( env ) : sdict [ suf ] = bld return sdict
Returns a dictionary mapping all of the source suffixes of all src_builders of this Builder to the underlying Builder that should be called first .
60
28
22,026
def get_src_builders(self, env):
    """Returns the list of source Builders for this Builder.

    String entries in self.src_builder are resolved lazily through
    env['BUILDERS']; names that are not registered are silently
    skipped.  Results are memoized per environment (keyed by id(env)).
    """
    memo_key = id(env)
    memo_dict = self._memo.setdefault('get_src_builders', {})
    if memo_key in memo_dict:
        return memo_dict[memo_key]

    builders = []
    for bld in self.src_builder:
        if SCons.Util.is_String(bld):
            try:
                bld = env['BUILDERS'][bld]
            except KeyError:
                # Not (yet) registered; skip it.
                continue
        builders.append(bld)

    memo_dict[memo_key] = builders
    return builders
Returns the list of source Builders for this Builder .
156
11
22,027
def subst_src_suffixes(self, env):
    """The suffix list may contain construction variable expansions so
    we have to evaluate the individual strings.  To avoid doing this
    over and over we memoize the results for each construction
    environment (keyed by id(env)).
    """
    memo_key = id(env)
    memo_dict = self._memo.setdefault('subst_src_suffixes', {})
    if memo_key not in memo_dict:
        memo_dict[memo_key] = [env.subst(sfx) for sfx in self.src_suffix]
    return memo_dict[memo_key]
The suffix list may contain construction variable expansions so we have to evaluate the individual strings . To avoid doing this over and over we memoize the results for each construction environment .
128
34
22,028
def src_suffixes(self, env):
    """Returns the list of source suffixes for all src_builders of this
    Builder.

    Own (substituted) suffixes come first; suffixes contributed by
    source builders are appended in order, skipping duplicates.
    """
    suffixes = self.subst_src_suffixes(env)
    seen = dict.fromkeys(suffixes, 1)
    for bld in self.get_src_builders(env):
        for sfx in bld.src_suffixes(env):
            if sfx not in seen:
                seen[sfx] = 1
                suffixes.append(sfx)
    return suffixes
Returns the list of source suffixes for all src_builders of this Builder .
97
16
22,029
def generate(env):
    """Add Builders and construction variables for Visual Age linker to
    an Environment."""
    # Start from the generic link tool setup.
    link.generate(env)

    env['SMARTLINKFLAGS'] = smart_linkflags
    env['LINKFLAGS'] = SCons.Util.CLVar('$SMARTLINKFLAGS')
    # -qmkshrobj produces a shared object; -qsuppress=1501-218 silences
    # that informational message.
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -qmkshrobj -qsuppress=1501-218')
    env['SHLIBSUFFIX'] = '.a'
Add Builders and construction variables for Visual Age linker to an Environment .
115
15
22,030
def _parse_target(target):
    """Parse a binary targeting information structure.

    The 8-byte structure is ``<B6xB``: a slot number, 6 pad bytes and a
    match operation.

    Raises:
        ArgumentError: on a wrong length or an unsupported match op.
    """
    if len(target) != 8:
        raise ArgumentError("Invalid targeting data length", expected=8, length=len(target))

    slot, match_op = struct.unpack("<B6xB", target)

    if match_op == _MATCH_CONTROLLER:
        return {'controller': True, 'slot': 0}
    if match_op == _MATCH_SLOT:
        return {'controller': False, 'slot': slot}

    raise ArgumentError("Unsupported complex targeting specified", match_op=match_op)
Parse a binary targeting information structure .
135
8
22,031
def put_task(self, func, args, response):
    """Place a task onto the RPC queue.

    The task is enqueued without blocking as a (func, args, response)
    tuple.
    """
    item = (func, args, response)
    self._rpc_queue.put_nowait(item)
Place a task onto the RPC queue .
35
8
22,032
def put_rpc(self, address, rpc_id, arg_payload, response):
    """Place an RPC onto the RPC queue.

    The RPC is enqueued without blocking as an
    (address, rpc_id, arg_payload, response) tuple.
    """
    item = (address, rpc_id, arg_payload, response)
    self._rpc_queue.put_nowait(item)
Place an RPC onto the RPC queue .
52
8
22,033
async def stop(self):
    """Stop the rpc queue from inside the event loop.

    Cancels the background rpc task (if any), waits for it to finish
    and swallows the resulting CancelledError.
    """
    task = self._rpc_task
    if task is None:
        return

    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass

    self._rpc_task = None
Stop the rpc queue from inside the event loop .
59
11
22,034
def add_segment(self, address, data, overwrite=False):
    """Add a contiguous segment of data to this memory map.

    Only disjoint (non-overlapping) segments are currently supported;
    the ``overwrite`` flag is accepted but not yet used.

    Raises:
        ArgumentError: if the segment is not disjoint.
    """
    length = len(data)

    seg_type = self._classify_segment(address, length)
    if not isinstance(seg_type, DisjointSegment):
        raise ArgumentError("Unsupported segment type")

    new_segment = MemorySegment(address, address + length - 1, length, bytearray(data))
    self._segments.append(new_segment)
Add a contiguous segment of data to this memory map
106
10
22,035
def _create_slice(self, key):
    """Create a slice in a memory segment corresponding to a key.

    For a slice key, returns (segment, start_offset, end_offset) where
    both addresses must fall inside the same segment; for an int key,
    returns (segment, offset, None).

    Raises:
        ArgumentError: for a step != 1, addresses outside any segment,
            a slice spanning segments, or an unsupported key type.
    """
    if isinstance(key, slice):
        step = key.step
        if step is None:
            step = 1
        if step != 1:
            raise ArgumentError("You cannot slice with a step that is not equal to 1", step=key.step)

        start_address = key.start
        # stop is exclusive; the last touched address is stop - 1.
        end_address = key.stop - 1

        start_i, start_seg = self._find_address(start_address)
        end_i, _end_seg = self._find_address(end_address)

        # Both endpoints must resolve to the same segment index.
        if start_seg is None or start_i != end_i:
            raise ArgumentError("Slice would span invalid data in memory", start_address=start_address, end_address=end_address)

        block_offset = start_address - start_seg.start_address
        block_length = end_address - start_address + 1

        return start_seg, block_offset, block_offset + block_length
    elif isinstance(key, int):
        start_i, start_seg = self._find_address(key)
        if start_seg is None:
            raise ArgumentError("Requested invalid address", address=key)

        return start_seg, key - start_seg.start_address, None
    else:
        raise ArgumentError("Unknown type of address key", address=key)
Create a slice in a memory segment corresponding to a key .
300
12
22,036
def _classify_segment(self, address, length):
    """Determine how a new data segment fits into our existing world.

    Raises:
        ArgumentError: if either endpoint of the new segment falls
            inside an existing segment (overlap is unsupported).
    """
    end_address = address + length - 1

    _, seg_at_start = self._find_address(address)
    _, seg_at_end = self._find_address(end_address)

    if seg_at_start is not None or seg_at_end is not None:
        raise ArgumentError("Overlapping segments are not yet supported", address=address, length=length)

    return DisjointSegment()
Determine how a new data segment fits into our existing world
106
13
22,037
def generate(env):
    """Add Builders and construction variables for ifort to an Environment."""
    # ifort supports Fortran 90 and Fortran 95
    # Additionally, ifort recognizes more file extensions.
    fscan = FortranScan("FORTRANPATH")
    SCons.Tool.SourceFileScanner.add_scanner('.i', fscan)
    SCons.Tool.SourceFileScanner.add_scanner('.i90', fscan)

    # Register the extra preprocessed-source extensions without
    # clobbering any user-provided suffix lists.
    if 'FORTRANFILESUFFIXES' not in env:
        env['FORTRANFILESUFFIXES'] = ['.i']
    else:
        env['FORTRANFILESUFFIXES'].append('.i')

    if 'F90FILESUFFIXES' not in env:
        env['F90FILESUFFIXES'] = ['.i90']
    else:
        env['F90FILESUFFIXES'].append('.i90')

    add_all_to_env(env)

    fc = 'ifort'

    # All dialects are compiled by the same ifort executable.
    for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
        env['%s' % dialect] = fc
        env['SH%s' % dialect] = '$%s' % dialect
        if env['PLATFORM'] == 'posix':
            env['SH%sFLAGS' % dialect] = SCons.Util.CLVar('$%sFLAGS -fPIC' % dialect)

    if env['PLATFORM'] == 'win32':
        # On Windows, the ifort compiler specifies the object on the
        # command line with -object:, not -o.  Massage the necessary
        # command-line construction variables.
        for dialect in ['F77', 'F90', 'FORTRAN', 'F95']:
            for var in ['%sCOM' % dialect, '%sPPCOM' % dialect,
                        'SH%sCOM' % dialect, 'SH%sPPCOM' % dialect]:
                env[var] = env[var].replace('-o $TARGET', '-object:$TARGET')
        env['FORTRANMODDIRPREFIX'] = "/module:"
    else:
        env['FORTRANMODDIRPREFIX'] = "-module "
Add Builders and construction variables for ifort to an Environment .
505
13
22,038
def run(self, postfunc=lambda: None):
    """Run the jobs.

    Signal handlers are installed before the job starts and restored
    afterwards; *postfunc* runs even when job.start() raises, and it
    runs before the handlers are reset.
    """
    self._setup_sig_handler()
    try:
        self.job.start()
    finally:
        # Cleanup always executes, in this order: postfunc, then reset.
        postfunc()
        self._reset_sig_handler()
Run the jobs .
50
4
22,039
def expired(self):
    """Boolean property if this action has expired.

    Returns False when no timeout is configured; otherwise True once
    the elapsed monotonic time exceeds the timeout.
    """
    if self.timeout is None:
        return False

    elapsed = monotonic() - self.start_time
    return elapsed > self.timeout
Boolean property if this action has expired
31
8
22,040
def begin_connection(self, connection_id, internal_id, callback, context, timeout):
    """Asynchronously begin a connection attempt.

    A non-synchronous ConnectionAction is queued; the callback is
    invoked when the action is processed.
    """
    action_data = {
        'callback': callback,
        'connection_id': connection_id,
        'internal_id': internal_id,
        'context': context,
    }
    self._actions.put(ConnectionAction('begin_connection', action_data,
                                       timeout=timeout, sync=False))
Asynchronously begin a connection attempt
87
7
22,041
def begin_operation(self, conn_or_internal_id, op_name, callback, timeout):
    """Begin an operation on a connection.

    A non-synchronous ConnectionAction is queued; the callback is
    invoked when the action is processed.
    """
    action_data = {
        'id': conn_or_internal_id,
        'callback': callback,
        'operation_name': op_name,
    }
    self._actions.put(ConnectionAction('begin_operation', action_data,
                                       timeout=timeout, sync=False))
Begin an operation on a connection
85
6
22,042
def _begin_operation_action ( self , action ) : conn_key = action . data [ 'id' ] callback = action . data [ 'callback' ] if self . _get_connection_state ( conn_key ) != self . Idle : callback ( conn_key , self . id , False , 'Cannot start operation, connection is not idle' ) return data = self . _get_connection ( conn_key ) data [ 'state' ] = self . InProgress data [ 'microstate' ] = action . data [ 'operation_name' ] data [ 'action' ] = action
Begin an attempted operation .
129
5
22,043
def allow_exception(self, exc_class):
    """Allow raising this class of exceptions from commands.

    The exception class is registered under its class name so it can be
    looked up later by name.
    """
    self._allowed_exceptions[exc_class.__name__] = exc_class
Allow raising this class of exceptions from commands .
36
9
22,044
async def start(self, name="websocket_client"):
    """Connect to the websocket server.

    Opens the connection and schedules the background connection
    manager task on the event loop.
    """
    self._con = await websockets.connect(self.url)
    manager_coro = self._manage_connection()
    self._connection_task = self._loop.add_task(manager_coro, name=name)
Connect to the websocket server .
63
7
22,045
async def stop(self):
    """Stop this websocket client and disconnect from the server.

    No-op when the client was never started.  Connection state is
    cleared even if stopping the background task fails.
    """
    task = self._connection_task
    if task is None:
        return

    try:
        await task.stop()
    finally:
        self._con = None
        self._connection_task = None
        self._manager.clear()
Stop this websocket client and disconnect from the server .
56
11
22,046
async def send_command(self, command, args, validator, timeout=10.0):
    """Send a command and synchronously wait for a single response.

    Raises ExternalError when no connection is established.  When
    *validator* is None the raw payload is returned; otherwise the
    payload is passed through validator.verify().
    """
    if self._con is None:
        raise ExternalError("No websock connection established")

    cmd_uuid = str(uuid.uuid4())
    packed = pack(dict(type='command', operation=command, uuid=cmd_uuid, payload=args))

    # Note: register future before sending to avoid race conditions
    response_future = self._manager.wait_for(type="response", uuid=cmd_uuid, timeout=timeout)

    await self._con.send(packed)
    response = await response_future

    if response.get('success') is False:
        self._raise_error(command, response)

    payload = response.get('payload')
    if validator is None:
        return payload

    return validator.verify(payload)
Send a command and synchronously wait for a single response .
204
12
22,047
async def _manage_connection(self):
    """Internal coroutine for managing the client connection.

    Receives messages in a loop, validates them and dispatches them to
    the message manager.  On cancellation (stop()) or exit, a synthetic
    disconnect event is delivered and the connection is closed.
    """
    try:
        while True:
            message = await self._con.recv()

            try:
                unpacked = unpack(message)
            except Exception:  # pylint:disable=broad-except;This is a background worker
                self._logger.exception("Corrupt message received")
                continue

            if not VALID_SERVER_MESSAGE.matches(unpacked):
                self._logger.warning("Dropping invalid message from server: %s", unpacked)
                continue

            # Don't block until all callbacks have finished since one of
            # those callbacks may call self.send_command, which would deadlock
            # since it couldn't get the response until it had already finished.
            if not await self._manager.process_message(unpacked, wait=False):
                self._logger.warning("No handler found for received message, message=%s", unpacked)
    except asyncio.CancelledError:
        self._logger.info("Closing connection to server due to stop()")
    finally:
        # Always notify listeners of the disconnect and close the socket.
        await self._manager.process_message(dict(type='event', name=self.DISCONNECT_EVENT, payload=None))
        await self._con.close()
Internal coroutine for managing the client connection .
278
9
22,048
def register_event(self, name, callback, validator):
    """Register a callback to receive named server events.

    Payloads failing validator.verify() are logged and dropped.  If the
    callback returns an awaitable, it is awaited; callback errors are
    logged, never propagated.
    """
    async def _validate_and_call(message):
        payload = message.get('payload')
        try:
            payload = validator.verify(payload)
        except ValidationError:
            self._logger.warning("Dropping invalid payload for event %s, payload=%s", name, payload)
            return
        try:
            result = callback(payload)
            if inspect.isawaitable(result):
                await result
        except:  # pylint:disable=bare-except;This is a background logging routine
            self._logger.error("Error calling callback for event %s, payload=%s", name, payload, exc_info=True)
    self._manager.every_match(_validate_and_call, type="event", name=name)
Register a callback to receive events .
184
7
22,049
def post_command(self, command, args):
    """Fire-and-forget a command; the response is discarded by the loop."""
    coroutine = self.send_command(command, args, Verifier())
    self._loop.log_coroutine(coroutine)
Post a command asynchronously and don t wait for a response .
38
14
22,050
def copy_all_a(input_a, *other_inputs, **kwargs):
    """Drain every reading from input_a into the output list.

    All other input streams are emptied via skip_all() and discarded.
    """
    drained = []
    while input_a.count() > 0:
        drained.append(input_a.pop())
    for extra in other_inputs:
        extra.skip_all()
    return drained
Copy all readings in input a into the output .
72
10
22,051
def copy_count_a(input_a, *other_inputs, **kwargs):
    """Emit a single reading carrying the number of readings on input_a.

    Every input stream (including input_a) is emptied via skip_all().
    """
    total = input_a.count()
    for stream in (input_a,) + other_inputs:
        stream.skip_all()
    return [IOTileReading(0, 0, total)]
Copy the latest reading from input a into the output .
75
11
22,052
def call_rpc(*inputs, **kwargs):
    """Call an RPC encoded in the reading popped from input b (inputs[1]).

    The reading's value packs the tile address in the high 16 bits and
    the rpc id in the low 16 bits.  Hardware or empty-stream errors are
    swallowed (best-effort); all inputs are always skipped afterwards.
    Requires an 'rpc_executor' entry in kwargs.
    """
    rpc_executor = kwargs['rpc_executor']
    output = []
    try:
        value = inputs[1].pop()
        addr = value.value >> 16
        rpc_id = value.value & 0xFFFF
        reading_value = rpc_executor.rpc(addr, rpc_id)
        output.append(IOTileReading(0, 0, reading_value))
    except (HardwareError, StreamEmptyError):
        # Best-effort: a failed or impossible RPC produces no output reading.
        pass
    for input_x in inputs:
        input_x.skip_all()
    return output
Call an RPC based on the encoded value read from input b .
136
13
22,053
def trigger_streamer(*inputs, **kwargs):
    """Trigger a streamer whose index comes from the reading on input b.

    Returns a single zero reading on success, [] when input b is empty
    or the marker rejects the index.  All inputs are skipped regardless
    (the finally clause runs before either return).  Requires a
    'mark_streamer' callable in kwargs.
    """
    streamer_marker = kwargs['mark_streamer']
    try:
        reading = inputs[1].pop()
    except StreamEmptyError:
        return []
    finally:
        # Always consume every input, even when we return early above.
        for input_x in inputs:
            input_x.skip_all()
    try:
        streamer_marker(reading.value)
    except ArgumentError:
        return []
    return [IOTileReading(0, 0, 0)]
Trigger a streamer based on the index read from input b .
104
13
22,054
def subtract_afromb(*inputs, **kwargs):
    """Emit one reading whose value is input b's value minus input a's.

    Returns [] when either stream is empty.
    """
    try:
        reading_a = inputs[0].pop()
        reading_b = inputs[1].pop()
        difference = reading_b.value - reading_a.value
        return [IOTileReading(0, 0, difference)]
    except StreamEmptyError:
        return []
Subtract stream a from stream b .
75
9
22,055
def _clean_intenum ( obj ) : if isinstance ( obj , dict ) : for key , value in obj . items ( ) : if isinstance ( value , IntEnum ) : obj [ key ] = value . value elif isinstance ( value , ( dict , list ) ) : obj [ key ] = _clean_intenum ( value ) elif isinstance ( obj , list ) : for i , value in enumerate ( obj ) : if isinstance ( value , IntEnum ) : obj [ i ] = value . value elif isinstance ( value , ( dict , list ) ) : obj [ i ] = _clean_intenum ( value ) return obj
Remove all IntEnum classes from a map .
145
10
22,056
def _track_change ( self , name , value , formatter = None ) : self . _emulation_log . track_change ( self . _emulation_address , name , value , formatter )
Track that a change happened .
45
6
22,057
def save_state(self, out_path):
    """Serialize this object's dumped state to a JSON file.

    IntEnum members are stripped first because they are not
    json-serializable under enum34 on Python 2.7; see
    https://bitbucket.org/stoneleaf/enum34/issues/17/difference-between-enum34-and-enum-json
    """
    state = _clean_intenum(self.dump_state())
    with open(out_path, "w") as outfile:
        json.dump(state, outfile, indent=4)
Save the current state of this emulated object to a file .
113
13
22,058
def load_state(self, in_path):
    """Restore this object's state from a JSON file written by save_state()."""
    with open(in_path, "r") as handle:
        saved = json.load(handle)
    self.restore_state(saved)
Load the current state of this emulated object from a file .
44
13
22,059
def load_scenario(self, scenario_name, **kwargs):
    """Run the scenario handler registered under scenario_name.

    Raises ArgumentError when the name is unknown; kwargs are forwarded
    to the handler.
    """
    handler = self._known_scenarios.get(scenario_name)
    if handler is None:
        raise ArgumentError("Unknown scenario %s" % scenario_name, known_scenarios=list(self._known_scenarios))
    handler(**kwargs)
Load a scenario into the emulated object .
82
9
22,060
def register_scenario(self, scenario_name, handler):
    """Register a named scenario handler, refusing duplicate names."""
    if scenario_name in self._known_scenarios:
        raise ArgumentError(
            "Attempted to add the same scenario name twice",
            scenario_name=scenario_name,
            previous_handler=self._known_scenarios[scenario_name])
    self._known_scenarios[scenario_name] = handler
Register a scenario handler for this object .
86
8
22,061
def generate(env):
    """Add Builders and construction variables for the HP aCC compiler.

    Builds on the generic cc tool, then overrides the C++ compiler name
    and the shared-object C flags (+Z requests PIC from aCC).
    """
    cc.generate(env)
    env['CXX'] = 'aCC'
    env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS +Z')
Add Builders and construction variables for aCC & cc to an Environment .
52
15
22,062
def add_pass(self, name, opt_pass, before=None, after=None):
    """Register an optimization pass under name with ordering constraints.

    before/after default to empty lists (fresh per call, never shared).
    """
    befores = [] if before is None else before
    afters = [] if after is None else after
    self._known_passes[name] = (opt_pass, befores, afters)
Add an optimization pass to the optimizer .
60
9
22,063
def _order_pases(self, passes):
    """Topologically sort the named optimization passes.

    Dependencies come from the before/after lists stored in
    _known_passes; returns a flat ordering via toposort_flatten.
    """
    passes = set(passes)
    pass_deps = {}
    for opt in passes:
        _, before, after = self._known_passes[opt]
        if opt not in pass_deps:
            pass_deps[opt] = set()
        for after_pass in after:
            pass_deps[opt].add(after_pass)
        # For passes that we are before, we may need to
        # preemptively add them to the list early
        for other in before:
            # "before" constraints only matter for passes actually being run.
            if other not in passes:
                continue
            if other not in pass_deps:
                pass_deps[other] = set()
            pass_deps[other].add(opt)
    return toposort_flatten(pass_deps)
Topologically sort optimization passes .
165
6
22,064
def optimize(self, sensor_graph, model):
    """Run all registered optimization passes over sensor_graph.

    Passes run in topological order; each pass instance is rerun until
    its run() reports no further changes (returns falsy).
    """
    passes = self._order_pases(self._known_passes.keys())
    for opt_name in passes:
        rerun = True
        pass_instance = self._known_passes[opt_name][0]()
        while rerun:
            rerun = pass_instance.run(sensor_graph, model=model)
Optimize a sensor graph by running optimization passes .
87
10
22,065
def get_calling_namespaces():
    """Return (locals, globals) of the caller's frame outside this module.

    A throwaway ZeroDivisionError is raised solely to obtain a frame
    reference via the traceback without creating reference cycles.
    """
    try:
        1 // 0
    except ZeroDivisionError:
        # Don't start iterating with the current stack-frame to
        # prevent creating reference cycles (f_back is safe).
        frame = sys.exc_info()[2].tb_frame.f_back
    # Find the first frame that *isn't* from this file. This means
    # that we expect all of the SCons frames that implement an Export()
    # or SConscript() call to be in this file, so that we can identify
    # the first non-Script.SConscript frame as the user's local calling
    # environment, and the locals and globals dictionaries from that
    # frame as the calling namespaces. See the comment below preceding
    # the DefaultEnvironmentCall block for even more explanation.
    while frame.f_globals.get("__name__") == __name__:
        frame = frame.f_back
    return frame.f_locals, frame.f_globals
Return the locals and globals for the function that called into this module in the current call stack .
215
20
22,066
def annotate(node):
    """Annotate node with the SConscript stack frame that created it.

    Walks the current traceback to the frame executing an SConscript
    file (identified by stack_bottom in its locals) and stores that
    frame's summary on node.creator.
    """
    tb = sys.exc_info()[2]
    while tb and stack_bottom not in tb.tb_frame.f_locals:
        tb = tb.tb_next
    if not tb:
        # We did not find any exec of an SConscript file: what?!
        raise SCons.Errors.InternalError("could not find SConscript stack frame")
    node.creator = traceback.extract_stack(tb)[0]
Annotate a node with the stack frame describing the SConscript file and line number that created it .
110
22
22,067
def BuildDefaultGlobals():
    """Create the default globals dict for SConstruct/SConscript files.

    Built once from SCons.Script's non-module attributes and cached in
    GlobalDict; a copy is returned so callers may mutate freely.
    """
    global GlobalDict
    if GlobalDict is None:
        GlobalDict = {}
        import SCons.Script
        d = SCons.Script.__dict__
        def not_a_module(m, d=d, mtype=type(SCons.Script)):
            # Filter out imported submodules; only plain attributes are exported.
            return not isinstance(d[m], mtype)
        for m in filter(not_a_module, dir(SCons.Script)):
            GlobalDict[m] = d[m]
    return GlobalDict.copy()
Create a dictionary containing all the default globals for SConstruct and SConscript files .
118
18
22,068
def _exceeds_version ( self , major , minor , v_major , v_minor ) : return ( major > v_major or ( major == v_major and minor > v_minor ) )
Return 1 if major and minor are greater than the version in v_major and v_minor and 0 otherwise .
47
24
22,069
def EnsureSConsVersion(self, major, minor, revision=0):
    """Exit abnormally if the SCons version is not late enough.

    Development builds (unsubstituted __VERSION__) only warn and return.
    """
    # split string to avoid replacement during build process
    if SCons.__version__ == '__' + 'VERSION__':
        SCons.Warnings.warn(SCons.Warnings.DevelopmentVersionWarning,
                            "EnsureSConsVersion is ignored for development version")
        return
    scons_ver = self._get_major_minor_revision(SCons.__version__)
    if scons_ver < (major, minor, revision):
        if revision:
            scons_ver_string = '%d.%d.%d' % (major, minor, revision)
        else:
            scons_ver_string = '%d.%d' % (major, minor)
        print("SCons %s or greater required, but you have SCons %s" % (scons_ver_string, SCons.__version__))
        sys.exit(2)
Exit abnormally if the SCons version is not late enough .
206
13
22,070
def EnsurePythonVersion(self, major, minor):
    """Exit abnormally if the running Python is older than (major, minor)."""
    if sys.version_info >= (major, minor):
        return
    installed = sys.version.split()[0]
    print("Python %d.%d or greater required, but you have Python %s" % (major, minor, installed))
    sys.exit(2)
Exit abnormally if the Python version is not late enough .
72
12
22,071
def validate_vars(env):
    """Validate the PCH and PCHSTOP construction variables.

    When PCH is set, PCHSTOP must exist and be a string; otherwise a
    UserError is raised.
    """
    if 'PCH' in env and env['PCH']:
        if 'PCHSTOP' not in env:
            raise SCons.Errors.UserError("The PCHSTOP construction must be defined if PCH is defined.")
        if not SCons.Util.is_String(env['PCHSTOP']):
            raise SCons.Errors.UserError("The PCHSTOP construction variable must be a string: %r" % env['PCHSTOP'])
Validate the PCH and PCHSTOP construction variables .
121
13
22,072
def msvc_set_PCHPDBFLAGS(env):
    """Set appropriate PCHPDBFLAGS for the MSVC version being used.

    /Yd is only needed for MSVC versions before 8; newer compilers get
    an empty value.  Fix: drop the unused minor-version local, which
    shadowed the builtin ``min``.
    """
    if env.get('MSVC_VERSION', False):
        maj = msvc_version_to_maj_min(env['MSVC_VERSION'])[0]
        if maj < 8:
            env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
        else:
            env['PCHPDBFLAGS'] = ''
    else:
        # Default if we can't determine which version of MSVC we're using
        env['PCHPDBFLAGS'] = SCons.Util.CLVar(['${(PDB and "/Yd") or ""}'])
Set appropriate PCHPDBFLAGS for the MSVC version being used .
170
16
22,073
def pch_emitter(target, source, env):
    """Emitter for the PCH builder: adds the companion object file target.

    Scans the requested targets for .pch and .obj entries and fabricates
    a sidecar .obj name when one was not given.
    """
    validate_vars(env)
    pch = None
    obj = None
    for t in target:
        if SCons.Util.splitext(str(t))[1] == '.pch':
            pch = t
        if SCons.Util.splitext(str(t))[1] == '.obj':
            obj = t
    if not obj:
        obj = SCons.Util.splitext(str(pch))[0] + '.obj'
    target = [pch, obj]  # pch must be first, and obj second for the PCHCOM to work
    return (target, source)
Adds the object file target .
149
6
22,074
def object_emitter(target, source, env, parent_emitter):
    """Sets up the PCH dependencies for an object file."""
    validate_vars(env)
    parent_emitter(target, source, env)
    # Add a dependency, but only if the target (e.g. 'Source1.obj')
    # doesn't correspond to the pre-compiled header ('Source1.pch').
    # If the basenames match, then this was most likely caused by
    # someone adding the source file to both the env.PCH() and the
    # env.Program() calls, and adding the explicit dependency would
    # cause a cycle on the .pch file itself.
    #
    # See issue #2505 for a discussion of what to do if it turns
    # out this assumption causes trouble in the wild:
    # http://scons.tigris.org/issues/show_bug.cgi?id=2505
    if 'PCH' in env:
        pch = env['PCH']
        if str(target[0]) != SCons.Util.splitext(str(pch))[0] + '.obj':
            env.Depends(target, pch)
    return (target, source)
Sets up the PCH dependencies for an object file .
249
12
22,075
def msvc_batch_key(action, env, target, source):
    """Returns a key to identify unique batches of sources for compilation.

    Returns None when batching is disabled or when target/source base
    names differ (those must compile separately).
    """
    # Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
    # was set to False. This new version should work better.
    # Note we need to do the env.subst so $MSVC_BATCH can be a reference to
    # another construction variable, which is why we test for False and 0
    # as strings.
    if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
        # We're not using batching; return no key.
        return None
    t = target[0]
    s = source[0]
    if os.path.splitext(t.name)[0] != os.path.splitext(s.name)[0]:
        # The base names are different, so this *must* be compiled
        # separately; return no key.
        return None
    return (id(action), id(env), t.dir, s.dir)
Returns a key to identify unique batches of sources for compilation .
234
12
22,076
def open(self):
    """Open the HardwareManager and optionally connect to a device.

    A direct connection string takes precedence over a connect id.
    On connection failure the manager is closed before re-raising.
    """
    self.hwman = HardwareManager(port=self._port)
    self.opened = True
    if self._connection_string is not None:
        try:
            self.hwman.connect_direct(self._connection_string)
        except HardwareError:
            self.hwman.close()
            raise
    elif self._connect_id is not None:
        try:
            self.hwman.connect(self._connect_id)
        except HardwareError:
            self.hwman.close()
            raise
Open and potentially connect to a device .
114
8
22,077
def close(self):
    """Disconnect (when connected) and close the hardware manager."""
    hwman = self.hwman
    if hwman.stream.connected:
        hwman.disconnect()
    hwman.close()
    self.opened = False
Close and potentially disconnect from a device .
40
8
22,078
def get_support_package(tile):
    """Return the tile's single support_package product (None if absent).

    Raises BuildError when more than one support package is declared.
    """
    packages = tile.find_products('support_package')
    if not packages:
        return None
    if len(packages) == 1:
        return packages[0]
    raise BuildError("Tile declared multiple support packages, only one is supported", packages=packages)
Returns the support_package product .
69
7
22,079
def iter_support_files(tile):
    """Yield (output_path, input_path) pairs for the support wheel.

    With a support_package directory, walks it for .py files (skipping
    the top-level __init__.py); otherwise falls back to the tile's
    individual python module products.
    """
    support_package = get_support_package(tile)
    if support_package is None:
        for module, _, _ in iter_python_modules(tile):
            yield os.path.basename(module), module
    else:
        for dirpath, _dirnames, filenames in os.walk(support_package):
            for filename in filenames:
                if not filename.endswith('.py'):
                    continue
                input_path = os.path.join(dirpath, filename)
                output_path = os.path.relpath(input_path, start=support_package)
                # The package __init__.py is generated elsewhere, skip it here.
                if output_path == "__init__.py":
                    continue
                yield output_path, input_path
Iterate over all files that go in the support wheel .
159
12
22,080
def iter_python_modules(tile):
    """Yield (module_path, import_string, entry_point) per python product.

    Products may be 'path/to/mod.py' or 'path/to/mod.py:obj'; the
    import string is built against the tile's support distribution.
    Raises BuildError for unknown product types or missing paths.
    """
    for product_type in tile.PYTHON_PRODUCTS:
        for product in tile.find_products(product_type):
            entry_point = ENTRY_POINT_MAP.get(product_type)
            if entry_point is None:
                raise BuildError("Found an unknown python product (%s) whose entrypoint could not be determined (%s)" % (product_type, product))
            if ':' in product:
                module, _, obj_name = product.rpartition(':')
            else:
                module = product
                obj_name = None
            if not os.path.exists(module):
                raise BuildError("Found a python product whose path did not exist: %s" % module)
            product_name = os.path.basename(module)
            if product_name.endswith(".py"):
                product_name = product_name[:-3]
            import_string = "{} = {}.{}".format(product_name, tile.support_distribution, product_name)
            if obj_name is not None:
                import_string += ":{}".format(obj_name)
            yield (module, import_string, entry_point)
Iterate over all python products in the given tile .
264
11
22,081
def generate_setup_py(target, source, env):
    """Generate setup.py for this tile's support wheel and build it.

    Renders the template to target[0], then runs setuptools in a sandbox
    (inside the output directory) to produce an sdist plus a wheel,
    universal when the tile requests python_universal.
    """
    tile = env['TILE']
    data = {}
    entry_points = {}
    for _mod, import_string, entry_point in iter_python_modules(tile):
        if entry_point not in entry_points:
            entry_points[entry_point] = []
        entry_points[entry_point].append(import_string)
    data['name'] = tile.support_distribution
    data['package'] = tile.support_distribution
    data['version'] = tile.parsed_version.pep440_string()
    data['deps'] = ["{0} {1}".format(x.support_distribution, x.parsed_version.pep440_compatibility_specifier())
                    for x in _iter_dependencies(tile) if x.has_wheel]
    # If there are some python packages needed, we add them to the list of dependencies required
    if tile.support_wheel_depends:
        data['deps'] += tile.support_wheel_depends
    data['entry_points'] = entry_points
    outdir = os.path.dirname(str(target[0]))
    render_template('setup.py.tpl', data, out_path=str(target[0]))
    # Run setuptools to generate a wheel and an sdist
    curr = os.getcwd()
    os.chdir(outdir)
    try:
        setuptools.sandbox.run_setup('setup.py', ['-q', 'clean', 'sdist'])
        if "python_universal" in tile.settings:
            setuptools.sandbox.run_setup('setup.py', ['-q', 'clean', 'bdist_wheel', '--universal'])
        else:
            setuptools.sandbox.run_setup('setup.py', ['-q', 'clean', 'bdist_wheel'])
    finally:
        # Always restore the working directory, even if setup fails.
        os.chdir(curr)
Generate the setup . py file for this distribution .
459
11
22,082
def defaultMachine(use_rpm_default=True):
    """Return the canonicalized machine (CPU architecture) name.

    When use_rpm_default is True, ask rpm for its default target cpu,
    falling back to platform.machine() if rpm is unavailable or fails.
    Fix: drop the unused bound exception variable in the except clause.
    """
    if use_rpm_default:
        try:
            # This should be the most reliable way to get the default arch
            rmachine = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False).rstrip()
            rmachine = SCons.Util.to_str(rmachine)
        except Exception:
            # Something went wrong, try again by looking up platform.machine()
            return defaultMachine(False)
    else:
        rmachine = platform.machine()
    # Try to lookup the string in the canon table
    if rmachine in arch_canon:
        rmachine = arch_canon[rmachine][0]
    return rmachine
Return the canonicalized machine name .
156
7
22,083
def defaultSystem():
    """Return the canonicalized operating system name."""
    rsystem = platform.system()
    # Try to lookup the string in the canon tables
    if rsystem in os_canon:
        return os_canon[rsystem][0]
    return rsystem
Return the canonicalized system name .
48
7
22,084
def prepare(self):
    """Called just before the task is executed.

    Raises any exception recorded while this task was being prepared,
    then lets the executor and each action target (plus side effects)
    perform their own prepare().
    """
    global print_prepare
    T = self.tm.trace
    if T:
        T.write(self.trace_message(u'Task.prepare()', self.node))
    # Now that it's the appropriate time, give the TaskMaster a
    # chance to raise any exceptions it encountered while preparing
    # this task.
    self.exception_raise()
    if self.tm.message:
        self.display(self.tm.message)
        self.tm.message = None
    # Let the targets take care of any necessary preparations.
    # This includes verifying that all of the necessary sources
    # and dependencies exist, removing the target file(s), etc.
    #
    # As of April 2008, the get_executor().prepare() method makes
    # sure that all of the aggregate sources necessary to build this
    # Task's target(s) exist in one up-front check. The individual
    # target t.prepare() methods check that each target's explicit
    # or implicit dependencies exists, and also initialize the
    # .sconsign info.
    executor = self.targets[0].get_executor()
    if executor is None:
        return
    executor.prepare()
    for t in executor.get_action_targets():
        if print_prepare:
            print("Preparing target %s..." % t)
            for s in t.side_effects:
                print("...with side-effect %s..." % s)
        t.prepare()
        for s in t.side_effects:
            if print_prepare:
                print("...Preparing side-effect %s..." % s)
            s.prepare()
Called just before the task is executed .
345
9
22,085
def execute(self):
    """Called to execute the task.

    Tries to retrieve all targets from the cache; only builds when at
    least one target missed.  Build exceptions are converted into
    BuildError (SystemExit becomes ExplicitExit).
    """
    T = self.tm.trace
    if T:
        T.write(self.trace_message(u'Task.execute()', self.node))
    try:
        cached_targets = []
        for t in self.targets:
            if not t.retrieve_from_cache():
                break
            cached_targets.append(t)
        if len(cached_targets) < len(self.targets):
            # Remove targets before building. It's possible that we
            # partially retrieved targets from the cache, leaving
            # them in read-only mode. That might cause the command
            # to fail.
            #
            for t in cached_targets:
                try:
                    t.fs.unlink(t.get_internal_path())
                except (IOError, OSError):
                    pass
            self.targets[0].build()
        else:
            for t in cached_targets:
                t.cached = 1
    except SystemExit:
        exc_value = sys.exc_info()[1]
        raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
    except SCons.Errors.UserError:
        raise
    except SCons.Errors.BuildError:
        raise
    except Exception as e:
        buildError = SCons.Errors.convert_to_BuildError(e)
        buildError.node = self.targets[0]
        buildError.exc_info = sys.exc_info()
        raise buildError
Called to execute the task .
311
7
22,086
def executed_without_callbacks(self):
    """Mark the task executed without invoking the Node callback methods.

    Side effects of executing targets are reset to NODE_NO_STATE.
    """
    T = self.tm.trace
    if T:
        T.write(self.trace_message('Task.executed_without_callbacks()', self.node))
    for t in self.targets:
        if t.get_state() == NODE_EXECUTING:
            for side_effect in t.side_effects:
                side_effect.set_state(NODE_NO_STATE)
            t.set_state(NODE_EXECUTED)
Called when the task has been successfully executed and the Taskmaster instance doesn t want to call the Node s callback methods .
114
25
22,087
def executed_with_callbacks(self):
    """Mark the task executed and invoke the Node callback methods.

    Executing targets are pushed to the cache (unless already cached),
    notified via built()/visited(), and may release target info when
    include debugging is off; other targets only get visited().
    """
    global print_prepare
    T = self.tm.trace
    if T:
        T.write(self.trace_message('Task.executed_with_callbacks()', self.node))
    for t in self.targets:
        if t.get_state() == NODE_EXECUTING:
            for side_effect in t.side_effects:
                side_effect.set_state(NODE_NO_STATE)
            t.set_state(NODE_EXECUTED)
            if not t.cached:
                t.push_to_cache()
            t.built()
            t.visited()
            if (not print_prepare and
                    (not hasattr(self, 'options') or not self.options.debug_includes)):
                t.release_target_info()
        else:
            t.visited()
Called when the task has been successfully executed and the Taskmaster instance wants to call the Node s callback methods .
191
23
22,088
def fail_stop(self):
    """Explicit stop-the-build failure: fail targets and halt the taskmaster."""
    T = self.tm.trace
    if T:
        T.write(self.trace_message('Task.failed_stop()', self.node))
    # Invoke will_not_build() to clean-up the pending children
    # list.
    self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
    # Tell the taskmaster to not start any new tasks
    self.tm.stop()
    # We're stopping because of a build failure, but give the
    # calling Task class a chance to postprocess() the top-level
    # target under which the build failure occurred.
    self.targets = [self.tm.current_top]
    self.top = 1
Explicit stop - the - build failure .
168
9
22,089
def fail_continue(self):
    """Explicit continue-the-build failure: fail this task's targets only."""
    T = self.tm.trace
    if T:
        T.write(self.trace_message('Task.failed_continue()', self.node))
    self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
Explicit continue - the - build failure .
75
9
22,090
def make_ready_all(self):
    """Marks all targets in a task ready for execution.

    Unconditionally treats every target (and side effect) as out of date.
    """
    T = self.tm.trace
    if T:
        T.write(self.trace_message('Task.make_ready_all()', self.node))
    self.out_of_date = self.targets[:]
    for t in self.targets:
        t.disambiguate().set_state(NODE_EXECUTING)
        for s in t.side_effects:
            # add disambiguate here to mirror the call on targets above
            s.disambiguate().set_state(NODE_EXECUTING)
Marks all targets in a task ready for execution .
133
11
22,091
def make_ready_current(self):
    """Mark targets ready for execution only if any target is not current.

    Up-to-date tasks get their targets visited and set NODE_UP_TO_DATE;
    otherwise every target (and side effect) enters NODE_EXECUTING.
    """
    global print_prepare
    T = self.tm.trace
    if T:
        T.write(self.trace_message(u'Task.make_ready_current()', self.node))
    self.out_of_date = []
    needs_executing = False
    for t in self.targets:
        try:
            t.disambiguate().make_ready()
            is_up_to_date = not t.has_builder() or (not t.always_build and t.is_up_to_date())
        except EnvironmentError as e:
            raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
        if not is_up_to_date:
            self.out_of_date.append(t)
            needs_executing = True
    if needs_executing:
        for t in self.targets:
            t.set_state(NODE_EXECUTING)
            for s in t.side_effects:
                # add disambiguate here to mirror the call on targets in first loop above
                s.disambiguate().set_state(NODE_EXECUTING)
    else:
        for t in self.targets:
            # We must invoke visited() to ensure that the node
            # information has been computed before allowing the
            # parent nodes to execute. (That could occur in a
            # parallel build...)
            t.visited()
            t.set_state(NODE_UP_TO_DATE)
            if (not print_prepare and
                    (not hasattr(self, 'options') or not self.options.debug_includes)):
                t.release_target_info()
Marks all targets in a task ready for execution if any target is not current .
370
17
22,092
def postprocess(self):
    """Post-processes a task after it's been executed.

    Adjusts waiting parents' ref counts for every built target and
    side effect, requeueing any parent whose count drops to zero.
    """
    T = self.tm.trace
    if T:
        T.write(self.trace_message(u'Task.postprocess()', self.node))
    # We may have built multiple targets, some of which may have
    # common parents waiting for this build. Count up how many
    # targets each parent was waiting for so we can subtract the
    # values later, and so we *don't* put waiting side-effect Nodes
    # back on the candidates list if the Node is also a waiting
    # parent.
    targets = set(self.targets)
    pending_children = self.tm.pending_children
    parents = {}
    for t in targets:
        # A node can only be in the pending_children set if it has
        # some waiting_parents.
        if t.waiting_parents:
            if T:
                T.write(self.trace_message(u'Task.postprocess()', t, 'removing'))
            pending_children.discard(t)
            for p in t.waiting_parents:
                parents[p] = parents.get(p, 0) + 1
    for t in targets:
        if t.side_effects is not None:
            for s in t.side_effects:
                if s.get_state() == NODE_EXECUTING:
                    s.set_state(NODE_NO_STATE)
                    for p in s.waiting_parents:
                        parents[p] = parents.get(p, 0) + 1
                for p in s.waiting_s_e:
                    if p.ref_count == 0:
                        self.tm.candidates.append(p)
    for p, subtract in parents.items():
        p.ref_count = p.ref_count - subtract
        if T:
            T.write(self.trace_message(u'Task.postprocess()', p, 'adjusted parent ref count'))
        if p.ref_count == 0:
            self.tm.candidates.append(p)
    for t in targets:
        t.postprocess()
Post - processes a task after it s been executed .
428
11
22,093
def exception_set(self, exception=None):
    """Record an exception (default: current sys.exc_info()) to raise later."""
    self.exception = exception if exception else sys.exc_info()
    self.exception_raise = self._exception_raise
Records an exception to be raised at the appropriate time .
43
12
22,094
def _exception_raise(self):
    """Raise the exception recorded while getting this Task ready.

    Accepts either a full (type, value, traceback) triple or just
    (type, value).  exec() is used because py2 raise syntax does not
    parse on py3 and vice versa.
    """
    exc = self.exc_info()[:]
    try:
        exc_type, exc_value, exc_traceback = exc
    except ValueError:
        exc_type, exc_value = exc
        exc_traceback = None
    # raise exc_type(exc_value).with_traceback(exc_traceback)
    if sys.version_info[0] == 2:
        exec("raise exc_type, exc_value, exc_traceback")
    else:  # sys.version_info[0] == 3:
        if isinstance(exc_value, Exception):  # hasattr(exc_value, 'with_traceback'):
            # If exc_value is an exception, then just reraise
            exec("raise exc_value.with_traceback(exc_traceback)")
        else:
            # else we'll create an exception using the value and raise that
            exec("raise exc_type(exc_value).with_traceback(exc_traceback)")
Raises a pending exception that was recorded while getting a Task ready for execution .
217
16
22,095
def no_next_candidate(self):
    """Stop Taskmaster processing by flushing (and failing) all candidates."""
    while self.candidates:
        remaining, self.candidates = self.candidates, []
        self.will_not_build(remaining)
    return None
Stops Taskmaster processing by not returning a next candidate .
39
12
22,096
def _validate_pending_children(self):
    """Validate the pending_children set; assert on any internal error.

    Every pending child must be PENDING or EXECUTING, have at least one
    waiting parent, and each such parent must hold a positive ref count.
    """
    for n in self.pending_children:
        assert n.state in (NODE_PENDING, NODE_EXECUTING), (str(n), StateString[n.state])
        assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
        for p in n.waiting_parents:
            assert p.ref_count > 0, (str(n), str(p), p.ref_count)
Validate the content of the pending_children set . Assert if an internal error is found .
120
20
22,097
def next_task(self):
    """Returns the next task to be executed (None when nothing is ready).

    Exceptions raised while making the task ready are stored and
    re-attached to the task so they surface at execution time.
    """
    node = self._find_next_ready_node()
    if node is None:
        return None
    executor = node.get_executor()
    if executor is None:
        return None
    tlist = executor.get_all_targets()
    task = self.tasker(self, tlist, node in self.original_top, node)
    try:
        task.make_ready()
    except Exception as e:
        # We had a problem just trying to get this task ready (like
        # a child couldn't be linked to a VariantDir when deciding
        # whether this node is current). Arrange to raise the
        # exception when the Task is "executed."
        self.ready_exc = sys.exc_info()
    if self.ready_exc:
        task.exception_set(self.ready_exc)
        self.ready_exc = None
    return task
Returns the next task to be executed .
189
8
22,098
def cleanup(self):
    """Check for dependency cycles among still-pending children.

    Raises UserError describing every genuine cycle; single EXECUTED
    nodes reported by find_cycle are ignored as false positives.
    """
    if not self.pending_children:
        return
    nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
    genuine_cycles = [node for node, cycle in nclist if cycle or node.get_state() != NODE_EXECUTED]
    if not genuine_cycles:
        # All of the "cycles" found were single nodes in EXECUTED state,
        # which is to say, they really weren't cycles. Just return.
        return
    desc = 'Found dependency cycle(s):\n'
    for node, cycle in nclist:
        if cycle:
            desc = desc + " " + " -> ".join(map(str, cycle)) + "\n"
        else:
            desc = desc + " Internal Error: no cycle found for node %s (%s) in state %s\n" % (node, repr(node), StateString[node.get_state()])
    raise SCons.Errors.UserError(desc)
Check for dependency cycles .
225
5
22,099
def instantiate_resolver(self, name, args):
    """Construct the dependency resolver registered under name with args.

    Raises ArgumentError when no resolver with that name is known.
    """
    if name not in self._known_resolvers:
        raise ArgumentError("Attempting to instantiate unknown dependency resolver", name=name)
    factory = self._known_resolvers[name]
    return factory(args)
Directly instantiate a dependency resolver by name with the given arguments
62
14