idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
45,700
def prompt_overwrite_json(original, new, target_path, dumps=json_dumps):
    """Prompt the end user with a diff of the original and new JSON that
    may overwrite the file at target_path.

    This only displays a confirmation prompt; the caller implements the
    actual overwrite.  A custom ``dumps`` callable may be passed in for
    output generation.  Returns the prompt result (True to overwrite).
    """
    # Fix: honor the ``dumps`` argument -- previously json_dumps was
    # always called directly, silently ignoring a caller-supplied
    # serializer.
    diff = '\n'.join(
        l for l in (
            line.rstrip() for line in difflib.ndiff(
                dumps(original).splitlines(),
                dumps(new).splitlines(),
            )
        )
        # keep changed lines plus structural context lines
        if l[:1] in '?+-' or l[-1:] in '{}' or l[-2:] == '},'
    )
    basename_target = basename(target_path)
    return prompt(
        "Generated '%(basename_target)s' differs with '%(target_path)s'.\n\n"
        "The following is a compacted list of changes required:\n"
        "%(diff)s\n\n"
        "Overwrite '%(target_path)s'?" % locals(),
        choices=(
            ('Yes', True),
            ('No', False),
        ),
        default_key=1,
    )
Prompt end user with a diff of original and new json that may overwrite the file at the target_path . This function only displays a confirmation prompt and it is up to the caller to implement the actual functionality . Optionally a custom json . dumps method can also be passed in for output generation .
45,701
def locate_package_entry_file(working_dir, package_name):
    """Locate a single npm package and return its browser or main entry."""
    basedir = join(working_dir, 'node_modules', package_name)
    package_json = join(basedir, 'package.json')
    if not exists(package_json):
        logger.debug(
            "could not locate package.json for the npm package '%s' in the "
            "current working directory '%s'; the package may have been "
            "not installed, the build process may fail",
            package_name, working_dir,
        )
        return
    with open(package_json) as fd:
        package_info = json.load(fd)
    if 'browser' in package_info or 'main' in package_info:
        # prefer the browser entry, fall back to main
        entry = package_info.get('browser') or package_info['main']
        return join(basedir, *entry.split('/'))
    index_js = join(basedir, 'index.js')
    if exists(index_js):
        return index_js
    logger.debug(
        "package.json for the npm package '%s' does not contain a main "
        "entry point", package_name,
    )
Locate a single npm package to return its browser or main entry .
45,702
def render_pictures(context, selection='recent', amount=3):
    """Template tag to render a list of pictures from published galleries."""
    published_folders = Gallery.objects.filter(
        is_published=True).values_list('folder__pk', flat=True)
    pictures = Image.objects.filter(folder__id__in=published_folders)
    if selection == 'recent':
        ordering = '-uploaded_at'
    elif selection == 'random':
        ordering = '?'
    else:
        # unknown selection mode
        return None
    context.update({'pictures': pictures.order_by(ordering)[:amount]})
    return context
Template tag to render a list of pictures .
45,703
def add_header(self, entry):
    """Parse the VCF header line and record the sample columns.

    The first 9 columns are fixed VCF fields; everything after is a
    sample name, stored with its column offset.  Returns True when at
    least one sample is present.
    """
    columns = entry.split('\t')
    self.n_individuals = len(columns) - 9
    for offset, sample_name in enumerate(columns[9:]):
        self.individuals[sample_name] = offset
    return self.n_individuals > 0
Parses the VCF Header field and returns the number of samples in the VCF file
45,704
def add_entry(self, row):
    """Parse a VCF row, store it keyed by (chrom, pos), and return the
    resulting VCFEntry."""
    variant = VCFEntry(self.individuals)
    variant.parse_entry(row)
    self.entries[(variant.chrom, variant.pos)] = variant
    return variant
This will parse the VCF entry and also store it within the VCFFile . It will also return the VCFEntry as well .
45,705
def get_header(self, individual=-1):
    """Returns the vcf header.

    Rebuilds the ``##`` meta lines (extra, INFO, FILTER, FORMAT, ALT)
    from ``self.meta`` and appends the ``#CHROM ...`` column header.

    individual: -1 appends every known sample column, an int index or a
    sample name appends that one sample, and None omits sample columns.

    NOTE(review): uses dict.iteritems, so this is Python 2 only.
    """
    # invert the type map so internal type codes render as VCF type names
    type_map = dict([(val, key) for key, val in self.meta.type_map.iteritems()])
    extra = '\n'.join(['##{0}'.format(i) for i in self.meta.extra])
    info = '\n'.join(['##INFO=<ID={0},Number={1},Type={2},Description={3}>'.format(key, val.get('num_entries', '.'), type_map.get(val.get('type', '')), val.get('description')) for key, val in self.meta.info.iteritems()])
    filter = '\n'.join(['##FILTER=<ID={0},Description={1}>'.format(key, val.get('description', '.')) for key, val in self.meta.filter.iteritems()])
    format = '\n'.join(['##FORMAT=<ID={0},Number={1},Type={2},Description={3}>'.format(key, val.get('num_entries', '.'), type_map.get(val.get('type', '')), val.get('description')) for key, val in self.meta.format.iteritems()])
    alt = '\n'.join(['##ALT=<ID={0},Description={1}>'.format(key, val.get('description', '.')) for key, val in self.meta.alt.iteritems()])
    header = '\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT'])
    if individual is not None:
        if individual == -1:
            # all samples, in the order self.individuals yields them
            individual = '\t'.join(self.individuals.keys())
        else:
            if isinstance(individual, int):
                # reverse-lookup the sample name for a numeric index
                for i, v in self.individuals.iteritems():
                    if v == individual:
                        individual = i
                        break
        header += '\t' + individual
    return '\n'.join([extra, info, filter, format, alt, header])
Returns the vcf header
45,706
def add_info(self, entry):
    """Parse and store an ``##INFO=<...>`` meta line into self.info.

    Returns True on success, False when fewer than the 4 expected
    key=value fields (ID, Number, Type, Description) are present.
    """
    # strip the leading '##INFO=<' and the trailing '>'
    entry = entry[8:-1]
    info = entry.split(',')
    if len(info) < 4:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.info[value] = {}
            id_ = value
        elif key == 'Number':
            # 'A'/'G' mean one value per alt allele/genotype; flag as -1
            if value == 'A' or value == 'G':
                value = -1
            self.info[id_]['num_entries'] = value
        elif key == 'Type':
            self.info[id_]['type'] = self.type_map[value]
        elif key == 'Description':
            self.info[id_]['description'] = value
            # the description may itself contain commas; fold the split
            # remainder back in.  NOTE(review): joined without a leading
            # separator, so the remainder is appended directly -- confirm
            # this is intended.
            if len(info) > 4:
                self.info[id_]['description'] += '; '.join(info[4:])
            break
    return True
Parse and store the info field
45,707
def add_filter(self, entry):
    """Parse and store an ``##FILTER=<ID=...,Description=...>`` meta line.

    Returns True on success, False when fewer than the 2 expected fields
    are present.
    """
    # strip the leading '##FILTER=<' and the trailing '>'
    entry = entry[10:-1]
    info = entry.split(',')
    if len(info) < 2:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.filter[value] = {}
            id_ = value
        elif key == 'Description':
            self.filter[id_]['description'] = value
            if len(info) > 2:
                # Fix: append the comma-split remainder to the FILTER
                # entry's description.  Previously this wrote to
                # self.info[id_], a copy-paste bug that raised KeyError
                # or corrupted the INFO metadata.
                self.filter[id_]['description'] += '; '.join(info[2:])
    return True
Parse and store the filter field
45,708
def add_alt(self, entry):
    """Parse and store an ``##ALT=<ID=...,Description=...>`` meta line.

    Returns True on success, False when fewer than the 2 expected fields
    are present.
    """
    # strip the leading '##ALT=<' and the trailing '>'
    entry = entry[7:-1]
    info = entry.split(',')
    if len(info) < 2:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.alt[value] = {}
            id_ = value
        elif key == 'Description':
            self.alt[id_]['description'] = value
            # Fix: ALT meta lines carry 2 fields (ID, Description), so
            # the comma-split remainder starts at index 2, not 4 -- the
            # old bounds were copied from the 4-field INFO parser and
            # silently dropped part of comma-containing descriptions.
            if len(info) > 2:
                self.alt[id_]['description'] += '; '.join(info[2:])
            break
    return True
Parse and store the alternative allele field
45,709
def sample_string(self, individual=-1):
    """Return the VCF entry as it appears in the vcf file, with the
    per-sample columns appended after the fixed fields."""
    fixed_fields = str(self)
    sample_info = self.get_sample_info(individual=individual)
    sample_columns = [
        ':'.join(str(value) for value in column)
        for column in zip(*sample_info.values())
    ]
    return '\t'.join([fixed_fields, '\t'.join(sample_columns)])
Returns the VCF entry as it appears in the vcf file
45,710
def get_sample_info(self, individual=-1):
    """Returns the sample info of a given sample, or all by default.

    individual: sample name (str), sample index (int), or -1 for all.
    Returns an OrderedDict mapping FORMAT keys ('GT', 'GQ', 'DP') to
    lists of per-sample values, parsing samples lazily on first access.

    NOTE(review): if self.format contains a recognized index attribute
    other than GT/GQ/DP (e.g. FT), ``d`` keeps its value from the prior
    iteration -- confirm FT is intentionally excluded here.
    """
    if isinstance(individual, str):
        # translate a sample name into its column index
        individual = self.individuals[individual]
    extra = OrderedDict()
    for format_ in self.format:
        # each supported FORMAT key mirrors an attribute holding its
        # position within the FORMAT string (-1 means absent)
        index = getattr(self, format_)
        if index != -1:
            if format_ == 'GT':
                d = self.genotype
            elif format_ == 'GQ':
                d = self.genome_quality
            elif format_ == 'DP':
                d = self.depth
            if individual == -1:
                if len(d) != len(self.samples):
                    # not every sample parsed yet; parse them all now
                    [self.parse_sample(i) for i in six.moves.range(len(self.samples))]
                extra[format_] = [d[i] for i in six.moves.range(len(d))]
            else:
                if individual not in d:
                    self.parse_sample(individual)
                extra[format_] = [d[individual]]
    return extra
Returns the sample info of a given sample or all by default
45,711
def is_homozygous(self, individual=None):
    """Return a boolean list flagging homozygosity for the alternative
    allele.

    With an individual (name or index) the list has a single element;
    otherwise one element per individual in self.genotype.
    """
    def _homozygous(alts):
        # all calls are the alternate allele, and at least one alt call
        return sum(alts) == len(alts) if sum(alts) > 0 else False

    if individual is None:
        return [_homozygous(alts) for i, alts in self.genotype.iteritems()]
    if isinstance(individual, str):
        individual = self.individuals[individual]
    return [_homozygous(self.genotype[individual])]
This will give a boolean list corresponding to whether each individual is homozygous for the alternative allele .
45,712
def get_alt(self, individual=0, nucleotides_only=True):
    """Return the alternative alleles called for the individual as a list.

    With nucleotides_only, symbolic alleles (e.g. '<DEL>') are skipped.
    '.' placeholders are stripped from the returned sequences.
    """
    if isinstance(individual, str):
        individual = self.individuals[individual]
    called = [i for i in self.genotype[individual] if i > 0]
    if nucleotides_only:
        called = [i for i in called if not self.alt[i - 1].startswith('<')]
    return [self.alt[i - 1].replace('.', '') for i in called]
Returns the alternative alleles of the individual as a list
45,713
def get_alt_length(self, individual=0):
    """Return the number of basepairs of each non-symbolic alternative
    allele called for the individual."""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    lengths = []
    for call in self.genotype[individual]:
        if call <= 0:
            continue
        allele = self.alt[call - 1]
        if allele.startswith('<'):
            # symbolic alleles have no meaningful sequence length
            continue
        lengths.append(len(allele.replace('.', '')))
    return lengths
Returns the number of basepairs of each alternative allele
45,714
def get_alt_lengths(self):
    """Return the longest length delta of the variant per individual.

    Negative for deletions, 0 for SNPs, positive for insertions; None
    when the individual has no variant in this interval.
    """
    deltas = []
    for index in six.moves.range(len(self.genotype)):
        alt_lengths = self.get_alt_length(individual=index)
        if alt_lengths:
            deltas.append(max(alt_lengths) - len(self.ref))
        else:
            deltas.append(None)
    return deltas
Returns the longest length of the variant. For deletions the return value is negative, SNPs return 0, and insertions are positive. A return of None corresponds to no variant in the interval for the specified individual.
45,715
def has_snp(self, individual=0):
    """Return a boolean list marking which called alternative alleles
    are SNPs (same length as the reference but a different sequence)."""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    alleles = self.get_alt(individual=individual)
    if not alleles:
        return [False]
    ref = self.ref
    return [allele != ref and len(allele) == len(ref) for allele in alleles]
Returns a boolean list of SNP status ordered by samples
45,716
def parse_entry(self, entry):
    """Parse a raw VCF data row and store the relevant fields on self.

    Sets chrom/pos/id/ref/alt/qual, the FILTER status (True for 'PASS'
    or '.', otherwise the list of failed filters), the raw INFO string,
    the FORMAT keys, and the position of each recognized FORMAT key
    (GT, GQ, DP, FT) as a same-named attribute.
    """
    fields = entry.split('\t')
    (self.chrom, self.pos, self.id, self.ref, alt_, self.qual,
     filter_, info, self.format) = fields[:9]
    self.samples = fields[9:]
    self.alt = alt_.split(',')
    self.passed = True if filter_ in ('PASS', '.') else filter_.split(';')
    self.info = info
    self.format = self.format.split(':')
    for key in ('GT', 'GQ', 'DP', 'FT'):
        if key in self.format:
            setattr(self, key, self.format.index(key))
This parses a VCF row and stores the relevant information
45,717
def add_child(self, child):
    """Register a child GFFFeature keyed by its id.

    The children mapping is created lazily on first use to avoid memory
    overhead for GFF files with millions of rows.  Children without an
    id are ignored; an existing child with the same id is kept.
    """
    child_id = getattr(child, 'id', None)
    if not child_id:
        return
    if not hasattr(self, 'children'):
        self.children = {}
    self.children.setdefault(child_id, child)
Children are GFFFeatures and are defined when added . This is done to avoid memory overheads that may be incurred by GFF files that have millions of rows .
45,718
def _iso_handler ( obj ) : if hasattr ( obj , 'isoformat' ) : result = obj . isoformat ( ) else : raise TypeError ( "Unserializable object {} of type {}" . format ( obj , type ( obj ) ) ) return result
Transforms an object into its ISO format, if possible.
45,719
def argparser(self):
    """Lazily build, initialize, and cache the argparser for this
    instance."""
    if self.__argparser is None:
        # build once, then run the instance's initialization hook on it
        self.__argparser = self.argparser_factory()
        self.init_argparser(self.__argparser)
    return self.__argparser
For setting up the argparser for this instance .
45,720
def argparser_factory(self):
    """Produce the ArgumentParser for this type of Runtime.

    Help is disabled here; the class docstring supplies the description.
    """
    kwargs = {
        'prog': self.prog,
        'description': self.__doc__,
        'add_help': False,
    }
    return ArgumentParser(**kwargs)
Produces argparser for this type of Runtime .
45,721
def unrecognized_arguments_error(self, args, parsed, extras):
    """Report unrecognized arguments against the most specific subparser.

    This exists because argparse does not fail unrecognized arguments
    early; this walks down the chain of Runtime subcommands, re-parsing
    each sub-span of args so the error can be attributed to the parser
    that actually owns the leftover arguments.  Exits with status 2.
    """
    kwargs = vars(parsed)
    failed = list(extras)
    runtime, subparser, idx = (self, self.argparser, 0)
    while isinstance(runtime, Runtime):
        # action_key records which subcommand this runtime dispatched to
        cmd = kwargs.pop(runtime.action_key)
        action_idx = None if cmd not in args else args.index(cmd)
        if cmd not in args and cmd is not None:
            logger.debug(
                "command for prog=%r is set to %r without being specified "
                "as part of the input arguments - the following error "
                "message may contain misleading references",
                subparser.prog, cmd)
        # re-parse just the slice of args belonging to this level
        subargs = args[idx:action_idx]
        subparsed, subextras = subparser.parse_known_args(subargs)
        if subextras:
            subparser.unrecognized_arguments_error(subextras)
            # drop the extras already reported at this level
            failed = failed[len(subextras):]
        if not failed:
            break
        # descend into the subcommand's runtime/parser pair
        details = runtime.get_argparser_details(subparser)
        runtime = details.runtimes[cmd]
        subparser = details.subparsers[cmd]
        idx = action_idx + 1
    if failed:
        subparser.unrecognized_arguments_error(failed)
    sys.exit(2)
This exists because argparse does not fail early on unrecognized arguments.
45,722
def error(self, argparser, target, message):
    """Deprecated: dispatch an error message to the parser for target.

    This was used as part of the original non-recursive lookup for the
    target parser; slated for removal by calmjs-4.0.0.
    """
    warnings.warn(
        'Runtime.error is deprecated and will be removed by calmjs-4.0.0',
        DeprecationWarning)
    details = self.get_argparser_details(argparser)
    target_parser = details.subparsers[target] if details else self.argparser
    target_parser.error(message)
This was used as part of the original non - recursive lookup for the target parser .
45,723
def init_argparser_working_dir(
        self, argparser,
        explanation='',
        help_template=(
            'the working directory; %(explanation)s'
            'default is current working directory (%(cwd)s)')):
    """Set up the --working-dir flag.

    Subclasses can provide an extra explanation of how the working
    directory is used; it is interpolated into the help text.
    """
    cwd = self.toolchain.join_cwd()
    argparser.add_argument(
        '--working-dir',
        dest=WORKING_DIR,
        metavar=metavar(WORKING_DIR),
        default=cwd,
        help=help_template % {'explanation': explanation, 'cwd': cwd},
    )
Subclasses can provide an extra explanation of how this is used.
45,724
def init_argparser_build_dir(
        self, argparser, help=(
            'the build directory, where all sources will be copied to '
            'as part of the build process; if left unspecified, the '
            'default behavior is to create a new temporary directory '
            'that will be removed upon conclusion of the build; if '
            'specified, it must be an existing directory and all files '
            'for the build will be copied there instead, overwriting any '
            'existing file, with no cleanup done after.')):
    """Set up the --build-dir flag for the build directory."""
    argparser.add_argument(
        '--build-dir',
        default=None,
        dest=BUILD_DIR,
        metavar=metavar(BUILD_DIR),
        help=help,
    )
For setting up build directory
45,725
def init_argparser_optional_advice(
        self, argparser, default=[], help=(
            'a comma separated list of packages to retrieve optional '
            'advice from; the provided packages should have registered '
            'the appropriate entry points for setting up the advices for '
            'the toolchain; refer to documentation for the specified '
            'packages for details')):
    """Set up the --optional-advice flag.

    NOTE(review): the mutable default ``default=[]`` is only ever read
    and handed to argparse here, so it is harmless, but None or a tuple
    would be safer should it ever be mutated.
    """
    argparser.add_argument(
        '--optional-advice',
        default=default,
        required=False,
        dest=ADVICE_PACKAGES,
        action=StoreRequirementList,
        metavar='<advice>[,<advice>[...]]',
        help=help,
    )
For setting up optional advice .
45,726
def prepare_spec(self, spec, **kwargs):
    """Prepare a spec for usage with the generic ToolchainRuntime.

    Applies the debug flag and export target checks immediately, and
    defers advice package preparation to the SETUP advice stage.
    """
    self.prepare_spec_debug_flag(spec, **kwargs)
    self.prepare_spec_export_target_checks(spec, **kwargs)
    spec.advise(SETUP, self.prepare_spec_advice_packages, spec, **kwargs)
Prepare a spec for usage with the generic ToolchainRuntime .
45,727
def kwargs_to_spec(self, **kwargs):
    """Create a spec from the provided kwargs and prepare it for use
    with the toolchain."""
    spec = self.create_spec(**kwargs)
    self.prepare_spec(spec, **kwargs)
    return spec
Turn the provided kwargs into arguments ready for toolchain .
45,728
def init_argparser_package_names(
        self, argparser, help=(
            'names of the python package to generate artifacts for; '
            'note that the metadata directory for the specified '
            'packages must be writable')):
    """Default helper for setting up the package_names option."""
    argparser.add_argument(
        'package_names',
        metavar=metavar('package'),
        nargs='+',
        help=help,
    )
Default helper for setting up the package_names option .
45,729
def init_argparser_source_registry(
        self, argparser, default=None, help=(
            'comma separated list of registries to use for gathering '
            'JavaScript sources from the given Python packages')):
    """Set up the source registry flag.

    The plural '--source-registries' spelling is registered as a hidden
    alias for the same destination.
    """
    argparser.add_argument(
        '--source-registry',
        default=default,
        dest=CALMJS_MODULE_REGISTRY_NAMES,
        action=StoreDelimitedList,
        metavar='<registry>[,<registry>[...]]',
        help=help,
    )
    argparser.add_argument(
        '--source-registries',
        default=default,
        dest=CALMJS_MODULE_REGISTRY_NAMES,
        action=StoreDelimitedList,
        help=SUPPRESS,
    )
For setting up the source registry flag .
45,730
def init_argparser_loaderplugin_registry(
        self, argparser, default=None, help=(
            'the name of the registry to use for the handling of loader '
            'plugins that may be loaded from the given Python packages')):
    """Default helper for setting up the loaderplugin registry flag."""
    argparser.add_argument(
        '--loaderplugin-registry',
        default=default,
        dest=CALMJS_LOADERPLUGIN_REGISTRY_NAME,
        action='store',
        metavar=metavar('registry'),
        help=help,
    )
Default helper for setting up the loaderplugin registries flags .
45,731
def _check_not_empty ( string ) : string = string . strip ( ) if len ( string ) == 0 : message = 'The string should not be empty' raise pp . ParseException ( message )
Checks that the string is not empty .
45,732
def _to_numeric_float ( number , nums_int ) : index_end = len ( number ) - nums_int return float ( number [ : nums_int ] + '.' + number [ - index_end : ] )
Transforms a string into a float .
45,733
def _check_above_value_float ( string , minimum ) : value = float ( string ) if value < minimum : message = 'The Numeric Field value should be above %s' % minimum raise pp . ParseException ( message )
Checks that the number parsed from the string is above a minimum .
45,734
def _to_boolean ( string ) : if string == 'Y' : result = True elif string == 'N' : result = False else : raise pp . ParseException ( string , msg = 'Is not a valid boolean value' ) return result
Transforms a string into a boolean value .
45,735
def blank(columns=1, name=None):
    """Create the grammar for a blank (all-spaces) field of the given
    width.

    The field is suppressed so it contributes nothing to parse results.
    """
    if name is None:
        name = 'Blank Field'
    field = pp.Regex('[ ]{' + str(columns) + '}')
    field.leaveWhitespace()
    # Fix: ParserElement.suppress() returns a new Suppress wrapper rather
    # than mutating in place; the old code discarded the return value, so
    # the field was never actually suppressed.
    field = field.suppress()
    field.setName(name)
    return field
Creates the grammar for a blank field .
45,736
def contains(self, seqid, start, end, overlap=True):
    """Return the GFF objects covering the specified location.

    With overlap=True any feature intersecting [start, end) is returned;
    otherwise only features fully enclosing the interval.
    """
    regions = self.positions.get(seqid, [])
    hits = []
    for gff_start, gff_end in regions:
        if overlap:
            matched = not (end <= gff_start or start >= gff_end)
        else:
            matched = gff_start <= start and gff_end >= end
        if matched:
            hits.extend(regions[(gff_start, gff_end)])
    return hits
This returns a list of GFF objects which cover a specified location .
45,737
def contains(self, chrom, start, end, overlap=True):
    """Return the VCFEntry objects covering the specified location.

    With overlap=True any entry touching [start, end] (inclusive bounds)
    is returned; otherwise only entries fully enclosing the interval.
    """
    regions = self.positions.get(chrom, [])
    hits = []
    for vcf_start, vcf_end in regions:
        if overlap:
            matched = not (end < vcf_start or start > vcf_end)
        else:
            matched = vcf_start <= start and vcf_end >= end
        if matched:
            hits.extend(regions[(vcf_start, vcf_end)])
    return hits
This returns a list of VCFEntry objects which cover a specified location .
45,738
def remove_variants(self, variants):
    """Remove a list of variants from the positions we are scanning.

    NOTE(review): membership is tested as ``pos in variants``, i.e. the
    keys of self.positions[chrom] are compared directly against the
    supplied variant objects -- this assumes the position keys and the
    elements of ``variants`` compare equal; confirm against how
    positions is populated.
    """
    chroms = set([i.chrom for i in variants])
    for chrom in chroms:
        if self.append_chromosome:
            # stored keys carry a 'chr' prefix in this configuration
            chrom = 'chr%s' % chrom
        # collect first, then delete, to avoid mutating while iterating
        to_delete = [pos for pos in self.positions[chrom] if pos in variants]
        for pos in to_delete:
            del self.positions[chrom][pos]
Remove a list of variants from the positions we are scanning
45,739
def generate_handler_sourcepath(self, toolchain, spec, loaderplugin_sourcepath):
    """Recursively resolve sourcepaths for chained/nested loader plugins.

    The default implementation is a recursive lookup: unwrap this
    plugin's prefix from each key, group the remainder by their next
    loaderplugin name, look up the sibling handler for each in this
    handler's registry, and merge the mapping each sibling produces.
    Subclasses may make use of it.
    """
    fake_spec = {}
    registry = spec.get(CALMJS_LOADERPLUGIN_REGISTRY)
    if registry:
        fake_spec[CALMJS_LOADERPLUGIN_REGISTRY] = registry
    # strip this plugin's prefix from each key and file the results into
    # fake_spec['nested'], keyed by the next-level plugin name
    spec_update_sourcepath_filter_loaderplugins(fake_spec, {
        self.unwrap(k): v
        for k, v in loaderplugin_sourcepath.items()
    }, 'current', 'nested')
    result = {}
    for plugin_name, sourcepath in fake_spec['nested'].items():
        if sourcepath == loaderplugin_sourcepath:
            # unwrapping made no progress; skip to avoid recursing
            # forever on an identical sourcepath
            logger.warning(
                "loaderplugin '%s' extracted same sourcepath of while "
                "locating chain loaders: %s; skipping",
                self.name, sourcepath)
            continue
        plugin = self.registry.get_record(plugin_name)
        if not plugin:
            logger.warning(
                "loaderplugin '%s' from registry '%s' cannot find "
                "sibling loaderplugin handler for '%s'; processing "
                "may fail for the following nested/chained sources: "
                "%s",
                self.name, self.registry.registry_name, plugin_name,
                sourcepath,
            )
            continue
        # recurse into the sibling handler and fold in its results
        result.update(plugin.generate_handler_sourcepath(
            toolchain, spec, sourcepath))
    return result
The default implementation is a recursive lookup method which subclasses may make use of .
45,740
def generate_handler_sourcepath(self, toolchain, spec, loaderplugin_sourcepath):
    """Attempt to locate the npm plugin source; return a mapping of
    modnames to the absolute path of the located sources.

    Falls back to deriving the working directory from the toolchain when
    the spec lacks one, and logs (without failing) when the npm package
    or its entry point cannot be located.
    """
    npm_pkg_name = (
        self.node_module_pkg_name
        if self.node_module_pkg_name else
        self.find_node_module_pkg_name(toolchain, spec))
    if not npm_pkg_name:
        cls = type(self)
        registry_name = getattr(
            self.registry, 'registry_name', '<invalid_registry/handler>')
        # different message depending on whether the base class itself
        # was used (user should subclass) or a subclass failed to resolve
        if cls is NPMLoaderPluginHandler:
            logger.error(
                "no npm package name specified or could be resolved for "
                "loaderplugin '%s' of registry '%s'; please subclass "
                "%s:%s such that the npm package name become specified",
                self.name, registry_name, cls.__module__, cls.__name__,
            )
        else:
            logger.error(
                "no npm package name specified or could be resolved for "
                "loaderplugin '%s' of registry '%s'; implementation of "
                "%s:%s may be at fault",
                self.name, registry_name, cls.__module__, cls.__name__,
            )
        return {}
    working_dir = spec.get(WORKING_DIR, None)
    if working_dir is None:
        logger.info(
            "attempting to derive working directory using %s, as the "
            "provided spec is missing working_dir", toolchain)
        working_dir = toolchain.join_cwd()
    logger.debug("deriving npm loader plugin from '%s'", working_dir)
    target = locate_package_entry_file(working_dir, npm_pkg_name)
    if target:
        logger.debug('picked %r for loader plugin %r', target, self.name)
        # recurse for any chained loader plugins, then add our own entry
        result = super(
            NPMLoaderPluginHandler, self).generate_handler_sourcepath(
                toolchain, spec, loaderplugin_sourcepath)
        result.update({self.name: target})
        return result
    if exists(join(working_dir, 'node_modules', npm_pkg_name, 'package.json')):
        logger.warning(
            "'package.json' for the npm package '%s' does not contain a "
            "valid entry point: sources required for loader plugin '%s' "
            "cannot be included automatically; the build process may fail",
            npm_pkg_name, self.name,
        )
    else:
        logger.warning(
            "could not locate 'package.json' for the npm package '%s' "
            "which was specified to contain the loader plugin '%s' in the "
            "current working directory '%s'; the missing package may "
            "be installed by running 'npm install %s' for the mean time "
            "as a workaround, though the package that owns that source "
            "file that has this requirement should declare an explicit "
            "dependency; the build process may fail",
            npm_pkg_name, self.name, working_dir, npm_pkg_name,
        )
    return {}
Attempt to locate the plugin source ; returns a mapping of modnames to the absolute path of the located sources .
45,741
def encode(self, tag):
    """Build a CWR file name from a FileTag object.

    The sequence is truncated/zero-padded to self._sequence_l digits and
    the version is collapsed to two digits.
    """
    sequence = str(tag.sequence_n)
    if len(sequence) > self._sequence_l:
        sequence = sequence[:self._sequence_l]
    sequence = sequence.rjust(self._sequence_l, '0')
    version = str(tag.version)
    if len(version) > 2:
        # keep the first and last digit of an over-long version
        version = version[:1] + version[-1:]
    version = version.rjust(2, '0')
    year = str(tag.year)[-2:]
    sender = tag.sender[:3]
    receiver = tag.receiver[:3]
    return (self._header + year + sequence + sender +
            self._ip_delimiter + receiver + ".V" + version)
Parses a CWR file name from a FileTag object .
45,742
def encode(self, transmission):
    """Encode a domain-model transmission into its flat CWR structure:
    header, then each group (group header, transaction records, group
    trailer), then the transmission trailer."""
    parts = [self._record_encode(transmission.header)]
    for group in transmission.groups:
        parts.append(self._record_encode(group.group_header))
        for transaction in group.transactions:
            parts.extend(
                self._record_encode(record) for record in transaction)
        parts.append(self._record_encode(group.group_trailer))
    parts.append(self._record_encode(transmission.trailer))
    return ''.join(parts)
Encodes the data creating a CWR structure from an instance from the domain model .
45,743
def getScan(self, title, peptide=None):
    """Random-access lookup of a scan by title.

    Uses the byte-offset index in self.ra to seek directly to the scan's
    range in the open file and parse it; returns None for unknown titles.
    ``peptide`` is accepted for interface compatibility but unused here.
    """
    # Fix: dict.has_key() was removed in Python 3; the `in` operator
    # behaves identically on both Python 2 and 3.
    if title not in self.ra:
        return None
    start, end = self.ra[title]
    self.filename.seek(start, 0)
    info = self.filename.read(end - start)
    return self.parseScan(info)
allows random lookup
45,744
def parseScan(self, scan):
    """Parse one MGF scan block (the content between BEGIN IONS and
    END IONS) into a ScanObject.

    Returns the ScanObject only when PEPMASS, CHARGE and TITLE were all
    found; otherwise None.
    """
    setupScan = True  # NOTE(review): set but never read in this method
    foundCharge = False
    foundMass = False
    foundTitle = False
    scanObj = ScanObject()
    # presumably MGF scans are treated as MS2 here -- confirm upstream
    scanObj.ms_level = 2
    for row in scan.split('\n'):
        if not row:
            continue
        entry = row.strip().split('=')
        if len(entry) >= 2:
            # KEY=VALUE metadata line
            if entry[0] == 'PEPMASS':
                scanObj.mass = float(entry[1])
                foundMass = True
            elif entry[0] == 'CHARGE':
                scanObj.charge = entry[1]
                foundCharge = True
            elif entry[0] == 'TITLE':
                # the title itself may contain '='; re-join the remainder
                title = '='.join(entry[1:])
                foundTitle = True
                scanObj.title = title
                scanObj.id = title
            elif entry[0] == 'RTINSECONDS':
                scanObj.rt = float(entry[1])
        else:
            # peak line: m/z and intensity separated by self.scanSplit
            mz, intensity = self.scanSplit.split(row.strip())
            scanObj.scans.append((float(mz), float(intensity)))
    if foundCharge and foundMass and foundTitle:
        return scanObj
    return None
All input follows the BEGIN IONS row and ends before END IONS
45,745
def _cls_lookup_dist(cls):
    """Attempt to resolve the distribution providing cls, naively.

    Walks the class's module path from most to least specific (a.b.c,
    a.b, a) and returns the first package distribution found -- this
    assumes the module path contains the providing package's name.
    Returns None when nothing matches.
    """
    segments = cls.__module__.split('.')
    for length in range(len(segments), 0, -1):
        dist = find_pkg_dist('.'.join(segments[:length]))
        if dist:
            return dist
Attempt to resolve the distribution from the provided class in the most naive way - this assumes the Python module path to the class contains the name of the package that provided the module and class .
45,746
def verify_builder(builder):
    """Check that the builder accepts exactly the
    (package_names, export_target) calling convention."""
    expected = {'package_names': [], 'export_target': 'some_path'}
    try:
        bound = getcallargs(builder, **expected)
    except TypeError:
        # signature incompatible with the required keywords
        return False
    # extra parameters (even defaulted ones) would appear here and fail
    return bound == expected
To ensure that the provided builder has a signature that is at least compatible .
45,747
def extract_builder_result(builder_result, toolchain_cls=Toolchain):
    """Unpack a builder result into a (Toolchain, Spec) pair.

    Returns (None, None) when the result cannot be unpacked into a pair
    of the expected types.
    """
    try:
        toolchain, spec = builder_result
    except Exception:
        return None, None
    if isinstance(toolchain, toolchain_cls) and isinstance(spec, Spec):
        return toolchain, spec
    return None, None
Extract the builder result to produce a Toolchain and Spec instance .
45,748
def trace_toolchain(toolchain):
    """Trace the versions of the packages providing each Toolchain class
    in the provided instance's MRO."""
    pkgs = []
    for cls in getmro(type(toolchain)):
        if not issubclass(cls, Toolchain):
            continue
        dist = _cls_lookup_dist(cls)
        if dist:
            value = {
                'project_name': dist.project_name,
                'version': dist.version,
            }
        else:
            # distribution could not be resolved for this class
            value = {}
        pkgs.append({'%s:%s' % (cls.__module__, cls.__name__): value})
    return pkgs
Trace the versions of the involved packages for the provided toolchain instance .
45,749
def get_artifact_filename(self, package_name, artifact_name):
    """Return the cached filename for the named artifact of a package.

    Similar to pkg_resources.resource_filename, but backed by the
    records cached on this registry instance; arguments are not quite
    the same.  Returns None when no record exists.
    """
    key = (self.packages.normalize(package_name), artifact_name)
    return self.records.get(key)
Similar to pkg_resources . resource_filename however this works with the information cached in this registry instance and arguments are not quite the same .
45,750
def resolve_artifacts_by_builder_compat(
        self, package_names, builder_name, dependencies=False):
    """Yield artifact paths for the packages (and, optionally, their
    dependencies) in dependency resolution order."""
    paths = self.compat_builders.get(builder_name)
    if not paths:
        return
    if dependencies:
        resolver = find_packages_requirements_dists
    else:
        resolver = pkg_names_to_dists
    for distribution in resolver(package_names):
        path = paths.get(distribution.project_name)
        if path:
            yield path
Yield the list of paths to the artifacts in the order of the dependency resolution
45,751
def get_artifact_metadata(self, package_name):
    """Return the metadata of the artifacts built through this registry.

    Missing or invalid metadata files yield an empty mapping.
    """
    filename = self.metadata.get(package_name)
    if not filename or not exists(filename):
        return {}
    with open(filename, encoding='utf8') as fd:
        contents = fd.read()
    try:
        is_json_compat(contents)
    except ValueError:
        logger.info("artifact metadata file '%s' is invalid", filename)
        return {}
    return json.loads(contents)
Return metadata of the artifacts built through this registry .
45,752
def generate_metadata_entry(self, entry_point, toolchain, spec):
    """Generate the artifact export entry for persistence into the
    metadata file, after the toolchain and spec have been executed."""
    export_target = spec['export_target']
    toolchain_bin_path = spec.get(TOOLCHAIN_BIN_PATH)
    if toolchain_bin_path:
        toolchain_bin = [
            basename(toolchain_bin_path),
            get_bin_version_str(toolchain_bin_path),
        ]
    else:
        toolchain_bin = []
    return {basename(export_target): {
        'toolchain_bases': trace_toolchain(toolchain),
        'toolchain_bin': toolchain_bin,
        'builder': '%s:%s' % (
            entry_point.module_name, '.'.join(entry_point.attrs)),
    }}
After the toolchain and spec have been executed this may be called to generate the artifact export entry for persistence into the metadata file .
45,753
def iter_records_for(self, package_name):
    """Iterate the artifact construction entry points declared by a
    specific package; empty iterator when none were declared."""
    missing = NotImplemented
    entry_points = self.packages.get(package_name, missing)
    if entry_points is missing:
        logger.debug(
            "package '%s' has not declared any entry points for the '%s' "
            "registry for artifact construction",
            package_name, self.registry_name,
        )
        return iter([])
    logger.debug(
        "package '%s' has declared %d entry points for the '%s' "
        "registry for artifact construction",
        package_name, len(entry_points), self.registry_name,
    )
    return iter(entry_points.values())
Iterate records for a specific package .
45,754
def generate_builder(self, entry_point, export_target):
    """Yield exactly one (entry_point, toolchain, spec) triple when both
    the provided entry point and export target satisfy all checks; yield
    nothing (logging the reason) otherwise.
    """
    try:
        builder = entry_point.resolve()
    except ImportError:
        logger.error(
            "unable to import the target builder for the entry point "
            "'%s' from package '%s' to generate artifact '%s'",
            entry_point, entry_point.dist, export_target,
        )
        return
    # the builder must accept (package_names, export_target)
    if not self.verify_builder(builder):
        logger.error(
            "the builder referenced by the entry point '%s' "
            "from package '%s' has an incompatible signature",
            entry_point, entry_point.dist,
        )
        return
    verifier = self.verify_export_target(export_target)
    if not verifier:
        logger.error(
            "the export target '%s' has been rejected", export_target)
        return
    toolchain, spec = self.extract_builder_result(builder(
        [entry_point.dist.project_name],
        export_target=export_target,
    ))
    if not toolchain:
        logger.error(
            "the builder referenced by the entry point '%s' "
            "from package '%s' failed to produce a valid "
            "toolchain", entry_point, entry_point.dist,
        )
        return
    # the spec must advertise the export_target it was asked to build
    if spec.get(EXPORT_TARGET) != export_target:
        logger.error(
            "the builder referenced by the entry point '%s' "
            "from package '%s' failed to produce a spec with the "
            "expected export_target", entry_point, entry_point.dist,
        )
        return
    if callable(verifier):
        # deprecated pathway: older verify_export_target implementations
        # returned a callable to be advised before the prepare step
        warnings.warn(
            "%s:%s.verify_export_target returned a callable, which "
            "will no longer be passed to spec.advise by calmjs-4.0.0; "
            "please instead override 'setup_export_location' or "
            "'prepare_export_location' in that class" % (
                self.__class__.__module__, self.__class__.__name__),
            DeprecationWarning)
        spec.advise(BEFORE_PREPARE, verifier, export_target)
    else:
        spec.advise(
            BEFORE_PREPARE, self.prepare_export_location, export_target)
    yield entry_point, toolchain, spec
Yields exactly one builder if both the provided entry point and export target satisfies the checks required .
45,755
def execute_builder(self, entry_point, toolchain, spec):
    """Execute the toolchain on the spec; return the generated metadata
    entry, or an empty mapping when no artifact was produced."""
    toolchain(spec)
    if exists(spec['export_target']):
        return self.generate_metadata_entry(entry_point, toolchain, spec)
    logger.error(
        "the entry point '%s' from package '%s' failed to "
        "generate an artifact at '%s'",
        entry_point, entry_point.dist, spec['export_target'])
    return {}
Accepts the arguments provided by the builder and executes them .
45,756
def process_package(self, package_name):
    """Build the artifacts declared for the given package and record
    their metadata, doing nothing when no metadata was produced."""
    metadata = super(ArtifactRegistry, self).process_package(package_name)
    if not metadata:
        return
    self.update_artifact_metadata(package_name, metadata)
Build artifacts declared for the given package .
45,757
def alphanum_variable(min_size, max_size, name=None):
    """
    Create the grammar for an alphanumeric field whose size ranges
    between min_size and max_size characters.

    :param min_size: minimum number of characters; must be >= 0
    :param max_size: maximum number of characters; must be >= min_size
    :param name: optional display name for the field
    :return: pyparsing rule matching the field
    :raise ValueError: if the size bounds are invalid
    """
    if name is None:
        name = 'Alphanumeric Field'
    # Raise a meaningful ValueError instead of the original bare
    # BaseException(), which carried no message and escaped plain
    # `except Exception` handlers.
    if min_size < 0:
        raise ValueError('min_size must not be negative: %r' % min_size)
    if max_size < min_size:
        raise ValueError(
            'max_size (%r) must not be smaller than min_size (%r)' % (
                max_size, min_size))
    field = pp.Word(pp.alphanums, min=min_size, max=max_size)
    # Strip surrounding whitespace from the parsed token.
    field.setParseAction(lambda s: s[0].strip())
    field.leaveWhitespace()
    field.setName(name)
    return field
Creates the grammar for an alphanumeric code where the size ranges between two values .
45,758
def year(columns, name=None):
    """
    Create the grammar for a numeric field containing a year.

    :param columns: number of columns the field occupies; must be >= 0
    :param name: optional display name for the field
    :return: pyparsing rule that parses the digits into a year
    :raise ValueError: if columns is negative
    """
    # Raise a meaningful ValueError instead of the original bare
    # BaseException(), which carried no message and escaped plain
    # `except Exception` handlers.
    if columns < 0:
        raise ValueError('columns must not be negative: %r' % columns)
    field = numeric(columns, name)
    field.addParseAction(_to_year)
    return field
Creates the grammar for a field containing a year .
45,759
def is_json_compat(value):
    """
    Check that value is either a JSON-decodable string or an object
    that can itself be encoded into JSON, and that the decoded (or
    given) value is a dict.

    :return: True when the value is acceptable
    :raise ValueError: when it is not
    """
    try:
        value = json.loads(value)
    except ValueError as e:
        raise ValueError('JSON decoding error: ' + str(e))
    except TypeError:
        # Not a string at all; require the object itself to serialize.
        try:
            json.dumps(value)
        except TypeError as e:
            raise ValueError(
                'must be a JSON serializable object: ' + str(e))
    if isinstance(value, dict):
        return True
    raise ValueError(
        'must be specified as a JSON serializable dict or a '
        'JSON deserializable string')
Check that the value is either a JSON decodable string or a dict that can be encoded into a JSON .
45,760
def validate_json_field(dist, attr, value):
    """Setuptools keyword validator: check that value is valid JSON
    (per is_json_compat) for the given attribute."""
    try:
        is_json_compat(value)
        return True
    except ValueError as e:
        raise DistutilsSetupError("%r %s" % (attr, e))
Check for json validity .
45,761
def validate_line_list(dist, attr, value):
    """Setuptools keyword validator: check that value is a list of
    valid identifiers (a whitespace-separated string is split first)."""
    items = value.split() if isinstance(value, str) else value
    items = list(items)
    try:
        # Round-trip through join/split: entries containing whitespace
        # will not reproduce the original list.
        if ' '.join(items).split() == items:
            return True
    except Exception:
        # A non-string entry broke the join; fall through to the error.
        pass
    raise DistutilsSetupError(
        "%r must be a list of valid identifiers" % attr)
Validate that the value is a list of valid identifiers , or a whitespace - separated string of them .
45,762
def write_json_file(argname, cmd, basename, filename):
    """Write the JSON value captured under argname on the distribution
    into the egg-info directory using the given filename."""
    raw = getattr(cmd.distribution, argname, None)
    payload = (
        json.dumps(raw, indent=4, sort_keys=True, separators=(',', ': '))
        if isinstance(raw, dict) else raw
    )
    cmd.write_or_delete_file(argname, filename, payload, force=True)
Write JSON captured from the defined argname into the package s egg - info directory using the specified filename .
45,763
def write_line_list(argname, cmd, basename, filename):
    """Write the value captured under argname out as a newline-joined
    list of lines into the egg-info directory."""
    content = getattr(cmd.distribution, argname, None)
    if isinstance(content, list):
        content = '\n'.join(content)
    cmd.write_or_delete_file(argname, filename, content, force=True)
Write out the retrieved value as list of lines .
45,764
def find_pkg_dist(pkg_name, working_set=None):
    """Locate a package's distribution by name in the working set
    (defaulting to the module-level working set)."""
    ws = working_set or default_working_set
    return ws.find(Requirement.parse(pkg_name))
Locate a package s distribution by its name .
45,765
def convert_package_names(package_names):
    """
    Normalize package_names (a whitespace-separated string or an
    iterable of names) into a (valid, invalid) pair of lists, using
    Requirement.parse as the validity check.
    """
    names = (
        package_names.split()
        if hasattr(package_names, 'split') else package_names)
    valid, invalid = [], []
    for name in names:
        try:
            Requirement.parse(name)
        except ValueError:
            invalid.append(name)
        else:
            valid.append(name)
    return valid, invalid
Convert package names which can be a string of a number of package names or requirements separated by spaces .
45,766
def find_packages_requirements_dists(pkg_names, working_set=None):
    """Resolve the full dependency set of the named packages and return
    the distributions ordered from the bottom of the graph up."""
    ws = working_set or default_working_set
    requirements = []
    for name in pkg_names:
        requirement = Requirement.parse(name)
        # Only resolve requirements that are actually installed.
        if ws.find(requirement):
            requirements.append(requirement)
    return list(reversed(ws.resolve(requirements)))
Return the entire list of dependency requirements reversed from the bottom .
45,767
def find_packages_parents_requirements_dists(pkg_names, working_set=None):
    """Like find_packages_requirements_dists, but with the
    distributions named by pkg_names themselves stripped out."""
    targets = set(pkg_names)
    return [
        dist
        for dist in find_packages_requirements_dists(pkg_names, working_set)
        if dist.project_name not in targets
    ]
Leverages find_packages_requirements_dists but strips out the distributions that match pkg_names .
45,768
def read_dist_egginfo_json(dist, filename=DEFAULT_JSON):
    """
    Safely read and decode a JSON metadata file from a distribution's
    egg-info; returns None (with a log entry) on any failure.
    """
    if not dist.has_metadata(filename):
        logger.debug("no '%s' for '%s'", filename, dist)
        return None
    try:
        raw = dist.get_metadata(filename)
    except IOError:
        logger.error(
            "I/O error on reading of '%s' for '%s'.", filename, dist)
        return None
    try:
        decoded = json.loads(raw)
    except (TypeError, ValueError):
        logger.error(
            "the '%s' found in '%s' is not a valid json.", filename, dist)
        return None
    logger.debug("found '%s' for '%s'.", filename, dist)
    return decoded
Safely get a json within an egginfo from a distribution .
45,769
def read_egginfo_json(pkg_name, filename=DEFAULT_JSON, working_set=None):
    """Read the named JSON egg-info file for an installed package
    identified by pkg_name."""
    ws = working_set or default_working_set
    return read_dist_egginfo_json(
        find_pkg_dist(pkg_name, working_set=ws), filename)
Read json from egginfo of a package identified by pkg_name that s already installed within the current Python environment .
45,770
def flatten_dist_egginfo_json(
        source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS,
        working_set=None):
    """
    Flatten the egg-info JSON files of the given distributions,
    merging the mappings found under each of the dependency keys
    across all of them.
    """
    working_set = working_set or default_working_set
    obj = {}
    depends = {dep: {} for dep in dep_keys}
    for dist in source_dists:
        # NOTE: obj is rebound on every iteration; after the loop it
        # holds the JSON of the LAST distribution, which then serves
        # as the base that the merged dependency keys are written into.
        obj = read_dist_egginfo_json(dist, filename)
        if not obj:
            continue
        logger.debug("merging '%s' for required '%s'", filename, dist)
        for dep in dep_keys:
            depends[dep].update(obj.get(dep, {}))
    if obj is None:
        # The final read failed outright; fall back to just the merged
        # dependency mappings.
        return depends
    for dep in dep_keys:
        # None values act as deletion markers and are dropped here.
        obj[dep] = {k: v for k, v in depends[dep].items() if v is not None}
    return obj
Flatten a distribution 's egg - info JSON , merging the values found under the specified dependency keys .
45,771
def flatten_egginfo_json(
        pkg_names, filename=DEFAULT_JSON, dep_keys=DEP_KEYS,
        working_set=None):
    """Shorthand for flatten_dist_egginfo_json that accepts package
    names instead of distributions."""
    ws = working_set or default_working_set
    dists = find_packages_requirements_dists(pkg_names, working_set=ws)
    return flatten_dist_egginfo_json(
        dists, filename=filename, dep_keys=dep_keys, working_set=ws)
A shorthand calling convention where the package name is supplied instead of a distribution .
45,772
def build_helpers_egginfo_json(
        json_field, json_key_registry, json_filename=None):
    """
    Build the family of helper functions for working with the JSON
    egg-info file based around the provided field, returning the tuple
    (get_extras_json, flatten_extras_json, flatten_parents_extras_json,
    write_extras_json).
    """
    json_filename = (
        json_field + '.json' if json_filename is None else json_filename)

    def get_extras_json(pkg_names, working_set=None):
        # Only the named packages themselves; no dependency resolution.
        working_set = working_set or default_working_set
        dep_keys = set(get(json_key_registry).iter_records())
        dists = pkg_names_to_dists(pkg_names, working_set=working_set)
        return flatten_dist_egginfo_json(
            dists, filename=json_filename, dep_keys=dep_keys,
            working_set=working_set)

    def _flatten_extras_json(pkg_names, find_dists, working_set):
        # Shared implementation, parameterized by the dist finder.
        dep_keys = set(get(json_key_registry).iter_records())
        dists = find_dists(pkg_names, working_set=working_set)
        return flatten_dist_egginfo_json(
            dists, filename=json_filename, dep_keys=dep_keys,
            working_set=working_set)

    def flatten_extras_json(pkg_names, working_set=None):
        # Full dependency graph of the named packages.
        working_set = working_set or default_working_set
        return _flatten_extras_json(
            pkg_names, find_packages_requirements_dists, working_set)

    def flatten_parents_extras_json(pkg_names, working_set=None):
        # Dependencies only; the named packages are excluded.
        working_set = working_set or default_working_set
        return _flatten_extras_json(
            pkg_names, find_packages_parents_requirements_dists, working_set)

    write_extras_json = partial(write_json_file, json_field)
    return (
        get_extras_json,
        flatten_extras_json,
        flatten_parents_extras_json,
        write_extras_json,
    )
Return a tuple of functions that will provide the usage of the JSON egginfo based around the provided field .
45,773
def build_helpers_module_registry_dependencies(registry_name='calmjs.module'):
    """
    Return a tuple of functions that return the relevant sets of module
    registry records based on the dependencies declared for the given
    packages: (get, flatten, flatten_parents).
    """
    def get_module_registry_dependencies(
            pkg_names, registry_name=registry_name, working_set=None):
        # Records for only the named packages; no dependency walking.
        working_set = working_set or default_working_set
        registry = get(registry_name)
        if not isinstance(registry, BaseModuleRegistry):
            return {}
        result = {}
        for pkg_name in pkg_names:
            result.update(registry.get_records_for_package(pkg_name))
        return result

    def _flatten_module_registry_dependencies(
            pkg_names, registry_name, find_dists, working_set):
        # Shared implementation, parameterized by the dist finder.
        result = {}
        registry = get(registry_name)
        if not isinstance(registry, BaseModuleRegistry):
            return result
        dists = find_dists(pkg_names, working_set=working_set)
        for dist in dists:
            result.update(
                registry.get_records_for_package(dist.project_name))
        return result

    def flatten_module_registry_dependencies(
            pkg_names, registry_name=registry_name, working_set=None):
        working_set = working_set or default_working_set
        return _flatten_module_registry_dependencies(
            pkg_names, registry_name, find_packages_requirements_dists,
            working_set)

    def flatten_parents_module_registry_dependencies(
            pkg_names, registry_name=registry_name, working_set=None):
        working_set = working_set or default_working_set
        return _flatten_module_registry_dependencies(
            pkg_names, registry_name,
            find_packages_parents_requirements_dists, working_set)

    return (
        get_module_registry_dependencies,
        flatten_module_registry_dependencies,
        flatten_parents_module_registry_dependencies,
    )
Return a tuple of functions that return the relevant sets of module registry records based on the dependencies defined for the provided packages .
45,774
def has_calmjs_artifact_declarations(cmd, registry_name='calmjs.artifacts'):
    """Predicate for a distutils command: True when the distribution
    declares at least one artifact in the named registry."""
    records = get(registry_name).iter_records_for(
        cmd.distribution.get_name())
    return any(records)
For a distutils command to verify that the artifact build step is possible .
45,775
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand):
    """Hook the artifact build step into setuptools' build command when
    the setup keyword was set to True."""
    if value is not True:
        return
    build_cmd = dist.get_command_obj('build')
    if isinstance(build_cmd, cmdclass):
        build_cmd.sub_commands.append(
            (key, has_calmjs_artifact_declarations))
        return
    logger.error(
        "'build' command in Distribution is not an instance of "
        "'%s:%s' (got %r instead)",
        cmdclass.__module__, cmdclass.__name__, build_cmd)
Trigger the artifact build process through the setuptools .
45,776
def get_rule(self, field_id):
    """Return the rule for the field identified by field_id, creating
    and caching it on first access."""
    if field_id not in self._fields:
        self._fields[field_id] = self._create_field(field_id)
    return self._fields[field_id]
Returns the rule for the field identified by the id .
45,777
def _create_field(self, field_id):
    """Create the field from its registered configuration, delegating
    construction to the adapter for the configured type."""
    config = self._field_configs[field_id]
    adapter = self._adapters[config['type']]
    # Optional configuration entries default to None when absent.
    name = config.get('name')
    columns = config.get('size')
    values = config.get('values')
    field = adapter.get_field(name, columns, values)
    # Fall back to the field id when no explicit results name is set.
    return field.setResultsName(config.get('results_name', field_id))
Creates the field with the specified parameters .
45,778
def read_csv_file(self, file_name):
    """Parse a CSV file into a flat list of all its cell values."""
    path = os.path.join(self.__path(), os.path.basename(file_name))
    with open(path, 'rt') as csvfile:
        reader = csv.reader(csvfile, delimiter=',', quotechar='|')
        return [cell for row in reader for cell in row]
Parses a CSV file into a list .
45,779
def read_yaml_file(self, file_name):
    """Parse a YAML file into the data structure it describes."""
    path = os.path.join(self.__path(), os.path.basename(file_name))
    with open(path, 'rt') as yamlfile:
        # safe_load replaces the Loader-less yaml.load, which is
        # deprecated (PyYAML >= 5.1) and permits arbitrary object
        # construction; these data files need only the safe subset.
        return yaml.safe_load(yamlfile)
Parses a YAML file into a matrix .
45,780
def get_data(self, file_id):
    """Acquire the data from the table identified by file_id, reading
    and caching the backing CSV file on first access."""
    if file_id not in self._file_values:
        self._file_values[file_id] = self._reader.read_csv_file(
            'cwr_%s.csv' % file_id)
    return self._file_values[file_id]
Acquires the data from the table identified by the id .
45,781
def record_type(values):
    """Create a record-type lookup field restricted to the given
    values, exposed under the 'record_type' results name."""
    rule = basic.lookup(values, name='Record Type (one of %s)' % values)
    return rule.setResultsName('record_type')
Creates a record type field .
45,782
def record_prefix(required_type, factory):
    """Create the record prefix grammar for the given record type:
    type, then transaction and record sequence numbers."""
    prefix = record_type(required_type)
    prefix += factory.get_rule('transaction_sequence_n')
    prefix += factory.get_rule('record_sequence_n')
    return prefix
Creates a record prefix for the specified record type .
45,783
def read_config_file(self, file_name):
    """Read a CWR grammar config file and return the parsed result."""
    path = os.path.join(self.__path(), os.path.basename(file_name))
    with open(path, 'rt') as file_config:
        contents = file_config.read()
    return self._parser.parseString(contents)
Reads a CWR grammar config file .
45,784
def _load_cwr_defaults(self):
    """Load the CWR default values file on first use, caching the
    resulting matrix for subsequent calls."""
    defaults = self._cwr_defaults
    if defaults is None:
        defaults = self._reader.read_yaml_file(self._file_defaults)
        self._cwr_defaults = defaults
    return defaults
Loads the CWR default values file creating a matrix from it and then returns this data .
45,785
def soft_error(self, message):
    """Print the usage text and the error message to stderr without
    exiting the program (unlike argparse's error)."""
    self.print_usage(sys.stderr)
    self._print_message(
        _('%(prog)s: error: %(message)s\n') % {
            'prog': self.prog,
            'message': message,
        },
        sys.stderr)
Same as error without the dying in a fire part .
45,786
def default_filename_decoder():
    """Create a decoder that parses CWR filenames following either the
    old or the new naming convention."""
    factory = default_filename_grammar_factory()
    return FileNameDecoder(
        factory.get_rule('filename_old'),
        factory.get_rule('filename_new'),
    )
Creates a decoder which parses CWR filenames following the old or the new convention .
45,787
def decode(self, data):
    """
    Parse the file, creating a CWRFile from it.

    :param data: dict carrying 'filename' and 'contents' entries
    :return: CWRFile built from the decoded file tag and transmission
    """
    file_name = self._filename_decoder.decode(data['filename'])
    file_data = data['contents']
    # Skip any leading bytes before the header record, which is
    # expected to start with 'H'.  NOTE(review): if no 'H' exists the
    # scan runs to the end and the contents become empty — confirm the
    # downstream decoder's behavior for that case.
    i = 0
    max_size = len(file_data)
    while file_data[i:i + 1] != 'H' and i < max_size:
        i += 1
    if i > 0:
        data['contents'] = file_data[i:]
    transmission = self._file_decoder.decode(data['contents'])[0]
    return CWRFile(file_name, transmission)
Parses the file creating a CWRFile from it .
45,788
def decode(self, file_name):
    """
    Parse the filename, creating a FileTag from it.

    The new naming convention is tried first, then the old one; if
    neither grammar matches, an empty FileTag is returned.
    """
    # The original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt and SystemExit; narrowed to Exception here.
    try:
        return self._filename_decoder_new.decode(file_name)
    except Exception:
        pass
    try:
        return self._filename_decoder_old.decode(file_name)
    except Exception:
        return FileTag(0, 0, '', '', '')
Parses the filename creating a FileTag from it .
45,789
def enable_pretty_logging(logger='calmjs', level=logging.DEBUG, stream=None):
    """
    Shorthand to enable pretty logging on the given logger (name or
    instance); returns a callable that undoes the setup.
    """
    if not isinstance(logger, logging.Logger):
        logger = logging.getLogger(logger)
    old_level = logger.level
    handler = logging.StreamHandler(stream)
    handler.setFormatter(logging.Formatter(
        u'%(asctime)s %(levelname)s %(name)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)

    def cleanup():
        # Restore the handler list and the previous level.
        logger.removeHandler(handler)
        logger.level = old_level

    return cleanup
Shorthand to enable pretty logging
45,790
def finalize_env(env):
    """
    Produce a platform-specific environment dict suitable for the
    subprocess.Popen family of calls, with the supplied env applied on
    top of the inherited values; returns a new dict.
    """
    # Copy the platform key list before extending it: the original
    # appended directly to the list returned by the lookup, mutating
    # the shared module-level _PLATFORM_ENV_KEYS entry as a side effect
    # whenever 'PATH' was missing from it.
    keys = list(_PLATFORM_ENV_KEYS.get(sys.platform, []))
    if 'PATH' not in keys:
        keys.append('PATH')
    results = {key: os.environ.get(key, '') for key in keys}
    results.update(env)
    return results
Produce a platform specific env for passing into subprocess . Popen family of external process calling methods and the supplied env will be updated on top of it . Returns a new env .
45,791
def fork_exec(args, stdin='', **kwargs):
    """
    Do a fork-exec through subprocess.Popen, feeding stdin to the child
    and returning (stdout, stderr) in the same type (str or bytes) as
    the provided stdin.
    """
    text_mode = not isinstance(stdin, bytes)
    payload = stdin.encode(locale) if text_mode else stdin
    proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, **kwargs)
    out, err = proc.communicate(payload)
    if text_mode:
        return out.decode(locale), err.decode(locale)
    return out, err
Do a fork - exec through the subprocess . Popen abstraction in a way that takes a stdin and return stdout .
45,792
def raise_os_error(_errno, path=None):
    """Raise an OSError for the given errno (optionally mentioning a
    path) in a way that behaves the same on Python 2.7 and 3."""
    if path:
        raise OSError(_errno, "%s: '%s'" % (strerror(_errno), path))
    raise OSError(_errno, strerror(_errno))
Helper for raising the correct exception under Python 3 while still being able to raise the same common exception class in Python 2 . 7 .
45,793
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """
    Given cmd, check where it is on PATH; return the full path of the
    first candidate satisfying mode, or None.

    :param cmd: command name, or a path with a directory component
    :param mode: os.access mode the candidate must satisfy
    :param path: search path; defaults to the PATH environment variable
    """
    # A cmd with a directory component is checked directly, never
    # searched for on PATH.
    if os.path.dirname(cmd):
        if os.path.isfile(cmd) and os.access(cmd, mode):
            return cmd
    if path is None:
        path = os.environ.get('PATH', defpath)
    if not path:
        return None
    paths = path.split(pathsep)
    if sys.platform == 'win32':
        # Windows searches the current directory first and honors
        # PATHEXT for implicit executable extensions.
        if curdir not in paths:
            paths = [curdir] + paths
        pathext = os.environ.get('PATHEXT', '').split(pathsep)
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        files = [cmd]
    seen = set()
    for p in paths:
        # Skip duplicate (case-normalized) path entries.
        normpath = normcase(p)
        if normpath in seen:
            continue
        seen.add(normpath)
        for f in files:
            fn = os.path.join(p, f)
            if os.path.isfile(fn) and os.access(fn, mode):
                return fn
    return None
Given cmd check where it is on PATH .
45,794
def _init(self):
    """
    Turn the raw entry point records into the usable mapping, applying
    the name-reservation and first-registration-wins rules.
    """
    self._entry_points = {}
    for entry_point in self.raw_entry_points:
        # Reserved names may only be supplied by the designated
        # package; anything else is rejected outright.
        if entry_point.dist.project_name != self.reserved.get(
                entry_point.name, entry_point.dist.project_name):
            logger.error(
                "registry '%s' for '%s' is reserved for package '%s'",
                entry_point.name, self.registry_name,
                self.reserved[entry_point.name],
            )
            continue
        if self.get_record(entry_point.name):
            # A record under this name already exists; the earlier
            # registration wins and the duplicate is discarded.
            logger.warning(
                "registry '%s' for '%s' is already registered.",
                entry_point.name, self.registry_name,
            )
            existing = self._entry_points[entry_point.name]
            logger.debug(
                "registered '%s' from '%s'", existing, existing.dist)
            logger.debug(
                "discarded '%s' from '%s'", entry_point, entry_point.dist)
            continue
        logger.debug(
            "recording '%s' from '%s'", entry_point, entry_point.dist)
        self._entry_points[entry_point.name] = entry_point
Turn the records into actual usable keys .
45,795
def toolchain_spec_compile_entries(
        toolchain, spec, entries, process_name, overwrite_log=None):
    """
    The standardized Toolchain/Spec entries compile function: look up
    the per-process entry compiler on the toolchain and feed it through
    the generic processing loop.
    """
    processor = getattr(toolchain, 'compile_%s_entry' % process_name)
    if callable(overwrite_log):
        modpath_logger = partial(overwrite_log, toolchain.modpath_suffix)
        targetpath_logger = partial(
            overwrite_log, toolchain.targetpath_suffix)
    else:
        modpath_logger = None
        targetpath_logger = None
    return process_compile_entries(
        processor, spec, entries, modpath_logger, targetpath_logger)
The standardized Toolchain Spec Entries compile function
45,796
def process_compile_entries(
        processor, spec, entries, modpath_logger=None,
        targetpath_logger=None):
    """
    The generalized raw spec entry processing loop: invoke processor on
    every entry and merge the resulting modpaths, targetpaths and
    export module names, optionally reporting overwritten keys through
    the provided loggers.
    """
    combined_modpaths = {}
    combined_targets = {}
    combined_module_names = []

    def merge(base, fresh, log):
        if not callable(log):
            base.update(fresh)
            return
        # Report each key that gets overwritten during the merge.
        for dupes in dict_update_overwrite_check(base, fresh):
            log(*dupes)

    for entry in entries:
        modpaths, targetpaths, export_module_names = processor(spec, entry)
        merge(combined_modpaths, modpaths, modpath_logger)
        merge(combined_targets, targetpaths, targetpath_logger)
        combined_module_names.extend(export_module_names)
    return combined_modpaths, combined_targets, combined_module_names
The generalized raw spec entry process invocation loop .
45,797
def update_selected(self, other, selected):
    """Like dict.update, but copies only the provided selected keys
    from other."""
    picked = {key: other[key] for key in selected}
    self.update(picked)
Like update however a list of selected keys must be provided .
45,798
def advise(self, name, f, *a, **kw):
    """
    Add an advice (callable f with its positional and keyword
    arguments) to the group identified by name, to be invoked later by
    the handle method.
    """
    if name is None:
        return
    advice = (f, a, kw)
    debug = self.get(DEBUG)
    frame = currentframe()
    if frame is None:
        logger.debug('currentframe() failed to return frame')
    else:
        if name in self._called:
            # The group was already handled; guard against advice loops
            # by inspecting the caller's stack frames.
            self.__advice_stack_frame_protection(frame)
        if debug:
            logger.debug(
                "advise '%s' invoked by %s:%d", name,
                frame.f_back.f_code.co_filename, frame.f_back.f_lineno,
            )
            if debug > 1:
                # Keep the caller's full stack for richer exception
                # reports when the advice later fails.
                self._frames[id(advice)] = ''.join(
                    format_stack(frame.f_back))
    self._advices[name] = self._advices.get(name, [])
    self._advices[name].append(advice)
Add an advice that will be handled later by the handle method .
45,799
def handle(self, name):
    """
    Call all advices recorded at the provided name, translating their
    failures into the toolchain's cancellation/abort semantics.
    """
    if name in self._called:
        logger.warning(
            "advice group '%s' has been called for this spec %r",
            name, self,
        )
        # Re-handling an already-called group suggests an advice loop.
        self.__advice_stack_frame_protection(currentframe())
    else:
        self._called.add(name)
    advices = []
    advices.extend(self._advices.get(name, []))
    if advices and self.get('debug'):
        logger.debug(
            "handling %d advices in group '%s' ", len(advices), name)
    while advices:
        try:
            values = advices.pop()
            advice, a, kw = values
            # Malformed records are skipped with a log entry.
            if not ((callable(advice)) and isinstance(a, tuple) and
                    isinstance(kw, dict)):
                raise TypeError
        except ValueError:
            logger.info('Spec advice extraction error: got %s', values)
        except TypeError:
            logger.info('Spec advice malformed: got %s', values)
        else:
            try:
                try:
                    advice(*a, **kw)
                except Exception as e:
                    # If the registration stack was captured (debug
                    # level > 1 in advise), show where the failing
                    # advice came from, then re-raise for the outer
                    # handlers below.
                    frame = self._frames.get(id(values))
                    if frame:
                        logger.info('Spec advice exception: %r', e)
                        logger.info(
                            'Traceback for original advice:\n%s', frame)
                    raise
            except AdviceCancel as e:
                # The advice cancelled itself; execution continues.
                logger.info(
                    "advice %s in group '%s' signaled its cancellation "
                    "during its execution: %s", advice, name, e)
                if self.get(DEBUG):
                    logger.debug(
                        'showing traceback for cancellation',
                        exc_info=1,
                    )
            except AdviceAbort as e:
                # Known error in the advice; toolchain keeps running.
                logger.warning(
                    "advice %s in group '%s' encountered a known error "
                    "during its execution: %s; continuing with toolchain "
                    "execution", advice, name, e)
                if self.get(DEBUG):
                    logger.warning(
                        'showing traceback for error',
                        exc_info=1,
                    )
            except ToolchainCancel:
                # Propagate toolchain-level cancellation untouched.
                raise
            except ToolchainAbort as e:
                logger.critical(
                    "an advice in group '%s' triggered an abort: %s",
                    name, str(e))
                raise
            except KeyboardInterrupt:
                raise ToolchainCancel('interrupted')
            except Exception as e:
                # Unexpected failure: log it but keep going.
                logger.critical(
                    "advice %s in group '%s' terminated due to an "
                    "unexpected exception: %s", advice, name, e)
                if self.get(DEBUG):
                    logger.critical(
                        'showing traceback for error',
                        exc_info=1,
                    )
Call all advices at the provided name .