signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def to_lower(cls):  # NOQA
    """Return a list of all the fields that should be lowercased.

    This is done on fields declared with ``lower=True`` plus all
    ``EmailType`` fields, which are always lowercased.

    :returns: de-duplicated list of field names.
    """
    email = cls.get_fields_by_class(EmailType)
    lower = cls.get_fields_by_prop('lower', True)
    # The original concatenated ``email`` twice (once into ``lower`` and
    # once again at the union step); a single concatenation plus ``set``
    # gives the same de-duplicated result without redundant work.
    return list(set(email + lower))
def describe(self, bucket, descriptor=None):
    """Set or get the descriptor for a bucket.

    See https://github.com/frictionlessdata/tableschema-pandas-py#storage
    """
    if descriptor is not None:
        # Caller supplied a descriptor: store and echo it back.
        self.__descriptors[bucket] = descriptor
        return descriptor
    # No descriptor supplied: return the cached one, or reconstruct it
    # from the stored dataframe when nothing is cached.
    cached = self.__descriptors.get(bucket)
    if cached is not None:
        return cached
    return self.__mapper.restore_descriptor(self.__dataframes[bucket])
def addTask(self, task):
    """Add a task to the task set.

    Registers the task under ``self.regcond`` so bookkeeping counters and
    the task list stay consistent, then initializes and starts the task.
    """
    with self.regcond:
        self.taskset.append(task)
        # The callback is registered with the *pre-increment* value of
        # numtasks, i.e. this task's index within the set.
        task.add_callback('resolved', self.child_done, self.numtasks)
        self.numtasks += 1
        self.count += 1
        # Initialize and start while still holding the lock so the task
        # cannot resolve before the counters above are updated.
        task.initialize(self)
        task.start()
def get_realtime_alarm(username, auth, url):
    """Fetch the current real-time alarms from the HPE IMC RESTful API.

    :param username: operator name (str); the operator must have the
        privileges to view the Real-Time Alarms.
    :param auth: requests auth object (usually ``auth.creds`` from the
        ``pyhpeimc.auth`` IMCAuth class)
    :param url: base url of the IMC RS interface (usually ``auth.url``)
    :return: list of dicts, each representing a single alarm, or an error
        string if the request fails
    :rtype: list
    """
    f_url = url + "/imcrs/fault/faultRealTime?operatorName=" + username
    try:
        # The HTTP call itself raises RequestException on network failure,
        # so it must live inside the try block -- the original placed it
        # outside, which made the except handler unreachable.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        realtime_alarm_list = json.loads(response.text)
        return realtime_alarm_list['faultRealTime']['faultRealTimeList']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_realtime_alarm: An Error has occured'
def from_array(array):
    """Deserialize a new ShippingOption from a given dictionary.

    :return: new ShippingOption instance, or None for empty input.
    :rtype: ShippingOption
    """
    # None or an empty dict both mean "nothing to deserialize".
    if not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    data = {
        'id': u(array.get('id')),
        'title': u(array.get('title')),
        'prices': LabeledPrice.from_array_list(array.get('prices'), list_level=1),
    }
    instance = ShippingOption(**data)
    # Keep the raw payload around for debugging / round-tripping.
    instance._raw = array
    return instance
def print_functions(self, d):
    """Export all the functions to dot files in directory *d*."""
    for contract in self.contracts:
        for function in contract.functions:
            filename = '{}.{}.dot'.format(contract.name, function.name)
            function.cfg_to_dot(os.path.join(d, filename))
def _read_state_variables(self):
    """Read the stateVariable information from the xml-file.

    Extracts name and dataType for each state variable so they can later
    be assigned to FritzActionArgument instances.  Fills
    ``self.state_variables`` with name -> dataType mappings.
    """
    ns_map = {'ns': self.namespace}
    for node in self.root.iterfind('.//ns:stateVariable', namespaces=ns_map):
        name = node.find(self.nodename('name')).text
        data_type = node.find(self.nodename('dataType')).text
        self.state_variables[name] = data_type
def update_policy_configuration(self, configuration, project, configuration_id):
    """UpdatePolicyConfiguration.

    Update a policy configuration by its ID.

    :param configuration: The policy configuration to update.
    :param str project: Project ID or project name.
    :param int configuration_id: ID of the existing policy configuration
        to be updated.
    :rtype: :class:`PolicyConfiguration`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if configuration_id is not None:
        route_values['configurationId'] = self._serialize.url(
            'configuration_id', configuration_id, 'int')
    content = self._serialize.body(configuration, 'PolicyConfiguration')
    response = self._send(
        http_method='PUT',
        location_id='dad91cbe-d183-45f8-9c6e-9c1164472121',
        version='5.0',
        route_values=route_values,
        content=content,
    )
    return self._deserialize('PolicyConfiguration', response)
def checked_open(filename, log=None, quiet=False):
    """Open and validate the given metafile.

    Optionally provide diagnostics on the passed logger for invalid
    metafiles, which then just cause a warning but no exception.
    "quiet" can suppress that warning.
    """
    with open(filename, "rb") as handle:
        raw_data = handle.read()
    data = bencode.bdecode(raw_data)

    try:
        check_meta(data)
        # Round-tripping must reproduce the exact input bytes; bencoded
        # dicts have a canonical key order.
        if raw_data != bencode.bencode(data):
            raise ValueError("Bad bencoded data - dict keys out of order?")
    except ValueError as exc:
        if not log:
            raise
        # Warn about it, unless it's a quiet value query
        if not quiet:
            log.warn("%s: %s" % (filename, exc))
    return data
def sign_up(self):
    """Signs up a participant for the experiment.

    This is done using a POST request to the /participant/ endpoint,
    retried with a randomized sleep until the server accepts it.
    """
    self.log("Bot player signing up.")
    self.subscribe_to_quorum_channel()
    template = (
        "{host}/participant/{self.worker_id}/"
        "{self.hit_id}/{self.assignment_id}/"
        "debug?fingerprint_hash={hash}&recruiter=bots:{bot_name}"
    )
    while True:
        # A fresh fingerprint hash is generated for every attempt.
        url = template.format(
            host=self.host,
            self=self,
            hash=uuid.uuid4().hex,
            bot_name=self.__class__.__name__,
        )
        try:
            result = requests.post(url)
            result.raise_for_status()
        except RequestException:
            self.stochastic_sleep()
            continue
        if result.json()["status"] == "error":
            self.stochastic_sleep()
            continue
        self.on_signup(result.json())
        return True
def compose_all(stream, Loader=Loader):
    """Parse all YAML documents in a stream and produce the
    corresponding representation trees, one per document.
    """
    composer = Loader(stream)
    try:
        while composer.check_node():
            yield composer.get_node()
    finally:
        # Always release loader resources, even if the consumer stops
        # iterating early.
        composer.dispose()
def write(self, obj, **kwargs):
    """Write this object as a frame table.

    The MultiIndex levels become leading columns, followed by the values
    column (named after the object, defaulting to 'values').
    """
    name = obj.name or 'values'
    obj, self.levels = self.validate_multiindex(obj)
    obj.columns = list(self.levels) + [name]
    return super().write(obj=obj, **kwargs)
def _calc_F_guess ( self , alpha , predictions , probabilities ) : """Calculate an estimate of the F - measure based on the scores"""
num = np . sum ( predictions . T * probabilities , axis = 1 ) den = np . sum ( ( 1 - alpha ) * probabilities + alpha * predictions . T , axis = 1 ) F_guess = num / den # Ensure guess is not undefined F_guess [ den == 0 ] = 0.5 return F_guess
def advisory_lock(dax, key, lock_mode=LockMode.wait, xact=False):
    """Context manager generator for obtaining an advisory lock, running
    code, and then releasing the lock.

    Yields a boolean indicating whether the lock was obtained.

    :param dax: a DataAccess instance
    :param key: either a big int or a 2-tuple of integers
    :param lock_mode: a member of the LockMode enum:
        - wait: block until the lock is obtained.
        - skip: try once; on failure, False is yielded to the block.
        - error: try once; on failure, raise an exception.
    :param xact: if True, the lock is obtained per *lock_mode* but not
        released afterwards, since it is automatically released at the
        end of the transaction.
    """
    got_lock = obtain_lock(dax, key, lock_mode, xact)
    # In wait mode obtain_lock blocks until success, so its return value
    # is irrelevant; only skip/error modes can fail here.
    if lock_mode != LockMode.wait and not got_lock:
        if lock_mode == LockMode.error:
            raise Exception("Unable to obtain advisory lock {}".format(key))
        # lock_mode is skip
        yield False
        return
    # At this point we have the lock.
    try:
        yield True
    finally:
        if not xact:
            release_lock(dax, key, lock_mode)
def add_tags_to_bookmark(self, bookmark_id, tags):
    """Add tags to a bookmark.

    The identified bookmark must belong to the current user.

    :param bookmark_id: ID of the bookmark to tag.
    :param tags: Comma separated tags to be applied.
    """
    url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
    return self.post(url, dict(tags=tags))
def reconnect(self, exc=None):
    """Schedule reconnect after connection has been unexpectedly lost."""
    # Reset protocol binding before starting reconnect.
    self.protocol = None
    if self.closing:
        return
    log.warning('disconnected from Rflink, reconnecting')
    self.loop.create_task(self.connect())
def sphere(target, pore_diameter='pore.diameter', throat_area='throat.area'):
    r"""Calculate internal surface area of pore bodies assuming they are
    spherical, then subtract the area of the neighboring throats.

    The subtraction is crude: it uses the throat cross-sectional area and
    does not account for the actual curvature of the intersection.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated.  Controls
        the length of the calculated array and provides access to other
        necessary properties.
    pore_diameter : string
        The dictionary key to the pore diameter array.
    throat_area : string
        The dictionary key to the throat area array.  Throat areas are
        needed since their intersection with the pore is removed from the
        computation.
    """
    network = target.project.network
    radii = target[pore_diameter] / 2
    sphere_area = 4 * _np.pi * radii ** 2
    neighbor_throats = network.find_neighbor_throats(pores=target.Ps, flatten=False)
    throat_areas = _np.array([_np.sum(network[throat_area][Ts])
                              for Ts in neighbor_throats])
    return sphere_area - throat_areas
def create_http_method(logic: Callable,
                       http_method: str,
                       handle_http: Callable,
                       before: Callable = None,
                       after: Callable = None) -> Callable:
    """Create a handler method to be used in a handler class.

    :param logic: the underlying function to execute with the parsed and
        validated parameters.
    :param http_method: HTTP method this will handle.
    :param handle_http: the HTTP handler function used to wrap the logic
        function.
    :param before: optional callable invoked before the logic function.
    :param after: optional callable invoked after the logic function,
        receiving its result.
    :returns: a handler function.
    """
    @functools.wraps(logic)
    def handler_fn(handler, *args, **kwargs):
        # callable(None) is False, so a missing hook is simply skipped.
        if callable(before):
            before()
        result = handle_http(handler, args, kwargs, logic)
        if callable(after):
            after(result)
        return result

    return handler_fn
def protect(self, password=None, read_protect=False, protect_from=0):
    """Protect a FeliCa Lite Tag.

    A FeliCa Lite Tag can be provisioned with a custom password (or the
    default manufacturer key if the password is an empty string or
    bytearray) to ensure that data retrieved by future read operations,
    after authentication, is genuine.  Read protection is not supported.

    A non-empty *password* must provide at least 128 bit key material; in
    other words it must be a string or bytearray of length 16 or more.

    The memory unit for the value of *protect_from* is 16 byte, thus with
    ``protect_from=2`` bytes 0 to 31 are not protected.  If
    *protect_from* is zero (the default value) and the Tag has valid NDEF
    management data, the NDEF RW Flag is set to read only.
    """
    # Delegate to the FelicaLite implementation unchanged; this subclass
    # adds no extra protection behavior of its own.
    return super(FelicaLite, self).protect(password, read_protect, protect_from)
def list_numbers(self, **kwargs):  # noqa: E501
    """Get your numbers.  # noqa: E501

    List all your purchased numbers.  This method makes a synchronous
    HTTP request by default.  To make an asynchronous HTTP request,
    please pass async=True

    >>> thread = api.list_numbers(async=True)
    >>> result = thread.get()

    :param async bool:
    :return: ResponseNumberList
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches of the original made the exact same
    # delegated call; with _return_http_data_only set, the helper returns
    # the data directly (or the request thread when async=True).
    return self.list_numbers_with_http_info(**kwargs)  # noqa: E501
def extract(src_vector: str, burn_attribute: str, src_raster: list, dst_names: list,
            dst_dir: str, src_raster_template: str = None, gdal_dtype: int = 4,
            n_jobs: int = 1):
    """Extract values from a list of single-band rasters for pixels
    overlapping with a vector dataset.

    The extracted data is stored in ``dst_dir`` using ``dst_names`` for the
    filenames.  If a file with a given name already exists the raster is
    skipped.

    Arguments:
        src_vector {str} -- Filename of the vector dataset.  Currently it
            must have the same CRS as the raster.
        burn_attribute {str} -- Name of the attribute column in the
            ``src_vector`` dataset to be stored with the extracted data.
            This should usually be a unique ID for the features in the
            vector dataset.
        src_raster {list} -- List of filenames of the single-band raster
            files from which to extract.
        dst_names {list} -- List corresponding to ``src_raster`` names used
            to store and later identify the extracted data.
        dst_dir {str} -- Directory to store the data to.

    Keyword Arguments:
        src_raster_template {str} -- A template raster to be used for
            rasterizing the vector file.  Usually the first element of
            ``src_raster``.  (default: {None})
        gdal_dtype {int} -- Numeric GDAL data type, defaults to 4 (UInt32).
        n_jobs {int} -- Number of parallel workers; 1 runs sequentially,
            -1 uses all CPUs.  (default: {1})

    Returns:
        int -- 0 on success.
    """
    if src_raster_template is None:
        src_raster_template = src_raster[0]
    path_rasterized = os.path.join(dst_dir,
                                   f"burn_attribute_rasterized_{burn_attribute}.tif")
    # Auxiliary arrays: the rasterized vector attribute plus the x/y pixel
    # coordinates of every extracted pixel.
    paths_extracted_aux = {ele: os.path.join(dst_dir, f"{ele}.npy")
                           for ele in [f"aux_vector_{burn_attribute}",
                                       "aux_coord_x",
                                       "aux_coord_y"]}
    # Only rasters whose destination .npy does not yet exist are extracted.
    paths_extracted_raster = {}
    for path, name in zip(src_raster, dst_names):
        dst = f"{os.path.join(dst_dir, name)}.npy"
        if not os.path.exists(dst):
            paths_extracted_raster[path] = dst
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    # if it does not already exist, here we first create the rasterized data
    if not os.path.exists(path_rasterized):
        # NOTE(review): this template default was already applied above;
        # this second check is redundant but harmless.
        if src_raster_template is None:
            src_raster_template = src_raster[0]
        # print("Rasterizing vector attribute.")
        rasterize(src_vector=src_vector,
                  burn_attribute=burn_attribute,
                  src_raster_template=src_raster_template,
                  dst_rasterized=path_rasterized,
                  gdal_dtype=gdal_dtype)
    # if any of the destination files do not exist we need the locations of
    # the pixels to be extracted in form of a numpy bool array (mask_arr)
    # that fits the rasters from which we will extract below
    if not (all([os.path.exists(path) for path in paths_extracted_aux.values()]) and
            all([os.path.exists(path) for path in paths_extracted_raster.values()])):
        # print("Creating mask array for pixels to be extracted.")
        mask_arr = _get_mask_array(path_rasterized, paths_extracted_aux, burn_attribute)
    else:
        # Everything already extracted -- nothing to do.
        return 0
    # create the pixel coordinates if they do not exist
    if not all([os.path.exists(paths_extracted_aux["aux_coord_x"]),
                os.path.exists(paths_extracted_aux["aux_coord_y"])]):
        _create_and_save_coords(path_rasterized, paths_extracted_aux, mask_arr)
    # lets extract the raster values in case of sequential processing
    # or remove existing raster layers to prepare parallel processing
    if n_jobs == 1:
        for path_src, path_dst in tqdm(paths_extracted_raster.items(),
                                       total=len(paths_extracted_raster)):
            _extract_and_save_one_layer(path_src, path_dst, mask_arr)
    else:
        import multiprocessing as mp
        if n_jobs == -1:
            n_jobs = mp.cpu_count()
        pool = mp.Pool(processes=n_jobs)
        _ = [pool.apply_async(_extract_and_save_one_layer, args=(src, dst, mask_arr))
             for src, dst in paths_extracted_raster.items()]
        pool.close()
        pool.join()
    return 0
def add_next_tick_callback(self, callback):
    '''Add callback to be invoked once on the next tick of the event loop.

    Args:
        callback (callable):
            A callback function to execute on the next tick.

    Returns:
        NextTickCallback: can be used with ``remove_next_tick_callback``

    .. note::
        Next tick callbacks only work within the context of a Bokeh server
        session.  This function will have no effect when Bokeh outputs to
        standalone HTML or Jupyter notebook cells.
    '''
    from ..server.callbacks import NextTickCallback
    session_cb = NextTickCallback(self, None)
    return self._add_session_callback(session_cb, callback, one_shot=True,
                                      originator=self.add_next_tick_callback)
def get_best_experiment_sets(nets, expvars, num):
    '''Given the network and the experimental variables, and the bound on
    the size of an experiment set, returns the experiments as a
    ``TermSet`` object [instance].
    '''
    netsf = nets.to_file()
    expvarsf = expvars.to_file()
    best = -1
    best_solutions = []
    best_found = False
    set_size = 0
    # Grow the experiment-set size until the optimum stops improving (or
    # the solver returns no solutions, or the bound ``num`` is reached).
    while set_size < num and not best_found:
        set_size += 1
        num_exp = String2TermSet(f'pexperiment({set_size})')
        num_expf = num_exp.to_file()
        prg = [netsf, expvarsf, num_expf, find_best_exp_sets_prg, elem_path_prg]
        coptions = '--project --opt-mode=optN --opt-strategy=0 --opt-heuristic'
        solver = GringoClasp(clasp_options=coptions)
        solutions = solver.run(prg, collapseTerms=True, collapseAtoms=False)
        os.unlink(num_expf)
        if solutions == []:
            best_found = True
        else:
            score_sum = (solutions[0].score[0]
                         + solutions[0].score[1]
                         + solutions[0].score[2])
            if best == score_sum:
                # Same optimum as the previous size: converged.
                best_found = True
            else:
                best = score_sum
                best_solutions = solutions
    os.unlink(netsf)
    os.unlink(expvarsf)
    return best_solutions
def symbols_to_prob(symbols):
    '''Return a Counter mapping symbols to probability.

    input:
        symbols:    iterable of hashable items
                    works well if symbols is a zip of iterables
    '''
    counts = Counter(symbols)
    # Derive the total from the counter itself: ``symbols`` may be a
    # single-pass iterator (e.g. a zip object in Python 3) that Counter has
    # already exhausted, in which case the original ``len(list(symbols))``
    # evaluated to 0 and caused a ZeroDivisionError.
    total = float(sum(counts.values()))
    for symbol in counts:
        counts[symbol] /= total
    return counts
def get_utc_date(entry):
    """Return datestamp converted to UTC.

    Adds a 'numeric_date_stamp_utc' key to *entry*, computed from
    'numeric_date_stamp' and the '+HHMM'/'-HHMM' offset in 'tzone'.
    """
    stamp = entry['numeric_date_stamp']
    if stamp == '0':
        entry['numeric_date_stamp_utc'] = '0'
        return entry
    fmt = '%Y%m%d%H%M%S.%f' if '.' in stamp else '%Y%m%d%H%M%S'
    local_time = datetime.strptime(stamp, fmt)
    offset = timedelta(hours=int(entry['tzone'][1:3]),
                       minutes=int(entry['tzone'][3:5]))
    # A negative zone offset means local time lags UTC, so add it back.
    if entry['tzone'][0] == '-':
        utc_time = local_time + offset
    else:
        utc_time = local_time - offset
    entry['numeric_date_stamp_utc'] = utc_time.strftime('%Y%m%d%H%M%S.%f')
    return entry
def dict(self):
    """Return this ``SoSOptions`` option values as a dictionary of
    argument name to value mappings.

    :returns: a name:value dictionary of option values.
    """
    # Preset-management options are never stored inside presets.
    preset_only = ('add_preset', 'del_preset', 'desc', 'note')
    return {arg: (None if arg in preset_only else getattr(self, arg))
            for arg in _arg_names}
def _partition_operation_sql(self, operation, settings=None, from_part=None):
    """Perform an operation over this partition.

    :param operation: Operation to execute, from the SystemPart.OPERATIONS set.
    :param settings: Settings for executing the request to ClickHouse over
        the db.raw() method.
    :param from_part: optional source partition for FROM-style operations.
    :return: Operation execution result.
    """
    operation = operation.upper()
    assert operation in self.OPERATIONS, \
        "operation must be in [%s]" % comma_join(self.OPERATIONS)
    sql = "ALTER TABLE `%s`.`%s` %s PARTITION %s" % (
        self._database.db_name, self.table, operation, self.partition)
    if from_part is not None:
        sql += " FROM %s" % from_part
    self._database.raw(sql, settings=settings, stream=False)
def _run_smoove(full_bams, sr_bams, disc_bams, work_dir, items):
    """Run lumpy-sv using smoove.

    Returns a tuple of (output VCF path, exclude BED path).  Reuses a
    previously generated output file when one exists.
    """
    batch = sshared.get_cur_batch(items)
    ext = "-%s-svs" % batch if batch else "-svs"
    name = "%s%s" % (dd.get_sample_name(items[0]), ext)
    out_file = os.path.join(work_dir, "%s-smoove.genotyped.vcf.gz" % name)
    sv_exclude_bed = sshared.prepare_exclude_file(items, out_file)
    # Output produced by an older naming scheme; reuse it when present.
    old_out_file = os.path.join(work_dir, "%s%s-prep.vcf.gz"
                                % (os.path.splitext(os.path.basename(items[0]["align_bam"]))[0],
                                   ext))
    if utils.file_exists(old_out_file):
        return old_out_file, sv_exclude_bed
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            cores = dd.get_num_cores(items[0])
            out_dir = os.path.dirname(tx_out_file)
            ref_file = dd.get_ref_file(items[0])
            full_bams = " ".join(_prepare_smoove_bams(full_bams, sr_bams, disc_bams, items,
                                                      os.path.dirname(tx_out_file)))
            # Patterns for contigs excluded by default (~ marks a pattern;
            # ^ anchors a prefix for smoove's --excludechroms option).
            std_excludes = ["~^GL", "~^HLA", "~_random", "~^chrUn", "~alt", "~decoy"]
            def _is_std_exclude(n):
                # True when contig name matches one of the standard
                # exclusion patterns (prefix or suffix match).
                clean_excludes = [x.replace("~", "").replace("^", "")
                                  for x in std_excludes]
                return any([n.startswith(x) or n.endswith(x) for x in clean_excludes])
            # Also exclude alternate contigs not already covered above.
            exclude_chrs = [c.name for c in ref.file_contigs(ref_file)
                            if not chromhacks.is_nonalt(c.name)
                            and not _is_std_exclude(c.name)]
            exclude_chrs = "--excludechroms '%s'" % ",".join(std_excludes + exclude_chrs)
            exclude_bed = (("--exclude %s" % sv_exclude_bed)
                           if utils.file_exists(sv_exclude_bed) else "")
            tempdir = os.path.dirname(tx_out_file)
            cmd = ("export TMPDIR={tempdir} && "
                   "smoove call --processes {cores} --genotype --removepr --fasta {ref_file} "
                   "--name {name} --outdir {out_dir} "
                   "{exclude_bed} {exclude_chrs} {full_bams}")
            with utils.chdir(tempdir):
                try:
                    do.run(cmd.format(**locals()), "smoove lumpy calling", items[0])
                except subprocess.CalledProcessError as msg:
                    # Known/acceptable smoove failures produce an empty VCF
                    # instead of aborting the pipeline.
                    if _allowed_errors(msg):
                        vcfutils.write_empty_vcf(tx_out_file,
                                                 config=items[0]["config"],
                                                 samples=[dd.get_sample_name(d) for d in items])
                    else:
                        logger.exception()
                        raise
    vcfutils.bgzip_and_index(out_file, items[0]["config"])
    return out_file, sv_exclude_bed
def post(self, url, data):
    """Send a HTTP POST request to a URL and return the result."""
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/json",
    }
    self.conn.request("POST", url, data, headers)
    return self._process_response()
def get_modifications(self):
    """Extract INDRA Modification Statements from the BioPAX model.

    To extract Modifications, this method reuses the structure of BioPAX
    Pattern's org.biopax.paxtools.pattern.PatternBox.controlsStateChange
    pattern with additional constraints to specify the type of state
    change occurring (phosphorylation, deubiquitination, etc.).
    """
    for modtype, modclass in modtype_to_modclass.items():
        # TODO: we could possibly try to also extract generic
        # modifications here
        if modtype == 'modification':
            continue
        self.statements += self._get_generic_modification(modclass)
def _get_files ( self , folderId ) : """Retrieve the list of files contained in a folder"""
uri = '/' . join ( [ self . base_url , self . name , folderId , 'Files' ] ) return uri , { } , 'get' , None , None , False , None
def get_fixture(self, fixture_id, head2head=None):
    """Load a single fixture.

    Args:
        fixture_id (str): the id of the fixture
        head2head (int, optional): load the previous n fixtures of the
            two teams

    Returns:
        json: the fixture-json
    """
    filters = []
    if head2head is not None and int(head2head) > 0:
        self.logger.debug(f'Getting fixture {fixture_id}. head2head is {head2head}.')
        filters.append(self.__createFilter('head2head', head2head))
    else:
        self.logger.debug(f'Getting fixture {fixture_id}.')
    return self._request('fixtures', fixture_id, filters=filters)
def count_genes(model):
    """Count the number of distinct genes in model reactions."""
    genes = set()
    for reaction in model.reactions:
        if reaction.genes is None:
            continue
        if isinstance(reaction.genes, boolean.Expression):
            # Boolean gene-association expression: collect its variables.
            genes.update(v.symbol for v in reaction.genes.variables)
        else:
            genes.update(reaction.genes)
    return len(genes)
def add_spaces(self, spaces, ret=False):
    """Add ``pyny.Spaces`` to the current space.

    In other words, it merges multiple ``pyny.Spaces`` in this instance.

    :param spaces: ``pyny.Spaces`` to add.
    :type spaces: list of pyny.Spaces
    :param ret: If True, returns the whole updated Space.
    :type ret: bool
    :returns: None, ``pyny.Space``

    .. warning:: This method acts inplace.
    """
    # Accept a single Spaces instance as well as a list of them.
    # isinstance (rather than the original ``type(...) != list``) also
    # accepts list subclasses.
    if not isinstance(spaces, list):
        spaces = [spaces]
    # Flatten all places in one pass; the original ``sum(lists, [])``
    # idiom is quadratic in the number of spaces.
    all_places = [place for space in spaces for place in space.places]
    Space.add_places(self, all_places)
    if ret:
        return self
def generate_dc_xml(dc_dict):
    """Generate a DC XML string."""
    # Root and element namespaces in Clark ("{uri}tag") notation.
    root_namespace = '{%s}' % DC_NAMESPACES['oai_dc']
    elements_namespace = '{%s}' % DC_NAMESPACES['dc']
    schema_location = ('http://www.openarchives.org/OAI/2.0/oai_dc/ '
                       'http://www.openarchives.org/OAI/2.0/oai_dc.xsd')
    root_attributes = {'{%s}schemaLocation' % XSI: schema_location}
    # Return the DC XML string.
    return pydict2xmlstring(
        dc_dict,
        ordering=DC_ORDER,
        root_label='dc',
        root_namespace=root_namespace,
        elements_namespace=elements_namespace,
        namespace_map=DC_NAMESPACES,
        root_attributes=root_attributes,
    )
def clustering_fields(self):
    """Union[List[str], None]: Fields defining clustering for the table
    (Defaults to :data:`None`).

    Clustering fields are immutable after table creation.

    .. note::
        As of 2018-06-29, clustering fields cannot be set on a table
        which does not also have time partitioning defined.
    """
    clustering = self._properties.get("clustering")
    if clustering is None:
        return None
    return list(clustering.get("fields", ()))
def process_pulls(self, testpulls=None, testarchive=None, expected=None):
    """Runs self.find_pulls() *and* processes the pull requests: unit
    tests, status updates and wiki page creations.

    :arg testpulls: for unit testing, a dict of pull requests to process
        instead of querying live.
    :arg testarchive: for unit testing, canned archive data with
        reproducible start/finish timestamps.
    :arg expected: for unit testing, the output results that would be
        returned from running the tests in real time.
    """
    from datetime import datetime
    pulls = self.find_pulls(None if testpulls is None else testpulls.values())
    for reponame in pulls:
        for pull in pulls[reponame]:
            try:
                archive = self.archive[pull.repokey]
                if pull.snumber in archive:
                    # We pass the archive in so that an existing staging
                    # directory (if different from the configured one) can
                    # be cleaned up if the previous attempt failed and left
                    # the file system dirty.
                    pull.init(archive[pull.snumber])
                else:
                    pull.init({})
                if self.testmode and testarchive is not None:
                    # Hard-coded start times so that the model output is
                    # reproducible.
                    if pull.number in testarchive[pull.repokey]:
                        start = testarchive[pull.repokey][pull.number]["start"]
                    else:
                        start = datetime(2015, 4, 23, 13, 8)
                else:
                    start = datetime.now()
                archive[pull.snumber] = {"success": False, "start": start,
                                         "number": pull.number,
                                         "stage": pull.repodir,
                                         "completed": False, "finished": None}
                # Once a local staging directory has been initialized, we
                # add the sha signature of the pull request to our archive
                # so we can track the rest of the testing process.  If it
                # fails when trying to merge the head of the pull request,
                # the exception block should catch it and email the owner
                # of the repo.
                # We need to save the state of the archive now in case the
                # testing causes an unhandled exception.
                self._save_archive()
                pull.begin()
                self.cron.email(pull.repo.name, "start",
                                self._get_fields("start", pull), self.testmode)
                pull.test(expected[pull.number])
                pull.finalize()
                # Update the status of this pull request on the archive,
                # save the archive file in case the next pull request
                # throws an unhandled exception.
                archive[pull.snumber]["completed"] = True
                # Success means the test percentage is (floating-point)
                # equal to 1.
                archive[pull.snumber]["success"] = abs(pull.percent - 1) < 1e-12
                # This if block looks like a mess; it is necessary so that
                # we can easily unit test this processing code by passing
                # in the model outputs etc. that should have been returned
                # from running live.
                if (self.testmode and testarchive is not None
                        and pull.number in testarchive[pull.repokey]
                        and testarchive[pull.repokey][pull.number]["finished"] is not None):
                    archive[pull.snumber]["finished"] = \
                        testarchive[pull.repokey][pull.number]["finished"]
                elif self.testmode:
                    archive[pull.snumber]["finished"] = datetime(2015, 4, 23, 13, 9)
                else:
                    # This single line could replace the whole if block if
                    # we didn't have unit tests integrated with the main
                    # code.
                    archive[pull.snumber]["finished"] = datetime.now()
                self._save_archive()
                # We email after saving the archive in case the email
                # server causes exceptions.
                if archive[pull.snumber]["success"]:
                    key = "success"
                else:
                    key = "failure"
                self.cron.email(pull.repo.name, key,
                                self._get_fields(key, pull), self.testmode)
            # NOTE(review): bare except also swallows SystemExit and
            # KeyboardInterrupt; consider narrowing to ``except Exception``.
            except:
                import sys, traceback
                e = sys.exc_info()
                errmsg = '\n'.join(traceback.format_exception(e[0], e[1], e[2]))
                err(errmsg)
                self.cron.email(pull.repo.name, "error",
                                self._get_fields("error", pull, errmsg),
                                self.testmode)
def coarse_tag_str(pos_seq):
    """Convert POS sequence to our coarse system, formatted as a string."""
    global tag2coarse
    # Unknown tags map to 'O'.
    return ''.join(tag2coarse.get(tag, 'O') for tag in pos_seq)
def get_all_network_interfaces(self, filters=None):
    """Retrieve all of the Elastic Network Interfaces (ENI's)
    associated with your account.

    :type filters: dict
    :param filters: Optional filters that can be used to limit the
        results returned.  Filters are provided in the form of a
        dictionary consisting of filter names as the key and filter
        values as the value.  The set of allowable filter names/values
        is dependent on the request being performed.  Check the EC2 API
        guide for details.

    :rtype: list
    :return: A list of :class:`boto.ec2.networkinterface.NetworkInterface`
    """
    params = {}
    if filters:
        self.build_filter_params(params, filters)
    return self.get_list('DescribeNetworkInterfaces', params,
                         [('item', NetworkInterface)], verb='POST')
def parse_args(args, default_server_definitions_path,
               default_profile_definitions_path):
    """Parse the command line arguments and return the parsed namespace.

    :param args: Sequence of argument strings to parse (e.g. ``sys.argv[1:]``),
        or None to read ``sys.argv``.
    :param default_server_definitions_path: Default path of the JSON file
        that defines the servers to be tested.
    :param default_profile_definitions_path: Default path of the file that
        defines the characteristics of known profiles.
    :return: The :class:`argparse.Namespace` with the parsed arguments.
    """
    prog = os.path.basename(sys.argv[0])
    usage = '%(prog)s [options] server'
    desc = """ Test script for testing get_central_instances method. This script executes a number of tests and displays against a set of servers defined in the test file """
    epilog = """ Examples: %s Fujitsu -v 3 %s -v 1` """ % (prog, prog)
    argparser = _argparse.ArgumentParser(
        prog=prog, usage=usage, description=desc, epilog=epilog,
        add_help=False, formatter_class=_WbemcliCustomFormatter)
    pos_arggroup = argparser.add_argument_group('Positional arguments')
    pos_arggroup.add_argument(
        'servers', metavar='servers', nargs='*', type=str,
        default=['default'],
        help='R|Define by nickname zero or more specific servers to\n'
             'test. If no servers are specified, the default\n'
             'group is tested. If there is no default group defined,\n'
             'all servers are tested.')
    general_arggroup = argparser.add_argument_group('General options')
    general_arggroup.add_argument(
        '-l', '--list_servers', dest='list_servers', action='store_true',
        default=False,
        help='List servers nicknames defined by the servers arguement. If '
             'there are no servers defined by the servers argument, list '
             'the servers defined by the group "default". If there is no '
             'default group. list all servers')
    general_arggroup.add_argument(
        '--tests', dest='tests', default=None, nargs='*',
        choices=['overview', 'tree', 'profiles', 'insts', 'centralinsts'],
        help='List the tests to be executed. Default is all tests')
    general_arggroup.add_argument(
        '-d', '--direction', dest='direction', default=None,
        choices=['dmtf', 'snia'],
        help='Define a particular reference direction for the test. If this '
             'is not specified, the direction algorithm is used, all of them.')
    general_arggroup.add_argument(
        '--serversfile', dest='servers_file', metavar='serversfile',
        default=default_server_definitions_path,
        help='R|The file path for the JSON file that defines the servers\n'
             'to be tested\n'
             'Default: %(default)s')
    general_arggroup.add_argument(
        '--profilesfile', dest='profiles_file', metavar='profilesfile',
        default=default_profile_definitions_path,
        help='R|The file path for the file that defines the\n'
             'characteristics of known profiles.\n'
             'Default: %(default)s')
    general_arggroup.add_argument(
        '-v', '--verbosity', dest='verbosity', type=int, default=0,
        help='Increment the output verbosity as integer from 0 t0 3')
    general_arggroup.add_argument(
        '-V', '--version', action='version', version='%(prog)s ' + __version__,
        help='Display script version and exit.')
    general_arggroup.add_argument(
        '-h', '--help', action='help',
        help='Show this help message and exit')
    # Parse the caller-supplied argument list. The original called
    # parse_args() with no arguments, silently ignoring the ``args``
    # parameter and always reading sys.argv instead.
    args = argparser.parse_args(args)
    return args
def clear_samples(self):
    """Drop the stored chain and blobs from memory.

    The iteration number at which the clear happened is recorded first so
    later bookkeeping can account for the discarded samples; the per-run
    iteration counter is then zeroed and the underlying sampler reset.
    """
    # Record where the clear occurred before wiping anything.
    self._lastclear = self.niterations
    self._itercounter = 0
    self._sampler.reset()
def search_group_by_id(self, groupID) -> Group:
    """Look up a group by its id.

    Args:
        groupID (str): id of the group to search for.

    Returns:
        The matching group object, or None when no group has that id.
    """
    return next((group for group in self.groups if group.id == groupID), None)
def getUserAgent():
    '''Generate a randomized user agent by permuting a large set of possible
    values. The returned user agent should look like a valid, in-use browser,
    with a specified preferred language of english.

    Return value is a list of tuples, where each tuple is one of the
    user-agent headers.
    '''
    # Shuffle a *copy* of the chosen encodings entry. The original code
    # shuffled the list object stored inside the module-level ENCODINGS
    # table in place, permanently reordering it on every call.
    encodings = list(random.choice(ENCODINGS))
    random.shuffle(encodings)
    coding = random.choice((", ", ",")).join(encodings)
    # accept_list is already a fresh copy, so appending here is safe.
    accept_list = list(random.choice(ACCEPT))
    accept_list.append(random.choice(ACCEPT_POSTFIX))
    accept_str = random.choice((", ", ",")).join(accept_list)
    assert accept_str.count("*.*") <= 1
    return [
        ('User-Agent', random.choice(USER_AGENTS)),
        ('Accept-Language', random.choice(ACCEPT_LANGUAGE)),
        ('Accept', accept_str),
        ('Accept-Encoding', coding),
    ]
def auto_change_docstring(app, what, name, obj, options, lines):
    r"""Apply automatic docstring tweaks for Sphinx (in place on ``lines``).

    Two transformations are made:

    - module docstrings in the ``pylatex`` package get a title heading;
    - any line ending with a ``\`` is merged with the line that follows it.
    """
    if what == 'module' and name.startswith('pylatex'):
        lines.insert(0, len(name) * '=')
        lines.insert(0, name)
    merged = 0
    # Iterate over a snapshot: pops shift the live list, so each merged
    # line moves the remaining indices back by one.
    for idx, text in enumerate(list(lines)):
        if text.endswith('\\'):
            lines[idx - merged] += lines.pop(idx + 1 - merged)
            merged += 1
def run ( self ) : """Run the plugin ."""
# Only run if the build was successful if self . workflow . build_process_failed : self . log . info ( "Not promoting failed build to koji" ) return self . koji_session = get_koji_session ( self . workflow , self . koji_fallback ) koji_metadata , output_files = self . get_metadata ( ) try : server_dir = self . get_upload_server_dir ( ) for output in output_files : if output . file : self . upload_file ( self . koji_session , output , server_dir ) finally : for output in output_files : if output . file : output . file . close ( ) try : build_info = self . koji_session . CGImport ( koji_metadata , server_dir ) except Exception : self . log . debug ( "metadata: %r" , koji_metadata ) raise # Older versions of CGImport do not return a value . build_id = build_info . get ( "id" ) if build_info else None self . log . debug ( "Build information: %s" , json . dumps ( build_info , sort_keys = True , indent = 4 ) ) # If configured , koji _ tag _ build plugin will perform build tagging tag_later = are_plugins_in_order ( self . workflow . exit_plugins_conf , PLUGIN_KOJI_PROMOTE_PLUGIN_KEY , PLUGIN_KOJI_TAG_BUILD_KEY ) if not tag_later and build_id is not None and self . target is not None : tag_koji_build ( self . koji_session , build_id , self . target , poll_interval = self . poll_interval ) return build_id
def calcSMA(self):
    """Return the semi-major axis derived from Kepler's Third Law.

    Uses the host star's mass and this object's orbital period; yields
    ``np.nan`` when the object hierarchy is incomplete (``HierarchyError``).
    """
    try:
        law = eq.KeplersThirdLaw(None, self.star.M, self.P)
        return law.a
    except HierarchyError:
        return np.nan
def create_secgroup(self, name, desc):
    """Create a new server security group.

    :param str name: Name of the security group to create.
    :param str desc: Short description of the group.
    """
    groups = self.nova.security_groups
    groups.create(name, desc)
def _add_argument_register ( self , reg_offset ) : """Registers a register offset as being used as an argument to the function . : param reg _ offset : The offset of the register to register ."""
if reg_offset in self . _function_manager . _arg_registers and reg_offset not in self . _argument_registers : self . _argument_registers . append ( reg_offset )
def update_image(self, container_name, image_name):
    """Update a container's image.

    :param container_name: :class:`str`, container name
    :param image_name: :class:`str`, the full image name, like alpine:3.3
    :return: :class:`bool`, True if success, otherwise False.
    """
    code, container = self.get_container(container_name)
    if code != httplib.OK:
        self.logger.error("Container %s is not exists. error code %s, error message %s", container_name, code, container)
        return False
    _, old_image_name, _ = utils.parse_image_name(container.image)
    repository, name, version = utils.parse_image_name(image_name)
    # The new image must live in the private docker.neg registry.
    if not repository or repository.lower() != DOCKER_NEG:
        self.logger.error("You image %s must have a 'docker.neg/' prefix string", image_name)
        return False
    if not repo.image_exists(name, tag=version):
        self.logger.error("You image %s must be location in docker.neg repository.", image_name)
        return False
    if old_image_name.lower() != name.lower():
        # Fixed: the original format string had a single %s placeholder
        # but passed two arguments, so the logging call itself errored.
        self.logger.error("You image %s must be same with container's Image %s.", image_name, container.image)
        return False
    code, result = self.update(container_name, tag=version)
    if code != httplib.OK:
        self.logger.error("Update container %s with image failure, code %s, result %s", container_name, code, result)
        return False
    return True
def reachableFrom(self, id, hint=None, relationships=None, lbls=None, callback=None, output='application/json'):
    """Get all the nodes reachable from a starting point, traversing the
    provided edges.

    REST endpoint: ``GET /graph/reachablefrom/{id}``

    Arguments:
        id: Id of the start node (percent-encoded here when it is an
            ``http:`` IRI).
        hint: A label hint to find the start node.
        relationships: A list of relationships to traverse, in order.
            Supports cypher operations such as ``relA|relB`` or ``relA*``.
        lbls: A list of node labels to filter.
        callback: Name of the JSONP callback ('fn' by default). Supplying
            this parameter or requesting a javascript media type will cause
            a JSONP response to be rendered.
        output: Requested media type, e.g. application/json,
            application/graphson, application/xml, application/graphml+xml,
            application/xgmml, text/gml, text/csv, text/tab-separated-values,
            image/jpeg, image/png.
    """
    # IRIs must be fully percent-encoded before being embedded in the path.
    if id and id.startswith('http:'):
        id = parse.quote(id, safe='')
    kwargs = {'id': id, 'hint': hint, 'relationships': relationships, 'lbls': lbls, 'callback': callback}
    # Dict-valued parameters are serialized to JSON strings before use.
    kwargs = {k: dumps(v) if builtins.type(v) is dict else v for k, v in kwargs.items()}
    # NOTE(review): param_rest is never used afterwards — presumably
    # _make_rest is called for validation/side effects; confirm.
    param_rest = self._make_rest('id', **kwargs)
    url = self._basePath + ('/graph/reachablefrom/{id}').format(**kwargs)
    # Everything except the path parameter goes into the query string.
    requests_params = {k: v for k, v in kwargs.items() if k != 'id'}
    output = self._get('GET', url, requests_params, output)
    return output if output else []
def frequencies_plot(self, xmin=0, xmax=200):
    """Add the k-mer frequencies line plot section to the report.

    :param xmin: Lower bound of the x axis (k-mer frequency).
    :param xmax: Upper bound of the x axis (k-mer frequency).
    """
    helptext = '''
A possible way to assess the complexity of a library even in
absence of a reference sequence is to look at the kmer profile of the reads.
The idea is to count all the kmers (_i.e._, sequence of length `k`) that occur
in the reads. In this way it is possible to know how many kmers occur
`1,2,.., N` times and represent this as a plot.
This plot tell us for each x, how many k-mers (y-axis) are present in the
dataset in exactly x-copies.

In an ideal world (no errors in sequencing, no bias, no  repeated regions) this
plot should be as close as possible to a gaussian distribution.
In reality we will always see a peak for `x=1` (_i.e._, the errors)
and another peak close to the expected coverage. If the genome is highly
heterozygous a second peak at half of the coverage can be expected.'''
    pconfig = dict(
        id='Jellyfish_kmer_plot',
        title='Jellyfish: K-mer plot',
        ylab='Counts',
        xlab='k-mer frequency',
        xDecimals=False,
        xmin=xmin,
        xmax=xmax,
    )
    self.add_section(
        anchor='jellyfish_kmer_plot',
        description='The K-mer plot lets you estimate library complexity and coverage from k-mer content.',
        helptext=helptext,
        plot=linegraph.plot(self.jellyfish_data, pconfig),
    )
def get_config(cls, key, default=None):
    """Convenience accessor for the application's config from your class.

    :param key: The config key to look up.
    :param default: Value returned when the key is absent.
    :returns: The configured value, or ``default``.
    """
    config = cls._app.config
    return config.get(key, default)
def _pull_query_results ( resultset ) : '''Parses a ResultSet returned from InfluxDB into a dictionary of results , grouped by series names and optional JSON - encoded grouping tags .'''
_results = collections . defaultdict ( lambda : { } ) for _header , _values in resultset . items ( ) : _header , _group_tags = _header if _group_tags : _results [ _header ] [ salt . utils . json . dumps ( _group_tags ) ] = [ _value for _value in _values ] else : _results [ _header ] = [ _value for _value in _values ] return dict ( sorted ( _results . items ( ) ) )
def repeat_last_axis(array, count):
    """Restride ``array`` so it repeats ``count`` times along a new last axis.

    Parameters
    ----------
    array : np.array
        The array to restride.
    count : int
        Number of times to repeat ``array``.

    Returns
    -------
    result : array
        Array of shape ``array.shape + (count,)`` whose new trailing axis
        has stride 0, i.e. it shares memory with ``array``. Make a copy
        before assigning to either the input or the output.

    Example
    -------
    >>> from numpy import arange
    >>> a = arange(3)
    >>> repeat_last_axis(a, 2).shape
    (3, 2)
    """
    new_shape = array.shape + (count,)
    # A zero stride makes every repeat alias the same underlying element.
    new_strides = array.strides + (0,)
    return as_strided(array, new_shape, new_strides)
def get_syllable_count(self, syllables: List[str]) -> int:
    """Count the syllable groups remaining after elision.

    Works on a deep copy, so the caller's syllable list — and with it the
    positions/separations needed to reconstitute a line and place stresses
    — stays intact.

    :param syllables: list of syllable strings
    :return: number of syllables after elision
    """
    working = copy.deepcopy(syllables)
    solo_consonants = self._find_solo_consonant(working)
    merged = string_utils.move_consonant_right(working, solo_consonants)
    return len(string_utils.remove_blank_spaces(merged))
def get_modules(modulename=None):
    """Return the modules and packages found under *modulename*.

    With no argument, list every top-level module and package, including
    the interpreter built-ins. Private names (leading underscore) are
    filtered out, and import failures yield an empty list rather than
    raising.
    """
    modulename = compat.ensure_not_unicode(modulename)
    if not modulename:
        builtin = list(sys.builtin_module_names)
        try:
            discovered = [name for _imp, name, _pkg in iter_modules()
                          if not name.startswith("_")]
        except OSError:
            # Bug in Python 2.6, see #275
            return builtin
        return discovered + builtin
    try:
        module = safeimport(modulename)
    except ErrorDuringImport:
        return []
    if module is None or not hasattr(module, "__path__"):
        # Unimportable, or a plain module with no submodules to list.
        return []
    return [name for _imp, name, _pkg in iter_modules(module.__path__)
            if not name.startswith("_")]
def get_sections(self, s, base, sections=('Parameters', 'Other Parameters')):
    """Extract the given numpy-style sections from docstring *s*.

    Only works when the docstring follows the numpy documentation
    guidelines [1]_; each extracted section is stored in :attr:`params`
    under the key ``'<base>.<section>'`` (section name lowercased, spaces
    replaced by underscores).

    Parameters
    ----------
    s: str
        Docstring to split
    base: str
        base to use in the :attr:`sections` attribute
    sections: iterable of str
        sections to look for. Each section must be followed by a newline
        character ('\\n') and a bar of '-' (following the numpy
        (napoleon) docstring conventions). The default is a tuple — the
        original list default was a shared mutable object.

    Returns
    -------
    str
        The docstring with the summary removed.

    References
    ----------
    .. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt

    See Also
    --------
    delete_params, keep_params, delete_types, keep_types, delete_kwargs:
        For manipulating the docstring sections
    save_docstring:
        for saving an entire docstring
    """
    params = self.params
    # Remove the summary and dedent the rest
    s = self._remove_summary(s)
    for section in sections:
        key = '%s.%s' % (base, section.lower().replace(' ', '_'))
        params[key] = self._get_section(s, section)
    return s
def _GetStatus ( self , two_factor = False ) : """Check whether OS Login is installed . Args : two _ factor : bool , True if two factor should be enabled . Returns : bool , True if OS Login is installed ."""
params = [ 'status' ] if two_factor : params += [ '--twofactor' ] retcode = self . _RunOsLoginControl ( params ) if retcode is None : if self . oslogin_installed : self . logger . warning ( 'OS Login not installed.' ) self . oslogin_installed = False return None # Prevent log spam when OS Login is not installed . self . oslogin_installed = True if not os . path . exists ( constants . OSLOGIN_NSS_CACHE ) : return False return not retcode
def initialize(app: Flask, app_config):
    """Wire the module into *app*.

    :param app: The Flask application.
    :param app_config: Application configuration dictionary. When its
        ``http.serve_on_endpoint`` entry is anything other than ``/``
        (e.g. ``/sacredboard/``), the WSGI app is wrapped so it is served
        from that sub-path (e.g. ``http://localhost:5000/sacredboard/``).
    """
    endpoint = app_config["http.serve_on_endpoint"]
    if endpoint != "/":
        app.wsgi_app = ReverseProxied(app.wsgi_app, script_name=endpoint)
def _validate_config ( self ) : """Ensure at least one switch is configured"""
if len ( cfg . CONF . ml2_arista . get ( 'switch_info' ) ) < 1 : msg = _ ( 'Required option - when "sec_group_support" is enabled, ' 'at least one switch must be specified ' ) LOG . exception ( msg ) raise arista_exc . AristaConfigError ( msg = msg )
def fetch(args):
    """%prog fetch "query"
        OR
    %prog fetch queries.txt

    Please provide a UniProt compatible `query` to retrieve data. If `query` contains
    spaces, please remember to "quote" it.

    You can also specify a `filename` which contains queries, one per line.

    Follow this syntax <http://www.uniprot.org/help/text-search#text-search-syntax>
    to query any of the documented fields <http://www.uniprot.org/help/query-fields>
    """
    import re
    import csv
    p = OptionParser(fetch.__doc__)
    p.add_option("--format", default="tab", choices=valid_formats,
                 help="download format [default: %default]")
    p.add_option("--columns", default="entry name, protein names, genes,organism",
                 help="columns to download, if --format is `tab` or `xls`." +
                      " [default: %default]")
    p.add_option("--include", default=False, action="store_true",
                 help="Include isoforms when --format is `fasta` or include `description` when" +
                      " --format is `rdf`. [default: %default]")
    p.add_option("--limit", default=10, type="int",
                 help="Max number of results to retrieve [default: %default]")
    p.add_option("--offset", default=0, type="int",
                 help="Offset of first result, used with --limit [default: %default]")
    p.add_option("--skipcheck", default=False, action="store_true",
                 help="turn off prompt to check file existence [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    query, = args
    url_params = {}
    if op.exists(query):
        # A readable file: one query per line.
        pf = query.rsplit(".", 1)[0]
        list_of_queries = [row.strip() for row in open(query)]
    else:
        # the query is the search term
        pf = query.strip().strip('\"')
        list_of_queries = [pf]
    pf = re.sub(r"\s+", '_', pf)
    assert len(list_of_queries) > 0, "Please provide atleast one input query"
    url_params['format'] = opts.format
    if opts.columns and opts.format in valid_column_formats:
        reader = csv.reader([opts.columns], skipinitialspace=True)
        cols = [col for r in reader for col in r]
        for col in cols:
            assert col in valid_columns, \
                "Column '{0}' is not a valid. Allowed options are {1}".format(col, valid_columns)
        url_params['columns'] = ",".join(cols)
    if opts.include and opts.format in valid_include_formats:
        url_params['include'] = "yes"
    url_params['limit'] = opts.limit
    url_params['offset'] = opts.offset
    outfile = "{0}.{1}".format(pf, opts.format)
    # If noprompt, will not check file existence
    fw = must_open(outfile, "w", checkexists=True, skipcheck=opts.skipcheck)
    if fw is None:
        return
    seen = set()
    for query in list_of_queries:
        if query in seen:
            logging.error("Duplicate query ({0}) found".format(query))
            continue
        url_params['query'] = query
        data = urlencode(url_params)
        try:
            request = Request(uniprot_url, data)
            response = urlopen(request)
        except (HTTPError, URLError, RuntimeError, KeyError) as e:
            logging.error(e)
            logging.debug("wait 5 seconds to reconnect...")
            time.sleep(5)
            # Skip this query. The original fell through here and then
            # read from `response`, which was either unbound (NameError)
            # or left over from the previous query (duplicated output).
            continue
        page = response.read()
        if not page:
            logging.error("query `{0}` yielded no results".format(query))
            continue
        print(page, file=fw)
        seen.add(query)
    if seen:
        print("A total of {0} out of {1} queries returned results.".format(
            len(seen), len(list_of_queries)), file=sys.stderr)
def get_addition_score(source_counts, prediction_counts, target_counts):
    """Compute the addition score (Equation 4 in the paper)."""
    added = prediction_counts - source_counts
    true_positives = sum((added & target_counts).values())
    selected = sum(added.values())
    # The paper sums over all ngrams in the output; following the GitHub
    # implementation we instead sum over the set difference below, which
    # is the more sensible quantity.
    relevant = sum((target_counts - source_counts).values())
    return _get_fbeta_score(true_positives, selected, relevant)
def DataRefreshRequired ( self , path = None , last = None ) : """True if we need to update this path from the client . Args : path : The path relative to the root to check freshness of . last : An aff4 : last attribute to check freshness of . At least one of path or last must be supplied . Returns : True if the path hasn ' t been updated in the last self . max _ age _ before _ refresh seconds , else False . Raises : type _ info . TypeValueError : If no arguments are supplied ."""
# If we didn ' t get given a last attribute , use the path to get one from the # object . if last is None : if path is None : # If we didn ' t get a path either , we can ' t do anything . raise type_info . TypeValueError ( "Either 'path' or 'last' must" " be supplied as an argument." ) fd = aff4 . FACTORY . Open ( self . root . Add ( path ) , token = self . token ) # We really care about the last time the stat was updated , so we use # this instead of the LAST attribute , which is the last time anything # was updated about the object . stat_obj = fd . Get ( fd . Schema . STAT ) if stat_obj : last = stat_obj . age else : last = rdfvalue . RDFDatetime ( 0 ) # If the object doesn ' t even have a LAST attribute by this point , # we say it hasn ' t been accessed within the cache expiry time . if last is None : return True last = last . AsDatetime ( ) # Remember to use UTC time , since that ' s what the datastore uses . return datetime . datetime . utcnow ( ) - last > self . max_age_before_refresh
def radial_density(im, bins=10, voxel_size=1):
    r"""Compute the radial density function from the distance transform.

    Defined by Torquato [1]_ as :math:`\int_0^\infty P(r)dr = 1.0`, where
    *P(r)dr* is the probability of finding a voxel lying at a radial
    distance between *r* and *dr* from the solid interface (a *pdf*). The
    cumulative form :math:`F(r) = \int_r^\infty P(r)dr` gives the fraction
    of pore-space with radius larger than *r* (a *cdf*).

    Parameters
    ----------
    im : ND-array
        Either a binary image of the pore space with ``True`` indicating
        the phase of interest, or a pre-calculated distance transform
        (saves time).
    bins : int or array_like
        Number of bins (int) or bin locations (array); passed directly to
        Scipy's ``histogram``. Default is 10.
    voxel_size : scalar
        Size of a voxel side in preferred units; default 1, so scaling can
        be applied afterwards.

    Returns
    -------
    result : named_tuple
        A named tuple with 1D arrays: *R* (radius, equal to
        ``bin_centers``), *pdf*, *cdf*, *bin_centers*, *bin_edges*
        (one more value than bins), and *bin_widths* (useful for the
        ``width`` argument of ``matplotlib.pyplot.bar``).

    Notes
    -----
    Not a pore size distribution in the explicit sense — the distance
    transform is heavily skewed toward small values near the solid walls —
    but a useful size indicator. Torquato calls this the *pore-size
    density function*.

    References
    ----------
    [1] Torquato, S. Random Heterogeneous Materials: Mircostructure and
    Macroscopic Properties. Springer, New York (2002) - See page 48 & 292
    """
    if im.dtype == bool:
        im = spim.distance_transform_edt(im)
    else:
        # Work on a copy: the original zeroed artifact voxels directly in
        # the caller's pre-computed distance transform.
        im = im.copy()
    mask = find_dt_artifacts(im) == 0
    im[mask] = 0
    x = im[im > 0].flatten()
    h = sp.histogram(x, bins=bins, density=True)
    h = _parse_histogram(h=h, voxel_size=voxel_size)
    rdf = namedtuple('radial_density_function',
                     ('R', 'pdf', 'cdf', 'bin_centers', 'bin_edges',
                      'bin_widths'))
    return rdf(h.bin_centers, h.pdf, h.cdf, h.bin_centers, h.bin_edges,
               h.bin_widths)
def interface_by_macaddr(self, macaddr):
    '''Given a MAC address, return the interface that 'owns' this address'''
    wanted = EthAddr(macaddr)
    for iface in self._devinfo.values():
        if iface.ethaddr == wanted:
            return iface
    raise KeyError("No device has MAC address {}".format(wanted))
def name(self):
    """The name shown for this window in the status bar.

    An explicitly chosen window name wins; otherwise the active pane's
    name is used, falling back to the empty string when there is none.
    """
    if self.chosen_name:
        return self.chosen_name
    pane = self.active_pane
    return pane.name if pane else ''
def all(self):
    """Return all registered users.

    http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource
    """
    collection_path = self.get_path('collection', realm=self._realm_name)
    return self._client.get(url=self._client.get_full_url(collection_path))
def description(self):
    """This read-only attribute is a sequence of 7-item sequences.

    One entry per result column; only the column name is populated, the
    remaining six DB-API fields are None. Returns None once the cursor is
    closed.
    """
    if self._closed:
        return None
    return tuple((col, None, None, None, None, None, None)
                 for col in self._result["cols"])
def advanced_search(pattern):
    """Parse *pattern* with the search grammar and build a queryset from it."""
    parsed = QUERY.parseString(pattern)
    lookup = parsed[0]
    return Entry.published.filter(lookup).distinct()
def get(self):
    """Pop one task from the queue.

    Returns the task's payload, or None when no task is available. The
    consumed entry is deleted from the Consul KV store.
    """
    available = self._get_avaliable_tasks()
    if not available:
        return None
    task_name, payload = available[0]
    self._client.kv.delete(task_name)
    return payload
def listext(self, ext, stream=sys.stdout):
    """Print to *stream* a table listing the output files with extension
    *ext* produced by the flow."""
    matches = []
    for node in self.iflat_nodes():
        path = node.outdir.has_abiext(ext)
        if path:
            matches.append((node, File(path)))
    if not matches:
        print("No output file with extension %s has been produced by the flow" % ext, file=stream)
        return
    print("Found %s files with extension `%s` produced by the flow" % (len(matches), ext), file=stream)
    rows = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024 ** 2),
             node.node_id, node.__class__.__name__]
            for node, f in matches]
    print(tabulate(rows, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream)
def save_config(config, logdir=None):
    """Save a configuration, optionally persisting it under *logdir*.

    When a logging directory is given it is created and the configuration
    is written there as ``config.yaml``; otherwise only a log message is
    emitted.

    Args:
      config: Configuration object.
      logdir: Location for writing summaries and checkpoints if specified.

    Returns:
      Configuration object.
    """
    if not logdir:
        tf.logging.info(
            'Start a new run without storing summaries and checkpoints since no '
            'logging directory was specified.')
        return config
    # The config object must be unlocked to accept the new attribute.
    with config.unlocked:
        config.logdir = logdir
    tf.logging.info(
        'Start a new run and write summaries and checkpoints to {}.'.format(
            config.logdir))
    tf.gfile.MakeDirs(config.logdir)
    config_path = os.path.join(config.logdir, 'config.yaml')
    with tf.gfile.FastGFile(config_path, 'w') as file_:
        yaml.dump(config, file_, default_flow_style=False)
    return config
def closed(self, reason):
    '''After the asynchronous crawl has been localized, use the result
    data to render the output files; once rendering is complete, call
    :meth:`.MobiSpider.generate_mobi_file` to produce the target
    ``mobi`` file.
    '''
    # Copy the cover & masthead image files into the build tree.
    utils.mkdirp(os.path.join(self.build_source_dir, 'images'))
    self._logger.info(self.options)
    shutil.copy(self.options.get('img_cover'), os.path.join(self.build_source_dir, 'images', 'cover.jpg'))
    shutil.copy(self.options.get('img_masthead'), os.path.join(self.build_source_dir, 'images', 'masthead.gif'))
    # Copy the CSS files (base, package, and any extra inline CSS).
    css_base_path = self.options.get('css_base')
    css_package_path = self.options.get('css_package')
    css_extra = self.options.get('extra_css', '')
    css_output_dir = os.path.join(self.build_source_dir, 'css')
    utils.mkdirp(css_output_dir)
    if css_base_path:
        shutil.copy(css_base_path, os.path.join(css_output_dir, 'base.css'))
    if css_package_path:
        shutil.copy(css_package_path, os.path.join(css_output_dir, 'package.css'))
    if css_extra:
        with codecs.open(os.path.join(css_output_dir, 'custom.css'), 'wb', 'utf-8') as fh:
            fh.write(css_extra)
    # Copy the icons directory (remove any stale copy first).
    icons_path = self.options.get('icons_path')
    icons_output_dir = os.path.join(self.build_source_dir, 'icons')
    shutil.rmtree(icons_output_dir, ignore_errors=True)
    if icons_path:
        shutil.copytree(icons_path, icons_output_dir)
    # Load the content (OPF) template.
    template_content_path = os.path.join(self.template_dir, 'OEBPS', 'content.opf')
    with open(template_content_path, 'r') as fh:
        template_content = Template(fh.read())
    # Render the content target file (moear.opf).
    content_path = os.path.join(self.build_source_dir, 'moear.opf')
    with codecs.open(content_path, 'wb', 'utf-8') as fh:
        fh.write(template_content.render(data=self.data, spider=self.spider, options=self.options))
    # Load the toc.ncx template.
    template_toc_path = os.path.join(self.template_dir, 'OEBPS', 'toc.ncx')
    with open(template_toc_path, 'r') as fh:
        template_toc = Template(fh.read())
    # Render the toc.ncx target file.
    toc_path = os.path.join(self.build_source_dir, 'misc', 'toc.ncx')
    utils.mkdirp(os.path.dirname(toc_path))
    with codecs.open(toc_path, 'wb', 'utf-8') as fh:
        fh.write(template_toc.render(data=self.data, spider=self.spider, options=self.options))
    # Load the toc.html template.
    template_toc_path = os.path.join(self.template_dir, 'OEBPS', 'toc.html')
    with open(template_toc_path, 'r') as fh:
        template_toc = Template(fh.read())
    # Render the toc.html target file.
    toc_path = os.path.join(self.build_source_dir, 'html', 'toc.html')
    utils.mkdirp(os.path.dirname(toc_path))
    with codecs.open(toc_path, 'wb', 'utf-8') as fh:
        fh.write(template_toc.render(data=self.data, options=self.options))
    # Generate the mobi file into mobi_dir.
    self.generate_mobi_file()
def move_in_32(library, session, space, offset, length, extended=False):
    """Move a 32-bit block of data from the specified address space and
    offset into local memory.

    Corresponds to the viMoveIn32 / viMoveIn32Ex functions of the VISA
    library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which
                   to read.
    :param length: Number of elements to transfer, where the data width of
                   the elements to transfer is identical to the source data
                   width.
    :param extended: Use 64 bits offset independent of the platform.
    :return: Data read from the bus, return value of the library call.
    :rtype: list, :class:`pyvisa.constants.StatusCode`
    """
    dest = (ViUInt32 * length)()
    mover = library.viMoveIn32Ex if extended else library.viMoveIn32
    status = mover(session, space, offset, length, dest)
    return list(dest), status
def original_unescape(self, s):
    """Unescape HTML entities using the plain HTMLParser implementation.

    Accepts a single string or a list of strings; any other value is
    returned unchanged.
    """
    if isinstance(s, list):
        return [unicode(HTMLParser.unescape(self, item)) for item in s]
    if isinstance(s, basestring):
        return unicode(HTMLParser.unescape(self, s))
    return s
def redim(cls, dataset, dimensions):
    """Rename coords on the Cube.

    ``dimensions`` maps old names to replacement dimension objects; the
    renaming is applied to a copy of the wrapped cube, which is returned.
    """
    cube = dataset.data.copy()
    for old_name, dim in dimensions.items():
        if cube.name() == old_name:
            cube.rename(dim.name)
        for coord in cube.dim_coords:
            if coord.name() == old_name:
                coord.rename(dim.name)
    return cube
def sleep(self, ms=1):
    """Pause the current green thread for *ms* milliseconds::

        p = h.pipe()

        @h.spawn
        def _():
            p.send('1')
            h.sleep(50)
            p.send('2')

        p.recv()  # returns '1'
        p.recv()  # returns '2' after 50 ms
    """
    current = getcurrent()
    self.scheduled.add(ms, current)
    # Yield control back to the hub's loop until the timer fires.
    self.loop.switch()
def envelop(self, begin, end=None):
    """Return the set of all intervals fully contained in [begin, end).

    May also be called with a single Interval as ``begin``.
    Completes in O(m + k*log n) time, where:
      * n = size of the tree
      * m = number of matches
      * k = size of the search range

    :rtype: set of Interval
    """
    node = self.top_node
    if not node:
        return set()
    if end is None:
        # Single-Interval form: recurse with its endpoints.
        return self.envelop(begin.begin, begin.end)
    if begin >= end:
        return set()
    candidates = node.search_point(begin, set())
    # bound_begin might be greater
    table = self.boundary_table
    lo = table.bisect_left(begin)
    hi = table.bisect_left(end)  # up to, but not including end
    candidates.update(node.search_overlap(
        table.keys()[i] for i in xrange(lo, hi)
    ))
    # TODO: improve envelop() to use node info instead of less-efficient filtering
    return set(iv for iv in candidates
               if iv.begin >= begin and iv.end <= end)
def build(self, title, text, img_url):
    """Assemble the card's sub-components.

    :param title: Title of the card
    :param text: Description of the card
    :param img_url: Image of the card
    """
    super(ImageCard, self).build()
    base_id = self.id
    self.title = Title(id=base_id + "-title", text=title,
                       classname="card-title", size=3, parent=self)
    panel = Panel(id=base_id + "-block", classname="card-block", parent=self)
    self.block = panel
    self.image = Image(id=base_id + "-image", img_url=img_url,
                       classname="card-image-top img-fluid", parent=panel)
    self.text = Paragraph(id=base_id + "-text", text=text,
                          classname="card-text", parent=panel)
def detect_member(row, key):
    '''Return True when *key* resolves to an existing attribute in *row*.'''
    target, _key, _value = dict_crawl(row, key)
    return bool(target)
def compute(self):
    """Compute a set of soft clauses belonging to an MUS of the input
    formula.

    First checks whether the formula is satisfiable; if it is, nothing is
    done. Otherwise an *unsatisfiable core* is extracted and used as an
    over-approximation of an MUS, refined in :func:`_compute`.
    """
    # A satisfiable formula has no MUS to extract.
    if self.oracle.solve(assumptions=self.sels):
        return
    # The unsat core is an over-approximation of an MUS.
    approx = sorted(self.oracle.get_core())
    if self.verbose:
        print('c MUS approx:',
              ' '.join(str(self.vmap[sel] + 1) for sel in approx), '0')
    # Try to delete clauses from the approximation (refined in place).
    self._compute(approx)
    # Translate internal selector ids back to clause numbers.
    return [self.vmap[sel] + 1 for sel in approx]
def dim_dtau(self, pars):
    r"""Partial derivative of the imaginary response with respect to tau.

    NOTE(review): the original docstring only said "Add formula" — the
    analytic expression below should be confirmed against the model's
    documentation.
    """
    self._set_parameters(pars)
    sin_ang = np.sin(self.ang)
    # first term
    numer1 = -self.m * (self.w ** self.c) * self.c * \
        (self.tau ** (self.c - 1)) * sin_ang
    first = self.sigmai * numer1 / self.denom
    # second term
    factor = -self.m * self.otc * sin_ang
    part_b = 2 * (self.w ** 2.0) * self.c * \
        (self.tau ** (self.c - 1)) * np.cos(self.ang)
    part_c = 2 * self.c * (self.w ** (self.c * 2)) * \
        (self.tau ** (2 * self.c - 1))
    second = self.sigma0 * factor * (part_b + part_c) / (self.denom ** 2)
    return first + second
def do_down(self, arg):
    """d(own) [count]

    Move the current frame count (default one) levels down in the
    stack trace (to a newer frame).
    """
    last = len(self.stack) - 1
    if self.curindex == last:
        self.error('Newest frame')
        return
    try:
        count = int(arg or 1)
    except ValueError:
        self.error('Invalid frame count (%s)' % arg)
        return
    # A negative count jumps straight to the newest frame.
    target = last if count < 0 else min(last, self.curindex + count)
    self._select_frame(target)
def print_mhc_peptide(neoepitope_info, peptides, pepmap, outfile, netmhc=False):
    """Write one neoepitope record (from merge_mhc_peptide_calls) to
    *outfile*. Generic helper used to reduce code redundancy.

    :param neoepitope_info: record with allele, pept, core, tumor_pred,
        normal_pept, normal_pred (must support attribute access and
        ``_asdict()``).
    :param dict peptides: pepname -> peptide sequence for all IARs considered.
    :param dict pepmap: contents of the peptide map file.
    :param file outfile: open file descriptor for the output.
    :param bool netmhc: True when the record comes from netMHCIIpan, which
        carries an explicit peptide_name instead of requiring a substring
        search.
    """
    if netmhc:
        names = [neoepitope_info.peptide_name]
    else:
        names = [name for name, seq in peptides.items()
                 if neoepitope_info.pept in seq]
    # Work on a dict so fields can be modified.
    record = neoepitope_info._asdict()
    # Fusion peptides are characterized by an all-N normal partner.
    if record['normal_pept'] == 'N' * len(record['pept']):
        record['normal_pept'] = 'NA'
        record['normal_pred'] = 'NA'
    line_fmt = ('{ni[allele]}\t'
                '{ni[pept]}\t'
                '{ni[normal_pept]}\t'
                '{pname}\t'
                '{ni[core]}\t'
                '0\t'
                '{ni[tumor_pred]}\t'
                '{ni[normal_pred]}\t'
                '{pmap}')
    # One output line per matching IAR peptide.
    for name in names:
        print(line_fmt.format(ni=record, pname=name, pmap=pepmap[name]),
              file=outfile)
    return None
def env(self, current_scope):
    """Return an environment that will look up in current_scope for keys
    in this tuple, and the parent env otherwise.
    """
    fallback = framework.Environment({'self': current_scope},
                                     parent=self.__parent_env)
    return self.__env_cache.get(current_scope.ident,
                                framework.Environment,
                                current_scope,
                                names=self.keys(),
                                parent=fallback)
def evdev_device(self):
    """Return our corresponding evdev device object.

    Raises Exception when no attached device matches
    ``self.evdev_device_name``.
    """
    # Open every device first (mirrors the scan the kernel exposes),
    # then search by name.
    candidates = [evdev.InputDevice(path) for path in evdev.list_devices()]
    for candidate in candidates:
        if candidate.name == self.evdev_device_name:
            return candidate
    raise Exception("%s: could not find evdev device '%s'"
                    % (self, self.evdev_device_name))
def validate_config_must_have(config, required_keys):
    """Validate a config dictionary to make sure it has all of the
    specified keys.

    Args:
      config: the config mapping to validate.
      required_keys: the keys that config must include.

    Raises:
      Exception: if config is missing any of the required keys.
    """
    missing_keys = set(required_keys) - set(config)
    # Truthiness instead of len(...) > 0; sorted so the message is
    # deterministic (set iteration order is not).
    if missing_keys:
        raise Exception('Invalid config with missing keys "%s"' %
                        ', '.join(sorted(missing_keys)))
def register_success(self, upgrade):
    """Register a successful upgrade."""
    record = Upgrade(upgrade=upgrade.name, applied=datetime.now())
    db.session.add(record)
    db.session.commit()
def DOMDebugger_setXHRBreakpoint(self, url):
    """Function path: DOMDebugger.setXHRBreakpoint
    Domain: DOMDebugger
    Method name: setXHRBreakpoint

    Parameters:
      Required arguments:
        'url' (type: string) -> Resource URL substring. All XHRs having
        this substring in the URL will get stopped upon.
    No return value.

    Description: Sets breakpoint on XMLHttpRequest.
    """
    assert isinstance(url, (str,)), \
        "Argument 'url' must be of type '['str']'. Received type: '%s'" % type(url)
    return self.synchronous_command('DOMDebugger.setXHRBreakpoint', url=url)
def support_autoupload_enable(self, **kwargs):
    """Auto Generated Code"""
    # Build <config><support xmlns=...><autoupload><enable/>...
    config = ET.Element("config")
    support = ET.SubElement(config, "support",
                            xmlns="urn:brocade.com:mgmt:brocade-ras")
    ET.SubElement(ET.SubElement(support, "autoupload"), "enable")
    handler = kwargs.pop('callback', self._callback)
    return handler(config)
def get_dict(self, obj, state=None, base_name='View'):
    """The style dict for a view instance.

    :param obj: view instance whose style is looked up.
    :param state: explicit state to look up; falls back to ``obj.state``
        when None. (Previously this argument was accepted but silently
        ignored.)
    :param base_name: base class name used for style resolution.
    """
    if state is None:
        state = obj.state
    return self.get_dict_for_class(class_name=obj.__class__,
                                   state=state,
                                   base_name=base_name)
def devices(self):
    """Manages users enrolled u2f devices"""
    self.verify_integrity()
    # Device management requires a prior authorization step in the session.
    if not session.get('u2f_device_management_authorized', False):
        return jsonify({'status': 'failed', 'error': 'Unauthorized!'}), 401
    if request.method == 'GET':
        return jsonify(self.get_devices()), 200
    if request.method == 'DELETE':
        response = self.remove_device(request.json)
        status = 200 if response['status'] == 'ok' else 404
        return jsonify(response), status
    # Any other verb on an authorized session falls through to a 401,
    # matching the original behavior.
    return jsonify({'status': 'failed', 'error': 'Unauthorized!'}), 401
def do_speak(self, args: argparse.Namespace):
    """Repeats what you tell me to."""
    def transform(word):
        # Pig latin first, then shouting — same order as documented flags.
        if args.piglatin:
            word = '%s%say' % (word[1:], word[0])
        return word.upper() if args.shout else word

    line = ' '.join(transform(w) for w in args.words)
    times = min(args.repeat or 1, self.maxrepeats)
    for _ in range(times):
        self.poutput(line)
def independentlinear60__ffnn():
    """4-Layer Neural Network"""
    from keras.models import Sequential
    from keras.layers import Dense

    net = Sequential([
        Dense(32, activation='relu', input_dim=60),
        Dense(20, activation='relu'),
        Dense(20, activation='relu'),
        Dense(1),
    ])
    net.compile(optimizer='adam',
                loss='mean_squared_error',
                metrics=['mean_squared_error'])
    return KerasWrap(net, 30, flatten_output=True)
def apply_empty_result(self):
    """We have an empty result; at least 1 axis is 0.

    Try applying the function to an empty Series to decide whether it
    behaves as a reduction.
    """
    # Not asked to reduce or infer reduction: just hand back a copy of the
    # existing object.
    if self.result_type not in ['reduce', None]:
        return self.obj.copy()

    from pandas import Series

    should_reduce = self.result_type == 'reduce'
    if not should_reduce:
        # Probe with an empty Series; a non-Series result means reduction.
        probe = Series([])
        try:
            result = self.f(probe, *self.args, **self.kwds)
        except Exception:
            pass
        else:
            should_reduce = not isinstance(result, Series)

    if should_reduce:
        return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
    return self.obj.copy()
def _fail_early(message, **kwds):
    """Print a JSON failure payload to stdout and exit with status 1.

    The module arguments are dynamically generated based on the Opsview
    version, which means fail_json isn't available until after the module
    has been properly initialized and the schemas have been loaded — so
    this minimal fallback is used for early failures.
    """
    import json
    payload = dict(kwds, msg=message, failed=True)
    print(json.dumps(payload))
    sys.exit(1)
def get_scopes_information(self):
    """Return a list with the description of all the scopes requested.

    Extra scope claims (OIDC_EXTRA_SCOPE_CLAIMS) override the standard
    claims for scopes of the same name.
    """
    scopes = StandardScopeClaims.get_scopes_info(self.params['scope'])
    if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
        scopes_extra = settings.get(
            'OIDC_EXTRA_SCOPE_CLAIMS',
            import_str=True).get_scopes_info(self.params['scope'])
        # Drop any standard scope that an extra scope redefines.  The
        # previous implementation deleted by index while enumerating a
        # copy, which removes the wrong element once the list has shifted
        # after the first deletion.
        extra_names = {scope['scope'] for scope in scopes_extra}
        scopes = [scope for scope in scopes
                  if scope['scope'] not in extra_names]
    else:
        scopes_extra = []
    return scopes + scopes_extra
def accept(self):
    """Method invoked when OK button is clicked."""
    output_path = self.output_path_line_edit.text()
    # Guard: an output path is mandatory.
    if not output_path:
        display_warning_message_box(
            self,
            tr('Empty Output Path'),
            tr('Output path can not be empty'))
        return
    try:
        self.convert_metadata()
    except MetadataConversionError as error:
        display_warning_message_box(
            self,
            tr('Metadata Conversion Failed'),
            str(error))
        return
    # The conversion should have produced the result file.
    if not os.path.exists(output_path):
        display_warning_message_box(
            self,
            tr('Metadata Conversion Failed'),
            tr('Result file is not found.'))
        return
    display_success_message_bar(
        tr('Metadata Conversion Success'),
        tr('You can find your copied layer with metadata version 3.5 in '
           '%s' % output_path),
        iface_object=self.iface)
    super(MetadataConverterDialog, self).accept()
def get_cursor(cls):
    """Return a message list cursor that yields sqlite3.Row objects."""
    connection = SqliteConnection.get()
    connection.row_factory = sqlite3.Row
    return connection.cursor()
def explain_linear_regressor_weights(reg,
                                     vec=None,
                                     top=_TOP,
                                     target_names=None,
                                     targets=None,
                                     feature_names=None,
                                     coef_scale=None,
                                     feature_re=None,
                                     feature_filter=None,
                                     ):
    """Return an explanation of a linear regressor weights.

    See :func:`eli5.explain_weights` for description of ``top``,
    ``target_names``, ``targets``, ``feature_names``, ``feature_re``
    and ``feature_filter`` parameters.

    ``vec`` is a vectorizer instance used to transform raw features to the
    input of the regressor ``reg``; you can pass it instead of
    ``feature_names``.

    ``coef_scale`` is a 1D np.ndarray with a scaling coefficient for each
    feature; coef[i] = coef[i] * coef_scale[i] if coef_scale[i] is not nan.
    Use it if you want to scale coefficients before displaying them, to
    take input feature sign or scale in account.
    """
    # Non-linear SVR kernels have no per-feature weights to explain.
    if isinstance(reg, (SVR, NuSVR)) and reg.kernel != 'linear':
        return explain_weights_sklearn_not_supported(reg)
    # Resolve feature names/scales for hashing vectorizers, then apply the
    # feature_filter / feature_re filtering.
    feature_names, coef_scale = handle_hashing_vec(vec, feature_names,
                                                   coef_scale)
    feature_names, flt_indices = get_feature_names_filtered(
        reg, vec,
        feature_names=feature_names,
        feature_filter=feature_filter,
        feature_re=feature_re,
    )
    _extra_caveats = "\n" + HASHING_CAVEATS if is_invhashing(vec) else ''

    def _features(target_id):
        # Top weighted features for one regression target, with optional
        # per-feature scaling and the filter indices applied.
        coef = get_coef(reg, target_id, scale=coef_scale)
        if flt_indices is not None:
            coef = coef[flt_indices]
        return get_top_features(feature_names, coef, top)

    display_names = get_target_display_names(get_default_target_names(reg),
                                             target_names, targets)
    if is_multitarget_regressor(reg):
        # Multi-output regressor: one TargetExplanation per output.
        return Explanation(
            targets=[
                TargetExplanation(
                    target=target_name,
                    feature_weights=_features(target_id))
                for target_id, target_name in display_names
            ],
            description=DESCRIPTION_REGRESSION_MULTITARGET + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )
    else:
        # Single-output regressor: a single TargetExplanation for target 0.
        return Explanation(
            targets=[TargetExplanation(
                target=display_names[0][1],
                feature_weights=_features(0),
            )],
            description=DESCRIPTION_REGRESSION + _extra_caveats,
            estimator=repr(reg),
            method='linear model',
            is_regression=True,
        )