signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def _RunScripts(self, run_dir=None):
    """Retrieve metadata scripts and execute them.

    Args:
      run_dir: string, the base directory location of the temporary directory.
    """
    with _CreateTempDir(self.script_type, run_dir=run_dir) as temp_dir:
        try:
            self.logger.info('Starting %s scripts.', self.script_type)
            scripts = self.retriever.GetScripts(temp_dir)
            self.executor.RunScripts(scripts)
        finally:
            # Always log completion, even when retrieval/execution raised.
            self.logger.info('Finished running %s scripts.', self.script_type)
def add_field_processors(config, processors, model, field):
    """Register processors for a model field.

    Under the hood a regular nefertari event subscriber is created which
    calls the field processors in the order passed to this function.

    Each processor is called with: **new_value** (current value of the
    field), **instance** (instance affected by the request, may be None on
    bulk update/create), **field** (FieldData for the changed field),
    **request** (current Pyramid request), **model** (model class) and
    **event** (the underlying event object). Each processor must return
    the processed value, which is passed on to the next processor.

    :param config: Pyramid Configurator instance.
    :param processors: Sequence of processor functions.
    :param model: Model class for whose field processors are registered.
    :param field: Field name for which processors are registered.
    """
    before_change_events = (
        BeforeCreate,
        BeforeUpdate,
        BeforeReplace,
        BeforeUpdateMany,
        BeforeRegister,
    )

    def wrapper(event, _processors=processors, _field=field):
        # Build the keyword set once; 'new_value' is threaded through the
        # processor chain.
        kwargs = {
            'new_value': event.field.new_value,
            'instance': event.instance,
            'field': event.field,
            'request': event.view.request,
            'model': event.model,
            'event': event,
        }
        for processor in _processors:
            kwargs['new_value'] = processor(**kwargs)
        event.field.new_value = kwargs['new_value']
        event.set_field_value(_field, kwargs['new_value'])

    for event_cls in before_change_events:
        config.add_subscriber(wrapper, event_cls, model=model, field=field)
def FromString(cls, desc):
    """Create a new stimulus from a description string.

    The string must have the format::

        [time: ][system] input X = Y

    where X and Y are integers. The time, if given, must be a
    time_interval: an integer followed by a time unit such as second(s),
    minute(s), etc.

    Args:
        desc (str): A string description of the stimulus.

    Returns:
        SimulationStimulus: The parsed stimulus object.
    """
    # Lazily build the shared grammar on first use.
    if language.stream is None:
        language.get_language()

    grammar = (Optional(time_interval('time') - Literal(':').suppress())
               - language.stream('stream')
               - Literal('=').suppress()
               - number('value'))
    try:
        parsed = grammar.parseString(desc)
        start_time = parsed['time'][0] if 'time' in parsed else 0
        return SimulationStimulus(start_time, parsed['stream'][0], parsed['value'])
    except (ParseException, ParseSyntaxException):
        raise ArgumentError("Could not parse stimulus descriptor", descriptor=desc)
def __live_receivers(signal):
    """Return all signal handlers that are currently still alive for the
    input `signal`.

    Args:
        signal: A signal name.

    Returns:
        A list of callable receivers for the input signal.
    """
    with __lock:
        # Drop dead weak references first, then dereference the survivors.
        __purge()
        return [ref() for ref in __receivers[signal]]
def clear(self):
    """Clear all waiters.

    Any currently scheduled waiter is cancelled with an
    asyncio.CancelledError exception before the waiter table is reset.
    """
    for _key, waiter in self.waiters():
        if not isinstance(waiter, asyncio.Future):
            continue
        if waiter.done():
            continue
        waiter.set_exception(asyncio.CancelledError())
    self._waiters = {}
def normalizeRotationAngle(value):
    """Normalize an angle.

    * Value must be an :ref:`type-int-float`.
    * Value must be between -360 and 360.
    * A negative value is normalized by adding 360 to it.
    * The returned value is a ``float`` between 0 and 360.
    """
    if not isinstance(value, (int, float)):
        raise TypeError("Angle must be instances of "
                        ":ref:`type-int-float`, not %s."
                        % type(value).__name__)
    if abs(value) > 360:
        raise ValueError("Angle must be between -360 and 360.")
    if value < 0:
        value += 360
    return float(value)
def _normalize_string ( self , text ) : '''Prepares incoming text for parsing : removes excessive spaces , tabs , newlines , etc .'''
conversion = { # newlines '\r?\n' : ' ' , # replace excessive empty spaces '\s+' : ' ' , # convert all types of hyphens / dashes to a # simple old - school dash # from http : / / utf8 - chartable . de / unicode - utf8 - table . pl ? # start = 8192 & number = 128 & utf8 = string - literal '‐' : '-' , '‑' : '-' , '‒' : '-' , '–' : '-' , '—' : '-' , '―' : '-' , } for find , replace in six . iteritems ( conversion ) : text = re . sub ( find , replace , text , flags = re . UNICODE ) return text
def setSeed(self, value):
    """Sets the seed to value."""
    self.seed = value
    # Seed the module-level RNG so subsequent draws are reproducible.
    random.seed(value)
    if self.verbosity >= 0:
        print("Conx using seed:", self.seed)
def simple_ins_from_obs(obsnames, insfilename='model.output.ins'):
    """Write a PEST instruction file reading observations one per line.

    Assumes the values named in ``obsnames`` are to be read, in order,
    one per line from a model output file.

    Args:
        obsnames: list of observation names to read in.
        insfilename: filename for the INS file (default: model.output.ins).

    Returns:
        None. Writes a file <insfilename> with each observation read off
        a line.
    """
    with open(insfilename, 'w') as ofp:
        ofp.write('pif ~\n')
        # Use a plain loop (not a throwaway list comprehension) for the
        # side-effecting writes.
        for cob in obsnames:
            ofp.write('!{0}!\n'.format(cob))
def ReadCronJobRuns(self, job_id, cursor=None):
    """Reads all cron job runs for a given job id.

    Args:
      job_id: ID of the cron job whose runs should be fetched.
      cursor: MySQL cursor. NOTE(review): used unconditionally despite the
        None default — presumably injected by a connection decorator;
        confirm against callers.

    Returns:
      A list of cron job run objects, sorted by start time, newest first.
    """
    query = """
    SELECT run, UNIX_TIMESTAMP(write_time)
    FROM cron_job_runs
    WHERE job_id = %s
    """
    cursor.execute(query, [job_id])
    runs = [self._CronJobRunFromRow(row) for row in cursor.fetchall()]
    # Newest runs first.
    return sorted(runs, key=lambda run: run.started_at, reverse=True)
def daemonize(enable_stdio_inheritance=False, auto_close_fds=True, keep_fds=None):  # pragma nocover
    """Standard daemonization of a process.

    Double-forks, starts a new session, resets the umask and detaches
    stdio from any controlling terminal.

    http://www.svbug.com/documentation/comp.unix.programmer-FAQ/faq_2.html#SEC16

    Args:
        enable_stdio_inheritance: when True, preserve stdout/stderr that
            were redirected by the shell (only remap them to /dev/null if
            they are still attached to a terminal).
        auto_close_fds: NOTE(review): not referenced anywhere in this
            body — presumably kept for interface compatibility; confirm.
        keep_fds: optional iterable of fds in 0..2 that must NOT be
            closed when stdio inheritance is disabled.
    """
    # First fork: parent exits so the child runs in the background.
    if os.fork():
        os._exit(0)
    os.setsid()
    # Second fork: relinquish session leadership so the daemon can never
    # reacquire a controlling terminal.
    if os.fork():
        os._exit(0)
    os.umask(0o22)
    # In both the following any file descriptors above stdin
    # stdout and stderr are left untouched. The inheritance
    # option simply allows one to have output go to a file
    # specified by way of shell redirection when not wanting
    # to use --error-log option.
    if not enable_stdio_inheritance:
        # Remap all of stdin, stdout and stderr on to
        # /dev/null. The expectation is that users have
        # specified the --error-log option.
        if keep_fds:
            keep_fds = set(keep_fds)
            for fd in range(0, 3):
                if fd not in keep_fds:
                    try:
                        os.close(fd)
                    except OSError:
                        pass
        else:
            os.closerange(0, 3)
        fd_null = os.open(REDIRECT_TO, os.O_RDWR)
        if fd_null != 0:
            os.dup2(fd_null, 0)
        os.dup2(fd_null, 1)
        os.dup2(fd_null, 2)
    else:
        fd_null = os.open(REDIRECT_TO, os.O_RDWR)
        # Always redirect stdin to /dev/null as we would
        # never expect to need to read interactive input.
        if fd_null != 0:
            os.close(0)
            os.dup2(fd_null, 0)

        # If stdout and stderr are still connected to
        # their original file descriptors we check to see
        # if they are associated with terminal devices.
        # When they are we map them to /dev/null so that
        # are still detached from any controlling terminal
        # properly. If not we preserve them as they are.
        #
        # If stdin and stdout were not hooked up to the
        # original file descriptors, then all bets are
        # off and all we can really do is leave them as
        # they were.
        #
        # This will allow 'gunicorn ... > output.log 2>&1'
        # to work with stdout/stderr going to the file
        # as expected.
        #
        # Note that if using --error-log option, the log
        # file specified through shell redirection will
        # only be used up until the log file specified
        # by the option takes over. As it replaces stdout
        # and stderr at the file descriptor level, then
        # anything using stdout or stderr, including having
        # cached a reference to them, will still work.
        def redirect(stream, fd_expect):
            try:
                fd = stream.fileno()
                if fd == fd_expect and stream.isatty():
                    os.close(fd)
                    os.dup2(fd_null, fd)
            except AttributeError:
                pass

        redirect(sys.stdout, 1)
        redirect(sys.stderr, 2)
def repmi(instr, marker, value, lenout=None):
    """Replace a marker with an integer.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmi_c.html

    :param instr: Input string.
    :type instr: str
    :param marker: Marker to be replaced.
    :type marker: str
    :param value: Replacement value.
    :type value: int
    :param lenout: Optional available space in output string.
    :type lenout: int
    :return: Output string.
    :rtype: str
    """
    if lenout is None:
        # Default output size: input plus marker plus room for the digits.
        lenout = ctypes.c_int(len(instr) + len(marker) + 15)
    instr_p = stypes.stringToCharP(instr)
    marker_p = stypes.stringToCharP(marker)
    value_c = ctypes.c_int(value)
    out = stypes.stringToCharP(lenout)
    libspice.repmi_c(instr_p, marker_p, value_c, lenout, out)
    return stypes.toPythonString(out)
def validate_response(response, validator_map):
    """Validates response against our schemas.

    :param response: the response object to validate
    :type response: :class:`pyramid.response.Response`
    :type validator_map: :class:`pyramid_swagger.load_schema.ValidatorMap`
    """
    validator = validator_map.response

    # Short circuit when the spec declares a void response and the body is
    # effectively empty.
    if validator.schema.get('type') == 'void':
        if response.body in (None, b'', b'{}', b'null'):
            return

    # Don't attempt to validate non-success responses in v1.2.
    if response.status_code < 200 or response.status_code > 203:
        return

    validator.validate(prepare_body(response))
def addFeatureSA(self, callback, default=None, name=None):
    """Add a feature to the suffix array.

    The callback must return a sequence such that the feature at
    position i is attached to the suffix referenced by self.SA[i]. It is
    called with one argument: the SuffixArray instance itself, and may
    traverse self.SA in any fashion.

    By default the new feature is named after the callback; pass *name*
    to choose another name. When the feature of an unknown substring of
    the text is requested, *default* is returned.

    If the feature attached to a suffix is independent of the other
    suffix features, the addFeature method is a better alternative.
    addFeatureSA may also be used as a decorator.
    """
    featureName = name if name is not None else callback.__name__
    featureValues = callback(self)

    # Store the raw values and the fallback, and register the feature.
    setattr(self, "_%s_values" % featureName, featureValues)
    setattr(self, "%s_default" % featureName, default)
    self.features.append(featureName)

    def findFeature(substring):
        # _findOne returns False (identity) when the substring is absent;
        # index 0 is a valid hit, hence the identity check.
        position = self._findOne(substring)
        if position is False:
            return default
        return featureValues[position]

    setattr(self, featureName, findFeature)
def process(self, state, procedure, ret_to=None, inline=None, force_addr=None, **kwargs):
    """Perform execution with a state.

    :param state: The state with which to execute
    :param procedure: An instance of a SimProcedure to run
    :param ret_to: The address to return to when this procedure is finished
    :param inline: This is an inline execution. Do not bother copying the state.
    :param force_addr: Force execution to pretend that we're working at this
        concrete address
    :returns: A SimSuccessors object categorizing the execution's successor
        states

    NOTE(review): extra **kwargs are accepted but not forwarded to the
    parent implementation — presumably intentional; confirm.
    """
    parent = super(SimEngineProcedure, self)
    return parent.process(
        state,
        procedure,
        ret_to=ret_to,
        inline=inline,
        force_addr=force_addr,
    )
def highlight_multi_regex(str_, pat_to_color, reflags=0):
    """Color every regex match in ``str_`` with its mapped color.

    FIXME: use pygments instead.

    Args:
        str_: text to colorize.
        pat_to_color: mapping of regex pattern -> color understood by
            ``color_text``. Patterns must be mutually exclusive
            (non-overlapping matches), otherwise replacements made near
            the end of the string could shift earlier spans.
        reflags: flags passed to ``re.finditer``.

    Returns:
        The input string with matched spans wrapped by ``color_text``.
    """
    colored = str_
    # Collect (end, start, color) for every match of every pattern.
    spans = []
    for pat, color in pat_to_color.items():
        for match in re.finditer(pat, str_, flags=reflags):
            spans.append((match.end(), match.start(), color))
    # Replace from the end of the string backwards so offsets of earlier
    # spans remain valid as the string grows.
    for end, start, color in sorted(spans, reverse=True):
        colored_part = color_text(colored[start:end], color)
        colored = colored[:start] + colored_part + colored[end:]
    return colored
def uncertainty_K(self):
    """Estimate of the element-wise asymptotic standard deviation in the
    rate matrix."""
    # Lazily build the Fisher information matrix on first use.
    if self.information_ is None:
        self._build_information()
    return _ratematrix.sigma_K(
        self.information_, theta=self.theta_, n=self.n_states_)
def inferSuperimposedSequenceObjects(exp, sequenceId, objectId, sequences, objects):
    """Run inference on the given sequence superimposed with the given object."""
    # (loc, feat) pairs for this sequence, column 0.
    sequenceSensations = {0: [pair for pair in sequences[sequenceId]]}
    inferenceSDRSequence = sequences.provideObjectToInfer({
        "object": sequenceId,
        "numSteps": len(sequenceSensations[0]),
        "pairs": sequenceSensations,
    })

    # Random order of sensations on the object for one column. The total
    # number of sensations equals the number of points on the object and
    # no point is visited more than once.
    shuffledPairs = [pair for pair in objects[objectId]]
    random.shuffle(shuffledPairs)
    objectSensations = {0: shuffledPairs}
    inferenceSDRObject = objects.provideObjectToInfer({
        "numSteps": len(objectSensations[0]),
        "pairs": objectSensations,
        "includeRandomLocation": False,
    })

    superimposedSDRs = createSuperimposedSDRs(inferenceSDRSequence,
                                              inferenceSDRObject)
    exp.infer(superimposedSDRs,
              objectName=sequenceId * len(objects) + objectId)
def color_array_by_hue_mix(value, palette):
    """Average palette colors for each set bit in a binary-string value.

    Figures out the appropriate color for a binary string value by
    averaging the colors corresponding to the indices of each one bit it
    contains. Makes for visualizations that intuitively show patch
    overlap.

    Args:
        value: binary string such as '0b101' (the '0b' prefix is assumed).
        palette: sequence of rgb or rgba tuples, indexed by bit position
            (bit 0 = last character of the string).

    Returns:
        Tuple of averaged channel values; white when the value is zero;
        -1 for negative values (legacy sentinel kept for compatibility).
    """
    n = int(value, 2)
    if n == 0:
        # White, matching the palette's channel count (rgb vs rgba).
        return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)
    if n < 0:
        # Legacy sentinel preserved for backward compatibility.
        return -1
    # Strip the '0b' prefix and reverse so bit i maps to palette[i],
    # avoiding issues with differing string lengths.
    bits = [int(c) for c in value[2:]][::-1]
    # 1D array, so take the zeroth element of np.nonzero.
    locs = np.nonzero(bits)[0]
    rgb_vals = [palette[i] for i in locs]
    # Channel-wise mean of the selected palette entries (rgb or rgba).
    channels = len(rgb_vals[0])
    mixed = [sum(v[c] for v in rgb_vals) / len(locs) for c in range(channels)]
    return tuple(mixed)
def sync_remotes(self, force=False):
    """Pull down all non-local items and save them into remotes_storage."""
    connectors = juicer.utils.get_login_info()[0]
    for repo, items in self.iterrepos():
        repoid = "%s-%s" % (repo, self.current_env)
        for rpm in items:
            # Skip the download when the item already lives in the pulp
            # repo it needs to go to, unless forced.
            target = juicer.utils.pulp_repo_path(
                connectors[self.current_env], repoid)
            if rpm.path.startswith(target) and not force:
                juicer.utils.Log.log_debug(
                    "Not syncing %s because it's already in pulp" % rpm.path)
            else:
                rpm.sync_to(self.remotes_storage)
def create(project: 'projects.Project') -> COMPONENT:
    """Assemble the bokeh CSS/JS component for the given project.

    Returns an empty COMPONENT (after logging a warning) when bokeh is
    not importable or its resources cannot be constructed.

    :return: merged COMPONENT with bokeh css and js entries.
    """
    try:
        from bokeh.resources import Resources as BokehResources
        bokeh_resources = BokehResources(mode='absolute')
    except Exception:
        environ.log(BOKEH_WARNING)
        return COMPONENT([], [])

    css = _assemble_component(project, 'bokeh-css', ['bokeh', 'bokeh.css'],
                              bokeh_resources.css_files)
    js = _assemble_component(project, 'bokeh-js', ['bokeh', 'bokeh.js'],
                             bokeh_resources.js_files)
    return definitions.merge_components(css, js)
def has_pending(self):
    """Return True if there are pending test items.

    This indicates that collection has finished and nodes are still
    processing test items, so this can be thought of as "the scheduler
    is active".
    """
    if self.pending:
        return True
    # Any node with a non-empty pending list keeps the scheduler active.
    return any(self.node2pending.values())
def manifest(self, subvol):
    """Return a manifest stream for *subvol*.

    Returns either an open binary file object (when a manifest already
    exists) or a generator of manifest lines, writing the manifest to
    disk as a side effect in the generator case.

    NOTE(review): the original one-liner said "yields 7-tuples", but both
    branches actually produce bytes — the 7-tuples are presumably the
    entries of generate_manifest() before encoding; confirm.
    """
    subvol_path = os.path.join(self.path, str(subvol))
    builtin_path = os.path.join(subvol_path, MANIFEST_DIR[1:], str(subvol))
    manifest_path = os.path.join(MANIFEST_DIR, str(subvol))
    if os.path.exists(builtin_path):
        # Stream the manifest written into the (read-only) template,
        # note that this has not been done up to now
        return open(builtin_path, "rb")
    elif os.path.exists(manifest_path):
        # Stream the manifest written into /var/lib/butterknife/manifests
        return open(manifest_path, "rb")
    else:
        # If we don't have any, stream manifest and save it under
        # /var/lib/butterknife/manifests
        def generator():
            with tempfile.NamedTemporaryFile(prefix=str(subvol), dir=MANIFEST_DIR, delete=False) as fh:
                print("Temporarily writing to", fh.name)
                for entry in generate_manifest(os.path.join(self.path, str(subvol))):
                    line = ("\t".join(["-" if j == None else str(j) for j in entry])).encode("utf-8") + b"\n"
                    fh.write(line)
                    yield line
                print("Renaming to", manifest_path)
                os.rename(fh.name, manifest_path)
        return generator()
def plasma_get(object_id):
    """Get an object directly from plasma without going through object table.

    Precondition: plasma_prefetch(object_id) has been called before.

    Args:
        object_id: raw plasma object id to fetch.

    Returns:
        The object retrieved from the local plasma store.
    """
    client = ray.worker.global_worker.plasma_client
    plasma_id = ray.pyarrow.plasma.ObjectID(object_id)
    # Busy-wait until the object shows up locally; the prefetch
    # precondition guarantees it is on its way.
    while not client.contains(plasma_id):
        pass
    return client.get(plasma_id)
def _import_marshaller_modules(self, m):
    """Import the modules required by the marshaller.

    Parameters
    ----------
    m : marshaller
        The marshaller to load the modules for.

    Returns
    -------
    success : bool
        Whether the modules `m` requires could be imported successfully
        or not.
    """
    try:
        for name in m.required_modules:
            if name not in sys.modules:
                # importlib is preferred, but may be unavailable on very
                # old Pythons; fall back to __import__ there.
                if _has_importlib:
                    importlib.import_module(name)
                else:
                    __import__(name)
    except ImportError:
        return False
    else:
        # Note: the original also had a bare `except: raise` clause; it
        # was a no-op (unhandled exceptions propagate anyway) and has
        # been removed.
        return True
def _parse_memory_embedded_health(self, data):
    """Parse the get_host_health_data() output for memory properties.

    :param data: the output returned by get_host_health_data()
    :returns: total memory size in MB.
    :raises IloError: if unable to get the memory details.
    """
    memory = self._get_memory_details_value_based_on_model(data)
    if memory is None:
        raise exception.IloError(
            "Unable to get memory data. Error: Data missing")

    total_memory_size = 0
    for memory_item in memory:
        memsize = memory_item[self.MEMORY_SIZE_TAG]["VALUE"]
        if memsize == self.MEMORY_SIZE_NOT_PRESENT_TAG:
            continue
        # Sizes are reported like "16 GB"; strip spaces before converting.
        memory_bytes = strutils.string_to_bytes(
            memsize.replace(' ', ''), return_int=True)
        total_memory_size += int(memory_bytes / (1024 * 1024))
    return total_memory_size
def get_project_content_commit_date(root_dir='.', exclusions=None):
    """Get the datetime of the most recent commit that affected Sphinx
    content.

    *Content* is any file with extension rst (README.rst and LICENSE.rst
    excluded), ipynb, png, jpeg, jpg, svg or gif. This lets project
    infrastructure and configuration files be updated without changing
    the timestamp.

    Parameters
    ----------
    root_dir : `str`, optional
        Root directory; the current working directory by default.
    exclusions : `list` of `str`, optional
        List of file paths or directory paths to ignore.

    Returns
    -------
    commit_date : `datetime.datetime`
        Datetime of the most recent content commit.

    Raises
    ------
    RuntimeError
        Raised if no content files are found.
    """
    logger = logging.getLogger(__name__)

    # Supported 'content' extensions.
    extensions = ('rst', 'ipynb', 'png', 'jpeg', 'jpg', 'svg', 'gif')
    content_paths = []
    for extname in extensions:
        content_paths.extend(
            get_filepaths_with_extension(extname, root_dir=root_dir))

    # Known files that should be excluded; lower case for comparison.
    exclude = Matcher(
        exclusions if exclusions else ['readme.rst', 'license.rst'])
    content_paths = [
        path for path in content_paths
        if not (exclude(path) or exclude(path.split(os.path.sep)[0]))
    ]
    logger.debug('Found content paths: {}'.format(', '.join(content_paths)))
    if not content_paths:
        raise RuntimeError('No content files found in {}'.format(root_dir))

    commit_datetimes = []
    for filepath in content_paths:
        try:
            commit_dt = read_git_commit_timestamp_for_file(
                filepath, repo_path=root_dir)
        except IOError:
            logger.warning(
                'Could not get commit for {}, skipping'.format(filepath))
        else:
            commit_datetimes.append(commit_dt)

    if not commit_datetimes:
        raise RuntimeError('No content commits could be found')
    return max(commit_datetimes)
def _zeropad ( sig , N , axis = 0 ) : """pads with N zeros at the end of the signal , along given axis"""
# ensures concatenation dimension is the first sig = np . moveaxis ( sig , axis , 0 ) # zero pad out = np . zeros ( ( sig . shape [ 0 ] + N , ) + sig . shape [ 1 : ] ) out [ : sig . shape [ 0 ] , ... ] = sig # put back axis in place out = np . moveaxis ( out , 0 , axis ) return out
def remove_trailing_spaces(self, index=None):
    """Remove trailing spaces in the editor at *index* (the current
    stack index when None)."""
    if index is None:
        index = self.get_stack_index()
    self.data[index].editor.remove_trailing_spaces()
def get_callback_url(self, **kwargs):
    """Returns a relative URL for invoking this Pipeline's callback method.

    Args:
        kwargs: Dictionary mapping keyword argument names to single values
            that should be passed to the callback when it is invoked.

    Raises:
        UnexpectedPipelineError: if this is invoked on a pipeline that is
            not async.
    """
    # TODO: Support positional parameters.
    # 'async' became a reserved keyword in Python 3.7, so the attribute
    # must be read via getattr (same attribute, also works on Python 2)
    # to keep this module parseable there.
    if not getattr(self, 'async'):
        raise UnexpectedPipelineError(
            'May only call get_callback_url() method for asynchronous pipelines.')
    kwargs['pipeline_id'] = self._pipeline_key.name()
    # Sort for a deterministic query string.
    params = urllib.urlencode(sorted(kwargs.items()))
    return '%s/callback?%s' % (self.base_path, params)
def _future_done(self, future):
    """Will be called when the coroutine is done.

    Forwards the result to subscribers, routes errors to the configured
    error callback, and starts the next queued coroutine if one is
    waiting.
    """
    try:
        # notify the subscribers (except result is an exception or NONE)
        result = future.result()  # may raise exception
        if result is not NONE:
            self.notify(result)  # may also raise exception
    except asyncio.CancelledError:
        # Cancellation ends processing entirely: the queue is NOT drained.
        return
    except Exception:  # pylint: disable=broad-except
        self._options.error_callback(*sys.exc_info())
    # check if queue is present and something is in the queue
    if self._queue:
        value = self._queue.popleft()
        # start the coroutine
        self._run_coro(value)
    else:
        self._future = None
def _default_plugins(self):
    """Get entry points to load any plugins installed.

    The build process should create an "entry_points.json" file with all
    of the data from the installed entry points. Returns a dict mapping
    entry-point group name to a list of Plugin objects; empty (or
    partial) on any failure, which is logged but not raised.
    """
    plugins = {}
    try:
        with open('entry_points.json') as fp:
            entry_points = json.load(fp)
        for group, members in entry_points.items():
            plugins[group] = []
            for name, src in members.items():
                plugins[group].append(Plugin(name=name, source=src))
    except Exception as e:
        # Best effort: a missing or malformed file just means no plugins.
        print("Failed to load entry points {}".format(e))
    return plugins
def translate_cds(seq, full_codons=True, ter_symbol="*"):
    """Translate a DNA or RNA sequence into a single-letter amino acid
    sequence using the standard translation table.

    If full_codons is True, a sequence whose length isn't a multiple of
    three raises ValueError; otherwise the trailing partial codon is
    represented by ``ter_symbol`` appended as the last character.

    Returns None for None input and "" for an empty sequence. Raises
    ValueError for codons not present in the translation table.
    """
    if seq is None:
        return None
    if not seq:
        return ""
    if full_codons and len(seq) % 3:
        raise ValueError("Sequence length must be a multiple of three")

    # Work on uppercase DNA (U -> T) throughout.
    seq = replace_u_to_t(seq)
    seq = seq.upper()

    residues = []
    # Iterate whole codons only; a trailing partial codon is handled below.
    for i in range(0, len(seq) - len(seq) % 3, 3):
        codon = seq[i:i + 3]
        try:
            residues.append(dna_to_aa1_lut[codon])
        except KeyError:
            raise ValueError(
                "Codon {} at position {}..{} is undefined in codon table".format(
                    codon, i + 1, i + 3))

    # Trailing bases present and allowed: mark them with the ter symbol.
    if not full_codons and len(seq) % 3:
        residues.append(ter_symbol)

    return ''.join(residues)
def update_tracking_terms(self):
    """Read tracking terms from ``self.filename``, one per line.

    Blank lines (and surrounding whitespace) are skipped.

    Returns:
        set of non-empty, stripped term strings.
    """
    import codecs
    # Renamed from 'input' (shadowed the builtin); build the set directly
    # instead of collecting lines first and copying the set at the end.
    with codecs.open(self.filename, "r", encoding='utf8') as term_file:
        return {line.strip() for line in term_file if line.strip()}
def is_empty(self):
    """Return True if the root node contains no child elements, no text,
    and no attributes other than **type**; False otherwise."""
    extra_attrs = [a for a in self.node.attrib.keys() if a != 'type']
    if extra_attrs:
        return False
    # No children, no text content, no tail text.
    return len(self.node) == 0 and not self.node.text and not self.node.tail
def render(self, template_name: str, **kwargs: Any) -> "Future[None]":
    """Renders the template with the given arguments as the response.

    ``render()`` calls ``finish()``, so no other output methods can be
    called after it.

    Returns a `.Future` with the same semantics as the one returned by
    `finish`. Awaiting this `.Future` is optional.

    .. versionchanged:: 5.1

       Now returns a `.Future` instead of ``None``.
    """
    if self._finished:
        raise RuntimeError("Cannot render() after finish()")
    html = self.render_string(template_name, **kwargs)

    # Insert the additional JS and CSS added by the modules on the page
    js_embed = []
    js_files = []
    css_embed = []
    css_files = []
    html_heads = []
    html_bodies = []
    for module in getattr(self, "_active_modules", {}).values():
        embed_part = module.embedded_javascript()
        if embed_part:
            js_embed.append(utf8(embed_part))
        file_part = module.javascript_files()
        if file_part:
            # A single filename may be returned as a plain string.
            if isinstance(file_part, (unicode_type, bytes)):
                js_files.append(_unicode(file_part))
            else:
                js_files.extend(file_part)
        embed_part = module.embedded_css()
        if embed_part:
            css_embed.append(utf8(embed_part))
        file_part = module.css_files()
        if file_part:
            if isinstance(file_part, (unicode_type, bytes)):
                css_files.append(_unicode(file_part))
            else:
                css_files.extend(file_part)
        head_part = module.html_head()
        if head_part:
            html_heads.append(utf8(head_part))
        body_part = module.html_body()
        if body_part:
            html_bodies.append(utf8(body_part))

    # Splice module output into the rendered page: scripts go just before
    # </body>, stylesheets and head content just before </head>.
    if js_files:
        # Maintain order of JavaScript files given by modules
        js = self.render_linked_js(js_files)
        sloc = html.rindex(b"</body>")
        html = html[:sloc] + utf8(js) + b"\n" + html[sloc:]
    if js_embed:
        js_bytes = self.render_embed_js(js_embed)
        sloc = html.rindex(b"</body>")
        html = html[:sloc] + js_bytes + b"\n" + html[sloc:]
    if css_files:
        css = self.render_linked_css(css_files)
        hloc = html.index(b"</head>")
        html = html[:hloc] + utf8(css) + b"\n" + html[hloc:]
    if css_embed:
        css_bytes = self.render_embed_css(css_embed)
        hloc = html.index(b"</head>")
        html = html[:hloc] + css_bytes + b"\n" + html[hloc:]
    if html_heads:
        hloc = html.index(b"</head>")
        html = html[:hloc] + b"".join(html_heads) + b"\n" + html[hloc:]
    if html_bodies:
        hloc = html.index(b"</body>")
        html = html[:hloc] + b"".join(html_bodies) + b"\n" + html[hloc:]
    return self.finish(html)
def set_value(self, name, value, PY2_frontend):
    """Set the value of a variable in the reference namespace.

    *value* arrives as a list holding a single serialized (cloudpickle)
    payload — Spyder sends it this way so it can be transmitted at all
    under Python 2.
    """
    import cloudpickle
    ns = self._get_reference_namespace(name)
    serialized = value[0]
    # A Python 2 frontend sends str that a Python 3 kernel must treat as
    # bytes before unpickling.
    if PY2_frontend and not PY2:
        serialized = bytes(serialized, 'latin-1')
    # Deserialize and set value in namespace.
    ns[name] = cloudpickle.loads(serialized)
    self.log.debug(ns)
def str_to_datetime(ts):
    """Format a string to a datetime object.

    This function supports several date formats like YYYY-MM-DD,
    MM-DD-YYYY and YY-MM-DD. When the given data is None or an empty
    string, the function returns None.

    :param ts: string to convert
    :returns: a datetime object (naive, tzinfo stripped)
    :raises InvalidDateError: when the given string cannot be converted
        into a valid date
    """
    if not ts:
        return None
    try:
        parsed = dateutil.parser.parse(ts).replace(tzinfo=None)
    except Exception:
        raise InvalidDateError(date=str(ts))
    return parsed
async def make_transition_register(self, request: 'Request'):
    """Use all underlying stacks to generate the next transition register.

    Each stack receives the register produced so far and may return a
    patched version of it; the final result is returned to the caller.
    """
    out = {}
    for layer in self._stacks:
        out = await layer.patch_register(out, request)
    return out
def capture_events ( receiver , dest ) : """Capture all events sent to ` receiver ` in the sequence ` dest ` . This is a generator , and it is best used with ` ` yield from ` ` . The observable effect of using this generator with ` ` yield from ` ` is identical to the effect of using ` receiver ` with ` ` yield from ` ` directly ( including the return value ) , but in addition , the values which are * sent * to the receiver are captured in ` dest ` . If ` receiver ` raises an exception or the generator is closed prematurely using its : meth : ` close ` , ` dest ` is cleared . This is used to implement : class : ` CapturingXSO ` . See the documentation there for use cases . . . versionadded : : 0.5"""
# the following code is a copy of the formal definition of ` yield from ` # in PEP 380 , with modifications to capture the value sent during yield _i = iter ( receiver ) try : _y = next ( _i ) except StopIteration as _e : return _e . value try : while True : try : _s = yield _y except GeneratorExit as _e : try : _m = _i . close except AttributeError : pass else : _m ( ) raise _e except BaseException as _e : _x = sys . exc_info ( ) try : _m = _i . throw except AttributeError : raise _e else : try : _y = _m ( * _x ) except StopIteration as _e : _r = _e . value break else : dest . append ( _s ) try : if _s is None : _y = next ( _i ) else : _y = _i . send ( _s ) except StopIteration as _e : _r = _e . value break except : # NOQA dest . clear ( ) raise return _r
def list_unique(cls):
    '''Return all unique namespaces

    :returns: a list of all predicates
    :rtype: list of ckan.model.semantictag.Predicate objects
    '''
    # Single chained expression: query distinct namespaces and materialize.
    return meta.Session.query(Predicate).distinct(Predicate.namespace).all()
def draw_geoscale(ax, minx=0, maxx=175):
    """Draw geological epoch on million year ago (mya) scale.

    :param ax: matplotlib axes to draw on
    :param minx: youngest age (mya) shown on the scale
    :param maxx: oldest age (mya) shown on the scale
    """
    a, b = .1, .6  # Correspond to 200mya and 0mya (axes-fraction x coords)

    def cv(x):
        # Convert an age in mya to an axes-fraction x coordinate.
        return b - (x - b) / (maxx - minx) * (b - a)

    ax.plot((a, b), (.5, .5), "k-")
    tick = .015
    # BUG FIX: ``xrange`` is Python 2-only; use ``range`` so this runs on
    # Python 3 as well.
    for mya in range(maxx - 25, 0, -25):
        p = cv(mya)
        ax.plot((p, p), (.5, .5 - tick), "k-")
        ax.text(p, .5 - 2.5 * tick, str(mya), ha="center", va="center")
    ax.text((a + b) / 2, .5 - 5 * tick,
            "Time before present (million years)", ha="center", va="center")
    # Source:
    # http://www.weston.org/schools/ms/biologyweb/evolution/handouts/GSAchron09.jpg
    Geo = (("Neogene", 2.6, 23.0, "#fee400"),
           ("Paleogene", 23.0, 65.5, "#ff9a65"),
           ("Cretaceous", 65.5, 145.5, "#80ff40"),
           ("Jurassic", 145.5, 201.6, "#33fff3"))
    h = .05
    for era, start, end, color in Geo:
        start, end = cv(start), cv(end)
        end = max(a, end)  # clamp epochs older than the visible range
        p = Rectangle((end, .5 + tick / 2), abs(start - end), h,
                      lw=1, ec="w", fc=color)
        ax.text((start + end) / 2, .5 + (tick + h) / 2, era,
                ha="center", va="center", size=9)
        ax.add_patch(p)
def search(signal='', action='', signals=SIGNALS):
    """Search the signals DB for signal named *signal*, and which action
    matches *action* in a case insensitive way.

    :param signal: Regex for signal name.
    :param action: Regex for default action.
    :param signals: Database of signals.
    """
    name_pat = re.compile(signal, re.IGNORECASE)
    action_pat = re.compile(action, re.IGNORECASE)
    matches = []
    for code, entry in signals.items():
        sig_name, default_action, _ = entry
        if name_pat.match(sig_name) and action_pat.match(default_action):
            matches.append(explain(code, signals=signals))
    return matches
def add_task_file_manager(self, task_file_manager):
    """Add a task file manager.

    Only available after the Plugin Manager is loaded.

    :raises PluginManagerNotLoadedException: when called before loading
    """
    # Guard clause: refuse registration until the manager has been loaded.
    if not self._loaded:
        raise PluginManagerNotLoadedException()
    self._task_factory.add_custom_task_file_manager(task_file_manager)
def _GetSocket ( self ) : """Establishes a connection to an nsrlsvr instance . Returns : socket . _ socketobject : socket connected to an nsrlsvr instance or None if a connection cannot be established ."""
try : return socket . create_connection ( ( self . _host , self . _port ) , self . _SOCKET_TIMEOUT ) except socket . error as exception : logger . error ( 'Unable to connect to nsrlsvr with error: {0!s}.' . format ( exception ) )
def plot(self, resolution_constant_regions=20, resolution_smooth_regions=200):
    """Return arrays x, y for plotting the piecewise constant function.

    Just the minimum number of straight lines are returned if ``eps=0``,
    otherwise `resolution_constant_regions` plotting intervals are
    inserted in the constant regions with `resolution_smooth_regions`
    plotting intervals in the smoothed regions.
    """
    if self.eps == 0:
        # Exact piecewise-constant plot: two points per interval suffice.
        x = []
        y = []
        for I, value in zip(self._indicator_functions, self._values):
            x.append(I.L)
            y.append(value)
            x.append(I.R)
            y.append(value)
        return x, y
    else:
        # NOTE(review): ``n`` is computed but never used below -- confirm
        # whether it was meant to size the smooth-region sampling.
        n = float(resolution_smooth_regions) / self.eps
        if len(self.data) == 1:
            # A single constant piece: just its two endpoints.
            return [self.L, self.R], [self._values[0], self._values[0]]
        else:
            # First constant region, up to eps before the first internal jump.
            x = [np.linspace(self.data[0][0], self.data[1][0] - self.eps,
                             resolution_constant_regions + 1)]
            # Iterate over all internal discontinuities
            for I in self._indicator_functions[1:]:
                # Densely sample the smoothed transition around I.L ...
                x.append(np.linspace(I.L - self.eps, I.L + self.eps,
                                     resolution_smooth_regions + 1))
                # ... then the constant stretch of this interval.
                x.append(np.linspace(I.L + self.eps, I.R - self.eps,
                                     resolution_constant_regions + 1))
            # Last part (intentionally reuses the final I from the loop).
            x.append(np.linspace(I.R - self.eps, I.R, 3))
            x = np.concatenate(x)
            y = self(x)
            return x, y
def parsed(self):
    """Get the code object which represents the compiled Python file.

    This property is cached and only compiles the content once.
    """
    cached = self._parsed
    if not cached:
        cached = compile(self.content, self.path, 'exec')
        self._parsed = cached
    return cached
def run(commands, shell=None, prompt_template="default", speed=1, quiet=False,
        test_mode=False, commentecho=False):
    """Main function for "magic-running" a list of commands.

    Iterates over *commands* (the lines of a session file), interpreting
    comment magic, ``alias``/``export`` lines, fenced ```python/```ipython
    blocks and ordinary shell commands, echoing them with a typewriter
    effect.
    """
    if not quiet:
        secho("We'll do it live!", fg="red", bold=True)
        secho(
            "STARTING SESSION: Press Ctrl-C at any time to exit.",
            fg="yellow",
            bold=True,
        )
        click.pause()
        click.clear()
    state = SessionState(
        shell=shell,
        prompt_template=prompt_template,
        speed=speed,
        test_mode=test_mode,
        commentecho=commentecho,
    )
    i = 0
    # Manual index loop: fenced code blocks and stealthmode both need to
    # move the cursor by more than one line at a time.
    while i < len(commands):
        command = commands[i].strip()
        i += 1
        if not command:
            continue
        is_comment = command.startswith("#")
        if not is_comment:
            command_as_list = shlex.split(ensure_utf8(command))
        else:
            command_as_list = None
        shell_match = SHELL_RE.match(command)
        if is_comment:
            # Parse comment magic
            match = OPTION_RE.match(command)
            if match:
                option, arg = match.group("option"), match.group("arg")
                func = OPTION_MAP[option]
                func(state, arg)
            elif state.commentecho():
                comment = command.lstrip("#")
                secho(comment, fg="yellow", bold=True)
            continue
        # Handle 'export' and 'alias' commands by storing them in SessionState
        elif command_as_list and command_as_list[0] in ["alias", "export"]:
            magictype(
                command,
                prompt_template=state["prompt_template"],
                speed=state["speed"],
            )
            # Store the raw commands instead of using add_envvar and add_alias
            # to avoid having to parse the command ourselves
            state.add_command(command)
        # Handle ```python and ```ipython by running "player" consoles
        elif shell_match:
            shell_name = shell_match.groups()[0].strip()
            py_commands = []
            more = True
            while more:  # slurp up all the python code
                try:
                    py_command = commands[i].rstrip()
                except IndexError:
                    raise SessionError(
                        "Unmatched {0} code block in "
                        "session file.".format(shell_name)
                    )
                i += 1
                if py_command.startswith("```"):
                    # NOTE(review): this extra increment skips the line right
                    # after the closing fence -- confirm this is intended.
                    i += 1
                    more = False
                else:
                    py_commands.append(py_command)
            # Run the player console
            magictype(
                shell_name,
                prompt_template=state["prompt_template"],
                speed=state["speed"],
            )
            if shell_name == "ipython":
                try:
                    from doitlive.ipython_consoles import start_ipython_player
                except ImportError:
                    raise RuntimeError(
                        "```ipython blocks require IPython to be installed"
                    )
                # dedent all the commands to account for IPython's autoindentation
                ipy_commands = [textwrap.dedent(cmd) for cmd in py_commands]
                start_ipython_player(ipy_commands, speed=state["speed"])
            else:
                start_python_player(py_commands, speed=state["speed"])
        else:
            # goto_stealthmode determines when to switch to stealthmode
            goto_stealthmode = magicrun(command, **state)
            # stealthmode allows user to type live commands outside of automated script
            i -= stealthmode(state, goto_stealthmode)
    echo_prompt(state["prompt_template"])
    wait_for(RETURNS)
    if not quiet:
        secho("FINISHED SESSION", fg="yellow", bold=True)
def support_autoupload_param_hostip(self, **kwargs):
    """Auto Generated Code

    Builds the brocade-ras ``support/autoupload-param/hostip`` config XML
    and hands it to the callback (``kwargs['callback']`` or the instance
    default).
    """
    root = ET.Element("config")
    support_el = ET.SubElement(root, "support",
                               xmlns="urn:brocade.com:mgmt:brocade-ras")
    param_el = ET.SubElement(support_el, "autoupload-param")
    hostip_el = ET.SubElement(param_el, "hostip")
    hostip_el.text = kwargs.pop('hostip')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def streamReachAndWatershed(self, delineate, out_stream_order_grid,
                            out_network_connectivity_tree,
                            out_network_coordinates,
                            out_stream_reach_file,
                            out_watershed_grid,
                            pit_filled_elevation_grid=None,
                            flow_dir_grid=None,
                            contributing_area_grid=None,
                            stream_raster_grid=None,
                            outlet_shapefile=None):
    """Creates vector network and shapefile from stream raster grid"""
    log("PROCESS: StreamReachAndWatershed")
    # Optional grid arguments override whatever the instance holds.
    if pit_filled_elevation_grid:
        self.pit_filled_elevation_grid = pit_filled_elevation_grid
    if flow_dir_grid:
        self.flow_dir_grid = flow_dir_grid
    if contributing_area_grid:
        self.contributing_area_grid = contributing_area_grid
    if stream_raster_grid:
        self.stream_raster_grid = stream_raster_grid
    # Assemble the TauDEM "streamnet" command line.
    cmd = [os.path.join(self.taudem_exe_path, 'streamnet'),
           '-fel', self.pit_filled_elevation_grid,
           '-p', self.flow_dir_grid,
           '-ad8', self.contributing_area_grid,
           '-src', self.stream_raster_grid,
           '-ord', out_stream_order_grid,
           '-tree', out_network_connectivity_tree,
           '-coord', out_network_coordinates,
           '-net', out_stream_reach_file,
           '-w', out_watershed_grid]
    if outlet_shapefile:
        cmd += ['-o', outlet_shapefile]
    if delineate:
        cmd += ['-sw']
    self._run_mpi_cmd(cmd)
    # Create a projection file for each produced output grid/shapefile.
    for produced in (out_stream_order_grid,
                     out_stream_reach_file,
                     out_watershed_grid):
        self._add_prj_file(self.pit_filled_elevation_grid, produced)
def as_ipywidget(self):
    """Provides an IPywidgets player that can be used in a notebook."""
    # Imported lazily so IPython is only required when this is used.
    from IPython.display import Audio
    samples, sample_rate = self.y, self.sr
    return Audio(data=samples, rate=sample_rate)
def kill(self, dwExitCode=0):
    """Terminates the thread execution.

    @note: If the C{lpInjectedMemory} member contains a valid pointer,
        the memory is freed.

    @type  dwExitCode: int
    @param dwExitCode: (Optional) Thread exit code.
    """
    handle = self.get_handle(win32.THREAD_TERMINATE)
    win32.TerminateThread(handle, dwExitCode)
    # Best-effort cleanup of injected memory.  Known hack: it breaks if
    # several pieces of code were injected into the target.
    if self.pInjectedMemory is not None:
        try:
            self.get_process().free(self.pInjectedMemory)
            self.pInjectedMemory = None
        except Exception:
            # raise  # XXX DEBUG
            pass
def current_boost_dir():
    """Returns the (relative) path to the Boost source-directory this file
    is located in (if any)."""
    # Path to directory containing this script.
    here = os.path.dirname(os.path.realpath(__file__))
    # Walk upward, verifying that this file sits under
    # "${boost-dir}/libs/mpl/preprocessed" (innermost component first).
    for expected in ("preprocessed", "mpl", "libs"):
        here, leaf = os.path.split(here)
        if leaf != expected:
            return None
    return os.path.relpath(here)
def set_phases(self, literals=None):
    """Sets polarities of a given list of variables.

    :param literals: list of literals whose preferred polarities should
        be fixed; defaults to an empty list.
    """
    # BUG FIX: the default used to be a mutable ``[]`` (shared-state
    # pitfall with mutable default arguments); use a None sentinel.
    if literals is None:
        literals = []
    if self.minisat:
        pysolvers.minisat22_setphases(self.minisat, literals)
def stack(self, level=-1, dropna=True):
    """Stack the prescribed level(s) from columns to index.

    Return a reshaped DataFrame or Series having a multi-level index with
    one or more new inner-most levels compared to the current DataFrame,
    created by pivoting the columns: with single-level columns the result
    is a Series; with multi-level columns the result is a DataFrame whose
    new index level(s) are taken from the prescribed column level(s). The
    new index levels are sorted.

    Parameters
    ----------
    level : int, str, list, default -1
        Level(s) to stack from the column axis onto the index axis,
        defined as one index or label, or a list of indices or labels.
    dropna : bool, default True
        Whether to drop rows in the resulting Frame/Series with missing
        values. Stacking a column level onto the index axis can create
        combinations of index and column values that are missing from
        the original dataframe.

    Returns
    -------
    DataFrame or Series
        Stacked dataframe or series.

    See Also
    --------
    DataFrame.unstack : Unstack prescribed level(s) from index axis onto
        column axis.
    DataFrame.pivot : Reshape dataframe from long format to wide format.
    DataFrame.pivot_table : Create a spreadsheet-style pivot table as a
        DataFrame.
    """
    # Delegate to the internal reshape implementation; a list/tuple of
    # levels requires the multi-level variant.
    from pandas.core.reshape.reshape import stack, stack_multiple

    if isinstance(level, (tuple, list)):
        return stack_multiple(self, level, dropna=dropna)
    return stack(self, level, dropna=dropna)
def pull(name, version, force=False):
    """Pull a released IOTile component into the current working directory

    The component is found using whatever DependencyResolvers are
    installed and registered as part of the default
    DependencyResolverChain. This is the same mechanism used in
    ``iotile depends update``, so any component that can be updated that
    way can also be found and pulled using this method.
    """
    resolver_chain = DependencyResolverChain()
    version_range = SemanticVersionRange.FromString(version)
    resolver_chain.pull_release(name, version_range, force=force)
def hold(name=None, pkgs=None, sources=None, normalize=True, **kwargs):  # pylint: disable=W0613
    '''
    .. versionadded:: 2014.7.0

    Version-lock packages

    .. note::
        Requires the appropriate ``versionlock`` plugin package to be
        installed:

        - On RHEL 5: ``yum-versionlock``
        - On RHEL 6 & 7: ``yum-plugin-versionlock``
        - On Fedora: ``python-dnf-plugins-extras-versionlock``

    name
        The name of the package to be held.

    Multiple Package Options:

    pkgs
        A list of packages to hold. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed.

    Returns a dict containing the changes.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.hold <package name>
        salt '*' pkg.hold pkgs='["foo", "bar"]'
    '''
    # Raises if the versionlock plugin is missing.
    _check_versionlock()
    if not name and not pkgs and not sources:
        raise SaltInvocationError(
            'One of name, pkgs, or sources must be specified.'
        )
    if pkgs and sources:
        raise SaltInvocationError(
            'Only one of pkgs or sources can be specified.'
        )
    # Build the list of lock targets; pkgs wins over sources wins over name.
    targets = []
    if pkgs:
        targets.extend(pkgs)
    elif sources:
        for source in sources:
            targets.append(next(six.iterkeys(source)))
    else:
        targets.append(name)
    current_locks = list_holds(full=False)
    ret = {}
    for target in targets:
        if isinstance(target, dict):
            # A pkgs entry may be a {name: version} mapping; lock by name.
            target = next(six.iterkeys(target))
        ret[target] = {'name': target,
                       'changes': {},
                       'result': False,
                       'comment': ''}
        if target not in current_locks:
            if 'test' in __opts__ and __opts__['test']:
                # Dry-run mode: report what would happen, result=None.
                ret[target].update(result=None)
                ret[target]['comment'] = ('Package {0} is set to be held.'
                                          .format(target))
            else:
                out = _call_yum(['versionlock', target])
                if out['retcode'] == 0:
                    ret[target].update(result=True)
                    ret[target]['comment'] = ('Package {0} is now being held.'
                                              .format(target))
                    ret[target]['changes']['new'] = 'hold'
                    ret[target]['changes']['old'] = ''
                else:
                    ret[target]['comment'] = ('Package {0} was unable to be held.'
                                              .format(target))
        else:
            # Already locked: nothing to change, still a success.
            ret[target].update(result=True)
            ret[target]['comment'] = ('Package {0} is already set to be held.'
                                      .format(target))
    return ret
def populate_metadata(model, MetadataClass):
    """For a given model and metadata class, ensure there is metadata for
    every instance.
    """
    # NOTE(review): ``content_type`` is never used below; the call may
    # still matter for its side effect (Django's get_for_model can create
    # the ContentType row) -- confirm before removing.
    content_type = ContentType.objects.get_for_model(model)
    for instance in model.objects.all():
        create_metadata_instance(MetadataClass, instance)
def update_leads_list(self, leads_list_id, name, team_id=None):
    """Update a leads list.

    :param name: Name of the list to update. Must be defined.
    :param team_id: The id of the team to share this list with.

    :return: 204 Response.
    """
    payload = {'name': name}
    if team_id:
        payload['team_id'] = team_id
    endpoint = self.base_endpoint.format('leads_lists/' + str(leads_list_id))
    return self._query_hunter(endpoint, self.base_params, 'put', payload)
def getpcmd(pid):
    """Returns command of process.

    :param pid: process id to look up
    :returns: the command line of the process, or a ``[PROCESS_WITH_PID=...]``
        placeholder when it cannot be determined
    """
    if os.name == "nt":
        # Use wmic command instead of ps on Windows.
        cmd = 'wmic path win32_process where ProcessID=%s get Commandline 2> nul' % (pid,)
        with os.popen(cmd, 'r') as p:
            lines = [line for line in p.readlines() if line.strip("\r\n ") != ""]
        if lines:
            # NOTE(review): assumes wmic printed exactly a header line plus
            # one value line; more matches would raise ValueError -- confirm.
            _, val = lines
            return val
    elif sys.platform == "darwin":
        # Use pgrep instead of /proc on macOS.
        pidfile = ".%d.pid" % (pid,)
        with open(pidfile, 'w') as f:
            f.write(str(pid))
        try:
            p = Popen(['pgrep', '-lf', '-F', pidfile], stdout=PIPE)
            stdout, _ = p.communicate()
            line = stdout.decode('utf8').strip()
            if line:
                # pgrep output is "<pid> <command>"; keep only the command.
                _, scmd = line.split(' ', 1)
                return scmd
        finally:
            os.unlink(pidfile)
    else:
        # Use the /proc filesystem
        # At least on android there have been some issues with not all
        # process infos being readable. In these cases using the `ps` command
        # worked. See the pull request at
        # https://github.com/spotify/luigi/pull/1876
        try:
            with open('/proc/{0}/cmdline'.format(pid), 'r') as fh:
                if six.PY3:
                    return fh.read().replace('\0', ' ').rstrip()
                else:
                    return fh.read().replace('\0', ' ').decode('utf8').rstrip()
        except IOError:
            # the system may not allow reading the command line
            # of a process owned by another user
            pass
    # Fallback instead of None, for e.g. Cygwin where -o is an "unknown option" for the ps command:
    return '[PROCESS_WITH_PID={}]'.format(pid)
def delete_index(self, cardinality):
    """Delete index for the table with the given cardinality.

    Parameters
    ----------
    cardinality : int
        The cardinality of the index to delete.
    """
    DatabaseConnector.delete_index(self, cardinality)
    # Drop the four varchar-variant indexes for this cardinality.
    for suffix in ("varchar", "normalized_varchar",
                   "lower_varchar", "lower_normalized_varchar"):
        self.execute_sql(
            "DROP INDEX IF EXISTS idx_{0}_gram_{1};".format(cardinality, suffix))
    # Drop the per-position lowercase indexes (positions cardinality-1 .. 1).
    for position in range(cardinality - 1, 0, -1):
        self.execute_sql(
            "DROP INDEX IF EXISTS idx_{0}_gram_{1}_lower;".format(
                cardinality, position))
def _handleInvertAxesSelected ( self , evt ) : """Called when the invert all menu item is selected"""
if len ( self . _axisId ) == 0 : return for i in range ( len ( self . _axisId ) ) : if self . _menu . IsChecked ( self . _axisId [ i ] ) : self . _menu . Check ( self . _axisId [ i ] , False ) else : self . _menu . Check ( self . _axisId [ i ] , True ) self . _toolbar . set_active ( self . getActiveAxes ( ) ) evt . Skip ( )
def addSiiToContainer(siiContainer, specfile, siiList):
    """Adds the ``Sii`` elements contained in the siiList to the
    appropriate list in ``siiContainer.container[specfile]``.

    :param siiContainer: instance of :class:`maspy.core.SiiContainer`
    :param specfile: unambiguous identifier of a ms-run file. Is also used
        as a reference to other MasPy file containers.
    :param siiList: a list of ``Sii`` elements imported from any PSM
        search engine results
    """
    target = siiContainer.container[specfile]
    for sii in siiList:
        # Group identifications by id, keeping insertion order.
        target.setdefault(sii.id, []).append(sii)
def getFoundIn(self, foundin_name, projectarea_id=None,
               projectarea_name=None, archived=False):
    """Get :class:`rtcclient.models.FoundIn` object by its name

    :param foundin_name: the foundin name
    :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea` id
    :param projectarea_name: the project area name
    :param archived: (default is False) whether the foundin is archived
    :return: the :class:`rtcclient.models.FoundIn` object
    :rtype: rtcclient.models.FoundIn
    """
    self.log.debug("Try to get <FoundIn %s>", foundin_name)
    if not isinstance(foundin_name, six.string_types) or not foundin_name:
        # BUG FIX: the message used to say "PlannedFor" (copy/paste from a
        # sibling getter); it now names the entity this method handles.
        excp_msg = "Please specify a valid FoundIn name"
        self.log.error(excp_msg)
        raise exception.BadValue(excp_msg)
    foundins = self._getFoundIns(projectarea_id=projectarea_id,
                                 projectarea_name=projectarea_name,
                                 archived=archived,
                                 foundin_name=foundin_name)
    if foundins is not None:
        foundin = foundins[0]
        self.log.info("Find <FoundIn %s>", foundin)
        return foundin
    self.log.error("No FoundIn named %s", foundin_name)
    raise exception.NotFound("No FoundIn named %s" % foundin_name)
def deviance_information_criterions(mean_posterior_lls, ll_per_sample):
    r"""Calculate the Deviance Information Criterion (DIC) in three variants.

    For a likelihood :math:`p(y|\theta)` the deviance is
    :math:`D(\theta) = -2 \log p(y|\theta)`; fitness is measured by the
    posterior mean deviance :math:`\bar{D} = \mathbb{E}_\theta[D(\theta)]`,
    and the variants only differ in their estimate :math:`p_D` of the
    effective number of parameters:

    * ``DIC_2002`` (Spiegelhalter et al. 2002):
      :math:`p_D = \bar{D} - D(\bar{\theta})`
    * ``DIC_2004`` (Gelman et al. 2004):
      :math:`p_D = \frac{1}{2}\hat{var}(D(\theta))`
    * ``DIC_Ando_2011`` (Ando 2011):
      :math:`p_D = 2(\bar{D} - D(\bar{\theta}))`

    In every case :math:`DIC = \bar{D} + p_D`; the model with the smallest
    DIC is preferred.

    Args:
        mean_posterior_lls (ndarray): 1d array containing, per problem,
            the log likelihood of the average posterior point estimate.
        ll_per_sample (ndarray): (d, n) array with the n per-sample log
            likelihoods for each of the d problems.

    Returns:
        dict: the ``DIC_2002``, ``DIC_2004`` and ``DIC_Ando_2011`` maps.
    """
    mean_deviance = -2 * np.mean(ll_per_sample, axis=1)
    deviance_at_mean = -2 * mean_posterior_lls
    complexity_2002 = mean_deviance - deviance_at_mean
    complexity_2004 = np.var(ll_per_sample, axis=1) / 2.0
    return {
        'DIC_2002': np.nan_to_num(mean_deviance + complexity_2002),
        'DIC_2004': np.nan_to_num(mean_deviance + complexity_2004),
        'DIC_Ando_2011': np.nan_to_num(mean_deviance + 2 * complexity_2002),
    }
def launch_browser(self, soup):
    """Launch a browser to display a page, for debugging purposes.

    :param: soup: Page contents to display, supplied as a bs4 soup object.
    """
    # delete=False so the browser can still read the page after we return.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.html') as page:
        page.write(soup.encode())
        webbrowser.open('file://' + page.name)
def config():
    """Loads and returns a ConfigParser from ``~/.deepdish.conf``."""
    parser = ConfigParser()
    # Built-in defaults, overridden by the user's config file when present.
    parser.add_section('io')
    parser.set('io', 'compression', 'zlib')
    parser.read(os.path.expanduser('~/.deepdish.conf'))
    return parser
def run ( self , * args , ** kwargs ) : """The Node main method , running in a child process ( similar to Process . run ( ) but also accepts args ) A children class can override this method , but it needs to call super ( ) . run ( * args , * * kwargs ) for the node to start properly and call update ( ) as expected . : param args : arguments to pass to update ( ) : param kwargs : keyword arguments to pass to update ( ) : return : last exitcode returned by update ( )"""
# TODO : make use of the arguments ? since run is now the target for Process . . . exitstatus = None # keeping the semantic of multiprocessing . Process : running process has None if setproctitle and self . new_title : setproctitle . setproctitle ( "{0}" . format ( self . name ) ) print ( '[{proc}] Proc started as [{pid}]' . format ( proc = self . name , pid = self . ident ) ) with self . context_manager ( * args , ** kwargs ) as cm : if cm : cmargs = maybe_tuple ( cm ) # prepending context manager , to be able to access it from target args = cmargs + args exitstatus = self . eventloop ( * args , ** kwargs ) logging . debug ( "[{self.name}] Proc exited." . format ( ** locals ( ) ) ) return exitstatus
def Start(self, hostname, port):
    """Starts the process status RPC server.

    Args:
      hostname (str): hostname or IP address to connect to for requests.
      port (int): port to connect to for requests.

    Returns:
      bool: True if the RPC server was successfully started.
    """
    opened = self._Open(hostname, port)
    if not opened:
        return False
    # Serve XML-RPC requests on a dedicated background thread.
    rpc_thread = threading.Thread(
        name=self._THREAD_NAME, target=self._xmlrpc_server.serve_forever)
    self._rpc_thread = rpc_thread
    rpc_thread.start()
    return True
def plugin_privileges(self, name):
    """Retrieve list of privileges to be granted to a plugin.

    Args:
        name (string): Name of the remote plugin to examine. The
            ``:latest`` tag is optional, and is the default if omitted.

    Returns:
        A list of dictionaries representing the plugin's permissions
    """
    query_params = {'remote': name}
    headers = {}
    registry, _repo_name = auth.resolve_repository_name(name)
    # Forward registry credentials when the local config has them.
    auth_header = auth.get_config_header(self, registry)
    if auth_header:
        headers['X-Registry-Auth'] = auth_header
    response = self._get(self._url('/plugins/privileges'),
                         params=query_params, headers=headers)
    return self._result(response, True)
def get_error(self, xml):
    """Extract the error code and description from an error-response XML.

    The XML corresponds to the body of an HTTP response with status 500.

    :param xml: XML contained in the HTTP response body.
    :return: Tuple with the error code and description found in the XML:
        (<error_code>, <error_description>)
    """
    # Renamed from `map` to avoid shadowing the builtin.
    response_map = loads(xml)
    error_map = response_map['networkapi']['erro']
    return int(error_map['codigo']), str(error_map['descricao'])
def run_cufflinks(data):
    """Quantitate transcript expression with Cufflinks."""
    if "cufflinks" in dd.get_tools_off(data):
        # Cufflinks explicitly disabled for this sample; pass data through.
        return [[data]]
    out_dir, fpkm_file, fpkm_isoform_file = cufflinks.run(
        dd.get_work_bam(data), dd.get_sam_ref(data), data)
    data = dd.set_cufflinks_dir(data, out_dir)
    data = dd.set_fpkm(data, fpkm_file)
    data = dd.set_fpkm_isoform(data, fpkm_isoform_file)
    return [[data]]
def xiphias_get_users(self, peer_jids: Union[str, List[str]]):
    """Send a new-format xiphias request for user data (profile creation
    date, background picture URL, etc.).

    :param peer_jids: one jid, or a list of jids
    """
    request = xiphias.UsersRequest(peer_jids)
    return self._send_xmpp_element(request)
def to_dict(self):
    """Return the user as a dict."""
    return {
        'name': self.name,
        'passwd': self.passwd,
        'uid': self.uid,
        'gid': self.gid,
        'gecos': self.gecos,
        'home_dir': self.home_dir,
        'shell': self.shell,
        # Serialize key objects to their base64-encoded string form.
        'public_keys': [key.b64encoded for key in self.public_keys],
    }
def create_finance_metrics(metrics: list, pronacs: list):
    """Creates metrics, creating an Indicator if it doesn't already exists.

    Metrics are created for projects that are in pronacs and saved in
    database.

    Args:
        metrics: list of names of metrics that will be calculated.
        pronacs: pronacs in dataset that is used to calculate those metrics.
    """
    # Pairs (pronac, metric_name) that have not been computed yet.
    missing = missing_metrics(metrics, pronacs)
    print(f"There are {len(missing)} missing metrics!")
    processors = mp.cpu_count()
    print(f"Using {processors} processors to calculate metrics!")
    # Fetch all affected FinancialIndicator rows in one query and index them
    # by project id for O(1) lookup per metric.
    indicators_qs = FinancialIndicator.objects.filter(
        project_id__in=[p for p, _ in missing])
    indicators = {i.project_id: i for i in indicators_qs}
    # Fan the per-metric computations out over a process pool.
    pool = mp.Pool(processors)
    results = [pool.apply_async(create_metric, args=(indicators, metric_name, pronac))
               for pronac, metric_name in missing]
    calculated_metrics = [p.get() for p in results]
    if calculated_metrics:
        # Single bulk INSERT instead of one query per metric.
        Metric.objects.bulk_create(calculated_metrics)
        print("Bulk completed")
    # Recompute each indicator's weighted complexity now that metrics changed.
    for indicator in indicators.values():
        indicator.fetch_weighted_complexity()
    print("Finished update indicators!")
    pool.close()
    print("Finished metrics calculation!")
def _load_package(self, json_line, installed_packages):
    """Returns the user_package (name, version, source), and the list of envs
    registered when the package was loaded"""
    # Empty cache line: nothing to load.
    if len(json_line) == 0:
        return {}, set([])
    valid_json = False
    try:
        user_package = json.loads(json_line)
        valid_json = True
    except ValueError:
        user_package = {}
    package_name = user_package['name'] if 'name' in user_package else None
    # pip package names use dashes; the importable module uses underscores.
    module_name = package_name.replace('-', '_') if package_name is not None else ''
    # Snapshot of registered env ids, so the delta after import/reload tells
    # us which envs this package registered.
    envs_before = set(registry.list())
    if not valid_json or package_name is None:
        # Corrupt cache entry: flag the cache for regeneration.
        self.cache_needs_update = True
        logger.warn('Unable to load user environments. Try deleting your cache '
                    'file "%s" if this problem persists. \n\nLine: %s', self.cache_path, json_line)
        return {}, set([])
    elif package_name not in installed_packages:
        # Package was uninstalled since the cache was written.
        self.cache_needs_update = True
        logger.warn('The package "%s" does not seem to be installed anymore. User environments from this '
                    'package will not be registered, and the package will no longer be loaded on `import gym`', package_name)
    elif module_name in sys.modules:
        # Module already imported: re-execute it so its env registrations run.
        self.cache_needs_update = True
        try:
            reload_module(sys.modules[module_name])
        except ImportError:
            if 'gym' in package_name:  # To avoid uninstalling failing dependencies
                logger.warn('Unable to reload the module "%s" from package "%s" (%s). This is usually caused by a '
                            'invalid pip package. The package will be uninstalled and no longer be loaded on `import gym`.\n',
                            module_name, package_name, installed_packages[package_name])
                traceback.print_exc(file=sys.stdout)
                sys.stdout.write('\n')
                self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))
    else:
        # First import of the module; importing triggers env registration.
        try:
            __import__(module_name)
        except ImportError:
            if 'gym' in package_name:  # To avoid uninstalling failing dependencies
                self.cache_needs_update = True
                logger.warn('Unable to import the module "%s" from package "%s" (%s). This is usually caused by a '
                            'invalid pip package. The package will be uninstalled and no longer be loaded on `import gym`.\n',
                            module_name, package_name, installed_packages[package_name])
                traceback.print_exc(file=sys.stdout)
                sys.stdout.write('\n')
                self._run_cmd('{} uninstall -y {}'.format(pip_exec, package_name))
    envs_after = set(registry.list())
    registered_envs = envs_after - envs_before
    if len(registered_envs) > 0:
        self.user_packages[package_name] = user_package
    # Annotate each newly registered spec with its originating package.
    for new_env in registered_envs:
        new_spec = registry.spec(new_env)
        new_spec.source = user_package['source']
        new_spec.package = '{} ({})'.format(user_package['name'], user_package['version'])
        self.env_ids.add(new_env.lower())
    return user_package, registered_envs
def install_pyenv(name, user=None):
    '''
    Install pyenv if not installed. Allows you to require pyenv be installed
    prior to installing the plugins. Useful if you want to install pyenv
    plugins via the git or file modules and need them installed before
    installing any rubies.

    Use the pyenv.root configuration option to set the path for pyenv if you
    want a system wide install that is not in a user home dir.

    user: None
        The user to run pyenv as.
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    if __opts__['test']:
        # Dry run: report intent without touching the system.
        ret['comment'] = 'pyenv is set to be installed'
        return ret
    return _check_and_install_python(ret, user)
def _infer_transform_options ( transform ) : """figure out what transform options should be by examining the provided regexes for keywords"""
TransformOptions = collections . namedtuple ( "TransformOptions" , [ 'CB' , 'dual_index' , 'triple_index' , 'MB' , 'SB' ] ) CB = False SB = False MB = False dual_index = False triple_index = False for rx in transform . values ( ) : if not rx : continue if "CB1" in rx : if "CB3" in rx : triple_index = True else : dual_index = True if "SB" in rx : SB = True if "CB" in rx : CB = True if "MB" in rx : MB = True return TransformOptions ( CB = CB , dual_index = dual_index , triple_index = triple_index , MB = MB , SB = SB )
def deref(data, spec: dict):
    """Recursively resolve JSON ``$ref`` references in *data* against *spec*.

    Returns *data* unchanged (same object) when nothing was dereferenced;
    otherwise returns a shallow-copied structure with resolved values.

    :param data: arbitrary JSON-like structure (mappings, sequences, scalars)
    :param spec: the full specification used to resolve ``$ref`` pointers
    :return: the dereferenced structure
    """
    # Bug fix: str/bytes are Sequences, so a plain string used to be
    # enumerated character-by-character, recursing infinitely on any
    # single-character string. Treat them as scalars.
    if isinstance(data, (str, bytes)):
        return data
    if isinstance(data, Sequence):
        is_dict = False
        gen = enumerate(data)
    elif not isinstance(data, Mapping):
        # Scalar (int, float, None, ...): nothing to dereference.
        return data
    elif '$ref' in data:
        # Resolve the reference, then dereference the target itself.
        return deref(get_ref(spec, data['$ref']), spec)
    else:
        is_dict = True
        gen = data.items()  # type: ignore
    result = None
    for k, v in gen:
        new_v = deref(v, spec)
        if new_v is not v:
            # Copy lazily, only when a child actually changed.
            if result is None:
                result = data.copy() if is_dict else data[:]
            result[k] = new_v
    return result if result is not None else data
def inverse_transform(self, Y, columns=None):
    """Transform input data `Y` to ambient data space defined by `self.data`.

    Takes data in the same reduced space as `self.data_nu` and transforms it
    to be in the same ambient space as `self.data`.

    Parameters
    ----------
    Y : array-like, shape=[n_samples_y, n_pca]
        n_features must be the same as `self.data_nu`.
    columns : list-like
        Column indices in the original data space to be returned. Avoids
        recomputing the full matrix where only a few dimensions of the
        ambient space are of interest.

    Returns
    -------
    Inverse transformed data, shape=[n_samples_y, n_features]

    Raises
    ------
    ValueError : if Y.shape[1] != self.data_nu.shape[1]
    """
    try:
        if not hasattr(self, "data_pca"):
            # No PCA was performed; Y is already in ambient space.
            try:
                if Y.shape[1] != self.data_nu.shape[1]:
                    raise ValueError
            except IndexError:
                # len(Y.shape) < 2
                raise ValueError
            if columns is None:
                return Y
            columns = np.array([columns]).flatten()
            return Y[:, columns]
        if columns is None:
            return self.data_pca.inverse_transform(Y)
        # Only reconstruct the requested ambient-space columns.
        columns = np.array([columns]).flatten()
        ambient = np.dot(Y, self.data_pca.components_[:, columns])
        if hasattr(self.data_pca, "mean_"):
            ambient += self.data_pca.mean_[columns]
        return ambient
    except ValueError:
        # Re-raise with a more informative message.
        raise ValueError("data of shape {} cannot be inverse transformed"
                         " from graph built on data of shape {}".format(
                             Y.shape, self.data_nu.shape))
def _do_code_blocks(self, text):
    """Process Markdown `<pre><code>` blocks."""
    # Verbose (re.X) pattern: a code block is one or more lines indented by a
    # full tab-width, ended by a less-indented line or end of document.
    code_block_re = re.compile(r'''
        (?:\n\n|\A\n?)
        (               # $1 = the code block -- one or more lines, starting with a space/tab
          (?:
            (?:[ ]{%d} | \t)  # Lines must start with a tab or a tab-width of spaces
            .*\n+
          )+
        )
        ((?=^[ ]{0,%d}\S)|\Z)   # Lookahead for non-space at line-start, or end of doc
        ''' % (self.tab_width, self.tab_width),
        re.M | re.X)
    # Delegate each matched block to the substitution callback.
    return code_block_re.sub(self._code_block_sub, text)
def build_vf_node(vf):
    """Convert a VulnerabilityFunction object into a Node suitable for XML
    conversion."""
    children = [
        Node('imls', {'imt': vf.imt}, vf.imls),
        Node('meanLRs', {}, vf.mean_loss_ratios),
        Node('covLRs', {}, vf.covs),
    ]
    attrs = {'id': vf.id, 'dist': vf.distribution_name}
    return Node('vulnerabilityFunction', attrs, nodes=children)
async def unsubscribe(self, topic):
    """Unsubscribe the socket from the specified topic.

    :param topic: The topic to unsubscribe from.
    """
    if self.socket_type not in {SUB, XSUB}:
        raise AssertionError(
            "A %s socket cannot unsubscribe." % self.socket_type.decode(),
        )
    # Remove the subscription **BEFORE** awaiting so that new connections
    # created during the awaits below honor the setting.
    self._subscriptions.remove(topic)
    pending = [
        asyncio.ensure_future(
            peer.connection.local_unsubscribe(topic),
            loop=self.loop,
        )
        for peer in self._peers
        if peer.connection
    ]
    if not pending:
        return
    try:
        await asyncio.wait(pending, loop=self.loop)
    finally:
        for fut in pending:
            fut.cancel()
def check_columns(column, line, columns):
    """Make sure the column is the minimum between the largest column asked
    for and the max column available in the line."""
    limit = min(len(line), max(columns))
    return column <= limit
def to_feature_importances(regressor_type, regressor_kwargs, trained_regressor):
    """Extract feature importances from a trained regressor.

    Motivation: when the out-of-bag improvement heuristic is used, we cancel
    the effect of normalization by multiplying by the number of trees in the
    regression ensemble, prioritizing links inferred by larger ensembles.

    :param regressor_type: string. Case insensitive.
    :param regressor_kwargs: dictionary configuring the regressor.
    :param trained_regressor: the trained model to extract importances from.
    :return: the feature importances inferred from the trained model.
    """
    importances = trained_regressor.feature_importances_
    if not is_oob_heuristic_supported(regressor_type, regressor_kwargs):
        return importances
    # Undo sklearn's normalization by the ensemble size.
    return importances * len(trained_regressor.estimators_)
def faasport(func: Faasport) -> Faasport:
    """Decorator that registers the user's faasport function."""
    global user_faasport
    if user_faasport is None:
        user_faasport = func
        return func
    # A faasport was already registered; refuse a second definition.
    raise RuntimeError('Multiple definitions of faasport.')
def pressure_trend_text(trend):
    """Convert pressure trend to a string, as used by the UK met office."""
    _ = pywws.localisation.translation.ugettext
    magnitude = abs(trend)
    # Within +/-0.1 the pressure is considered steady.
    if magnitude < 0.1:
        return _(u'steady')
    if trend > 0.0:
        if magnitude > 6.0:
            return _(u'rising very rapidly')
        if magnitude > 3.5:
            return _(u'rising quickly')
        if magnitude > 1.5:
            return _(u'rising')
        return _(u'rising slowly')
    if magnitude > 6.0:
        return _(u'falling very rapidly')
    if magnitude > 3.5:
        return _(u'falling quickly')
    if magnitude > 1.5:
        return _(u'falling')
    return _(u'falling slowly')
def registerApp(self, itemId, appType, redirect_uris=None):
    """Registers an app item with the portal.

    App registration results in an APPID and APPSECRET (client_id and
    client_secret in OAuth speak) being generated for that application. Upon
    successful registration, a "Registered App" type keyword gets appended to
    the app item.

    Inputs:
       itemId - The ID of the item being registered. The item must be owned
          by the user invoking this operation, otherwise the call is
          rejected.
       appType - The type of app being registered: browser | native |
          server | multiple.
       redirect_uris - The URIs where the access_token or authorization code
          will be delivered upon successful authorization. Must be a JSON
          string array. The special value urn:ietf:wg:oauth:2.0:oob delivers
          the code to a portal URL (/oauth2/approval).
    """
    url = self._url + "/registerApp"
    params = {
        "f": "json",
        "itemId": itemId,
        "appType": appType
    }
    # Bug fix: the original sent 'redirect_uris' only when it was None
    # (inverted condition), so caller-provided URIs were silently dropped.
    if redirect_uris is not None:
        params['redirect_uris'] = redirect_uris
    return self._post(url=url,
                      param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
def apply_dependencies(self):
    """Creates dependencies links between elements.

    Hosts are processed first; services then receive the hosts collection,
    presumably because service dependencies are derived from the host
    topology — TODO confirm against the hosts/services implementations.

    :return: None
    """
    self.hosts.apply_dependencies()
    self.services.apply_dependencies(self.hosts)
def urlencode(resource):
    """This implementation of urlencode supports all unicode characters.

    :param resource: Resource value to be url encoded.
    """
    if not isinstance(resource, str):
        return _urlencode(resource)
    # Encode text to UTF-8 bytes first so non-ASCII characters survive.
    return _urlencode(resource.encode('utf-8'))
def get_allure_suites(longname):
    """Split a dotted suite longname into Allure suite labels.

    >>> get_allure_suites('Suite1.Test')
    [Label(name='suite', value='Suite1')]
    >>> get_allure_suites('Suite1.Suite2.Test')  # doctest: +NORMALIZE_WHITESPACE
    [Label(name='suite', value='Suite1'),
     Label(name='subSuite', value='Suite2')]
    >>> get_allure_suites('Suite1.Suite2.Suite3.Test')  # doctest: +NORMALIZE_WHITESPACE
    [Label(name='parentSuite', value='Suite1'),
     Label(name='suite', value='Suite2'),
     Label(name='subSuite', value='Suite3')]
    """
    parts = longname.split('.')
    labels = []
    # With four or more components, the first one becomes the parent suite.
    if len(parts) > 3:
        labels.append(Label(LabelType.PARENT_SUITE, parts.pop(0)))
    labels.append(Label(LabelType.SUITE, parts.pop(0)))
    # Everything between the suite and the test name forms the sub-suite.
    if len(parts) > 1:
        labels.append(Label(LabelType.SUB_SUITE, '.'.join(parts[:-1])))
    return labels
def chained(wrapping_exc):  # pylint: disable=W0212
    """Embeds the current exception information into the given one (which
    will replace the current one). For example::

        try:
            ...
        except OSError as ex:
            raise chained(MyError("database not found!"))
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    if not exc_type:
        # No exception is being handled; return the wrapper untouched.
        return wrapping_exc
    wrapping_exc._inner_exc = exc_value
    # Skip the "Traceback (most recent call last):" header line.
    formatted = traceback.format_exception(exc_type, exc_value, exc_tb)
    wrapping_exc._inner_tb = "".join(formatted[1:])
    return wrapping_exc
def area(poly):
    """Area of a polygon poly."""
    if len(poly) < 3:
        # not a plane - no area
        return 0
    num = len(poly)
    cross_sum = np.zeros(3)
    for idx in range(num):
        # Sum the cross products of consecutive vertex pairs (wrapping).
        cross_sum += np.cross(poly[idx], poly[(idx + 1) % num])
    if not cross_sum.any():
        # points are in a straight line - no area
        return 0
    projected = np.dot(cross_sum, unit_normal(poly[0], poly[1], poly[2]))
    return abs(projected / 2)
def _func_filters ( self , filters ) : '''Build post query filters'''
if not isinstance ( filters , ( list , tuple ) ) : raise TypeError ( 'func_filters must be a <type list> or <type tuple>' ) for i , func in enumerate ( filters ) : if isinstance ( func , str ) and func == 'reverse' : filters [ i ] = 'reverse()' elif isinstance ( func , tuple ) and func [ 0 ] in YQL . FUNC_FILTERS : filters [ i ] = '{:s}(count={:d})' . format ( * func ) elif isinstance ( func , dict ) : func_stmt = '' func_name = list ( func . keys ( ) ) [ 0 ] # Because of Py3 values = [ "{0}='{1}'" . format ( v [ 0 ] , v [ 1 ] ) for v in func [ func_name ] ] func_stmt = ',' . join ( values ) func_stmt = '{0}({1})' . format ( func_name , func_stmt ) filters [ i ] = func_stmt else : raise TypeError ( '{0} is neither a <str>, a <tuple> or a <dict>' . format ( func ) ) return '| ' . join ( filters )
def Add(self, request, callback=None):
    """Add a new request.

    Args:
      request: A http_wrapper.Request to add to the batch.
      callback: A callback to be called for this response, of the form
          callback(response, exception). The first parameter is the
          deserialized response object. The second is an
          apiclient.errors.HttpError exception object if an HTTP error
          occurred while processing the request, or None if no errors
          occurred.

    Returns:
      None
    """
    new_id = self._NewId()
    self.__request_response_handlers[new_id] = RequestResponseAndHandler(
        request, None, callback)
def _request(self, method, path='/', url=None, ignore_codes=(), **kwargs):
    """Performs HTTP request.

    :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...)
    :param str path: A path component of the target URL, appended to
        ``self.endpoint``. Ignored when :attr:`url` is given.
    :param str url: The target URL (e.g. ``http://server.tld/somepath/``).
        Takes precedence over :attr:`path`.
    :param ignore_codes: HTTP error codes (e.g. 404, 500) to ignore. If an
        HTTP error occurs and it is *not* in :attr:`ignore_codes`, an
        exception is raised.
    :type ignore_codes: sequence of int
    :param kwargs: Any other kwargs to pass to :meth:`requests.request()`.

    Returns a :class:`requests.Response` object.
    """
    # Fix: the default was a mutable list ([]); use an immutable tuple.
    # Membership tests behave identically, and no shared state can leak.
    target = url if url else (self.endpoint + path)
    response = self.s.request(method, target, **kwargs)
    if not response.ok and response.status_code not in ignore_codes:
        response.raise_for_status()
    return HTTPResponse(response)
def licenses(self):
    """Returns a string of the built-in licenses the J-Link has.

    Args:
      self (JLink): the ``JLink`` instance

    Returns:
      String of the contents of the built-in licenses the J-Link has.
    """
    size = self.MAX_BUF_SIZE
    buf = (ctypes.c_char * size)()
    result = self._dll.JLINK_GetAvailableLicense(buf, size)
    if result < 0:
        # Negative return codes are DLL error codes.
        raise errors.JLinkException(result)
    return ctypes.string_at(buf).decode()
def record_set_get(name, zone_name, resource_group, record_type, **kwargs):
    '''
    .. versionadded:: Fluorine

    Get a dictionary representing a record set's properties.

    :param name: The name of the record set, relative to the name of the
        zone.
    :param zone_name: The name of the DNS zone (without a terminating dot).
    :param resource_group: The name of the resource group.
    :param record_type: The type of DNS record in this record set. Possible
        values include: 'A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR',
        'SOA', 'SRV', 'TXT'

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_dns.record_set_get '@' myzone testgroup SOA
    '''
    dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
    try:
        record_set = dnsconn.record_sets.get(
            relative_record_set_name=name,
            zone_name=zone_name,
            resource_group_name=resource_group,
            record_type=record_type)
        return record_set.as_dict()
    except CloudError as exc:
        # Log the Azure error and surface it in the result dict.
        __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
        return {'error': str(exc)}
def _exclude_region(self, contig, start, end, fout):
    '''Writes reads not mapping to the given region of contig, start and end
    as per python convention'''
    sam_reader = pysam.Samfile(self.bam, "rb")
    try:
        # Intervals are inclusive, hence end - 1.
        exclude_interval = pyfastaq.intervals.Interval(start, end - 1)
        for read in sam_reader.fetch(contig):
            read_interval = pyfastaq.intervals.Interval(
                read.pos, read.reference_end - 1)
            if not read_interval.intersects(exclude_interval):
                print(mapping.aligned_read_to_read(
                          read, ignore_quality=not self.fastq_out),
                      file=fout)
    finally:
        # Fix: the BAM handle was never closed, leaking a file descriptor
        # per call.
        sam_reader.close()
def simple(type, short, long=None, parent=None, buttons=gtk.BUTTONS_OK,
           default=None, **kw):
    """A simple dialog.

    :param type: The type of dialog
    :param short: The short description
    :param long: The long description
    :param parent: The parent Window to make this dialog transient to
    :param buttons: A buttons enum
    :param default: A default response
    """
    # Fix: only supply RESPONSE_OK as the default when the caller did not
    # pass one; previously a caller-provided default was silently clobbered
    # whenever buttons was BUTTONS_OK.
    if buttons == gtk.BUTTONS_OK and default is None:
        default = gtk.RESPONSE_OK
    return _message_dialog(type, short, long, parent=parent, buttons=buttons,
                           default=default, **kw)