idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
45,000
def fill_archives(src, dst, startFrom, endAt=0, overwrite=False, lock_writes=False):
    """Fills gaps in dst using data from src.

    Works backwards in time from ``startFrom`` down to ``endAt``, one whisper
    archive at a time (finest retention first).  Gaps found in ``dst`` are
    filled by delegating to the module-level ``fill()`` helper.  With
    ``overwrite=True`` every fetched point is treated as missing, so existing
    values in ``dst`` are replaced as well.
    """
    # Toggle whisper's file locking according to the caller's request.
    if lock_writes is False:
        whisper.LOCK = False
    elif whisper.CAN_LOCK and lock_writes is True:
        whisper.LOCK = True
    header = whisper.info(dst)
    archives = header['archives']
    # Finest-resolution archive first.
    archives = sorted(archives, key=lambda t: t['retention'])
    for archive in archives:
        # Earliest timestamp this archive can cover (never before endAt).
        fromTime = max(endAt, time.time() - archive['retention'])
        if fromTime >= startFrom:
            continue
        (timeInfo, values) = whisper.fetch(dst, fromTime, untilTime=startFrom)
        (start, end, step) = timeInfo
        gapstart = None
        for value in values:
            # With overwrite=True every point counts as a gap.
            has_value = bool(value and not overwrite)
            if not has_value and not gapstart:
                gapstart = start
            elif has_value and gapstart:
                # Only fill gaps at least one archive point wide.
                if (start - gapstart) >= archive['secondsPerPoint']:
                    fill(src, dst, gapstart - step, start)
                gapstart = None
            start += step
        # Trailing gap extends to the end of the fetched range.
        if gapstart:
            fill(src, dst, gapstart - step, end - step)
        # Older (coarser) archives only need to cover what this one could not.
        startFrom = fromTime
Fills gaps in dst using data from src .
45,001
def data(path, hours, offset=0):
    """Does the metric at ``path`` have any whisper data newer than ``hours``?

    Returns True when every point in the inspected window is None, i.e. no
    fresh data exists in that window.
    """
    now = time.time()
    window_end = now - _to_sec(offset)
    window_start = window_end - _to_sec(hours)
    fetched = whisper.fetch(path, window_start, window_end)
    return all(point is None for point in fetched[-1])
Does the metric at path have any whisper data newer than hours ?
45,002
def stat(path, hours, offset=None):
    """Return True when the file at ``path`` has NOT been modified within ``hours``.

    NOTE(review): ``offset`` is accepted for signature compatibility but is
    unused — confirm against callers before removing.
    """
    cutoff = time.time() - _to_sec(hours)
    return os.stat(path).st_mtime < cutoff
Returns True when the metric file at path has not been modified within the last hours.
45,003
def short_path(path, cwd=None):
    """Return relative or absolute form of ``path``, whichever is shorter.

    Non-string inputs are returned unchanged.  ``cwd`` defaults to the
    current working directory.
    """
    if not isinstance(path, str):
        return path
    base = os.getcwd() if cwd is None else cwd
    absolute = os.path.abspath(path)
    relative = os.path.relpath(path, base)
    return absolute if len(absolute) <= len(relative) else relative
Return relative or absolute path name whichever is shortest .
45,004
def check_rest(module, names, dots=True):
    """Check reStructuredText formatting of docstrings.

    Validates the docstring of ``module`` itself (unless exempted via
    OTHER_MODULE_DOCS) and of each named attribute, returning a list of
    ``(name, success, message)``-style tuples produced by
    ``validate_rst_syntax``.
    """
    try:
        skip_types = (dict, str, unicode, float, int)
    except NameError:
        # Python 3: no `unicode` builtin.
        skip_types = (dict, str, float, int)
    results = []
    if module.__name__[6:] not in OTHER_MODULE_DOCS:
        results += [(module.__name__,) +
                    validate_rst_syntax(inspect.getdoc(module),
                                        module.__name__, dots=dots)]
    for name in names:
        full_name = module.__name__ + '.' + name
        obj = getattr(module, name, None)
        if obj is None:
            results.append((full_name, False,
                            "%s has no docstring" % (full_name,)))
            continue
        elif isinstance(obj, skip_types):
            continue
        if inspect.ismodule(obj):
            text = inspect.getdoc(obj)
        else:
            try:
                text = str(get_doc_object(obj))
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                import traceback
                results.append((full_name, False,
                                "Error in docstring format!\n" +
                                traceback.format_exc()))
                continue
        # Non-printable control characters usually mean a missing r""" prefix.
        m = re.search(r"([\x00-\x09\x0b-\x1f])", text)
        if m:
            msg = ("Docstring contains a non-printable character %r! "
                   "Maybe forgot r\"\"\"?" % (m.group(1),))
            results.append((full_name, False, msg))
            continue
        try:
            src_file = short_path(inspect.getsourcefile(obj))
        except TypeError:
            # Builtins and C extensions have no source file.
            src_file = None
        if src_file:
            file_full_name = src_file + ':' + full_name
        else:
            file_full_name = full_name
        results.append((full_name,) +
                       validate_rst_syntax(text, file_full_name, dots=dots))
    return results
Check reStructuredText formatting of docstrings
45,005
def update_header(self):
    """Updates header to edffile struct.

    Pushes every header attribute currently stored on this object
    (technician, patient info, start time, per-channel settings, ...)
    into the underlying edf file handle via the low-level setters.
    """
    set_technician(self.handle, du(self.technician))
    set_recording_additional(self.handle, du(self.recording_additional))
    set_patientname(self.handle, du(self.patient_name))
    set_patientcode(self.handle, du(self.patient_code))
    set_patient_additional(self.handle, du(self.patient_additional))
    set_equipment(self.handle, du(self.equipment))
    set_admincode(self.handle, du(self.admincode))
    # Gender may be an int code or the strings "Male"/"Female"; any other
    # value leaves the gender field untouched.
    if isinstance(self.gender, int):
        set_gender(self.handle, self.gender)
    elif self.gender == "Male":
        set_gender(self.handle, 0)
    elif self.gender == "Female":
        set_gender(self.handle, 1)
    set_datarecord_duration(self.handle, self.duration)
    set_number_of_annotation_signals(self.handle, self.number_of_annotations)
    set_startdatetime(self.handle,
                      self.recording_start_time.year,
                      self.recording_start_time.month,
                      self.recording_start_time.day,
                      self.recording_start_time.hour,
                      self.recording_start_time.minute,
                      self.recording_start_time.second)
    # Birthdate may be a '%d %b %Y' string (empty string = unset) or a
    # date-like object with year/month/day attributes.
    if isstr(self.birthdate):
        if self.birthdate != '':
            birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()
            set_birthdate(self.handle,
                          birthday.year, birthday.month, birthday.day)
    else:
        set_birthdate(self.handle,
                      self.birthdate.year, self.birthdate.month,
                      self.birthdate.day)
    # Per-channel parameters.
    for i in np.arange(self.n_channels):
        set_samplefrequency(self.handle, i, self.channels[i]['sample_rate'])
        set_physical_maximum(self.handle, i, self.channels[i]['physical_max'])
        set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])
        set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])
        set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])
        set_label(self.handle, i, du(self.channels[i]['label']))
        set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))
        set_transducer(self.handle, i, du(self.channels[i]['transducer']))
        set_prefilter(self.handle, i, du(self.channels[i]['prefilter']))
Updates header to edffile struct
45,006
def setHeader(self, fileHeader):
    """Sets the file header from a dict and pushes it to the edf struct."""
    # (attribute name, dict key) pairs, in the order they are read.
    field_map = (
        ("technician", "technician"),
        ("recording_additional", "recording_additional"),
        ("patient_name", "patientname"),
        ("patient_additional", "patient_additional"),
        ("patient_code", "patientcode"),
        ("equipment", "equipment"),
        ("admincode", "admincode"),
        ("gender", "gender"),
        ("recording_start_time", "startdate"),
        ("birthdate", "birthdate"),
    )
    for attr, key in field_map:
        setattr(self, attr, fileHeader[key])
    self.update_header()
Sets the file header
45,007
def setSignalHeader(self, edfsignal, channel_info):
    """Sets the parameters for signal ``edfsignal`` and refreshes the header."""
    if not 0 <= edfsignal <= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal] = channel_info
    self.update_header()
Sets the parameter for signal edfsignal .
45,008
def setSignalHeaders(self, signalHeaders):
    """Sets the parameters for all signals from a list of header dicts."""
    for channel_index in np.arange(self.n_channels):
        self.channels[channel_index] = signalHeaders[channel_index]
    self.update_header()
Sets the parameter for all signals
45,009
def set_number_of_annotation_signals(self, number_of_annotations):
    """Sets the number of annotation signals, clamped to the range [1, 64].

    Optional; call only after opening a file in write mode and before the
    first sample write.  The default of 1 suffices unless more annotations
    than seconds of recording must be stored.
    """
    clamped = int(number_of_annotations)
    if clamped > 64:
        clamped = 64
    if clamped < 1:
        clamped = 1
    self.number_of_annotations = clamped
    self.update_header()
Sets the number of annotation signals. The default value is 1. This function is optional and can be called only after opening a file in write mode and before the first sample write action. Normally you don't need to change the default value. Only when the number of annotations you want to write is more than the number of seconds of the duration of the recording can you use this function to increase the storage space for annotations. Minimum is 1, maximum is 64.
45,010
def setStartdatetime(self, recording_start_time):
    """Sets the recording start time.

    Accepts a datetime or a string in '%d %b %Y %H:%M:%S' format.
    """
    if not isinstance(recording_start_time, datetime):
        recording_start_time = datetime.strptime(
            recording_start_time, "%d %b %Y %H:%M:%S")
    self.recording_start_time = recording_start_time
    self.update_header()
Sets the recording start Time
45,011
def setSamplefrequency(self, edfsignal, samplefrequency):
    """Sets the sample frequency of signal ``edfsignal``."""
    if not 0 <= edfsignal <= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['sample_rate'] = samplefrequency
    self.update_header()
Sets the samplefrequency of signal edfsignal .
45,012
def setPhysicalMaximum(self, edfsignal, physical_maximum):
    """Sets the physical_max of signal ``edfsignal``."""
    if not 0 <= edfsignal <= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_max'] = physical_maximum
    self.update_header()
Sets the physical_maximum of signal edfsignal .
45,013
def setPhysicalMinimum(self, edfsignal, physical_minimum):
    """Sets the physical_min of signal ``edfsignal``."""
    if not 0 <= edfsignal <= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['physical_min'] = physical_minimum
    self.update_header()
Sets the physical_minimum of signal edfsignal .
45,014
def setDigitalMaximum(self, edfsignal, digital_maximum):
    """Sets the digital_max of signal ``edfsignal``.

    Usually 32767 for EDF+ and 8388607 for BDF+.
    """
    if not 0 <= edfsignal <= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['digital_max'] = digital_maximum
    self.update_header()
Sets the digital_maximum of signal edfsignal. Usually the value 32767 is used for EDF+ and 8388607 for BDF+.
45,015
def setTransducer(self, edfsignal, transducer):
    """Sets the transducer description of signal ``edfsignal``."""
    if not 0 <= edfsignal <= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['transducer'] = transducer
    self.update_header()
Sets the transducer of signal edfsignal
45,016
def readAnnotations(self):
    """Annotations from an edf-file.

    Returns a tuple of numpy arrays (times_in_seconds, durations, texts).
    Raw annotation times appear to be in units of 100 ns (hence the
    division by 10000000 below).
    """
    annot = self.read_annotation()
    annot = np.array(annot)
    if (annot.shape[0] == 0):
        # No annotations present.
        return np.array([]), np.array([]), np.array([])
    ann_time = self._get_float(annot[:, 0])
    ann_text = annot[:, 2]
    ann_text_out = ["" for x in range(len(annot[:, 1]))]
    for i in np.arange(len(annot[:, 1])):
        ann_text_out[i] = self._convert_string(ann_text[i])
        # An empty duration field is mapped to -1 before the float parse.
        if annot[i, 1] == '':
            annot[i, 1] = '-1'
    ann_duration = self._get_float(annot[:, 1])
    return ann_time / 10000000, ann_duration, np.array(ann_text_out)
Annotations from a edf - file
45,017
def getHeader(self):
    """Returns the file header as a dict."""
    getters = (
        ("technician", self.getTechnician),
        ("recording_additional", self.getRecordingAdditional),
        ("patientname", self.getPatientName),
        ("patient_additional", self.getPatientAdditional),
        ("patientcode", self.getPatientCode),
        ("equipment", self.getEquipment),
        ("admincode", self.getAdmincode),
        ("gender", self.getGender),
        ("startdate", self.getStartdatetime),
        ("birthdate", self.getBirthdate),
    )
    return {key: get() for key, get in getters}
Returns the file header as dict
45,018
def getSignalHeader(self, chn):
    """Returns the header of one signal as a dict."""
    accessors = (
        ('label', self.getLabel),
        ('dimension', self.getPhysicalDimension),
        ('sample_rate', self.getSampleFrequency),
        ('physical_max', self.getPhysicalMaximum),
        ('physical_min', self.getPhysicalMinimum),
        ('digital_max', self.getDigitalMaximum),
        ('digital_min', self.getDigitalMinimum),
        ('prefilter', self.getPrefilter),
        ('transducer', self.getTransducer),
    )
    return {key: accessor(chn) for key, accessor in accessors}
Returns the header of one signal as dicts
45,019
def getSignalHeaders(self):
    """Returns the headers of all signals as a list of dicts."""
    return [self.getSignalHeader(chn)
            for chn in np.arange(self.signals_in_file)]
Returns the header of all signals as array of dicts
45,020
def getStartdatetime(self):
    """Returns the recording start date and time as a datetime object."""
    fields = (self.startdate_year, self.startdate_month, self.startdate_day,
              self.starttime_hour, self.starttime_minute, self.starttime_second)
    return datetime(*fields)
Returns the date and starttime as datetime object
45,021
def getBirthdate(self, string=True):
    """Returns the birthdate as a string, or as a datetime when string=False.

    The stored value is parsed with the '%d %b %Y' format.
    """
    text = self._convert_string(self.birthdate.rstrip())
    if string:
        return text
    return datetime.strptime(text, "%d %b %Y")
Returns the birthdate as string object
45,022
def getSampleFrequencies(self):
    """Returns the (rounded) sample frequencies of all signals as an array."""
    frequencies = [round(self.samplefrequency(chn))
                   for chn in np.arange(self.signals_in_file)]
    return np.array(frequencies)
Returns samplefrequencies of all signals .
45,023
def getSampleFrequency(self, chn):
    """Returns the rounded sample frequency of signal ``chn``, 0 if out of range."""
    if chn < 0 or chn >= self.signals_in_file:
        return 0
    return round(self.samplefrequency(chn))
Returns the samplefrequency of signal edfsignal .
45,024
def getPhysicalMaximum(self, chn=None):
    """Returns the physical maximum of signal ``chn``.

    With chn=None, returns an array of physical maxima for all signals.
    An out-of-range chn yields 0.
    """
    if chn is None:
        values = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            values[i] = self.physical_max(i)
        return values
    if 0 <= chn < self.signals_in_file:
        return self.physical_max(chn)
    return 0
Returns the maximum physical value of signal edfsignal .
45,025
def getPhysicalMinimum(self, chn=None):
    """Returns the physical minimum of signal ``chn``.

    With chn=None, returns an array of physical minima for all signals.
    An out-of-range chn yields 0.
    """
    if chn is None:
        values = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            values[i] = self.physical_min(i)
        return values
    if 0 <= chn < self.signals_in_file:
        return self.physical_min(chn)
    return 0
Returns the minimum physical value of signal edfsignal .
45,026
def getDigitalMaximum(self, chn=None):
    """Returns the digital maximum of signal ``chn``.

    With chn=None, returns an array of digital maxima for all signals.
    An out-of-range chn yields 0.
    """
    if chn is None:
        values = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            values[i] = self.digital_max(i)
        return values
    if 0 <= chn < self.signals_in_file:
        return self.digital_max(chn)
    return 0
Returns the maximum digital value of signal edfsignal .
45,027
def getDigitalMinimum(self, chn=None):
    """Returns the digital minimum of signal ``chn``.

    With chn=None, returns an array of digital minima for all signals.
    An out-of-range chn yields 0.
    """
    if chn is None:
        values = np.zeros(self.signals_in_file)
        for i in np.arange(self.signals_in_file):
            values[i] = self.digital_min(i)
        return values
    if 0 <= chn < self.signals_in_file:
        return self.digital_min(chn)
    return 0
Returns the minimum digital value of signal edfsignal .
45,028
def readSignal(self, chn, start=0, n=None):
    """Returns the physical data of signal ``chn`` as a float64 array.

    ``start`` and ``n`` select a subset; invalid arguments (negative
    start/n, channel out of range, n beyond the signal length) yield an
    empty array.
    """
    if start < 0 or (n is not None and n < 0):
        return np.array([])
    nsamples = self.getNSamples()
    if chn >= len(nsamples):
        return np.array([])
    if n is None:
        n = nsamples[chn]
    elif n > nsamples[chn]:
        return np.array([])
    buf = np.zeros(n, dtype=np.float64)
    self.readsignal(chn, start, n, buf)
    return buf
Returns the physical data of signal chn . When start and n is set a subset is returned
45,029
def stackplot(marray, seconds=None, start_time=None, ylabels=None):
    """Plot a stack of traces one above the other.

    Assumes marray.shape == (numRows, numSamples); transposes and delegates
    to stackplot_t, then shows the figure.
    """
    stackplot_t(
        np.transpose(marray),
        seconds=seconds,
        start_time=start_time,
        ylabels=ylabels,
    )
    plt.show()
will plot a stack of traces one above the other assuming marray . shape = numRows numSamples
45,030
def stackplot_t(tarray, seconds=None, start_time=None, ylabels=None):
    """Plot a stack of traces one above the other.

    Assumes tarray.shape == (numSamples, numRows).  When ``seconds`` is
    given the x axis is in seconds (optionally offset by ``start_time``);
    otherwise it is in sample indices.  ``ylabels`` defaults to the row
    numbers.
    """
    data = tarray
    numSamples, numRows = tarray.shape
    if seconds:
        t = seconds * np.arange(numSamples, dtype=float) / numSamples
        if start_time:
            t = t + start_time
            xlm = (start_time, start_time + seconds)
        else:
            xlm = (0, seconds)
    else:
        t = np.arange(numSamples, dtype=float)
        xlm = (0, numSamples)
    ticklocs = []
    ax = plt.subplot(111)
    plt.xlim(*xlm)
    dmin = data.min()
    dmax = data.max()
    dr = (dmax - dmin) * 0.7  # vertical spacing between traces (allows overlap)
    y0 = dmin
    y1 = (numRows - 1) * dr + dmax
    plt.ylim(y0, y1)
    segs = []
    for i in range(numRows):
        segs.append(np.hstack((t[:, np.newaxis], data[:, i, np.newaxis])))
        ticklocs.append(i * dr)
    offsets = np.zeros((numRows, 2), dtype=float)
    offsets[:, 1] = ticklocs
    lines = LineCollection(segs, offsets=offsets, transOffset=None)
    ax.add_collection(lines)
    ax.set_yticks(ticklocs)
    # BUG FIX: the default labels were assigned to `plt.ylabels` (a dead,
    # unused attribute) instead of the local `ylabels`, so the default
    # (ylabels=None) path passed None to set_yticklabels.
    if ylabels is None:
        ylabels = ["%d" % ii for ii in range(numRows)]
    ax.set_yticklabels(ylabels)
    plt.xlabel('time (s)')
will plot a stack of traces one above the other assuming tarray . shape = numSamples numRows
45,031
def find_path(start, goal, neighbors_fnct, reversePath=False,
              heuristic_cost_estimate_fnct=lambda a, b: Infinite,
              distance_between_fnct=lambda a, b: 1.0,
              is_goal_reached_fnct=lambda a, b: a == b):
    """A non-class version of the path finding algorithm.

    Wraps the supplied callables in an ad-hoc AStar subclass and runs the
    search.  Returns whatever ``AStar.astar`` returns (presumably the path
    or None when no path exists — defined by the AStar base class).
    """
    class FindPath(AStar):
        # Each override simply forwards to the corresponding callable.

        def heuristic_cost_estimate(self, current, goal):
            return heuristic_cost_estimate_fnct(current, goal)

        def distance_between(self, n1, n2):
            return distance_between_fnct(n1, n2)

        def neighbors(self, node):
            return neighbors_fnct(node)

        def is_goal_reached(self, current, goal):
            return is_goal_reached_fnct(current, goal)

    return FindPath().astar(start, goal, reversePath)
A non - class version of the path finding algorithm
45,032
def validate(source, **options):
    """Validates a source file and returns a report."""
    source, options, inspector_settings = _parse_arguments(source, **options)
    inspector = Inspector(**inspector_settings)
    return inspector.inspect(source, **options)
Validates a source file and returns a report .
45,033
def init_datapackage(resource_paths):
    """Create a tabular data package with the given resources.

    Each path is added via Package.infer; the descriptor starts with a
    placeholder name.
    """
    descriptor = {
        'name': 'change-me',
        'schema': 'tabular-data-package',
    }
    package = datapackage.Package(descriptor)
    for resource_path in resource_paths:
        package.infer(resource_path)
    return package
Create tabular data package with resources .
45,034
def init(paths, output, **kwargs):
    """Init a data package from a list of files and echo its descriptor."""
    package = goodtables.init_datapackage(paths)
    descriptor_json = json_module.dumps(package.descriptor, indent=4)
    click.secho(descriptor_json, file=output)
    # Exit status reflects descriptor validity.
    exit(package.valid)
Init data package from list of files .
45,035
def _clean_empty ( d ) : if not isinstance ( d , ( dict , list ) ) : return d if isinstance ( d , list ) : return [ v for v in ( _clean_empty ( v ) for v in d ) if v is not None ] return { k : v for k , v in ( ( k , _clean_empty ( v ) ) for k , v in d . items ( ) ) if v is not None }
Remove None values from a dict .
45,036
def create_cells(headers, schema_fields, values=None, row_number=None):
    """Create a list of cells from headers, schema fields and values.

    The three sequences are zipped with a sentinel fill value; a missing
    header/field becomes None, and on the header row each cell's value is
    the header itself.
    """
    fillvalue = '_fillvalue'
    is_header_row = values is None
    triples = zip_longest(headers, schema_fields, values or [],
                          fillvalue=fillvalue)
    cells = []
    for column_number, (header, field, value) in enumerate(triples, start=1):
        if header == fillvalue:
            header = None
        elif is_header_row:
            value = header
        if field == fillvalue:
            field = None
        if value == fillvalue:
            value = None
        elif value is None:
            value = ''
        cells.append(create_cell(header, value, field, column_number, row_number))
    return cells
Create list of cells from headers fields and values .
45,037
def __impl_read_chain(self, start, read_sector_f, read_fat_f):
    """Returns the entire contents of a chain starting at the given sector.

    Follows the FAT from ``start`` until ENDOFCHAIN, concatenating each
    sector's bytes.  A revisited sector means the chain is corrupt; the
    data gathered so far is returned after logging the loop.
    """
    sector = start
    check = [sector]  # sectors already visited, for loop detection
    buffer = StringIO()
    while sector != ENDOFCHAIN:
        buffer.write(read_sector_f(sector))
        # BUG FIX: `next` shadowed the builtin, and the log line referenced
        # an undefined name `sector_start`, raising NameError on corrupt files.
        next_sector = read_fat_f(sector)
        if next_sector in check:
            logging.error(
                'infinite loop detected at {0} to {1} starting at {2}'.format(
                    sector, next_sector, start))
            return buffer.getvalue()
        check.append(next_sector)
        sector = next_sector
    return buffer.getvalue()
Returns the entire contents of a chain starting at the given sector .
45,038
def get_charm_url(self):
    """Get the charm URL for the bracket this rank falls in."""
    # (upper rank_id bound, charm index) pairs, checked in ascending order.
    brackets = ((4, 0), (8, 1), (12, 2), (16, 3), (19, 4))
    for upper_bound, charm_index in brackets:
        if self.rank_id <= upper_bound:
            return self.RANK_CHARMS[charm_index]
    return self.RANK_CHARMS[5]
Get charm URL for the bracket this rank is in
45,039
def load_rank(self, region, season=-1):
    """|coro|

    Loads the player's rank for this region and season.

    Caches the resulting Rank in ``self.ranks`` under the key
    "<region>:<season>" and returns it.  Raises InvalidRequest when the
    response JSON does not contain this player.
    """
    data = yield from self.auth.get("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s/r6karma/players?board_id=pvp_ranked&profile_ids=%s&region_id=%s&season_id=%s" % (self.spaceid, self.platform_url, self.id, region, season))
    if "players" in data and self.id in data["players"]:
        regionkey = "%s:%s" % (region, season)
        self.ranks[regionkey] = Rank(data["players"][self.id])
        return self.ranks[regionkey]
    else:
        raise InvalidRequest("Missing players key in returned JSON object %s" % str(data))
|coro| Loads the players rank for this region and season
45,040
def libdmtx_function(fname, restype, *args):
    """Returns a foreign function exported by libdmtx.

    Builds a ctypes prototype from the return type and argument types, then
    binds it to the named symbol in the loaded library.
    """
    func_type = CFUNCTYPE(restype, *args)
    return func_type((fname, load_libdmtx()))
Returns a foreign function exported by libdmtx .
45,041
def _image(pixels, width, height, pack):
    """A context manager for a DmtxImage, destroyed on exit.

    Raises PyLibDMTXError when the image cannot be created.
    """
    img = dmtxImageCreate(pixels, width, height, pack)
    if not img:
        raise PyLibDMTXError('Could not create image')
    try:
        yield img
    finally:
        dmtxImageDestroy(byref(img))
A context manager for DmtxImage created and destroyed by dmtxImageCreate and dmtxImageDestroy .
45,042
def _decoder(image, shrink):
    """A context manager for a DmtxDecode, destroyed on exit.

    Raises PyLibDMTXError when the decoder cannot be created.
    """
    dec = dmtxDecodeCreate(image, shrink)
    if not dec:
        raise PyLibDMTXError('Could not create decoder')
    try:
        yield dec
    finally:
        dmtxDecodeDestroy(byref(dec))
A context manager for DmtxDecode created and destroyed by dmtxDecodeCreate and dmtxDecodeDestroy .
45,043
def _region(decoder, timeout):
    """A context manager for the next DmtxRegion, destroyed on exit.

    The yielded region may be null (falsy); it is only destroyed when set.
    """
    found = dmtxRegionFindNext(decoder, timeout)
    try:
        yield found
    finally:
        if found:
            dmtxRegionDestroy(byref(found))
A context manager for DmtxRegion created and destroyed by dmtxRegionFindNext and dmtxRegionDestroy .
45,044
def _decoded_matrix_region(decoder, region, corrections):
    """A context manager for a DmtxMessage, destroyed on exit.

    The yielded message may be null (falsy); it is only destroyed when set.
    """
    msg = dmtxDecodeMatrixRegion(decoder, region, corrections)
    try:
        yield msg
    finally:
        if msg:
            dmtxMessageDestroy(byref(msg))
A context manager for DmtxMessage created and destroyed by dmtxDecodeMatrixRegion and dmtxMessageDestroy.
45,045
def _decode_region(decoder, region, corrections, shrink):
    """Decodes and returns the value in a region.

    Returns a Decoded(data, Rect) tuple, or None when the region could not
    be decoded.  The rectangle is mapped back from normalized "fit"
    coordinates to raw pixel coordinates, undoing the decoder's shrink.
    """
    with _decoded_matrix_region(decoder, region, corrections) as msg:
        if msg:
            # Map the unit-square corners through the region's fit2raw
            # transform to recover the barcode's bounding box.
            p00 = DmtxVector2()
            p11 = DmtxVector2(1.0, 1.0)
            dmtxMatrix3VMultiplyBy(p00, region.contents.fit2raw)
            dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw)
            # + 0.5 rounds to the nearest pixel.
            x0 = int((shrink * p00.X) + 0.5)
            y0 = int((shrink * p00.Y) + 0.5)
            x1 = int((shrink * p11.X) + 0.5)
            y1 = int((shrink * p11.Y) + 0.5)
            return Decoded(
                string_at(msg.contents.output),
                Rect(x0, y0, x1 - x0, y1 - y0)
            )
        else:
            return None
Decodes and returns the value in a region .
45,046
def encode(data, scheme=None, size=None):
    """Encodes data in a DataMatrix image.

    ``scheme`` is an encoding scheme name (default 'Ascii'); ``size`` is a
    symbol size name (default 'ShapeAuto').  Returns an
    Encoded(width, height, bpp, pixels) tuple.  Raises PyLibDMTXError for
    invalid scheme/size names or when encoding fails.
    """
    size = size if size else 'ShapeAuto'
    size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size)
    if not hasattr(DmtxSymbolSize, size_name):
        raise PyLibDMTXError(
            'Invalid size [{0}]: should be one of {1}'.format(
                size, ENCODING_SIZE_NAMES))
    size = getattr(DmtxSymbolSize, size_name)
    scheme = scheme if scheme else 'Ascii'
    scheme_name = '{0}{1}'.format(ENCODING_SCHEME_PREFIX, scheme.capitalize())
    if not hasattr(DmtxScheme, scheme_name):
        raise PyLibDMTXError(
            'Invalid scheme [{0}]: should be one of {1}'.format(
                scheme, ENCODING_SCHEME_NAMES))
    scheme = getattr(DmtxScheme, scheme_name)
    with _encoder() as encoder:
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme)
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size)
        if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0:
            raise PyLibDMTXError(
                'Could not encode data, possibly because the image is not '
                'large enough to contain the data')
        # Read back the dimensions of the rendered image.
        w, h, bpp = map(
            partial(dmtxImageGetProp, encoder[0].image),
            (DmtxProperty.DmtxPropWidth,
             DmtxProperty.DmtxPropHeight,
             DmtxProperty.DmtxPropBitsPerPixel))
        size = w * h * bpp // 8
        # Copy the raw pixel buffer out of the encoder before it is freed.
        pixels = cast(
            encoder[0].image[0].pxl, ctypes.POINTER(ctypes.c_ubyte * size))
        return Encoded(
            width=w, height=h, bpp=bpp,
            pixels=ctypes.string_at(pixels, size))
Encodes data in a DataMatrix image .
45,047
def add_edge(edges, edge_points, coords, i, j):
    """Add a line between the i-th and j-th points if not in the list already.

    Mutates ``edges`` (a set of (i, j) tuples) and ``edge_points`` (a list
    of coordinate pairs) in place.  Always returns None.
    """
    if (i, j) in edges or (j, i) in edges:
        return
    edges.add((i, j))
    # FIX: the original accidentally returned a (None, None) tuple from the
    # add path while the duplicate path returned None; mutators now return
    # None consistently.
    edge_points.append(coords[[i, j]])
Add a line between the i - th and j - th points if not in the list already
45,048
def sequence(self):
    """Sort the points in the line along the axis given by self.Sort.

    self.Sort may be 'X'/'x', 'Y'/'y' or (for 3-D points) 'Z'/'z'; any
    other value leaves the order unchanged.  self.order() is then called
    with the points.  Points of dimension other than 2 or 3 are ignored.
    """
    dimension = len(self.Points[0])
    if dimension not in (2, 3):
        return
    axis = None
    if self.Sort in ('X', 'x'):
        axis = 0
    elif self.Sort in ('Y', 'y'):
        axis = 1
    elif dimension == 3 and self.Sort in ('Z', 'z'):
        # BUG FIX: the original compared "Sort == 'Z' or Sort == 'Z'"
        # (uppercase twice), so lowercase 'z' never triggered a sort.
        axis = 2
    if axis is not None:
        self.Points.sort(key=lambda point: point[axis])
    self.order(self.Points)
sort the points in the line with given option
45,049
def resample(df, rule, time_index, groupby=None, aggregation='mean'):
    """pd.DataFrame.resample adapter.

    Resamples ``df`` on the ``time_index`` column at frequency ``rule`` and
    applies the named ``aggregation``.  When ``groupby`` columns are given,
    resampling happens per group and the group columns are dropped from the
    result afterwards.
    """
    if groupby:
        df = df.groupby(groupby)
    df = df.resample(rule, on=time_index)
    df = getattr(df, aggregation)()
    # BUG FIX: the loop previously ran unconditionally, raising TypeError
    # ("'NoneType' is not iterable") when groupby was None (the default).
    if groupby:
        for column in groupby:
            del df[column]
    return df
pd . DataFrame . resample adapter .
45,050
def _join_names ( names ) : levels = ( str ( name ) for name in names if name != '' ) return '_' . join ( levels )
Join the names of a multi - level index with an underscore .
45,051
def unstack(df, level=-1, reset_index=True):
    """pd.DataFrame.unstack adapter.

    Unstacks the given level; when ``reset_index`` is True the index is
    reset and the resulting multi-level column names are flattened with
    ``_join_names``.
    """
    unstacked = df.unstack(level=level)
    if not reset_index:
        return unstacked
    unstacked = unstacked.reset_index()
    unstacked.columns = unstacked.columns.map(_join_names)
    return unstacked
pd . DataFrame . unstack adapter .
45,052
def load_boston_multitask():
    """Boston House Prices Dataset with a synthetic multitask output.

    The second output column is a linear transform (2*y + 5) of the
    original target.
    """
    dataset = datasets.load_boston()
    y = dataset.target
    multitask_target = np.column_stack([y, 2 * y + 5])
    return Dataset(load_boston.__doc__, dataset.data, multitask_target, r2_score)
Boston House Prices Dataset with a synthetic multitask output .
45,053
def energy(data):
    """Computes the signal energy of ``data`` (channel-averaged).

    Averages across axis 1 first, then returns the mean of the squared
    samples.
    """
    averaged = np.mean(data, axis=1)
    return np.sum(averaged ** 2) / np.float64(len(averaged))
Computes signal energy of data
45,054
def zcr(data):
    """Computes the zero crossing rate of a segment (channel-averaged)."""
    signal = np.mean(data, axis=1)
    n = len(signal)
    # Each sign flip contributes |diff(sign)| == 2, hence the / 2.
    crossings = np.sum(np.abs(np.diff(np.sign(signal)))) / 2
    return np.float64(crossings) / np.float64(n - 1.0)
Computes zero crossing rate of segment
45,055
def spectral_flux(d0, d1):
    """Computes the spectral flux between the current and previous frame.

    Both frames are channel-averaged, transformed with FFT at a common
    size, normalized by their (EPSILON-stabilized) sums, and compared with
    a squared difference.
    """
    frame = np.mean(d0, axis=1)
    prev_frame = np.mean(d1, axis=1)
    n_fft = min(len(frame) // 2, len(prev_frame) // 2)
    spectrum = FFT(frame, n_fft)
    prev_spectrum = FFT(prev_frame, n_fft)
    spectrum_sum = np.sum(spectrum + EPSILON)
    prev_sum = np.sum(prev_spectrum + EPSILON)
    return np.sum((spectrum / spectrum_sum - prev_spectrum / prev_sum) ** 2)
Computes the spectral flux feature of the current frame
45,056
def rolling_window_sequences(X, index, window_size, target_size, target_column):
    """Create rolling window sequences out of timeseries data.

    Returns (windows, targets, window_times, target_times), where each
    window is ``window_size`` rows of X and each target is the next
    ``target_size`` values of the target column.
    """
    windows, targets = [], []
    window_times, target_times = [], []
    target = X[:, target_column]
    n_windows = len(X) - window_size - target_size + 1
    for start in range(n_windows):
        end = start + window_size
        windows.append(X[start:end])
        targets.append(target[end:end + target_size])
        window_times.append(index[start])
        target_times.append(index[end])
    return (np.asarray(windows), np.asarray(targets),
            np.asarray(window_times), np.asarray(target_times))
Create rolling window sequences out of timeseries data .
45,057
def time_segments_average(X, interval, time_column):
    """Compute the average of values over fixed-length time segments.

    Deprecated — see time_segments_aggregate.  Returns (values, index)
    arrays, one mean row per segment of length ``interval``.
    """
    warnings.warn(_TIME_SEGMENTS_AVERAGE_DEPRECATION_WARNING, DeprecationWarning)
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)
    X = X.sort_values(time_column).set_index(time_column)
    segment_start = X.index.values[0]
    last_ts = X.index.values[-1]
    values = []
    index = []
    while segment_start <= last_ts:
        segment_end = segment_start + interval
        # end - 1 keeps the slice half-open on label-based .loc indexing.
        segment = X.loc[segment_start:segment_end - 1]
        values.append(segment.mean(skipna=True).values)
        index.append(segment_start)
        segment_start = segment_end
    return np.asarray(values), np.asarray(index)
Compute average of values over fixed length time segments .
45,058
def time_segments_aggregate(X, interval, time_column, method=['mean']):
    """Aggregate values over fixed-length time segments.

    ``method`` may be a single aggregation name or a list of names; the
    per-segment results are concatenated.  Returns (values, index) arrays.
    """
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)
    X = X.sort_values(time_column).set_index(time_column)
    methods = [method] if isinstance(method, str) else method
    segment_start = X.index.values[0]
    last_ts = X.index.values[-1]
    values = []
    index = []
    while segment_start <= last_ts:
        segment_end = segment_start + interval
        # end - 1 keeps the slice half-open on label-based .loc indexing.
        segment = X.loc[segment_start:segment_end - 1]
        aggregated = [getattr(segment, agg)(skipna=True).values
                      for agg in methods]
        values.append(np.concatenate(aggregated))
        index.append(segment_start)
        segment_start = segment_end
    return np.asarray(values), np.asarray(index)
Aggregate values over fixed length time segments .
45,059
def image_transform(X, function, reshape_before=False, reshape_after=False,
                    width=None, height=None, **kwargs):
    """Apply a function image by image.

    ``function`` may be a callable or an importable dotted path.  With
    ``reshape_before`` and flat images, each image is reshaped to
    (width, height) — inferred as a square when sizes are not given.
    With ``reshape_after``, each result is flattened back to the original
    length.
    """
    if not callable(function):
        function = import_object(function)
        # BUG FIX: this check previously hung on an `elif` of the exact same
        # condition as the `if` above, making it unreachable — a failed
        # import producing a non-callable went undetected.
        if not callable(function):
            raise ValueError("function must be a str or a callable")
    flat_image = len(X[0].shape) == 1
    if reshape_before and flat_image:
        if not (width and height):
            # Infer a square side length when no explicit sizes are given.
            side_length = math.sqrt(X.shape[1])
            if not side_length.is_integer():
                raise ValueError("Image sizes must be given for non-square images")
            width = height = int(side_length)
    else:
        reshape_before = False
    new_X = []
    for image in X:
        if reshape_before:
            image = image.reshape((width, height))
        features = function(image, **kwargs)
        if reshape_after:
            features = np.reshape(features, X.shape[1])
        new_X.append(features)
    return np.array(new_X)
Apply a function image by image .
45,060
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    """Compute an array of absolute errors comparing predictions and expected output.

    When ``smooth`` is True the errors are smoothed with an EWMA whose span
    is ``smoothing_window * len(y)``.
    """
    errors = np.abs(y - y_hat)[:, 0]
    if not smooth:
        return errors
    span = int(smoothing_window * len(y))
    return pd.Series(errors).ewm(span=span).mean().values
Compute an array of absolute errors comparing predictions and expected output .
45,061
def deltas(errors, epsilon, mean, std):
    """Compute mean and std deltas relative to errors at or below epsilon.

    Returns (0, 0) when no error falls at or below epsilon.
    """
    below = errors[errors <= epsilon]
    if len(below) == 0:
        return 0, 0
    return mean - below.mean(), std - below.std()
Compute mean and std deltas .
45,062
def count_above(errors, epsilon):
    """Count errors above epsilon and continuous runs above epsilon."""
    above = pd.Series(errors > epsilon)
    total_above = int(above.sum())
    # A run starts wherever the above-flag differs from its predecessor.
    run_starts = above & (above != above.shift(1))
    total_consecutive = int(run_starts.sum())
    return total_above, total_consecutive
Count number of errors and continuous sequences above epsilon .
45,063
def z_cost(z, errors, mean, std):
    """Compute how bad a z value is; lower cost indicates a better threshold."""
    threshold = mean + z * std
    mean_delta, std_delta = deltas(errors, threshold, mean, std)
    n_above, n_runs = count_above(errors, threshold)
    penalty = n_above + n_runs ** 2
    if penalty == 0:
        return np.inf
    reward = -(mean_delta / mean + std_delta / std)
    return reward / penalty
Compute how bad a z value is .
45,064
def find_threshold(errors, z_range=(0, 10)):
    """Find the ideal threshold as ``mean + best_z * std``.

    Runs a local optimization of ``z_cost`` from each integer starting
    point in ``z_range`` and keeps the z with the lowest cost found.
    """
    mean = errors.mean()
    std = errors.std()
    min_z, max_z = z_range
    best_z = min_z
    best_cost = np.inf
    for z in range(min_z, max_z):
        best = fmin(z_cost, z, args=(errors, mean, std),
                    full_output=True, disp=False)
        z, cost = best[0:2]
        if cost < best_cost:
            best_z = z[0]
            # BUG FIX: best_cost was never updated, so the comparison was
            # always against inf and the last starting point always won.
            best_cost = cost
    return mean + best_z * std
Find the ideal threshold .
45,065
def find_sequences(errors, epsilon):
    """Find (start, stop) index sequences of values above epsilon."""
    above = pd.Series(errors > epsilon)
    shifted = above.shift(1).fillna(False)
    change = above != shifted
    idx = above.index
    starts = idx[above & change].tolist()
    ends = (idx[~above & change] - 1).tolist()
    # A run still open at the tail ends at the last index.
    if len(ends) == len(starts) - 1:
        ends.append(len(above) - 1)
    return list(zip(starts, ends))
Find sequences of values that are above epsilon .
45,066
def find_anomalies(errors, index, z_range=(0, 10)):
    """Find anomalous sequences of errors and score them.

    Returns:
        Array of ``[first index value, last index value, score]`` rows,
        one per detected sequence.
    """
    threshold = find_threshold(errors, z_range)
    denominator = errors.mean() + errors.std()
    anomalies = []
    for start, stop in find_sequences(errors, threshold):
        peak = errors[start:stop + 1].max()
        score = (peak - threshold) / denominator
        anomalies.append([index[start], index[stop], score])
    return np.asarray(anomalies)
Find sequences of values that are anomalous .
45,067
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y):
    """Apply an OpenCV Gaussian blur to every image in X."""
    kernel_size = (ksize_width, ksize_height)
    return image_transform(X, cv2.GaussianBlur, ksize=kernel_size,
                           sigmaX=sigma_x, sigmaY=sigma_y)
Apply Gaussian blur to the given data .
45,068
def get_anomalies(smoothed_errors, y_true, z, window, all_anomalies, error_buffer):
    """Helper that finds anomaly indices in the smoothed errors.

    The threshold is ``epsilon = mean + z * std`` of the smoothed errors;
    grouped anomalous sequences are then pruned before returning.
    """
    epsilon = np.mean(smoothed_errors) + z * np.std(smoothed_errors)
    errors_seq, anomaly_indices, max_error_below_e = group_consecutive_anomalies(
        smoothed_errors, epsilon, y_true, error_buffer, window, all_anomalies)
    if len(errors_seq) > 0:
        anomaly_indices = prune_anomalies(
            errors_seq, smoothed_errors, max_error_below_e, anomaly_indices)
    return anomaly_indices
Helper method to get anomalies .
45,069
def prune_anomalies(e_seq, smoothed_errors, max_error_below_e, anomaly_indices):
    """Remove anomalous sequences whose peak error is not sufficiently
    separated from the next-highest peak, then keep only the anomaly
    indices that fall inside a surviving sequence.

    NOTE(review): mutates ``e_seq`` in place (sequences are deleted).
    """
    # Minimum relative drop between consecutive sorted peaks for a
    # sequence to count as a distinct anomaly.
    MIN_PERCENT_DECREASE = 0.05
    e_seq_max, smoothed_errors_max = [], []
    for error_seq in e_seq:
        # NOTE(review): slice excludes index error_seq[1] — presumably the
        # sequence end is exclusive here; confirm against the caller.
        if len(smoothed_errors[error_seq[0]:error_seq[1]]) > 0:
            sliced_errors = smoothed_errors[error_seq[0]:error_seq[1]]
            # Both lists get the per-sequence peak; e_seq_max keeps the
            # original order, smoothed_errors_max is sorted below.
            e_seq_max.append(max(sliced_errors))
            smoothed_errors_max.append(max(sliced_errors))
    smoothed_errors_max.sort(reverse=True)
    if max_error_below_e > 0:
        # Appended after the sort: since it is below epsilon it is smaller
        # than every sequence peak, so the descending order still holds.
        smoothed_errors_max.append(max_error_below_e)
    indices_remove = []
    for i in range(len(smoothed_errors_max)):
        if i < len(smoothed_errors_max) - 1:
            delta = smoothed_errors_max[i] - smoothed_errors_max[i + 1]
            perc_change = delta / smoothed_errors_max[i]
            if perc_change < MIN_PERCENT_DECREASE:
                # Map the peak value back to its sequence position.
                # NOTE(review): duplicate peak values would resolve to the
                # first occurrence — verify peaks are distinct in practice.
                indices_remove.append(e_seq_max.index(smoothed_errors_max[i]))
    # Delete from the back so earlier indices stay valid.
    for index in sorted(indices_remove, reverse=True):
        del e_seq[index]
    pruned_indices = []
    for i in anomaly_indices:
        for error_seq in e_seq:
            if i >= error_seq[0] and i <= error_seq[1]:
                pruned_indices.append(i)
    return pruned_indices
Helper method that removes anomalies which don't meet a minimum separation from the next anomaly.
45,070
def _configure_nodes ( self , nodes ) : if isinstance ( nodes , str ) : nodes = [ nodes ] elif not isinstance ( nodes , ( dict , list ) ) : raise ValueError ( 'nodes configuration should be a list or a dict,' ' got {}' . format ( type ( nodes ) ) ) conf_changed = False for node in nodes : conf = { 'hostname' : node , 'instance' : None , 'nodename' : node , 'port' : None , 'vnodes' : self . _default_vnodes , 'weight' : 1 } current_conf = self . runtime . _nodes . get ( node , { } ) nodename = node if not current_conf : conf_changed = True if isinstance ( nodes , dict ) : node_conf = nodes [ node ] if isinstance ( node_conf , int ) : conf [ 'weight' ] = node_conf elif isinstance ( node_conf , dict ) : for k , v in node_conf . items ( ) : if k in conf : conf [ k ] = v if k in [ 'nodename' , 'vnodes' , 'weight' ] : if current_conf . get ( k ) != v : conf_changed = True else : raise ValueError ( 'node configuration should be a dict or an int,' ' got {}' . format ( type ( node_conf ) ) ) if self . _weight_fn : conf [ 'weight' ] = self . _weight_fn ( ** conf ) if current_conf . get ( 'weight' ) != conf [ 'weight' ] : conf_changed = True self . runtime . _nodes [ nodename ] = conf return conf_changed
Parse and set up the given nodes .
45,071
def _get_pos ( self , key ) : p = bisect ( self . runtime . _keys , self . hashi ( key ) ) if p == len ( self . runtime . _keys ) : return 0 else : return p
Get the index of the given key in the sorted key list .
45,072
def _get ( self , key , what ) : if not self . runtime . _ring : return None pos = self . _get_pos ( key ) if what == 'pos' : return pos nodename = self . runtime . _ring [ self . runtime . _keys [ pos ] ] if what in [ 'hostname' , 'instance' , 'port' , 'weight' ] : return self . runtime . _nodes [ nodename ] [ what ] elif what == 'dict' : return self . runtime . _nodes [ nodename ] elif what == 'nodename' : return nodename elif what == 'tuple' : return ( self . runtime . _keys [ pos ] , nodename )
Generic getter magic method .
45,073
def get_instances(self):
    """Return the instance object of every configured node that has one."""
    instances = []
    for conf in self.runtime._nodes.values():
        instance = conf.get('instance')
        if instance:
            instances.append(instance)
    return instances
Returns a list of the instances of all the configured nodes .
45,074
def iterate_nodes(self, key, distinct=True):
    """hash_ring compatibility: yield node names for ``key``, or a single
    None when the ring is empty."""
    if not self.runtime._ring:
        yield None
        return
    for node in self.range(key, unique=distinct):
        yield node['nodename']
hash_ring compatibility implementation .
45,075
def print_continuum(self):
    """Print a ketama compatible continuum report."""
    numpoints = len(self.runtime._keys)
    if numpoints:
        print('Numpoints in continuum: {}'.format(numpoints))
    else:
        print('Continuum empty')
    for point, node in self.get_points():
        print('{} ({})'.format(node, point))
Prints a ketama compatible continuum report .
45,076
def patch_memcache():
    """Monkey patch python-memcached so that node selection and
    operations go through our consistent hash ring."""

    def _init(self, servers, *k, **kw):
        # Run the stock constructor, then build a ring over its servers.
        self._old_init(servers, *k, **kw)
        nodes = {}
        for server in self.servers:
            nodes[server.ip] = {
                'hostname': server.ip,
                'instance': server,
                'port': server.port,
                'weight': server.weight
            }
        self.uhashring = HashRing(nodes)

    def _get_server(self, key):
        # Pre-hashed (tuple) keys keep the original resolution path.
        if isinstance(key, tuple):
            return self._old_get_server(key)
        for _ in range(self._SERVER_RETRIES):
            for node in self.uhashring.range(key):
                if node['instance'].connect():
                    return node['instance'], key
        return None, None

    memcache = __import__('memcache')
    memcache.Client._old_get_server = memcache.Client._get_server
    memcache.Client._old_init = memcache.Client.__init__
    memcache.Client.__init__ = _init
    memcache.Client._get_server = _get_server
Monkey patch python - memcached to implement our consistent hashring in its node selection and operations .
45,077
def hashi(self, key, replica=0):
    """Return a ketama compatible hash of the given key.

    Four consecutive bytes of the key's md5 digest, offset by the
    replica number, are packed into a little-endian 32 bit integer.
    """
    digest = self._listbytes(md5(str(key).encode('utf-8')).digest())
    offset = replica * 4
    value = 0
    for shift, byte_index in zip((24, 16, 8, 0), (3, 2, 1, 0)):
        value |= digest[byte_index + offset] << shift
    return value
Returns a ketama compatible hash from the given key .
45,078
def _hashi_weight_generator ( self , node_name , node_conf ) : ks = ( node_conf [ 'vnodes' ] * len ( self . _nodes ) * node_conf [ 'weight' ] ) // self . _weight_sum for w in range ( 0 , ks ) : w_node_name = '%s-%s' % ( node_name , w ) for i in range ( 0 , self . _replicas ) : yield self . hashi ( w_node_name , replica = i )
Calculate the weight factor of the given node and yield its hash key for every configured replica .
45,079
def lapmod(n, cc, ii, kk, fast=True, return_cost=True, fp_version=FP_DYNAMIC):
    """Solve a sparse linear assignment problem with the Jonker-Volgenant
    algorithm.

    When ``fast`` is True the compiled implementation is used; otherwise
    the pure-python column reduction / augmenting row reduction /
    augmentation phases run.  Returns ``(cost, x, y)`` or ``(x, y)``
    depending on ``return_cost``.
    """
    def _result(x, y):
        # Every exit path shares the same return convention.
        if return_cost is True:
            return get_cost(n, cc, ii, kk, x), x, y
        return x, y

    check_cost(n, cc, ii, kk)
    if fast is True:
        x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version)
        return _result(x, y)

    cc = np.ascontiguousarray(cc, dtype=np.float64)
    ii = np.ascontiguousarray(ii, dtype=np.int32)
    kk = np.ascontiguousarray(kk, dtype=np.int32)
    x = np.empty((n,), dtype=np.int32)
    y = np.empty((n,), dtype=np.int32)
    v = np.empty((n,), dtype=np.float64)
    free_rows = np.empty((n,), dtype=np.int32)

    n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v)
    if n_free_rows == 0:
        return _result(x, y)
    for _ in range(2):
        n_free_rows = _pyarr(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
        if n_free_rows == 0:
            return _result(x, y)
    _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
    return _result(x, y)
Solve sparse linear assignment problem using Jonker - Volgenant algorithm .
45,080
def register_provider(cls, provider):
    """Class decorator factory used to keep the registry of providers.

    The decorated subclass is stored in ``cls._providers`` under the
    given provider name and its ``name`` attribute is set accordingly.
    """
    def _register(subclass):
        subclass.name = provider
        cls._providers[provider] = subclass
        return subclass
    return _register
Register method to keep list of providers .
45,081
def tar_to_bigfile(self, fname, outfile):
    """Convert a tar archive of multiple FASTA files into one big file.

    Args:
        fname: Path of the tar archive.
        outfile: Path of the combined output file.
    """
    tmpdir = mkdtemp()
    try:
        with tarfile.open(fname) as tar:
            tar.extractall(path=tmpdir)
        fnames = []
        # BUG FIX: the comprehension variable used to shadow the `fname`
        # parameter; renamed to avoid confusion.
        for root, _, files in os.walk(tmpdir):
            fnames += [os.path.join(root, f) for f in files]
        with open(outfile, "w") as out:
            for infile in fnames:
                # BUG FIX: the input handle was previously never closed.
                with open(infile) as fin:
                    for line in fin:
                        out.write(line)
                os.unlink(infile)
    finally:
        # Clean up the scratch directory even if extraction/copy fails.
        shutil.rmtree(tmpdir)
Convert tar of multiple FASTAs to one file .
45,082
def find_plugins():
    """Locate and import every plugin module from the ``plugins``
    directory that sits next to this file."""
    plugin_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "plugins")
    module_names = [f[:-3] for f in os.listdir(plugin_dir) if f.endswith(".py")]
    sys.path.insert(0, plugin_dir)
    for module_name in module_names:
        __import__(module_name)
Locate and initialize all available plugins .
45,083
def convert(name):
    """Convert a CamelCase name to lower snake_case."""
    partially_split = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    fully_split = re.sub('([a-z0-9])([A-Z])', r'\1_\2', partially_split)
    return fully_split.lower()
Convert CamelCase to underscore
45,084
def init_plugins():
    """Import all plugins and return a dict mapping each plugin name to
    its instance; plugins listed in the config are activated."""
    find_plugins()
    instances = {}
    for plugin_class in Plugin.__subclasses__():
        instance = plugin_class()
        if instance.name() in config.get("plugin", []):
            instance.activate()
        instances[instance.name()] = instance
    return instances
Return dictionary of available plugins
45,085
def activate(name):
    """Activate the plugin registered under the given name."""
    if name not in plugins:
        raise Exception("plugin {} not found".format(name))
    plugins[name].activate()
Activate plugin .
45,086
def deactivate(name):
    """Deactivate the plugin registered under the given name."""
    if name not in plugins:
        raise Exception("plugin {} not found".format(name))
    plugins[name].deactivate()
Deactivate plugin .
45,087
def manage_config(cmd, *args):
    """Manage the genomepy config file.

    ``file`` prints its path, ``show`` prints its contents and
    ``generate`` copies it into the user config directory.
    """
    if cmd == "file":
        print(config.config_file)
    elif cmd == "show":
        with open(config.config_file) as f:
            print(f.read())
    elif cmd == "generate":
        config_dir = user_config_dir("genomepy")
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        fname = os.path.join(config_dir, "{}.yaml".format("genomepy"))
        with open(fname, "w") as fout:
            with open(config.config_file) as fin:
                fout.write(fin.read())
        print("Created config file {}".format(fname))
Manage genomepy config file .
45,088
def search(term, provider=None):
    """Search for a genome.

    Yields rows of latin-1 encoded fields, prefixed with the provider
    name.  When ``provider`` is None, all providers are queried.
    """
    if provider:
        providers = [ProviderBase.create(provider)]
    else:
        providers = [ProviderBase.create(name)
                     for name in ProviderBase.list_providers()]
    for p in providers:
        for row in p.search(term):
            yield [field.encode('latin-1') for field in [p.name] + list(row)]
Search for a genome .
45,089
def install_genome(name, provider, version=None, genome_dir=None, localname=None,
                   mask="soft", regex=None, invert_match=False, annotation=False):
    """Install a genome.

    Downloads the genome (and optionally its annotation) from the given
    provider into ``genome_dir``, then runs all active plugins on it and
    regenerates the environment exports file.

    Parameters
    ----------
    name : str
        Genome name as known to the provider.
    provider : str
        Provider name.
    version : str, optional
        Genome version.
    genome_dir : str, optional
        Target directory; falls back to the configured ``genome_dir``.
    localname : str, optional
        Local name override for the downloaded genome.
    mask : str, optional
        Masking type, "soft" by default.
    regex : str, optional
        Only download sequences matching this regex.
    invert_match : bool, optional
        Invert the regex match.
    annotation : bool, optional
        Also download the gene annotation.

    Raises
    ------
    norns.exceptions.ConfigError
        When no genome_dir is given or configured.
    """
    if not genome_dir:
        # Fall back to the configured default directory.
        genome_dir = config.get("genome_dir", None)
    if not genome_dir:
        raise norns.exceptions.ConfigError("Please provide or configure a genome_dir")
    genome_dir = os.path.expanduser(genome_dir)
    localname = get_localname(name, localname)
    p = ProviderBase.create(provider)
    p.download_genome(name, genome_dir, version=version, mask=mask,
                      localname=localname, regex=regex, invert_match=invert_match)
    if annotation:
        p.download_annotation(name, genome_dir, localname=localname, version=version)
    # Give every active plugin a chance to post-process the new genome.
    g = Genome(localname, genome_dir=genome_dir)
    for plugin in get_active_plugins():
        plugin.after_genome_download(g)
    generate_env()
Install a genome .
45,090
def generate_exports():
    """Return a list of shell export statements, one per installed
    genome, mapping the uppercased genome name to its filename.

    Genomes that cannot be loaded are skipped (best effort).
    """
    env = []
    for name in list_installed_genomes():
        try:
            g = Genome(name)
            env_name = re.sub(r'[^\w]+', "_", name).upper()
            env.append("export {}={}".format(env_name, g.filename))
        except Exception:
            # BUG FIX: a bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort skip but only for
            # ordinary exceptions.
            pass
    return env
Print export commands for setting environment variables .
45,091
def generate_env(fname=None):
    """Write the genome export statements to a file.

    Args:
        fname: Output path.  When None, defaults to ``exports.txt`` in
            the genomepy user config directory (created if needed).
    """
    if fname is None:
        # BUG FIX: previously an explicitly passed fname was ignored
        # whenever the config dir existed, and ``open(None)`` crashed
        # when it did not.
        config_dir = user_config_dir("genomepy")
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        fname = os.path.join(config_dir, "exports.txt")
    with open(fname, "w") as fout:
        for env in generate_exports():
            fout.write("{}\n".format(env))
Generate file with exports .
45,092
def manage_plugins(command, plugin_names=None):
    """Enable or disable plugins, or list them with their status."""
    if plugin_names is None:
        plugin_names = []
    active_plugins = config.get("plugin", [])
    plugins = init_plugins()

    if command == "enable":
        for name in plugin_names:
            if name not in plugins:
                raise ValueError("Unknown plugin: {}".format(name))
            if name not in active_plugins:
                active_plugins.append(name)
    elif command == "disable":
        for name in plugin_names:
            if name in active_plugins:
                active_plugins.remove(name)
    elif command == "list":
        print("{:20}{}".format("plugin", "enabled"))
        for plugin in sorted(plugins):
            status = "*" if plugin in active_plugins else ""
            print("{:20}{}".format(plugin, status))
    else:
        raise ValueError("Invalid plugin command")

    # Persist the (possibly updated) list of active plugins.
    config["plugin"] = active_plugins
    config.save()
    if command in ["enable", "disable"]:
        print("Enabled plugins: {}".format(", ".join(sorted(active_plugins))))
Enable or disable plugins .
45,093
def get_random_sequences(self, n=10, length=200, chroms=None, max_n=0.1):
    """Return ``n`` random genomic coordinates of the given length.

    Chromosomes are sampled weighted by their non-gap size; positions
    containing more than ``max_n`` fraction of N bases are retried.

    Returns:
        List of ``[chrom, start, end]`` coordinates.

    Raises:
        ValueError: when no suitable non-N sequence is found for a
            chromosome within the retry budget.
    """
    retries = 100
    # Maximum number of N bases tolerated in a candidate sequence.
    cutoff = length * max_n
    if not chroms:
        chroms = self.keys()
    try:
        gap_sizes = self.gap_sizes()
    except:
        # NOTE(review): bare except — presumably gap data may be missing;
        # an empty mapping means no gap correction.
        gap_sizes = {}
    # Effective (non-gap) size per chromosome.
    sizes = dict([(chrom, len(self[chrom]) - gap_sizes.get(chrom, 0)) for chrom in chroms])
    # Keep chromosomes that are mostly non-gap and comfortably longer
    # than the requested length; weight selection by effective size.
    l = [(sizes[x], x) for x in chroms if sizes[x] / len(self[x]) > 0.1 and sizes[x] > 10 * length]
    chroms = _weighted_selection(l, n)
    coords = []
    # NOTE(review): ``count`` is built but never read afterwards —
    # looks like dead code; confirm before removing.
    count = {}
    for chrom in chroms:
        if chrom in count:
            count[chrom] += 1
        else:
            count[chrom] = 1
    for chrom in chroms:
        # Retry until a window with an acceptable N fraction is found.
        for i in range(retries):
            start = int(random.random() * (sizes[chrom] - length))
            end = start + length
            count_n = self[chrom][start:end].seq.upper().count("N")
            if count_n <= cutoff:
                break
        if count_n > cutoff:
            raise ValueError("Failed to find suitable non-N sequence for {}".format(chrom))
        coords.append([chrom, start, end])
    return coords
Return random genomic sequences .
45,094
def search(term, provider=None):
    """Search for genomes that contain TERM in their name or description
    and print one tab-separated line per match."""
    for row in genomepy.search(term, provider):
        fields = [field.decode('utf-8', 'ignore') for field in row]
        print("\t".join(fields))
Search for genomes that contain TERM in their name or description .
45,095
def install(name, provider, genome_dir, localname, mask, regex, match, annotation):
    """Install genome NAME from provider PROVIDER in directory GENOME_DIR."""
    genomepy.install_genome(
        name,
        provider,
        genome_dir=genome_dir,
        localname=localname,
        mask=mask,
        regex=regex,
        invert_match=not match,
        annotation=annotation,
    )
Install genome NAME from provider PROVIDER in directory GENOME_DIR .
45,096
def generate_gap_bed(fname, outname):
    """Generate a BED file listing gap (run of N bases) locations in a
    FASTA file."""
    fasta = Fasta(fname)
    with open(outname, "w") as bed:
        for chrom in fasta.keys():
            sequence = fasta[chrom][:].seq
            for match in re.finditer(r'N+', sequence):
                bed.write("{}\t{}\t{}\n".format(chrom, match.start(0), match.end(0)))
Generate a BED file with gap locations .
45,097
def generate_sizes(name, genome_dir):
    """Generate a ``<genome>.fa.sizes`` file with the length of every
    sequence in the genome FASTA file."""
    fa = os.path.join(genome_dir, name, "{}.fa".format(name))
    genome = Fasta(fa)
    with open(fa + ".sizes", "w") as sizes_file:
        for seqname in genome.keys():
            sizes_file.write("{}\t{}\n".format(seqname, len(genome[seqname])))
Generate a sizes file with length of sequences in FASTA file .
45,098
def filter_fasta(infa, outfa, regex=".*", v=False, force=False):
    """Filter a FASTA file on sequence name.

    Keeps sequences whose name matches ``regex`` (or, with ``v=True``,
    those that do NOT match) and writes them to ``outfa``.

    Returns:
        Fasta object for the filtered output file.

    Raises:
        ValueError: when input and output are the same file, the output
            exists without ``force``, or no sequences remain.
    """
    if infa == outfa:
        raise ValueError("Input and output FASTA are the same file.")
    if os.path.exists(outfa):
        if not force:
            raise ValueError(
                "{} already exists, set force to True to overwrite".format(outfa))
        os.unlink(outfa)
        if os.path.exists(outfa + ".fai"):
            os.unlink(outfa + ".fai")

    filt_function = re.compile(regex).search
    fa = Fasta(infa, filt_function=filt_function)
    seqs = fa.keys()
    if v:
        # Invert the selection: keep everything that did NOT match.
        original_fa = Fasta(infa)
        seqs = [s for s in original_fa.keys() if s not in seqs]
        fa = original_fa
    if len(seqs) == 0:
        raise ValueError("No sequences left after filtering!")

    with open(outfa, "w") as out:
        for chrom in seqs:
            out.write(">{}\n".format(fa[chrom].name))
            out.write("{}\n".format(fa[chrom][:].seq))
    return Fasta(outfa)
Filter fasta file based on regex .
45,099
def cmd_ok(cmd):
    """Return True if ``cmd`` can be executed.

    A command that runs but exits non-zero still counts as ok; only a
    missing or unrunnable binary returns False (with a note on stderr).
    """
    try:
        sp.check_call(cmd, stderr=sp.PIPE, stdout=sp.PIPE)
    except sp.CalledProcessError:
        # The command exists but exited non-zero; that's still "ok".
        pass
    except OSError:
        # BUG FIX: this was a bare ``except:`` that swallowed every
        # exception (including KeyboardInterrupt) and misreported it as
        # "not found"; only launch failures mean the command is absent.
        sys.stderr.write("{} not found, skipping\n".format(cmd))
        return False
    return True
Returns True if cmd can be run .