idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
12,100
def fromapi(_class, api, apiresponse):
    """Build a Bus object from an API response dict."""
    bus = apiresponse
    # 'tmstmp' arrives as text; parse with the API's declared format.
    updated_at = datetime.strptime(bus['tmstmp'], api.STRPTIME)
    return _class(
        api=api,
        vid=bus['vid'],
        timeupdated=updated_at,
        lat=float(bus['lat']),
        lng=float(bus['lon']),
        heading=bus['hdg'],
        pid=bus['pid'],
        intotrip=bus['pdist'],
        route=bus['rt'],
        destination=bus['des'],
        speed=bus['spd'],
        # 'dly' may be absent; coerce missing/falsy to False.
        delay=bus.get('dly') or False,
    )
Return a Bus object from an API response dict .
163
10
12,101
def update(self):
    """Refresh this bus in place by transplanting a fresh instance's dict."""
    data = self.api.vehicles(vid=self.vid)['vehicle']
    refreshed = self.fromapi(self.api, data)
    self.__dict__ = refreshed.__dict__
    del refreshed
Update this bus by creating a new one and transplanting dictionaries .
57
14
12,102
def predictions(self):
    """Yield Prediction objects built from the API response for this bus."""
    raw_predictions = self.api.predictions(vid=self.vid)['prd']
    for raw in raw_predictions:
        prediction = Prediction.fromapi(self.api, raw)
        # Back-reference so the prediction knows which bus produced it.
        prediction._busobj = self
        yield prediction
Generator that yields prediction objects from an API response .
55
11
12,103
def next_stop(self):
    """Return the next stop (first prediction) for this bus."""
    first = self.api.predictions(vid=self.vid)['prd'][0]
    prediction = Prediction.fromapi(self.api, first)
    prediction._busobj = self
    return prediction
Return the next stop for this bus .
58
8
12,104
def get(_class, api, rt):
    """Return a Route object for route ``rt`` using API instance ``api``.

    The full route list is fetched from the API only once and cached on
    the class in ``all_routes``; later calls hit the cache.
    """
    if not _class.all_routes:
        # Populate the class-level cache from the API route listing.
        _class.all_routes = _class.update_list(api, api.routes()['route'])
    return _class.all_routes[str(rt)]
Return a Route object for route rt using API instance api .
68
13
12,105
def _normalise_path ( path : Union [ str , pathlib . Path ] ) -> pathlib . Path : if isinstance ( path , str ) : return pathlib . Path ( path ) return path
Ensures a path is parsed .
44
8
12,106
def root(path: Union[str, pathlib.Path]) -> _Root:
    """Retrieve a root directory object from a path."""
    normalised = _normalise_path(path)
    return _Root.from_path(normalised)
Retrieve a root directory object from a path .
36
10
12,107
def entity(path: Union[str, pathlib.Path]) -> _Entity:
    """Retrieve an appropriate entity object from a path."""
    normalised = _normalise_path(path)
    return _Entity.from_path(normalised)
Retrieve an appropriate entity object from a path .
36
10
12,108
def compare(left: Union[str, pathlib.Path, _Entity],
            right: Union[str, pathlib.Path, _Entity]) -> Comparison:
    """Compare two paths (or entities) and return the Comparison result."""
    def to_entity(value: Union[str, pathlib.Path, _Entity]) -> _Entity:
        """Coerce a path string or path object into an entity."""
        if isinstance(value, str):
            value = pathlib.Path(value)
        if isinstance(value, pathlib.Path):
            value = _Entity.from_path(value)
        return value

    return Comparison.compare(to_entity(left), to_entity(right))
Compare two paths .
167
4
12,109
def read_XPARM(path_to_XPARM='.'):
    """Load instrumental geometry from an XPARM.XDS or GXPARM.XDS file.

    :param path_to_XPARM: path to the file itself or to a directory that
        contains GXPARM.XDS (preferred) or XPARM.XDS.
    :returns: dict of parsed geometry values (frames, wavelength, cell,
        detector parameters, ...).
    :raises Exception: if the path/files do not exist or the file layout
        does not match the expected number of values.
    """
    if not os.path.exists(path_to_XPARM):
        # BUGFIX: original message lacked a space before "does not exist".
        raise Exception("path " + path_to_XPARM + " does not exist")
    if os.path.isdir(path_to_XPARM):
        # Prefer the refined geometry (GXPARM) over the initial one.
        for name in ('GXPARM.XDS', 'XPARM.XDS'):
            candidate = os.path.join(path_to_XPARM, name)
            if os.path.isfile(candidate):
                path_to_XPARM = candidate
                break
        else:
            raise Exception("files GXPARM.XDS and XPARM.XDS are not found "
                            "in the folder " + path_to_XPARM)
    # Context manager guarantees the handle is closed.
    with open(path_to_XPARM) as handle:
        handle.readline()  # skip header
        text = handle.read()
    # Parse the rest to numbers; raw string avoids the invalid-escape issue.
    numbers = re.compile(r'-?\d+\.?\d*').finditer(text)
    try:
        result = dict(
            starting_frame=r_get_numbers(numbers, 1),
            starting_angle=r_get_numbers(numbers, 1),
            oscillation_angle=r_get_numbers(numbers, 1),
            rotation_axis=r_get_numbers(numbers, 3),
            wavelength=r_get_numbers(numbers, 1),
            wavevector=r_get_numbers(numbers, 3),
            space_group_nr=r_get_numbers(numbers, 1),
            cell=r_get_numbers(numbers, 6),
            unit_cell_vectors=np.reshape(r_get_numbers(numbers, 9), (3, 3)),
            number_of_detector_segments=r_get_numbers(numbers, 1),
            NX=r_get_numbers(numbers, 1),
            NY=r_get_numbers(numbers, 1),
            pixelsize_x=r_get_numbers(numbers, 1),
            pixelsize_y=r_get_numbers(numbers, 1),
            x_center=r_get_numbers(numbers, 1),
            y_center=r_get_numbers(numbers, 1),
            distance_to_detector=r_get_numbers(numbers, 1),
            detector_x=r_get_numbers(numbers, 3),
            detector_y=r_get_numbers(numbers, 3),
            detector_normal=r_get_numbers(numbers, 3),
            detector_segment_crossection=r_get_numbers(numbers, 5),
            detector_segment_geometry=r_get_numbers(numbers, 9))
    except StopIteration:
        raise Exception('Wrong format of the XPARM.XDS file')
    # Check there is nothing left.
    try:
        # BUGFIX: was ``f.next()`` (Python 2 only); next() works on py2/py3.
        next(numbers)
    except StopIteration:
        pass
    else:
        raise Exception('Wrong format of the XPARM.XDS file')
    return result
Loads the instrumental geometry information from the XPARM . XDS or GXPARM . XDS files at the proposed location
713
26
12,110
def create_h5py_with_large_cache(filename, cache_size_mb):
    """Create an HDF5 file with a chunk cache of *cache_size_mb* megabytes.

    The file is created exclusively (fails if it already exists) and
    returned as an open ``h5py.File``.
    """
    # h5py does not allow to control the cache size from the high level;
    # we employ the low-level-API workaround. Sources:
    # http://stackoverflow.com/questions/14653259/how-to-set-cache-settings-while-using-h5py-high-level-interface
    # https://groups.google.com/forum/#!msg/h5py/RVx1ZB6LpE4/KH57vq5yw2AJ
    propfaid = h5py.h5p.create(h5py.h5p.FILE_ACCESS)
    settings = list(propfaid.get_cache())
    # Slot 2 of the cache settings tuple is the raw chunk cache size in bytes.
    settings[2] = 1024 * 1024 * cache_size_mb
    propfaid.set_cache(*settings)
    fid = h5py.h5f.create(filename, flags=h5py.h5f.ACC_EXCL, fapl=propfaid)
    fin = h5py.File(fid)
    return fin
Allows to open the hdf5 file with specified cache size
240
12
12,111
def find_features(seqs, locus_tag="all", utr_len=200):
    """Find CDS features in *seqs* by locus tag, including UTR flanks.

    When locus_tag is "all", every CDS matches; otherwise only CDS
    features whose first 'locus_tag' qualifier equals it.
    """
    matches = []
    for record in seqs:
        for feature in record.features:
            if feature.type != "CDS":
                continue
            tag_matches = locus_tag == "all" or (
                'locus_tag' in feature.qualifiers
                and feature.qualifiers['locus_tag'][0] == locus_tag)
            if not tag_matches:
                continue
            # Extend by utr_len on both sides, clamped at the sequence start.
            start = max(0, feature.location.nofuzzy_start - utr_len)
            stop = max(0, feature.location.nofuzzy_end + utr_len)
            sub_seq = record.seq[start:stop]
            matches.append(
                FeatureMatch(feature, sub_seq, feature.strand, utr_len))
    return matches
Find features in sequences by locus tag
196
8
12,112
def getLevel(self):
    """Get the nesting level of this port (number of LPort ancestors)."""
    level = 0
    node = self.parent
    # Walk up the parent chain while it is still a port.
    while isinstance(node, LPort):
        level += 1
        node = node.parent
    return level
Get nest - level of this port
38
7
12,113
def normalize_LCSH(subject):
    """Normalize an LCSH subject heading prior to indexing."""
    # LCSH uses '--' as a subdivision delimiter: strip each part,
    # then rejoin with a uniformly spaced delimiter.
    parts = [part.strip() for part in subject.strip().split('--')]
    normalized = ' -- '.join(parts)
    # If the heading does not already end with punctuation, add a period.
    if re.search(r'[^a-zA-Z0-9]$', normalized) is None:
        normalized += '.'
    return normalized
Normalize a LCSH subject heading prior to indexing .
139
12
12,114
def normalize_UNTL(subject):
    """Normalize a UNTL subject heading for consistency."""
    # Trim the ends, then collapse any internal whitespace runs to one space.
    collapsed = re.sub(r'[\s]+', ' ', subject.strip())
    return collapsed
Normalize a UNTL subject heading for consistency .
38
10
12,115
def UNTL_to_encodedUNTL(subject):
    """Normalize a UNTL subject heading to be used in SOLR."""
    encoded = normalize_UNTL(subject)
    # Spaces become underscores; the '_-_' separator becomes a slash.
    encoded = encoded.replace(' ', '_').replace('_-_', '/')
    return encoded
Normalize a UNTL subject heading to be used in SOLR .
52
14
12,116
def untldict_normalizer(untl_dict, normalizations):
    """Normalize UNTL elements by their qualifier.

    :param untl_dict: mapping of element type -> list of element dicts
        (each with optional 'qualifier' and 'content' keys); modified
        in place and also returned.
    :param normalizations: mapping of element type -> list of qualifiers
        that should be normalized.
    """
    # Loop through the element types in the UNTL metadata.
    for element_type, element_list in untl_dict.items():
        # A normalization is required for that element type.
        if element_type in normalizations:
            # Get the required normalizations for specific qualifiers list.
            norm_qualifier_list = normalizations.get(element_type)
            # Loop through the element lists within that element type.
            for element in element_list:
                # Determine if the qualifier requires normalization.
                qualifier = element.get('qualifier', None)
                if qualifier in norm_qualifier_list:
                    content = element.get('content', None)
                    # Determine if there is normalizing for the element.
                    if element_type in ELEMENT_NORMALIZERS:
                        elem_norms = ELEMENT_NORMALIZERS.get(element_type, None)
                        # If the qualified element requires a
                        # normalization and has content, replace the
                        # content with the normalized.
                        if qualifier in elem_norms:
                            if content and content != '':
                                element['content'] = elem_norms[qualifier](content)
    return untl_dict
Normalize UNTL elements by their qualifier .
264
9
12,117
def start(config, bugnumber=""):
    """Create a new topic branch.

    Derives a branch name from the bug number plus a cleaned summary
    (prompted interactively), refuses duplicate branch names, checks the
    new branch out, and persists the state via ``save``.
    """
    repo = config.repo
    if bugnumber:
        summary, bugnumber, url = get_summary(config, bugnumber)
    else:
        url = None
        summary = None
    if summary:
        # Offer the fetched summary as an editable default.
        summary = input('Summary ["{}"]: '.format(summary)).strip() or summary
    else:
        summary = input("Summary: ").strip()
    branch_name = ""
    if bugnumber:
        # NOTE(review): both branches are identical; presumably GitHub and
        # non-GitHub issues were meant to get different prefixes -- confirm.
        if is_github({"bugnumber": bugnumber, "url": url}):
            branch_name = "{}-".format(bugnumber)
        else:
            branch_name = "{}-".format(bugnumber)

    def clean_branch_name(string):
        # Collapse whitespace, turn separators into dashes, drop punctuation.
        string = re.sub(r"\s+", " ", string)
        string = string.replace(" ", "-")
        string = string.replace("->", "-").replace("=>", "-")
        for each in "@%^&:'\"/(),[]{}!.?`$<>#*;=":
            string = string.replace(each, "")
        string = re.sub("-+", "-", string)
        string = string.strip("-")
        return string.lower().strip()

    branch_name += clean_branch_name(summary)
    if not branch_name:
        error_out("Must provide a branch name")
    # Check that the branch doesn't already exist
    found = list(find(repo, branch_name, exact=True))
    if found:
        error_out("There is already a branch called {!r}".format(found[0].name))
    new_branch = repo.create_head(branch_name)
    new_branch.checkout()
    if config.verbose:
        click.echo("Checkout out new branch: {}".format(branch_name))
    save(config.configfile, summary, branch_name, bugnumber=bugnumber, url=url)
Create a new topic branch .
446
6
12,118
def conv_cond_concat(x, y):
    """Concatenate conditioning vector y on the feature-map axis of x."""
    x_shape = x.get_shape()
    y_shape = y.get_shape()
    # Broadcast y across the spatial dims of x via multiplication by ones.
    tiled_y = y * tf.ones([x_shape[0], x_shape[1], x_shape[2], y_shape[3]])
    # NOTE: legacy pre-1.0 TF argument order (axis first) -- kept as-is.
    return tf.concat(3, [x, tiled_y])
Concatenate conditioning vector on feature map axis .
90
11
12,119
def lrelu_sq(x):
    """Concatenate lrelu(x) and min(|x|, x^2) along the last axis."""
    last_axis = len(x.get_shape()) - 1
    squashed = tf.minimum(tf.abs(x), tf.square(x))
    # NOTE: legacy pre-1.0 TF argument order (axis first) -- kept as-is.
    return tf.concat(last_axis, [lrelu(x), squashed])
Concatenates lrelu and square
60
9
12,120
def avg_grads(tower_grads):
    """Average each shared variable's gradient across all towers."""
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars looks like:
        # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
        expanded = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        # Stack along the new 'tower' dimension, then average it away.
        grad = tf.reduce_mean(tf.concat(0, expanded), 0)
        # The variables are shared across towers, so the first tower's
        # pointer to the variable is sufficient.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads
Calculate the average gradient for each shared variable across all towers .
260
14
12,121
def unescape_utf8(msg):
    """Convert escaped unicode web entities in *msg* to unicode characters.

    NOTE(review): Python 2 only -- relies on ``unichr`` and
    ``urllib.unquote``, neither of which exists under Python 3.
    """
    def sub(m):
        text = m.group(0)
        # Hex entity (&#x2019;) vs decimal entity (&#8217;).
        if text[:3] == "&#x":
            return unichr(int(text[3:-1], 16))
        else:
            return unichr(int(text[2:-1]))
    # NOTE(review): the pattern also matches named entities like &amp;,
    # for which int() would raise -- confirm inputs are numeric-only.
    return re.sub("&#?\w+;", sub, urllib.unquote(msg))
convert escaped unicode web entities to unicode
99
10
12,122
def ensure(data_type, check_value, default_value=None):
    """Coerce *check_value* to *data_type*, falling back to *default_value*.

    If check_value already is an instance of data_type it is returned
    unchanged; otherwise a conversion is attempted and default_value is
    returned when the conversion fails.

    :raises ValueError: if default_value is given but is not itself an
        instance of data_type.
    """
    if default_value is not None and not isinstance(default_value, data_type):
        raise ValueError("default_value must be the value in the given data "
                         "type.")
    elif isinstance(check_value, data_type):
        return check_value
    try:
        new_value = data_type(check_value)
    # BUGFIX: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        return default_value
    return new_value
function to ensure the given check value is in the given data type if yes return the check value directly otherwise return the default value
102
25
12,123
def mark_resolved(task_id):
    """Mark the specified task as resolved in the FailedTask table.

    Only rows still unresolved (datetime_resolved is NULL) are touched;
    their resolution timestamp is set to now().
    """
    # Imported lazily to avoid circular imports at module load time.
    from . import models
    models.FailedTask.objects.filter(task_id=task_id, datetime_resolved=None).update(datetime_resolved=now())
Mark the specified task as resolved in the FailedTask table .
53
12
12,124
def is_equal(a, b, tol):
    """Ratio test: True if floats a and b agree within relative tolerance tol."""
    return a == b or abs(a - b) <= tol * max(abs(a), abs(b))
Ratio test to check if two floating point numbers are equal .
47
13
12,125
def getPortSideView(self, side) -> List["LPort"]:
    """Return the sublist view holding all ports of the given side."""
    if side == PortSide.WEST:
        return self.west
    if side == PortSide.EAST:
        return self.east
    if side == PortSide.NORTH:
        return self.north
    if side == PortSide.SOUTH:
        return self.south
    # Unknown side value: reject it explicitly.
    raise ValueError(side)
Returns a sublist view for all ports of given side .
82
12
12,126
def iterEdges(self, filterSelfLoops=False):
    """Iterate edges connected from outside of this unit."""
    for port in self.iterPorts():
        for edge in port.iterEdges(filterSelfLoops=filterSelfLoops):
            yield edge
Iter edges connected from outside of this unit
44
8
12,127
def link(source_path):
    """Link the content at *source_path*; return (all_block, variables)."""
    if not os.path.isfile(source_path):
        raise SourceNotFound(source_path)
    with open(source_path, 'r') as handle:
        content = handle.read()
    # The map is populated as a side effect of convert_lines_to_block.
    block_map = BlockMap()
    all_block = convert_lines_to_block(
        content.splitlines(), block_map, LinkStack(source_path), source_path)
    return all_block, block_map.get_variables()
Links the content found at source_path and returns a Block that represents the content.
122
17
12,128
def process_links(include_match, block_map, link_stack, source_path):
    """Resolve one include-tag match to the block it references."""
    leading_whitespace = include_match.group(1)
    include_path = include_match.group(2)
    block_name = include_match.group(3)
    # Block name is optional; when omitted, default to the 'all' block.
    if block_name is None:
        block_name = ALL_BLOCK_NAME
    else:
        block_name = block_name.lstrip(':')
    return retrieve_block_from_map(
        source_path,
        include_path.strip(),
        block_name.strip(),
        leading_whitespace,
        block_map,
        link_stack,
    )
Process a string of content for include tags .
159
9
12,129
def catch_warnings(action, category=Warning, lineno=0, append=False):
    """Decorator factory: run the wrapped function inside a
    ``warnings.catch_warnings`` context with the given simple filter."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.simplefilter(action, category, lineno, append)
                return func(*args, **kwargs)
        return wrapper
    return decorator
Wrap the function in a warnings . catch_warnings context .
94
14
12,130
def _guess_caller():
    """Try to guess which module imported app.py.

    Inspects one frame up the call stack and stores that module's
    absolute file path in the module-global ``_caller_path``.
    """
    import inspect
    global _caller_path
    caller = inspect.stack()[1]
    caller_module = inspect.getmodule(caller[0])
    # Builtin/interactive callers have no __file__; leave _caller_path as-is.
    if hasattr(caller_module, '__file__'):
        _caller_path = os.path.abspath(caller_module.__file__)
    # NOTE(review): if _caller_path was never set and the caller has no
    # __file__, this raises NameError -- confirm a module-level default exists.
    return _caller_path
try to guess which module import app . py
84
9
12,131
def _fix_paths(self, options):
    """Make 'template_path' and 'static_path' in *options* absolute.

    Relative values are resolved against ``self.root_path`` so the
    process working directory does not matter; None values are dropped.
    """
    for k in ('template_path', 'static_path'):
        if k in options:
            v = options.pop(k)
            if v is None:
                # Dropped entirely: the pop above removed the key.
                continue
            if not os.path.isabs(v):
                v = os.path.abspath(os.path.join(self.root_path, v))
                app_log.debug('Fix %s to be absolute: %s' % (k, v))
            options[k] = v
Fix static_path and template_path to be absolute paths according to self.root_path so that the PWD can be ignored.
114
28
12,132
def route(self, url, host=None):
    """Decorator registering a handler class for *url* (optionally per host)."""
    def register(handler_cls):
        handlers = self._get_handlers_on_host(host)
        # Prepend so later registrations take precedence.
        handlers.insert(0, (url, handler_cls))
        return handler_cls
    return register
This is a decorator
59
5
12,133
def command_line_config(self):
    """Apply ``--key=value`` command line arguments on top of settings.

    settings.py is the basis: keys already present keep their declared
    type (bool values must literally be 'True'/'False'); unknown keys
    are added as strings.
    """
    args = sys.argv[1:]
    args_dict = {}
    existed_keys = []
    new_keys = []
    for t in args:
        if not t.startswith('--'):
            raise errors.ArgsParseError('Bad arg: %s' % t)
        try:
            key, value = tuple(t[2:].split('='))
        except:
            raise errors.ArgsParseError('Bad arg: %s' % t)
        args_dict[key] = value
        if key in settings:
            existed_keys.append(key)
        else:
            new_keys.append(key)
    if existed_keys:
        app_log.debug('Changed settings:')
        for i in existed_keys:
            before = settings[i]
            type_ = type(before)
            # Booleans can't be cast from arbitrary strings: require
            # the exact literals 'True'/'False'.
            if type_ is bool:
                if args_dict[i] == 'True':
                    _value = True
                elif args_dict[i] == 'False':
                    _value = False
                else:
                    raise errors.ArgsParseError('%s should only be True or False' % i)
            else:
                # Coerce to the type of the existing setting value.
                _value = type_(args_dict[i])
            settings[i] = _value
            app_log.debug(' %s [%s]%s (%s)', i, type(settings[i]), settings[i], before)
    if new_keys:
        app_log.debug('New settings:')
        for i in new_keys:
            settings[i] = args_dict[i]
            app_log.debug(' %s %s', i, args_dict[i])
    # NOTE if ``command_line_config`` is called, logging must be re-configed
    self.update_settings({})
settings . py is the basis
389
6
12,134
def setup(self):
    """Prepare the app; called both before run and before testing starts.

    Configures test logging, resets the timezone, determines/validates
    the PROJECT name from the root path, and makes PROJECT importable.
    """
    testing = settings.get('TESTING')
    if testing:
        # Fix nose handler in testing situation.
        config = settings['LOGGERS'].get('', {})
        set_nose_formatter(config)
        #print('testing, set nose formatter: {}'.format(config))
    # reset timezone
    os.environ['TZ'] = settings['TIME_ZONE']
    time.tzset()
    # determine project name
    if settings._module:
        project = os.path.split(self.root_path)[1]
        if settings['PROJECT']:
            assert settings['PROJECT'] == project, 'PROJECT specialized in settings (%s) ' 'should be the same as project directory name (%s)' % (settings['PROJECT'], project)
        else:
            settings['PROJECT'] = project
    # PROJECT should be importable as a python module
    if settings['PROJECT']:
        # add upper directory path to sys.path if not in
        if settings._module:
            _abs = os.path.abspath
            parent_path = os.path.dirname(self.root_path)
            if not _abs(parent_path) in [_abs(i) for i in sys.path]:
                sys.path.insert(0, parent_path)
                app_log.info('Add %s to sys.path' % _abs(parent_path))
        try:
            __import__(settings['PROJECT'])
            app_log.debug('import package `%s` success' % settings['PROJECT'])
        except ImportError:
            # NOTE(review): the two adjacent string literals concatenate
            # without a space ("...projector there is no...") -- confirm
            # whether the message should read "project or".
            raise ImportError('PROJECT could not be imported, may be app.py is outside the project' 'or there is no __init__ in the package.')
    self.is_setuped = True
This function will be called both before run and testing started .
405
12
12,135
def _init_application ( self , application = None ) : if application : self . application = application else : self . application = self . make_application ( )
Initialize application object for torext app if a existed application is passed then just use this one without make a new one
34
24
12,136
def _log_function(self, handler):
    """Override ``Application.log_function`` to control what gets logged.

    Severity follows the response status (<400 info, <500 warning, else
    error); URIs matching LOGGING_IGNORE_URLS are demoted to debug.
    """
    if handler.get_status() < 400:
        log_method = request_log.info
    elif handler.get_status() < 500:
        log_method = request_log.warning
    else:
        log_method = request_log.error
    # Demote ignored URL prefixes regardless of status.
    for i in settings['LOGGING_IGNORE_URLS']:
        if handler.request.uri.startswith(i):
            log_method = request_log.debug
            break
    # request_time() is in seconds; report milliseconds.
    request_time = 1000.0 * handler.request.request_time()
    log_method("%d %s %.2fms", handler.get_status(), handler._request_summary(), request_time)
Override Application . log_function so that what to log can be controlled .
159
15
12,137
def xavier_init(fan_in, fan_out, constant=1):
    """Xavier (Glorot) uniform initialization of network weights."""
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    # Symmetric bound: constant * sqrt(6 / (fan_in + fan_out)).
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-bound,
                             maxval=bound,
                             dtype=tf.float32)
Xavier initialization of network weights
135
6
12,138
def partial_fit(self, X):
    """Train on one mini-batch of input data; return the batch cost."""
    fetches = (self.optimizer, self.cost)
    _, cost = self.sess.run(fetches, feed_dict={self.x: X})
    return cost
Train model based on mini - batch of input data . Return cost of mini - batch .
46
18
12,139
def transform(self, X):
    """Map data X into the latent space.

    Deterministic: returns the mean of the latent distribution rather
    than sampling from it.
    """
    feed = {self.x: X}
    return self.sess.run(self.z_mean, feed_dict=feed)
Transform data by mapping it into the latent space .
53
10
12,140
def generate(self, z_mu=None):
    """Generate data by decoding a latent point.

    If z_mu is None a point is drawn from the standard-normal prior;
    the decoder's output mean is returned (no output sampling).
    """
    if z_mu is None:
        z_mu = np.random.normal(size=self.network_architecture["n_z"])
    feed = {self.z: z_mu}
    return self.sess.run(self.x_reconstr_mean, feed_dict=feed)
Generate data by sampling from latent space . If z_mu is not None data for this point in latent space is generated . Otherwise z_mu is drawn from prior in latent space .
97
38
12,141
def reconstruct(self, X):
    """Use the VAE to reconstruct the given data."""
    feed = {self.x: X}
    return self.sess.run(self.x_reconstr_mean, feed_dict=feed)
Use VAE to reconstruct given data .
38
8
12,142
def get_ajd_bound(mesh):
    """Determine triangular elements adjacent to the boundary elements.

    :param mesh: dict with 'boundaries' ('11'/'12' node pairs) and
        'elements' ('2' = triangles as node lists).
    :returns: (str_adj_boundaries, boundary_elements) -- a newline-joined
        string of 1-based element numbers and the list of matched indices.
    """
    print('Get elements adjacent to boundaries')
    boundary_elements = []
    str_adj_boundaries = ''
    boundaries = mesh['boundaries']['12'] + mesh['boundaries']['11']
    for boundary in boundaries:
        # Find the triangle ('2') sharing both nodes of this boundary.
        indices = [nr if (boundary[0] in x and boundary[1] in x) else np.nan
                   for (nr, x) in enumerate(mesh['elements']['2'])]
        indices = np.array(indices)[~np.isnan(indices)]
        # BUGFIX: the original tested ``len(indices) != 1`` first, which
        # made the "No neighbour found!" branch unreachable and reported
        # empty matches as "More than one neighbour found!".
        if len(indices) == 0:
            print('No neighbour found!')
        elif len(indices) > 1:
            print('More than one neighbour found!')
        boundary_elements.append(indices[0])
        # Emit 1-based element numbers for the output file format.
        str_adj_boundaries += '{0}\n'.format(int(indices[0]) + 1)
    return str_adj_boundaries, boundary_elements
Determine triangular elements adjacend to the boundary elements
254
12
12,143
def write_elec_file(filename, mesh):
    """Read electrode positions and write matching node numbers to 'elec.dat'.

    :param filename: text file readable by np.loadtxt; each row holds one
        electrode's two coordinates.
    :param mesh: dict whose 'nodes' entries carry coordinates at index 1
        and 2 (index 0 is presumably the node id -- confirm).

    The output file starts with the electrode count, followed by one
    1-based node number per line.
    """
    elecs = []
    electrodes = np.loadtxt(filename)
    for pos in electrodes:
        # Find the mesh node whose coordinates match this electrode.
        for nr, node in enumerate(mesh['nodes']):
            if np.isclose(node[1], pos[0]) and np.isclose(node[2], pos[1]):
                elecs.append(nr + 1)
    # BUGFIX: use a context manager so the file handle is always closed,
    # even if a write fails.
    with open('elec.dat', 'w') as fid:
        fid.write('{0}\n'.format(len(elecs)))
        for i in elecs:
            fid.write('{0}\n'.format(i))
Read in the electrode positions and return the indices of the electrodes
163
12
12,144
def state_size(self) -> Sequence[Shape]:
    """Returns the MDP state size."""
    rddl = self._compiler.rddl
    return self._sizes(rddl.state_size)
Returns the MDP state size .
34
7
12,145
def action_size(self) -> Sequence[Shape]:
    """Returns the MDP action size."""
    rddl = self._compiler.rddl
    return self._sizes(rddl.action_size)
Returns the MDP action size .
34
7
12,146
def interm_size(self) -> Sequence[Shape]:
    """Returns the MDP intermediate state size."""
    rddl = self._compiler.rddl
    return self._sizes(rddl.interm_size)
Returns the MDP intermediate state size .
36
8
12,147
def output_size(self) -> Tuple[Sequence[Shape], Sequence[Shape], Sequence[Shape], int]:
    """Returns the simulation cell output size."""
    fluent_sizes = (self.state_size, self.action_size, self.interm_size)
    # The trailing 1 is the scalar reward slot.
    return fluent_sizes + (1,)
Returns the simulation cell output size .
52
7
12,148
def initial_state(self) -> StateTensor:
    """Returns the initial state tensor."""
    fluents = self._compiler.compile_initial_state(self._batch_size)
    return tuple(self._output_size(fluent) for fluent in fluents)
Returns the initial state tensor .
66
7
12,149
def _tensors(cls, fluents: Sequence[FluentPair]) -> Iterable[tf.Tensor]:
    """Yields the fluents' tensors."""
    for _, fluent in fluents:
        yield cls._output_size(fluent.tensor)
Yields the fluents tensors .
57
9
12,150
def _dtype(cls, tensor: tf.Tensor) -> tf.Tensor:
    """Casts *tensor* to tf.float32 when it has any other dtype."""
    if tensor.dtype == tf.float32:
        return tensor
    return tf.cast(tensor, tf.float32)
Converts tensor to tf . float32 datatype if needed .
52
15
12,151
def _output(cls, fluents: Sequence[FluentPair]) -> Sequence[tf.Tensor]:
    """Returns float32 output tensors for the given fluents."""
    tensors = cls._tensors(fluents)
    return tuple(map(cls._dtype, tensors))
Returns output tensors for fluents .
54
8
12,152
def output_size(self) -> Tuple[Sequence[Shape], Sequence[Shape], Sequence[Shape], int]:
    """Returns the simulation output size (delegates to the cell)."""
    cell = self._cell
    return cell.output_size
Returns the simulation output size .
38
6
12,153
def timesteps(self, horizon: int) -> tf.Tensor:
    """Returns the timestep input tensor for the given horizon.

    Counts down from horizon-1 to 0 and replicates the column for each
    batch entry.
    """
    countdown = tf.range(horizon - 1, -1, -1, dtype=tf.float32)
    countdown = tf.expand_dims(countdown, -1)
    return tf.stack([countdown] * self.batch_size)
Returns the input tensor for the given horizon .
108
10
12,154
def trajectory(self,
               horizon: int,
               initial_state: Optional[StateTensor] = None) -> TrajectoryOutput:
    """Builds the ops generating a trajectory for the given horizon.

    Unrolls the simulation cell with tf.nn.dynamic_rnn from
    initial_state (defaulting to the cell's own initial state) and casts
    each fluent stream back to its declared range type.
    """
    if initial_state is None:
        initial_state = self._cell.initial_state()
    with self.graph.as_default():
        self.inputs = self.timesteps(horizon)
        outputs, _ = tf.nn.dynamic_rnn(
            self._cell,
            self.inputs,
            initial_state=initial_state,
            dtype=tf.float32,
            scope="trajectory")
        states, actions, interms, rewards = outputs
        # fluent types
        state_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.state_range_type)
        states = self._output(states, state_dtype)
        interm_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.interm_range_type)
        interms = self._output(interms, interm_dtype)
        action_dtype = map(rddl2tf.utils.range_type_to_dtype, self._cell._compiler.rddl.action_range_type)
        actions = self._output(actions, action_dtype)
    outputs = (initial_state, states, actions, interms, rewards)
    return outputs
Returns the ops for the trajectory generation with given horizon and initial_state .
329
15
12,155
def run(self,
        horizon: int,
        initial_state: Optional[StateTensor] = None) -> SimulationOutput:
    """Builds the MDP graph and simulates trajectories in batch.

    Returns non-fluents, initial state, states, actions, interms and
    rewards; fluents and non-fluents come back in factored
    (name, value) form.
    """
    trajectory = self.trajectory(horizon, initial_state)
    with tf.Session(graph=self.graph) as sess:
        sess.run(tf.global_variables_initializer())
        non_fluents = sess.run(self._non_fluents)
        initial_state, states, actions, interms, rewards = sess.run(trajectory)
    # non-fluents
    non_fluent_ordering = self._cell._compiler.rddl.domain.non_fluent_ordering
    non_fluents = tuple(zip(non_fluent_ordering, non_fluents))
    # states
    state_fluent_ordering = self._cell._compiler.rddl.domain.state_fluent_ordering
    states = tuple(zip(state_fluent_ordering, states))
    # interms
    interm_fluent_ordering = self._cell._compiler.rddl.domain.interm_fluent_ordering
    interms = tuple(zip(interm_fluent_ordering, interms))
    # actions
    action_fluent_ordering = self._cell._compiler.rddl.domain.action_fluent_ordering
    actions = tuple(zip(action_fluent_ordering, actions))
    # rewards
    rewards = np.squeeze(rewards)
    outputs = (non_fluents, initial_state, states, actions, interms, rewards)
    return outputs
Builds the MDP graph and simulates in batch the trajectories with given horizon . Returns the non - fluents states actions interms and rewards . Fluents and non - fluents are returned in factored form .
340
45
12,156
def _output(cls, tensors: Sequence[tf.Tensor], dtypes: Sequence[tf.DType]) -> Sequence[tf.Tensor]:
    """Converts each tensor's first element to the corresponding dtype."""
    outputs = []
    for tensor, dtype in zip(tensors, dtypes):
        value = tensor[0]
        if value.dtype != dtype:
            value = tf.cast(value, dtype)
        outputs.append(value)
    return tuple(outputs)
Converts tensors to the corresponding dtypes .
101
10
12,157
def _get_biallelic_variant(self, variant, info, _check_alleles=True):
    """Creates a bi-allelic variant from the IMPUTE2 file.

    Seeks to the variant's recorded file offset, parses the genotype
    line, and returns ``[genotypes]`` -- or ``[]`` when _check_alleles
    is True and the alleles on file do not match the request.
    """
    info = info.iloc[0, :]
    assert not info.multiallelic
    # Seeking and parsing the file
    self._impute2_file.seek(info.seek)
    genotypes = self._parse_impute2_line(self._impute2_file.readline())
    variant_alleles = variant._encode_alleles([
        genotypes.reference, genotypes.coded,
    ])
    if (_check_alleles and variant_alleles != variant.alleles):
        # Variant with requested alleles is unavailable.
        logging.variant_not_found(variant)
        return []
    return [genotypes]
Creates a bi - allelic variant .
167
9
12,158
def _fix_genotypes_object(self, genotypes, variant_info):
    """Fixes a genotypes object in place (variant name, multi-allelic flag)."""
    # Checking the name (if there were duplications)
    if self.has_index and variant_info.name != genotypes.variant.name:
        if not variant_info.name.startswith(genotypes.variant.name):
            raise ValueError("Index file not synced with IMPUTE2 file")
        genotypes.variant.name = variant_info.name
    # Trying to set multi-allelic information
    if self.has_index and self._index_has_location:
        # Location was in the index, so we can automatically set the
        # multi-allelic state of the genotypes
        genotypes.multiallelic = variant_info.multiallelic
    else:
        # Location was not in the index, so we check one marker before and
        # after the one we found
        logging.warning("Multiallelic variants are not detected on "
                        "unindexed files.")
Fixes a genotypes object (variant name, multi-allelic value).
208
15
12,159
def _normalize_missing ( g ) : g = g . astype ( float ) g [ g == - 1.0 ] = np . nan return g
Normalize a plink genotype vector .
34
9
12,160
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website.

    Downloads DATA_URL into /tmp/cifar (skipping the download when the
    archive is already present) and extracts the gzip tarball there.
    """
    dest_directory = "/tmp/cifar"
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Carriage-return progress meter on one console line.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # Extract unconditionally; the archive may have been fetched earlier.
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
Download and extract the tarball from Alex s website .
238
11
12,161
def plot(config, image, file):
    """Save a single CIFAR image to *file* (squeezing singleton dims)."""
    squeezed = np.squeeze(image)
    print(file, squeezed.shape)
    imsave(file, squeezed)
Plot a single CIFAR image .
33
8
12,162
def _get_seqtype_from_ext(handle):
    """Predict the file type from a handle's (or filename's) extension.

    Returns 'genbank', 'embl' or 'fasta', prefixed with 'gz-' for
    gzip-compressed files; raises ValueError on unknown input.

    NOTE(review): Python 2 only (``basestring``).
    """
    if isinstance(handle, basestring):
        name = handle
    elif hasattr(handle, 'filename'):
        name = handle.filename
    elif hasattr(handle, 'name'):
        name = handle.name
    else:
        raise ValueError("Unknown datatype for handle!")
    modifier = ''
    dummy, ext = path.splitext(name.lower())
    if ext == ".gz":
        modifier = 'gz-'
        # Look at the extension under the .gz suffix.
        dummy, ext = path.splitext(dummy)
    if not ext:
        # No extension at all: treat the bare name as the extension.
        ext = "." + dummy
    if ext in (".gbk", ".gb", ".genbank", ".gbff"):
        return modifier + "genbank"
    elif ext in (".embl", ".emb"):
        return modifier + "embl"
    elif ext in (".fa", ".fasta", ".fna", ".faa", ".fas"):
        return modifier + "fasta"
    else:
        raise ValueError("Unknown file format '%s'." % ext)
Predict the filetype from a handle s name
237
10
12,163
def _guess_seqtype_from_file(handle):
    """Guess the sequence type from the file's contents.

    First looks for format-specific marker lines (GenBank/EMBL/FASTA);
    failing that, accepts the input as FASTA when every character is a
    valid nucleotide/protein letter or digit.

    :raises ValueError: when no format can be inferred.
    """
    if isinstance(handle, basestring):
        handle = StringIO(handle)

    for line in handle:
        if not line.strip():
            continue
        if line.lstrip().split()[0] in ('LOCUS', 'FEATURES', 'source',
                                        'CDS', 'gene'):
            return 'genbank'
        if len(line) > 2 and line[:3] in ('ID ', 'FT '):
            return 'embl'
        if line.startswith('>'):
            return 'fasta'

    handle.seek(0)
    import string
    from Bio.Data import IUPACData as iupac
    # Fallback: a header-less FASTA body only contains sequence letters.
    seen_letters = set(handle.read().lower())
    valid_letters = set(string.digits)
    valid_letters.update(set(iupac.protein_letters.lower()))
    valid_letters.update(set(iupac.unambiguous_dna_letters.lower()))
    valid_letters.update(set('- \n'))
    if valid_letters.issuperset(seen_letters):
        return 'fasta'
    raise ValueError("Failed to guess format for input")
Guess the sequence type from the file s contents
285
10
12,164
def _unzip_handle(handle):
    """Transparently unzip *handle*, whether a file name or a file object."""
    if isinstance(handle, basestring):
        return _gzip_open_filename(handle)
    return _gzip_open_handle(handle)
Transparently unzip the file handle
49
8
12,165
def sanity_check_insdcio(handle, id_marker, fake_id_line):
    """Sanity check for INSDC-style (GenBank/EMBL) files.

    Scans for an ID line or an end-of-record marker. When the record
    has an end marker ('//') but no ID line, a fake ID line is
    prepended so downstream parsers can cope; otherwise the original
    handle is returned, rewound to the start.
    """
    has_id = False
    has_end_marker = False
    for raw_line in handle:
        stripped = raw_line.strip()
        if not stripped:
            continue
        if stripped.startswith(id_marker):
            has_id = True
            break
        if stripped.startswith('//'):
            has_end_marker = True
            break
    handle.seek(0)
    # We found an ID, file looks good.
    if has_id:
        return handle
    # If there's no ID and no end marker, just give up.
    if not has_end_marker:
        return handle
    # End marker but no ID: fake one.
    patched = StringIO()
    patched.write("%s\n" % fake_id_line)
    patched.write(handle.read())
    patched.seek(0)
    return patched
Sanity check for insdcio style files
206
9
12,166
def sanity_check_fasta(handle):
    """Sanity check FASTA files.

    Returns the handle unchanged (rewound) when a '>' header is found;
    otherwise returns a new handle with a dummy header prepended.
    """
    for line in handle:
        if line.startswith('>'):
            handle.seek(0)
            return handle
    handle.seek(0)
    fake_header_line = ">DUMMY"
    patched = StringIO()
    patched.write("%s\n" % fake_header_line)
    patched.write(handle.read())
    patched.seek(0)
    return patched
Sanity check FASTA files .
114
8
12,167
def parse(handle, seqtype=None, robust=False):
    """Wrap SeqIO.parse, guessing format and optionally sanity-checking.

    :param handle: file name or file object to parse.
    :param seqtype: explicit format; guessed from the extension if None.
    :param robust: run a per-format sanity check before parsing.
    """
    if seqtype is None:
        seqtype = _get_seqtype_from_ext(handle)
    if seqtype.startswith('gz-'):
        handle = _unzip_handle(handle)
        seqtype = seqtype[3:]
    # False positive from pylint, both handles are fileobj-like
    # pylint: disable=redefined-variable-type
    if robust:
        sanity_checkers = {
            "embl": sanity_check_embl,
            "genbank": sanity_check_genbank,
            "fasta": sanity_check_fasta,
        }
        checker = sanity_checkers.get(seqtype)
        if checker is not None:
            handle = checker(handle)
    # pylint: enable=redefined-variable-type
    return SeqIO.parse(handle, seqtype)
Wrap SeqIO . parse
186
7
12,168
def isOrderFixed(self):
    """Return whether the order of ports is fixed for this constraint."""
    fixed_kinds = (PortConstraints.FIXED_ORDER,
                   PortConstraints.FIXED_RATIO,
                   PortConstraints.FIXED_POS)
    return self in fixed_kinds
Returns whether the order of ports is fixed .
49
9
12,169
def _dicts_to_columns ( dicts ) : keys = dicts [ 0 ] . keys ( ) result = dict ( ( k , [ ] ) for k in keys ) for d in dicts : for k , v in d . items ( ) : result [ k ] += [ v ] return result
Given a List of Dictionaries with uniform keys returns a single Dictionary with keys holding a List of values matching the key in the original List .
67
29
12,170
def from_vertices_and_edges(vertices, edges, vertex_name_key='name',
                            vertex_id_key='id',
                            edge_foreign_keys=('source', 'target'),
                            directed=True):
    """Build an IGraph from lists of vertex and edge dictionaries.

    Vertices and edges are encoded as lists of dicts. A distinguished
    element of each vertex dict holds a vertex ID, which the edge dicts
    reference as source and target. All remaining dict elements become
    vertex and edge attributes.
    """
    vertex_data = _dicts_to_columns(vertices)
    edge_data = _dicts_to_columns(edges)
    n = len(vertices)
    index_of = dict(zip(vertex_data[vertex_id_key], range(n)))
    # Translate foreign-key pairs into pairs of integer vertex indices.
    src_key, dst_key = edge_foreign_keys
    edge_list = [(index_of[src], index_of[dst])
                 for src, dst in zip(edge_data[src_key], edge_data[dst_key])]
    g = IGraph(n=n, edges=edge_list, directed=directed,
               vertex_attrs=vertex_data, edge_attrs=edge_data)
    g.vs['name'] = g.vs[vertex_name_key]
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs[vertex_name_key]
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
This representation assumes that vertices and edges are encoded in two lists each list containing a Python dict for each vertex and each edge respectively . A distinguished element of the vertex dicts contain a vertex ID which is used in the edge dicts to refer to source and target vertices . All the remaining elements of the dicts are considered vertex and edge attributes .
359
71
12,171
def from_edges(edges, source_key='source', target_key='target',
               weight_key='weight', directed=True):
    """Build a weighted directed IGraph from a list of edge dicts.

    Each dict supplies source, target and (integer-coerced) weight.
    """
    triples = [[edge[source_key], edge[target_key], int(edge[weight_key])]
               for edge in edges]
    g = IGraph.TupleList(triples, weights=True, directed=directed)
    g.vs['indegree'] = g.degree(mode="in")
    g.vs['outdegree'] = g.degree(mode="out")
    g.vs['label'] = g.vs['name']
    if 'group' not in g.vs.attributes():
        g.vs['group'] = labels_to_groups(g.vs['label'])
    return g
Given a List of Dictionaries with source target and weight attributes return a weighted directed graph .
192
19
12,172
def flip_alleles(genotypes):
    """Flip the alleles of a Genotypes instance (deprecated).

    Swaps the reference and coded alleles and recodes the dosage vector
    as ``2 - genotypes``. Mutates and returns the same instance.
    """
    warnings.warn("deprecated: use 'Genotypes.flip_coded'",
                  DeprecationWarning)
    genotypes.reference, genotypes.coded = (genotypes.coded,
                                            genotypes.reference)
    genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes
Flip the alleles of an Genotypes instance .
71
11
12,173
def code_minor(genotypes):
    """Encode the genotypes with respect to the minor allele (deprecated).

    Flips the alleles only when the coded allele is not already the
    minor one.
    """
    warnings.warn("deprecated: use 'Genotypes.code_minor'",
                  DeprecationWarning)
    _, minor_is_coded = maf(genotypes)
    if minor_is_coded:
        return genotypes
    return flip_alleles(genotypes)
Encode the genotypes with respect to the minor allele .
64
12
12,174
def maf(genotypes):
    """Compute the minor allele frequency (deprecated).

    :return: ``(maf, minor_is_coded)`` where the boolean indicates that
        the minor allele is currently the coded allele.
    """
    warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
    dosages = genotypes.genotypes
    # Allele frequency of the coded allele, ignoring missing (NaN) calls.
    freq = np.nansum(dosages) / (2 * np.sum(~np.isnan(dosages)))
    if freq > 0.5:
        return 1 - freq, False
    return freq, True
Computes the MAF and returns a boolean indicating if the minor allele is currently the coded allele .
91
20
12,175
def genotype_to_df(g, samples, as_string=False):
    """Convert a genotype object to a pandas DataFrame indexed by sample.

    :param g: genotype object with ``variant.name``, ``genotypes``,
        ``reference`` and ``coded`` attributes.
    :param samples: sample names to use as the DataFrame index.
    :param as_string: when True, return "ref/alt" allele strings based
        on rounded hard calls instead of dosages.
    """
    col = g.variant.name if g.variant.name else "genotypes"
    frame = pd.DataFrame(g.genotypes, index=samples, columns=[col])
    if not as_string:
        return frame
    frame["alleles"] = None
    hard_calls = frame[col].round()
    call_to_string = {
        0: "{0}/{0}".format(g.reference),
        1: "{0}/{1}".format(g.reference, g.coded),
        2: "{0}/{0}".format(g.coded),
    }
    for call, allele_string in call_to_string.items():
        frame.loc[hard_calls == call, "alleles"] = allele_string
    frame = frame[["alleles"]]
    frame.columns = [col]
    return frame
Convert a genotype object to a pandas dataframe .
204
13
12,176
def compute_ld(cur_geno, other_genotypes, r2=False):
    """Compute LD (r or r²) between a marker and a list of markers.

    :param cur_geno: the target genotype object.
    :param other_genotypes: iterable of genotype objects to compare to.
    :param r2: return r² instead of r.
    :return: a pandas Series indexed by the other markers' variant names.
    """
    # Normalize the target marker.
    norm_cur = normalize_genotypes(cur_geno)
    # Normalize the other markers and stack them column-wise.
    norm_others = np.stack(
        tuple(normalize_genotypes(g) for g in other_genotypes), axis=1,
    )
    # Both sides must describe the same number of samples.
    assert norm_cur.shape[0] == norm_others.shape[0]
    # Per-marker sample counts excluding positions with a NaN on either side.
    n = (~np.isnan(norm_cur.reshape(norm_cur.shape[0], 1))
         * ~np.isnan(norm_others)).sum(axis=0)
    # Pearson r, with NaN treated as 0 so missing samples contribute nothing.
    r = pd.Series(
        np.dot(np.nan_to_num(norm_cur), np.nan_to_num(norm_others) / n),
        index=[g.variant.name for g in other_genotypes],
        name="r2" if r2 else "r",
    )
    # Clamp numerical noise outside the valid [-1, 1] range.
    r.loc[r > 1] = 1
    r.loc[r < -1] = -1
    return r ** 2 if r2 else r
Compute LD between a marker and a list of markers .
319
12
12,177
def normalize_genotypes(genotypes):
    """Standardize a genotype vector: zero mean, unit variance, NaN-aware."""
    dosages = genotypes.genotypes
    return (dosages - np.nanmean(dosages)) / np.nanstd(dosages)
Normalize the genotypes .
42
6
12,178
def _get_tdm(self, m):
    """Return a crtomo tdMan instance for the given model *m*.

    The first row of *m* is registered as the magnitude model; the
    second row (if present) as the phase model, otherwise zeros.
    """
    model = np.atleast_2d(m)
    assert len(model.shape) == 2
    tdm = crtomo.tdMan(grid=self.grid, tempdir=self.tempdir)
    tdm.configs.add_to_configs(self.configs)
    # Row 0: magnitude model.
    pid_mag = tdm.parman.add_data(model[0, :])
    tdm.register_magnitude_model(pid_mag)
    # Row 1 (optional): phase model; default to all-zero phases.
    if model.shape[0] == 2:
        pid_pha = tdm.parman.add_data(model[1, :])
    else:
        pid_pha = tdm.parman.add_data(np.zeros(model.shape[1]))
    tdm.register_phase_model(pid_pha)
    return tdm
For a given model return a tdMan instance
188
9
12,179
def J(self, log_sigma):
    """Return the log-sensitivity (Jacobian) matrix for *log_sigma*.

    :param log_sigma: log-conductivity model vector.
    :return: the log-sensitivities relevant for CRTomo and the
        resolution matrix, one row per measurement configuration.
    """
    # Convert log-conductivity to resistivity.
    rho = 1.0 / np.exp(log_sigma)
    tdm = self._get_tdm(rho)
    tdm.model(
        sensitivities=True,
        # output_directory=stage_dir + 'modeling',
    )
    measurements = tdm.measurements()
    # Collect the linear sensitivity matrix, one row per configuration.
    rows = []
    for config_nr, cids in sorted(tdm.assignments['sensitivities'].items()):
        rows.append(tdm.parman.parsets[cids[0]])
    sensitivities_lin = np.array(rows)
    # Repeat measurements along the parameter dimension ...
    measurements_rep = np.repeat(measurements[:, 0, np.newaxis],
                                 sensitivities_lin.shape[1], axis=1)
    # ... and resistivities along the measurement dimension.
    m_rep = np.repeat(rho[np.newaxis, :],
                      sensitivities_lin.shape[0], axis=0)
    # eq. 3.41 in Kemna, 2000: notice that m_rep here is in rho, not sigma
    factor = -1 / (m_rep * measurements_rep)
    return factor * sensitivities_lin
Return the sensitivity matrix
334
4
12,180
def set_ironic_uuid(self, uuid_list):
    """Map a list of Ironic UUIDs onto the baremetal nodes, in order.

    :param uuid_list: UUID strings, assigned positionally to
        ``self.nodes``.
    """
    # TODO(Gonéri): ensure we adjust the correct node
    # zip() pairs nodes and UUIDs positionally and stops at the shorter
    # sequence; the previous next()-based loop leaked a StopIteration
    # when uuid_list was longer than the node list.
    for node, uuid in zip(self.nodes, uuid_list):
        node.uuid = uuid
Map a list of Ironic UUID to BM nodes .
62
12
12,181
def find_resistance(record):
    """Infer the antibiotics resistance of the given record.

    Scans feature labels for known resistance cassettes.

    :raises RuntimeError: when multiple cassettes are found on a single
        feature, or when no cassette is found at all.
    """
    for feature in record.features:
        labels = set(feature.qualifiers.get("label", []))
        cassettes = labels.intersection(_ANTIBIOTICS)
        if len(cassettes) > 1:
            raise RuntimeError("multiple resistance cassettes detected")
        if len(cassettes) == 1:
            return _ANTIBIOTICS.get(cassettes.pop())
    raise RuntimeError("could not find the resistance of '{}'".format(record.id))
Infer the antibiotics resistance of the given record .
119
10
12,182
def shell_cmd(args, cwd=None):
    """Run a shell command and return its stripped stdout.

    :param args: command string, or list/tuple of command strings.
    :param cwd: working directory; defaults to the current directory.
    :raises IOError: when the command exits non-zero (stderr attached).

    NOTE(review): runs with shell=True — do not pass untrusted input.
    """
    if cwd is None:
        cwd = os.path.abspath('.')
    if not isinstance(args, (list, tuple)):
        args = [args]
    proc = Popen(args, shell=True, cwd=cwd, stdout=PIPE, stderr=PIPE,
                 close_fds=True)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        if stderr:
            stderr = stderr.strip()
        raise IOError('Shell command %s failed (exit status %r): %s'
                      % (args, proc.returncode, stderr))
    return stdout.strip()
Returns stdout as string or None on failure
166
9
12,183
def reverse_complement(self, id=False, name=False, description=False,
                       features=True, annotations=False,
                       letter_annotations=True, dbxrefs=False):
    """Return a new CircularRecord with the reverse-complemented sequence.

    All flags are forwarded unchanged to the base class implementation;
    the result is re-wrapped in this record's own type.
    """
    flipped = super(CircularRecord, self).reverse_complement(
        id=id,
        name=name,
        description=description,
        features=features,
        annotations=annotations,
        letter_annotations=letter_annotations,
        dbxrefs=dbxrefs,
    )
    return type(self)(flipped)
Return a new CircularRecord with reverse complement sequence .
106
11
12,184
def load_private_key(self, priv_key):
    """Register the SSH private key read from the *priv_key* file path."""
    with open(priv_key) as key_file:
        self._private_key = paramiko.RSAKey.from_private_key(key_file)
Register the SSH private key .
47
6
12,185
def start(self):
    """Start the SSH client and connect to the host.

    Retries once per second for up to 60 attempts, optionally routing
    through ``via_ip``; re-raises the last connection error on failure.
    """
    if self.via_ip:
        connect_to = self.via_ip
        self.description = '[%s@%s via %s]' % (self._user,
                                               self._hostname,
                                               self.via_ip)
    else:
        connect_to = self._hostname
        self.description = '[%s@%s]' % (self._user, self._hostname)
    exception = None
    for _attempt in range(60):
        try:
            self._client.connect(connect_to,
                                 username=self._user,
                                 allow_agent=True,
                                 key_filename=self._key_filename)
            # NOTE(Gonéri): TypeError is in the list because of
            # https://github.com/paramiko/paramiko/issues/615
            self._transport = self._get_transport()
        except (OSError, TypeError, ssh_exception.SSHException,
                ssh_exception.NoValidConnectionsError) as e:
            exception = e
            LOG.info('%s waiting for %s: %s' % (self.description,
                                                connect_to,
                                                str(exception)))
            time.sleep(1)
        else:
            LOG.debug('%s connected' % self.description)
            self._started = True
            return
    _error = ("unable to connect to ssh service on '%s': %s"
              % (self._hostname, str(exception)))
    LOG.error(_error)
    raise exception
Start the ssh client and connect to the host .
323
10
12,186
def _get_channel ( self ) : channel = self . _transport . open_session ( ) channel . set_combine_stderr ( True ) channel . get_pty ( ) return channel
Returns a channel according to if there is a redirection to do or not .
44
16
12,187
def print_fields(bf, *args, **kwargs):
    """Print all fields of a Bitfield object (in hex) to stdout.

    Primarily a diagnostic aid during debugging. Extra arguments are
    forwarded to ``print``.
    """
    hex_fields = {field: hex(value) for field, value in bf.items()}
    print(bf.base, hex_fields, *args, **kwargs)
Print all the fields of a Bitfield object to stdout . This is primarly a diagnostic aid during debugging .
59
24
12,188
def clone(self):
    """Return a detached bitfield copy holding the same value.

    The copy is no longer linked to the original's backing storage —
    important when that storage is slow or has read side effects, since
    only a single read occurs and further work hits the clone.
    """
    duplicate = self.__class__()
    duplicate.base = self.base
    return duplicate
Return a new bitfield with the same value . The returned value is a copy and so is no longer linked to the original bitfield . This is important when the original is located at anything other than normal memory with accesses to it either slow or having side effects . Creating a clone and working against that clone means that only one read will occur .
24
70
12,189
def new(self,
        base: pathlib.PurePath = pathlib.PurePath(),
        include_intermediates: bool = True) -> Iterator[str]:
    """Yield the paths of new entities in this comparison.

    :param base: path prefix prepended to the entity name.
    :param include_intermediates: kept for interface compatibility;
        not consulted in this implementation.
    """
    if not self.is_new:
        return
    yield str(base / self.right.name)
Find the list of new paths in this comparison .
56
10
12,190
def modified(self, base: pathlib.PurePath = pathlib.PurePath()) -> Iterator[str]:
    """Yield the paths of modified files in this comparison.

    There is no intermediate-directory option because all files and
    directories exist in both the left and right trees.
    """
    # N.B. this method will only ever return files, as directories cannot
    # be "modified"
    if not self.is_modified:
        return
    yield str(base / self.right.name)
Find the paths of modified files . There is no option to include intermediate directories as all files and directories exist in both the left and right trees .
67
29
12,191
def deleted(self,
            base: pathlib.PurePath = pathlib.PurePath(),
            include_children: bool = True,
            include_directories: bool = True) -> Iterator[str]:
    """Yield the paths of entities deleted between left and right.

    :param base: path prefix prepended to the entity name.
    :param include_children: kept for interface compatibility; not
        consulted in this implementation.
    :param include_directories: kept for interface compatibility; not
        consulted in this implementation.
    """
    if not self.is_deleted:
        return
    yield str(base / self.left.name)
Find the paths of entities deleted between the left and right entities in this comparison .
64
16
12,192
def compare(left: Optional[L], right: Optional[R]) -> 'Comparison[L, R]':
    """Calculate the comparison of two entities.

    Dispatches to the Comparison subclass matching the File/Directory
    combination of the two sides.

    :raises TypeError: when neither side is a File or Directory.
    """
    left_is_file = isinstance(left, File)
    left_is_dir = isinstance(left, Directory)
    right_is_file = isinstance(right, File)
    right_is_dir = isinstance(right, Directory)
    if left_is_file and right_is_dir:
        return FileDirectoryComparison(left, right)
    if left_is_dir and right_is_file:
        return DirectoryFileComparison(left, right)
    if left_is_file or right_is_file:
        return FileComparison(left, right)
    if left_is_dir or right_is_dir:
        return DirectoryComparison(left, right)
    raise TypeError(f'Cannot compare entities: {left}, {right}')
Calculate the comparison of two entities .
152
9
12,193
def print_hierarchy(self, level: int = 0, file: IO[str] = sys.stdout) -> None:
    """Print this comparison with indentation representing its nesting level."""
    indent = ' ' * self._INDENT_SIZE * level
    print(indent + str(self), file=file)
Print this comparison and its children with indentation to represent nesting .
54
13
12,194
def is_modified(self) -> bool:
    """Whether the file contents differ between left and right.

    "Modified" requires the file to exist on both sides, so this is
    False whenever the file is new or deleted.
    """
    exists_on_both_sides = not (self.is_new or self.is_deleted)
    return exists_on_both_sides and self.left.md5 != self.right.md5
Find whether the files on the left and right are different . Note modified implies the contents of the file have changed which is predicated on the file existing on both the left and right . Therefore this will be false if the file on the left has been deleted or the file on the right is new .
40
60
12,195
def generate_index(fn, cols=None, names=None, sep=" "):
    """Build an index for the given delimited (possibly bgzipped) file.

    :param fn: path to the data file.
    :param cols: column indices to read (required).
    :param names: names to give the read columns (required).
    :param sep: field separator.
    :return: the index DataFrame (also written to disk).
    """
    assert cols is not None, "'cols' was not set"
    assert names is not None, "'names' was not set"
    assert len(cols) == len(names)
    # Detect compression and pick the matching open function.
    bgzip, open_func = get_open_func(fn, return_fmt=True)
    # Read only the columns needed for the index.
    data = pd.read_csv(fn, sep=sep, engine="c", usecols=cols, names=names,
                       compression="gzip" if bgzip else None)
    # Record the byte offset of every line, dropping the trailing EOF seek.
    f = open_func(fn, "rb")
    data["seek"] = np.fromiter(_seek_generator(f), dtype=np.uint)[:-1]
    f.close()
    # Persist the index alongside the data file.
    write_index(get_index_fn(fn), data)
    return data
Build a index for the given file .
220
8
12,196
def get_open_func(fn, return_fmt=False):
    """Return the function needed to open *fn*, detecting bgzip compression.

    :param fn: file path to inspect.
    :param return_fmt: when True, return ``(bgzip, open_func)`` instead
        of just the open function.
    :raises ValueError: when BioPython is unavailable for a bgzip file,
        or the file is gzip- but not bgzip-compressed.
    """
    # Sniff the gzip magic bytes to detect (b)gzip compression.
    with open(fn, "rb") as i_file:
        bgzip = i_file.read(3) == b"\x1f\x8b\x08"
    if bgzip and not HAS_BIOPYTHON:
        raise ValueError("needs BioPython to index a bgzip file")
    open_func = BgzfReader if bgzip else open
    # Verify the file opens (and, for bgzip, is seekable — plain gzip is not).
    try:
        with open_func(fn, "r") as i_file:
            if bgzip and not i_file.seekable():
                raise ValueError
    except ValueError:
        raise ValueError("{}: use bgzip for compression...".format(fn))
    if return_fmt:
        return bgzip, open_func
    return open_func
Get the opening function .
205
5
12,197
def get_index(fn, cols, names, sep):
    """Restore (or build) the index for the given file.

    :raises ValueError: when an existing index misses requested columns
        or lacks the seek offsets.
    """
    if not has_index(fn):
        # No index on disk yet; create it now.
        return generate_index(fn, cols, names, sep)
    file_index = read_index(get_index_fn(fn))
    # Every requested column (besides the seek offsets) must be present.
    index_columns = set(file_index.columns) - {"seek"}
    if set(names) - index_columns:
        raise ValueError("{}: missing index columns: reindex".format(fn))
    if "seek" not in file_index.columns:
        raise ValueError("{}: invalid index: reindex".format(fn))
    return file_index
Restores the index for a given file .
156
9
12,198
def write_index(fn, index):
    """Write the index DataFrame to *fn* as zlib-compressed CSV.

    The file starts with the module's magic check string so read_index
    can validate it.
    """
    csv_payload = bytes(
        index.to_csv(None, index=False, encoding="utf-8"),
        encoding="utf-8",
    )
    with open(fn, "wb") as o_file:
        o_file.write(_CHECK_STRING)
        o_file.write(zlib.compress(csv_payload))
Writes the index to file .
81
7
12,199
def read_index(fn):
    """Read an index written by write_index back into a DataFrame.

    :raises ValueError: when the file does not start with the magic
        check string.
    """
    with open(fn, "rb") as i_file:
        if i_file.read(len(_CHECK_STRING)) != _CHECK_STRING:
            raise ValueError("{}: not a valid index file".format(fn))
        payload = zlib.decompress(i_file.read()).decode(encoding="utf-8")
    return pd.read_csv(io.StringIO(payload))
Reads index from file .
110
6