idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
1,300
def left_overlaps(self, other, min_overlap_size=1):
    """True if this VariantSequence overlaps `other` on the left side.

    Overlap requires matching alt alleles and an arrangement like:
        self:  ppppAssss
        other:   ppAsssssss
    i.e. `other` has a shorter (or equal) prefix and a longer (or
    equal) suffix, with the shared regions agreeing, and the total
    overlap is at least `min_overlap_size` nucleotides.
    """
    if self.alt != other.alt:
        # allele must match!
        return False
    # other must not extend further to the left than self...
    if len(other.prefix) > len(self.prefix):
        return False
    # ...and must extend at least as far to the right
    if len(other.suffix) < len(self.suffix):
        return False
    # do the shared prefix/suffix regions actually agree?
    shared_prefix_ok = self.prefix.endswith(other.prefix)
    shared_suffix_ok = other.suffix.startswith(self.suffix)
    if not (shared_prefix_ok and shared_suffix_ok):
        return False
    n_overlap = (
        min(len(self.prefix), len(other.prefix))
        + min(len(self.suffix), len(other.suffix))
        + len(self.alt))
    return n_overlap >= min_overlap_size
Does this VariantSequence overlap another on the left side?
311
12
1,301
def add_reads(self, reads):
    """Return a VariantSequence with `reads` added to its support.

    Returns self unchanged when no new reads are contributed.
    """
    if len(reads) == 0:
        return self
    combined_reads = self.reads.union(reads)
    if len(combined_reads) == len(self.reads):
        # nothing new was contributed
        return self
    return VariantSequence(
        prefix=self.prefix,
        alt=self.alt,
        suffix=self.suffix,
        reads=combined_reads)
Create another VariantSequence with more supporting reads .
80
10
1,302
def variant_indices(self):
    """Base-0, half-open [start, end) interval of the alt allele within
    the combined prefix + alt + suffix string (start inclusive, end
    exclusive)."""
    start = len(self.prefix)
    return start, start + len(self.alt)
When we combine prefix + alt + suffix into a single string, what is the base-0 index interval that gets us back the alt sequence? The first returned index is inclusive, the second is exclusive.
58
38
1,303
def coverage(self):
    """Return an int32 NumPy array giving the number of supporting reads
    covering each nucleotide of this sequence."""
    start, end = self.variant_indices()
    length = len(self)
    counts = np.zeros(length, dtype="int32")
    for read in self.reads:
        # each read spans the variant plus its own prefix/suffix
        lo = max(0, start - len(read.prefix))
        hi = min(length, end + len(read.suffix))
        counts[lo:hi] += 1
    return counts
Returns a NumPy array indicating the number of reads covering each nucleotide of this sequence.
119
17
1,304
def trim_by_coverage(self, min_reads):
    """Trim positions overlapped by fewer than `min_reads` reads.

    Returns an empty VariantSequence (keeping the supporting reads)
    when no base -- or any of the variant bases themselves -- reaches
    the required coverage.
    """
    read_counts = self.coverage()
    logger.info(
        "Coverage: %s (len=%d)" % (read_counts, len(read_counts)))
    covered_indices = np.argwhere(read_counts >= min_reads)
    if len(covered_indices) == 0:
        logger.debug(
            "No bases in %s have coverage >= %d" % (self, min_reads))
        return VariantSequence(prefix="", alt="", suffix="", reads=self.reads)
    variant_start_index, variant_end_index = self.variant_indices()
    # assuming coverage drops off monotonically away from the variant
    # nucleotides, the well-covered region is a single interval
    first_covered_index = covered_indices.min()
    last_covered_index = covered_indices.max()
    # last_covered_index is inclusive while variant_end_index is the end
    # of a half-open interval, hence the +1 in the comparison below
    if (first_covered_index > variant_start_index or
            last_covered_index + 1 < variant_end_index):
        # some of the variant bases themselves are under-covered, e.g.
        # a 4x mask of ____|**|____ that starts after variant_start_index
        logger.debug(
            "Some variant bases in %s don't have coverage >= %d" % (
                self, min_reads))
        return VariantSequence(prefix="", alt="", suffix="", reads=self.reads)
    return VariantSequence(
        prefix=self.prefix[first_covered_index:],
        alt=self.alt,
        suffix=self.suffix[:last_covered_index - variant_end_index + 1],
        reads=self.reads)
Given the min number of reads overlapping each nucleotide of a variant sequence trim this sequence by getting rid of positions which are overlapped by fewer reads than specified .
484
32
1,305
def trim_N_nucleotides(prefix, suffix):
    """Trim prefix/suffix so neither contains the ambiguous base 'N'.

    The prefix keeps only what follows its rightmost N; the suffix
    keeps only what precedes its leftmost N.
    """
    if 'N' in prefix:
        # trim prefix to exclude all occurrences of N
        cut = prefix.rfind('N') + 1
        logger.debug(
            "Trimming %d nucleotides from read prefix '%s'",
            cut, prefix)
        prefix = prefix[cut:]
    if 'N' in suffix:
        cut = suffix.find('N')
        logger.debug(
            "Trimming %d nucleotides from read suffix '%s'",
            len(suffix) - cut, suffix)
        suffix = suffix[:cut]
    return prefix, suffix
Drop all occurrences of N from prefix and suffix nucleotide strings by trimming .
152
16
1,306
def convert_from_bytes_if_necessary(prefix, suffix):
    """Decode prefix/suffix to str if pysam handed us bytes, so the rest
    of the code only ever deals with strings."""
    def _as_str(value):
        return value.decode('ascii') if isinstance(value, bytes) else value
    return _as_str(prefix), _as_str(suffix)
Depending on how we extract data from pysam we may end up with either a string or a byte array of nucleotides . For consistency and simplicity we want to only use strings in the rest of our code .
62
44
1,307
def publish(self, data, **kwargs):
    """Validate the operation type, then delegate to the parent producer.

    Raises ValueError for unknown operations.  Previously this used an
    `assert`, which is silently stripped under `python -O`, letting
    invalid operations through.
    """
    if data.get('op') not in {'index', 'create', 'delete', 'update'}:
        raise ValueError(
            "Invalid operation type: {0}".format(data.get('op')))
    return super(Producer, self).publish(data, **kwargs)
Validate operation type .
59
5
1,308
def index(self, record):
    """Index a single record in Elasticsearch, using the record's
    revision id for external version-based concurrency control."""
    index_name, doc_type = self.record_to_index(record)
    body = self._prepare_record(record, index_name, doc_type)
    return self.client.index(
        id=str(record.id),
        version=record.revision_id,
        version_type=self._version_type,
        index=index_name,
        doc_type=doc_type,
        body=body,
    )
Index a record .
91
4
1,309
def process_bulk_queue(self, es_bulk_kwargs=None):
    """Drain the indexing message queue through the ES bulk API.

    Returns the counts reported by `bulk` (stats_only mode).
    """
    with current_celery_app.pool.acquire(block=True) as conn:
        consumer = Consumer(
            connection=conn,
            queue=self.mq_queue.name,
            exchange=self.mq_exchange.name,
            routing_key=self.mq_routing_key,
        )
        request_timeout = current_app.config['INDEXER_BULK_REQUEST_TIMEOUT']
        extra_kwargs = es_bulk_kwargs or {}
        count = bulk(
            self.client,
            self._actionsiter(consumer.iterqueue()),
            stats_only=True,
            request_timeout=request_timeout,
            **extra_kwargs)
        consumer.close()
        return count
Process bulk indexing queue .
183
6
1,310
def _bulk_op ( self , record_id_iterator , op_type , index = None , doc_type = None ) : with self . create_producer ( ) as producer : for rec in record_id_iterator : producer . publish ( dict ( id = str ( rec ) , op = op_type , index = index , doc_type = doc_type ) )
Index record in Elasticsearch asynchronously .
83
9
1,311
def _actionsiter ( self , message_iterator ) : for message in message_iterator : payload = message . decode ( ) try : if payload [ 'op' ] == 'delete' : yield self . _delete_action ( payload ) else : yield self . _index_action ( payload ) message . ack ( ) except NoResultFound : message . reject ( ) except Exception : message . reject ( ) current_app . logger . error ( "Failed to index record {0}" . format ( payload . get ( 'id' ) ) , exc_info = True )
Iterate bulk actions .
122
5
1,312
def _delete_action ( self , payload ) : index , doc_type = payload . get ( 'index' ) , payload . get ( 'doc_type' ) if not ( index and doc_type ) : record = Record . get_record ( payload [ 'id' ] ) index , doc_type = self . record_to_index ( record ) return { '_op_type' : 'delete' , '_index' : index , '_type' : doc_type , '_id' : payload [ 'id' ] , }
Bulk delete action .
119
5
1,313
def _index_action(self, payload):
    """Build an ES bulk index action for the record named in `payload`."""
    record = Record.get_record(payload['id'])
    index_name, doc_type = self.record_to_index(record)
    return {
        '_op_type': 'index',
        '_index': index_name,
        '_type': doc_type,
        '_id': str(record.id),
        '_version': record.revision_id,
        '_version_type': self._version_type,
        '_source': self._prepare_record(record, index_name, doc_type),
    }
Bulk index action .
131
5
1,314
def _prepare_record(record, index, doc_type):
    """Dump record data (optionally with JSON refs resolved), attach
    created/updated timestamps, and let signal receivers adjust the
    payload before it is sent to Elasticsearch."""
    if current_app.config['INDEXER_REPLACE_REFS']:
        data = copy.deepcopy(record.replace_refs())
    else:
        data = record.dumps()
    created, updated = record.created, record.updated
    data['_created'] = (
        pytz.utc.localize(created).isoformat() if created else None)
    data['_updated'] = (
        pytz.utc.localize(updated).isoformat() if updated else None)
    # Allow modification of data prior to sending to Elasticsearch.
    before_record_index.send(
        current_app._get_current_object(),
        json=data,
        record=record,
        index=index,
        doc_type=doc_type,
    )
    return data
Prepare record data for indexing .
182
8
1,315
def greedy_merge_helper(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """One round of pairwise merging of overlapping VariantSequences.

    Returns (sequences, merged_any): every merged product plus all
    inputs that merged with nothing, and whether any merge happened.

    NOTE(review): min_overlap_size is accepted but never forwarded to
    combine() -- confirm whether combine() should receive it.
    """
    merged_by_sequence = {}
    merged_any = False
    # track inputs that never participate in a merge; they pass through
    leftovers = set(variant_sequences)
    for i, seq1 in enumerate(variant_sequences):
        # the triangle (i+1 onwards) suffices because combine() tries
        # flipping its arguments when seq1 lies to the right of seq2
        for seq2 in variant_sequences[i + 1:]:
            combined = seq1.combine(seq2)
            if combined is None:
                continue
            previous = merged_by_sequence.get(combined.sequence)
            if previous is not None:
                # same merged string seen before: the two results can
                # differ only in which reads support them, so pool them
                combined = combined.add_reads(previous.reads)
            merged_by_sequence[combined.sequence] = combined
            leftovers.discard(seq1)
            leftovers.discard(seq2)
            merged_any = True
    return list(merged_by_sequence.values()) + list(leftovers), merged_any
Returns a list of merged VariantSequence objects and True if any were successfully merged .
342
17
1,316
def greedy_merge(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """Repeatedly merge overlapping sequences until a round of
    greedy_merge_helper makes no further progress."""
    while True:
        variant_sequences, merged_any = greedy_merge_helper(
            variant_sequences,
            min_overlap_size=min_overlap_size)
        if not merged_any:
            return variant_sequences
Greedily merge overlapping sequences into longer sequences .
91
10
1,317
def collapse_substrings(variant_sequences):
    """Collapse sequences fully contained in longer sequences.

    Each dropped substring donates its supporting reads to the first
    (longest) kept sequence that contains it.

    Fixes a bug where the found-superstring flag was overwritten on
    every inner-loop iteration, so a sequence contained by an earlier
    kept sequence was still appended to the results unless the *last*
    kept sequence also happened to contain it.
    """
    if len(variant_sequences) <= 1:
        # with fewer than two sequences there is nothing to collapse
        return variant_sequences
    # reads each kept sequence absorbs from its dropped substrings
    extra_reads_from_substrings = defaultdict(set)
    result_list = []
    # longest first, so superstrings are kept before their substrings
    for candidate in sorted(variant_sequences, key=lambda seq: -len(seq)):
        for kept in result_list:
            if kept.contains(candidate):
                extra_reads_from_substrings[kept].update(candidate.reads)
                break
        else:
            result_list.append(candidate)
    # attach the absorbed reads to each surviving sequence
    return [
        seq.add_reads(extra_reads_from_substrings[seq])
        for seq in result_list]
Combine shorter sequences which are fully contained in longer sequences .
266
12
1,318
def iterative_overlap_assembly(
        variant_sequences,
        min_overlap_size=MIN_VARIANT_SEQUENCE_ASSEMBLY_OVERLAP_SIZE):
    """Assemble longer sequences from variant-centered reads: collapse
    contained sequences, then greedily merge overlapping ones.

    Results are sorted by decreasing number of supporting reads.
    """
    if len(variant_sequences) <= 1:
        # fewer than two sequences: nothing to assemble
        return variant_sequences
    # collapsing substrings first shrinks the merge problem
    n_before = len(variant_sequences)
    variant_sequences = collapse_substrings(variant_sequences)
    logger.info(
        "Collapsed %d -> %d sequences",
        n_before,
        len(variant_sequences))
    merged = greedy_merge(variant_sequences, min_overlap_size)
    return sorted(merged, key=lambda seq: -len(seq.reads))
Assembles longer sequences from reads centered on a variant by alternating between merging all pairs of overlapping sequences and collapsing shorter sequences onto every longer sequence which contains them.
226
31
1,319
def groupby(xs, key_fn):
    """Group the elements of `xs` into a dict of lists keyed by
    key_fn(element)."""
    grouped = defaultdict(list)
    for item in xs:
        grouped[key_fn(item)].append(item)
    return grouped
Group elements of the list xs by keys generated from calling key_fn .
44
16
1,320
def ortho_basis(normal, ref_vec=None):
    """Generate an orthonormal basis (on1, on2) for the plane
    perpendicular to the length-3 vector `normal`.

    If `ref_vec` is given it orients the basis; otherwise a random,
    suitably non-parallel reference vector is generated.

    Fix: uses numpy.random instead of `scipy.random`, an alias that was
    deprecated and then removed from SciPy.
    """
    # Imports for library functions
    import numpy as np
    from scipy import linalg as spla
    from ..const import PRM
    from ..error import VectorError

    # Magnitude of the perturbation from 'normal' in constructing a random rv
    RAND_MAG = 0.25

    # Test 'normal' for shape and length
    if not len(normal.shape) == 1:
        raise ValueError("'normal' is not a vector")
    if not normal.shape[0] == 3:
        raise ValueError("Length of 'normal' is not three")

    # Normalize to concise variable 'nv'
    nv = normal / spla.norm(normal)

    if ref_vec is None:
        # Generate a reference vector via a random perturbation that is
        # suitably non-parallel to nv, looping as needed
        rv = nv
        while parallel_check(nv, rv):
            rv = np.float64(
                1.0 - RAND_MAG + 2 * RAND_MAG * np.random.rand(3))
        # Reject the perturbed vector onto the normal, then normalize
        rv = rej(rv, nv)
        rv = rv / spla.norm(rv)
    else:
        # ref_vec specified; validate before use
        if not len(ref_vec.shape) == 1:
            raise ValueError("ref_vec is not a vector")
        if not ref_vec.shape[0] == 3:
            raise ValueError("ref_vec length is not three")
        rv = ref_vec / spla.norm(ref_vec)
        # Essentially equal or opposite vectors cannot orient the plane
        if parallel_check(nv, rv):
            raise VectorError(
                VectorError.NONPRL,
                "'normal' and 'ref_vec' are too nearly parallel.", "")

    # on2 is the unit vector parallel to nv x rv
    on2 = np.cross(nv, rv)
    on2 = on2 / spla.norm(on2)
    # on1 is on2 x nv (normalization should not be necessary here, but
    # is performed just in case)
    on1 = np.cross(on2, nv)
    on1 = on1 / spla.norm(on1)

    # Return the spanning vectors
    return on1, on2
Generates an orthonormal basis in the plane perpendicular to normal
628
13
1,321
def orthonorm_check(a, tol=_DEF.ORTHONORM_TOL, report=False):
    """Check orthonormality of the column vectors of a matrix.

    Returns (orth, n_fail, o_fail) when report=True, where n_fail lists
    non-normalized column indices and o_fail lists non-orthogonal column
    pairs; otherwise returns (orth, None, None).

    Fixes: replaces np.matrix and np.float_ (both deprecated; np.float_
    was removed in NumPy 2.0) with plain float64 arrays, and inlines the
    Kronecker delta instead of importing delta_fxn.
    """
    # Imports
    import numpy as np

    # Initialize return variables
    orth = True
    n_fail = []
    o_fail = []

    # Coerce to a 2-D float64 array; a 1-D vector is treated as a single
    # column vector. Anything with more than two dimensions is rejected;
    # real/all-numeric content is still only run-time checked.
    arr = np.asarray(a, dtype=np.float64)
    if arr.ndim == 1:
        arr = arr.reshape(-1, 1)
    elif arr.ndim != 2:
        raise ValueError("Input must be at most 2-D")

    # Loop over column pairs and check orthonormality.
    n_cols = arr.shape[1]
    for i in range(n_cols):
        for j in range(i, n_cols):
            expected = 1.0 if i == j else 0.0  # Kronecker delta
            if abs(float(arr[:, i] @ arr[:, j]) - expected) > tol:
                orth = False
                if report:
                    if i == j:
                        n_fail.append(i)
                    else:
                        o_fail.append((i, j))

    # Return results
    if report:
        return orth, n_fail, o_fail
    return orth, None, None
Checks orthonormality of the column vectors of a matrix .
416
14
1,322
def parallel_check(vec1, vec2):
    """True if vec1 and vec2 are parallel OR anti-parallel, i.e. their
    angle is within PRM.NON_PARALLEL_TOL of 0 or 180 degrees."""
    # Imports
    from ..const import PRM
    import numpy as np

    # Both arguments must be 1-D vectors of equal length
    for n, v in enumerate([vec1, vec2]):
        if not len(v.shape) == 1:
            raise ValueError("Bad shape for vector #{0}".format(n))
    if not vec1.shape[0] == vec2.shape[0]:
        raise ValueError("Vector length mismatch")

    # Near-zero or near-180-degree angle counts as (anti-)parallel
    angle = vec_angle(vec1, vec2)
    return min([abs(angle), abs(angle - 180.)]) < PRM.NON_PARALLEL_TOL
Checks whether two vectors are parallel OR anti - parallel .
186
12
1,323
def proj(vec, vec_onto):
    """Return the vector projection of `vec` onto `vec_onto`.

    Fix: np.asscalar was removed from NumPy (deprecated in 1.16,
    removed in 1.23); use ndarray.item() instead.
    """
    # Imports
    import numpy as np

    # Both arguments must be 1-D vectors of matching length
    if not len(vec.shape) == 1:
        raise ValueError("'vec' is not a vector")
    if not len(vec_onto.shape) == 1:
        raise ValueError("'vec_onto' is not a vector")
    if not vec.shape[0] == vec_onto.shape[0]:
        raise ValueError("Shape mismatch between vectors")

    # proj_u(v) = (v . u) / (u . u) * u
    scale = (np.float64(np.dot(vec.T, vec_onto).item()) /
             np.float64(np.dot(vec_onto.T, vec_onto).item()))
    return scale * vec_onto
Vector projection .
186
3
1,324
def rej(vec, vec_onto):
    """Return the vector rejection of `vec` from `vec_onto`: the
    component of `vec` orthogonal to `vec_onto`."""
    # Imports
    import numpy as np

    # rejection = original vector minus its projection
    return vec - proj(vec, vec_onto)
Vector rejection .
46
3
1,325
def vec_angle(vec1, vec2):
    """Return the angle in degrees between two equal-length vectors."""
    # Imports
    import numpy as np
    from scipy import linalg as spla
    from ..const import PRM

    # Shape / length validation
    if len(vec1.shape) != 1:
        raise ValueError("'vec1' is not a vector")
    if len(vec2.shape) != 1:
        raise ValueError("'vec2' is not a vector")
    if vec1.shape[0] != vec2.shape[0]:
        raise ValueError("Vector lengths are not equal")

    # Refuse near-zero vectors, whose direction is ill-defined
    if spla.norm(vec1) < PRM.ZERO_VEC_TOL:
        raise ValueError("'vec1' norm is too small")
    if spla.norm(vec2) < PRM.ZERO_VEC_TOL:
        raise ValueError("'vec2' norm is too small")

    # Compute in steps so the normalized dot product can be clamped
    # against >1 / <-1 values from numerical precision errors
    dotp = np.dot(vec1, vec2) / spla.norm(vec1) / spla.norm(vec2)
    if dotp > 1:
        return 0.  # pragma: no cover
    if dotp < -1:
        return 180.  # pragma: no cover
    return np.degrees(np.arccos(dotp))
Angle between two R - dimensional vectors .
330
9
1,326
def new_module(name):
    """Create an empty module, register it in sys.modules, and attach it
    as an attribute of its parent package (importing the parent first
    when `name` is dotted).

    Fix: the `imp` module was deprecated since Python 3.4 and removed in
    3.12; use types.ModuleType instead.
    """
    import types

    parent = None
    child_name = name
    if '.' in name:
        parent_name, _, child_name = name.rpartition('.')
        parent = __import__(parent_name, fromlist=[''])
    module = types.ModuleType(name)
    sys.modules[name] = module
    if parent:
        setattr(parent, child_name, module)
    return module
Do all of the gruntwork associated with creating a new module .
95
13
1,327
def allele_counts_dataframe(variant_and_allele_reads_generator):
    """Build a DataFrame of read counts supporting ref vs. alt alleles
    per variant, plus a 'gene' column of semicolon-joined gene names."""
    builder = DataFrameBuilder(
        AlleleCount,
        extra_column_fns={
            "gene": lambda variant, _: ";".join(variant.gene_names),
        })
    for variant, allele_reads in variant_and_allele_reads_generator:
        builder.add(
            variant,
            count_alleles_at_variant_locus(variant, allele_reads))
    return builder.to_dataframe()
Creates a DataFrame containing number of reads supporting the ref vs . alt alleles for each variant .
128
21
1,328
def install_extension(conn, extension: str):
    """Install a Postgres extension and verify it took effect.

    Raises psycopg2.ProgrammingError when installation failed.

    NOTE(review): AsIs interpolates the extension name into the
    statement unescaped; callers must not pass untrusted input here.
    """
    query = 'CREATE EXTENSION IF NOT EXISTS "%s";'
    with conn.cursor() as cursor:
        cursor.execute(query, (AsIs(extension),))
    if not check_extension(conn, extension):
        raise psycopg2.ProgrammingError(
            'Postgres extension failed installation.', extension)
Install Postgres extension .
88
5
1,329
def check_extension(conn, extension: str) -> bool:
    """Return True if `extension` is installed, False if it is merely
    available; raise ProgrammingError if it is not available at all."""
    query = 'SELECT installed_version FROM pg_available_extensions WHERE name=%s;'
    with conn.cursor() as cursor:
        cursor.execute(query, (extension,))
        row = cursor.fetchone()
        if row is None:
            raise psycopg2.ProgrammingError(
                'Extension is not available for installation.', extension)
        # installed_version is NULL (None) when not installed
        return bool(row[0])
Check to see if an extension is installed .
107
9
1,330
def make_iterable(obj, default=None):
    """Ensure `obj` is iterable: None becomes `default` (or an empty
    list); a bare string/integer is wrapped in a one-element list; any
    other value is passed through untouched."""
    if obj is None:
        return default if default else []
    scalar_types = (compat.string_types, compat.integer_types)
    return [obj] if isinstance(obj, scalar_types) else obj
Ensure obj is iterable .
50
7
1,331
def iter_documents(self, fileids=None, categories=None, _destroy=False):
    """Return a lazy iterator over corpus documents selected by
    fileids/categories.

    With _destroy=True each document is destroyed after being yielded.

    Fix: drops the Python-2-only `imap`; the builtin `map` is already
    lazy on Python 3.
    """
    doc_ids = self._filter_ids(fileids, categories)
    for doc in map(self.get_document, doc_ids):
        yield doc
        if _destroy:
            doc.destroy()
Return an iterator over corpus documents .
67
7
1,332
def _create_meta_cache(self):
    """Best-effort dump of document metadata to the cache file; write
    and pickling errors are silently ignored."""
    try:
        with open(self._cache_filename, 'wb') as cache_file:
            compat.pickle.dump(self._document_meta, cache_file, 1)
    except (IOError, compat.pickle.PickleError):
        pass
Try to dump metadata to a file .
64
8
1,333
def _load_meta_cache(self):
    """Best-effort load of document metadata from the cache file,
    removing the cache first when it is stale; all errors ignored."""
    try:
        if self._should_invalidate_cache():
            os.remove(self._cache_filename)
        else:
            with open(self._cache_filename, 'rb') as cache_file:
                self._document_meta = compat.pickle.load(cache_file)
    except (OSError, IOError, compat.pickle.PickleError,
            ImportError, AttributeError):
        pass
Try to load metadata from file .
101
7
1,334
def _compute_document_meta(self):
    """Scan the corpus XML and build an ordered id -> _DocumentMeta map
    (title, byte bounds, categories) for fast document lookups."""
    meta = OrderedDict()
    bounds_iter = xml_utils.bounds(
        self.filename,
        start_re=r'<text id="(\d+)"[^>]*name="([^"]*)"',
        end_re=r'</text>')
    for match, bounds in bounds_iter:
        doc_id = str(match.group(1))
        title = xml_utils.unescape_attribute(match.group(2))
        # parse the chunk so categories can be cached alongside bounds
        xml_data = xml_utils.load_chunk(self.filename, bounds)
        doc = Document(compat.ElementTree.XML(xml_data.encode('utf8')))
        meta[doc_id] = _DocumentMeta(title, bounds, doc.categories())
    return meta
Return documents' meta information that can be used for fast document lookups. Meta information consists of document titles, categories, and positions in the file.
185
26
1,335
def _document_xml(self, doc_id):
    """Return the parsed XML Element for the given document id."""
    raw = self._get_doc_by_raw_offset(str(doc_id))
    return compat.ElementTree.XML(raw.encode('utf8'))
Return xml Element for the document document_id .
57
10
1,336
def _get_doc_by_line_offset(self, doc_id):
    """Load a document chunk using line-offset information: much slower
    than raw byte offsets, but works everywhere."""
    doc_bounds = self._get_meta()[str(doc_id)].bounds
    return xml_utils.load_chunk(self.filename, doc_bounds, slow=True)
Load document from xml using line offset information . This is much slower than _get_doc_by_raw_offset but should work everywhere .
59
29
1,337
def _threeDdot_simple ( M , a ) : result = np . empty ( a . shape , dtype = a . dtype ) for i in range ( a . shape [ 0 ] ) : for j in range ( a . shape [ 1 ] ) : A = np . array ( [ a [ i , j , 0 ] , a [ i , j , 1 ] , a [ i , j , 2 ] ] ) . reshape ( ( 3 , 1 ) ) L = np . dot ( M , A ) result [ i , j , 0 ] = L [ 0 ] result [ i , j , 1 ] = L [ 1 ] result [ i , j , 2 ] = L [ 2 ] return result
Return Ma where M is a 3x3 transformation matrix for each pixel
153
14
1,338
def _swaplch ( LCH ) : try : # Numpy array L , C , H = np . dsplit ( LCH , 3 ) return np . dstack ( ( H , C , L ) ) except : # Tuple L , C , H = LCH return H , C , L
Reverse the order of an LCH numpy dstack or tuple for analysis .
66
18
1,339
def rgb_to_hsv(self, RGB):
    """Convert linear RGB to HSV (gamma-encoding the RGB first)."""
    gamma_encoded = self._gamma_rgb(RGB)
    return self._ABC_to_DEF_by_fn(gamma_encoded, rgb_to_hsv)
linear rgb to hsv
51
5
1,340
def hsv_to_rgb(self, HSV):
    """Convert HSV to linear RGB (removing the gamma at the end)."""
    gamma_encoded = self._ABC_to_DEF_by_fn(HSV, hsv_to_rgb)
    return self._ungamma_rgb(gamma_encoded)
hsv to linear rgb
56
5
1,341
def image2working(self, i):
    """Convert images `i` from the image color space into the working
    color space."""
    convert = self.colorspace.convert
    return convert(self.image_space, self.working_space, i)
Transform images i provided into the specified working color space .
32
11
1,342
def working2analysis(self, r):
    """Convert working-space inputs to the analysis color space, then
    fix up the polar (HSV-order) channel layout for that space."""
    converted = self.colorspace.convert(
        self.working_space, self.analysis_space, r)
    reorder = self.swap_polar_HSVorder[self.analysis_space]
    return reorder(converted)
Transform working space inputs to the analysis color space .
54
10
1,343
def analysis2working(self, a):
    """Convert analysis-space data back to the working color space,
    undoing the polar (HSV-order) channel reorder first."""
    reorder = self.swap_polar_HSVorder[self.analysis_space]
    unswapped = reorder(a)
    return self.colorspace.convert(
        self.analysis_space, self.working_space, unswapped)
Convert back from the analysis color space to the working space .
54
13
1,344
def load_chunk(filename, bounds, encoding='utf8', slow=False):
    """Read the chunk of `filename` described by `bounds`.

    slow=True delegates to the line-offset based loader; otherwise the
    byte offsets in `bounds` are used directly.
    """
    if slow:
        return _load_chunk_slow(filename, bounds, encoding)
    n_bytes = bounds.byte_end - bounds.byte_start
    with open(filename, 'rb') as f:
        f.seek(bounds.byte_start)
        return f.read(n_bytes).decode(encoding)
Load a chunk from file using Bounds info . Pass slow = True for an alternative loading method based on line numbers .
87
24
1,345
def generate_numeric_range(items, lower_bound, upper_bound):
    """Yield (label, NumericRange) pairs for the quantiles of `items`
    between the given bounds, ready for Postgres insertion."""
    quantile_grid = create_quantiles(items, lower_bound, upper_bound)
    labels, bounds = zip(*quantile_grid)
    return (
        (label, NumericRange(*bound))
        for label, bound in zip(labels, bounds))
Generate postgresql numeric range and label for insertion .
81
12
1,346
def edge_average(a):
    """Return the mean of the values around the border of a 2-D array
    (a single-element input just returns that element)."""
    if len(np.ravel(a)) < 2:
        return float(a[0])
    top = a[0]
    bottom = a[-1]
    # interior rows only, so corners are not double-counted
    left = a[1:-1, 0]
    right = a[1:-1, -1]
    total = np.sum(top) + np.sum(bottom) + np.sum(left) + np.sum(right)
    count = len(top) + len(bottom) + len(left) + len(right)
    return float(total) / count
Return the mean value around the edge of an array .
157
11
1,347
def _process_channels ( self , p , * * params_to_override ) : orig_image = self . _image for i in range ( len ( self . _channel_data ) ) : self . _image = self . _original_channel_data [ i ] self . _channel_data [ i ] = self . _reduced_call ( * * params_to_override ) self . _image = orig_image return self . _channel_data
Add the channel information to the channel_data attribute .
103
11
1,348
def set_matrix_dimensions(self, *args):
    """Invalidate the cached image, then defer to the parent class."""
    # changing the matrix dimensions makes the cached image stale
    self._image = None
    super(FileImage, self).set_matrix_dimensions(*args)
Subclassed to delete the cached image when matrix dimensions are changed .
40
14
1,349
def _load_pil_image(self, filename):
    """Load an image with PIL: the grayscale version becomes _image and
    any per-channel data is normalized and stored channel by channel."""
    self._channel_data = []
    self._original_channel_data = []
    im = Image.open(filename)
    self._image = ImageOps.grayscale(im)
    im.load()
    pixels = np.asarray(im, float)
    pixels = pixels / pixels.max()
    # if the image has more than one channel, load them
    if len(pixels.shape) == 3:
        for channel in range(pixels.shape[2]):
            self._channel_data.append(pixels[:, :, channel])
            self._original_channel_data.append(pixels[:, :, channel])
Load image using PIL .
181
6
1,350
def _load_npy ( self , filename ) : self . _channel_data = [ ] self . _original_channel_data = [ ] file_channel_data = np . load ( filename ) file_channel_data = file_channel_data / file_channel_data . max ( ) for i in range ( file_channel_data . shape [ 2 ] ) : self . _channel_data . append ( file_channel_data [ : , : , i ] ) self . _original_channel_data . append ( file_channel_data [ : , : , i ] ) self . _image = file_channel_data . sum ( 2 ) / file_channel_data . shape [ 2 ]
Load image using Numpy .
154
6
1,351
def ok_kwarg(val):
    """True when `val` is a string usable as a keyword-argument name:
    a valid identifier that is not a reserved word."""
    import keyword
    try:
        is_identifier = str.isidentifier(val)
    except TypeError:
        # Non-string values are never a valid keyword arg
        return False
    return is_identifier and not keyword.iskeyword(val)
Helper method for screening keyword arguments
49
6
1,352
def run(delayed, concurrency, version_type=None, queue=None,
        raise_on_error=True):
    """Run bulk record indexing, either inline or as `concurrency`
    delayed Celery tasks."""
    bulk_kwargs = {'raise_on_error': raise_on_error}
    if delayed:
        celery_kwargs = {
            'kwargs': {
                'version_type': version_type,
                'es_bulk_kwargs': bulk_kwargs,
            }
        }
        click.secho(
            'Starting {0} tasks for indexing records...'.format(concurrency),
            fg='green')
        if queue is not None:
            celery_kwargs.update({'queue': queue})
        for _ in range(concurrency):
            process_bulk_queue.apply_async(**celery_kwargs)
    else:
        click.secho('Indexing records...', fg='green')
        RecordIndexer(version_type=version_type).process_bulk_queue(
            es_bulk_kwargs=bulk_kwargs)
Run bulk record indexing .
230
6
1,353
def reindex(pid_type):
    """Queue every registered record of the given pid types for
    reindexing."""
    click.secho('Sending records to indexing queue ...', fg='green')
    uuid_rows = (
        PersistentIdentifier.query
        .filter_by(object_type='rec', status=PIDStatus.REGISTERED)
        .filter(PersistentIdentifier.pid_type.in_(pid_type))
        .values(PersistentIdentifier.object_uuid))
    RecordIndexer().bulk_index(row[0] for row in uuid_rows)
    click.secho('Execute "run" command to process the queue!', fg='yellow')
Reindex all records .
141
5
1,354
def process_actions(actions):
    """Apply each queue action in turn to the indexer message queue."""
    queue = current_app.config['INDEXER_MQ_QUEUE']
    with establish_connection() as conn:
        q = queue(conn)
        for action in actions:
            q = action(q)
Process queue actions .
54
4
1,355
def init_queue():
    """Return a queue action that declares the indexing queue."""
    def _declare(queue):
        queue.declare()
        click.secho('Indexing queue has been initialized.', fg='green')
        return queue
    return _declare
Initialize indexing queue .
43
6
1,356
def purge_queue():
    """Return a queue action that purges the indexing queue."""
    def _purge(queue):
        queue.purge()
        click.secho('Indexing queue has been purged.', fg='green')
        return queue
    return _purge
Purge indexing queue .
44
6
1,357
def delete_queue():
    """Return a queue action that deletes the indexing queue."""
    def _delete(queue):
        queue.delete()
        click.secho('Indexing queue has been deleted.', fg='green')
        return queue
    return _delete
Delete indexing queue .
43
5
1,358
def variant_matches_reference_sequence(variant, ref_seq_on_transcript, strand):
    """Check that the reference bases we expect to see for `variant` are
    what the transcript actually carries (reverse-complementing the
    transcript sequence on the '-' strand first)."""
    expected = ref_seq_on_transcript
    if strand == "-":
        expected = reverse_complement_dna(expected)
    return expected == variant.ref
Make sure that reference nucleotides we expect to see on the reference transcript from a variant are the same ones we encounter .
69
25
1,359
def from_variant_and_transcript(cls, variant, transcript, context_size):
    """Extract the reference sequence around a variant locus on a transcript.

    Returns a ReferenceSequenceKey, or None when the transcript has no
    sequence, its sequence is too short, or its reference bases disagree
    with the variant's ref allele (e.g. the variant spans a splice junction).

    Fix: ``logger.warn`` is a deprecated alias — use ``logger.warning``.
    """
    full_transcript_sequence = transcript.sequence
    if full_transcript_sequence is None:
        logger.warning(
            "Expected transcript %s (overlapping %s) to have sequence",
            transcript.name,
            variant)
        return None
    # Interbase range of offsets capturing all reference bases modified
    # by the variant.
    variant_start_offset, variant_end_offset = \
        interbase_range_affected_by_variant_on_transcript(
            variant=variant,
            transcript=transcript)
    reference_cdna_at_variant = full_transcript_sequence[
        variant_start_offset:variant_end_offset]
    if not variant_matches_reference_sequence(
            variant=variant,
            strand=transcript.strand,
            ref_seq_on_transcript=reference_cdna_at_variant):
        logger.warning(
            "Variant %s doesn't match reference sequence on transcript %s: "
            "may span splice junction",
            variant,
            transcript)
        return None
    if len(full_transcript_sequence) < 6:
        # need at least 6 nucleotides for a start and stop codon
        logger.warning(
            "Sequence of %s (overlapping %s) too short: %d",
            transcript,
            variant,
            len(full_transcript_sequence))
        return None
    logger.info(
        "Interbase offset range on %s for variant %s = %d:%d",
        transcript.name,
        variant,
        variant_start_offset,
        variant_end_offset)
    reference_cdna_before_variant = full_transcript_sequence[
        max(0, variant_start_offset - context_size):variant_start_offset]
    reference_cdna_after_variant = full_transcript_sequence[
        variant_end_offset:variant_end_offset + context_size]
    return ReferenceSequenceKey(
        strand=transcript.strand,
        sequence_before_variant_locus=reference_cdna_before_variant,
        sequence_at_variant_locus=reference_cdna_at_variant,
        sequence_after_variant_locus=reference_cdna_after_variant)
Extracts the reference sequence around a variant locus on a particular transcript .
485
16
1,360
def wrap(lower, upper, x):
    """Circularly alias the numeric value x into the range [lower, upper).

    Works on scalars and numpy arrays.  For most uses Python's ``%``
    operator is simpler (e.g. ``x % (2*pi)`` wraps into [0, 2*pi)).
    """
    span = upper - lower
    # Shift x enough positive multiples of 2*span to make fmod behave,
    # then take the remainder and shift back into [lower, upper).
    shifted = x - lower + 2 * span * (1 - np.floor(x / (2 * span)))
    return lower + np.fmod(shifted, span)
Circularly alias the numeric value x into the range [ lower upper ) .
125
16
1,361
def _pixelsize ( self , p ) : xpixelsize = 1. / float ( p . xdensity ) ypixelsize = 1. / float ( p . ydensity ) return max ( [ xpixelsize , ypixelsize ] )
Calculate line width necessary to cover at least one pixel on all axes .
54
16
1,362
def _count_pixels_on_line(self, y, p):
    """Count the number of pixels rendered on this line at offsets ``y``."""
    rendered = line(y, self._effective_thickness(p), 0.0)
    return rendered.sum()
Count the number of pixels rendered on this line .
46
10
1,363
def num_channels(self):
    """Return the number of channels of the current (or first) generator.

    Falls back to the first generator when no index is selected, and to 0
    when there are no generators at all.
    """
    if self.inspect_value('index') is not None:
        return self.get_current_generator().num_channels()
    if len(self.generators) > 0:
        return self.generators[0].num_channels()
    return 0
Get the number of channels in the input generators .
71
10
1,364
def _set_frequency_spacing ( self , min_freq , max_freq ) : self . frequency_spacing = np . linspace ( min_freq , max_freq , num = self . _sheet_dimensions [ 0 ] + 1 , endpoint = True )
Frequency spacing to use i . e . how to map the available frequency range to the discrete sheet rows .
64
22
1,365
def get_postgres_encoding(python_encoding: str) -> str:
    """Translate a Python codec name into its PostgreSQL encoding name."""
    # Normalize, resolve through the stdlib alias table, then strip
    # underscores to match the PG_ENCODING_MAP keys.
    normalized = normalize_encoding(python_encoding.lower())
    canonical = aliases.aliases[normalized.replace('_', '', 1)].upper()
    return PG_ENCODING_MAP[canonical.replace('_', '')]
Python to postgres encoding map .
90
7
1,366
def en_last(self):
    """Report the last SCF energy for each key, or None if a list is empty."""
    return {key: (vals[-1] if vals != [] else None)
            for key, vals in self.en.items()}
Report the energies from the last SCF present in the output .
82
13
1,367
def connect(host=None, database=None, user=None, password=None, **kwargs):
    """Create a psycopg2 connection, falling back to PG* environment variables.

    Raises KeyError if a parameter is omitted and its environment variable
    is unset.
    """
    return psycopg2.connect(
        host=host or os.environ['PGHOST'],
        database=database or os.environ['PGDATABASE'],
        user=user or os.environ['PGUSER'],
        password=password or os.environ['PGPASSWORD'],
        **kwargs)
Create a database connection .
118
5
1,368
def _setup():
    """Configure the module socket for broadcast and start the UDP listener."""
    _SOCKET.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    _SOCKET.bind(('', PORT))
    listener = threading.Thread(target=_listen, daemon=True)
    listener.start()
Set up module .
71
4
1,369
def discover(timeout=DISCOVERY_TIMEOUT):
    """Discover devices on the local network.

    Broadcasts the discovery payload and collects responses from the
    module-level buffer; returns a dict of device entries keyed by host.
    """
    hosts = {}
    payload = MAGIC + DISCOVERY
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
        deadline = time.time() + timeout
        while time.time() < deadline:
            for host, data in _BUFFER.copy().items():
                if not _is_discovery_response(data):
                    continue
                if host not in hosts:
                    _LOGGER.debug("Discovered device at %s", host)
                    hosts[host] = {
                        'mac': data[7:13],
                        'imac': data[19:25],
                        'next': 0,
                        'st': int(data[-1]),
                        'time': _device_time(data[37:41]),
                        'serverTime': int(time.time()),
                    }
    return hosts
Discover devices on the local network .
237
7
1,370
def _discover_mac(self):
    """Discover this device's MAC address (and its reversed form).

    Raises S20Exception when the device does not respond.
    """
    cmd = MAGIC + DISCOVERY
    resp = self._udp_transact(cmd, self._discovery_resp,
                              broadcast=True, timeout=DISCOVERY_TIMEOUT)
    if not resp:
        raise S20Exception("Couldn't discover {}".format(self.host))
    mac, mac_reversed = resp
    if mac is None:
        raise S20Exception("Couldn't discover {}".format(self.host))
    return (mac, mac_reversed)
Discovers MAC address of device .
113
8
1,371
def _subscribe(self):
    """Subscribe to the device; return True if it reports state ON.

    Raises S20Exception when no status response arrives.
    """
    cmd = (MAGIC + SUBSCRIBE + self._mac + PADDING_1 +
           self._mac_reversed + PADDING_1)
    status = self._udp_transact(cmd, self._subscribe_resp)
    if status is None:
        raise S20Exception(
            "No status could be found for {}".format(self.host))
    self.last_subscribed = time.time()
    return status == ON
Subscribe to the device .
111
5
1,372
def _control(self, state):
    """Send a new on/off state to the device, renewing the subscription first."""
    if not self._subscription_is_recent():
        self._subscribe()
    cmd = MAGIC + CONTROL + self._mac + PADDING_1 + PADDING_2 + state
    _LOGGER.debug("Sending new state to %s: %s", self.host, ord(state))
    ack_state = self._udp_transact(cmd, self._control_resp, state)
    if ack_state is None:
        raise S20Exception(
            "Device didn't acknowledge control request: {}".format(self.host))
Control device state .
144
4
1,373
def _discovery_resp(self, data):
    """Extract (mac, reversed_mac) from a discovery response, if valid."""
    if _is_discovery_response(data):
        _LOGGER.debug("Discovered MAC of %s: %s", self.host,
                      binascii.hexlify(data[7:13]).decode())
        return (data[7:13], data[19:25])
Handle a discovery response .
83
5
1,374
def _subscribe_resp(self, data):
    """Extract the one-byte device state from a subscribe response."""
    if _is_subscribe_response(data):
        status = bytes([data[23]])
        _LOGGER.debug("Successfully subscribed to %s, state: %s",
                      self.host, ord(status))
        return status
Handle a subscribe response .
66
5
1,375
def _control_resp(self, data, state):
    """Return the acknowledged state byte when it matches ``state``."""
    if _is_control_response(data):
        ack_state = bytes([data[22]])
        if state == ack_state:
            _LOGGER.debug("Received state ack from %s, state: %s",
                          self.host, ord(ack_state))
            return ack_state
Handle a control response .
85
5
1,376
def _udp_transact(self, payload, handler, *args,
                  broadcast=False, timeout=TIMEOUT):
    """Send a payload and feed buffered responses to ``handler``.

    Returns the first truthy handler result, or None after all retries
    time out.
    """
    # Drop any stale response for this host before sending.
    if self.host in _BUFFER:
        del _BUFFER[self.host]
    dest = '255.255.255.255' if broadcast else self.host
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), (dest, PORT))
        deadline = time.time() + timeout
        while time.time() < deadline:
            data = _BUFFER.get(self.host, None)
            if data:
                result = handler(data, *args)
                # Return as soon as a response is received
                if result:
                    return result
Complete a UDP transaction .
162
5
1,377
def load(source):
    """Parse an OpenCorpora XML corpus and return its root element."""
    return etree.parse(source, parser=get_xml_parser()).getroot()
Load OpenCorpora corpus .
32
6
1,378
def translation_generator(
        variant_sequences,
        reference_contexts,
        min_transcript_prefix_length,
        max_transcript_mismatches,
        include_mismatches_after_variant,
        protein_sequence_length=None):
    """Yield a Translation for every (variant sequence, reference context)
    pair that can be placed in a reading frame; unplaceable pairs are skipped.
    """
    for reference_context in reference_contexts:
        for variant_sequence in variant_sequences:
            translation = Translation.from_variant_sequence_and_reference_context(
                variant_sequence=variant_sequence,
                reference_context=reference_context,
                min_transcript_prefix_length=min_transcript_prefix_length,
                max_transcript_mismatches=max_transcript_mismatches,
                include_mismatches_after_variant=include_mismatches_after_variant,
                protein_sequence_length=protein_sequence_length)
            if translation is not None:
                yield translation
Given all detected VariantSequence objects for a particular variant and all the ReferenceContext objects for that locus translate multiple protein sequences up to the number specified by the argument max_protein_sequences_per_variant .
185
45
1,379
def translate_variant_reads(
        variant,
        variant_reads,
        protein_sequence_length,
        transcript_id_whitelist=None,
        min_alt_rna_reads=MIN_ALT_RNA_READS,
        min_variant_sequence_coverage=MIN_VARIANT_SEQUENCE_COVERAGE,
        min_transcript_prefix_length=MIN_TRANSCRIPT_PREFIX_LENGTH,
        max_transcript_mismatches=MAX_REFERENCE_TRANSCRIPT_MISMATCHES,
        include_mismatches_after_variant=INCLUDE_MISMATCHES_AFTER_VARIANT,
        variant_sequence_assembly=VARIANT_SEQUENCE_ASSEMBLY):
    """Assemble cDNA sequences from a variant's alt reads and translate them.

    Returns a (possibly empty) list of Translation objects.
    """
    if len(variant_reads) == 0:
        logger.info("No supporting reads for variant %s", variant)
        return []
    # One extra codon of cDNA in case nucleotides must be clipped at the
    # start/end of the sequence.
    cdna_sequence_length = (protein_sequence_length + 1) * 3
    variant_sequences = reads_to_variant_sequences(
        variant=variant,
        reads=variant_reads,
        preferred_sequence_length=cdna_sequence_length,
        min_alt_rna_reads=min_alt_rna_reads,
        min_variant_sequence_coverage=min_variant_sequence_coverage,
        variant_sequence_assembly=variant_sequence_assembly)
    if not variant_sequences:
        logger.info("No spanning cDNA sequences for variant %s", variant)
        return []
    # Translate all sequences against the same set of ReferenceContext
    # objects; using the longest prefix as the context size keeps it
    # compatible with every sequence (shorter ones get trimmed).
    context_size = max(
        len(variant_sequence.prefix)
        for variant_sequence in variant_sequences)
    reference_contexts = reference_contexts_for_variant(
        variant,
        context_size=context_size,
        transcript_id_whitelist=transcript_id_whitelist)
    return list(translation_generator(
        variant_sequences=variant_sequences,
        reference_contexts=reference_contexts,
        min_transcript_prefix_length=min_transcript_prefix_length,
        max_transcript_mismatches=max_transcript_mismatches,
        include_mismatches_after_variant=include_mismatches_after_variant,
        protein_sequence_length=protein_sequence_length))
Given a variant and its associated alt reads construct variant sequences and translate them into Translation objects .
582
18
1,380
def as_translation_key(self):
    """Project this object down to a TranslationKey (fewer fields, usable
    as a dictionary key)."""
    field_values = {name: getattr(self, name)
                    for name in TranslationKey._fields}
    return TranslationKey(**field_values)
Project Translation object or any other derived class into just a TranslationKey which has fewer fields and can be used as a dictionary key .
36
26
1,381
def from_variant_sequence_and_reference_context(
        cls,
        variant_sequence,
        reference_context,
        min_transcript_prefix_length,
        max_transcript_mismatches,
        include_mismatches_after_variant,
        protein_sequence_length=None):
    """Translate a single VariantSequence using the reading frame from a
    single ReferenceContext.

    Returns a Translation, or None when no reading frame can be determined
    or truncating to ``protein_sequence_length`` would lose all variant
    residues.

    Fix: ``logger.warn`` is a deprecated alias — use ``logger.warning``.
    """
    variant_sequence_in_reading_frame = match_variant_sequence_to_reference_context(
        variant_sequence,
        reference_context,
        min_transcript_prefix_length=min_transcript_prefix_length,
        max_transcript_mismatches=max_transcript_mismatches,
        include_mismatches_after_variant=include_mismatches_after_variant)
    if variant_sequence_in_reading_frame is None:
        logger.info("Unable to determine reading frame for %s", variant_sequence)
        return None
    cdna_sequence = variant_sequence_in_reading_frame.cdna_sequence
    cdna_codon_offset = variant_sequence_in_reading_frame.offset_to_first_complete_codon
    # Offsets into the cDNA sequence which pick out the variant nucleotides.
    cdna_variant_start_offset = variant_sequence_in_reading_frame.variant_cdna_interval_start
    cdna_variant_end_offset = variant_sequence_in_reading_frame.variant_cdna_interval_end
    # TODO: determine if the first codon is the start codon of a
    # transcript, for now any of the unusual start codons like CTG
    # will translate to leucine instead of methionine.
    variant_amino_acids, ends_with_stop_codon = translate_cdna(
        cdna_sequence[cdna_codon_offset:],
        first_codon_is_start=False,
        mitochondrial=reference_context.mitochondrial)
    variant_aa_interval_start, variant_aa_interval_end, frameshift = \
        find_mutant_amino_acid_interval(
            cdna_sequence=cdna_sequence,
            cdna_first_codon_offset=cdna_codon_offset,
            cdna_variant_start_offset=cdna_variant_start_offset,
            cdna_variant_end_offset=cdna_variant_end_offset,
            n_ref=len(reference_context.sequence_at_variant_locus),
            n_amino_acids=len(variant_amino_acids))
    if protein_sequence_length and len(variant_amino_acids) > protein_sequence_length:
        if protein_sequence_length <= variant_aa_interval_start:
            logger.warning(
                ("Truncating amino acid sequence %s "
                 "to only %d elements loses all variant residues"),
                variant_amino_acids,
                protein_sequence_length)
            return None
        # If the protein is too long then shorten it, which implies we're no
        # longer stopping due to a stop codon and the variant interval end
        # may need clamping.
        variant_amino_acids = variant_amino_acids[:protein_sequence_length]
        variant_aa_interval_end = min(
            variant_aa_interval_end, protein_sequence_length)
        ends_with_stop_codon = False
    return Translation(
        amino_acids=variant_amino_acids,
        frameshift=frameshift,
        ends_with_stop_codon=ends_with_stop_codon,
        variant_aa_interval_start=variant_aa_interval_start,
        variant_aa_interval_end=variant_aa_interval_end,
        untrimmed_variant_sequence=variant_sequence,
        reference_context=reference_context,
        variant_sequence_in_reading_frame=variant_sequence_in_reading_frame)
Attempt to translate a single VariantSequence using the reading frame from a single ReferenceContext .
853
18
1,382
def postComponents(self, name, status, **kwargs):
    """Create a new component via the API."""
    kwargs.update(name=name, status=status)
    return self.__postRequest('/components', kwargs)
Create a new component .
54
5
1,383
def postIncidents(self, name, message, status, visible, **kwargs):
    """Create a new incident via the API."""
    kwargs.update(name=name, message=message, status=status, visible=visible)
    return self.__postRequest('/incidents', kwargs)
Create a new incident .
78
5
1,384
def postMetrics(self, name, suffix, description, default_value, **kwargs):
    """Create a new metric via the API."""
    kwargs.update(name=name, suffix=suffix, description=description,
                  default_value=default_value)
    return self.__postRequest('/metrics', kwargs)
Create a new metric .
85
5
1,385
def postMetricsPointsByID(self, id, value, **kwargs):
    """Add a metric point to the metric with the given id."""
    kwargs['value'] = value
    return self.__postRequest('/metrics/%s/points' % id, kwargs)
Add a metric point to a given metric .
54
9
1,386
def ctr_mass(geom, masses):
    """Calculate the center of mass of the indicated geometry.

    Parameters
    ----------
    geom : length-3N 1-D array of coordinates
    masses : length-N or length-3N 1-D array of atomic masses

    Returns
    -------
    length-3 center-of-mass vector

    Raises
    ------
    ValueError : on non-vector inputs or inconsistent lengths

    Fix: removed the unused ``from .base import safe_cast as scast`` import.
    """
    import numpy as np

    # Shape checks
    if len(geom.shape) != 1:
        raise ValueError("Geometry is not a vector")
    if len(masses.shape) != 1:
        raise ValueError("Masses cannot be parsed as a vector")
    if not geom.shape[0] % 3 == 0:
        raise ValueError("Geometry is not length-3N")
    if geom.shape[0] != 3 * masses.shape[0] and geom.shape[0] != masses.shape[0]:
        raise ValueError("Inconsistent geometry and masses vector lengths")

    # If N masses are provided, expand to 3N; if 3N, retain.
    if geom.shape[0] == 3 * masses.shape[0]:
        masses = masses.repeat(3)

    # Mass-weight the coordinates, group by coordinate column-wise, sum each
    # column, then divide by the total mass; the masses vector holds three
    # (possibly perturbed) copies of each atom's mass, hence the /3.
    ctr = np.multiply(geom, masses).reshape(
        (geom.shape[0] // 3, 3)).sum(axis=0).squeeze() / (masses.sum() / 3)
    return ctr
Calculate the center of mass of the indicated geometry .
336
12
1,387
def ctr_geom(geom, masses):
    """Return the geometry shifted to its center of mass.

    Shape/consistency errors in ``geom`` or ``masses`` are raised by the
    internal ``ctr_mass`` call.

    Fix: ``np.tile`` requires an integer repeat count; the original
    ``geom.shape[0] / 3`` is a float on Python 3 and raises TypeError.
    Use floor division instead.
    """
    import numpy as np

    shift = np.tile(ctr_mass(geom, masses), geom.shape[0] // 3)
    return geom - shift
Returns geometry shifted to center of mass .
97
8
1,388
def inertia_tensor(geom, masses):
    """Generate the 3x3 moment-of-inertia tensor of the system."""
    import numpy as np

    # Center the geometry; bad shapes of geom or masses are rejected inside
    # the ctr_mass call made by ctr_geom.
    geom = ctr_geom(geom, masses)

    # Expand per-atom masses to per-coordinate if needed; shape is only ever
    # (N,) or (3N,) at this point.
    if geom.shape[0] == 3 * masses.shape[0]:
        masses = masses.repeat(3)

    tensor = np.zeros((3, 3))
    for i in range(3):
        for j in range(i, 3):
            if i == j:
                # On-diagonal: sum of m * (squares of the other two
                # coordinates) over all atoms.
                ind = np.concatenate(
                    [np.array(list(map(lambda v: v % 3,
                                       range(i + 1, i + 3)))) + o
                     for o in range(0, geom.shape[0], 3)])
                tensor[i, i] = np.multiply(
                    np.square(geom[ind]), masses[ind]).sum()
            else:
                # Off-diagonal: -sum sqrt(m_i*m_j) * q_i * q_j, mirrored into
                # the symmetric partner element.
                ind_i = np.array(range(i, geom.shape[0] + i, 3))
                ind_j = np.array(range(j, geom.shape[0] + j, 3))
                tensor[i, j] = np.multiply(
                    np.sqrt(np.multiply(masses[ind_i], masses[ind_j])),
                    np.multiply(geom[ind_i], geom[ind_j])).sum() * -1
                tensor[j, i] = tensor[i, j]
    return tensor
Generate the 3x3 moment - of - inertia tensor .
464
14
1,389
def rot_consts(geom, masses, units=_EURC.INV_INERTIA, on_tol=_DEF.ORTHONORM_TOL):
    """Rotational constants for a given molecular system, in chosen units."""
    import numpy as np
    from ..const import EnumTopType as ETT, EnumUnitsRotConst as EURC, PRM, PHYS

    if units not in EURC:
        raise ValueError("'{0}' is not a valid units value".format(units))

    # Principal moments, axes and top type; geom and masses are proofed
    # internally by this call.
    mom, ax, top = principals(geom, masses, on_tol)

    # Special cases: clamp zero moments to the zero-moment threshold.
    if top == ETT.ATOM:
        mom = np.repeat(PRM.ZERO_MOMENT_TOL, 3)
    elif top == ETT.LINEAR:
        mom[0] = PRM.ZERO_MOMENT_TOL

    # Convert to the requested units.
    if units == EURC.INV_INERTIA:        # 1/(amu*B^2)
        rc = 1.0 / (2.0 * mom)
    elif units == EURC.ANGFREQ_ATOMIC:   # 1/Ta
        rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU)
    elif units == EURC.ANGFREQ_SECS:     # 1/s
        rc = PHYS.PLANCK_BAR / (2.0 * mom * PHYS.ME_PER_AMU) / PHYS.SEC_PER_TA
    elif units == EURC.CYCFREQ_ATOMIC:   # cyc/Ta
        rc = PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU)
    elif units == EURC.CYCFREQ_HZ:       # cyc/s
        rc = (PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) /
              PHYS.SEC_PER_TA)
    elif units == EURC.CYCFREQ_MHZ:      # Mcyc/s
        rc = (PHYS.PLANCK_BAR / (4.0 * np.pi * mom * PHYS.ME_PER_AMU) /
              PHYS.SEC_PER_TA / 1.0e6)
    elif units == EURC.WAVENUM_ATOMIC:   # cyc/B
        rc = (PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) /
              (8.0 * np.pi ** 2.0 * PHYS.LIGHT_SPEED))
    elif units == EURC.WAVENUM_CM:       # cyc/cm
        rc = (PHYS.PLANCK / (mom * PHYS.ME_PER_AMU) /
              (8.0 * np.pi ** 2.0 * PHYS.LIGHT_SPEED * PHYS.ANG_PER_BOHR) *
              1.0e8)
    else:  # pragma: no cover -- Valid units; not implemented
        raise NotImplementedError("Units conversion not yet implemented.")
    return rc
Rotational constants for a given molecular system .
768
9
1,390
def _fadn_orth(vec, geom):
    """First non-zero atomic displacement non-orthogonal to ``vec``.

    Raises ValueError on bad shapes or a near-zero reference vector, and
    InertiaError when no suitable displacement exists (atom, linear, or
    planar system).
    """
    import numpy as np
    from scipy import linalg as spla
    from ..const import PRM
    from ..error import InertiaError
    from .vector import orthonorm_check as onchk

    # Shape proofing
    if not (len(geom.shape) == 1 and geom.shape[0] % 3 == 0):
        raise ValueError("Geometry is not length 3N")
    if not vec.shape == (3,):
        raise ValueError("Reference vector is not length 3")
    if spla.norm(vec) < PRM.ZERO_VEC_TOL:
        raise ValueError("Reference vector norm is too small")

    vec = vec / spla.norm(vec)

    # Return the first displacement that is nonzero and NOT orthonormal to
    # vec; the trailing [0] of onchk picks out the success/fail bool.
    for disp in geom.reshape((geom.shape[0] // 3, 3)):
        if spla.norm(disp) >= PRM.ZERO_VEC_TOL and not onchk(
                np.column_stack((disp / spla.norm(disp),
                                 vec / spla.norm(vec))))[0]:
            return disp / spla.norm(disp)

    # Nothing fit the bill - must be atom, linear, or planar
    raise InertiaError(
        InertiaError.BAD_GEOM,
        "No suitable atomic displacement found", "")
First non - zero Atomic Displacement Non - Orthogonal to Vec
404
15
1,391
def _fadn_par(vec, geom):
    """First non-zero atomic displacement that is non-parallel with ``vec``.

    Raises ValueError on bad shapes or a near-zero reference vector, and
    InertiaError when every displacement is parallel (linear molecule).

    Fix: removed the unused ``import numpy as np``.
    """
    from scipy import linalg as spla
    from ..const import PRM
    from ..error import InertiaError
    from .vector import parallel_check as parchk

    # Shape proofing
    if not (len(geom.shape) == 1 and geom.shape[0] % 3 == 0):
        raise ValueError("Geometry is not length 3N")
    if not vec.shape == (3,):
        raise ValueError("Reference vector is not length 3")
    if spla.norm(vec) < PRM.ZERO_VEC_TOL:
        raise ValueError("Reference vector norm is too small")

    vec = vec / spla.norm(vec)

    for disp in geom.reshape((geom.shape[0] // 3, 3)):
        # First displacement that is nonzero and not parallel to the ref vec
        if spla.norm(disp) >= PRM.ZERO_VEC_TOL and not parchk(
                disp.reshape(3), vec):
            out_vec = disp / spla.norm(disp)
            break
    else:
        # Nothing fit the bill - must be a linear molecule?
        raise InertiaError(
            InertiaError.BAD_GEOM,
            "Linear molecule, no non-parallel displacement", "")
    return out_vec
First non - zero Atomic Displacement that is Non - Parallel with Vec
377
15
1,392
def reference_contexts_for_variants(
        variants, context_size, transcript_id_whitelist=None):
    """Map each variant in the collection to its reference contexts,
    preserving the input order."""
    return OrderedDict(
        (variant,
         reference_contexts_for_variant(
             variant=variant,
             context_size=context_size,
             transcript_id_whitelist=transcript_id_whitelist))
        for variant in variants)
Extract a set of reference contexts for each variant in the collection .
86
14
1,393
def variants_to_reference_contexts_dataframe(
        variants, context_size, transcript_id_whitelist=None):
    """Build a DataFrame of reference sequence contexts around each variant."""
    df_builder = DataFrameBuilder(
        ReferenceContext,
        exclude=["variant"],
        converters=dict(
            transcripts=lambda ts: ";".join(t.name for t in ts)),
        extra_column_fns={
            "gene": lambda variant, _: ";".join(variant.gene_names),
        })
    contexts_by_variant = reference_contexts_for_variants(
        variants=variants,
        context_size=context_size,
        transcript_id_whitelist=transcript_id_whitelist)
    for variant, reference_contexts in contexts_by_variant.items():
        df_builder.add_many(variant, reference_contexts)
    return df_builder.to_dataframe()
Given a collection of variants find all reference sequence contexts around each variant .
184
14
1,394
def exponential(x, y, xscale, yscale):
    """Two-dimensional oriented exponential decay pattern.

    Returns an all-zero array when either scale is zero.
    """
    if xscale == 0.0 or yscale == 0.0:
        return x * 0.0
    with float_error_ignore():
        scaled_x = np.divide(x, xscale)
        scaled_y = np.divide(y, yscale)
        return np.exp(-np.sqrt(scaled_x * scaled_x + scaled_y * scaled_y))
Two - dimensional oriented exponential decay pattern .
98
8
1,395
def line(y, thickness, gaussian_width):
    """Infinite-length line: solid central region, then Gaussian fall-off
    at the edges."""
    # Distance beyond the solid half-thickness; <= 0 means inside the line.
    edge_offset = abs(y) - thickness / 2.0
    sigma_sq = gaussian_width * gaussian_width
    if sigma_sq == 0.0:
        tail = y * 0.0
    else:
        with float_error_ignore():
            tail = np.exp(np.divide(-edge_offset * edge_offset, 2 * sigma_sq))
    return np.where(edge_offset <= 0, 1.0, tail)
Infinite - length line with a solid central region then Gaussian fall - off at the edges .
136
20
1,396
def disk(x, y, height, gaussian_width):
    """Circular disk with Gaussian fall-off after the solid central region."""
    radius = height / 2.0
    # Distance beyond the disk edge; <= 0 means inside the disk.
    outside = np.sqrt(x ** 2 + y ** 2) - radius
    sigma_sq = gaussian_width * gaussian_width
    if sigma_sq == 0.0:
        tail = x * 0.0
    else:
        with float_error_ignore():
            tail = np.exp(np.divide(-outside * outside, 2 * sigma_sq))
    return np.where(outside <= 0, 1.0, tail)
Circular disk with Gaussian fall - off after the solid central region .
150
15
1,397
def smooth_rectangle(x, y, rec_w, rec_h, gaussian_width_x, gaussian_width_y):
    """Rectangle with a solid central region, then Gaussian fall-off at
    the edges."""
    # Distances beyond the rectangle's half-extents; negative means inside.
    offset_x = abs(x) - rec_w / 2.0
    offset_y = abs(y) - rec_h / 2.0
    inside_x = np.less(offset_x, 0.0)
    inside_y = np.less(offset_y, 0.0)
    sigma_sq_x = gaussian_width_x * gaussian_width_x
    sigma_sq_y = gaussian_width_y * gaussian_width_y
    with float_error_ignore():
        falloff_x = (x * 0.0 if sigma_sq_x == 0.0 else
                     np.exp(np.divide(-offset_x * offset_x, 2 * sigma_sq_x)))
        falloff_y = (y * 0.0 if sigma_sq_y == 0.0 else
                     np.exp(np.divide(-offset_y * offset_y, 2 * sigma_sq_y)))
    return np.minimum(np.maximum(inside_x, falloff_x),
                      np.maximum(inside_y, falloff_y))
Rectangle with a solid central region then Gaussian fall - off at the edges .
295
17
1,398
def pack_tups(*args):
    """Pack an arbitrary set of iterables and non-iterables into tuples.

    All iterable arguments must share one length N; non-iterables are
    repeated N times.  Returns a list of N tuples (a single tuple of the
    original args when everything is non-iterable).  Strings are treated
    as non-iterable scalars.
    """
    import numpy as np

    NOT_ITER = -1  # marker for non-iterable items (relies on NOT_ITER < 0)

    class StrNoIter(str):
        """Non-iterable subclass of str."""
        def __iter__(self):
            raise NotImplementedError("Non-iterable string")

    # Re-wrap strings so they register as non-iterable.
    mod_args = [StrNoIter(a) if isinstance(a, str) else a for a in args]

    # Length of each iterable (NOT_ITER for scalars); the max is the
    # broadcast target length.
    iterlens = [len(a) if iterable(a) else NOT_ITER for a in mod_args]
    maxiter = max(iterlens)
    if not all(map(lambda v: v in (NOT_ITER, maxiter), iterlens)):
        raise ValueError("All iterable items must be of equal length")
    if maxiter == NOT_ITER:
        # Everything is non-iterable; one tuple of the original args.
        return [args]
    # Broadcast scalars with np.repeat, then zip everything into tuples.
    return list(zip(*[np.repeat(a, maxiter) if length == NOT_ITER else a
                      for (a, length) in zip(mod_args, iterlens)]))
Pack an arbitrary set of iterables and non - iterables into tuples .
485
16
1,399
def safe_cast(invar, totype):
    """Cast ``invar`` to ``totype``, verifying the result's type.

    Raises TypeError if the constructor yields an instance of a different
    type; any exception from the cast itself propagates unchanged.
    """
    outvar = totype(invar)
    if not isinstance(outvar, totype):
        raise TypeError(
            "Result of cast to '{0}' is '{1}'".format(totype, type(outvar)))
    return outvar
Performs a safe typecast .
100
7