idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
39,800
def get_url_authcode_flow_user(client_id, redirect_uri, display="page", scope=None, state=None):
    """Build the VK OAuth Authorization Code Flow URL for a user access token.

    ``scope`` and ``state`` are appended to the query only when provided.
    """
    base = "https://oauth.vk.com/authorize"
    query = {
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "display": display,
        "response_type": "code",
    }
    if scope:
        query['scope'] = scope
    if state:
        query['state'] = state
    return u"{url}?{params}".format(url=base, params=urlencode(query))
Authorization Code Flow for User Access Token
39,801
def get_fake_data(*a):
    # Called whenever someone presses the fire button; extra args are ignored.
    # NOTE(review): relies on module globals `d` (data dict), `_n` (presumably
    # numpy) and `c` (a curve/plot object with setData) -- confirm at module level.
    d['x'] = _n.linspace(0, 10, 100)
    # cosine signal plus uniform noise
    d['y'] = _n.cos(d['x']) + 0.1 * _n.random.rand(100)
    c.setData(d['x'], d['y'])
Called whenever someone presses the fire button .
39,802
def selectNumber(self):
    """Select the numerical portion of the text to allow quick editing by the user.

    With no suffix configured, the entire text is selected. Otherwise the
    selection stops at the first space (assumed to separate the number from
    its suffix); if no space exists, the selection is left untouched.
    """
    le = self.lineEdit()
    text = asUnicode(le.text())
    if self.opts['suffix'] == '':
        le.setSelection(0, len(text))
    else:
        try:
            index = text.index(' ')
        except ValueError:
            # no separator found -- nothing safe to select
            return
        le.setSelection(0, index)
Select the numerical portion of the text to allow quick editing by the user .
39,803
def value(self):
    """Return the value of this SpinBox, as int or float per the 'int' option."""
    raw = self.val
    return int(raw) if self.opts['int'] else float(raw)
Return the value of this SpinBox .
39,804
def interpret(self):
    """Return the value of the line-edit text, or False if the text is invalid.

    Strips the configured suffix before evaluating with fn.siEval. Returns
    False when the suffix is missing or the remainder does not evaluate.
    """
    strn = self.lineEdit().text()
    suf = self.opts['suffix']
    if len(suf) > 0:
        if strn[-len(suf):] != suf:
            return False
        strn = strn[:-len(suf)]
    try:
        val = fn.siEval(strn)
    except Exception:
        # was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit
        return False
    return val
Return the value of the text. Returns False if the text is invalid; raises an exception if the text is intermediate.
39,805
def editingFinishedEvent(self):
    """Edit has finished; interpret the text and set the value if it changed.

    Silently ignores unchanged text, uninterpretable text, and values equal
    to the current one.
    """
    if asUnicode(self.lineEdit().text()) == self.lastText:
        return
    try:
        val = self.interpret()
    except Exception:
        # was a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit
        return
    if val is False:
        return
    if val == self.val:
        return
    self.setValue(val, delaySignal=False)
Edit has finished ; set value .
39,806
def make_factory(self, cls, count):
    """Get the generators from the Scaffolding class within the model.

    Returns (fields, finalizer): a mapping of field name -> generator (each
    generator's set_up() is called with the model class and row count), and
    the scaffold's ``finalize`` callable if it defines one, else None.
    """
    field_names = cls._meta.get_all_field_names()
    fields = {}
    text = []
    finalizer = None
    scaffold = scaffolding.scaffold_for_model(cls)
    for field_name in field_names:
        generator = getattr(scaffold, field_name, None)
        if generator:
            if hasattr(generator, 'set_up'):
                generator.set_up(cls, count)
            fields[field_name] = generator
            text.append(u'%s: %s; ' % (field_name, fields[field_name]))
    try:
        self.stdout.write(u'Generator for %s: %s\n' % (cls, u''.join(text)))
    except models.ObjectDoesNotExist:
        # NOTE(review): presumably formatting `cls` can hit the DB and raise;
        # fall back to a line without the class -- confirm intent
        self.stdout.write(u'Generator for %s\n' % u''.join(text))
    if hasattr(scaffold, 'finalize') and hasattr(scaffold.finalize, '__call__'):
        finalizer = scaffold.finalize
    return fields, finalizer
Get the generators from the Scaffolding class within the model .
39,807
def compute_indel_length(fs_df):
    """Compute indel lengths, accounting for insertion vs. deletion.

    Insertions ('-' in Reference_Allele) use the tumor-allele length;
    deletions ('-' in Tumor_Allele) use the reference-allele length;
    all other rows get 0. Returns an int Series aligned with fs_df.
    """
    # hoist the boolean masks (the original recomputed each twice) and use
    # .loc instead of chained indexing
    is_ins = fs_df['Reference_Allele'] == '-'
    is_del = fs_df['Tumor_Allele'] == '-'
    indel_len = pd.Series(index=fs_df.index, dtype=float)
    indel_len[is_ins] = fs_df.loc[is_ins, 'Tumor_Allele'].str.len()
    indel_len[is_del] = fs_df.loc[is_del, 'Reference_Allele'].str.len()
    return indel_len.fillna(0).astype(int)
Computes the indel length, accounting for whether it is an insertion or a deletion.
39,808
def keep_indels(mut_df, indel_len_col=True, indel_type_col=True):
    """Filter out all mutations that are not indels.

    Optionally annotates an 'indel len' column (via compute_indel_length)
    and an 'indel type' column ('INS' for insertions, 'DEL' for deletions).
    """
    # copy after filtering so the assignments below cannot trigger pandas
    # SettingWithCopy warnings or silently write into a view
    mut_df = mut_df[is_indel_annotation(mut_df)].copy()
    if indel_len_col:
        mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)
    if indel_type_col:
        is_ins = mut_df['Reference_Allele'] == '-'
        is_del = mut_df['Tumor_Allele'] == '-'
        mut_df.loc[:, 'indel type'] = ''
        mut_df.loc[is_ins, 'indel type'] = 'INS'
        mut_df.loc[is_del, 'indel type'] = 'DEL'
    return mut_df
Filters out all mutations that are not indels .
39,809
def keep_frameshifts(mut_df, indel_len_col=True):
    """Filter out all mutations that are not frameshift indels."""
    fs_only = mut_df[is_frameshift_annotation(mut_df)]
    if indel_len_col:
        # annotate the indel length for downstream counting
        fs_only.loc[:, 'indel len'] = compute_indel_length(fs_only)
    return fs_only
Filters out all mutations that are not frameshift indels .
39,810
def is_frameshift_len(mut_df):
    """Return a boolean Series flagging frameshift mutations by length.

    A mutation counts as a frameshift when it is an indel ('-' in either
    allele column) whose length is not a multiple of three.
    """
    if 'indel len' in mut_df.columns:
        lengths = mut_df['indel len']
    else:
        lengths = compute_indel_length(mut_df)
    fs_mask = (lengths % 3) > 0
    indel_mask = ((mut_df['Reference_Allele'] == '-') |
                  (mut_df['Tumor_Allele'] == '-'))
    fs_mask[~indel_mask] = False
    return fs_mask
Simply returns a series indicating whether each corresponding mutation is a frameshift .
39,811
def get_frameshift_lengths(num_bins):
    """Return the first num_bins indel lengths that cause a frameshift.

    Frameshift lengths are the positive integers not divisible by three.
    """
    lengths = []
    candidate = 1
    while len(lengths) < num_bins:
        if candidate % 3:
            lengths.append(candidate)
        candidate += 1
    return lengths
Simple function that returns the lengths for each frameshift category if num_bins number of frameshift categories are requested .
39,812
def random_context_pos(self, num, num_permutations, context):
    """Sample (with replacement) positions matching the sequence context.

    Returns an array of shape (num_permutations, num). Raises ValueError
    for a context never seen in the sequence or a non-positive count.
    """
    if not self.is_valid_context(context):
        raise ValueError('Context ({0}) was never seen in sequence.'.format(context))
    if num < 1:
        raise ValueError('There must be at least one sample (specified {0}) '
                         'for a context'.format(num))
    candidates = self.context2pos[context]
    return self.prng_dict[context].choice(candidates, (num_permutations, num))
Samples with replacement available positions matching the sequence context .
39,813
def retrieve_scores(gname, sdir, codon_pos, germ_aa, somatic_aa,
                    default_mga=5., default_vest=0, no_file_flag=-1):
    """Retrieve MGAEntropy and VEST score statistics from per-gene pickles.

    Looks for <gname>.mgaentropy.pickle and <gname>.vest.pickle in sdir,
    sums the scores over the gene's missense mutations, and substitutes
    default_mga / default_vest when a score file is missing.
    Returns (total_mga_ent, total_vest).
    NOTE(review): `no_file_flag` is accepted but never used here -- confirm.
    """
    mga_path = os.path.join(sdir, gname + ".mgaentropy.pickle")
    if os.path.exists(mga_path):
        # py2 reads the pickle in text mode; py3 needs binary + latin-1
        if sys.version_info < (3,):
            with open(mga_path) as handle:
                mga_ent = pickle.load(handle)
        else:
            with open(mga_path, 'rb') as handle:
                mga_ent = pickle.load(handle, encoding='latin-1')
    else:
        mga_ent = None
    # keep only true missense positions (AA changed; no indel/stop/splice)
    missense_pos = [p for i, p in enumerate(codon_pos)
                    if (germ_aa[i] != somatic_aa[i]) and
                    (germ_aa[i] not in ['-', '*', 'Splice_Site']) and
                    (somatic_aa[i] not in ['-', '*', 'Splice_Site'])]
    total_mga_ent = compute_mga_entropy_stat(mga_ent, missense_pos, sum, default_mga)
    vest_path = os.path.join(sdir, gname + ".vest.pickle")
    if os.path.exists(vest_path):
        if sys.version_info < (3,):
            with open(vest_path) as handle:
                vest_score = pickle.load(handle)
        else:
            with open(vest_path, 'rb') as handle:
                vest_score = pickle.load(handle, encoding='latin-1')
    else:
        vest_score = None
    total_vest = compute_vest_stat(vest_score, germ_aa, somatic_aa, codon_pos,
                                   stat_func=sum, default_val=default_vest)
    return total_mga_ent, total_vest
Retrieves scores from pickle files .
39,814
def read_vest_pickle(gname, score_dir):
    """Read in VEST scores for the given gene; None when no pickle exists."""
    vest_path = os.path.join(score_dir, gname + ".vest.pickle")
    if not os.path.exists(vest_path):
        return None
    if sys.version_info < (3,):
        # python 2: text-mode read is fine for its own pickles
        with open(vest_path) as handle:
            return pickle.load(handle)
    # python 3: binary mode + latin-1 to load py2-era pickles
    with open(vest_path, 'rb') as handle:
        return pickle.load(handle, encoding='latin-1')
Read in VEST scores for given gene .
39,815
def compute_vest_stat(vest_dict, ref_aa, somatic_aa, codon_pos,
                      stat_func=np.mean, default_val=0.0):
    """Compute a summary statistic of missense VEST scores.

    Falls back to default_val when there is no score dictionary or no
    scores could be fetched.
    """
    if vest_dict is None:
        return default_val
    scores = fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos)
    return stat_func(scores) if scores else default_val
Compute missense VEST score statistic .
39,816
def compute_mga_entropy_stat(mga_vec, codon_pos, stat_func=np.mean, default_val=0.0):
    """Compute an MGA entropy conservation statistic.

    Returns default_val when no score vector is available or no usable
    scores are found at the given positions.
    """
    if mga_vec is None:
        return default_val
    scores = fetch_mga_scores(mga_vec, codon_pos)
    if scores is None or not len(scores):
        return default_val
    return stat_func(scores)
Compute MGA entropy conservation statistic
39,817
def fetch_vest_scores(vest_dict, ref_aa, somatic_aa, codon_pos, default_vest=0.0):
    """Look up VEST scores for each mutation from a precomputed dictionary.

    Codon positions are 0-based here but 1-based in the dictionary. A None
    position scores 0.0; a missing dictionary entry scores default_vest.
    """
    scores = []
    for i, aa_som in enumerate(somatic_aa):
        pos = codon_pos[i]
        if pos is None:
            scores.append(0.0)
        else:
            by_ref = vest_dict.get(pos + 1, {}).get(ref_aa[i], {})
            scores.append(by_ref.get(aa_som, default_vest))
    return scores
Get VEST scores from pre - computed scores in dictionary .
39,818
def fetch_mga_scores(mga_vec, codon_pos, default_mga=None):
    """Get MGAEntropy scores from a precomputed score array.

    Positions that are None or beyond the end of the score vector are
    skipped (the conservation track can be shorter than the protein).
    Returns None when no position is usable. `default_mga` is kept only
    for interface compatibility.
    """
    len_mga = len(mga_vec)
    # also drop None positions: the original crashed on `None < int` (py3)
    good_codon_pos = [p for p in codon_pos if p is not None and p < len_mga]
    if good_codon_pos:
        return mga_vec[good_codon_pos]
    return None
Get MGAEntropy scores from pre - computed scores in array .
39,819
def read_neighbor_graph_pickle(gname, graph_dir):
    """Read the neighbor graph pickle for a gene, or None if absent.

    Bug fix: the file is now opened in binary mode -- the original used
    text mode, which fails for binary pickles on Python 3. The py2/py3
    handling mirrors read_vest_pickle for consistency.
    """
    graph_path = os.path.join(graph_dir, gname + ".pickle")
    if not os.path.exists(graph_path):
        return None
    if sys.version_info < (3,):
        with open(graph_path) as handle:
            return pickle.load(handle)
    with open(graph_path, 'rb') as handle:
        return pickle.load(handle, encoding='latin-1')
Read in neighbor graph for given gene .
39,820
def compute_ng_stat(gene_graph, pos_ct, alpha=.5):
    """Compute the clustering score for the gene on its neighbor graph.

    pos_ct maps codon position -> mutation count. Each count is spread to
    the position's graph neighbors (weight alpha) and itself (1 - alpha);
    the score is the Shannon entropy of the resulting distribution.
    Returns (graph_score, coverage), coverage being the number of codons
    with non-zero mass.
    """
    if not len(pos_ct):
        # no mutations: sentinel score of 1.0 and zero coverage
        return 1.0, 0
    max_pos = max(gene_graph)
    codon_vals = np.zeros(max_pos + 1)
    for pos in pos_ct:
        mut_count = pos_ct[pos]
        neighbors = list(gene_graph[pos])
        num_neighbors = len(neighbors)  # NOTE(review): unused -- confirm intent
        codon_vals[neighbors] += alpha * mut_count
        codon_vals[pos] += (1 - alpha) * mut_count
    # normalize into a probability distribution
    p = codon_vals / np.sum(codon_vals)
    graph_score = mymath.shannon_entropy(p)
    coverage = np.count_nonzero(p)
    return graph_score, coverage
Compute the clustering score for the gene on its neighbor graph .
39,821
def count_frameshift_total(mut_df, bed_path, use_unmapped=False,
                           to_zero_based=False):
    """Count frameshift indels for each gene listed in a BED file.

    Returns a DataFrame indexed by gene name with 'total' and 'unmapped'
    columns. When use_unmapped is False, frameshifts that fail to map onto
    the reference transcript are excluded from the total.
    NOTE(review): with to_zero_based=True the caller's mut_df
    'Start_Position' column is shifted in place -- confirm this is intended.
    """
    if to_zero_based:
        mut_df['Start_Position'] = mut_df['Start_Position'] - 1
    fs_cts = {}  # gene name -> [total, unmapped]
    fs_df = indel.keep_frameshifts(mut_df)
    for bed in utils.bed_generator(bed_path):
        gene_df = fs_df[fs_df['Gene'] == bed.gene_name]
        fs_pos = []
        for ix, row in gene_df.iterrows():
            indel_pos = [row['Start_Position'], row['End_Position']]
            # None means the indel does not map onto the transcript
            coding_pos = bed.query_position(bed.strand, row['Chromosome'], indel_pos)
            fs_pos.append(coding_pos)
        gene_df['unmapped'] = [(1 if x is None else 0) for x in fs_pos]
        total_fs = len(gene_df)
        unmapped_fs = len(gene_df[gene_df['unmapped'] == 1])
        if not use_unmapped:
            gene_df = gene_df[gene_df['unmapped'] == 0]
            total_fs -= unmapped_fs
        info = [total_fs, unmapped_fs, ]
        fs_cts[bed.gene_name] = info
    fs_cts_df = pd.DataFrame.from_dict(fs_cts, orient='index')
    cols = ['total', 'unmapped', ]
    fs_cts_df.columns = cols
    return fs_cts_df
Count frameshifts for each gene .
39,822
def _fetch_3ss_fasta(fasta, gene_name, exon_num, chrom, strand, start, end):
    """Retrieve the 3' splice-site sequence flanking the specified exon.

    Fetches bases spanning the intron/exon boundary and reverse-complements
    them for '-' strand genes; the record header is '<gene>;exon<i>;3SS'.
    NOTE(review): if strand is neither '+' nor '-', ss_seq is never assigned
    and the format call raises UnboundLocalError -- confirm callers only
    pass '+'/'-'.
    """
    if strand == '-':
        ss_seq = fasta.fetch(reference=chrom, start=end - 1, end=end + 3)
        ss_seq = utils.rev_comp(ss_seq)
    elif strand == '+':
        ss_seq = fasta.fetch(reference=chrom, start=start - 3, end=start + 1)
    ss_fasta = '>{0};exon{1};3SS\n{2}\n'.format(gene_name, exon_num, ss_seq.upper())
    return ss_fasta
Retrieves the 3' splice-site (3' SS) sequence flanking the specified exon.
39,823
def fetch_gene_fasta(gene_bed, fasta_obj):
    """Retrieve a gene's exon and splice-site sequences in FASTA format.

    Exons are emitted in transcription order (reverse-complemented for '-'
    strand genes). Internal exons also get 5'SS and 3'SS records; the first
    and last exons get only their inner splice site; single-exon genes get
    none.
    """
    gene_fasta = ''
    strand = gene_bed.strand
    exons = gene_bed.get_exons()
    if strand == '-':
        # iterate in transcription order for reverse-strand genes
        exons.reverse()
    for i, exon in enumerate(exons):
        exon_seq = fasta_obj.fetch(reference=gene_bed.chrom,
                                   start=exon[0],
                                   end=exon[1]).upper()
        if strand == '-':
            exon_seq = utils.rev_comp(exon_seq)
        exon_fasta = '>{0};exon{1}\n{2}\n'.format(gene_bed.gene_name, i, exon_seq)
        if len(exons) == 1:
            ss_fasta = ''  # single exon: no splice sites
        elif i == 0:
            # first exon: only the downstream 5' splice site
            ss_fasta = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                        gene_bed.chrom, strand, exon[0], exon[1])
        elif i == (len(exons) - 1):
            # last exon: only the upstream 3' splice site
            ss_fasta = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                        gene_bed.chrom, strand, exon[0], exon[1])
        else:
            # internal exon: both splice sites
            fasta_3ss = _fetch_3ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                         gene_bed.chrom, strand, exon[0], exon[1])
            fasta_5ss = _fetch_5ss_fasta(fasta_obj, gene_bed.gene_name, i,
                                         gene_bed.chrom, strand, exon[0], exon[1])
            ss_fasta = fasta_5ss + fasta_3ss
        gene_fasta += exon_fasta + ss_fasta
    return gene_fasta
Retrieve gene sequences in FASTA format.
39,824
def _reset_seq ( self ) : exon_seq_list , five_ss_seq_list , three_ss_seq_list = self . _fetch_seq ( ) self . exon_seq = '' . join ( exon_seq_list ) self . three_prime_seq = three_ss_seq_list self . five_prime_seq = five_ss_seq_list self . _to_upper ( )
Updates attributes for gene represented in the self . bed attribute .
39,825
def add_germline_variants(self, germline_nucs, coding_pos):
    """Substitute potential germline variants into the exon sequence.

    germline_nucs and coding_pos must be parallel lists; negative coding
    positions (unmapped) are skipped. Raises ValueError on a length
    mismatch or an invalid nucleotide.
    """
    if len(germline_nucs) != len(coding_pos):
        raise ValueError('Each germline nucleotide should have a coding position')
    seq_chars = list(self.exon_seq)
    for idx in range(len(germline_nucs)):
        nuc = germline_nucs[idx].upper()
        cpos = coding_pos[idx]
        if not utils.is_valid_nuc(nuc):
            raise ValueError('{0} is not a valid nucleotide'.format(nuc))
        if cpos >= 0:
            seq_chars[cpos] = nuc
    self.exon_seq = ''.join(seq_chars)
Add potential germline variants into the nucleotide sequence .
39,826
def _to_upper ( self ) : self . exon_seq = self . exon_seq . upper ( ) self . three_prime_seq = [ s . upper ( ) for s in self . three_prime_seq ] self . five_prime_seq = [ s . upper ( ) for s in self . five_prime_seq ]
Convert sequences to upper case .
39,827
def _fetch_seq(self):
    """Fetch exon and splice-site sequences from the pysam FASTA object.

    Sequences are looked up by the '<gene>;exon<i>' (and ';5SS'/';3SS')
    record names produced by fetch_gene_fasta. Returns the tuple
    (exons, five_prime_ss, three_prime_ss).
    """
    exons = []
    three_prime_ss = []
    five_prime_ss = []
    num_exons = self.bed.get_num_exons()
    for i in range(num_exons):
        tmp_id = '{0};exon{1}'.format(self.bed.gene_name, i)
        tmp_exon = self.fasta.fetch(reference=tmp_id)
        exons.append(tmp_exon)
        tmp_id_3ss = '{0};3SS'.format(tmp_id)
        tmp_id_5ss = '{0};5SS'.format(tmp_id)
        if num_exons == 1:
            pass  # single exon: no splice-site records exist
        elif i == 0:
            # first exon: only a 5' splice site
            tmp_5ss = self.fasta.fetch(tmp_id_5ss)
            five_prime_ss.append(tmp_5ss)
        elif i == (num_exons - 1):
            # last exon: only a 3' splice site
            tmp_3ss = self.fasta.fetch(tmp_id_3ss)
            three_prime_ss.append(tmp_3ss)
        else:
            # internal exon: both splice sites
            tmp_3ss = self.fasta.fetch(tmp_id_3ss)
            tmp_5ss = self.fasta.fetch(tmp_id_5ss)
            three_prime_ss.append(tmp_3ss)
            five_prime_ss.append(tmp_5ss)
    return exons, five_prime_ss, three_prime_ss
Fetches gene sequence from PySAM fasta object .
39,828
def correct_chrom_names(chroms):
    """Normalize chromosome names to the UCSC 'chr' convention.

    Numeric aliases 23/24/25 map to X/Y/Mt. Bug fix: the original used
    str.replace, which corrupts any name merely containing the substring
    (e.g. '123' -> '1X'); whole-name matching is used instead.
    """
    alias = {'23': 'X', '24': 'Y', '25': 'Mt'}
    chrom_list = []
    for chrom in chroms:
        name = str(chrom)
        if name.startswith('chr'):
            name = name[3:]
        chrom_list.append('chr' + alias.get(name, name))
    return chrom_list
Make sure chromosome names follow UCSC chr convention .
39,829
def fishers_method(pvals):
    """Combine independent p-values with Fisher's method.

    The statistic -2 * sum(ln p_i) follows a chi-square distribution with
    2k degrees of freedom under the null.
    """
    pvals = np.asarray(pvals)
    chisq_stat = -2.0 * np.sum(np.log(pvals))
    return stats.chi2.sf(chisq_stat, 2 * pvals.size)
Fisher's method for combining independent p-values.
39,830
def cummin(x):
    """Running minimum, like R's cummin; mutates x in place and returns it."""
    for i in range(1, len(x)):
        if x[i] > x[i - 1]:
            x[i] = x[i - 1]
    return x
A python implementation of the cummin function in R
39,831
def bh_fdr(pval):
    """Benjamini-Hochberg FDR adjustment of a vector of p-values.

    Returns q-values in the original input order; relies on the
    module-level cummin helper for the running minimum.
    """
    pvals = np.array(pval)
    order = np.argsort(pvals)
    undo = np.argsort(order)
    ranked = pvals[order]
    n = float(len(pval))
    ranks_desc = np.arange(1, int(n) + 1, dtype=float)[::-1]
    adjusted = np.minimum(1, cummin(n / ranks_desc * ranked[::-1]))[::-1]
    return adjusted[undo]
A Python implementation of the Benjamini–Hochberg FDR method.
39,832
def calc_deleterious_p_value(mut_info, unmapped_mut_info, sc, gs, bed,
                             num_permutations, stop_thresh, del_threshold,
                             pseudo_count, seed=None):
    """Calculate the permutation p-value for the count of inactivating SNVs.

    Mapped and unmapped mutations are pooled by sequence context; when the
    observed deleterious count reaches del_threshold a permutation test is
    run. Returns [gene_name, num_del, del_p_value]; the p-value is None
    when the test is skipped or there are no mutations.
    NOTE(review): `seed` is accepted but not used in this body -- confirm.
    """
    if len(mut_info) > 0:
        mut_info['Coding Position'] = mut_info['Coding Position'].astype(int)
        mut_info['Context'] = mut_info['Coding Position'].apply(lambda x: sc.pos2context[x])
        cols = ['Context', 'Tumor_Allele']
        # pool mapped and unmapped mutations by context
        unmapped_mut_df = pd.DataFrame(unmapped_mut_info)
        tmp_df = pd.concat([mut_info[cols], unmapped_mut_df[cols]])
        context_cts = tmp_df['Context'].value_counts()
        context_to_mutations = dict((name, group['Tumor_Allele'])
                                    for name, group in tmp_df.groupby('Context'))
        aa_mut_info = mc.get_aa_mut_info(mut_info['Coding Position'],
                                         mut_info['Tumor_Allele'].tolist(),
                                         gs)
        ref_aa = aa_mut_info['Reference AA'] + unmapped_mut_info['Reference AA']
        somatic_aa = aa_mut_info['Somatic AA'] + unmapped_mut_info['Somatic AA']
        codon_pos = aa_mut_info['Codon Pos'] + unmapped_mut_info['Codon Pos']
        num_del = cutils.calc_deleterious_info(ref_aa, somatic_aa, codon_pos)
        # only run the (expensive) permutation test above the threshold
        if num_del >= del_threshold:
            del_p_value = pm.deleterious_permutation(num_del, context_cts,
                                                     context_to_mutations,
                                                     sc, gs, num_permutations,
                                                     stop_thresh, pseudo_count)
        else:
            del_p_value = None
    else:
        num_del = 0
        del_p_value = None
    result = [bed.gene_name, num_del, del_p_value]
    return result
Calculates the p - value for the number of inactivating SNV mutations .
39,833
def calc_protein_p_value(mut_info, unmapped_mut_info, sc, gs, bed, graph_dir,
                         num_permutations, stop_thresh, min_recurrent,
                         min_fraction):
    """Permutation p-value for clustering on the gene's codon neighbor graph
    (codons connected by edges when spatially near in 3D protein structure).

    Returns [gene_name, num_recurrent, norm_graph_score, protein_p_value].
    Any error in the scoring/permutation step falls back to score 0.0 and
    p-value 1.0.
    """
    if len(mut_info) > 0:
        mut_info['Coding Position'] = mut_info['Coding Position'].astype(int)
        mut_info['Context'] = mut_info['Coding Position'].apply(lambda x: sc.pos2context[x])
        cols = ['Context', 'Tumor_Allele']
        # pool mapped and unmapped mutations by context
        unmapped_mut_df = pd.DataFrame(unmapped_mut_info)
        tmp_df = pd.concat([mut_info[cols], unmapped_mut_df[cols]])
        context_cts = tmp_df['Context'].value_counts()
        context_to_mutations = dict((name, group['Tumor_Allele'])
                                    for name, group in tmp_df.groupby('Context'))
        if graph_dir:
            gene_graph = scores.read_neighbor_graph_pickle(bed.gene_name, graph_dir)
            if gene_graph is None:
                logger.warning('Could not find neighbor graph for {0}, skipping . . .'.format(bed.gene_name))
        else:
            gene_graph = None
        aa_mut_info = mc.get_aa_mut_info(mut_info['Coding Position'],
                                         mut_info['Tumor_Allele'].tolist(),
                                         gs)
        codon_pos = aa_mut_info['Codon Pos'] + unmapped_mut_info['Codon Pos']
        ref_aa = aa_mut_info['Reference AA'] + unmapped_mut_info['Reference AA']
        somatic_aa = aa_mut_info['Somatic AA'] + unmapped_mut_info['Somatic AA']
        num_recurrent, pos_ent, delta_pos_ent, pos_ct = cutils.calc_pos_info(codon_pos,
                                                                             ref_aa,
                                                                             somatic_aa,
                                                                             min_frac=min_fraction,
                                                                             min_recur=min_recurrent)
        try:
            graph_score, coverage = scores.compute_ng_stat(gene_graph, pos_ct)
            protein_p_value, norm_graph_score = pm.protein_permutation(graph_score,
                                                                       len(pos_ct),
                                                                       context_cts,
                                                                       context_to_mutations,
                                                                       sc, gs, gene_graph,
                                                                       num_permutations,
                                                                       stop_thresh)
        except Exception as err:
            # scoring failed (e.g. codon numbering mismatch): neutral fallback
            exc_info = sys.exc_info()  # NOTE(review): captured but unused -- confirm
            norm_graph_score = 0.0
            protein_p_value = 1.0
            logger.warning('Codon numbering problem with ' + bed.gene_name)
    else:
        # no mapped mutations at all
        norm_graph_score = 0.0
        protein_p_value = 1.0
        num_recurrent = 0
    result = [bed.gene_name, num_recurrent, norm_graph_score, protein_p_value]
    return result
Computes the p - value for clustering on a neighbor graph composed of codons connected with edges if they are spatially near in 3D protein structure .
39,834
def shannon_entropy(p):
    """Shannon entropy in bits of a discrete distribution; 0*log(0) := 0."""
    terms = np.where(p != 0, p * np.log2(p), 0)
    return -np.sum(terms)
Calculates shannon entropy in bits .
39,835
def js_divergence(p, q):
    """Jensen-Shannon divergence between two discrete distributions.

    Average of the KL divergences of p and q from their mixture.
    """
    mixture = .5 * (p + q)
    return .5 * kl_divergence(p, mixture) + .5 * kl_divergence(q, mixture)
Compute the Jensen - Shannon Divergence between two discrete distributions .
39,836
def js_distance(p, q):
    """Jensen-Shannon distance: square root of the JS divergence."""
    return np.sqrt(js_divergence(p, q))
Compute the Jensen - Shannon distance between two discrete distributions .
39,837
def _init_exons(self):
    """Set the list of (start, end) genome intervals for each exon.

    Parses BED12 blockStarts/blockSizes, strips UTR regions via
    _filter_utr, and derives exon/CDS/splice-site lengths. Each internal
    exon junction contributes 2 bases of 5' and 3' splice site.
    """
    exon_starts = [self.chrom_start + int(s)
                   for s in self.bed_tuple.blockStarts.strip(',').split(',')]
    exon_sizes = list(map(int, self.bed_tuple.blockSizes.strip(',').split(',')))
    exons = [(exon_starts[i], exon_starts[i] + exon_sizes[i])
             for i in range(len(exon_starts))]
    no_utr_exons = self._filter_utr(exons)
    self.exons = no_utr_exons
    self.exon_lens = [e[1] - e[0] for e in self.exons]
    self.num_exons = len(self.exons)
    self.cds_len = sum(self.exon_lens)
    # 2 bp of donor/acceptor sequence per internal junction
    self.five_ss_len = 2 * (self.num_exons - 1)
    self.three_ss_len = 2 * (self.num_exons - 1)
    self._init_splice_site_pos()
Sets a list of position intervals for each exon .
39,838
def init_genome_coordinates(self):
    """Create self.seqpos2genome mapping sequence positions to genome coords.

    Layout of the concatenated sequence: CDS bases first, then 2 bp of 5'
    splice site per internal junction, then 2 bp of 3' splice site. On the
    '-' strand, sequence positions run opposite to genome order.
    """
    self.seqpos2genome = {}
    seq_pos = 0
    # exonic (CDS) positions
    for estart, eend in self.exons:
        for genome_pos in range(estart, eend):
            if self.strand == '+':
                self.seqpos2genome[seq_pos] = genome_pos
            elif self.strand == '-':
                # mirror the position for reverse-strand genes
                tmp = self.cds_len - seq_pos - 1
                self.seqpos2genome[tmp] = genome_pos
            seq_pos += 1
    # 5' splice sites: the 2 bases after each exon end ('+' strand view)
    for i in range(0, self.five_ss_len):
        seq_pos = self.cds_len + i
        ss_ix = i // 2       # which exon junction
        pos_in_ss = i % 2    # first or second base of the splice site
        if self.strand == '+':
            self.seqpos2genome[seq_pos] = self.exons[ss_ix][1] + pos_in_ss
        else:
            exon_pos = -1 - ss_ix
            self.seqpos2genome[seq_pos] = self.exons[exon_pos][0] - pos_in_ss - 1
    # 3' splice sites: the 2 bases before each exon start ('+' strand view)
    for i in range(0, self.three_ss_len):
        seq_pos = self.cds_len + self.five_ss_len + i
        ss_ix = i // 2
        pos_in_ss = i % 2
        if self.strand == '+':
            self.seqpos2genome[seq_pos] = self.exons[ss_ix + 1][0] - 2 + pos_in_ss
        else:
            exon_pos = -1 - ss_ix
            self.seqpos2genome[seq_pos] = self.exons[exon_pos - 1][1] + 1 - pos_in_ss
Creates the self . seqpos2genome dictionary that converts positions relative to the sequence to genome coordinates .
39,839
def query_position(self, strand, chr, genome_coord):
    """Map a genomic position to its position on the coding sequence.

    genome_coord may be a single coordinate or a [start, end] pair (each
    end mapped independently). Returns None when the position falls
    neither in an exon nor in the 2-bp splice-site windows flanking
    internal exons.
    NOTE(review): a chromosome mismatch only executes `pass` and mapping
    continues anyway -- looks like it was meant to return None; confirm.
    (The parameter `chr` also shadows the builtin, kept for compatibility.)
    """
    pos = None
    if chr != self.chrom:
        pass
    if type(genome_coord) is list:
        # map both ends of an interval; None only when both fail
        pos_left = self.query_position(strand, chr, genome_coord[0])
        pos_right = self.query_position(strand, chr, genome_coord[1])
        if pos_left is not None or pos_right is not None:
            return [pos_left, pos_right]
        else:
            return None
    for i, (estart, eend) in enumerate(self.exons):
        if estart <= genome_coord < eend:
            # inside an exon
            if strand == '+':
                prev_lens = sum(self.exon_lens[:i])
                pos = prev_lens + (genome_coord - estart)
            elif strand == '-':
                prev_lens = sum(self.exon_lens[:i])
                pos = prev_lens + (genome_coord - estart)
                pos = self.cds_len - pos - 1
            return pos
        elif (eend <= genome_coord < eend + 2) and i != self.num_exons - 1:
            # within the 2-bp splice window after this exon
            if strand == '+':
                pos = self.cds_len + 2 * i + (genome_coord - eend)
            elif strand == '-':
                pos = self.cds_len + self.five_ss_len + 2 * (self.num_exons - (i + 2)) + (genome_coord - eend)
            return pos
        elif (estart - 2 <= genome_coord < estart) and i != 0:
            # within the 2-bp splice window before this exon
            if strand == '-':
                pos = self.cds_len + 2 * (self.num_exons - (i + 2)) + (genome_coord - (estart - 2))
            elif strand == '+':
                pos = self.cds_len + self.five_ss_len + 2 * (i - 1) + (genome_coord - (estart - 2))
            return pos
    return pos
Provides the relative position on the coding sequence for a given genomic position .
39,840
def start_logging(log_file='', log_level='INFO', verbose=False):
    """Start logging to a file (default: ./log/log.run.<timestamp>.txt).

    log_file='stdout' streams to standard output instead. Any level other
    than DEBUG also silences Python warnings globally.
    """
    if not log_file:
        log_dir = os.path.abspath('log') + '/'
        if not os.path.isdir(log_dir):
            os.mkdir(log_dir)
        # ':' is invalid in filenames on some platforms; replace with '.'
        log_file = log_dir + 'log.run.' + str(datetime.datetime.now()).replace(':', '.') + '.txt'
    lvl = logging.DEBUG if log_level.upper() == 'DEBUG' else logging.INFO
    if log_level.upper() != 'DEBUG':
        warnings.filterwarnings('ignore')
    if verbose:
        myformat = '%(asctime)s - %(name)s - %(levelname)s \n>>> %(message)s'
    else:
        myformat = '%(message)s'
    if not log_file == 'stdout':
        logging.basicConfig(level=lvl,
                            format=myformat,
                            filename=log_file,
                            filemode='w')
    else:
        # configure the root logger to stream to stdout instead of a file
        root = logging.getLogger()
        root.setLevel(lvl)
        stdout_stream = logging.StreamHandler(sys.stdout)
        stdout_stream.setLevel(lvl)
        formatter = logging.Formatter(myformat)
        stdout_stream.setFormatter(formatter)
        root.addHandler(stdout_stream)
        root.propagate = True
Start logging information into the log directory .
39,841
def log_error_decorator(f):
    """Decorator that logs exceptions raised by the wrapped function.

    KeyboardInterrupt is logged and swallowed; any other exception is
    logged with its traceback and re-raised.
    """
    @wraps(f)
    def wrapper(*args, **kwds):
        try:
            return f(*args, **kwds)
        except KeyboardInterrupt:
            logger.info('Ctrl-C stopped a process.')
        except Exception as e:
            logger.exception(e)
            raise
    return wrapper
Writes the exception to the log file if one occurred in the decorated function.
39,842
def filter_list(mylist, bad_ixs):
    """Remove the given indices from mylist in place; returns mylist.

    Indices are popped from highest to lowest so earlier removals do not
    shift later ones.
    """
    for idx in sorted(bad_ixs, reverse=True):
        mylist.pop(idx)
    return mylist
Removes indices from a list .
39,843
def rev_comp(seq):
    """Return the reverse complement, using the module's base_pairing table."""
    return ''.join([base_pairing[base] for base in reversed(seq)])
Get reverse complement of sequence .
39,844
def bed_generator(bed_path):
    """Yield parsed BedLine objects from a tab-delimited BED file."""
    with open(bed_path) as handle:
        for fields in csv.reader(handle, delimiter='\t'):
            yield BedLine(fields)
Iterates through a BED file yielding parsed BED lines .
39,845
def read_bed(file_path, restricted_genes=None):
    """Read a BED file into an OrderedDict of chromosome -> list of rows.

    When restricted_genes is given, only those genes are kept. Chromosomes
    are ordered by descending row count.
    """
    by_chrom = OrderedDict()
    for row in bed_generator(file_path):
        if restricted_genes is None or row.gene_name in restricted_genes:
            by_chrom.setdefault(row.chrom, []).append(row)
    ordered_chroms = sorted(by_chrom.keys(),
                            key=lambda c: len(by_chrom[c]),
                            reverse=True)
    return OrderedDict((c, by_chrom[c]) for c in ordered_chroms)
Reads BED file and populates a dictionary separating genes by chromosome .
39,846
def _fix_mutation_df(mutation_df, only_unique=False):
    """Drop invalid SNV rows and convert to 0-based start coordinates.

    Keeps only SNV variant classes whose alleles are single valid
    nucleotides, optionally de-duplicates identical
    sample/position/allele rows, and guarantees 'Tumor_Type' and
    'Protein_Change' columns exist.
    """
    orig_len = len(mutation_df)
    mutation_df = mutation_df[mutation_df.Variant_Classification.isin(variant_snv)]
    type_len = len(mutation_df)
    log_msg = ('Dropped {num_dropped} mutations after only keeping '
               '{mut_types}. Indels are processed separately.'.format(num_dropped=orig_len - type_len,
                                                                      mut_types=', '.join(variant_snv)))
    logger.info(log_msg)
    # both alleles must be valid nucleotides...
    valid_nuc_flag = (mutation_df['Reference_Allele'].apply(is_valid_nuc) &
                      mutation_df['Tumor_Allele'].apply(is_valid_nuc))
    mutation_df = mutation_df[valid_nuc_flag]
    # ...and exactly one base long (true SNVs)
    mutation_df = mutation_df[mutation_df['Tumor_Allele'].apply(lambda x: len(x) == 1)]
    mutation_df = mutation_df[mutation_df['Reference_Allele'].apply(lambda x: len(x) == 1)]
    valid_len = len(mutation_df)
    log_msg = ('Dropped {num_dropped} mutations after only keeping '
               'valid SNVs'.format(num_dropped=type_len - valid_len))
    logger.info(log_msg)
    if only_unique:
        dup_cols = ['Tumor_Sample', 'Chromosome', 'Start_Position',
                    'End_Position', 'Reference_Allele', 'Tumor_Allele']
        mutation_df = mutation_df.drop_duplicates(subset=dup_cols)
        dedup_len = len(mutation_df)
        log_msg = ('Dropped {num_dropped} mutations when removing '
                   'duplicates'.format(num_dropped=valid_len - dedup_len))
        logger.info(log_msg)
    if 'Tumor_Type' not in mutation_df.columns:
        mutation_df['Tumor_Type'] = ''
    if 'Protein_Change' not in mutation_df.columns:
        mutation_df['Protein_Change'] = ''
    # convert 1-based to 0-based coordinates
    mutation_df['Start_Position'] = mutation_df['Start_Position'].astype(int) - 1
    return mutation_df
Drops invalid mutations and corrects for 1 - based coordinates .
39,847
def calc_windowed_sum(aa_mut_pos, germ_aa, somatic_aa, window=(3,)):
    """Sum missense mutation counts within windows around each mutated codon.

    For every mutated codon, counts mutations at positions within +/- w for
    each window size w (the position itself included). Returns
    (pos_ctr, pos_sum): position -> count, and w -> {position -> windowed sum}.
    The default was a mutable list ([3]); it is now the equivalent tuple to
    avoid the shared-mutable-default pitfall.
    """
    pos_ctr = {}
    pos_sum = {w: {} for w in window}
    num_pos = len(aa_mut_pos)
    # count missense mutations per codon (skip stops, unmapped, silent)
    for i in range(num_pos):
        pos = aa_mut_pos[i]
        if (germ_aa[i] and somatic_aa[i] and
                germ_aa[i] != '*' and somatic_aa[i] != '*' and
                germ_aa[i] != somatic_aa[i]):
            if pos is not None:
                pos_ctr.setdefault(pos, 0)
                pos_ctr[pos] += 1
    pos_list = sorted(pos_ctr.keys())
    max_window = max(window)
    for ix, pos in enumerate(pos_list):
        tmp_sum = {w: 0 for w in window}
        # scan left (including pos itself) until outside the largest window
        for k in reversed(range(ix + 1)):
            pos2 = pos_list[k]
            if pos2 < pos - max_window:
                break
            for w in window:
                if pos - w <= pos2:
                    tmp_sum[w] += pos_ctr[pos2]
        # scan right
        for l in range(ix + 1, len(pos_list)):
            pos2 = pos_list[l]
            if pos2 > pos + max_window:
                break
            for w in window:
                if pos2 <= pos + w:
                    tmp_sum[w] += pos_ctr[pos2]
        for w in window:
            pos_sum[w][pos] = tmp_sum[w]
    return pos_ctr, pos_sum
Calculate the sum of mutations within a window around a particular mutated codon .
39,848
def get_all_context_names(context_num):
    """Return the context label strings for a nucleotide-context setting.

    context_num: 0 -> ['None']; 1 -> single bases; 1.5 -> CHASM-style
    contexts; 2 / 3 -> all di- / tri-nucleotides. Returns None for an
    unrecognized context_num (as before). Fix: the original wrapped the
    products in set(), making the returned order nondeterministic across
    runs; the combinations are already unique, so the set is dropped for a
    stable, reproducible order.
    """
    bases = 'ACTG'
    if context_num == 0:
        return ['None']
    elif context_num == 1:
        return ['A', 'C', 'T', 'G']
    elif context_num == 1.5:
        return ['C*pG', 'CpG*', 'TpC*', 'G*pA', 'A', 'C', 'T', 'G']
    elif context_num == 2:
        return [d1 + d2 for d1 in bases for d2 in bases]
    elif context_num == 3:
        return [t1 + t2 + t3 for t1 in bases for t2 in bases for t3 in bases]
Based on the nucleotide base context number return a list of strings representing each context .
39,849
def get_chasm_context(tri_nuc):
    """Return the mutation context of a trinucleotide according to CHASM.

    CpG and TpC/GpA dinucleotide contexts take precedence; otherwise the
    middle base is the context. Raises ValueError unless the input is
    exactly three nucleotides.
    """
    if len(tri_nuc) != 3:
        raise ValueError('Chasm context requires a three nucleotide string '
                         '(Provided: "{0}")'.format(tri_nuc))
    first_two = tri_nuc[:2]
    last_two = tri_nuc[1:]
    if last_two == 'CG':
        return 'C*pG'
    if first_two == 'CG':
        return 'CpG*'
    if first_two == 'TC':
        return 'TpC*'
    if last_two == 'GA':
        return 'G*pA'
    return tri_nuc[1]
Returns the mutation context according to CHASM.
39,850
def get_aa_mut_info(coding_pos, somatic_base, gene_seq):
    """Describe the amino-acid effect of somatic SNVs on a gene.

    Returns a dict of parallel lists: reference/somatic codons, codon
    positions, reference nucleotides, and reference/somatic amino acids
    (None for codons not in the codon table, e.g. splice sites).
    """
    if not somatic_base:
        # no mutations: empty parallel lists
        aa_info = {'Reference Codon': [],
                   'Somatic Codon': [],
                   'Codon Pos': [],
                   'Reference Nuc': [],
                   'Reference AA': [],
                   'Somatic AA': []}
        return aa_info
    ref_codon, codon_pos, pos_in_codon, ref_nuc = zip(*[cutils.pos_to_codon(gene_seq, p)
                                                        for p in coding_pos])
    ref_codon, codon_pos, pos_in_codon, ref_nuc = list(ref_codon), list(codon_pos), list(pos_in_codon), list(ref_nuc)
    # substitute the somatic base into a mutable copy of each codon;
    # splice-site "codons" stay as the marker string
    mut_codon = [(list(x) if x != 'Splice_Site' else []) for x in ref_codon]
    for i in range(len(mut_codon)):
        if pos_in_codon[i] is not None:
            pc = pos_in_codon[i]
            mut_codon[i][pc] = somatic_base[i]
    mut_codon = [(''.join(x) if x else 'Splice_Site') for x in mut_codon]
    aa_info = {'Reference Codon': ref_codon,
               'Somatic Codon': mut_codon,
               'Codon Pos': codon_pos,
               'Reference Nuc': ref_nuc,
               'Reference AA': [(utils.codon_table[r] if (r in utils.codon_table) else None)
                                for r in ref_codon],
               'Somatic AA': [(utils.codon_table[s] if (s in utils.codon_table) else None)
                              for s in mut_codon]}
    return aa_info
Retrieves relevant information about the effect of a somatic SNV on the amino acid of a gene .
39,851
def handle_tsg_results(permutation_result):
    """Assemble TSG permutation-test rows into a results DataFrame.

    Adds Benjamini-Hochberg q-values for the inactivating p-values and
    returns the table indexed by gene.

    Parameters
    ----------
    permutation_result : list of tuple
        Rows of (gene, inactivating count, inactivating p-value,
        total SNV mutations, SNVs unmapped to ref tx).

    Returns
    -------
    pd.DataFrame
        Result table with a fixed column order, indexed by gene.
    """
    # sort rows by p-value; None p-values sort last (1.1 exceeds any p-value)
    permutation_df = pd.DataFrame(sorted(permutation_result, key=lambda x: x[2] if x[2] is not None else 1.1),
                                  columns=['gene', 'inactivating count',
                                           'inactivating p-value',
                                           'Total SNV Mutations',
                                           'SNVs Unmapped to Ref Tx'])
    permutation_df['inactivating p-value'] = permutation_df['inactivating p-value'].astype('float')
    # BH correction is computed only on rows with a defined p-value
    tmp_df = permutation_df[permutation_df['inactivating p-value'].notnull()]
    permutation_df['inactivating BH q-value'] = np.nan
    permutation_df.loc[tmp_df.index, 'inactivating BH q-value'] = mypval.bh_fdr(tmp_df['inactivating p-value'])
    # sort by p-value descending, then reverse the row order
    permutation_df = permutation_df.sort_values(by='inactivating p-value', ascending=False)
    permutation_df = permutation_df.reindex(index=permutation_df.index[::-1])
    permutation_df = permutation_df.set_index('gene', drop=False)
    col_order = ['gene', 'Total SNV Mutations', 'SNVs Unmapped to Ref Tx',
                 'inactivating count', 'inactivating p-value',
                 'inactivating BH q-value']
    return permutation_df[col_order]
Assembles the results of the TSG permutation test into a table.
39,852
def get_frameshift_info(fs_df, bins):
    """Count frameshift indels stratified into `bins` length categories.

    Lengths divisible by three are skipped (in-frame, not frameshifts).
    The final category aggregates all remaining frameshift lengths.

    Returns (lengths, counts) as two parallel lists.
    """
    fs_df = compute_indel_length(fs_df)
    lengths, counts = [], []
    length = 1
    while len(lengths) < bins:
        if length % 3 == 0:
            # multiples of three preserve the reading frame; skip them
            length += 1
            continue
        if len(lengths) == bins - 1:
            # last category: collect every longer frameshift length
            mask = (fs_df['indel len'] >= length) & ((fs_df['indel len'] % 3) > 0)
        else:
            mask = fs_df['indel len'] == length
        counts.append(len(fs_df[mask]))
        lengths.append(length)
        length += 1
    return lengths, counts
Counts frameshifts stratified by a given length .
39,853
def set_mutation_type(self, mut_type=''):
    """Assign ``self.mutation_type`` from an explicit label or the flags.

    When no label is supplied, validity/unknown/no-protein/missing states
    are checked first, then the remaining flags in a fixed priority order;
    the first matching flag determines the label.
    """
    if mut_type:
        # caller supplied the label directly
        self.mutation_type = mut_type
        return
    if not self.is_valid:
        self.mutation_type = 'not valid'
        return
    if self.unknown_effect:
        self.mutation_type = 'unknown effect'
        return
    if self.is_no_protein:
        self.mutation_type = 'no protein'
        return
    if self.is_missing_info:
        self.mutation_type = 'missing'
        return
    # flag -> label, checked in priority order; if none matches the
    # attribute is left untouched (same as the original chain)
    priority = [('is_lost_stop', 'Nonstop_Mutation'),
                ('is_lost_start', 'Translation_Start_Site'),
                ('is_synonymous', 'Silent'),
                ('is_missense', 'Missense_Mutation'),
                ('is_indel', 'In_Frame_Indel'),
                ('is_nonsense_mutation', 'Nonsense_Mutation'),
                ('is_frame_shift', 'Frame_Shift_Indel')]
    for flag, label in priority:
        if getattr(self, flag):
            self.mutation_type = label
            return
Sets the mutation type attribute to a single label based on attribute flags .
39,854
def set_amino_acid(self, aa):
    """Normalize an HGVS amino-acid string, then parse it.

    The string is upper-cased, a leading "p." prefix is stripped, and the
    mutation-status flags and HGVS syntax parsing are refreshed.
    """
    hgvs = aa.upper()
    if hgvs.startswith('P.'):
        hgvs = hgvs[2:]
    self.__set_mutation_status()
    self.__parse_hgvs_syntax(hgvs)
Set amino acid change and position .
39,855
def __set_missense_status(self, hgvs_string):
    """Flag the mutation as missense when it matches "X123Y" syntax."""
    # single reference AA, a numeric position, and a single alternate AA
    is_simple_sub = re.search(r'^[A-Z?]\d+[A-Z?]$', hgvs_string) is not None
    self.is_missense = is_simple_sub
    if is_simple_sub:
        self.is_non_silent = True
Sets the self . is_missense flag .
39,856
def __set_lost_start_status(self, hgvs_string):
    """Flag loss of the start codon (non-synonymous change at position 1)."""
    match = re.search(r'^([A-Z?])(\d+)([A-Z?])$', hgvs_string)
    lost_start = False
    if match:
        ref_aa, position, new_aa = match.groups()
        # only a substitution at codon 1 that actually changes the residue
        lost_start = int(position) == 1 and ref_aa != new_aa
    self.is_lost_start = lost_start
    if lost_start:
        self.is_non_silent = True
Sets the self . is_lost_start flag .
39,857
def __set_frame_shift_status(self):
    """Flag frame shifts: an explicit "fs" tag or "X123YZ...*" syntax."""
    hgvs = self.hgvs_original
    shifted = 'fs' in hgvs or re.search(r'[A-Z]\d+[A-Z]+\*', hgvs) is not None
    self.is_frame_shift = shifted
    if shifted:
        self.is_non_silent = True
Check for frame shift and set the self . is_frame_shift flag .
39,858
def __set_lost_stop_status(self, hgvs_string):
    """Flag mutations that replace the stop codon with something else."""
    # "*123X..." with an optional new stop further downstream
    lost_stop_pattern = r'^\*\d+[A-Z?]+\*?$'
    lost = re.search(lost_stop_pattern, hgvs_string) is not None
    self.is_lost_stop = lost
    if lost:
        self.is_non_silent = True
Check if the stop codon was mutated to something other than a stop codon .
39,859
def __set_premature_stop_codon_status(self, hgvs_string):
    """Flag premature stop codons; direct "...*" endings are nonsense."""
    has_stop = re.search(r'.+\*(\d+)?$', hgvs_string) is not None
    self.is_premature_stop_codon = has_stop
    # a trailing "*" with no downstream offset is a classic nonsense mutation
    self.is_nonsense_mutation = has_stop and hgvs_string.endswith('*')
    if has_stop:
        self.is_non_silent = True
Set whether there is a premature stop codon .
39,860
def __set_indel_status(self):
    """Set insertion/deletion/indel flags from the original HGVS string."""
    hgvs = self.hgvs_original
    self.is_insertion = 'ins' in hgvs
    # "ins" takes precedence: only report a deletion when not an insertion
    self.is_deletion = 'del' in hgvs and not self.is_insertion
    self.is_indel = self.is_insertion or self.is_deletion
    if self.is_indel:
        self.is_non_silent = True
Sets flags related to the mutation being an indel .
39,861
def __set_unkown_effect(self, hgvs_string):
    """Flag unknown-effect annotations ("?", "=", or parenthesized forms)."""
    # COSMIC marks uncertain calls with "?", "=", or surrounding parentheses
    self.unknown_effect = (hgvs_string in ('?', '(=)', '=')
                           or '(' in hgvs_string)
    # any question mark means part of the annotation is missing
    self.is_missing_info = '?' in hgvs_string
Sets a flag for unknown effect according to HGVS syntax. The COSMIC database also uses unconventional question marks to denote missing information.
39,862
def deleterious_permutation(obs_del, context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000, stop_criteria=100, pseudo_count=0, max_batch=25000):
    """Monte-Carlo p-value for the observed deleterious mutation count.

    Mutations are randomly re-positioned within their sequence contexts;
    the fraction of simulations reaching at least `obs_del` deleterious
    mutations is returned.  Stops early after `stop_criteria` exceedances.

    Parameters
    ----------
    obs_del : int
        Observed number of deleterious mutations.
    context_counts : pd.Series
        Mutation counts per sequence context.
    context_to_mut : dict
        Context -> list of somatic bases seen in that context.
    seq_context : object
        Provides ``random_pos`` for sampling positions per context.
    gene_seq : object
        Gene sequence used to interpret the mutated positions.
    num_permutations : int
        Maximum number of simulations to run.
    stop_criteria : int
        Early-stop threshold on null exceedances.
    pseudo_count : int
        Not used in this function's body; kept for interface parity with
        the sibling permutation functions.
    max_batch : int
        Simulations are drawn in batches of at most this size.

    Returns
    -------
    float
        Empirical p-value.
    """
    mycontexts = context_counts.index.tolist()
    somatic_base = [base for one_context in mycontexts
                    for base in context_to_mut[one_context]]
    # split the requested permutations into memory-bounded batches
    max_batch = min(num_permutations, max_batch)
    num_batches = num_permutations // max_batch
    remainder = num_permutations % max_batch
    batch_sizes = [max_batch] * num_batches
    if remainder:
        batch_sizes += [remainder]
    num_sim = 0       # simulations actually performed
    null_del_ct = 0   # simulations with >= obs_del deleterious mutations
    for j, batch_size in enumerate(batch_sizes):
        # stop drawing new batches once enough exceedances accumulated
        if null_del_ct >= stop_criteria:
            break
        tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                                batch_size)
        tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
        for i, row in enumerate(tmp_mut_pos):
            # annotate the randomized positions at the amino-acid level
            tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)
            tmp_del_count = cutils.calc_deleterious_info(tmp_mut_info['Reference AA'],
                                                         tmp_mut_info['Somatic AA'],
                                                         tmp_mut_info['Codon Pos'])
            if tmp_del_count >= obs_del:
                null_del_ct += 1
            if null_del_ct >= stop_criteria:
                break
        num_sim += i + 1
    del_pval = float(null_del_ct) / (num_sim)
    return del_pval
Performs null - permutations for deleterious mutation statistics in a single gene .
39,863
def protein_permutation(graph_score, num_codons_obs, context_counts, context_to_mut, seq_context, gene_seq, gene_graph, num_permutations=10000, stop_criteria=100, pseudo_count=0):
    """Monte-Carlo test for the protein-graph clustering statistic.

    The first ``stop_criteria - 1`` simulations calibrate the expected
    relative increase in graph coverage per mutated codon; the observed
    and simulated normalized entropy statistics are then compared to
    estimate an empirical p-value, with early stopping.

    Returns
    -------
    (float, float)
        The empirical p-value and the observed normalized statistic.
    """
    mycontexts = context_counts.index.tolist()
    somatic_base = [base for one_context in mycontexts
                    for base in context_to_mut[one_context]]
    # sample random positions matching each context's mutation count
    tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                            num_permutations)
    tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
    null_graph_entropy_ct = 0   # simulations at least as extreme as observed
    coverage_list = []          # coverage from the calibration simulations
    num_mut_list = []           # mutated-codon counts per calibration sim
    graph_entropy_list = []     # entropies from the calibration simulations
    for i, row in enumerate(tmp_mut_pos):
        if i == stop_criteria - 1:
            # calibration done: estimate the expected coverage increase per
            # mutated codon and score the stored calibration simulations
            rel_inc = [coverage_list[k] / float(num_mut_list[k])
                       for k in range(stop_criteria - 1)
                       if coverage_list[k]]
            exp_rel_inc = np.mean(rel_inc)
            if num_codons_obs:
                obs_stat = graph_score / np.log2(exp_rel_inc * num_codons_obs)
            else:
                obs_stat = 1.0
            sim_stat_list = [ent / np.log2(exp_rel_inc * num_mut_list[l])
                             for l, ent in enumerate(graph_entropy_list)]
            null_graph_entropy_ct = len([s for s in sim_stat_list
                                         if s - utils.epsilon <= obs_stat])
        # amino-acid consequences and per-codon position counts for this sim
        tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)
        tmp_tuple = cutils.calc_pos_info(tmp_mut_info['Codon Pos'],
                                         tmp_mut_info['Reference AA'],
                                         tmp_mut_info['Somatic AA'],
                                         pseudo_count=pseudo_count,
                                         is_obs=0)
        _, _, _, tmp_pos_ct = tmp_tuple
        if i < stop_criteria - 1:
            # NOTE(review): tmp_num_mut_codons is only refreshed while in the
            # calibration phase; later iterations reuse the last value when
            # computing sim_stat -- confirm this is intended
            tmp_num_mut_codons = len(tmp_pos_ct)
            num_mut_list.append(tmp_num_mut_codons)
        tmp_graph_entropy, tmp_coverage = scores.compute_ng_stat(gene_graph, tmp_pos_ct)
        if i < stop_criteria - 1:
            # still calibrating: store results for later scoring
            coverage_list.append(tmp_coverage)
            graph_entropy_list.append(tmp_graph_entropy)
        if i >= stop_criteria:
            # past calibration: score each simulation on the fly
            if tmp_num_mut_codons:
                sim_stat = tmp_graph_entropy / np.log2(exp_rel_inc * tmp_num_mut_codons)
            else:
                sim_stat = 1.0
            if sim_stat - utils.epsilon <= obs_stat:
                null_graph_entropy_ct += 1
            # early stop once enough extreme simulations accumulated
            if null_graph_entropy_ct >= stop_criteria:
                break
    protein_pval = float(null_graph_entropy_ct) / (i + 1)
    return protein_pval, obs_stat
Performs null - simulations for position - based mutation statistics in a single gene .
39,864
def effect_permutation(context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000, pseudo_count=0):
    """Simulate null distributions for effect-based statistics of one gene.

    Returns three parallel lists (one entry per permutation): effect
    entropy, number of recurrent mutations, and number of inactivating
    mutations.
    """
    mycontexts = context_counts.index.tolist()
    somatic_base = [base for one_context in mycontexts
                    for base in context_to_mut[one_context]]
    # draw random positions consistent with each context's mutation count
    tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                            num_permutations)
    tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
    effect_entropy_list, recur_list, inactivating_list = [], [], []
    for row in tmp_mut_pos:
        # amino-acid consequences of the randomized mutation positions
        tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)
        tmp_entropy, tmp_recur, tmp_inactivating = cutils.calc_effect_info(tmp_mut_info['Codon Pos'],
                                                                           tmp_mut_info['Reference AA'],
                                                                           tmp_mut_info['Somatic AA'],
                                                                           pseudo_count=pseudo_count,
                                                                           is_obs=0)
        effect_entropy_list.append(tmp_entropy)
        recur_list.append(tmp_recur)
        inactivating_list.append(tmp_inactivating)
    return effect_entropy_list, recur_list, inactivating_list
Performs null - permutations for effect - based mutation statistics in a single gene .
39,865
def non_silent_ratio_permutation(context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000):
    """Simulate the null distribution of non-silent mutation counts.

    Observed mutations are randomly re-positioned within their sequence
    contexts; the non-silent count of each permutation is returned.
    """
    contexts = context_counts.index.tolist()
    somatic_base = [base
                    for ctxt in contexts
                    for base in context_to_mut[ctxt]]
    # draw random positions matching each context's mutation count
    sampled = seq_context.random_pos(context_counts.iteritems(),
                                     num_permutations)
    permuted_positions = np.hstack(pos_array for base, pos_array in sampled)
    non_silent_counts = []
    for positions in permuted_positions:
        mut_info = mc.get_aa_mut_info(positions, somatic_base, gene_seq)
        count = cutils.calc_non_silent_info(mut_info['Reference AA'],
                                            mut_info['Somatic AA'],
                                            mut_info['Codon Pos'])
        non_silent_counts.append(count)
    return non_silent_counts
Performs null - permutations for non - silent ratio across all genes .
39,866
def summary_permutation(context_counts, context_to_mut, seq_context, gene_seq, score_dir, num_permutations=10000, min_frac=0.0, min_recur=2, drop_silent=False):
    """Simulate permutations and summarize each one as gene-level features.

    Returns a list of rows [gene name, permutation number, gene length,
    summary features...], one per permutation.
    """
    mycontexts = context_counts.index.tolist()
    somatic_base = [base for one_context in mycontexts
                    for base in context_to_mut[one_context]]
    # sample random positions consistent with the context counts
    tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                            num_permutations)
    tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
    gene_name = gene_seq.bed.gene_name
    gene_len = gene_seq.bed.cds_len
    summary_info_list = []
    for i, row in enumerate(tmp_mut_pos):
        # amino-acid consequences for this permutation
        tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)
        tmp_summary = cutils.calc_summary_info(tmp_mut_info['Reference AA'],
                                               tmp_mut_info['Somatic AA'],
                                               tmp_mut_info['Codon Pos'],
                                               gene_name,
                                               score_dir,
                                               min_frac=min_frac,
                                               min_recur=min_recur)
        if drop_silent:
            # zero out element 1 of the summary when silent mutations are
            # dropped -- presumably the silent count; confirm against
            # calc_summary_info's return order
            tmp_summary[1] = 0
        summary_info_list.append([gene_name, i + 1, gene_len] + tmp_summary)
    return summary_info_list
Performs null - permutations and summarizes the results as features over the gene .
39,867
def maf_permutation(context_counts, context_to_mut, seq_context, gene_seq, num_permutations=10000, drop_silent=False):
    """Simulate permutations for one gene and report them as MAF-like rows.

    Each permuted mutation becomes a row [gene, strand, chromosome, start,
    end, reference base, somatic base, context, DNA change, protein change,
    variant classification], so novel metrics can be computed against an
    empirical null distribution.
    """
    mycontexts = context_counts.index.tolist()
    # pair each somatic base with the context it was observed in
    somatic_base, base_context = zip(*[(base, one_context)
                                       for one_context in mycontexts
                                       for base in context_to_mut[one_context]])
    # sample random positions matching each context's mutation count
    tmp_contxt_pos = seq_context.random_pos(context_counts.iteritems(),
                                            num_permutations)
    tmp_mut_pos = np.hstack(pos_array for base, pos_array in tmp_contxt_pos)
    gene_name = gene_seq.bed.gene_name
    strand = gene_seq.bed.strand
    chrom = gene_seq.bed.chrom
    gene_seq.bed.init_genome_coordinates()  # enable seq pos -> genome mapping
    maf_list = []
    for row in tmp_mut_pos:
        # translate coding positions to 1-based genome coordinates
        pos2genome = np.vectorize(lambda x: gene_seq.bed.seqpos2genome[x] + 1)
        genome_coord = pos2genome(row)
        # amino-acid info and variant classification for the whole row
        tmp_mut_info = mc.get_aa_mut_info(row, somatic_base, gene_seq)
        var_class = cutils.get_variant_classification(tmp_mut_info['Reference AA'],
                                                      tmp_mut_info['Somatic AA'],
                                                      tmp_mut_info['Codon Pos'])
        for k, mysomatic_base in enumerate(somatic_base):
            # format the DNA change in HGVS-like c. notation
            ref_nuc = tmp_mut_info['Reference Nuc'][k]
            nuc_pos = row[k]
            dna_change = 'c.{0}{1}>{2}'.format(ref_nuc, nuc_pos, mysomatic_base)
            # format the protein change in HGVS-like p. notation
            ref_aa = tmp_mut_info['Reference AA'][k]
            somatic_aa = tmp_mut_info['Somatic AA'][k]
            codon_pos = tmp_mut_info['Codon Pos'][k]
            protein_change = 'p.{0}{1}{2}'.format(ref_aa, codon_pos, somatic_aa)
            # report bases relative to the '+' strand for '-' strand genes
            if strand == '-':
                ref_nuc = utils.rev_comp(ref_nuc)
                mysomatic_base = utils.rev_comp(mysomatic_base)
            # optionally skip silent mutations entirely
            if drop_silent and var_class[k].decode() == 'Silent':
                continue
            maf_line = [gene_name, strand, chrom,
                        genome_coord[k], genome_coord[k],
                        ref_nuc, mysomatic_base, base_context[k],
                        dna_change, protein_change,
                        var_class[k].decode()]
            maf_list.append(maf_line)
    return maf_list
Performs null - permutations across all genes and records the results in a format like a MAF file . This could be useful for examining the null permutations because the alternative approaches always summarize the results . With the simulated null - permutations novel metrics can be applied to create an empirical null - distribution .
39,868
def editor_js_initialization(selector, **extra_settings):
    """Render the <script> tag initializing the markdown editor.

    Options start from the preview-parser URL merged with the project's
    MARKDOWN_EDITOR_SETTINGS; call-site keyword overrides win.
    """
    options = dict(previewParserPath=reverse('django_markdown_preview'),
                   **settings.MARKDOWN_EDITOR_SETTINGS)
    options.update(extra_settings)
    template = loader.get_template(settings.MARKDOWN_EDITOR_INIT_TEMPLATE)
    payload = simplejson.dumps(options)
    return template.render(dict(selector=selector, extra_settings=payload))
Return script tag with initialization code .
39,869
def preview(request):
    """Render the markdown preview page, optionally restricted to staff."""
    if settings.MARKDOWN_PROTECT_PREVIEW:
        user = getattr(request, 'user', None)
        if not user or not user.is_staff:
            # local import: only needed on the access-denied path
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(request.get_full_path())
    context = dict(content=request.POST.get('data', 'No content posted'),
                   css=settings.MARKDOWN_STYLE)
    return render(request, settings.MARKDOWN_PREVIEW_TEMPLATE, context)
Render preview page .
39,870
def register():
    """Swap the stock FlatPage admin for the markdown-enabled one."""
    # the default admin must be unregistered before the replacement goes in
    admin.site.unregister(FlatPage)
    admin.site.register(FlatPage, LocalFlatPageAdmin)
Register markdown for flatpages .
39,871
def markdown_editor(selector):
    """Build template context enabling the markdown editor on `selector`."""
    preview_settings = dict(previewParserPath=reverse('django_markdown_preview'))
    encoded = mark_safe(simplejson.dumps(preview_settings))
    return dict(selector=selector, extra_settings=encoded)
Enable markdown editor for given textarea .
39,872
def markdown_media_css():
    """Return template context with the editor's CSS asset paths."""
    set_css = posixpath.join(settings.MARKDOWN_SET_PATH,
                             settings.MARKDOWN_SET_NAME,
                             'style.css')
    skin_css = posixpath.join('django_markdown', 'skins',
                              settings.MARKDOWN_EDITOR_SKIN,
                              'style.css')
    return dict(CSS_SET=set_css, CSS_SKIN=skin_css)
Add css requirements to HTML .
39,873
def convert(source, to, format=None, extra_args=(), encoding='utf-8'):
    """Convert `source` from markup format `format` to format `to`.

    Thin wrapper delegating to the module-level ``_convert`` helper with the
    file reader/processor callbacks.
    """
    return _convert(_read_file, _process_file, source, to, format, extra_args,
                    encoding=encoding)
Convert given source from format to another .
39,874
def get_pandoc_formats():
    """Parse `pandoc -h` output into (input_formats, output_formats).

    Raises OSError when the pandoc executable cannot be launched.
    """
    try:
        p = subprocess.Popen(['pandoc', '-h'],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
    except OSError:
        raise OSError("You probably do not have pandoc installed.")
    help_text = p.communicate()[0].decode().splitlines(False)
    # the format lists sit between the usage line and the "Options:" header
    summary = ' '.join(help_text[1:help_text.index('Options:')])
    parts = summary.split('Output formats: ')
    input_part = parts[0].split('Input formats: ')[1]
    in_formats = [fmt.strip() for fmt in input_part.split(',')]
    out_formats = [fmt.strip() for fmt in parts[1].split(',')]
    return in_formats, out_formats
Dynamic preprocessor for Pandoc formats .
39,875
def render(self, name, value, attrs=None, renderer=None):
    """Render the widget plus the editor initialization script."""
    widget_html = super(MarkdownWidget, self).render(name, value, attrs,
                                                     renderer)
    final_attrs = self.build_attrs(attrs)
    init_script = editor_js_initialization("#%s" % final_attrs['id'])
    return mark_safe(widget_html + init_script)
Render widget .
39,876
def extendMarkdown(self, md, md_globals):
    """Register this extension and its graphviz preprocessor with `md`.

    The preprocessor is inserted at "_begin" so graphviz blocks are
    replaced before any other preprocessing runs.
    """
    md.registerExtension(self)
    md.preprocessors.add('graphviz_block',
                         InlineGraphvizPreprocessor(md),
                         "_begin")
Add InlineGraphvizPreprocessor to the Markdown instance .
39,877
def run(self, lines):
    """Replace graphviz fenced blocks with rendered inline images.

    Each matched block is piped to the requested graphviz command.  SVG
    output is embedded directly into the document; PNG output becomes a
    markdown image with a base64 data URL.  On any rendering failure the
    error and the offending source are returned as <pre> blocks.
    """
    text = "\n".join(lines)
    while 1:
        m = BLOCK_RE.search(text)
        if not m:
            break
        command = m.group('command')
        if command not in SUPPORTED_COMMAMDS:
            raise Exception('Command not supported: %s' % command)
        filename = m.group('filename')
        content = m.group('content')
        filetype = filename[filename.rfind('.') + 1:]
        args = [command, '-T' + filetype]
        try:
            proc = subprocess.Popen(args,
                                    stdin=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            proc.stdin.write(content.encode('utf-8'))
            output, err = proc.communicate()
            if filetype == 'svg':
                # SVG markup can be embedded into the document as-is
                img = output.decode('utf-8')
            elif filetype == 'png':
                # BUG FIX: b64encode returns bytes; decode to str so the
                # data URL does not contain a b'...' repr under Python 3
                encoded = base64.b64encode(output).decode('ascii')
                data_path = "data:image/png;base64,%s" % encoded
                img = "![" + filename + "](" + data_path + ")"
            else:
                # previously this fell through to a NameError on `img`;
                # report the unsupported type explicitly instead
                raise Exception('Unsupported output type: %s' % filetype)
            text = '%s\n%s\n%s' % (text[:m.start()], img, text[m.end():])
        except Exception as e:
            err = str(e) + ' : ' + str(args)
            return ('<pre>Error : ' + err + '</pre>'
                    '<pre>' + content + '</pre>').split('\n')
    return text.split("\n")
Match and generate dot code blocks .
39,878
def post(self, command, data=None):
    """POST `data` to the API `command`, refreshing the OAuth token first.

    A new token is fetched whenever the cached one has expired.
    """
    now = calendar.timegm(datetime.datetime.now().timetuple())
    if now > self.expiration:
        # token expired: re-authenticate and rebuild the request headers
        auth = self.__open("/oauth/token", data=self.oauth)
        self.__sethead(auth['access_token'])
    return self.__open("%s%s" % (self.api, command),
                       headers=self.head, data=data)
Post data to API .
39,879
def __sethead(self, access_token):
    """Store the bearer token and build the HTTP request headers.

    Tokens are treated as valid for 30 minutes from now.
    """
    self.access_token = access_token
    issued_at = calendar.timegm(datetime.datetime.now().timetuple())
    self.expiration = issued_at + 1800
    self.head = {"Authorization": "Bearer %s" % access_token,
                 "User-Agent": self.user_agent}
Set HTTP header .
39,880
def __open(self, url, headers=None, data=None, baseurl=""):
    """Issue a raw HTTP request and return the decoded JSON response.

    Returns False on HTTP 408 (used to signal the vehicle is asleep or
    unreachable); raises TeslaException for any other HTTP error.
    """
    headers = headers or {}
    if not baseurl:
        baseurl = self.baseurl
    req = Request("%s%s" % (baseurl, url), headers=headers)
    _LOGGER.debug(url)
    try:
        # attaching a body switches the request to POST
        req.data = urlencode(data).encode('utf-8')
    except TypeError:
        # data was None -> leave the request as a GET
        pass
    opener = build_opener()
    try:
        resp = opener.open(req)
        charset = resp.info().get('charset', 'utf-8')
        data = json.loads(resp.read().decode(charset))
        opener.close()
        _LOGGER.debug(json.dumps(data))
        return data
    except HTTPError as exception_:
        if exception_.code == 408:
            # 408 indicates the car did not respond; signal with False
            _LOGGER.debug("%s", exception_)
            return False
        raise TeslaException(exception_.code)
Use raw urlopen command .
39,881
def update(self):
    """Refresh the parking-brake state from cached drive parameters."""
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_drive_params(self._id)
    if data:
        shift_state = data['shift_state']
        # no shift state reported, or 'P', means the car is parked
        self.__state = not shift_state or shift_state == 'P'
Update the parking brake sensor .
39,882
def update(self):
    """Refresh HVAC state from the controller's cached climate parameters.

    Readings are only applied when no manual command was issued in the
    last 60 seconds, so a user command is not clobbered by stale polling.
    """
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_climate_params(self._id)
    if data:
        if time.time() - self.__manual_update_time > 60:
            self.__is_auto_conditioning_on = (data['is_auto_conditioning_on'])
            self.__is_climate_on = data['is_climate_on']
            # falsy API values keep the previous reading
            self.__driver_temp_setting = (data['driver_temp_setting']
                                          if data['driver_temp_setting']
                                          else self.__driver_temp_setting)
            self.__passenger_temp_setting = (data['passenger_temp_setting']
                                             if data['passenger_temp_setting']
                                             else self.__passenger_temp_setting)
            self.__inside_temp = (data['inside_temp']
                                  if data['inside_temp']
                                  else self.__inside_temp)
            self.__outside_temp = (data['outside_temp']
                                   if data['outside_temp']
                                   else self.__outside_temp)
            self.__fan_status = data['fan_status']
Update the HVAC state .
39,883
def set_temperature(self, temp):
    """Set both driver and passenger HVAC setpoints to `temp`.

    The value is rounded to one decimal; the local state is only updated
    when the API confirms the command succeeded.
    """
    temp = round(temp, 1)
    self.__manual_update_time = time.time()
    data = self._controller.command(self._id, 'set_temps',
                                    {"driver_temp": temp,
                                     "passenger_temp": temp},
                                    wake_if_asleep=True)
    # guard against a False/None response (HTTP 408 yields False), matching
    # the defensive check used by start_charge()/stop_charge()
    if data and data['response']['result']:
        self.__driver_temp_setting = temp
        self.__passenger_temp_setting = temp
Set both the driver and passenger temperature to temp .
39,884
def set_status(self, enabled):
    """Turn the HVAC (auto conditioning) on or off, then refresh state."""
    self.__manual_update_time = time.time()
    if enabled:
        data = self._controller.command(self._id, 'auto_conditioning_start',
                                        wake_if_asleep=True)
        # guard against a False/None response (HTTP 408 yields False)
        if data and data['response']['result']:
            self.__is_auto_conditioning_on = True
            self.__is_climate_on = True
    else:
        data = self._controller.command(self._id, 'auto_conditioning_stop',
                                        wake_if_asleep=True)
        if data and data['response']['result']:
            self.__is_auto_conditioning_on = False
            self.__is_climate_on = False
    self.update()
Enable or disable the HVAC .
39,885
def update(self):
    """Refresh inside/outside temperatures from cached climate data."""
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_climate_params(self._id)
    if data:
        # a falsy reading from the API keeps the previous value
        if data['inside_temp']:
            self.__inside_temp = data['inside_temp']
        if data['outside_temp']:
            self.__outside_temp = data['outside_temp']
Update the temperature .
39,886
def update(self):
    """Refresh the charging state; manual commands pin it for 60 seconds."""
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_charging_params(self._id)
    if data and (time.time() - self.__manual_update_time > 60):
        self.__charger_state = data['charging_state'] == "Charging"
Update the charging state of the Tesla Vehicle .
39,887
def start_charge(self):
    """Begin charging unless already flagged as charging."""
    if self.__charger_state:
        return
    data = self._controller.command(self._id, 'charge_start',
                                    wake_if_asleep=True)
    if data and data['response']['result']:
        self.__charger_state = True
        self.__manual_update_time = time.time()
Start charging the Tesla Vehicle .
39,888
def stop_charge(self):
    """Stop charging if currently flagged as charging."""
    if not self.__charger_state:
        return
    data = self._controller.command(self._id, 'charge_stop',
                                    wake_if_asleep=True)
    if data and data['response']['result']:
        self.__charger_state = False
        self.__manual_update_time = time.time()
Stop charging the Tesla Vehicle .
39,889
def update(self):
    """Refresh the max-range flag; manual changes are pinned for 60 s."""
    self._controller.update(self._id, wake_if_asleep=False)
    data = self._controller.get_charging_params(self._id)
    recently_set = time.time() - self.__manual_update_time <= 60
    if data and not recently_set:
        self.__maxrange_state = data['charge_to_max_range']
Update the status of the range setting .
39,890
def set_max(self):
    """Switch the charger to max-range mode (for trips)."""
    if self.__maxrange_state:
        return
    data = self._controller.command(self._id, 'charge_max_range',
                                    wake_if_asleep=True)
    # guard against a False/None response (HTTP 408 yields False), matching
    # the defensive check already used by set_standard()
    if data and data['response']['result']:
        self.__maxrange_state = True
        self.__manual_update_time = time.time()
Set the charger to max range for trips .
39,891
def set_standard(self):
    """Switch the charger back to standard range (daily commute)."""
    if not self.__maxrange_state:
        return
    data = self._controller.command(self._id, 'charge_standard',
                                    wake_if_asleep=True)
    if data and data['response']['result']:
        self.__maxrange_state = False
        self.__manual_update_time = time.time()
Set the charger to standard range for daily commute .
39,892
def unlock(self):
    """Unlock the doors (extends handles where applicable)."""
    if not self.__lock_state:
        return
    data = self._controller.command(self._id, 'door_unlock',
                                    wake_if_asleep=True)
    # guard against a False/None response (HTTP 408 yields False), matching
    # the defensive style of start_charge()/set_standard()
    if data and data['response']['result']:
        self.__lock_state = False
        self.__manual_update_time = time.time()
Unlock the doors and extend handles where applicable .
39,893
def lock(self):
    """Close the charge-port door (the 'lock' action for this entity)."""
    if self.__lock_state:
        return
    data = self._controller.command(self._id, 'charge_port_door_close',
                                    wake_if_asleep=True)
    # guard against a False/None response (HTTP 408 yields False), matching
    # the defensive style of start_charge()/set_standard()
    if data and data['response']['result']:
        self.__lock_state = True
        self.__manual_update_time = time.time()
Close the charger door .
39,894
def wake_up(func):
    """Decorator retrying a Controller API call, waking the car if needed.

    Expects the wrapped function to be invoked as ``func(inst, vehicle_id,
    ...)``.  If the first attempt does not yield a valid result and
    ``wake_if_asleep`` is truthy in kwargs, the vehicle is woken (up to 5
    attempts with exponential backoff) and the call is retried (again up
    to 5 attempts).  Raises RetryLimitError when retries are exhausted.
    """
    @wraps(func)
    def wrapped(*args, **kwargs):
        def valid_result(result):
            # a result is valid when it is True, or a dict whose 'response'
            # either reports result True, a reason other than
            # 'could_not_wake_buses', or carries no 'result' key at all
            try:
                return (result is not None and result is not False and
                        (result is True or
                         (isinstance(result, dict) and
                          isinstance(result['response'], dict) and
                          ('result' in result['response'] and
                           result['response']['result'] is True) or
                          ('reason' in result['response'] and
                           result['response']['reason'] != 'could_not_wake_buses') or
                          ('result' not in result['response']))))
            except TypeError as exception:
                _LOGGER.error("Result: %s, %s", result, exception)
        retries = 0
        sleep_delay = 2
        inst = args[0]        # the Controller instance
        vehicle_id = args[1]  # the vehicle being addressed
        result = None
        # first attempt only when the car is believed to be online
        if (vehicle_id is not None and vehicle_id in inst.car_online and
                inst.car_online[vehicle_id]):
            try:
                result = func(*args, **kwargs)
            except TeslaException:
                pass
        if valid_result(result):
            return result
        _LOGGER.debug("wake_up needed for %s -> %s \n"
                      "Info: args:%s, kwargs:%s, "
                      "vehicle_id:%s, car_online:%s",
                      func.__name__, result, args, kwargs, vehicle_id,
                      inst.car_online)
        inst.car_online[vehicle_id] = False
        # wake loop: exponential backoff, at most 5 attempts
        while ('wake_if_asleep' in kwargs and kwargs['wake_if_asleep'] and
               (vehicle_id is None or
                (vehicle_id is not None and
                 vehicle_id in inst.car_online and
                 not inst.car_online[vehicle_id]))):
            result = inst._wake_up(vehicle_id)
            _LOGGER.debug("%s(%s): Wake Attempt(%s): %s",
                          func.__name__, vehicle_id, retries, result)
            if not result:
                if retries < 5:
                    time.sleep(sleep_delay ** (retries + 2))
                    retries += 1
                    continue
                else:
                    inst.car_online[vehicle_id] = False
                    raise RetryLimitError
            else:
                break
        # retry loop for the wrapped call itself, again capped at 5
        retries = 0
        while True:
            try:
                result = func(*args, **kwargs)
                _LOGGER.debug("%s(%s): Retry Attempt(%s): %s",
                              func.__name__, vehicle_id, retries, result)
            except TeslaException:
                pass
            finally:
                retries += 1
                time.sleep(sleep_delay ** (retries + 1))
            if valid_result(result):
                return result
            if retries >= 5:
                raise RetryLimitError
    return wrapped
Wrap a API f so it will attempt to wake the vehicle if asleep .
39,895
def post(self, vehicle_id, command, data=None, wake_if_asleep=True):
    """POST `command` for `vehicle_id`; waking is handled by the decorator."""
    payload = data or {}
    url = 'vehicles/%i/%s' % (vehicle_id, command)
    return self.__connection.post(url, payload)
Send post command to the vehicle_id .
39,896
def get(self, vehicle_id, command, wake_if_asleep=False):
    """GET `command` for `vehicle_id`; waking is handled by the decorator."""
    url = 'vehicles/%i/%s' % (vehicle_id, command)
    return self.__connection.get(url)
Send get command to the vehicle_id .
39,897
def data_request(self, vehicle_id, name, wake_if_asleep=False):
    """Fetch the `name` data blob for `vehicle_id` and unwrap 'response'."""
    full = self.get(vehicle_id, 'vehicle_data/%s' % name,
                    wake_if_asleep=wake_if_asleep)
    return full['response']
Get requested data from vehicle_id .
39,898
def command(self, vehicle_id, name, data=None, wake_if_asleep=True):
    """POST the named command for `vehicle_id` with an optional payload."""
    payload = data or {}
    return self.post(vehicle_id, 'command/%s' % name, payload,
                     wake_if_asleep=wake_if_asleep)
Post name command to the vehicle_id .
39,899
def update(self, car_id=None, wake_if_asleep=False, force=False):
    """Refresh cached vehicle state for one car (or all cars).

    The API is polled at most once per ``update_interval`` per car unless
    `force` is set.  Returns True when at least one car's data was
    successfully refreshed.
    """
    cur_time = time.time()
    with self.__lock:
        last_update = self._last_attempted_update_time
        # refresh the online/offline roster when the interval has elapsed
        if (force or cur_time - last_update > self.update_interval):
            cars = self.get_vehicles()
            for car in cars:
                self.car_online[car['id']] = (car['state'] == 'online')
            self._last_attempted_update_time = cur_time
        update_succeeded = False
        for id_, value in self.car_online.items():
            # restrict to the requested car when one was given
            if (car_id is not None and car_id != id_):
                continue
            if (value and
                    (id_ in self.__update and self.__update[id_]) and
                    (force or
                     id_ not in self._last_update_time or
                     ((cur_time - self._last_update_time[id_]) >
                      self.update_interval))):
                try:
                    data = self.get(id_, 'data', wake_if_asleep)
                except TeslaException:
                    data = None
                if data and data['response']:
                    response = data['response']
                    # BUG FIX: cache under the iterated id_, not car_id --
                    # car_id is None when refreshing all cars, which stored
                    # every car's data under the key None
                    self.__climate[id_] = response['climate_state']
                    self.__charging[id_] = response['charge_state']
                    self.__state[id_] = response['vehicle_state']
                    self.__driving[id_] = response['drive_state']
                    self.__gui[id_] = response['gui_settings']
                    self.car_online[id_] = (response['state'] == 'online')
                    self._last_update_time[id_] = time.time()
                    update_succeeded = True
        return update_succeeded
Update all vehicle attributes in the cache .