idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
234,100
def _expand_if_needed ( self , dims , write_dims , start , offset ) : from operator import mul if numpy . isscalar ( start ) : start_is_scalar = True else : start_is_scalar = False existing_size = reduce ( mul , dims , 1 ) required_size = offset + reduce ( mul , write_dims , 1 ) if required_size > existing_size : print ( " required size:" , required_size , "existing size:" , existing_size ) # we need to expand the image ndim = len ( dims ) idim = len ( write_dims ) if start_is_scalar : if start == 0 : start = [ 0 ] * ndim else : raise ValueError ( "When expanding " "an existing image while writing, the start keyword " "must have the same number of dimensions " "as the image or be exactly 0, got %s " % start ) if idim != ndim : raise ValueError ( "When expanding " "an existing image while writing, the input image " "must have the same number of dimensions " "as the original. " "Got %d instead of %d" % ( idim , ndim ) ) new_dims = [ ] for i in xrange ( ndim ) : required_dim = start [ i ] + write_dims [ i ] if required_dim < dims [ i ] : # careful not to shrink the image! dimsize = dims [ i ] else : dimsize = required_dim new_dims . append ( dimsize ) print ( " reshaping image to:" , new_dims ) self . reshape ( new_dims )
expand the on - disk image if the indended write will extend beyond the existing dimensions
372
18
234,101
def get_extname ( self ) : name = self . _info [ 'extname' ] if name . strip ( ) == '' : name = self . _info [ 'hduname' ] return name . strip ( )
Get the name for this extension can be an empty string
49
11
234,102
def get_extver ( self ) : ver = self . _info [ 'extver' ] if ver == 0 : ver = self . _info [ 'hduver' ] return ver
Get the version for this extension .
41
7
234,103
def get_exttype ( self , num = False ) : if num : return self . _info [ 'hdutype' ] else : name = _hdu_type_map [ self . _info [ 'hdutype' ] ] return name
Get the extension type
54
4
234,104
def verify_checksum ( self ) : res = self . _FITS . verify_checksum ( self . _ext + 1 ) if res [ 'dataok' ] != 1 : raise ValueError ( "data checksum failed" ) if res [ 'hduok' ] != 1 : raise ValueError ( "hdu checksum failed" )
Verify the checksum in the header for this HDU .
75
13
234,105
def write_comment ( self , comment ) : self . _FITS . write_comment ( self . _ext + 1 , str ( comment ) )
Write a comment into the header
32
6
234,106
def write_key ( self , name , value , comment = "" ) : if value is None : self . _FITS . write_undefined_key ( self . _ext + 1 , str ( name ) , str ( comment ) ) elif isinstance ( value , bool ) : if value : v = 1 else : v = 0 self . _FITS . write_logical_key ( self . _ext + 1 , str ( name ) , v , str ( comment ) ) elif isinstance ( value , _stypes ) : self . _FITS . write_string_key ( self . _ext + 1 , str ( name ) , str ( value ) , str ( comment ) ) elif isinstance ( value , _ftypes ) : self . _FITS . write_double_key ( self . _ext + 1 , str ( name ) , float ( value ) , str ( comment ) ) elif isinstance ( value , _itypes ) : self . _FITS . write_long_key ( self . _ext + 1 , str ( name ) , int ( value ) , str ( comment ) ) elif isinstance ( value , ( tuple , list ) ) : vl = [ str ( el ) for el in value ] sval = ',' . join ( vl ) self . _FITS . write_string_key ( self . _ext + 1 , str ( name ) , sval , str ( comment ) ) else : sval = str ( value ) mess = ( "warning, keyword '%s' has non-standard " "value type %s, " "Converting to string: '%s'" ) warnings . warn ( mess % ( name , type ( value ) , sval ) , FITSRuntimeWarning ) self . _FITS . write_string_key ( self . _ext + 1 , str ( name ) , sval , str ( comment ) )
Write the input value to the header
410
7
234,107
def write_keys ( self , records_in , clean = True ) : if isinstance ( records_in , FITSHDR ) : hdr = records_in else : hdr = FITSHDR ( records_in ) if clean : is_table = hasattr ( self , '_table_type_str' ) # is_table = isinstance(self, TableHDU) hdr . clean ( is_table = is_table ) for r in hdr . records ( ) : name = r [ 'name' ] . upper ( ) value = r [ 'value' ] if name == 'COMMENT' : self . write_comment ( value ) elif name == 'HISTORY' : self . write_history ( value ) elif name == 'CONTINUE' : self . _write_continue ( value ) else : comment = r . get ( 'comment' , '' ) self . write_key ( name , value , comment = comment )
Write the keywords to the header .
208
7
234,108
def _update_info ( self ) : try : self . _FITS . movabs_hdu ( self . _ext + 1 ) except IOError : raise RuntimeError ( "no such hdu" ) self . _info = self . _FITS . get_hdu_info ( self . _ext + 1 )
Update metadata for this HDU
70
6
234,109
def _get_repr_list ( self ) : spacing = ' ' * 2 text = [ '' ] text . append ( "%sfile: %s" % ( spacing , self . _filename ) ) text . append ( "%sextension: %d" % ( spacing , self . _info [ 'hdunum' ] - 1 ) ) text . append ( "%stype: %s" % ( spacing , _hdu_type_map [ self . _info [ 'hdutype' ] ] ) ) extname = self . get_extname ( ) if extname != "" : text . append ( "%sextname: %s" % ( spacing , extname ) ) extver = self . get_extver ( ) if extver != 0 : text . append ( "%sextver: %s" % ( spacing , extver ) ) return text , spacing
Get some representation data common to all HDU types
190
10
234,110
def add_record ( self , record_in ) : if ( isinstance ( record_in , dict ) and 'name' in record_in and 'value' in record_in ) : record = { } record . update ( record_in ) else : record = FITSRecord ( record_in ) # only append when this name already exists if it is # a comment or history field, otherwise simply over-write key = record [ 'name' ] . upper ( ) key_exists = key in self . _record_map if not key_exists or key in ( 'COMMENT' , 'HISTORY' , 'CONTINUE' ) : # append new record self . _record_list . append ( record ) index = len ( self . _record_list ) - 1 self . _index_map [ key ] = index else : # over-write existing index = self . _index_map [ key ] self . _record_list [ index ] = record self . _record_map [ key ] = record
Add a new record . Strip quotes from around strings .
219
11
234,111
def get_comment ( self , item ) : key = item . upper ( ) if key not in self . _record_map : raise KeyError ( "unknown record: %s" % key ) if 'comment' not in self . _record_map [ key ] : return None else : return self . _record_map [ key ] [ 'comment' ]
Get the comment for the requested entry
77
7
234,112
def delete ( self , name ) : if isinstance ( name , ( list , tuple ) ) : for xx in name : self . delete ( xx ) else : if name in self . _record_map : del self . _record_map [ name ] self . _record_list = [ r for r in self . _record_list if r [ 'name' ] != name ]
Delete the specified entry if it exists .
82
8
234,113
def clean ( self , is_table = False ) : rmnames = [ 'SIMPLE' , 'EXTEND' , 'XTENSION' , 'BITPIX' , 'PCOUNT' , 'GCOUNT' , 'THEAP' , 'EXTNAME' , 'BLANK' , 'ZQUANTIZ' , 'ZDITHER0' , 'ZIMAGE' , 'ZCMPTYPE' , 'ZSIMPLE' , 'ZTENSION' , 'ZPCOUNT' , 'ZGCOUNT' , 'ZBITPIX' , 'ZEXTEND' , # 'FZTILELN','FZALGOR', 'CHECKSUM' , 'DATASUM' ] if is_table : # these are not allowed in tables rmnames += [ 'BUNIT' , 'BSCALE' , 'BZERO' , ] self . delete ( rmnames ) r = self . _record_map . get ( 'NAXIS' , None ) if r is not None : naxis = int ( r [ 'value' ] ) self . delete ( 'NAXIS' ) rmnames = [ 'NAXIS%d' % i for i in xrange ( 1 , naxis + 1 ) ] self . delete ( rmnames ) r = self . _record_map . get ( 'ZNAXIS' , None ) self . delete ( 'ZNAXIS' ) if r is not None : znaxis = int ( r [ 'value' ] ) rmnames = [ 'ZNAXIS%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) rmnames = [ 'ZTILE%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) rmnames = [ 'ZNAME%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) rmnames = [ 'ZVAL%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) r = self . _record_map . get ( 'TFIELDS' , None ) if r is not None : tfields = int ( r [ 'value' ] ) self . delete ( 'TFIELDS' ) if tfields > 0 : nbase = [ 'TFORM' , 'TTYPE' , 'TDIM' , 'TUNIT' , 'TSCAL' , 'TZERO' , 'TNULL' , 'TDISP' , 'TDMIN' , 'TDMAX' , 'TDESC' , 'TROTA' , 'TRPIX' , 'TRVAL' , 'TDELT' , 'TCUNI' , # 'FZALG' ] for i in xrange ( 1 , tfields + 1 ) : names = [ '%s%d' % ( n , i ) for n in nbase ] self . delete ( names )
Remove reserved keywords from the header .
679
7
234,114
def get ( self , item , default_value = None ) : found , name = self . _contains_and_name ( item ) if found : return self . _record_map [ name ] [ 'value' ] else : return default_value
Get the requested header entry by keyword name
54
8
234,115
def next ( self ) : if self . _current < len ( self . _record_list ) : rec = self . _record_list [ self . _current ] key = rec [ 'name' ] self . _current += 1 return key else : raise StopIteration
for iteration over the header entries
58
6
234,116
def set_record ( self , record , * * kw ) : if isstring ( record ) : card = FITSCard ( record ) self . update ( card ) self . verify ( ) else : if isinstance ( record , FITSRecord ) : self . update ( record ) elif isinstance ( record , dict ) : if 'name' in record and 'value' in record : self . update ( record ) elif 'card_string' in record : self . set_record ( record [ 'card_string' ] ) else : raise ValueError ( 'record must have name,value fields ' 'or a card_string field' ) else : raise ValueError ( "record must be a string card or " "dictionary or FITSRecord" )
check the record is valid and set keys in the dict
163
11
234,117
def _check_equals ( self ) : card_string = self [ 'card_string' ] if len ( card_string ) < 9 : self . _has_equals = False elif card_string [ 8 ] == '=' : self . _has_equals = True else : self . _has_equals = False
check for = in position 8 set attribute _has_equals
73
13
234,118
def _convert_value ( self , value_orig ) : import ast if value_orig is None : return value_orig if value_orig . startswith ( "'" ) and value_orig . endswith ( "'" ) : value = value_orig [ 1 : - 1 ] else : try : avalue = ast . parse ( value_orig ) . body [ 0 ] . value if isinstance ( avalue , ast . BinOp ) : # this is probably a string that happens to look like # a binary operation, e.g. '25-3' value = value_orig else : value = ast . literal_eval ( value_orig ) except Exception : value = self . _convert_string ( value_orig ) if isinstance ( value , int ) and '_' in value_orig : value = value_orig return value
things like 6 and 1 . 25 are converted with ast . literal_value
183
15
234,119
def _make_reads_for_assembly ( number_of_wanted_reads , total_reads , reads_in1 , reads_in2 , reads_out1 , reads_out2 , random_seed = None ) : random . seed ( random_seed ) if number_of_wanted_reads < total_reads : reads_written = 0 percent_wanted = 100 * number_of_wanted_reads / total_reads file_reader1 = pyfastaq . sequences . file_reader ( reads_in1 ) file_reader2 = pyfastaq . sequences . file_reader ( reads_in2 ) out1 = pyfastaq . utils . open_file_write ( reads_out1 ) out2 = pyfastaq . utils . open_file_write ( reads_out2 ) for read1 in file_reader1 : try : read2 = next ( file_reader2 ) except StopIteration : pyfastaq . utils . close ( out1 ) pyfastaq . utils . close ( out2 ) raise Error ( 'Error subsetting reads. No mate found for read ' + read1 . id ) if random . randint ( 0 , 100 ) <= percent_wanted : print ( read1 , file = out1 ) print ( read2 , file = out2 ) reads_written += 2 pyfastaq . utils . close ( out1 ) pyfastaq . utils . close ( out2 ) return reads_written else : os . symlink ( reads_in1 , reads_out1 ) os . symlink ( reads_in2 , reads_out2 ) return total_reads
Makes fastq files that are random subset of input files . Returns total number of reads in output files . If the number of wanted reads is > = total reads then just makes symlinks instead of making new copies of the input files .
356
48
234,120
def load_mutations ( gene_coords , mutation_to_drug_json , variants_txt , upstream_before = 100 ) : with open ( mutation_to_drug_json ) as f : drug_data = json . load ( f ) mutations = [ ] genes_with_indels = set ( ) genes_need_upstream = set ( ) genes_non_upstream = set ( ) with open ( variants_txt ) as f : for line in f : gene , variant , d_or_p = line . rstrip ( ) . split ( '\t' ) coding = 0 if gene == 'rrs' else 1 d = { 'gene' : gene , 'var' : variant , 'coding' : coding , 'upstream' : False } drug_data_key = d [ 'gene' ] + '_' + d [ 'var' ] if drug_data_key not in drug_data : print ( 'KEY' , drug_data_key , 'NOT FOUND' , file = sys . stderr ) else : d [ 'drugs' ] = ',' . join ( sorted ( drug_data [ drug_data_key ] ) ) if d_or_p == 'DNA' and gene != 'rrs' : assert gene != 'rrs' re_match = re . match ( '([ACGT]+)(-?[0-9]+)([ACGTX]+)' , d [ 'var' ] ) try : ref , pos , alt = re_match . groups ( ) except : print ( 'regex error:' , d [ 'var' ] , file = sys . stderr ) continue pos = int ( pos ) if len ( ref ) != len ( alt ) : genes_with_indels . add ( d [ 'gene' ] ) continue elif pos > 0 : #print('ignoring synonymous change (not implemented):', d['gene'], d['var'], d['drugs'], file=sys.stderr) continue elif pos < 0 : this_gene_coords = gene_coords [ d [ 'gene' ] ] d [ 'upstream' ] = True if this_gene_coords [ 'start' ] < this_gene_coords [ 'end' ] : variant_pos_in_output_seq = upstream_before + pos + 1 else : variant_pos_in_output_seq = upstream_before + pos + 1 assert variant_pos_in_output_seq > 0 d [ 'var' ] = ref + str ( variant_pos_in_output_seq ) + alt d [ 'original_mutation' ] = variant genes_need_upstream . add ( d [ 'gene' ] ) elif pos == 0 : print ( 'Zero coord!' , d , file = sys . stderr ) continue else : print ( 'deal with?' , d , file = sys . stderr ) continue mutations . append ( d ) if not d [ 'upstream' ] : genes_non_upstream . add ( d [ 'gene' ] ) return mutations , genes_with_indels , genes_need_upstream , genes_non_upstream
Load mutations from mykrobe - style files . mutation_to_drug_json is json file of mutation - > list of drugs . variants_txt is text file of variants used my mykrobe s make probes . gene_coords should be dict of gene coords made by the function genbank_to_gene_coords
704
70
234,121
def write_prepareref_fasta_file ( outfile , gene_coords , genes_need_upstream , genes_non_upstream , upstream_before = 100 , upstream_after = 100 ) : tmp_dict = { } fasta_in = os . path . join ( data_dir , 'NC_000962.3.fa.gz' ) pyfastaq . tasks . file_to_dict ( fasta_in , tmp_dict ) ref_seq = tmp_dict [ 'NC_000962.3' ] with open ( outfile , 'w' ) as f : for gene in genes_non_upstream : start = gene_coords [ gene ] [ 'start' ] end = gene_coords [ gene ] [ 'end' ] if start < end : gene_fa = pyfastaq . sequences . Fasta ( gene , ref_seq [ start : end + 1 ] ) else : gene_fa = pyfastaq . sequences . Fasta ( gene , ref_seq [ end : start + 1 ] ) gene_fa . revcomp ( ) print ( gene_fa , file = f ) for gene in genes_need_upstream : start = gene_coords [ gene ] [ 'start' ] end = gene_coords [ gene ] [ 'end' ] if start < end : gene_fa = pyfastaq . sequences . Fasta ( gene , ref_seq [ start - upstream_before : start + upstream_after ] ) else : gene_fa = pyfastaq . sequences . Fasta ( gene , ref_seq [ start - upstream_after + 1 : start + upstream_before + 1 ] ) gene_fa . revcomp ( ) gene_fa . id += '_upstream' print ( gene_fa , file = f )
Writes fasta file to be used with - f option of prepareref
391
15
234,122
def _get_known_noncoding_het_snp ( data_dict ) : if data_dict [ 'gene' ] == '1' : return None if data_dict [ 'known_var' ] == '1' and data_dict [ 'ref_ctg_effect' ] == 'SNP' and data_dict [ 'smtls_nts' ] != '.' and ';' not in data_dict [ 'smtls_nts' ] : nucleotides = data_dict [ 'smtls_nts' ] . split ( ',' ) depths = data_dict [ 'smtls_nts_depth' ] . split ( ',' ) if len ( nucleotides ) != len ( depths ) : raise Error ( 'Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\n' + str ( data_dict ) ) try : var_nucleotide = data_dict [ 'known_var_change' ] [ - 1 ] depths = [ int ( x ) for x in depths ] nuc_to_depth = dict ( zip ( nucleotides , depths ) ) total_depth = sum ( depths ) var_depth = nuc_to_depth . get ( var_nucleotide , 0 ) percent_depth = round ( 100 * var_depth / total_depth , 1 ) except : return None return data_dict [ 'known_var_change' ] , percent_depth else : return None
If ref is coding return None . If the data dict has a known snp and samtools made a call then return the string ref_name_change and the % of reads supporting the variant type . If noncoding but no samtools call then return None
343
53
234,123
def _has_match ( self , assembled_summary ) : if assembled_summary . startswith ( 'yes' ) : if self . data [ 0 ] [ 'var_only' ] == '0' or self . _to_cluster_summary_has_known_nonsynonymous ( assembled_summary ) == 'yes' : return 'yes' else : return 'no' else : return 'no'
assembled_summary should be output of _to_cluster_summary_assembled
90
16
234,124
def has_var_groups ( self ) : ids = set ( ) for d in self . data : if self . _has_known_variant ( d ) != 'no' and d [ 'var_group' ] != '.' : ids . add ( d [ 'var_group' ] ) return ids
Returns a set of the variant group ids that this cluster has
70
13
234,125
def column_summary_data ( self ) : assembled_summary = self . _to_cluster_summary_assembled ( ) pct_id , read_depth = self . _pc_id_and_read_depth_of_longest ( ) columns = { 'assembled' : self . _to_cluster_summary_assembled ( ) , 'match' : self . _has_match ( assembled_summary ) , 'ref_seq' : self . ref_name , 'pct_id' : str ( pct_id ) , 'ctg_cov' : str ( read_depth ) , 'known_var' : self . _to_cluster_summary_has_known_nonsynonymous ( assembled_summary ) , 'novel_var' : self . _to_cluster_summary_has_novel_nonsynonymous ( assembled_summary ) } return columns
Returns a dictionary of column name - > value for cluster - level results
198
14
234,126
def cat_files ( infiles , outfile ) : f_out = pyfastaq . utils . open_file_write ( outfile ) for filename in infiles : if os . path . exists ( filename ) : f_in = pyfastaq . utils . open_file_read ( filename ) for line in f_in : print ( line , end = '' , file = f_out ) pyfastaq . utils . close ( f_in ) pyfastaq . utils . close ( f_out )
Cats all files in list infiles into outfile
114
11
234,127
def _check_spades_log_file ( logfile ) : f = pyfastaq . utils . open_file_read ( logfile ) for line in f : if line . startswith ( '== Error == system call for:' ) and line . rstrip ( ) . endswith ( 'finished abnormally, err code: -7' ) : pyfastaq . utils . close ( f ) print ( 'Error running SPAdes. Cannot continue. This is the error from the log file' , logfile , '...' , file = sys . stderr ) print ( line , file = sys . stderr ) raise Error ( 'Fatal error ("err code: -7") running spades. Cannot continue' ) pyfastaq . utils . close ( f ) return True
SPAdes can fail with a strange error . Stop everything if this happens
174
15
234,128
def _fix_contig_orientation ( contigs_fa , ref_fa , outfile , min_id = 90 , min_length = 20 , breaklen = 200 ) : if not os . path . exists ( contigs_fa ) : raise Error ( 'Cannot fix orientation of assembly contigs because file not found: ' + contigs_fa ) tmp_coords = os . path . join ( outfile + '.tmp.rename.coords' ) pymummer . nucmer . Runner ( ref_fa , contigs_fa , tmp_coords , min_id = min_id , min_length = min_length , breaklen = breaklen , maxmatch = True , ) . run ( ) to_revcomp = set ( ) not_revcomp = set ( ) file_reader = pymummer . coords_file . reader ( tmp_coords ) for hit in file_reader : if hit . on_same_strand ( ) : not_revcomp . add ( hit . qry_name ) else : to_revcomp . add ( hit . qry_name ) os . unlink ( tmp_coords ) in_both = to_revcomp . intersection ( not_revcomp ) f = pyfastaq . utils . open_file_write ( outfile ) seq_reader = pyfastaq . sequences . file_reader ( contigs_fa ) for seq in seq_reader : if seq . id in to_revcomp and seq . id not in in_both : seq . revcomp ( ) print ( seq , file = f ) pyfastaq . utils . close ( f ) return in_both
Changes orientation of each contig to match the reference when possible . Returns a set of names of contigs that had hits in both orientations to the reference
361
31
234,129
def _parse_nucmer_coords_file ( coords_file , ref_name ) : file_reader = pymummer . coords_file . reader ( coords_file ) nucmer_hits = { } for hit in file_reader : assert hit . ref_name == ref_name contig = hit . qry_name if contig not in nucmer_hits : nucmer_hits [ contig ] = [ ] nucmer_hits [ contig ] . append ( copy . copy ( hit ) ) return nucmer_hits
Input is coords file made by self . _run_nucmer . Reference should have one sequence only . ref_name is name fo the reference sequence to sanity check the coords file . Returns dictionary . Key = assembly contig name . Value = list of nucmer hits to that contig
130
62
234,130
def _nucmer_hits_to_percent_identity ( nucmer_hits ) : percent_identities = { } max_lengths = { } for contig in nucmer_hits : max_length = - 1 percent_identity = 0 for hit in nucmer_hits [ contig ] : if hit . hit_length_qry > max_length : max_length = hit . hit_length_qry percent_identity = hit . percent_identity percent_identities [ contig ] = percent_identity return percent_identities
Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . key = contig name . Value = percent identity of hits to that contig
130
38
234,131
def _nucmer_hits_to_assembly_coords ( nucmer_hits ) : coords = { } for l in nucmer_hits . values ( ) : for hit in l : if hit . qry_name not in coords : coords [ hit . qry_name ] = [ ] coords [ hit . qry_name ] . append ( hit . qry_coords ( ) ) for scaff in coords : pyfastaq . intervals . merge_overlapping_in_list ( coords [ scaff ] ) return coords
Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . key = contig name . Value = list of coords that match to the reference gene
127
40
234,132
def nucmer_hits_to_ref_coords ( cls , nucmer_hits , contig = None ) : coords = [ ] if contig is None : coords = { key : [ ] for key in nucmer_hits . keys ( ) } else : coords = { contig : [ ] } for key in coords : coords [ key ] = [ hit . ref_coords ( ) for hit in nucmer_hits [ key ] ] pyfastaq . intervals . merge_overlapping_in_list ( coords [ key ] ) return coords
Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . Key = contig name . Value = list of coords in the reference sequence for that contig . if contig = contig_name then just gets the ref coords from that contig instead of using all the contigs
135
69
234,133
def nucmer_hits_to_ref_and_qry_coords ( cls , nucmer_hits , contig = None ) : if contig is None : ctg_coords = { key : [ ] for key in nucmer_hits . keys ( ) } else : ctg_coords = { contig : [ ] } ref_coords = { } for key in ctg_coords : hits = copy . copy ( nucmer_hits [ key ] ) hits . sort ( key = lambda x : len ( x . ref_coords ( ) ) ) if len ( hits ) > 1 : i = 0 while i < len ( hits ) - 1 : c1 = hits [ i ] . ref_coords ( ) c2 = hits [ i + 1 ] . ref_coords ( ) if c2 . contains ( c1 ) : hits . pop ( i ) else : i += 1 ref_coords [ key ] = [ hit . ref_coords ( ) for hit in hits ] ctg_coords [ key ] = [ hit . qry_coords ( ) for hit in hits ] pyfastaq . intervals . merge_overlapping_in_list ( ref_coords [ key ] ) pyfastaq . intervals . merge_overlapping_in_list ( ctg_coords [ key ] ) return ctg_coords , ref_coords
Same as nucmer_hits_to_ref_coords except removes containing hits first and returns ref and qry coords lists
313
29
234,134
def ref_cov_per_contig ( nucmer_hits ) : coords = AssemblyCompare . nucmer_hits_to_ref_coords ( nucmer_hits ) return { x : pyfastaq . intervals . length_sum_from_list ( coords [ x ] ) for x in coords }
Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . key = contig name . Value = number of bases that match to the reference sequence .
76
40
234,135
def _ref_covered_by_at_least_one_full_length_contig ( nucmer_hits , percent_threshold , max_nt_extend ) : for l in nucmer_hits . values ( ) : for hit in l : if ( ( 2 * max_nt_extend ) + len ( hit . ref_coords ( ) ) ) / hit . ref_length >= percent_threshold : return True return False
Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self . _parse_nucmer_coords_file .
102
39
234,136
def nucmer_hit_containing_reference_position ( nucmer_hits , ref_name , ref_position , qry_name = None ) : for contig_name in nucmer_hits : for hit in nucmer_hits [ contig_name ] : if hit . ref_name == ref_name and ( qry_name is None or qry_name == hit . qry_name ) and hit . ref_coords ( ) . distance_to_point ( ref_position ) == 0 : return hit return None
Returns the first nucmer match found that contains the given reference location . nucmer_hits = hits made by self . _parse_nucmer_coords_file . Returns None if no matching hit found
123
46
234,137
def _get_exe ( prog ) : if prog in prog_to_env_var : env_var = prog_to_env_var [ prog ] if env_var in os . environ : return os . environ [ env_var ] return prog_to_default [ prog ]
Given a program name return what we expect its exectuable to be called
63
15
234,138
def fake_run ( self ) : clusters = { } used_names = set ( ) seq_reader = pyfastaq . sequences . file_reader ( self . infile ) for seq in seq_reader : if seq . id in used_names : raise Error ( 'Sequence name "' + seq . id + '" not unique. Cannot continue' ) clusters [ str ( len ( clusters ) + self . min_cluster_number ) ] = { seq . id } used_names . add ( seq . id ) return clusters
Doesn t actually run cd - hit . Instead puts each input sequence into its own cluster . So it s as if cdhit was run but didn t cluster anything
113
33
234,139
def run_get_clusters_from_file ( self , clusters_infile , all_ref_seqs , rename_dict = None ) : if rename_dict is None : rename_dict = { } # check that every sequence in the clusters file can be # found in the fasta file seq_reader = pyfastaq . sequences . file_reader ( self . infile ) names_list_from_fasta_file = [ seq . id for seq in seq_reader ] names_set_from_fasta_file = set ( names_list_from_fasta_file ) clusters = self . _load_user_clusters_file ( clusters_infile , all_ref_seqs , rename_dict = rename_dict ) if len ( names_set_from_fasta_file ) != len ( names_list_from_fasta_file ) : raise Error ( 'At least one duplicate name in fasta file ' + self . infile + '. Cannot continue' ) names_from_clusters_file = set ( ) for new_names in clusters . values ( ) : names_from_clusters_file . update ( new_names ) if not names_set_from_fasta_file . issubset ( names_from_clusters_file ) : raise Error ( 'Some names in fasta file "' + self . infile + '" not given in cluster file. Cannot continue' ) return clusters
Instead of running cdhit gets the clusters info from the input file .
313
14
234,140
def sam_pair_to_insert ( s1 , s2 ) : if s1 . is_unmapped or s2 . is_unmapped or ( s1 . tid != s2 . tid ) or ( s1 . is_reverse == s2 . is_reverse ) : return None # If here, reads are both mapped to the same ref, and in opposite orientations if s1 . is_reverse : end = s1 . reference_end - 1 start = s2 . reference_start else : end = s2 . reference_end - 1 start = s1 . reference_start if start < end : return end - start + 1 else : return None
Returns insert size from pair of sam records as long as their orientation is innies . Otherwise returns None .
144
21
234,141
def update_from_sam ( self , sam , sam_reader ) : if sam . is_unmapped or sam . mate_is_unmapped or ( sam . reference_id == sam . next_reference_id ) : return new_link = link . Link ( sam , sam_reader , self . ref_lengths ) read_name = sam . query_name if read_name in self . partial_links : new_link . merge ( self . partial_links [ read_name ] ) del self . partial_links [ read_name ] key = tuple ( sorted ( ( new_link . refnames [ 0 ] , new_link . refnames [ 1 ] ) ) ) if key not in self . links : self . links [ key ] = [ ] new_link . sort ( ) self . links [ key ] . append ( new_link ) else : self . partial_links [ read_name ] = new_link
Updates graph info from a pysam . AlignedSegment object
203
15
234,142
def _make_graph ( self , max_insert ) : if len ( self . partial_links ) != 0 : raise Error ( 'Error in _make_graph(). Cannot continue because there are partial links' ) self . contig_links = { } for key in self . links : for l in self . links [ key ] : insert_size = l . insert_size ( ) if insert_size <= max_insert : if key not in self . contig_links : self . contig_links [ key ] = { } dirs = '' . join ( l . dirs ) self . contig_links [ key ] [ dirs ] = self . contig_links [ key ] . get ( dirs , 0 ) + 1
helper function to construct graph from current state of object
159
11
234,143
def _sam_to_soft_clipped ( self , sam ) : if sam . is_unmapped : raise Error ( 'Cannot get soft clip info from an unmapped read' ) if sam . cigar is None or len ( sam . cigar ) == 0 : return False , False return ( sam . cigar [ 0 ] [ 0 ] == 4 , sam . cigar [ - 1 ] [ 0 ] == 4 )
Returns tuple of whether or not the left and right end of the mapped read in the sam record is soft - clipped
89
23
234,144
def _report_line_to_dict ( cls , line ) : data = line . split ( '\t' ) if len ( data ) != len ( report . columns ) : return None d = dict ( zip ( report . columns , data ) ) for key in report . int_columns : try : d [ key ] = int ( d [ key ] ) except : assert d [ key ] == '.' for key in report . float_columns : try : d [ key ] = float ( d [ key ] ) except : assert d [ key ] == '.' d [ 'flag' ] = flag . Flag ( int ( d [ 'flag' ] ) ) return d
Takes report line string as input . Returns a dict of column name - > value in line
146
19
234,145
def _dict_to_report_line ( cls , report_dict ) : return '\t' . join ( [ str ( report_dict [ x ] ) for x in report . columns ] )
Takes a report_dict as input and returns a report line
44
13
234,146
def _load_report ( infile ) : report_dict = { } f = pyfastaq . utils . open_file_read ( infile ) first_line = True for line in f : line = line . rstrip ( ) if first_line : expected_first_line = '#' + '\t' . join ( report . columns ) if line != expected_first_line : pyfastaq . utils . close ( f ) raise Error ( 'Error reading report file. Expected first line of file is\n' + expected_first_line + '\nbut got:\n' + line ) first_line = False else : line_dict = ReportFilter . _report_line_to_dict ( line ) if line_dict is None : pyfastaq . utils . close ( f ) raise Error ( 'Error reading report file at this line:\n' + line ) ref_name = line_dict [ 'ref_name' ] ctg_name = line_dict [ 'ctg' ] if ref_name not in report_dict : report_dict [ ref_name ] = { } if ctg_name not in report_dict [ ref_name ] : report_dict [ ref_name ] [ ctg_name ] = [ ] report_dict [ ref_name ] [ ctg_name ] . append ( line_dict ) pyfastaq . utils . close ( f ) return report_dict
Loads report file into a dictionary . Key = reference name . Value = list of report lines for that reference
313
22
234,147
def _filter_dicts ( self ) : keys_to_remove = set ( ) for ref_name in self . report : for ctg_name in self . report [ ref_name ] : self . report [ ref_name ] [ ctg_name ] = self . _filter_list_of_dicts ( self . report [ ref_name ] [ ctg_name ] ) if len ( self . report [ ref_name ] [ ctg_name ] ) == 0 : keys_to_remove . add ( ( ref_name , ctg_name ) ) refs_to_remove = set ( ) for ref_name , ctg_name in keys_to_remove : del self . report [ ref_name ] [ ctg_name ] if len ( self . report [ ref_name ] ) == 0 : refs_to_remove . add ( ref_name ) for ref_name in refs_to_remove : del self . report [ ref_name ]
Filters out all the report_dicts that do not pass the cutoffs . If any ref sequence loses all of its report_dicts then it is completely removed .
215
35
234,148
def merge ( self , other ) : assert self . refnames == other . refnames assert self . dirs == other . dirs assert self . lengths == other . lengths for i in range ( 2 ) : if self . pos [ i ] is None : if other . pos [ i ] is None : raise Error ( 'Error merging these two links:\n' + str ( self ) + '\n' + str ( other ) ) self . pos [ i ] = other . pos [ i ] else : if other . pos [ i ] is not None : raise Error ( 'Error merging these two links:\n' + str ( self ) + '\n' + str ( other ) )
Merge another link into this one . Expected that each link was created from each mate from a pair . We only know both distances to contig ends when we have read info from both mappings in a BAM file . All other info should be the same .
146
54
234,149
def _load_fofn ( cls , fofn ) : filenames = { } f = pyfastaq . utils . open_file_read ( fofn ) for line in f : fields = line . rstrip ( ) . split ( ) if len ( fields ) == 1 : filenames [ fields [ 0 ] ] = None elif len ( fields ) == 2 : filenames [ fields [ 0 ] ] = fields [ 1 ] else : raise Error ( 'Error at the following line of file ' + fofn + '. Expected at most 2 fields.\n' + line ) pyfastaq . utils . close ( f ) return filenames
Returns dictionary of filename - > short name . Value is None whenever short name is not provided
148
18
234,150
def _filter_matrix_rows ( cls , matrix ) : indexes_to_keep = [ ] for i in range ( len ( matrix ) ) : keep_row = False for element in matrix [ i ] : if element not in { 'NA' , 'no' } : keep_row = True break if keep_row : indexes_to_keep . append ( i ) return [ matrix [ i ] for i in indexes_to_keep ]
matrix = output from _to_matrix
97
10
234,151
def _filter_matrix_columns ( cls , matrix , phandango_header , csv_header ) : indexes_to_keep = set ( ) for row in matrix : for i in range ( len ( row ) ) : if row [ i ] not in { 'NA' , 'no' } : indexes_to_keep . add ( i ) indexes_to_keep = sorted ( list ( indexes_to_keep ) ) for i in range ( len ( matrix ) ) : matrix [ i ] = [ matrix [ i ] [ j ] for j in indexes_to_keep ] phandango_header = [ phandango_header [ i ] for i in indexes_to_keep ] csv_header = [ csv_header [ i ] for i in indexes_to_keep ] return phandango_header , csv_header , matrix
phandango_header csv_header matrix = output from _to_matrix
188
18
234,152
def _get_remaining_known_ref_variants ( known_ref_variants , used_ref_variants , nucmer_coords ) : variants = [ ] for ref_variant_pos , ref_variants_set in sorted ( known_ref_variants . items ( ) ) : for known_ref_variant in ref_variants_set : if known_ref_variant not in used_ref_variants : variant_pos_matches_contig = False pos = known_ref_variant . variant . position if known_ref_variant . seq_type == 'n' : ref_interval = intervals . Interval ( pos , pos ) elif known_ref_variant . seq_type == 'p' : ref_interval = intervals . Interval ( 3 * pos , 3 * pos + 2 ) else : raise Error ( 'Unexpected variant type "' + known_ref_variant . variant_type + '" in _get_remaining_known_ref_variants. Cannot continue' ) for interval in nucmer_coords : if ref_interval . intersects ( interval ) : variant_pos_matches_contig = True break if variant_pos_matches_contig : variants . append ( ( None , known_ref_variant . seq_type , None , None , None , { known_ref_variant } , set ( ) ) ) return variants
Finds variants where ref has the variant and so does the contig . Which means that there was no mummer call to flag it up so need to look through the known ref variants . Also need to check that the variant is in a nucmer match to an assembly contig .
318
58
234,153
def _samtools_depths_at_known_snps_all_wild ( sequence_meta , contig_name , cluster , variant_list ) : ref_nuc_range = sequence_meta . variant . nucleotide_range ( ) if ref_nuc_range is None : return None bases = [ ] ctg_nts = [ ] ref_nts = [ ] smtls_total_depths = [ ] smtls_nts = [ ] smtls_depths = [ ] contig_positions = [ ] for ref_position in range ( ref_nuc_range [ 0 ] , ref_nuc_range [ 1 ] + 1 , 1 ) : nucmer_match = cluster . assembly_compare . nucmer_hit_containing_reference_position ( cluster . assembly_compare . nucmer_hits , cluster . ref_sequence . id , ref_position , qry_name = contig_name ) if nucmer_match is not None : # work out contig position. Needs indels variants to correct the position ref_nts . append ( cluster . ref_sequence [ ref_position ] ) contig_position , in_indel = nucmer_match . qry_coords_from_ref_coord ( ref_position , variant_list ) contig_positions . append ( contig_position ) bases , total_depth , base_depths = cluster . samtools_vars . get_depths_at_position ( contig_name , contig_position ) ctg_nts . append ( cluster . assembly . sequences [ contig_name ] [ contig_position ] ) smtls_nts . append ( bases ) smtls_total_depths . append ( total_depth ) smtls_depths . append ( base_depths ) ctg_nts = ';' . join ( ctg_nts ) if len ( ctg_nts ) else '.' ref_nts = ';' . join ( ref_nts ) if len ( ref_nts ) else '.' smtls_nts = ';' . join ( smtls_nts ) if len ( smtls_nts ) else '.' smtls_total_depths = ';' . join ( [ str ( x ) for x in smtls_total_depths ] ) if len ( smtls_total_depths ) else '.' smtls_depths = ';' . join ( [ str ( x ) for x in smtls_depths ] ) if len ( smtls_depths ) else '.' ctg_start = str ( min ( contig_positions ) + 1 ) if contig_positions is not None else '.' ctg_end = str ( max ( contig_positions ) + 1 ) if contig_positions is not None else '.' return [ str ( x ) for x in [ ref_nuc_range [ 0 ] + 1 , ref_nuc_range [ 1 ] + 1 , ref_nts , ctg_start , ctg_end , ctg_nts , smtls_total_depths , smtls_nts , smtls_depths ] ]
Input is a known variants as sequence_metadata object . The assumption is that both the reference and the assembly have the variant type not wild type . The list variant_list should be a list of pymummer . variant . Variant objects only contaning variants to the relevant query contig
730
57
234,154
def abbr ( value : Any , limit : int = 20 ) -> str : rep = repr ( value ) if len ( rep ) > limit : if limit < 3 : raise ValueError ( 'Abbreviation limit may not be less than 3' ) rep = rep [ : limit - 3 ] + '...' return rep
Converts a value into its string representation and abbreviates that representation based on the given length limit if necessary .
68
22
234,155
def invalidate_value ( cls , value : Any , exc : Type [ Exception ] = EncodingTypeError , msg : Optional [ str ] = None , ) -> None : raise exc ( "Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}" . format ( rep = abbr ( value ) , typ = type ( value ) , cls = cls . __name__ , msg = "" if msg is None else ( ": " + msg ) , ) )
Throws a standard exception for when a value is not encodable by an encoder .
109
19
234,156
def parse_tuple_type_str ( old_from_type_str ) : @ functools . wraps ( old_from_type_str ) def new_from_type_str ( cls , type_str , registry ) : normalized_type_str = normalize ( type_str ) abi_type = parse ( normalized_type_str ) type_str_repr = repr ( type_str ) if type_str != normalized_type_str : type_str_repr = '{} (normalized to {})' . format ( type_str_repr , repr ( normalized_type_str ) , ) if not isinstance ( abi_type , TupleType ) : raise ValueError ( 'Cannot create {} for non-tuple type {}' . format ( cls . __name__ , type_str_repr , ) ) abi_type . validate ( ) return old_from_type_str ( cls , abi_type , registry ) return classmethod ( new_from_type_str )
Used by BaseCoder subclasses as a convenience for implementing the from_type_str method required by ABIRegistry . Useful if normalizing then parsing a tuple type string is required in that method .
229
42
234,157
def seek_in_frame ( self , pos , * args , * * kwargs ) : super ( ) . seek ( self . _total_offset + pos , * args , * * kwargs )
Seeks relative to the total offset of the current contextual frames .
45
13
234,158
def push_frame ( self , offset ) : self . _frames . append ( ( offset , self . tell ( ) ) ) self . _total_offset += offset self . seek_in_frame ( 0 )
Pushes a new contextual frame onto the stack with the given offset and a return position at the current cursor position then seeks to the new total offset .
45
30
234,159
def pop_frame ( self ) : try : offset , return_pos = self . _frames . pop ( ) except IndexError : raise IndexError ( 'no frames to pop' ) self . _total_offset -= offset self . seek ( return_pos )
Pops the current contextual frame off of the stack and returns the cursor to the frame s return position .
55
21
234,160
def has_arrlist ( type_str ) : try : abi_type = grammar . parse ( type_str ) except exceptions . ParseError : return False return abi_type . arrlist is not None
A predicate that matches a type string with an array dimension list .
46
13
234,161
def is_base_tuple ( type_str ) : try : abi_type = grammar . parse ( type_str ) except exceptions . ParseError : return False return isinstance ( abi_type , grammar . TupleType ) and abi_type . arrlist is None
A predicate that matches a tuple type with no array dimension list .
62
13
234,162
def register ( self , lookup : Lookup , encoder : Encoder , decoder : Decoder , label : str = None ) -> None : self . register_encoder ( lookup , encoder , label = label ) self . register_decoder ( lookup , decoder , label = label )
Registers the given encoder and decoder under the given lookup . A unique string label may be optionally provided that can be used to refer to the registration by name .
62
34
234,163
def unregister ( self , label : str ) -> None : self . unregister_encoder ( label ) self . unregister_decoder ( label )
Unregisters the entries in the encoder and decoder registries which have the label label .
33
20
234,164
def copy ( self ) : cpy = type ( self ) ( ) cpy . _encoders = copy . copy ( self . _encoders ) cpy . _decoders = copy . copy ( self . _decoders ) return cpy
Copies a registry such that new registrations can be made or existing registrations can be unregistered without affecting any instance from which a copy was obtained . This is useful if an existing registry fulfills most of a user s needs but requires one or two modifications . In that case a copy of that registry can be obtained and the necessary changes made without affecting the original registry .
56
73
234,165
def encode_single ( self , typ : TypeStr , arg : Any ) -> bytes : encoder = self . _registry . get_encoder ( typ ) return encoder ( arg )
Encodes the python value arg as a binary value of the ABI type typ .
41
17
234,166
def encode_abi ( self , types : Iterable [ TypeStr ] , args : Iterable [ Any ] ) -> bytes : encoders = [ self . _registry . get_encoder ( type_str ) for type_str in types ] encoder = TupleEncoder ( encoders = encoders ) return encoder ( args )
Encodes the python values in args as a sequence of binary values of the ABI types in types via the head - tail mechanism .
76
27
234,167
def is_encodable ( self , typ : TypeStr , arg : Any ) -> bool : encoder = self . _registry . get_encoder ( typ ) try : encoder . validate_value ( arg ) except EncodingError : return False except AttributeError : try : encoder ( arg ) except EncodingError : return False return True
Determines if the python value arg is encodable as a value of the ABI type typ .
76
22
234,168
def decode_single ( self , typ : TypeStr , data : Decodable ) -> Any : if not is_bytes ( data ) : raise TypeError ( "The `data` value must be of bytes type. Got {0}" . format ( type ( data ) ) ) decoder = self . _registry . get_decoder ( typ ) stream = ContextFramesBytesIO ( data ) return decoder ( stream )
Decodes the binary value data of the ABI type typ into its equivalent python value .
90
18
234,169
def decode_abi ( self , types : Iterable [ TypeStr ] , data : Decodable ) -> Tuple [ Any , ... ] : if not is_bytes ( data ) : raise TypeError ( "The `data` value must be of bytes type. Got {0}" . format ( type ( data ) ) ) decoders = [ self . _registry . get_decoder ( type_str ) for type_str in types ] decoder = TupleDecoder ( decoders = decoders ) stream = ContextFramesBytesIO ( data ) return decoder ( stream )
Decodes the binary value data as a sequence of values of the ABI types in types via the head - tail mechanism into a tuple of equivalent python values .
127
32
234,170
async def create_turn_endpoint ( protocol_factory , server_addr , username , password , lifetime = 600 , ssl = False , transport = 'udp' ) : loop = asyncio . get_event_loop ( ) if transport == 'tcp' : _ , inner_protocol = await loop . create_connection ( lambda : TurnClientTcpProtocol ( server_addr , username = username , password = password , lifetime = lifetime ) , host = server_addr [ 0 ] , port = server_addr [ 1 ] , ssl = ssl ) else : _ , inner_protocol = await loop . create_datagram_endpoint ( lambda : TurnClientUdpProtocol ( server_addr , username = username , password = password , lifetime = lifetime ) , remote_addr = server_addr ) protocol = protocol_factory ( ) transport = TurnTransport ( protocol , inner_protocol ) await transport . _connect ( ) return transport , protocol
Create datagram connection relayed over TURN .
210
9
234,171
async def connect ( self ) : request = stun . Message ( message_method = stun . Method . ALLOCATE , message_class = stun . Class . REQUEST ) request . attributes [ 'LIFETIME' ] = self . lifetime request . attributes [ 'REQUESTED-TRANSPORT' ] = UDP_TRANSPORT try : response , _ = await self . request ( request ) except exceptions . TransactionFailed as e : response = e . response if response . attributes [ 'ERROR-CODE' ] [ 0 ] == 401 : # update long-term credentials self . nonce = response . attributes [ 'NONCE' ] self . realm = response . attributes [ 'REALM' ] self . integrity_key = make_integrity_key ( self . username , self . realm , self . password ) # retry request with authentication request . transaction_id = random_transaction_id ( ) response , _ = await self . request ( request ) self . relayed_address = response . attributes [ 'XOR-RELAYED-ADDRESS' ] logger . info ( 'TURN allocation created %s' , self . relayed_address ) # periodically refresh allocation self . refresh_handle = asyncio . ensure_future ( self . refresh ( ) ) return self . relayed_address
Create a TURN allocation .
281
6
234,172
async def delete ( self ) : if self . refresh_handle : self . refresh_handle . cancel ( ) self . refresh_handle = None request = stun . Message ( message_method = stun . Method . REFRESH , message_class = stun . Class . REQUEST ) request . attributes [ 'LIFETIME' ] = 0 await self . request ( request ) logger . info ( 'TURN allocation deleted %s' , self . relayed_address ) if self . receiver : self . receiver . connection_lost ( None )
Delete the TURN allocation .
114
6
234,173
async def refresh ( self ) : while True : await asyncio . sleep ( 5 / 6 * self . lifetime ) request = stun . Message ( message_method = stun . Method . REFRESH , message_class = stun . Class . REQUEST ) request . attributes [ 'LIFETIME' ] = self . lifetime await self . request ( request ) logger . info ( 'TURN allocation refreshed %s' , self . relayed_address )
Periodically refresh the TURN allocation .
95
9
234,174
async def send_data ( self , data , addr ) : channel = self . peer_to_channel . get ( addr ) if channel is None : channel = self . channel_number self . channel_number += 1 self . channel_to_peer [ channel ] = addr self . peer_to_channel [ addr ] = channel # bind channel await self . channel_bind ( channel , addr ) header = struct . pack ( '!HH' , channel , len ( data ) ) self . _send ( header + data )
Send data to a remote host via the TURN server .
112
12
234,175
def send_stun ( self , message , addr ) : logger . debug ( '%s > %s %s' , self , addr , message ) self . _send ( bytes ( message ) )
Send a STUN message to the TURN server .
43
11
234,176
def get_extra_info ( self , name , default = None ) : if name == 'related_address' : return self . __inner_protocol . transport . get_extra_info ( 'sockname' ) elif name == 'sockname' : return self . __relayed_address return default
Return optional transport information .
68
5
234,177
def sendto ( self , data , addr ) : asyncio . ensure_future ( self . __inner_protocol . send_data ( data , addr ) )
Sends the data bytes to the remote peer given addr .
35
12
234,178
def candidate_foundation ( candidate_type , candidate_transport , base_address ) : key = '%s|%s|%s' % ( candidate_type , candidate_transport , base_address ) return hashlib . md5 ( key . encode ( 'ascii' ) ) . hexdigest ( )
See RFC 5245 - 4 . 1 . 1 . 3 . Computing Foundations
70
16
234,179
def candidate_priority ( candidate_component , candidate_type , local_pref = 65535 ) : if candidate_type == 'host' : type_pref = 126 elif candidate_type == 'prflx' : type_pref = 110 elif candidate_type == 'srflx' : type_pref = 100 else : type_pref = 0 return ( 1 << 24 ) * type_pref + ( 1 << 8 ) * local_pref + ( 256 - candidate_component )
See RFC 5245 - 4 . 1 . 2 . 1 . Recommended Formula
111
15
234,180
def to_sdp ( self ) : sdp = '%s %d %s %d %s %d typ %s' % ( self . foundation , self . component , self . transport , self . priority , self . host , self . port , self . type ) if self . related_address is not None : sdp += ' raddr %s' % self . related_address if self . related_port is not None : sdp += ' rport %s' % self . related_port if self . tcptype is not None : sdp += ' tcptype %s' % self . tcptype if self . generation is not None : sdp += ' generation %d' % self . generation return sdp
Return a string representation suitable for SDP .
158
9
234,181
def can_pair_with ( self , other ) : a = ipaddress . ip_address ( self . host ) b = ipaddress . ip_address ( other . host ) return ( self . component == other . component and self . transport . lower ( ) == other . transport . lower ( ) and a . version == b . version )
A local candidate is paired with a remote candidate if and only if the two candidates have the same component ID and have the same IP address version .
72
29
234,182
def candidate_pair_priority ( local , remote , ice_controlling ) : G = ice_controlling and local . priority or remote . priority D = ice_controlling and remote . priority or local . priority return ( 1 << 32 ) * min ( G , D ) + 2 * max ( G , D ) + ( G > D and 1 or 0 )
See RFC 5245 - 5 . 7 . 2 . Computing Pair Priority and Ordering Pairs
77
19
234,183
def get_host_addresses ( use_ipv4 , use_ipv6 ) : addresses = [ ] for interface in netifaces . interfaces ( ) : ifaddresses = netifaces . ifaddresses ( interface ) for address in ifaddresses . get ( socket . AF_INET , [ ] ) : if use_ipv4 and address [ 'addr' ] != '127.0.0.1' : addresses . append ( address [ 'addr' ] ) for address in ifaddresses . get ( socket . AF_INET6 , [ ] ) : if use_ipv6 and address [ 'addr' ] != '::1' and '%' not in address [ 'addr' ] : addresses . append ( address [ 'addr' ] ) return addresses
Get local IP addresses .
170
5
234,184
async def server_reflexive_candidate ( protocol , stun_server ) : # lookup address loop = asyncio . get_event_loop ( ) stun_server = ( await loop . run_in_executor ( None , socket . gethostbyname , stun_server [ 0 ] ) , stun_server [ 1 ] ) # perform STUN query request = stun . Message ( message_method = stun . Method . BINDING , message_class = stun . Class . REQUEST ) response , _ = await protocol . request ( request , stun_server ) local_candidate = protocol . local_candidate return Candidate ( foundation = candidate_foundation ( 'srflx' , 'udp' , local_candidate . host ) , component = local_candidate . component , transport = local_candidate . transport , priority = candidate_priority ( local_candidate . component , 'srflx' ) , host = response . attributes [ 'XOR-MAPPED-ADDRESS' ] [ 0 ] , port = response . attributes [ 'XOR-MAPPED-ADDRESS' ] [ 1 ] , type = 'srflx' , related_address = local_candidate . host , related_port = local_candidate . port )
Query STUN server to obtain a server - reflexive candidate .
275
13
234,185
def sort_candidate_pairs ( pairs , ice_controlling ) : def pair_priority ( pair ) : return - candidate_pair_priority ( pair . local_candidate , pair . remote_candidate , ice_controlling ) pairs . sort ( key = pair_priority )
Sort a list of candidate pairs .
62
7
234,186
def send_stun ( self , message , addr ) : self . __log_debug ( '> %s %s' , addr , message ) self . transport . sendto ( bytes ( message ) , addr )
Send a STUN message .
46
6
234,187
def add_remote_candidate ( self , remote_candidate ) : if self . _remote_candidates_end : raise ValueError ( 'Cannot add remote candidate after end-of-candidates.' ) if remote_candidate is None : self . _prune_components ( ) self . _remote_candidates_end = True return self . _remote_candidates . append ( remote_candidate ) for protocol in self . _protocols : if ( protocol . local_candidate . can_pair_with ( remote_candidate ) and not self . _find_pair ( protocol , remote_candidate ) ) : pair = CandidatePair ( protocol , remote_candidate ) self . _check_list . append ( pair ) self . sort_check_list ( )
Add a remote candidate or signal end - of - candidates .
172
12
234,188
async def gather_candidates ( self ) : if not self . _local_candidates_start : self . _local_candidates_start = True addresses = get_host_addresses ( use_ipv4 = self . _use_ipv4 , use_ipv6 = self . _use_ipv6 ) for component in self . _components : self . _local_candidates += await self . get_component_candidates ( component = component , addresses = addresses ) self . _local_candidates_end = True
Gather local candidates .
119
5
234,189
def get_default_candidate ( self , component ) : for candidate in sorted ( self . _local_candidates , key = lambda x : x . priority ) : if candidate . component == component : return candidate
Gets the default local candidate for the specified component .
45
11
234,190
async def connect ( self ) : if not self . _local_candidates_end : raise ConnectionError ( 'Local candidates gathering was not performed' ) if ( self . remote_username is None or self . remote_password is None ) : raise ConnectionError ( 'Remote username or password is missing' ) # 5.7.1. Forming Candidate Pairs for remote_candidate in self . _remote_candidates : for protocol in self . _protocols : if ( protocol . local_candidate . can_pair_with ( remote_candidate ) and not self . _find_pair ( protocol , remote_candidate ) ) : pair = CandidatePair ( protocol , remote_candidate ) self . _check_list . append ( pair ) self . sort_check_list ( ) self . _unfreeze_initial ( ) # handle early checks for check in self . _early_checks : self . check_incoming ( * check ) self . _early_checks = [ ] # perform checks while True : if not self . check_periodic ( ) : break await asyncio . sleep ( 0.02 ) # wait for completion if self . _check_list : res = await self . _check_list_state . get ( ) else : res = ICE_FAILED # cancel remaining checks for check in self . _check_list : if check . handle : check . handle . cancel ( ) if res != ICE_COMPLETED : raise ConnectionError ( 'ICE negotiation failed' ) # start consent freshness tests self . _query_consent_handle = asyncio . ensure_future ( self . query_consent ( ) )
Perform ICE handshake .
355
5
234,191
async def recvfrom ( self ) : if not len ( self . _nominated ) : raise ConnectionError ( 'Cannot receive data, not connected' ) result = await self . _queue . get ( ) if result [ 0 ] is None : raise ConnectionError ( 'Connection lost while receiving data' ) return result
Receive the next datagram .
68
7
234,192
async def sendto ( self , data , component ) : active_pair = self . _nominated . get ( component ) if active_pair : await active_pair . protocol . send_data ( data , active_pair . remote_addr ) else : raise ConnectionError ( 'Cannot send data, not connected' )
Send a datagram on the specified component .
69
9
234,193
def set_selected_pair ( self , component , local_foundation , remote_foundation ) : # find local candidate protocol = None for p in self . _protocols : if ( p . local_candidate . component == component and p . local_candidate . foundation == local_foundation ) : protocol = p break # find remote candidate remote_candidate = None for c in self . _remote_candidates : if c . component == component and c . foundation == remote_foundation : remote_candidate = c assert ( protocol and remote_candidate ) self . _nominated [ component ] = CandidatePair ( protocol , remote_candidate )
Force the selected candidate pair .
139
6
234,194
def check_incoming ( self , message , addr , protocol ) : component = protocol . local_candidate . component # find remote candidate remote_candidate = None for c in self . _remote_candidates : if c . host == addr [ 0 ] and c . port == addr [ 1 ] : remote_candidate = c assert remote_candidate . component == component break if remote_candidate is None : # 7.2.1.3. Learning Peer Reflexive Candidates remote_candidate = Candidate ( foundation = random_string ( 10 ) , component = component , transport = 'udp' , priority = message . attributes [ 'PRIORITY' ] , host = addr [ 0 ] , port = addr [ 1 ] , type = 'prflx' ) self . _remote_candidates . append ( remote_candidate ) self . __log_info ( 'Discovered peer reflexive candidate %s' , remote_candidate ) # find pair pair = self . _find_pair ( protocol , remote_candidate ) if pair is None : pair = CandidatePair ( protocol , remote_candidate ) pair . state = CandidatePair . State . WAITING self . _check_list . append ( pair ) self . sort_check_list ( ) # triggered check if pair . state in [ CandidatePair . State . WAITING , CandidatePair . State . FAILED ] : pair . handle = asyncio . ensure_future ( self . check_start ( pair ) ) # 7.2.1.5. Updating the Nominated Flag if 'USE-CANDIDATE' in message . attributes and not self . ice_controlling : pair . remote_nominated = True if pair . state == CandidatePair . State . SUCCEEDED : pair . nominated = True self . check_complete ( pair )
Handle a succesful incoming check .
398
8
234,195
async def check_start ( self , pair ) : self . check_state ( pair , CandidatePair . State . IN_PROGRESS ) request = self . build_request ( pair ) try : response , addr = await pair . protocol . request ( request , pair . remote_addr , integrity_key = self . remote_password . encode ( 'utf8' ) ) except exceptions . TransactionError as exc : # 7.1.3.1. Failure Cases if exc . response and exc . response . attributes . get ( 'ERROR-CODE' , ( None , None ) ) [ 0 ] == 487 : if 'ICE-CONTROLLING' in request . attributes : self . switch_role ( ice_controlling = False ) elif 'ICE-CONTROLLED' in request . attributes : self . switch_role ( ice_controlling = True ) return await self . check_start ( pair ) else : self . check_state ( pair , CandidatePair . State . FAILED ) self . check_complete ( pair ) return # check remote address matches if addr != pair . remote_addr : self . __log_info ( 'Check %s failed : source address mismatch' , pair ) self . check_state ( pair , CandidatePair . State . FAILED ) self . check_complete ( pair ) return # success self . check_state ( pair , CandidatePair . State . SUCCEEDED ) if self . ice_controlling or pair . remote_nominated : pair . nominated = True self . check_complete ( pair )
Starts a check .
339
5
234,196
def check_state ( self , pair , state ) : self . __log_info ( 'Check %s %s -> %s' , pair , pair . state , state ) pair . state = state
Updates the state of a check .
43
8
234,197
def _find_pair ( self , protocol , remote_candidate ) : for pair in self . _check_list : if ( pair . protocol == protocol and pair . remote_candidate == remote_candidate ) : return pair return None
Find a candidate pair in the check list .
51
9
234,198
def _prune_components ( self ) : seen_components = set ( map ( lambda x : x . component , self . _remote_candidates ) ) missing_components = self . _components - seen_components if missing_components : self . __log_info ( 'Components %s have no candidate pairs' % missing_components ) self . _components = seen_components
Remove components for which the remote party did not provide any candidates .
90
13
234,199
def parse_message ( data , integrity_key = None ) : if len ( data ) < HEADER_LENGTH : raise ValueError ( 'STUN message length is less than 20 bytes' ) message_type , length , cookie , transaction_id = unpack ( '!HHI12s' , data [ 0 : HEADER_LENGTH ] ) if len ( data ) != HEADER_LENGTH + length : raise ValueError ( 'STUN message length does not match' ) attributes = OrderedDict ( ) pos = HEADER_LENGTH while pos <= len ( data ) - 4 : attr_type , attr_len = unpack ( '!HH' , data [ pos : pos + 4 ] ) v = data [ pos + 4 : pos + 4 + attr_len ] pad_len = 4 * ( ( attr_len + 3 ) // 4 ) - attr_len if attr_type in ATTRIBUTES_BY_TYPE : _ , attr_name , attr_pack , attr_unpack = ATTRIBUTES_BY_TYPE [ attr_type ] if attr_unpack == unpack_xor_address : attributes [ attr_name ] = attr_unpack ( v , transaction_id = transaction_id ) else : attributes [ attr_name ] = attr_unpack ( v ) if attr_name == 'FINGERPRINT' : if attributes [ attr_name ] != message_fingerprint ( data [ 0 : pos ] ) : raise ValueError ( 'STUN message fingerprint does not match' ) elif attr_name == 'MESSAGE-INTEGRITY' : if ( integrity_key is not None and attributes [ attr_name ] != message_integrity ( data [ 0 : pos ] , integrity_key ) ) : raise ValueError ( 'STUN message integrity does not match' ) pos += 4 + attr_len + pad_len return Message ( message_method = message_type & 0x3eef , message_class = message_type & 0x0110 , transaction_id = transaction_id , attributes = attributes )
Parses a STUN message .
474
8