idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
46,200 | def _get_known_noncoding_het_snp ( data_dict ) : if data_dict [ 'gene' ] == '1' : return None if data_dict [ 'known_var' ] == '1' and data_dict [ 'ref_ctg_effect' ] == 'SNP' and data_dict [ 'smtls_nts' ] != '.' and ';' not in data_dict [ 'smtls_nts' ] : nucleotides = data_dict [ 'smtls_nts' ] . split ( ',' ) depths = data_dict [ 'smtls_nts_depth' ] . split ( ',' ) if len ( nucleotides ) != len ( depths ) : raise Error ( 'Mismatch in number of inferred nucleotides from ctg_nt, smtls_nts, smtls_nts_depth columns. Cannot continue\n' + str ( data_dict ) ) try : var_nucleotide = data_dict [ 'known_var_change' ] [ - 1 ] depths = [ int ( x ) for x in depths ] nuc_to_depth = dict ( zip ( nucleotides , depths ) ) total_depth = sum ( depths ) var_depth = nuc_to_depth . get ( var_nucleotide , 0 ) percent_depth = round ( 100 * var_depth / total_depth , 1 ) except : return None return data_dict [ 'known_var_change' ] , percent_depth else : return None | If ref is coding return None . If the data dict has a known snp and samtools made a call then return the string ref_name_change and the % of reads supporting the variant type . If noncoding but no samtools call then return None |
46,201 | def _has_match ( self , assembled_summary ) : if assembled_summary . startswith ( 'yes' ) : if self . data [ 0 ] [ 'var_only' ] == '0' or self . _to_cluster_summary_has_known_nonsynonymous ( assembled_summary ) == 'yes' : return 'yes' else : return 'no' else : return 'no' | assembled_summary should be output of _to_cluster_summary_assembled |
46,202 | def has_var_groups ( self ) : ids = set ( ) for d in self . data : if self . _has_known_variant ( d ) != 'no' and d [ 'var_group' ] != '.' : ids . add ( d [ 'var_group' ] ) return ids | Returns a set of the variant group ids that this cluster has |
46,203 | def column_summary_data ( self ) : assembled_summary = self . _to_cluster_summary_assembled ( ) pct_id , read_depth = self . _pc_id_and_read_depth_of_longest ( ) columns = { 'assembled' : self . _to_cluster_summary_assembled ( ) , 'match' : self . _has_match ( assembled_summary ) , 'ref_seq' : self . ref_name , 'pct_id' : str ( pct_id ) , 'ctg_cov' : str ( read_depth ) , 'known_var' : self . _to_cluster_summary_has_known_nonsynonymous ( assembled_summary ) , 'novel_var' : self . _to_cluster_summary_has_novel_nonsynonymous ( assembled_summary ) } return columns | Returns a dictionary of column name - > value for cluster - level results |
46,204 | def cat_files ( infiles , outfile ) : f_out = pyfastaq . utils . open_file_write ( outfile ) for filename in infiles : if os . path . exists ( filename ) : f_in = pyfastaq . utils . open_file_read ( filename ) for line in f_in : print ( line , end = '' , file = f_out ) pyfastaq . utils . close ( f_in ) pyfastaq . utils . close ( f_out ) | Cats all files in list infiles into outfile |
46,205 | def _check_spades_log_file ( logfile ) : f = pyfastaq . utils . open_file_read ( logfile ) for line in f : if line . startswith ( '== Error == system call for:' ) and line . rstrip ( ) . endswith ( 'finished abnormally, err code: -7' ) : pyfastaq . utils . close ( f ) print ( 'Error running SPAdes. Cannot continue. This is the error from the log file' , logfile , '...' , file = sys . stderr ) print ( line , file = sys . stderr ) raise Error ( 'Fatal error ("err code: -7") running spades. Cannot continue' ) pyfastaq . utils . close ( f ) return True | SPAdes can fail with a strange error . Stop everything if this happens |
46,206 | def _fix_contig_orientation ( contigs_fa , ref_fa , outfile , min_id = 90 , min_length = 20 , breaklen = 200 ) : if not os . path . exists ( contigs_fa ) : raise Error ( 'Cannot fix orientation of assembly contigs because file not found: ' + contigs_fa ) tmp_coords = os . path . join ( outfile + '.tmp.rename.coords' ) pymummer . nucmer . Runner ( ref_fa , contigs_fa , tmp_coords , min_id = min_id , min_length = min_length , breaklen = breaklen , maxmatch = True , ) . run ( ) to_revcomp = set ( ) not_revcomp = set ( ) file_reader = pymummer . coords_file . reader ( tmp_coords ) for hit in file_reader : if hit . on_same_strand ( ) : not_revcomp . add ( hit . qry_name ) else : to_revcomp . add ( hit . qry_name ) os . unlink ( tmp_coords ) in_both = to_revcomp . intersection ( not_revcomp ) f = pyfastaq . utils . open_file_write ( outfile ) seq_reader = pyfastaq . sequences . file_reader ( contigs_fa ) for seq in seq_reader : if seq . id in to_revcomp and seq . id not in in_both : seq . revcomp ( ) print ( seq , file = f ) pyfastaq . utils . close ( f ) return in_both | Changes orientation of each contig to match the reference when possible . Returns a set of names of contigs that had hits in both orientations to the reference |
46,207 | def _parse_nucmer_coords_file ( coords_file , ref_name ) : file_reader = pymummer . coords_file . reader ( coords_file ) nucmer_hits = { } for hit in file_reader : assert hit . ref_name == ref_name contig = hit . qry_name if contig not in nucmer_hits : nucmer_hits [ contig ] = [ ] nucmer_hits [ contig ] . append ( copy . copy ( hit ) ) return nucmer_hits | Input is coords file made by self . _run_nucmer . Reference should have one sequence only . ref_name is name fo the reference sequence to sanity check the coords file . Returns dictionary . Key = assembly contig name . Value = list of nucmer hits to that contig |
46,208 | def _nucmer_hits_to_percent_identity ( nucmer_hits ) : percent_identities = { } max_lengths = { } for contig in nucmer_hits : max_length = - 1 percent_identity = 0 for hit in nucmer_hits [ contig ] : if hit . hit_length_qry > max_length : max_length = hit . hit_length_qry percent_identity = hit . percent_identity percent_identities [ contig ] = percent_identity return percent_identities | Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . key = contig name . Value = percent identity of hits to that contig |
46,209 | def _nucmer_hits_to_assembly_coords ( nucmer_hits ) : coords = { } for l in nucmer_hits . values ( ) : for hit in l : if hit . qry_name not in coords : coords [ hit . qry_name ] = [ ] coords [ hit . qry_name ] . append ( hit . qry_coords ( ) ) for scaff in coords : pyfastaq . intervals . merge_overlapping_in_list ( coords [ scaff ] ) return coords | Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . key = contig name . Value = list of coords that match to the reference gene |
46,210 | def nucmer_hits_to_ref_coords ( cls , nucmer_hits , contig = None ) : coords = [ ] if contig is None : coords = { key : [ ] for key in nucmer_hits . keys ( ) } else : coords = { contig : [ ] } for key in coords : coords [ key ] = [ hit . ref_coords ( ) for hit in nucmer_hits [ key ] ] pyfastaq . intervals . merge_overlapping_in_list ( coords [ key ] ) return coords | Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . Key = contig name . Value = list of coords in the reference sequence for that contig . if contig = contig_name then just gets the ref coords from that contig instead of using all the contigs |
46,211 | def nucmer_hits_to_ref_and_qry_coords ( cls , nucmer_hits , contig = None ) : if contig is None : ctg_coords = { key : [ ] for key in nucmer_hits . keys ( ) } else : ctg_coords = { contig : [ ] } ref_coords = { } for key in ctg_coords : hits = copy . copy ( nucmer_hits [ key ] ) hits . sort ( key = lambda x : len ( x . ref_coords ( ) ) ) if len ( hits ) > 1 : i = 0 while i < len ( hits ) - 1 : c1 = hits [ i ] . ref_coords ( ) c2 = hits [ i + 1 ] . ref_coords ( ) if c2 . contains ( c1 ) : hits . pop ( i ) else : i += 1 ref_coords [ key ] = [ hit . ref_coords ( ) for hit in hits ] ctg_coords [ key ] = [ hit . qry_coords ( ) for hit in hits ] pyfastaq . intervals . merge_overlapping_in_list ( ref_coords [ key ] ) pyfastaq . intervals . merge_overlapping_in_list ( ctg_coords [ key ] ) return ctg_coords , ref_coords | Same as nucmer_hits_to_ref_coords except removes containing hits first and returns ref and qry coords lists |
46,212 | def ref_cov_per_contig ( nucmer_hits ) : coords = AssemblyCompare . nucmer_hits_to_ref_coords ( nucmer_hits ) return { x : pyfastaq . intervals . length_sum_from_list ( coords [ x ] ) for x in coords } | Input is hits made by self . _parse_nucmer_coords_file . Returns dictionary . key = contig name . Value = number of bases that match to the reference sequence . |
46,213 | def _ref_covered_by_at_least_one_full_length_contig ( nucmer_hits , percent_threshold , max_nt_extend ) : for l in nucmer_hits . values ( ) : for hit in l : if ( ( 2 * max_nt_extend ) + len ( hit . ref_coords ( ) ) ) / hit . ref_length >= percent_threshold : return True return False | Returns true iff there exists a contig that completely covers the reference sequence nucmer_hits = hits made by self . _parse_nucmer_coords_file . |
46,214 | def nucmer_hit_containing_reference_position ( nucmer_hits , ref_name , ref_position , qry_name = None ) : for contig_name in nucmer_hits : for hit in nucmer_hits [ contig_name ] : if hit . ref_name == ref_name and ( qry_name is None or qry_name == hit . qry_name ) and hit . ref_coords ( ) . distance_to_point ( ref_position ) == 0 : return hit return None | Returns the first nucmer match found that contains the given reference location . nucmer_hits = hits made by self . _parse_nucmer_coords_file . Returns None if no matching hit found |
46,215 | def _get_exe ( prog ) : if prog in prog_to_env_var : env_var = prog_to_env_var [ prog ] if env_var in os . environ : return os . environ [ env_var ] return prog_to_default [ prog ] | Given a program name return what we expect its exectuable to be called |
46,216 | def fake_run ( self ) : clusters = { } used_names = set ( ) seq_reader = pyfastaq . sequences . file_reader ( self . infile ) for seq in seq_reader : if seq . id in used_names : raise Error ( 'Sequence name "' + seq . id + '" not unique. Cannot continue' ) clusters [ str ( len ( clusters ) + self . min_cluster_number ) ] = { seq . id } used_names . add ( seq . id ) return clusters | Doesn t actually run cd - hit . Instead puts each input sequence into its own cluster . So it s as if cdhit was run but didn t cluster anything |
46,217 | def run_get_clusters_from_file ( self , clusters_infile , all_ref_seqs , rename_dict = None ) : if rename_dict is None : rename_dict = { } seq_reader = pyfastaq . sequences . file_reader ( self . infile ) names_list_from_fasta_file = [ seq . id for seq in seq_reader ] names_set_from_fasta_file = set ( names_list_from_fasta_file ) clusters = self . _load_user_clusters_file ( clusters_infile , all_ref_seqs , rename_dict = rename_dict ) if len ( names_set_from_fasta_file ) != len ( names_list_from_fasta_file ) : raise Error ( 'At least one duplicate name in fasta file ' + self . infile + '. Cannot continue' ) names_from_clusters_file = set ( ) for new_names in clusters . values ( ) : names_from_clusters_file . update ( new_names ) if not names_set_from_fasta_file . issubset ( names_from_clusters_file ) : raise Error ( 'Some names in fasta file "' + self . infile + '" not given in cluster file. Cannot continue' ) return clusters | Instead of running cdhit gets the clusters info from the input file . |
46,218 | def sam_pair_to_insert ( s1 , s2 ) : if s1 . is_unmapped or s2 . is_unmapped or ( s1 . tid != s2 . tid ) or ( s1 . is_reverse == s2 . is_reverse ) : return None if s1 . is_reverse : end = s1 . reference_end - 1 start = s2 . reference_start else : end = s2 . reference_end - 1 start = s1 . reference_start if start < end : return end - start + 1 else : return None | Returns insert size from pair of sam records as long as their orientation is innies . Otherwise returns None . |
46,219 | def update_from_sam ( self , sam , sam_reader ) : if sam . is_unmapped or sam . mate_is_unmapped or ( sam . reference_id == sam . next_reference_id ) : return new_link = link . Link ( sam , sam_reader , self . ref_lengths ) read_name = sam . query_name if read_name in self . partial_links : new_link . merge ( self . partial_links [ read_name ] ) del self . partial_links [ read_name ] key = tuple ( sorted ( ( new_link . refnames [ 0 ] , new_link . refnames [ 1 ] ) ) ) if key not in self . links : self . links [ key ] = [ ] new_link . sort ( ) self . links [ key ] . append ( new_link ) else : self . partial_links [ read_name ] = new_link | Updates graph info from a pysam . AlignedSegment object |
46,220 | def _make_graph ( self , max_insert ) : if len ( self . partial_links ) != 0 : raise Error ( 'Error in _make_graph(). Cannot continue because there are partial links' ) self . contig_links = { } for key in self . links : for l in self . links [ key ] : insert_size = l . insert_size ( ) if insert_size <= max_insert : if key not in self . contig_links : self . contig_links [ key ] = { } dirs = '' . join ( l . dirs ) self . contig_links [ key ] [ dirs ] = self . contig_links [ key ] . get ( dirs , 0 ) + 1 | helper function to construct graph from current state of object |
46,221 | def _sam_to_soft_clipped ( self , sam ) : if sam . is_unmapped : raise Error ( 'Cannot get soft clip info from an unmapped read' ) if sam . cigar is None or len ( sam . cigar ) == 0 : return False , False return ( sam . cigar [ 0 ] [ 0 ] == 4 , sam . cigar [ - 1 ] [ 0 ] == 4 ) | Returns tuple of whether or not the left and right end of the mapped read in the sam record is soft - clipped |
46,222 | def _report_line_to_dict ( cls , line ) : data = line . split ( '\t' ) if len ( data ) != len ( report . columns ) : return None d = dict ( zip ( report . columns , data ) ) for key in report . int_columns : try : d [ key ] = int ( d [ key ] ) except : assert d [ key ] == '.' for key in report . float_columns : try : d [ key ] = float ( d [ key ] ) except : assert d [ key ] == '.' d [ 'flag' ] = flag . Flag ( int ( d [ 'flag' ] ) ) return d | Takes report line string as input . Returns a dict of column name - > value in line |
46,223 | def _dict_to_report_line ( cls , report_dict ) : return '\t' . join ( [ str ( report_dict [ x ] ) for x in report . columns ] ) | Takes a report_dict as input and returns a report line |
46,224 | def _load_report ( infile ) : report_dict = { } f = pyfastaq . utils . open_file_read ( infile ) first_line = True for line in f : line = line . rstrip ( ) if first_line : expected_first_line = '#' + '\t' . join ( report . columns ) if line != expected_first_line : pyfastaq . utils . close ( f ) raise Error ( 'Error reading report file. Expected first line of file is\n' + expected_first_line + '\nbut got:\n' + line ) first_line = False else : line_dict = ReportFilter . _report_line_to_dict ( line ) if line_dict is None : pyfastaq . utils . close ( f ) raise Error ( 'Error reading report file at this line:\n' + line ) ref_name = line_dict [ 'ref_name' ] ctg_name = line_dict [ 'ctg' ] if ref_name not in report_dict : report_dict [ ref_name ] = { } if ctg_name not in report_dict [ ref_name ] : report_dict [ ref_name ] [ ctg_name ] = [ ] report_dict [ ref_name ] [ ctg_name ] . append ( line_dict ) pyfastaq . utils . close ( f ) return report_dict | Loads report file into a dictionary . Key = reference name . Value = list of report lines for that reference |
46,225 | def _filter_dicts ( self ) : keys_to_remove = set ( ) for ref_name in self . report : for ctg_name in self . report [ ref_name ] : self . report [ ref_name ] [ ctg_name ] = self . _filter_list_of_dicts ( self . report [ ref_name ] [ ctg_name ] ) if len ( self . report [ ref_name ] [ ctg_name ] ) == 0 : keys_to_remove . add ( ( ref_name , ctg_name ) ) refs_to_remove = set ( ) for ref_name , ctg_name in keys_to_remove : del self . report [ ref_name ] [ ctg_name ] if len ( self . report [ ref_name ] ) == 0 : refs_to_remove . add ( ref_name ) for ref_name in refs_to_remove : del self . report [ ref_name ] | Filters out all the report_dicts that do not pass the cutoffs . If any ref sequence loses all of its report_dicts then it is completely removed . |
46,226 | def merge ( self , other ) : assert self . refnames == other . refnames assert self . dirs == other . dirs assert self . lengths == other . lengths for i in range ( 2 ) : if self . pos [ i ] is None : if other . pos [ i ] is None : raise Error ( 'Error merging these two links:\n' + str ( self ) + '\n' + str ( other ) ) self . pos [ i ] = other . pos [ i ] else : if other . pos [ i ] is not None : raise Error ( 'Error merging these two links:\n' + str ( self ) + '\n' + str ( other ) ) | Merge another link into this one . Expected that each link was created from each mate from a pair . We only know both distances to contig ends when we have read info from both mappings in a BAM file . All other info should be the same . |
46,227 | def _load_fofn ( cls , fofn ) : filenames = { } f = pyfastaq . utils . open_file_read ( fofn ) for line in f : fields = line . rstrip ( ) . split ( ) if len ( fields ) == 1 : filenames [ fields [ 0 ] ] = None elif len ( fields ) == 2 : filenames [ fields [ 0 ] ] = fields [ 1 ] else : raise Error ( 'Error at the following line of file ' + fofn + '. Expected at most 2 fields.\n' + line ) pyfastaq . utils . close ( f ) return filenames | Returns dictionary of filename - > short name . Value is None whenever short name is not provided |
46,228 | def _filter_matrix_rows ( cls , matrix ) : indexes_to_keep = [ ] for i in range ( len ( matrix ) ) : keep_row = False for element in matrix [ i ] : if element not in { 'NA' , 'no' } : keep_row = True break if keep_row : indexes_to_keep . append ( i ) return [ matrix [ i ] for i in indexes_to_keep ] | matrix = output from _to_matrix |
46,229 | def _filter_matrix_columns ( cls , matrix , phandango_header , csv_header ) : indexes_to_keep = set ( ) for row in matrix : for i in range ( len ( row ) ) : if row [ i ] not in { 'NA' , 'no' } : indexes_to_keep . add ( i ) indexes_to_keep = sorted ( list ( indexes_to_keep ) ) for i in range ( len ( matrix ) ) : matrix [ i ] = [ matrix [ i ] [ j ] for j in indexes_to_keep ] phandango_header = [ phandango_header [ i ] for i in indexes_to_keep ] csv_header = [ csv_header [ i ] for i in indexes_to_keep ] return phandango_header , csv_header , matrix | phandango_header csv_header matrix = output from _to_matrix |
46,230 | def _get_remaining_known_ref_variants ( known_ref_variants , used_ref_variants , nucmer_coords ) : variants = [ ] for ref_variant_pos , ref_variants_set in sorted ( known_ref_variants . items ( ) ) : for known_ref_variant in ref_variants_set : if known_ref_variant not in used_ref_variants : variant_pos_matches_contig = False pos = known_ref_variant . variant . position if known_ref_variant . seq_type == 'n' : ref_interval = intervals . Interval ( pos , pos ) elif known_ref_variant . seq_type == 'p' : ref_interval = intervals . Interval ( 3 * pos , 3 * pos + 2 ) else : raise Error ( 'Unexpected variant type "' + known_ref_variant . variant_type + '" in _get_remaining_known_ref_variants. Cannot continue' ) for interval in nucmer_coords : if ref_interval . intersects ( interval ) : variant_pos_matches_contig = True break if variant_pos_matches_contig : variants . append ( ( None , known_ref_variant . seq_type , None , None , None , { known_ref_variant } , set ( ) ) ) return variants | Finds variants where ref has the variant and so does the contig . Which means that there was no mummer call to flag it up so need to look through the known ref variants . Also need to check that the variant is in a nucmer match to an assembly contig . |
46,231 | def _samtools_depths_at_known_snps_all_wild ( sequence_meta , contig_name , cluster , variant_list ) : ref_nuc_range = sequence_meta . variant . nucleotide_range ( ) if ref_nuc_range is None : return None bases = [ ] ctg_nts = [ ] ref_nts = [ ] smtls_total_depths = [ ] smtls_nts = [ ] smtls_depths = [ ] contig_positions = [ ] for ref_position in range ( ref_nuc_range [ 0 ] , ref_nuc_range [ 1 ] + 1 , 1 ) : nucmer_match = cluster . assembly_compare . nucmer_hit_containing_reference_position ( cluster . assembly_compare . nucmer_hits , cluster . ref_sequence . id , ref_position , qry_name = contig_name ) if nucmer_match is not None : ref_nts . append ( cluster . ref_sequence [ ref_position ] ) contig_position , in_indel = nucmer_match . qry_coords_from_ref_coord ( ref_position , variant_list ) contig_positions . append ( contig_position ) bases , total_depth , base_depths = cluster . samtools_vars . get_depths_at_position ( contig_name , contig_position ) ctg_nts . append ( cluster . assembly . sequences [ contig_name ] [ contig_position ] ) smtls_nts . append ( bases ) smtls_total_depths . append ( total_depth ) smtls_depths . append ( base_depths ) ctg_nts = ';' . join ( ctg_nts ) if len ( ctg_nts ) else '.' ref_nts = ';' . join ( ref_nts ) if len ( ref_nts ) else '.' smtls_nts = ';' . join ( smtls_nts ) if len ( smtls_nts ) else '.' smtls_total_depths = ';' . join ( [ str ( x ) for x in smtls_total_depths ] ) if len ( smtls_total_depths ) else '.' smtls_depths = ';' . join ( [ str ( x ) for x in smtls_depths ] ) if len ( smtls_depths ) else '.' ctg_start = str ( min ( contig_positions ) + 1 ) if contig_positions is not None else '.' ctg_end = str ( max ( contig_positions ) + 1 ) if contig_positions is not None else '.' return [ str ( x ) for x in [ ref_nuc_range [ 0 ] + 1 , ref_nuc_range [ 1 ] + 1 , ref_nts , ctg_start , ctg_end , ctg_nts , smtls_total_depths , smtls_nts , smtls_depths ] ] | Input is a known variants as sequence_metadata object . 
The assumption is that both the reference and the assembly have the variant type not wild type . The list variant_list should be a list of pymummer . variant . Variant objects only contaning variants to the relevant query contig |
46,232 | def abbr ( value : Any , limit : int = 20 ) -> str : rep = repr ( value ) if len ( rep ) > limit : if limit < 3 : raise ValueError ( 'Abbreviation limit may not be less than 3' ) rep = rep [ : limit - 3 ] + '...' return rep | Converts a value into its string representation and abbreviates that representation based on the given length limit if necessary . |
46,233 | def invalidate_value ( cls , value : Any , exc : Type [ Exception ] = EncodingTypeError , msg : Optional [ str ] = None , ) -> None : raise exc ( "Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}" . format ( rep = abbr ( value ) , typ = type ( value ) , cls = cls . __name__ , msg = "" if msg is None else ( ": " + msg ) , ) ) | Throws a standard exception for when a value is not encodable by an encoder . |
46,234 | def parse_tuple_type_str ( old_from_type_str ) : @ functools . wraps ( old_from_type_str ) def new_from_type_str ( cls , type_str , registry ) : normalized_type_str = normalize ( type_str ) abi_type = parse ( normalized_type_str ) type_str_repr = repr ( type_str ) if type_str != normalized_type_str : type_str_repr = '{} (normalized to {})' . format ( type_str_repr , repr ( normalized_type_str ) , ) if not isinstance ( abi_type , TupleType ) : raise ValueError ( 'Cannot create {} for non-tuple type {}' . format ( cls . __name__ , type_str_repr , ) ) abi_type . validate ( ) return old_from_type_str ( cls , abi_type , registry ) return classmethod ( new_from_type_str ) | Used by BaseCoder subclasses as a convenience for implementing the from_type_str method required by ABIRegistry . Useful if normalizing then parsing a tuple type string is required in that method . |
46,235 | def seek_in_frame ( self , pos , * args , ** kwargs ) : super ( ) . seek ( self . _total_offset + pos , * args , ** kwargs ) | Seeks relative to the total offset of the current contextual frames . |
46,236 | def push_frame ( self , offset ) : self . _frames . append ( ( offset , self . tell ( ) ) ) self . _total_offset += offset self . seek_in_frame ( 0 ) | Pushes a new contextual frame onto the stack with the given offset and a return position at the current cursor position then seeks to the new total offset . |
46,237 | def pop_frame ( self ) : try : offset , return_pos = self . _frames . pop ( ) except IndexError : raise IndexError ( 'no frames to pop' ) self . _total_offset -= offset self . seek ( return_pos ) | Pops the current contextual frame off of the stack and returns the cursor to the frame s return position . |
46,238 | def has_arrlist ( type_str ) : try : abi_type = grammar . parse ( type_str ) except exceptions . ParseError : return False return abi_type . arrlist is not None | A predicate that matches a type string with an array dimension list . |
46,239 | def is_base_tuple ( type_str ) : try : abi_type = grammar . parse ( type_str ) except exceptions . ParseError : return False return isinstance ( abi_type , grammar . TupleType ) and abi_type . arrlist is None | A predicate that matches a tuple type with no array dimension list . |
46,240 | def register ( self , lookup : Lookup , encoder : Encoder , decoder : Decoder , label : str = None ) -> None : self . register_encoder ( lookup , encoder , label = label ) self . register_decoder ( lookup , decoder , label = label ) | Registers the given encoder and decoder under the given lookup . A unique string label may be optionally provided that can be used to refer to the registration by name . |
46,241 | def unregister ( self , label : str ) -> None : self . unregister_encoder ( label ) self . unregister_decoder ( label ) | Unregisters the entries in the encoder and decoder registries which have the label label . |
46,242 | def copy ( self ) : cpy = type ( self ) ( ) cpy . _encoders = copy . copy ( self . _encoders ) cpy . _decoders = copy . copy ( self . _decoders ) return cpy | Copies a registry such that new registrations can be made or existing registrations can be unregistered without affecting any instance from which a copy was obtained . This is useful if an existing registry fulfills most of a user s needs but requires one or two modifications . In that case a copy of that registry can be obtained and the necessary changes made without affecting the original registry . |
46,243 | def encode_single ( self , typ : TypeStr , arg : Any ) -> bytes : encoder = self . _registry . get_encoder ( typ ) return encoder ( arg ) | Encodes the python value arg as a binary value of the ABI type typ . |
46,244 | def encode_abi ( self , types : Iterable [ TypeStr ] , args : Iterable [ Any ] ) -> bytes : encoders = [ self . _registry . get_encoder ( type_str ) for type_str in types ] encoder = TupleEncoder ( encoders = encoders ) return encoder ( args ) | Encodes the python values in args as a sequence of binary values of the ABI types in types via the head - tail mechanism . |
46,245 | def is_encodable ( self , typ : TypeStr , arg : Any ) -> bool : encoder = self . _registry . get_encoder ( typ ) try : encoder . validate_value ( arg ) except EncodingError : return False except AttributeError : try : encoder ( arg ) except EncodingError : return False return True | Determines if the python value arg is encodable as a value of the ABI type typ . |
46,246 | def decode_single ( self , typ : TypeStr , data : Decodable ) -> Any : if not is_bytes ( data ) : raise TypeError ( "The `data` value must be of bytes type. Got {0}" . format ( type ( data ) ) ) decoder = self . _registry . get_decoder ( typ ) stream = ContextFramesBytesIO ( data ) return decoder ( stream ) | Decodes the binary value data of the ABI type typ into its equivalent python value . |
46,247 | def decode_abi ( self , types : Iterable [ TypeStr ] , data : Decodable ) -> Tuple [ Any , ... ] : if not is_bytes ( data ) : raise TypeError ( "The `data` value must be of bytes type. Got {0}" . format ( type ( data ) ) ) decoders = [ self . _registry . get_decoder ( type_str ) for type_str in types ] decoder = TupleDecoder ( decoders = decoders ) stream = ContextFramesBytesIO ( data ) return decoder ( stream ) | Decodes the binary value data as a sequence of values of the ABI types in types via the head - tail mechanism into a tuple of equivalent python values . |
46,248 | async def create_turn_endpoint ( protocol_factory , server_addr , username , password , lifetime = 600 , ssl = False , transport = 'udp' ) : loop = asyncio . get_event_loop ( ) if transport == 'tcp' : _ , inner_protocol = await loop . create_connection ( lambda : TurnClientTcpProtocol ( server_addr , username = username , password = password , lifetime = lifetime ) , host = server_addr [ 0 ] , port = server_addr [ 1 ] , ssl = ssl ) else : _ , inner_protocol = await loop . create_datagram_endpoint ( lambda : TurnClientUdpProtocol ( server_addr , username = username , password = password , lifetime = lifetime ) , remote_addr = server_addr ) protocol = protocol_factory ( ) transport = TurnTransport ( protocol , inner_protocol ) await transport . _connect ( ) return transport , protocol | Create datagram connection relayed over TURN . |
46,249 | async def connect ( self ) : request = stun . Message ( message_method = stun . Method . ALLOCATE , message_class = stun . Class . REQUEST ) request . attributes [ 'LIFETIME' ] = self . lifetime request . attributes [ 'REQUESTED-TRANSPORT' ] = UDP_TRANSPORT try : response , _ = await self . request ( request ) except exceptions . TransactionFailed as e : response = e . response if response . attributes [ 'ERROR-CODE' ] [ 0 ] == 401 : self . nonce = response . attributes [ 'NONCE' ] self . realm = response . attributes [ 'REALM' ] self . integrity_key = make_integrity_key ( self . username , self . realm , self . password ) request . transaction_id = random_transaction_id ( ) response , _ = await self . request ( request ) self . relayed_address = response . attributes [ 'XOR-RELAYED-ADDRESS' ] logger . info ( 'TURN allocation created %s' , self . relayed_address ) self . refresh_handle = asyncio . ensure_future ( self . refresh ( ) ) return self . relayed_address | Create a TURN allocation . |
46,250 | async def delete ( self ) : if self . refresh_handle : self . refresh_handle . cancel ( ) self . refresh_handle = None request = stun . Message ( message_method = stun . Method . REFRESH , message_class = stun . Class . REQUEST ) request . attributes [ 'LIFETIME' ] = 0 await self . request ( request ) logger . info ( 'TURN allocation deleted %s' , self . relayed_address ) if self . receiver : self . receiver . connection_lost ( None ) | Delete the TURN allocation . |
46,251 | async def refresh ( self ) : while True : await asyncio . sleep ( 5 / 6 * self . lifetime ) request = stun . Message ( message_method = stun . Method . REFRESH , message_class = stun . Class . REQUEST ) request . attributes [ 'LIFETIME' ] = self . lifetime await self . request ( request ) logger . info ( 'TURN allocation refreshed %s' , self . relayed_address ) | Periodically refresh the TURN allocation . |
46,252 | async def send_data ( self , data , addr ) : channel = self . peer_to_channel . get ( addr ) if channel is None : channel = self . channel_number self . channel_number += 1 self . channel_to_peer [ channel ] = addr self . peer_to_channel [ addr ] = channel await self . channel_bind ( channel , addr ) header = struct . pack ( '!HH' , channel , len ( data ) ) self . _send ( header + data ) | Send data to a remote host via the TURN server . |
46,253 | def send_stun ( self , message , addr ) : logger . debug ( '%s > %s %s' , self , addr , message ) self . _send ( bytes ( message ) ) | Send a STUN message to the TURN server . |
46,254 | def get_extra_info ( self , name , default = None ) : if name == 'related_address' : return self . __inner_protocol . transport . get_extra_info ( 'sockname' ) elif name == 'sockname' : return self . __relayed_address return default | Return optional transport information . |
46,255 | def sendto ( self , data , addr ) : asyncio . ensure_future ( self . __inner_protocol . send_data ( data , addr ) ) | Sends the data bytes to the remote peer given addr . |
46,256 | def candidate_foundation ( candidate_type , candidate_transport , base_address ) : key = '%s|%s|%s' % ( candidate_type , candidate_transport , base_address ) return hashlib . md5 ( key . encode ( 'ascii' ) ) . hexdigest ( ) | See RFC 5245 - 4 . 1 . 1 . 3 . Computing Foundations |
46,257 | def candidate_priority ( candidate_component , candidate_type , local_pref = 65535 ) : if candidate_type == 'host' : type_pref = 126 elif candidate_type == 'prflx' : type_pref = 110 elif candidate_type == 'srflx' : type_pref = 100 else : type_pref = 0 return ( 1 << 24 ) * type_pref + ( 1 << 8 ) * local_pref + ( 256 - candidate_component ) | See RFC 5245 - 4 . 1 . 2 . 1 . Recommended Formula |
46,258 | def to_sdp ( self ) : sdp = '%s %d %s %d %s %d typ %s' % ( self . foundation , self . component , self . transport , self . priority , self . host , self . port , self . type ) if self . related_address is not None : sdp += ' raddr %s' % self . related_address if self . related_port is not None : sdp += ' rport %s' % self . related_port if self . tcptype is not None : sdp += ' tcptype %s' % self . tcptype if self . generation is not None : sdp += ' generation %d' % self . generation return sdp | Return a string representation suitable for SDP . |
46,259 | def can_pair_with ( self , other ) : a = ipaddress . ip_address ( self . host ) b = ipaddress . ip_address ( other . host ) return ( self . component == other . component and self . transport . lower ( ) == other . transport . lower ( ) and a . version == b . version ) | A local candidate is paired with a remote candidate if and only if the two candidates have the same component ID and have the same IP address version . |
46,260 | def candidate_pair_priority ( local , remote , ice_controlling ) : G = ice_controlling and local . priority or remote . priority D = ice_controlling and remote . priority or local . priority return ( 1 << 32 ) * min ( G , D ) + 2 * max ( G , D ) + ( G > D and 1 or 0 ) | See RFC 5245 - 5 . 7 . 2 . Computing Pair Priority and Ordering Pairs |
46,261 | def get_host_addresses ( use_ipv4 , use_ipv6 ) : addresses = [ ] for interface in netifaces . interfaces ( ) : ifaddresses = netifaces . ifaddresses ( interface ) for address in ifaddresses . get ( socket . AF_INET , [ ] ) : if use_ipv4 and address [ 'addr' ] != '127.0.0.1' : addresses . append ( address [ 'addr' ] ) for address in ifaddresses . get ( socket . AF_INET6 , [ ] ) : if use_ipv6 and address [ 'addr' ] != '::1' and '%' not in address [ 'addr' ] : addresses . append ( address [ 'addr' ] ) return addresses | Get local IP addresses . |
46,262 | async def server_reflexive_candidate ( protocol , stun_server ) : loop = asyncio . get_event_loop ( ) stun_server = ( await loop . run_in_executor ( None , socket . gethostbyname , stun_server [ 0 ] ) , stun_server [ 1 ] ) request = stun . Message ( message_method = stun . Method . BINDING , message_class = stun . Class . REQUEST ) response , _ = await protocol . request ( request , stun_server ) local_candidate = protocol . local_candidate return Candidate ( foundation = candidate_foundation ( 'srflx' , 'udp' , local_candidate . host ) , component = local_candidate . component , transport = local_candidate . transport , priority = candidate_priority ( local_candidate . component , 'srflx' ) , host = response . attributes [ 'XOR-MAPPED-ADDRESS' ] [ 0 ] , port = response . attributes [ 'XOR-MAPPED-ADDRESS' ] [ 1 ] , type = 'srflx' , related_address = local_candidate . host , related_port = local_candidate . port ) | Query STUN server to obtain a server - reflexive candidate . |
46,263 | def sort_candidate_pairs ( pairs , ice_controlling ) : def pair_priority ( pair ) : return - candidate_pair_priority ( pair . local_candidate , pair . remote_candidate , ice_controlling ) pairs . sort ( key = pair_priority ) | Sort a list of candidate pairs . |
46,264 | def send_stun ( self , message , addr ) : self . __log_debug ( '> %s %s' , addr , message ) self . transport . sendto ( bytes ( message ) , addr ) | Send a STUN message . |
46,265 | def add_remote_candidate ( self , remote_candidate ) : if self . _remote_candidates_end : raise ValueError ( 'Cannot add remote candidate after end-of-candidates.' ) if remote_candidate is None : self . _prune_components ( ) self . _remote_candidates_end = True return self . _remote_candidates . append ( remote_candidate ) for protocol in self . _protocols : if ( protocol . local_candidate . can_pair_with ( remote_candidate ) and not self . _find_pair ( protocol , remote_candidate ) ) : pair = CandidatePair ( protocol , remote_candidate ) self . _check_list . append ( pair ) self . sort_check_list ( ) | Add a remote candidate or signal end - of - candidates . |
46,266 | async def gather_candidates ( self ) : if not self . _local_candidates_start : self . _local_candidates_start = True addresses = get_host_addresses ( use_ipv4 = self . _use_ipv4 , use_ipv6 = self . _use_ipv6 ) for component in self . _components : self . _local_candidates += await self . get_component_candidates ( component = component , addresses = addresses ) self . _local_candidates_end = True | Gather local candidates . |
46,267 | def get_default_candidate ( self , component ) : for candidate in sorted ( self . _local_candidates , key = lambda x : x . priority ) : if candidate . component == component : return candidate | Gets the default local candidate for the specified component . |
46,268 | async def connect ( self ) : if not self . _local_candidates_end : raise ConnectionError ( 'Local candidates gathering was not performed' ) if ( self . remote_username is None or self . remote_password is None ) : raise ConnectionError ( 'Remote username or password is missing' ) for remote_candidate in self . _remote_candidates : for protocol in self . _protocols : if ( protocol . local_candidate . can_pair_with ( remote_candidate ) and not self . _find_pair ( protocol , remote_candidate ) ) : pair = CandidatePair ( protocol , remote_candidate ) self . _check_list . append ( pair ) self . sort_check_list ( ) self . _unfreeze_initial ( ) for check in self . _early_checks : self . check_incoming ( * check ) self . _early_checks = [ ] while True : if not self . check_periodic ( ) : break await asyncio . sleep ( 0.02 ) if self . _check_list : res = await self . _check_list_state . get ( ) else : res = ICE_FAILED for check in self . _check_list : if check . handle : check . handle . cancel ( ) if res != ICE_COMPLETED : raise ConnectionError ( 'ICE negotiation failed' ) self . _query_consent_handle = asyncio . ensure_future ( self . query_consent ( ) ) | Perform ICE handshake . |
46,269 | async def recvfrom ( self ) : if not len ( self . _nominated ) : raise ConnectionError ( 'Cannot receive data, not connected' ) result = await self . _queue . get ( ) if result [ 0 ] is None : raise ConnectionError ( 'Connection lost while receiving data' ) return result | Receive the next datagram . |
46,270 | async def sendto ( self , data , component ) : active_pair = self . _nominated . get ( component ) if active_pair : await active_pair . protocol . send_data ( data , active_pair . remote_addr ) else : raise ConnectionError ( 'Cannot send data, not connected' ) | Send a datagram on the specified component . |
46,271 | def set_selected_pair ( self , component , local_foundation , remote_foundation ) : protocol = None for p in self . _protocols : if ( p . local_candidate . component == component and p . local_candidate . foundation == local_foundation ) : protocol = p break remote_candidate = None for c in self . _remote_candidates : if c . component == component and c . foundation == remote_foundation : remote_candidate = c assert ( protocol and remote_candidate ) self . _nominated [ component ] = CandidatePair ( protocol , remote_candidate ) | Force the selected candidate pair . |
46,272 | def check_incoming ( self , message , addr , protocol ) : component = protocol . local_candidate . component remote_candidate = None for c in self . _remote_candidates : if c . host == addr [ 0 ] and c . port == addr [ 1 ] : remote_candidate = c assert remote_candidate . component == component break if remote_candidate is None : remote_candidate = Candidate ( foundation = random_string ( 10 ) , component = component , transport = 'udp' , priority = message . attributes [ 'PRIORITY' ] , host = addr [ 0 ] , port = addr [ 1 ] , type = 'prflx' ) self . _remote_candidates . append ( remote_candidate ) self . __log_info ( 'Discovered peer reflexive candidate %s' , remote_candidate ) pair = self . _find_pair ( protocol , remote_candidate ) if pair is None : pair = CandidatePair ( protocol , remote_candidate ) pair . state = CandidatePair . State . WAITING self . _check_list . append ( pair ) self . sort_check_list ( ) if pair . state in [ CandidatePair . State . WAITING , CandidatePair . State . FAILED ] : pair . handle = asyncio . ensure_future ( self . check_start ( pair ) ) if 'USE-CANDIDATE' in message . attributes and not self . ice_controlling : pair . remote_nominated = True if pair . state == CandidatePair . State . SUCCEEDED : pair . nominated = True self . check_complete ( pair ) | Handle a successful incoming check .
46,273 | async def check_start ( self , pair ) : self . check_state ( pair , CandidatePair . State . IN_PROGRESS ) request = self . build_request ( pair ) try : response , addr = await pair . protocol . request ( request , pair . remote_addr , integrity_key = self . remote_password . encode ( 'utf8' ) ) except exceptions . TransactionError as exc : if exc . response and exc . response . attributes . get ( 'ERROR-CODE' , ( None , None ) ) [ 0 ] == 487 : if 'ICE-CONTROLLING' in request . attributes : self . switch_role ( ice_controlling = False ) elif 'ICE-CONTROLLED' in request . attributes : self . switch_role ( ice_controlling = True ) return await self . check_start ( pair ) else : self . check_state ( pair , CandidatePair . State . FAILED ) self . check_complete ( pair ) return if addr != pair . remote_addr : self . __log_info ( 'Check %s failed : source address mismatch' , pair ) self . check_state ( pair , CandidatePair . State . FAILED ) self . check_complete ( pair ) return self . check_state ( pair , CandidatePair . State . SUCCEEDED ) if self . ice_controlling or pair . remote_nominated : pair . nominated = True self . check_complete ( pair ) | Starts a check . |
46,274 | def check_state ( self , pair , state ) : self . __log_info ( 'Check %s %s -> %s' , pair , pair . state , state ) pair . state = state | Updates the state of a check . |
46,275 | def _find_pair ( self , protocol , remote_candidate ) : for pair in self . _check_list : if ( pair . protocol == protocol and pair . remote_candidate == remote_candidate ) : return pair return None | Find a candidate pair in the check list . |
46,276 | def _prune_components ( self ) : seen_components = set ( map ( lambda x : x . component , self . _remote_candidates ) ) missing_components = self . _components - seen_components if missing_components : self . __log_info ( 'Components %s have no candidate pairs' % missing_components ) self . _components = seen_components | Remove components for which the remote party did not provide any candidates . |
46,277 | def parse_message ( data , integrity_key = None ) : if len ( data ) < HEADER_LENGTH : raise ValueError ( 'STUN message length is less than 20 bytes' ) message_type , length , cookie , transaction_id = unpack ( '!HHI12s' , data [ 0 : HEADER_LENGTH ] ) if len ( data ) != HEADER_LENGTH + length : raise ValueError ( 'STUN message length does not match' ) attributes = OrderedDict ( ) pos = HEADER_LENGTH while pos <= len ( data ) - 4 : attr_type , attr_len = unpack ( '!HH' , data [ pos : pos + 4 ] ) v = data [ pos + 4 : pos + 4 + attr_len ] pad_len = 4 * ( ( attr_len + 3 ) // 4 ) - attr_len if attr_type in ATTRIBUTES_BY_TYPE : _ , attr_name , attr_pack , attr_unpack = ATTRIBUTES_BY_TYPE [ attr_type ] if attr_unpack == unpack_xor_address : attributes [ attr_name ] = attr_unpack ( v , transaction_id = transaction_id ) else : attributes [ attr_name ] = attr_unpack ( v ) if attr_name == 'FINGERPRINT' : if attributes [ attr_name ] != message_fingerprint ( data [ 0 : pos ] ) : raise ValueError ( 'STUN message fingerprint does not match' ) elif attr_name == 'MESSAGE-INTEGRITY' : if ( integrity_key is not None and attributes [ attr_name ] != message_integrity ( data [ 0 : pos ] , integrity_key ) ) : raise ValueError ( 'STUN message integrity does not match' ) pos += 4 + attr_len + pad_len return Message ( message_method = message_type & 0x3eef , message_class = message_type & 0x0110 , transaction_id = transaction_id , attributes = attributes ) | Parses a STUN message . |
46,278 | def connection_made ( self , transport : asyncio . BaseTransport ) -> None : if self . _stream_reader is None : raise SMTPServerDisconnected ( "Client not connected" ) self . _stream_reader . _transport = transport self . _over_ssl = transport . get_extra_info ( "sslcontext" ) is not None self . _stream_writer = asyncio . StreamWriter ( transport , self , self . _stream_reader , self . _loop ) self . _client_connected_cb ( self . _stream_reader , self . _stream_writer ) | Modified connection_made that supports upgrading our transport in place using STARTTLS . |
46,279 | def upgrade_transport ( self , context : ssl . SSLContext , server_hostname : str = None , waiter : Awaitable = None , ) -> SSLProtocol : if self . _over_ssl : raise RuntimeError ( "Already using TLS." ) if self . _stream_reader is None or self . _stream_writer is None : raise SMTPServerDisconnected ( "Client not connected" ) transport = self . _stream_reader . _transport tls_protocol = SSLProtocol ( self . _loop , self , context , waiter , server_side = False , server_hostname = server_hostname , ) app_transport = tls_protocol . _app_transport if hasattr ( transport , "set_protocol" ) : transport . set_protocol ( tls_protocol ) else : transport . _protocol = tls_protocol self . _stream_reader . _transport = app_transport self . _stream_writer . _transport = app_transport tls_protocol . connection_made ( transport ) self . _over_ssl = True return tls_protocol | Upgrade our transport to TLS in place . |
46,280 | async def read_response ( self , timeout : NumType = None ) -> SMTPResponse : if self . _stream_reader is None : raise SMTPServerDisconnected ( "Client not connected" ) code = None response_lines = [ ] while True : async with self . _io_lock : line = await self . _readline ( timeout = timeout ) try : code = int ( line [ : 3 ] ) except ValueError : pass message = line [ 4 : ] . strip ( b" \t\r\n" ) . decode ( "utf-8" , "surrogateescape" ) response_lines . append ( message ) if line [ 3 : 4 ] != b"-" : break full_message = "\n" . join ( response_lines ) if code is None : raise SMTPResponseException ( SMTPStatus . invalid_response . value , "Malformed SMTP response: {}" . format ( full_message ) , ) return SMTPResponse ( code , full_message ) | Get a status response from the server .
46,281 | async def write_and_drain ( self , data : bytes , timeout : NumType = None ) -> None : if self . _stream_writer is None : raise SMTPServerDisconnected ( "Client not connected" ) self . _stream_writer . write ( data ) async with self . _io_lock : await self . _drain_writer ( timeout ) | Format a command and send it to the server . |
46,282 | async def write_message_data ( self , data : bytes , timeout : NumType = None ) -> None : data = LINE_ENDINGS_REGEX . sub ( b"\r\n" , data ) data = PERIOD_REGEX . sub ( b".." , data ) if not data . endswith ( b"\r\n" ) : data += b"\r\n" data += b".\r\n" await self . write_and_drain ( data , timeout = timeout ) | Encode and write email message data . |
46,283 | async def execute_command ( self , * args : bytes , timeout : NumType = None ) -> SMTPResponse : command = b" " . join ( args ) + b"\r\n" await self . write_and_drain ( command , timeout = timeout ) response = await self . read_response ( timeout = timeout ) return response | Sends an SMTP command along with any args to the server and returns a response . |
46,284 | def last_ehlo_response ( self , response : SMTPResponse ) -> None : extensions , auth_methods = parse_esmtp_extensions ( response . message ) self . _last_ehlo_response = response self . esmtp_extensions = extensions self . server_auth_methods = auth_methods self . supports_esmtp = True | When setting the last EHLO response parse the message for supported extensions and auth methods . |
46,285 | async def helo ( self , hostname : str = None , timeout : DefaultNumType = _default ) -> SMTPResponse : if hostname is None : hostname = self . source_address async with self . _command_lock : response = await self . execute_command ( b"HELO" , hostname . encode ( "ascii" ) , timeout = timeout ) self . last_helo_response = response if response . code != SMTPStatus . completed : raise SMTPHeloError ( response . code , response . message ) return response | Send the SMTP HELO command . Hostname to send for this command defaults to the FQDN of the local host . |
46,286 | async def help ( self , timeout : DefaultNumType = _default ) -> str : await self . _ehlo_or_helo_if_needed ( ) async with self . _command_lock : response = await self . execute_command ( b"HELP" , timeout = timeout ) success_codes = ( SMTPStatus . system_status_ok , SMTPStatus . help_message , SMTPStatus . completed , ) if response . code not in success_codes : raise SMTPResponseException ( response . code , response . message ) return response . message | Send the SMTP HELP command which responds with help text . |
46,287 | async def noop ( self , timeout : DefaultNumType = _default ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) async with self . _command_lock : response = await self . execute_command ( b"NOOP" , timeout = timeout ) if response . code != SMTPStatus . completed : raise SMTPResponseException ( response . code , response . message ) return response | Send an SMTP NOOP command which does nothing . |
46,288 | async def vrfy ( self , address : str , timeout : DefaultNumType = _default ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) parsed_address = parse_address ( address ) async with self . _command_lock : response = await self . execute_command ( b"VRFY" , parsed_address . encode ( "ascii" ) , timeout = timeout ) success_codes = ( SMTPStatus . completed , SMTPStatus . will_forward , SMTPStatus . cannot_vrfy , ) if response . code not in success_codes : raise SMTPResponseException ( response . code , response . message ) return response | Send an SMTP VRFY command which tests an address for validity . Not many servers support this command . |
46,289 | async def expn ( self , address : str , timeout : DefaultNumType = _default ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) parsed_address = parse_address ( address ) async with self . _command_lock : response = await self . execute_command ( b"EXPN" , parsed_address . encode ( "ascii" ) , timeout = timeout ) if response . code != SMTPStatus . completed : raise SMTPResponseException ( response . code , response . message ) return response | Send an SMTP EXPN command which expands a mailing list . Not many servers support this command . |
46,290 | async def quit ( self , timeout : DefaultNumType = _default ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) async with self . _command_lock : response = await self . execute_command ( b"QUIT" , timeout = timeout ) if response . code != SMTPStatus . closing : raise SMTPResponseException ( response . code , response . message ) self . close ( ) return response | Send the SMTP QUIT command which closes the connection . Also closes the connection from our side after a response is received . |
46,291 | async def rcpt ( self , recipient : str , options : Iterable [ str ] = None , timeout : DefaultNumType = _default , ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) if options is None : options = [ ] options_bytes = [ option . encode ( "ascii" ) for option in options ] to = b"TO:" + quote_address ( recipient ) . encode ( "ascii" ) async with self . _command_lock : response = await self . execute_command ( b"RCPT" , to , * options_bytes , timeout = timeout ) success_codes = ( SMTPStatus . completed , SMTPStatus . will_forward ) if response . code not in success_codes : raise SMTPRecipientRefused ( response . code , response . message , recipient ) return response | Send an SMTP RCPT command which specifies a single recipient for the message . This command is sent once per recipient and must be preceded by MAIL . |
46,292 | async def data ( self , message : Union [ str , bytes ] , timeout : DefaultNumType = _default ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) self . _raise_error_if_disconnected ( ) if timeout is _default : timeout = self . timeout if isinstance ( message , str ) : message = message . encode ( "ascii" ) async with self . _command_lock : start_response = await self . execute_command ( b"DATA" , timeout = timeout ) if start_response . code != SMTPStatus . start_input : raise SMTPDataError ( start_response . code , start_response . message ) try : await self . protocol . write_message_data ( message , timeout = timeout ) response = await self . protocol . read_response ( timeout = timeout ) except SMTPServerDisconnected as exc : self . close ( ) raise exc if response . code != SMTPStatus . completed : raise SMTPDataError ( response . code , response . message ) return response | Send an SMTP DATA command followed by the message given . This method transfers the actual email content to the server . |
46,293 | async def ehlo ( self , hostname : str = None , timeout : DefaultNumType = _default ) -> SMTPResponse : if hostname is None : hostname = self . source_address async with self . _command_lock : response = await self . execute_command ( b"EHLO" , hostname . encode ( "ascii" ) , timeout = timeout ) self . last_ehlo_response = response if response . code != SMTPStatus . completed : raise SMTPHeloError ( response . code , response . message ) return response | Send the SMTP EHLO command . Hostname to send for this command defaults to the FQDN of the local host . |
46,294 | def _reset_server_state ( self ) -> None : self . last_helo_response = None self . _last_ehlo_response = None self . esmtp_extensions = { } self . supports_esmtp = False self . server_auth_methods = [ ] | Clear stored information about the server . |
46,295 | def supported_auth_methods ( self ) -> List [ str ] : return [ auth for auth in self . AUTH_METHODS if auth in self . server_auth_methods ] | Get all AUTH methods supported by the both server and by us . |
46,296 | async def login ( self , username : str , password : str , timeout : DefaultNumType = _default ) -> SMTPResponse : await self . _ehlo_or_helo_if_needed ( ) if not self . supports_extension ( "auth" ) : raise SMTPException ( "SMTP AUTH extension not supported by server." ) response = None exception = None for auth_name in self . supported_auth_methods : method_name = "auth_{}" . format ( auth_name . replace ( "-" , "" ) ) try : auth_method = getattr ( self , method_name ) except AttributeError : raise RuntimeError ( "Missing handler for auth method {}" . format ( auth_name ) ) try : response = await auth_method ( username , password , timeout = timeout ) except SMTPAuthenticationError as exc : exception = exc else : break if response is None : raise exception or SMTPException ( "No suitable authentication method found." ) return response | Tries to login with supported auth methods . |
46,297 | async def auth_crammd5 ( self , username : str , password : str , timeout : DefaultNumType = _default ) -> SMTPResponse : async with self . _command_lock : initial_response = await self . execute_command ( b"AUTH" , b"CRAM-MD5" , timeout = timeout ) if initial_response . code != SMTPStatus . auth_continue : raise SMTPAuthenticationError ( initial_response . code , initial_response . message ) password_bytes = password . encode ( "ascii" ) username_bytes = username . encode ( "ascii" ) response_bytes = initial_response . message . encode ( "ascii" ) verification_bytes = crammd5_verify ( username_bytes , password_bytes , response_bytes ) response = await self . execute_command ( verification_bytes ) if response . code != SMTPStatus . auth_successful : raise SMTPAuthenticationError ( response . code , response . message ) return response | CRAM - MD5 auth uses the password as a shared secret to MD5 the server s response . |
46,298 | async def auth_plain ( self , username : str , password : str , timeout : DefaultNumType = _default ) -> SMTPResponse : username_bytes = username . encode ( "ascii" ) password_bytes = password . encode ( "ascii" ) username_and_password = b"\0" + username_bytes + b"\0" + password_bytes encoded = base64 . b64encode ( username_and_password ) async with self . _command_lock : response = await self . execute_command ( b"AUTH" , b"PLAIN" , encoded , timeout = timeout ) if response . code != SMTPStatus . auth_successful : raise SMTPAuthenticationError ( response . code , response . message ) return response | PLAIN auth encodes the username and password in one Base64 encoded string . No verification message is required . |
46,299 | async def auth_login ( self , username : str , password : str , timeout : DefaultNumType = _default ) -> SMTPResponse : encoded_username = base64 . b64encode ( username . encode ( "ascii" ) ) encoded_password = base64 . b64encode ( password . encode ( "ascii" ) ) async with self . _command_lock : initial_response = await self . execute_command ( b"AUTH" , b"LOGIN" , encoded_username , timeout = timeout ) if initial_response . code != SMTPStatus . auth_continue : raise SMTPAuthenticationError ( initial_response . code , initial_response . message ) response = await self . execute_command ( encoded_password , timeout = timeout ) if response . code != SMTPStatus . auth_successful : raise SMTPAuthenticationError ( response . code , response . message ) return response | LOGIN auth sends the Base64 encoded username and password in sequence . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.