idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
22,200
def _has_qry_hit_longer_than ( self , nucmer_hits , min_length , hits_to_exclude = None ) : if hits_to_exclude is None : to_exclude = set ( ) else : to_exclude = hits_to_exclude long_hits = [ hit . hit_length_qry for hit in nucmer_hits if hit not in to_exclude and hit . hit_length_qry > min_length ] return len ( long_hits ) > 0
Returns True iff list of nucmer_hits has a hit longer than min_length not counting the hits in hits_to_exclude
22,201
def _can_circularise(self, start_hit, end_hit):
    """Return True iff the two nucmer hits can be used to circularise the
    reference sequence of the hits.

    start_hit, end_hit: nucmer hit objects sharing the same reference.
    """
    # at least one hit must anchor an end of the reference sequence
    if not (self._is_at_ref_start(start_hit) or self._is_at_ref_end(end_hit)):
        return False
    # same-strand case: start hit ends the query, end hit starts it
    if self._is_at_qry_end(start_hit) and self._is_at_qry_start(end_hit) and start_hit.on_same_strand() and end_hit.on_same_strand():
        return True
    # opposite-strand case: mirror of the above with both hits reversed
    if self._is_at_qry_start(start_hit) and self._is_at_qry_end(end_hit) and (not start_hit.on_same_strand()) and (not end_hit.on_same_strand()):
        return True
    return False
Returns true iff the two hits can be used to circularise the reference sequence of the hits
22,202
def _remove_keys_from_dict_with_nonunique_values ( self , d , log_fh = None , log_outprefix = None ) : value_counts = collections . Counter ( d . values ( ) ) new_d = { } writing_log_file = None not in [ log_fh , log_outprefix ] for key in d : if value_counts [ d [ key ] ] == 1 : new_d [ key ] = d [ key ] elif writing_log_file : print ( log_outprefix , 'Reject because non-unique:' , d [ key ] , sep = '\t' , file = log_fh ) return new_d
Returns a new dictionary with keys from input dict removed if their value was not unique
22,203
def _make_circularised_contig(self, ref_start_hit, ref_end_hit):
    """Given a nucmer ref_start_hit and ref_end_hit, return a new circularised contig.

    Assumes the two hits can be used to circularise the reference contig of
    the hits using the query contig (see _can_circularise).
    """
    assert ref_start_hit.ref_name == ref_end_hit.ref_name
    assert ref_start_hit.qry_name == ref_end_hit.qry_name
    qry_name = ref_start_hit.qry_name
    ref_name = ref_start_hit.ref_name
    ref_start_coords = ref_start_hit.ref_coords()
    ref_end_coords = ref_end_hit.ref_coords()
    if ref_start_coords.intersects(ref_end_coords):
        # the hits overlap on the reference: the reassembled query contig
        # already covers the whole circle, so just relabel it
        new_ctg = copy.copy(self.reassembly_contigs[qry_name])
        new_ctg.id = ref_name
        return new_ctg
    if ref_start_hit.on_same_strand():
        # forward orientation: reference middle + query segment spanning the join
        qry_start_coords = ref_end_hit.qry_coords()
        qry_end_coords = ref_start_hit.qry_coords()
        bases = self.original_contigs[ref_name][ref_start_coords.end + 1:ref_end_coords.start] + self.reassembly_contigs[qry_name][qry_start_coords.start:qry_end_coords.end + 1]
        return pyfastaq.sequences.Fasta(ref_name, bases)
    else:
        # reverse orientation: the query segment must be reverse-complemented first
        qry_start_coords = ref_start_hit.qry_coords()
        qry_end_coords = ref_end_hit.qry_coords()
        tmp_seq = pyfastaq.sequences.Fasta('x', self.reassembly_contigs[qry_name][qry_start_coords.start:qry_end_coords.end + 1])
        tmp_seq.revcomp()
        return pyfastaq.sequences.Fasta(ref_name, self.original_contigs[ref_name][ref_start_coords.end + 1:ref_end_coords.start] + tmp_seq.seq)
Given a nucmer ref_start_hit and ref_end_hit returns a new contig . Assumes that these hits can be used to circularise the reference contig of the hits using the query contig
22,204
def _orientation_ok_to_bridge_contigs ( self , start_hit , end_hit ) : assert start_hit . qry_name == end_hit . qry_name if start_hit . ref_name == end_hit . ref_name : return False if ( ( self . _is_at_ref_end ( start_hit ) and start_hit . on_same_strand ( ) ) or ( self . _is_at_ref_start ( start_hit ) and not start_hit . on_same_strand ( ) ) ) : start_hit_ok = True else : start_hit_ok = False if ( ( self . _is_at_ref_start ( end_hit ) and end_hit . on_same_strand ( ) ) or ( self . _is_at_ref_end ( end_hit ) and not end_hit . on_same_strand ( ) ) ) : end_hit_ok = True else : end_hit_ok = False return start_hit_ok and end_hit_ok
Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits
22,205
def _merge_all_bridged_contigs(self, nucmer_hits, ref_contigs, qry_contigs, log_fh=None, log_outprefix=None):
    """Make any possible merges of reference contigs bridged by a query contig.

    nucmer_hits: dict whose values are lists of nucmer hits.
    ref_contigs, qry_contigs: contig dicts updated by _merge_bridged_contig_pair.
    Returns True iff at least one join was made (None when there were no hits).
    """
    writing_log_file = None not in [log_fh, log_outprefix]
    if len(nucmer_hits) == 0:
        if writing_log_file:
            print(log_outprefix, 'No nucmer hits, so will not make any merges', sep='\t', file=log_fh)
        return
    # flatten the per-key hit lists and regroup them by query contig
    all_nucmer_hits = []
    for l in nucmer_hits.values():
        all_nucmer_hits.extend(l)
    nucmer_hits_by_qry = self._hits_hashed_by_query(all_nucmer_hits)
    bridges = self._get_possible_query_bridging_contigs(nucmer_hits_by_qry, log_fh=log_fh, log_outprefix=log_outprefix)
    if writing_log_file:
        print(log_outprefix, '\tPotential contigs to use for merging: ', ' '.join(sorted(bridges.keys())), sep='', file=log_fh)
    bridges = self._filter_bridging_contigs(bridges)
    if writing_log_file:
        print(log_outprefix, '\tContigs to use for merging after uniqueness filtering: ', ' '.join(sorted(bridges.keys())), sep='', file=log_fh)
    merged = set()
    made_a_join = False
    for qry_name, (start_hit, end_hit) in bridges.items():
        # never merge the same reference contig twice in one pass
        if start_hit.ref_name in merged or end_hit.ref_name in merged:
            continue
        self._merge_bridged_contig_pair(start_hit, end_hit, ref_contigs, qry_contigs, log_fh=log_fh, log_outprefix=log_outprefix)
        merged.add(start_hit.ref_name)
        merged.add(end_hit.ref_name)
        made_a_join = True
    if writing_log_file:
        print(log_outprefix, '\tMade at least one contig join: ', made_a_join, sep='', file=log_fh)
    return made_a_join
Input is dict of nucmer_hits . Makes any possible contig merges . Returns True iff any merges were made
22,206
def _write_act_files(self, ref_fasta, qry_fasta, coords_file, outprefix):
    """Write an ACT crunch file and a shell script that launches ACT showing
    the comparison of ref and qry.

    Produces outprefix + '.crunch' and an executable outprefix + '.start_act.sh'.
    """
    if self.verbose:
        print('Making ACT files from', ref_fasta, qry_fasta, coords_file)
    # relative paths keep the generated shell script portable
    ref_fasta = os.path.relpath(ref_fasta)
    qry_fasta = os.path.relpath(qry_fasta)
    coords_file = os.path.relpath(coords_file)
    outprefix = os.path.relpath(outprefix)
    # .fai indexes are required by the crunch conversion below
    self._index_fasta(ref_fasta)
    self._index_fasta(qry_fasta)
    crunch_file = outprefix + '.crunch'
    pymummer.coords_file.convert_to_msp_crunch(coords_file, crunch_file, ref_fai=ref_fasta + '.fai', qry_fai=qry_fasta + '.fai')
    bash_script = outprefix + '.start_act.sh'
    with open(bash_script, 'w') as f:
        print('#!/usr/bin/env bash', file=f)
        print('act', ref_fasta, crunch_file, qry_fasta, file=f)
    pyfastaq.utils.syscall('chmod +x ' + bash_script)
Writes crunch file and shell script to start up ACT showing comparison of ref and qry
22,207
def _contigs_dict_to_file(self, contigs, fname):
    """Write the contigs dictionary to fname, longest sequence first."""
    f = pyfastaq.utils.open_file_write(fname)
    by_length_desc = sorted(contigs, key=lambda name: len(contigs[name]), reverse=True)
    for name in by_length_desc:
        print(contigs[name], file=f)
    pyfastaq.utils.close(f)
Writes dictionary of contigs to file
22,208
def _get_spades_circular_nodes(self, fastg):
    """Return the set of node names in the SPAdes fastg file that are circular
    (self-linked on both the forward and reverse strand). Names match those
    in the SPAdes fasta file."""
    seq_reader = pyfastaq.sequences.file_reader(fastg)
    names = set(x.id.rstrip(';') for x in seq_reader if ':' in x.id)
    found_fwd = set()
    found_rev = set()
    for name in names:
        parts = name.split(':')
        if len(parts) != 2:
            continue
        if parts[0] == parts[1]:
            # a trailing quote marks the reverse-strand node
            if parts[0][-1] == "'":
                found_rev.add(parts[0][:-1])
            else:
                found_fwd.add(parts[0])
    return found_fwd.intersection(found_rev)
Returns set of names of nodes in SPAdes fastg file that are circular . Names will match those in spades fasta file
22,209
def _make_new_contig_from_nucmer_and_spades(self, original_contig, hits, circular_spades, log_fh=None, log_outprefix=None):
    """Try to make a new circularised contig from original_contig.

    hits: list of nucmer hits, all with ref == original_contig.
    circular_spades: set of query contig names that SPAdes says are circular.
    Returns (new Fasta contig, query contig name used) or (None, None).
    """
    writing_log_file = None not in [log_fh, log_outprefix]
    hits_to_circular_contigs = [x for x in hits if x.qry_name in circular_spades]
    if len(hits_to_circular_contigs) == 0:
        if writing_log_file:
            print(log_outprefix, original_contig, 'No matches to SPAdes circular contigs', sep='\t', file=log_fh)
        return None, None
    for hit in hits_to_circular_contigs:
        # BUG FIX: several log prints below were not guarded by
        # writing_log_file, so with log_fh=None they were written to stdout.
        # Every print is now consistently behind the guard.
        if writing_log_file:
            print(log_outprefix, original_contig, 'Checking hit:', hit, sep='\t', file=log_fh)
        percent_query_covered = 100 * (hit.hit_length_qry / hit.qry_length)
        if self.min_spades_circular_percent <= percent_query_covered:
            if writing_log_file:
                print(log_outprefix, '\t', original_contig, '\t\tHit is long enough. Percent of contig covered by hit is ', percent_query_covered, sep='', file=log_fh)
            # total reference coverage by all hits from this circular query contig
            hit_intervals = [x.ref_coords() for x in hits_to_circular_contigs if x.qry_name == hit.qry_name]
            if len(hit_intervals) > 0:
                pyfastaq.intervals.merge_overlapping_in_list(hit_intervals)
                percent_covered = 100 * pyfastaq.intervals.length_sum_from_list(hit_intervals) / hit.ref_length
                if writing_log_file:
                    print(log_outprefix, '\t', original_contig, '\t\treference bases covered by spades contig:', ', '.join([str(x) for x in hit_intervals]), sep='', file=log_fh)
                    print(log_outprefix, '\t', original_contig, '\t\t ... which is ', percent_covered, ' percent of ', hit.ref_length, ' bases', sep='', file=log_fh)
                if self.min_spades_circular_percent <= percent_covered:
                    if writing_log_file:
                        print(log_outprefix, original_contig, '\tUsing hit to call as circular (enough bases covered)', sep='\t', file=log_fh)
                    return pyfastaq.sequences.Fasta(original_contig, self.reassembly_contigs[hit.qry_name].seq), hit.qry_name
                elif writing_log_file:
                    print(log_outprefix, original_contig, '\tNot using hit to call as circular (not enough bases covered)', sep='\t', file=log_fh)
        elif writing_log_file:
            print(log_outprefix, original_contig, '\tNot using hit to call as circular (hit too short)', sep='\t', file=log_fh)
    if writing_log_file:
        print(log_outprefix, original_contig, 'No suitable matches to SPAdes circular contigs', sep='\t', file=log_fh)
    return None, None
Tries to make new circularised contig from contig called original_contig . hits = list of nucmer hits all with ref = original contg . circular_spades = set of query contig names that spades says are circular
22,210
def _parse_hunk(self, header, diff, encoding):
    """Parse one hunk (header plus body lines) and append the Hunk to self.

    header: the '@@ -a,b +c,d @@' hunk header line.
    diff: iterable of (diff_line_no, line) pairs forming the hunk body.
    encoding: when not None, body lines are bytes and are decoded with it.

    Raises UnidiffParseError on a malformed body line or when the body does
    not match the lengths declared in the header.
    """
    header_info = RE_HUNK_HEADER.match(header)
    hunk_info = header_info.groups()
    hunk = Hunk(*hunk_info)
    source_line_no = hunk.source_start
    target_line_no = hunk.target_start
    # where the two counters must land once the hunk is fully consumed
    expected_source_end = source_line_no + hunk.source_length
    expected_target_end = target_line_no + hunk.target_length
    for diff_line_no, line in diff:
        if encoding is not None:
            line = line.decode(encoding)
        valid_line = RE_HUNK_EMPTY_BODY_LINE.match(line)
        if not valid_line:
            valid_line = RE_HUNK_BODY_LINE.match(line)
        if not valid_line:
            raise UnidiffParseError('Hunk diff line expected: %s' % line)
        line_type = valid_line.group('line_type')
        # an empty body line is treated as (empty) context
        if line_type == LINE_TYPE_EMPTY:
            line_type = LINE_TYPE_CONTEXT
        value = valid_line.group('value')
        original_line = Line(value, line_type=line_type)
        if line_type == LINE_TYPE_ADDED:
            original_line.target_line_no = target_line_no
            target_line_no += 1
        elif line_type == LINE_TYPE_REMOVED:
            original_line.source_line_no = source_line_no
            source_line_no += 1
        elif line_type == LINE_TYPE_CONTEXT:
            # context lines advance both source and target counters
            original_line.target_line_no = target_line_no
            target_line_no += 1
            original_line.source_line_no = source_line_no
            source_line_no += 1
        elif line_type == LINE_TYPE_NO_NEWLINE:
            pass
        else:
            original_line = None
        if (source_line_no > expected_source_end or target_line_no > expected_target_end):
            raise UnidiffParseError('Hunk is longer than expected')
        if original_line:
            original_line.diff_line_no = diff_line_no
            hunk.append(original_line)
        if (source_line_no == expected_source_end and target_line_no == expected_target_end):
            break
    if (source_line_no < expected_source_end or target_line_no < expected_target_end):
        raise UnidiffParseError('Hunk is shorter than expected')
    self.append(hunk)
Parse hunk details .
22,211
def path(self):
    """Return the file path with the VCS 'a/' / 'b/' prefix stripped.

    Falls back to the raw source_file when neither prefix convention applies.
    """
    src = self.source_file
    tgt = self.target_file
    if src.startswith('a/') and (tgt.startswith('b/') or tgt == '/dev/null'):
        return src[2:]
    if tgt.startswith('b/') and src == '/dev/null':
        return tgt[2:]
    return src
Return the file path abstracted from VCS .
22,212
def get_tag_value(string, pre, post, tagtype=float, greedy=True):
    """Extract the value between pre and post in string, cast to tagtype.

    post may be a list/tuple of alternative terminators (joined into a
    lookahead). Returns None when nothing matches; raises ValueError when
    more than one match is found.
    """
    quantifier = '?' if greedy else ''
    if isinstance(post, (list, tuple)):
        post = '(?=' + '|'.join(post) + ')'
    pattern = r'{pre}(.+{greedy}){post}'.format(pre=pre, post=post, greedy=quantifier)
    matches = re.findall(pattern, string)
    if len(matches) > 1:
        raise ValueError('More than one matching pattern found... check filename')
    if not matches:
        return None
    return tagtype(matches[0])
Extracts the value of a tag from a string .
22,213
def get_files ( dirname = None , pattern = '*.*' , recursive = True ) : if dirname is None : from FlowCytometryTools . gui import dialogs dirname = dialogs . select_directory_dialog ( 'Select a directory' ) if recursive : matches = [ ] for root , dirnames , filenames in os . walk ( dirname ) : for filename in fnmatch . filter ( filenames , pattern ) : matches . append ( os . path . join ( root , filename ) ) else : matches = glob . glob ( os . path . join ( dirname , pattern ) ) return matches
Get all file names within a given directory whose names match a given pattern.
22,214
def load(path):
    """Load and return a pickled object from the file at path."""
    # with-statement guarantees the handle is closed even if pickle.load raises,
    # replacing the manual try/finally + close of the original
    with open(path, 'rb') as f:
        return pickle.load(f)
Load pickled object from the specified file path .
22,215
def to_iter(obj):
    """Convert obj to a list unless it is already a non-string iterable.

    None is returned unchanged; strings are wrapped in a list (treated as
    scalars); other iterables are returned as-is; anything else is wrapped.
    """
    # collections.Iterable was removed in Python 3.10; use collections.abc
    from collections import abc
    if obj is None:
        return None
    if isinstance(obj, six.string_types):
        return [obj]
    if isinstance(obj, abc.Iterable):
        return obj
    return [obj]
Convert an object to a list if it is not already an iterable .
22,216
def to_list(obj):
    """Convert obj into a list via to_iter, forcing tuples into lists.
    None is returned unchanged."""
    as_iter = to_iter(obj)
    return None if as_iter is None else list(as_iter)
Converts an object into a list if it is not already an iterable, forcing tuples into lists. Nones are returned unchanged.
22,217
def copy(self, deep=True):
    """Return a copy of this object: deep by default, shallow when deep=False."""
    import copy as copy_module
    return copy_module.deepcopy(self) if deep else copy_module.copy(self)
Make a copy of this object
22,218
def tlog(x, th=1, r=_display_max, d=_l_mmax):
    """Truncated log10 transform.

    Values of x at or below th are clamped to log10(th); results are scaled
    by r/d to map onto the display range.

    x: scalar or array-like (anything numpy.where accepts)
    th: positive truncation threshold
    r, d: output scaling (display max / data log-max)
    Raises ValueError when th is not positive.
    """
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    return where(x <= th, log10(th) * 1. * r / d, log10(x) * 1. * r / d)
Truncated log10 transform .
22,219
def tlog_inv(y, th=1, r=_display_max, d=_l_mmax):
    """Inverse truncated log10 transform.

    Output values below th are clamped to th, mirroring tlog's truncation.
    Raises ValueError when th is not positive.
    """
    if th <= 0:
        raise ValueError('Threshold value must be positive. %s given.' % th)
    x = 10 ** (y * 1. * d / r)
    try:
        # array input: clamp in place
        x[x < th] = th
    except TypeError:
        # scalar input does not support item assignment
        if x < th:
            x = th
    return x
Inverse truncated log10 transform. Values below the threshold are clamped to the threshold.
22,220
def hlog_inv(y, b=500, r=_display_max, d=_l_mmax):
    """Inverse of the base 10 hyperlog transform.

    b: linearisation width parameter of the hyperlog
    r, d: display / data scaling used by the forward transform
    """
    aux = 1. * d / r * y
    s = sign(y)
    if s.shape:
        # array input: treat sign(0) as +1
        s[s == 0] = 1
    elif s == 0:
        # scalar zero
        s = 1
    return s * 10 ** (s * aux) + b * aux - s
Inverse of base 10 hyperlog transform .
22,221
def _x_for_spln ( x , nx , log_spacing ) : x = asarray ( x ) xmin = min ( x ) xmax = max ( x ) if xmin == xmax : return asarray ( [ xmin ] * nx ) if xmax <= 0 : return - _x_for_spln ( - x , nx , log_spacing ) [ : : - 1 ] if not log_spacing : return linspace ( xmin , xmax , nx ) if xmin > 0 : return logspace ( log10 ( xmin ) , log10 ( xmax ) , nx ) else : lxmax = max ( [ log10 ( xmax ) , 0 ] ) lxmin = max ( [ log10 ( abs ( xmin ) ) , 0 ] ) if lxmax == 0 and lxmin == 0 : return linspace ( xmin , xmax , nx ) if xmin > 0 : x_spln = logspace ( lxmin , lxmax , nx ) elif xmin == 0 : x_spln = r_ [ 0 , logspace ( - 1 , lxmax , nx - 1 ) ] else : f = lxmin / ( lxmin + lxmax ) nx_neg = int ( f * nx ) nx_pos = nx - nx_neg if nx <= 1 : raise AssertionError ( u'nx should never bebe 0 or 1' ) if nx_neg == 0 : nx_neg = 1 nx_pos = nx_pos - 1 if nx_pos == 0 : nx_pos = 1 nx_neg = nx_neg - 1 x_spln_pos = logspace ( - 1 , lxmax , nx_pos ) x_spln_neg = - logspace ( lxmin , - 1 , nx_neg ) x_spln = r_ [ x_spln_neg , x_spln_pos ] return x_spln
Create vector of values to be used in constructing a spline .
22,222
def _make_hlog_numeric(b, r, d):
    """Return a vectorized function that numerically computes the hlog
    transform for parameters b, r, d (by root-finding hlog_inv)."""
    def _objective(y, x, b, r, d):
        return hlog_inv(y, b, r, d) - x
    # solve hlog_inv(y) == x for each x, searching y in [-2r, 2r]
    return vectorize(lambda x: brentq(_objective, -2 * r, 2 * r, args=(x, b, r, d)))
Return a function that numerically computes the hlog transformation for given parameter values .
22,223
def hlog(x, b=500, r=_display_max, d=_l_mmax):
    """Base 10 hyperlog transform of x (scalar or sequence)."""
    hlog_fun = _make_hlog_numeric(b, r, d)
    if not hasattr(x, '__len__'):
        # scalar input
        return hlog_fun(x)
    if not len(x):
        # empty sequence: return unchanged
        return x
    return hlog_fun(x)
Base 10 hyperlog transform .
22,224
def transform_frame(frame, transform, columns=None, direction='forward', return_all=True, args=(), **kwargs):
    """Apply a transform to the specified columns of frame.

    return_all=True keeps untransformed columns alongside the transformed
    ones; otherwise only the selected columns are returned.
    """
    tfun, tname = parse_transform(transform, direction)
    columns = to_list(columns)
    if columns is None:
        columns = frame.columns
    if not return_all:
        return frame.filter(columns).apply(tfun, *args, **kwargs)
    transformed = frame.copy()
    for col in columns:
        transformed[col] = tfun(frame[col], *args, **kwargs)
    return transformed
Apply transform to specified columns .
22,225
def transform(self, x, use_spln=False, **kwargs):
    """Apply this transformation to x (coerced to a float array).

    use_spln=True evaluates via the cached spline, building it on first use;
    otherwise the exact transform function self.tfun is applied.
    """
    x = asarray(x, dtype=float)
    if not use_spln:
        return self.tfun(x, *self.args, **self.kwargs)
    if self.spln is None:
        self.set_spline(x.min(), x.max(), **kwargs)
    return apply_along_axis(self.spln, 0, x)
Apply transform to x
22,226
def pypi_register(server='pypitest'):
    """Register this package with PyPI (the test server by default)."""
    command = 'python setup.py register'
    if server == 'pypitest':
        command += ' -r https://testpypi.python.org/pypi'
    _execute_setup_command(command)
Register and prep user for PyPi upload .
22,227
def apply_format(var, format_str):
    """Recursively format every non-iterable inside var using format_str.

    Lists and tuples are rendered with matching brackets; scalars are
    formatted directly.
    """
    if not isinstance(var, (list, tuple)):
        return format_str.format(var)
    formatted = [apply_format(item, format_str) for item in var]
    open_b, close_b = ('(', ')') if isinstance(var, tuple) else ('[', ']')
    joined = open_b + ', '.join(formatted) + close_b
    return '{}'.format(joined)
Format all non - iterables inside of the iterable var using the format_str
22,228
def _check_spawnable ( source_channels , target_channels ) : if len ( target_channels ) != len ( set ( target_channels ) ) : raise Exception ( 'Spawn channels must be unique' ) return source_channels . issubset ( set ( target_channels ) )
Check whether gate is spawnable on the target channels .
22,229
def key_press_handler(event, canvas, toolbar=None):
    """Handle keyboard shortcuts for the FCToolbar.

    1: polygon gate; 2/3/4: quad / horizontal / vertical threshold gate;
    9: remove active gate; 0: load an FCS file; a/b/c: switch displayed
    axes; 8: print the gate-generation code.
    """
    if event.key is None:
        return
    # BUG FIX: the original did ``event.key.encode('ascii', 'ignore')``,
    # which under Python 3 produces bytes, so every comparison below
    # (b'1' in ['1']) was False and all shortcuts were dead.
    key = event.key
    if key == '1':
        toolbar.create_gate_widget(kind='poly')
    elif key in ('2', '3', '4'):
        kind = {'2': 'quad', '3': 'horizontal threshold', '4': 'vertical threshold'}[key]
        toolbar.create_gate_widget(kind=kind)
    elif key == '9':
        toolbar.remove_active_gate()
    elif key == '0':
        toolbar.load_fcs()
    elif key == 'a':
        toolbar.set_axes(('d1', 'd2'), pl.gca())
    elif key == 'b':
        toolbar.set_axes(('d2', 'd1'), pl.gca())
    elif key == 'c':
        toolbar.set_axes(('d1', 'd3'), pl.gca())
    elif key == '8':
        print(toolbar.get_generation_code())
Handles keyboard shortcuts for the FCToolbar .
22,230
def add_callback(self, func):
    """Register one or more callback functions on self.callback_list.
    None is silently ignored."""
    if func is None:
        return
    funcs = to_list(func)
    if hasattr(self, 'callback_list'):
        self.callback_list.extend(funcs)
    else:
        self.callback_list = funcs
Registers a call back function
22,231
def create_artist(self):
    """Create the Line2D artist for this vertex and add it to the axis.

    The transform is chosen so that any coordinate direction the vertex
    does not track is drawn in axis-relative ('grid') coordinates.
    """
    verts = self.coordinates
    if not self.tracky:
        # only x is tracked: y rendered in axis-relative coordinates
        trans = self.ax.get_xaxis_transform(which='grid')
    elif not self.trackx:
        # only y is tracked: x rendered in axis-relative coordinates
        trans = self.ax.get_yaxis_transform(which='grid')
    else:
        trans = self.ax.transData
    # picker=15 gives a generous pick radius for grabbing the vertex
    self.artist = pl.Line2D([verts[0]], [verts[1]], transform=trans, picker=15)
    self.update_looks('inactive')
    self.ax.add_artist(self.artist)
Creates the Line2D artist for this vertex, choosing a transform based on which axis directions it tracks, and adds it to the axis.
22,232
def ignore(self, event):
    """Return True when the event happened in a different axis than self.ax.

    Events without an 'inaxes' attribute fall through, returning None
    (treated as falsy by callers).
    """
    if not hasattr(event, 'inaxes'):
        return
    return event.inaxes != self.ax
Returns True if the event occurred outside this gate's axis and should be ignored.
22,233
def spawn(self, channels, ax):
    """Spawn (and track) a graphical gate on the given channels and axis.
    Returns the spawned gate, or None when the channels are not spawnable."""
    if not _check_spawnable(self.source_channels, channels):
        return None
    sgate = self.gate_type(self.verts, ax, channels)
    self.spawn_list.append(sgate)
    return sgate
Spawns a graphical gate that can be used to update the coordinates of the current gate .
22,234
def remove_spawned_gates(self, spawn_gate=None):
    """Remove the given spawned gate, or every spawned gate when None."""
    if spawn_gate is not None:
        spawn_gate.remove()
        self.spawn_list.remove(spawn_gate)
        return
    for gate in list(self.spawn_list):
        self.spawn_list.remove(gate)
        gate.remove()
Removes the given spawned gate, or all spawned gates if none is specified.
22,235
def get_generation_code(self, **gencode):
    """Generate python code that can recreate this gate.

    Keyword overrides in **gencode replace the defaults taken from the gate
    itself (name, region, gate_type, verts, channels).
    """
    channels, verts = self.coordinates
    channels = ', '.join(["'{}'".format(ch) for ch in channels])
    verts = list(verts)
    # unwrap singleton nesting so a lone vertex prints without brackets
    if len(verts) == 1:
        verts = verts[0]
    if len(verts) == 1:
        verts = verts[0]
    verts = apply_format(verts, '{:.3e}')
    gencode.setdefault('name', self.name)
    gencode.setdefault('region', self.region)
    gencode.setdefault('gate_type', self._gencode_gate_class)
    gencode.setdefault('verts', verts)
    gencode.setdefault('channels', channels)
    format_string = "{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')"
    return format_string.format(**gencode)
Generates python code that can create the gate .
22,236
def _gencode_gate_class ( self ) : channels , verts = self . coordinates num_channels = len ( channels ) gate_type_name = self . gate_type . __name__ if gate_type_name == 'ThresholdGate' and num_channels == 2 : gate_type_name = 'QuadGate' return gate_type_name
Returns the class name that generates this gate .
22,237
def source_channels(self):
    """Return the set of channel names this gate's vertices are defined on."""
    all_channels = set()
    for vert in self.verts:
        all_channels.update(vert.coordinates.keys())
    return all_channels
Returns a set describing the source channels on which the gate is defined .
22,238
def pick_event_handler(self, event):
    """Dispatch axis-label pick events to self.callback as 'axis_click' events."""
    info = {
        'options': self.get_available_channels(),
        'guiEvent': event.mouseevent.guiEvent,
    }
    if hasattr(self, 'xlabel_artist') and event.artist == self.xlabel_artist:
        info['axis_num'] = 0
        self.callback(Event('axis_click', info))
    if hasattr(self, 'ylabel_artist') and event.artist == self.ylabel_artist:
        info['axis_num'] = 1
        self.callback(Event('axis_click', info))
Handles pick events
22,239
def plot_data(self):
    """Plot the loaded FCS sample on self.ax and make the axis labels pickable."""
    self.ax.cla()
    if self.sample is None:
        return
    if self.current_channels is None:
        # default to the sample's first two channels
        self.current_channels = self.sample.channel_names[:2]
    channels = self.current_channels
    # a single channel is passed bare (1D plot), two as a pair (2D plot)
    channels_to_plot = channels[0] if len(channels) == 1 else channels
    self.sample.plot(channels_to_plot, ax=self.ax)
    xaxis = self.ax.get_xaxis()
    yaxis = self.ax.get_yaxis()
    self.xlabel_artist = xaxis.get_label()
    self.ylabel_artist = yaxis.get_label()
    # picker radius of 5 points lets pick_event_handler catch label clicks
    self.xlabel_artist.set_picker(5)
    self.ylabel_artist.set_picker(5)
    self.fig.canvas.draw()
Plots the loaded data
22,240
def get_generation_code(self):
    """Build, emit (via callback as a 'generated_code' event), and return
    python code recreating all drawn gates."""
    if len(self.gates) < 1:
        code = ''
    else:
        gate_classes = set([gate._gencode_gate_class for gate in self.gates])
        imports_line = 'from FlowCytometryTools import ' + ', '.join(gate_classes)
        snippets = [gate.get_generation_code() for gate in self.gates]
        snippets.sort()
        code = imports_line + 2 * '\n' + '\n'.join(snippets)
    self.callback(Event('generated_code', {'code': code}))
    return code
Return python code that generates all drawn gates .
22,241
def replace(self):
    """Expand '{key}' references in self.doc_dict values using the dict itself.

    Only values containing both '{' and '}' are formatted; all other values
    are left untouched.
    """
    snapshot = self.doc_dict.copy()
    for key, value in snapshot.items():
        # BUG FIX: the original condition ``'{' and '}' in value`` evaluates
        # as ``'}' in value``, so a value containing only '}' was passed to
        # str.format and raised ValueError ("Single '}'").
        if '{' in value and '}' in value:
            self.doc_dict[key] = value.format(**snapshot)
Reformat values inside the self . doc_dict using self . doc_dict
22,242
def _format ( self , doc ) : if self . allow_partial_formatting : mapping = FormatDict ( self . doc_dict ) else : mapping = self . doc_dict formatter = string . Formatter ( ) return formatter . vformat ( doc , ( ) , mapping )
Formats the docstring using self . doc_dict
22,243
def get_package_version(path):
    """Extract the ``version = '...'`` string from the file at path.

    Raises RuntimeError when no version assignment is found.
    """
    # BUG FIX: the original opened the module-level VERSION_FILE and ignored
    # its ``path`` argument (using it only in the error message).
    with open(path, "rt") as f:
        contents = f.read()
    match = re.search(r"^version = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string in {}.".format(path))
    return match.group(1)
Extracts the version
22,244
def _assign_IDS_to_datafiles ( datafiles , parser , measurement_class = None , ** kwargs ) : if isinstance ( parser , collections . Mapping ) : fparse = lambda x : parser [ x ] elif hasattr ( parser , '__call__' ) : fparse = lambda x : parser ( x , ** kwargs ) elif parser == 'name' : kwargs . setdefault ( 'pre' , 'Well_' ) kwargs . setdefault ( 'post' , [ '_' , '\.' , '$' ] ) kwargs . setdefault ( 'tagtype' , str ) fparse = lambda x : get_tag_value ( os . path . basename ( x ) , ** kwargs ) elif parser == 'number' : fparse = lambda x : int ( x . split ( '.' ) [ - 2 ] ) elif parser == 'read' : fparse = lambda x : measurement_class ( ID = 'temporary' , datafile = x ) . ID_from_data ( ** kwargs ) else : raise ValueError ( 'Encountered unsupported value "%s" for parser parameter.' % parser ) d = dict ( ( fparse ( dfile ) , dfile ) for dfile in datafiles ) return d
Assign measurement IDS to datafiles using specified parser .
22,245
def set_data(self, data=None, **kwargs):
    """Load data into memory (reading it via get_data when not supplied),
    then move all queued actions into the history."""
    self._data = self.get_data(**kwargs) if data is None else data
    self.history += self.queue
    self.queue = []
Read data into memory applying all actions in queue . Additionally update queue and history .
22,246
def set_meta(self, meta=None, **kwargs):
    """Store metadata on self._meta, reading it via get_meta when not supplied.
    Nothing is returned."""
    if meta is None:
        meta = self.get_meta(**kwargs)
    self._meta = meta
Assign values to self . meta . Meta is not returned
22,247
def get_data(self, **kwargs):
    """Return the measurement data, first applying any queued actions.
    When no actions are queued, the data is read from file."""
    if self.queue:
        return self.apply_queued().get_data()
    return self._get_attr_from_file('data', **kwargs)
Get the measurement data . If data is not set read from self . datafile using self . read_data .
22,248
def apply(self, func, applyto='measurement', noneval=nan, setdata=False):
    """Apply func either to self or to the associated data.

    applyto='measurement' passes self to func. applyto='data' passes the
    data (reading it from datafile if needed, returning noneval when there
    is no data source); setdata=True caches freshly read data on self.data.
    """
    applyto = applyto.lower()
    if applyto == 'measurement':
        return func(self)
    if applyto != 'data':
        raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto)
    if self.data is not None:
        data = self.data
    elif self.datafile is None:
        return noneval
    else:
        data = self.read_data()
        if setdata:
            self.data = data
    return func(data)
Apply func either to self or to associated data . If data is not already parsed try and read it .
22,249
def from_files(cls, ID, datafiles, parser, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs):
    """Create a Collection of measurements from a set of data files.

    Measurement IDs are derived from the datafiles via parser (see
    _assign_IDS_to_datafiles). The mutable dict defaults are part of the
    public signature and are only passed through, never mutated here.
    Raises IOError when a file cannot be parsed.
    """
    d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs)
    measurements = []
    for sID, dfile in d.items():
        try:
            measurements.append(cls._measurement_class(sID, datafile=dfile, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs))
        # BUG FIX: a bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch only real errors.
        except Exception:
            msg = 'Error occurred while trying to parse file: %s' % dfile
            raise IOError(msg)
    return cls(ID, measurements)
Create a Collection of measurements from a set of data files .
22,250
def filter(self, criteria, applyto='measurement', ID=None):
    """Return a copy retaining only measurements for which criteria(...) is True.

    applyto selects what criteria receives: a mapping keyed like self
    (criteria gets applyto[key]), the measurement itself ('measurement'),
    its key ('keys'), or its data ('data'). Raises ValueError otherwise.
    """
    # collections.Mapping was removed in Python 3.10; use collections.abc
    from collections import abc
    fil = criteria
    new = self.copy()
    if isinstance(applyto, abc.Mapping):
        remove = (k for k, v in self.items() if not fil(applyto[k]))
    elif applyto == 'measurement':
        remove = (k for k, v in self.items() if not fil(v))
    elif applyto == 'keys':
        remove = (k for k, v in self.items() if not fil(k))
    elif applyto == 'data':
        remove = (k for k, v in self.items() if not fil(v.get_data()))
    else:
        raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto)
    for r in remove:
        del new[r]
    if ID is None:
        ID = self.ID
    new.ID = ID
    return new
Filter measurements according to given criteria . Retain only Measurements for which criteria returns True .
22,251
def filter_by_key(self, keys, ID=None):
    """Return a copy keeping only measurements whose key is in keys."""
    wanted = to_list(keys)
    if ID is None:
        ID = self.ID
    return self.filter(lambda k: k in wanted, applyto='keys', ID=ID)
Keep only Measurements with given keys .
22,252
def filter_by_IDs(self, ids, ID=None):
    """Return a copy keeping only measurements whose ID attribute is in ids."""
    return self.filter_by_attr('ID', lambda x: x in ids, ID)
Keep only Measurements with given IDs .
22,253
def filter_by_rows(self, rows, ID=None):
    """Return a copy keeping only measurements positioned in the given rows."""
    wanted = to_list(rows)
    row_of = {k: self._positions[k][0] for k in self.keys()}
    if ID is None:
        ID = self.ID
    return self.filter(lambda r: r in wanted, applyto=row_of, ID=ID)
Keep only Measurements in corresponding rows .
22,254
def filter_by_cols(self, cols, ID=None):
    """Return a copy keeping only measurements positioned in the given columns."""
    wanted = to_list(cols)
    col_of = {k: self._positions[k][1] for k in self.keys()}
    if ID is None:
        # NOTE(review): unlike filter_by_rows, this appends a suffix to the
        # default ID — confirm the asymmetry is intentional
        ID = self.ID + '.filtered_by_cols'
    return self.filter(lambda c: c in wanted, applyto=col_of, ID=ID)
Keep only Measurements in corresponding columns .
22,255
def from_files(cls, ID, datafiles, parser='name', position_mapper=None, readdata_kwargs={}, readmeta_kwargs={}, ID_kwargs={}, **kwargs):
    """Create an OrderedCollection of measurements from a set of data files.

    position_mapper defaults to the parser name when parser is a string;
    a custom (callable/mapping) parser requires an explicit position_mapper.
    The mutable dict defaults are part of the public signature and are only
    passed through, never mutated. Raises IOError when a file fails to parse.
    """
    if position_mapper is None:
        if isinstance(parser, six.string_types):
            position_mapper = parser
        else:
            msg = "When using a custom parser, you must specify the position_mapper keyword."
            raise ValueError(msg)
    d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs)
    measurements = []
    for sID, dfile in d.items():
        try:
            measurements.append(cls._measurement_class(sID, datafile=dfile, readdata_kwargs=readdata_kwargs, readmeta_kwargs=readmeta_kwargs))
        # BUG FIX: bare ``except:`` swallowed KeyboardInterrupt/SystemExit;
        # also fixed the 'occured' typo for consistency with the sibling
        # Collection.from_files message.
        except Exception:
            msg = 'Error occurred while trying to parse file: %s' % dfile
            raise IOError(msg)
    return cls(ID, measurements, position_mapper, **kwargs)
Create an OrderedCollection of measurements from a set of data files .
22,256
def _is_valid_position ( self , position ) : row , col = position valid_r = row in self . row_labels valid_c = col in self . col_labels return valid_r and valid_c
check if given position is valid for this collection
22,257
def _get_ID2position_mapper ( self , position_mapper ) : def num_parser ( x , order ) : i , j = unravel_index ( int ( x - 1 ) , self . shape , order = order ) return ( self . row_labels [ i ] , self . col_labels [ j ] ) if hasattr ( position_mapper , '__call__' ) : mapper = position_mapper elif isinstance ( position_mapper , collections . Mapping ) : mapper = lambda x : position_mapper [ x ] elif position_mapper == 'name' : mapper = lambda x : ( x [ 0 ] , int ( x [ 1 : ] ) ) elif position_mapper in ( 'row_first_enumerator' , 'number' ) : mapper = lambda x : num_parser ( x , 'F' ) elif position_mapper == 'col_first_enumerator' : mapper = lambda x : num_parser ( x , 'C' ) else : msg = '"{}" is not a known key_to_position_parser.' . format ( position_mapper ) raise ValueError ( msg ) return mapper
Defines a position parser that is used to map between sample IDs and positions .
22,258
def set_positions(self, positions=None, position_mapper='name', ids=None):
    """Assign grid positions to measurements.

    Parameters
    ----------
    positions : dict mapping ID -> position; if None, positions are derived
        from the IDs using ``position_mapper``.
    position_mapper : parser name / mapping / callable
        (see _get_ID2position_mapper).
    ids : optional subset of measurement IDs to assign (default: all).

    Checks position validity and collisions, but does not require that all
    measurements are assigned.
    """
    if positions is None:
        if ids is None:
            ids = self.keys()
        else:
            ids = to_list(ids)
        mapper = self._get_ID2position_mapper(position_mapper)
        positions = dict((ID, mapper(ID)) for ID in ids)
    else:
        pass
    # Collision check: merge into a copy first so existing assignments count.
    temp = self._positions.copy()
    temp.update(positions)
    if not len(temp.values()) == len(set(temp.values())):
        msg = 'A position can only be occupied by a single measurement'
        raise Exception(msg)
    for k, pos in positions.items():
        if not self._is_valid_position(pos):
            msg = 'Position {} is not supported for this collection'.format(pos)
            raise ValueError(msg)
        self._positions[k] = pos
        # keep the measurement's own record of its position in sync
        self[k]._set_position(self.ID, pos)
Checks for position validity and collisions, but does not require that all measurements are assigned.
22,259
def get_positions(self, copy=True):
    """Return the ID -> position mapping (a copy unless ``copy`` is False)."""
    if not copy:
        return self._positions
    return self._positions.copy()
Get a dictionary of measurement positions .
22,260
def dropna(self):
    """Remove rows and columns with no assigned measurements; return a new instance."""
    new = self.copy()
    # _dict2DF lays the measurements out on the grid with ``nan`` filling
    # empty positions; the True flag drops all-empty rows/columns.
    tmp = self._dict2DF(self, nan, True)
    new.row_labels = list(tmp.index)
    new.col_labels = list(tmp.columns)
    return new
Remove rows and cols that have no assigned measurements . Return new instance .
22,261
def validate_input(self):
    """Raise ValueError if the gate's vertical bounds are not strictly increasing."""
    low = self.vert[0]
    high = self.vert[1]
    if high <= low:
        raise ValueError(u'{} must be larger than {}'.format(high, low))
Raise appropriate exception if gate was defined incorrectly .
22,262
def _identify ( self , dataframe ) : idx = ( ( dataframe [ self . channels [ 0 ] ] <= self . vert [ 1 ] ) & ( dataframe [ self . channels [ 0 ] ] >= self . vert [ 0 ] ) ) if self . region == 'out' : idx = ~ idx return idx
Return a boolean Series which is True for indexes that pass the gate.
22,263
def _get_paths ( ) : import os base_path = os . path . dirname ( os . path . abspath ( __file__ ) ) test_data_dir = os . path . join ( base_path , 'tests' , 'data' , 'Plate01' ) test_data_file = os . path . join ( test_data_dir , 'RFP_Well_A3.fcs' ) return test_data_dir , test_data_file
Generate paths to test data . Done in a function to protect namespace a bit .
22,264
def upload_prev(ver, doc_root='./'):
    """Push a copy of an older release's docs (html + pdf) to its version directory.

    Raises SystemExit if either rsync/scp command fails.
    """
    local_dir = doc_root + 'build/html'
    remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
    cmd = 'cd %s; rsync -avz . pandas@pandas.pydata.org:%s -essh'
    cmd = cmd % (local_dir, remote_dir)
    # BUG FIX: was a Python-2-only ``print cmd`` statement; print() with a
    # single argument behaves identically on both Python 2 and 3.
    print(cmd)
    if os.system(cmd):
        raise SystemExit('Upload to %s from %s failed' % (remote_dir, local_dir))
    local_dir = doc_root + 'build/latex'
    pdf_cmd = 'cd %s; scp pandas.pdf pandas@pandas.pydata.org:%s'
    pdf_cmd = pdf_cmd % (local_dir, remote_dir)
    if os.system(pdf_cmd):
        raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
push a copy of older release to appropriate version directory
22,265
def plotFCM(data, channel_names, kind='histogram', ax=None,
            autolabel=True, xlabel_kwargs=None, ylabel_kwargs=None,
            colorbar=False, grid=False, **kwargs):
    """Plot flow-cytometry data on the given (or current) axis.

    Parameters
    ----------
    data : tabular FCM data indexed by channel name.
    channel_names : one channel (1d histogram) or two (scatter / 2d histogram).
    kind : 'histogram' or 'scatter' (only used for two channels).
    ax : target axis (default: current axis).
    autolabel : label the axes with the channel names.
    colorbar : add a colorbar for the 2d histogram.

    Returns
    -------
    The hist/scatter/hist2d output, or None if there is nothing to plot.
    """
    if ax is None:  # BUG FIX: was ``ax == None``; identity test is the correct idiom
        ax = pl.gca()
    # BUG FIX: the original used mutable {} defaults and mutated them with
    # setdefault; copy caller dicts instead of sharing/mutating them.
    xlabel_kwargs = {} if xlabel_kwargs is None else dict(xlabel_kwargs)
    ylabel_kwargs = {} if ylabel_kwargs is None else dict(ylabel_kwargs)
    xlabel_kwargs.setdefault('size', 16)
    ylabel_kwargs.setdefault('size', 16)
    channel_names = to_list(channel_names)
    if len(channel_names) == 1:
        # 1d histogram
        kwargs.setdefault('color', 'gray')
        kwargs.setdefault('histtype', 'stepfilled')
        kwargs.setdefault('bins', 200)
        x = data[channel_names[0]].values
        if len(x) >= 1:
            if (len(x) == 1) and isinstance(kwargs['bins'], int):
                warnings.warn("One of the data sets only has a single event. "
                              "This event won't be plotted unless the bin locations"
                              " are explicitly provided to the plotting function. ")
                return None
            plot_output = ax.hist(x, **kwargs)
        else:
            return None
    elif len(channel_names) == 2:
        x = data[channel_names[0]].values
        y = data[channel_names[1]].values
        if len(x) == 0:
            return None
        if kind == 'scatter':
            kwargs.setdefault('edgecolor', 'none')
            plot_output = ax.scatter(x, y, **kwargs)
        elif kind == 'histogram':
            kwargs.setdefault('bins', 200)
            kwargs.setdefault('cmin', 1)
            kwargs.setdefault('cmap', pl.cm.copper)
            kwargs.setdefault('norm', matplotlib.colors.LogNorm())
            plot_output = ax.hist2d(x, y, **kwargs)
            mappable = plot_output[-1]
            if colorbar:
                pl.colorbar(mappable, ax=ax)
        else:
            raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
    else:
        raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
    pl.grid(grid)
    if autolabel:
        y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
        ax.set_xlabel(channel_names[0], **xlabel_kwargs)
        ax.set_ylabel(y_label_text, **ylabel_kwargs)
    return plot_output
Plots the sample on the current axis .
22,266
def autoscale_subplots(subplots=None, axis='both'):
    """Synchronize subplot limits to the most extreme data intervals seen.

    Parameters
    ----------
    subplots : iterable/array of axes (default: all axes of the current figure).
    axis : which axes to rescale -- 'x', 'y', 'both'/'xy'/'yx', or ''/'none'.

    Raises ValueError on an unknown axis option.
    """
    axis_options = ('x', 'y', 'both', 'none', '', 'xy', 'yx')
    if axis.lower() not in axis_options:
        raise ValueError('axis must be in {0}'.format(axis_options))
    # NOTE(review): validation uses axis.lower() but the membership tests in
    # the loop use the raw ``axis`` -- a mixed-case value like 'X' would pass
    # validation yet rescale nothing. Confirm intended behavior.
    if subplots is None:
        subplots = plt.gcf().axes
    # collect each subplot's data interval, then take the global extremes
    data_limits = [(ax.xaxis.get_data_interval(), ax.yaxis.get_data_interval())
                   for loc, ax in numpy.ndenumerate(subplots)]
    xlims, ylims = zip(*data_limits)
    xmins_list, xmaxs_list = zip(*xlims)
    ymins_list, ymaxs_list = zip(*ylims)
    xmin = numpy.min(xmins_list)
    xmax = numpy.max(xmaxs_list)
    ymin = numpy.min(ymins_list)
    ymax = numpy.max(ymaxs_list)
    for loc, ax in numpy.ndenumerate(subplots):
        if axis in ('x', 'both', 'xy', 'yx'):
            ax.set_xlim((xmin, xmax))
        if axis in ('y', 'both', 'xy', 'yx'):
            ax.set_ylim((ymin, ymax))
Sets the x and y axis limits for each subplot to match the x and y axis limits of the most extreme data points encountered .
22,267
def scale_subplots(subplots=None, xlim='auto', ylim='auto'):
    """Set x/y limits on every subplot.

    An axis given as 'auto' is synchronized across subplots to the extreme
    data values; an explicit (min, max) is applied to every subplot as-is.
    """
    auto_axis = ''
    if xlim == 'auto':
        auto_axis += 'x'
    if ylim == 'auto':
        auto_axis += 'y'
    autoscale_subplots(subplots, auto_axis)
    for _, axes in numpy.ndenumerate(subplots):
        if 'x' not in auto_axis:
            axes.set_xlim(xlim)
        if 'y' not in auto_axis:
            axes.set_ylim(ylim)
Set the x and y axis limits for a collection of subplots .
22,268
def _plot_table(matrix, text_format='{:.2f}', cmap=None, **kwargs):
    """Plot a numpy matrix as a table of text values on the current axis.

    Parameters
    ----------
    matrix : 2d numpy array of values to render.
    text_format : format string applied to each value.
    cmap : optional colormap; when given (and no explicit 'color' kwarg),
        each cell's text color is taken from its value normalized over the
        matrix's nan-aware min/max.
    kwargs : forwarded to plt.text.
    """
    shape = matrix.shape
    xtick_pos = numpy.arange(shape[1])
    ytick_pos = numpy.arange(shape[0])
    xtick_grid, ytick_grid = numpy.meshgrid(xtick_pos, ytick_pos)
    vmax = numpy.nanmax(matrix)
    vmin = numpy.nanmin(matrix)
    if not kwargs.get('color', None) and cmap is not None:
        use_cmap = True
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=False)
    else:
        use_cmap = False
    for (row, col), w in numpy.ndenumerate(matrix):
        x = xtick_grid[row, col]
        y = ytick_grid[row, col]
        if use_cmap:
            # color the text by the cell's normalized value
            kwargs['color'] = cmap(norm(w))
        plt.text(x, y, text_format.format(w),
                 horizontalalignment='center', verticalalignment='center',
                 transform=plt.gca().transData, **kwargs)
Plot a numpy matrix as a table . Uses the current axis bounding box to decide on limits . text_format specifies the formatting to apply to the values .
22,269
def _set_tick_lines_visibility ( ax , visible = True ) : for i , thisAxis in enumerate ( ( ax . get_xaxis ( ) , ax . get_yaxis ( ) ) ) : for thisItem in thisAxis . get_ticklines ( ) : if isinstance ( visible , list ) : thisItem . set_visible ( visible [ i ] ) else : thisItem . set_visible ( visible )
Set the visibility of the tick lines of the requested axis .
22,270
def _set_tick_labels_visibility ( ax , visible = True ) : for i , thisAxis in enumerate ( ( ax . get_xaxis ( ) , ax . get_yaxis ( ) ) ) : for thisItem in thisAxis . get_ticklabels ( ) : if isinstance ( visible , list ) : thisItem . set_visible ( visible [ i ] ) else : thisItem . set_visible ( visible )
Set the visibility of the tick labels of the requested axis .
22,271
def extract_annotation(data):
    """Extract axis labels and tick values from a pandas object.

    Looks at minor_axis/columns for the x axis and major_axis/index for the
    y axis (Panel vs DataFrame attribute names), the later attribute winning
    when both exist. Returns (xlabel, xvalues, ylabel, yvalues); missing
    pieces are None.
    """
    xlabel = xvalues = ylabel = yvalues = None
    # x axis: Panel exposes minor_axis, DataFrame exposes columns
    for attr in ('minor_axis', 'columns'):
        if hasattr(data, attr):
            axis = getattr(data, attr)
            xvalues = axis
            if hasattr(axis, 'name'):
                xlabel = axis.name
    # y axis: Panel exposes major_axis, DataFrame exposes index
    for attr in ('major_axis', 'index'):
        if hasattr(data, attr):
            axis = getattr(data, attr)
            yvalues = axis
            if hasattr(axis, 'name'):
                ylabel = axis.name
    return xlabel, xvalues, ylabel, yvalues
Extract names and values of rows and columns .
22,272
def transform_using_this_method(original_sample):
    """Example transformation: log-transform the 'Y2-A' channel of a sample.

    Copies the sample, applies ``log`` to the 'Y2-A' column, drops rows that
    became NaN (non-positive values), and returns the new sample.
    """
    new_sample = original_sample.copy()
    new_data = new_sample.data
    new_data['Y2-A'] = log(new_data['Y2-A'])
    new_data = new_data.dropna()  # remove events whose log is undefined
    new_sample.data = new_data
    return new_sample
This function implements a log transformation on the data .
22,273
def read_data(self, **kwargs):
    """Parse Sample.datafile and return the data.

    Does NOT assign the result to self.data; kwargs go to parse_fcs.
    """
    _, data = parse_fcs(self.datafile, **kwargs)
    return data
Read the datafile specified in Sample . datafile and return the resulting object . Does NOT assign the data to self . data
22,274
def get_meta_fields(self, fields, kwargs={}):
    """Return {field: value} for each requested metadata field (None if absent).

    NOTE(review): ``kwargs`` is accepted but unused; kept for interface
    compatibility.
    """
    requested = to_list(fields)
    meta = self.get_meta()
    return dict((name, meta.get(name)) for name in requested)
Return a dictionary of metadata fields
22,275
def plot(self, channel_names, kind='histogram', gates=None, gate_colors=None,
         gate_lw=1, **kwargs):
    """Plot the sample's flow-cytometry data on the current (or given) axis.

    Parameters
    ----------
    channel_names : one channel (histogram) or two (2d plot).
    kind : plot type, forwarded to graph.plotFCM.
    gates : gates to overlay; gate_colors and gate_lw cycle over them.
    kwargs : forwarded to graph.plotFCM (may include ``ax``).

    Returns the plotFCM output.
    """
    ax = kwargs.get('ax')
    channel_names = to_list(channel_names)
    gates = to_list(gates)
    plot_output = graph.plotFCM(self.data, channel_names, kind=kind, **kwargs)
    if gates is not None:
        if gate_colors is None:
            gate_colors = cycle(('b', 'g', 'r', 'm', 'c', 'y'))
        if not isinstance(gate_lw, collections.Iterable):
            # NOTE(review): collections.Iterable is collections.abc.Iterable
            # on py3.3+ and removed from collections in py3.10 -- confirm
            # supported Python versions.
            gate_lw = [gate_lw]
        gate_lw = cycle(gate_lw)
        for (g, c, lw) in zip(gates, gate_colors, gate_lw):
            g.plot(ax=ax, ax_channels=channel_names, color=c, lw=lw)
    return plot_output
Plot the flow cytometry data associated with the sample on the current axis .
22,276
def view(self, channel_names='auto', gates=None, diag_kw={}, offdiag_kw={},
         gate_colors=None, **kwargs):
    """Show a matrix of subplots to inspect the sample across channel pairs.

    Diagonal cells show 1d histograms; off-diagonal cells are also drawn as
    histograms of the channel pair.
    channel_names : 'auto' uses all of the sample's channels.

    NOTE(review): ``diag_kw`` and ``offdiag_kw`` are accepted but never
    used inside the function -- confirm whether they should be forwarded.
    """
    if channel_names == 'auto':
        channel_names = list(self.channel_names)

    def plot_region(channels, **kwargs):
        # diagonal cell: collapse the (c, c) pair to one channel
        if channels[0] == channels[1]:
            channels = channels[0]
        kind = 'histogram'
        self.plot(channels, kind=kind, gates=gates, gate_colors=gate_colors,
                  autolabel=False)
    channel_list = np.array(list(channel_names), dtype=object)
    # grid of (x_channel, y_channel) pairs, one per subplot
    channel_mat = [[(x, y) for x in channel_list] for y in channel_list]
    channel_mat = DataFrame(channel_mat, columns=channel_list, index=channel_list)
    kwargs.setdefault('wspace', 0.1)
    kwargs.setdefault('hspace', 0.1)
    return plot_ndpanel(channel_mat, plot_region, **kwargs)
Generates a matrix of subplots allowing for a quick way to examine how the sample looks in different channels .
22,277
def view_interactively(self, backend='wx'):
    """Load the current sample in a graphical interface for drawing gates.

    Parameters
    ----------
    backend : 'wx', 'webagg', or 'auto' (picks WebAgg on matplotlib >= 1.4.3).

    Raises ValueError for an unsupported backend.
    """
    if backend == 'auto':
        # NOTE(review): lexicographic version comparison is fragile
        # (e.g. '1.10.0' < '1.4.3'); worth replacing with a tuple compare.
        if matplotlib.__version__ >= '1.4.3':
            backend = 'WebAgg'
        else:
            backend = 'wx'
    # BUG FIX: the 'auto' branch produced 'WebAgg', which the original
    # compared against 'webagg' and therefore always raised ValueError.
    backend = backend.lower()
    if backend == 'wx':
        from FlowCytometryTools.gui.wx_backend import gui
    elif backend == 'webagg':
        from FlowCytometryTools.gui.webagg_backend import gui
    else:
        raise ValueError('No support for backend {}'.format(backend))
    gui.GUILauncher(measurement=self)
Loads the current sample in a graphical interface for drawing gates .
22,278
def transform(self, transform, direction='forward', channels=None, return_all=True,
              auto_range=True, use_spln=True, get_transformer=False, ID=None,
              apply_now=True, args=(), **kwargs):
    """Apply a transformation to the specified channels and return a new Measurement.

    Parameters
    ----------
    transform : a Transformation instance or a named transform
        ('hlog', 'tlog', 'hlog_inv', 'tlog_inv', ...).
    channels : channels to transform (default: all data columns).
    return_all : keep untransformed channels in the result as well.
    auto_range : derive the 'd' parameter from the channels' $PnR range.
    get_transformer : also return the Transformation used.
    ID : optional new ID for the returned measurement.
    """
    new = self.copy()
    data = new.data
    channels = to_list(channels)
    if channels is None:
        channels = data.columns
    if isinstance(transform, Transformation):
        transformer = transform
    else:
        if auto_range:
            if 'd' in kwargs:
                warnings.warn('Encountered both auto_range=True and user-specified range value in '
                              'parameter d.\n Range value specified in parameter d is used.')
            else:
                channel_meta = self.channels
                # $PnR holds the acquisition range of each channel
                ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows()
                          if self.channel_names[i - 1] in channels]
                if not np.allclose(ranges, ranges[0]):
                    # CONSISTENCY FIX: raise with the same message used by the
                    # collection-level transform (was a bare ``Exception()``).
                    raise Exception('Not all specified channels have the same '
                                    'data range, therefore they cannot be '
                                    'transformed together.')
                if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}:
                    kwargs['d'] = np.log10(ranges[0])
        transformer = Transformation(transform, direction, args, **kwargs)
    transformed = transformer(data[channels], use_spln)
    if return_all:
        new_data = data
    else:
        new_data = data.filter(channels)
    new_data[channels] = transformed
    new.data = new_data
    if ID is not None:
        new.ID = ID
    if get_transformer:
        return new, transformer
    else:
        return new
Applies a transformation to the specified channels .
22,279
def transform(self, transform, direction='forward', share_transform=True,
              channels=None, return_all=True, auto_range=True, use_spln=True,
              get_transformer=False, ID=None, apply_now=True, args=(), **kwargs):
    """Apply a transformation to each Measurement in the Collection.

    When ``share_transform`` is True a single Transformation (with one
    shared spline) is built from the first measurement's channel metadata
    and applied to all measurements; otherwise each measurement builds its
    own. Returns a new Collection (plus the transformer if
    ``get_transformer`` and ``share_transform``).
    """
    new = self.copy()
    if share_transform:
        # use the first measurement's channel metadata for everyone
        channel_meta = list(self.values())[0].channels
        channel_names = list(self.values())[0].channel_names
        if channels is None:
            channels = list(channel_names)
        else:
            channels = to_list(channels)
        if isinstance(transform, Transformation):
            transformer = transform
        else:
            if auto_range:
                if 'd' in kwargs:
                    warnings.warn('Encountered both auto_range=True and user-specified range '
                                  'value in parameter d.\n '
                                  'Range value specified in parameter d is used.')
                else:
                    # $PnR holds the acquisition range of each channel
                    ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows()
                              if channel_names[i - 1] in channels]
                    if not np.allclose(ranges, ranges[0]):
                        raise Exception('Not all specified channels have the same '
                                        'data range, therefore they cannot be '
                                        'transformed together.')
                    if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}:
                        kwargs['d'] = np.log10(ranges[0])
            transformer = Transformation(transform, direction, args, **kwargs)
            if use_spln:
                # fit one spline over the global min/max of all measurements
                xmax = self.apply(lambda x: x[channels].max().max(), applyto='data').max().max()
                xmin = self.apply(lambda x: x[channels].min().min(), applyto='data').min().min()
                transformer.set_spline(xmin, xmax)
        for k, v in new.items():
            new[k] = v.transform(transformer, channels=channels, return_all=return_all,
                                 use_spln=use_spln, apply_now=apply_now)
    else:
        # each measurement derives its own transformation
        for k, v in new.items():
            new[k] = v.transform(transform, direction=direction, channels=channels,
                                 return_all=return_all, auto_range=auto_range,
                                 get_transformer=False, use_spln=use_spln,
                                 apply_now=apply_now, args=args, **kwargs)
    if ID is not None:
        new.ID = ID
    if share_transform and get_transformer:
        return new, transformer
    else:
        return new
Apply transform to each Measurement in the Collection .
22,280
def gate(self, gate, ID=None, apply_now=True):
    """Apply ``gate`` to every Measurement, returning a new gated Collection."""
    apply_gate = lambda well: well.gate(gate, apply_now=apply_now)
    return self.apply(apply_gate, output_format='collection', ID=ID)
Applies the gate to each Measurement in the Collection returning a new Collection with gated data .
22,281
def counts(self, ids=None, setdata=False, output_format='DataFrame'):
    """Return the event counts of each specified measurement."""
    return self.apply(lambda x: x.counts, ids=ids, setdata=setdata,
                      output_format=output_format)
Return the counts in each of the specified measurements .
22,282
def plot(self, channel_names, kind='histogram', gates=None, gate_colors=None,
         ids=None, row_labels=None, col_labels=None, xlim='auto', ylim='auto',
         autolabel=True, **kwargs):
    """Produce a grid plot: one subplot per measurement position.

    Histogram bins are shared across all samples (when ``bins`` is an int)
    so the subplots are directly comparable. Keyword arguments accepted by
    grid_plot are routed there; the rest go to each sample's plot call.
    """
    # split kwargs: anything grid_plot accepts is routed to grid_plot
    grid_arg_list = inspect.getargspec(OrderedCollection.grid_plot).args
    # NOTE(review): inspect.getargspec was removed in Python 3.11
    # (use getfullargspec) -- confirm supported Python versions.
    grid_plot_kwargs = {'ids': ids, 'row_labels': row_labels, 'col_labels': col_labels}
    for key, value in list(kwargs.items()):
        if key in grid_arg_list:
            kwargs.pop(key)
            grid_plot_kwargs[key] = value
    channel_names = to_list(channel_names)
    if kind == 'histogram':
        nbins = kwargs.get('bins', 200)
        if isinstance(nbins, int):
            # compute shared bin edges from the global min/max per channel
            min_list = []
            max_list = []
            for sample in self:
                min_list.append(self[sample].data[channel_names].min().values)
                max_list.append(self[sample].data[channel_names].max().values)
            min_list = list(zip(*min_list))
            max_list = list(zip(*max_list))
            bins = []
            for i, c in enumerate(channel_names):
                min_v = min(min_list[i])
                max_v = max(max_list[i])
                bins.append(np.linspace(min_v, max_v, nbins))
            if len(channel_names) == 1:
                bins = bins[0]
            kwargs['bins'] = bins

    def plot_sample(sample, ax):
        return sample.plot(channel_names, ax=ax, gates=gates,
                           gate_colors=gate_colors, colorbar=False,
                           kind=kind, autolabel=False, **kwargs)
    xlabel, ylabel = None, None
    if autolabel:
        cnames = to_list(channel_names)
        xlabel = cnames[0]
        if len(cnames) == 2:
            ylabel = cnames[1]
    return self.grid_plot(plot_sample, xlim=xlim, ylim=ylim,
                          xlabel=xlabel, ylabel=ylabel, **grid_plot_kwargs)
Produces a grid plot with each subplot corresponding to the data at the given position .
22,283
def obj2unicode(obj):
    """Return a unicode representation of an arbitrary python object.

    Bytes are decoded as UTF-8, substituting replacement characters (and
    logging to stderr) on decode failure.
    """
    if isinstance(obj, unicode_type):
        return obj
    if isinstance(obj, bytes_type):
        try:
            return unicode_type(obj, 'utf-8')
        except UnicodeDecodeError as strerror:
            sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
            return unicode_type(obj, 'utf-8', 'replace')
    return unicode_type(obj)
Return a unicode representation of a python object
22,284
def set_chars(self, array):
    """Set the 4 characters used to draw lines: (horizontal, vertical, corner, header).

    Only the first character of each element is kept. Returns self for
    chaining; raises ArraySizeError if ``array`` does not have 4 elements.
    """
    if len(array) != 4:
        raise ArraySizeError("array should contain 4 characters")
    chars = [str(item)[:1] for item in array]
    (self._char_horiz, self._char_vert,
     self._char_corner, self._char_header) = chars
    return self
Set the characters used to draw lines between rows and columns
22,285
def set_header_align(self, array):
    """Set the desired header alignment; returns self for chaining."""
    self._check_row_size(array)  # length must match the column count
    self._header_align = array
    return self
Set the desired header alignment
22,286
def set_cols_align(self, array):
    """Set the desired alignment of each column; returns self for chaining."""
    self._check_row_size(array)  # length must match the column count
    self._align = array
    return self
Set the desired columns alignment
22,287
def set_cols_valign(self, array):
    """Set the desired vertical alignment of each column; returns self for chaining."""
    self._check_row_size(array)  # length must match the column count
    self._valign = array
    return self
Set the desired columns vertical alignment
22,288
def set_cols_dtype(self, array):
    """Set the desired datatype of each column; returns self for chaining."""
    self._check_row_size(array)  # length must match the column count
    self._dtype = array
    return self
Set the desired columns datatype for the cols .
22,289
def add_rows(self, rows, header=True):
    """Add several rows to the rows stack; returns self for chaining.

    Parameters
    ----------
    rows : a sequence of rows, or an iterator yielding rows.
    header : when True, the first row becomes the table header.
    """
    if header:
        if hasattr(rows, '__iter__') and (hasattr(rows, 'next') or hasattr(rows, '__next__')):
            # iterator: consume the first element as the header.
            # BUG FIX: ``rows.next()`` is Python-2-only; the builtin ``next``
            # works with both py2 (.next) and py3 (.__next__) iterators.
            self.header(next(rows))
        else:
            self.header(rows[0])
            rows = rows[1:]
    for row in rows:
        self.add_row(row)
    return self
Add several rows in the rows stack
22,290
def draw(self):
    """Draw the table and return it as a string.

    Returns None when the table has neither a header nor rows. The final
    trailing newline is stripped from the output.
    """
    if not self._header and not self._rows:
        return
    self._compute_cols_width()
    self._check_align()
    out = ""
    if self._has_border():
        out += self._hline()
    if self._header:
        out += self._draw_line(self._header, isheader=True)
        if self._has_header():
            # separator between the header and the body
            out += self._hline_header()
    length = 0
    for row in self._rows:
        length += 1
        out += self._draw_line(row)
        # horizontal line between rows, but not after the last one
        if self._has_hlines() and length < len(self._rows):
            out += self._hline()
    if self._has_border():
        out += self._hline()
    return out[:-1]  # drop the trailing newline
Draw the table
22,291
def _fmt_int ( cls , x , ** kw ) : return str ( int ( round ( cls . _to_float ( x ) ) ) )
Integer formatting class - method .
22,292
def _fmt_float ( cls , x , ** kw ) : n = kw . get ( 'n' ) return '%.*f' % ( n , cls . _to_float ( x ) )
Float formatting class - method .
22,293
def _fmt_exp ( cls , x , ** kw ) : n = kw . get ( 'n' ) return '%.*e' % ( n , cls . _to_float ( x ) )
Exponential formatting class - method .
22,294
def _fmt_auto ( cls , x , ** kw ) : f = cls . _to_float ( x ) if abs ( f ) > 1e8 : fn = cls . _fmt_exp else : if f - round ( f ) == 0 : fn = cls . _fmt_int else : fn = cls . _fmt_float return fn ( x , ** kw )
auto formatting class - method .
22,295
def _str ( self , i , x ) : FMT = { 'a' : self . _fmt_auto , 'i' : self . _fmt_int , 'f' : self . _fmt_float , 'e' : self . _fmt_exp , 't' : self . _fmt_text , } n = self . _precision dtype = self . _dtype [ i ] try : if callable ( dtype ) : return dtype ( x ) else : return FMT [ dtype ] ( x , n = n ) except FallbackToText : return self . _fmt_text ( x )
Handles string formatting of cell data
22,296
def _hline ( self ) : if not self . _hline_string : self . _hline_string = self . _build_hline ( ) return self . _hline_string
Print an horizontal line
22,297
def _build_hline ( self , is_header = False ) : horiz = self . _char_horiz if ( is_header ) : horiz = self . _char_header s = "%s%s%s" % ( horiz , [ horiz , self . _char_corner ] [ self . _has_vlines ( ) ] , horiz ) l = s . join ( [ horiz * n for n in self . _width ] ) if self . _has_border ( ) : l = "%s%s%s%s%s\n" % ( self . _char_corner , horiz , l , horiz , self . _char_corner ) else : l += "\n" return l
Return a string used to separated rows or separate header from rows
22,298
def _len_cell ( self , cell ) : cell_lines = cell . split ( '\n' ) maxi = 0 for line in cell_lines : length = 0 parts = line . split ( '\t' ) for part , i in zip ( parts , list ( range ( 1 , len ( parts ) + 1 ) ) ) : length = length + len ( part ) if i < len ( parts ) : length = ( length // 8 + 1 ) * 8 maxi = max ( maxi , length ) return maxi
Return the width of the cell
22,299
def region(self, rect):
    """Crop the image to the sub-region given by rect = (x, y, width, height).

    Raises errors.RectangleError when the region extends past the
    right/bottom edge of the image. Returns self for chaining.
    """
    x = int(rect[0])
    y = int(rect[1])
    box = (x, y, x + int(rect[2]), y + int(rect[3]))
    if box[2] > self.img.size[0] or box[3] > self.img.size[1]:
        raise errors.RectangleError("Region out-of-bounds")
    self.img = self.img.crop(box)
    return self
Selects a sub - region of the image using the supplied rectangle x y width height .