idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
15,800
def set_stim_by_index(self, index):
    """Set the stimulus to be generated to the one referenced by *index*.

    Clears all components currently loaded into the stimulus, then
    inserts the single component found at ``self.stim_components[index]``.
    """
    # Drop whatever components are currently in the stimulus.
    self.stimulus.clearComponents()
    # NOTE(review): the original comment said "add one to index because of
    # tone curve", but the index is used unmodified here -- confirm intent.
    self.stimulus.insertComponent(self.stim_components[index])
Sets the stimulus to be generated to the one referenced by index
54
13
15,801
def process_calibration(self, save=True):
    """Processes the recorded calibration control signal.

    Determines the transfer function of the speaker to get a frequency
    vs. attenuation curve, stores it into the data file, and measures the
    reference tone level via the microphone calibration values.

    :param save: unused in this body -- presumably kept for interface
        compatibility; TODO confirm against callers
    :returns: tuple of (attenuation curve ``diffdB``, dataset name,
        calibration frequency, reference-tone dB)
    :raises Exception: if the calibration data was never saved to disk
    """
    # Processing needs the recorded data, which only exists when saved.
    if not self.save_data:
        raise Exception("Cannot process an unsaved calibration")
    # Average the recorded response across all repetitions.
    avg_signal = np.mean(self.datafile.get_data(self.current_dataset_name + '/signal'), axis=0)
    # Frequency-dependent attenuation of the speaker, relative to the
    # calibration frequency self.calf.
    diffdB = attenuation_curve(self.stimulus.signal()[0], avg_signal, self.stimulus.samplerate(), self.calf)
    logger = logging.getLogger('main')
    logger.debug('The maximum dB attenuation is {}, caldB {}'.format(max(diffdB), self.caldb))
    # save a vector of only the calibration intensity results
    self.datafile.init_data(self.current_dataset_name, mode='calibration',
                            dims=diffdB.shape,
                            nested_name='calibration_intensities')
    self.datafile.append(self.current_dataset_name, diffdB,
                         nested_name='calibration_intensities')
    # Metadata describing how this calibration was taken.
    relevant_info = {'frequencies': 'all', 'calibration_dB': self.caldb,
                     'calibration_voltage': self.calv,
                     'calibration_frequency': self.calf,
                     }
    self.datafile.set_metadata('/'.join([self.current_dataset_name, 'calibration_intensities']), relevant_info)
    # Average the reference-tone recordings and convert their amplitude to
    # dB using the microphone sensitivity values.
    mean_reftone = np.mean(self.datafile.get_data(self.current_dataset_name + '/reference_tone'), axis=0)
    tone_amp = signal_amplitude(mean_reftone, self.player.get_aifs())
    db = calc_db(tone_amp, self.mphonesens, self.mphonedb)
    # remove the reference tone from protocol
    self.protocol_model.remove(0)
    return diffdB, self.current_dataset_name, self.calf, db
processes calibration control signal . Determines transfer function of speaker to get frequency vs . attenuation curve .
474
22
15,802
def setModel(self, model):
    """Attach *model* (a StimulusModel) to this editor.

    Also syncs the output-samplerate spin box to the model's samplerate.
    """
    self._model = model
    # Reflect the model's sample rate in the UI immediately.
    self.ui.aofsSpnbx.setValue(self._model.samplerate())
Sets the StimulusModel for this editor
38
9
15,803
def setStimIndex(self, row, stimIndex):
    """Change out the component type in *row* to the one at *stimIndex*.

    Replaces the component at column 1 of the given track with the
    pre-built instance held in ``self._allComponents``.
    """
    replacement = self._allComponents[row][stimIndex]
    # Remove the old component before inserting its replacement in place.
    self._model.removeComponent(row, 1)
    self._model.insertComponent(replacement, row, 1)
Change out the component type in row to the one indexed by stimIndex
57
14
15,804
def addComponentEditor(self):
    """Adds a new component to the model and an editor for this component.

    Creates an ExploreComponentEditor page plus a selector button for it,
    seeds the track with a Silence (delay) component, and builds one
    editor widget per explorable stimulus type.

    :returns: the newly created ExploreComponentEditor
    """
    row = self._model.rowCount()
    comp_stack_editor = ExploreComponentEditor()
    self.ui.trackStack.addWidget(comp_stack_editor)
    # Button that raises this track's editor page when clicked.
    idx_button = IndexButton(row)
    idx_button.pickMe.connect(self.ui.trackStack.setCurrentIndex)
    self.trackBtnGroup.addButton(idx_button)
    self.ui.trackBtnLayout.addWidget(idx_button)
    self.ui.trackStack.setCurrentIndex(row)
    comp_stack_editor.closePlease.connect(self.removeComponentEditor)
    # Column 0 of each track holds a Silence component acting as the delay.
    delay = Silence()
    comp_stack_editor.delaySpnbx.setValue(delay.duration())
    self._model.insertComponent(delay, row, 0)
    # One fresh instance of every explorable stimulus type for this track.
    self._allComponents.append([x() for x in self.stimuli_types if x.explore])
    for stim in self._allComponents[row]:
        editor = wrapComponent(stim).showEditor()
        comp_stack_editor.addWidget(editor, stim.name)
    # Vocalization editors should only allow picking a single file.
    exvocal = comp_stack_editor.widgetForName("Vocalization")
    if exvocal is not None:
        exvocal.filelistView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
    # Column 1 starts out as the first available component type.
    initcomp = self._allComponents[row][0]
    self._model.insertComponent(initcomp, row, 1)
    self.buttons.append(idx_button)
    comp_stack_editor.exploreStimTypeCmbbx.currentIndexChanged.connect(lambda x: self.setStimIndex(row, x))
    comp_stack_editor.delaySpnbx.valueChanged.connect(lambda x: self.setDelay(row, x))
    comp_stack_editor.valueChanged.connect(self.valueChanged.emit)
    return comp_stack_editor
Adds a new component to the model and an editor for this component to this editor
434
16
15,805
def list_space_systems(self, page_size=None):
    """Lists the space systems visible to this client.

    :param page_size: optional maximum number of results per server page
    :returns: a lazy iterator over SpaceSystem objects
    """
    params = {} if page_size is None else {'limit': page_size}
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/space-systems'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListSpaceSystemsResponse,
        items_key='spaceSystem',
        item_mapper=SpaceSystem,
    )
Lists the space systems visible to this client .
114
10
15,806
def get_space_system(self, name):
    """Gets a single space system by its unique name.

    :param name: fully-qualified space system name
    :returns: a SpaceSystem object
    """
    # NOTE(review): unlike the other get_* methods this does not pass
    # *name* through adapt_name_for_rest(); space-system names presumably
    # already begin with '/' -- confirm against callers.
    path = '/mdb/{}/space-systems{}'.format(self._instance, name)
    proto = mdb_pb2.SpaceSystemInfo()
    proto.ParseFromString(self._client.get_proto(path).content)
    return SpaceSystem(proto)
Gets a single space system by its unique name .
82
11
15,807
def list_parameters(self, parameter_type=None, page_size=None):
    """Lists the parameters visible to this client.

    :param parameter_type: optional engineering type to filter by
    :param page_size: optional maximum number of results per server page
    :returns: a lazy iterator over Parameter objects
    """
    # 'details' asks the server for full parameter info in each entry.
    params = {'details': True}
    if parameter_type is not None:
        params['type'] = parameter_type
    if page_size is not None:
        params['limit'] = page_size
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/parameters'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListParametersResponse,
        items_key='parameter',
        item_mapper=Parameter,
    )
Lists the parameters visible to this client .
137
9
15,808
def get_parameter(self, name):
    """Gets a single parameter by its name.

    :param name: parameter name (qualified name or alias)
    :returns: a Parameter object
    """
    # Names may contain aliases; normalize to the REST path form first.
    rest_name = adapt_name_for_rest(name)
    path = '/mdb/{}/parameters{}'.format(self._instance, rest_name)
    proto = mdb_pb2.ParameterInfo()
    proto.ParseFromString(self._client.get_proto(path).content)
    return Parameter(proto)
Gets a single parameter by its name .
91
9
15,809
def list_containers(self, page_size=None):
    """Lists the containers visible to this client.

    :param page_size: optional maximum number of results per server page
    :returns: a lazy iterator over Container objects
    """
    params = {} if page_size is None else {'limit': page_size}
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/containers'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListContainersResponse,
        items_key='container',
        item_mapper=Container,
    )
Lists the containers visible to this client .
107
9
15,810
def get_container(self, name):
    """Gets a single container by its unique name.

    :param name: container name (qualified name or alias)
    :returns: a Container object
    """
    rest_name = adapt_name_for_rest(name)
    path = '/mdb/{}/containers{}'.format(self._instance, rest_name)
    proto = mdb_pb2.ContainerInfo()
    proto.ParseFromString(self._client.get_proto(path).content)
    return Container(proto)
Gets a single container by its unique name .
88
10
15,811
def list_commands(self, page_size=None):
    """Lists the commands visible to this client.

    :param page_size: optional maximum number of results per server page
    :returns: a lazy iterator over Command objects
    """
    params = {} if page_size is None else {'limit': page_size}
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/commands'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListCommandsResponse,
        items_key='command',
        item_mapper=Command,
    )
Lists the commands visible to this client .
107
9
15,812
def get_command(self, name):
    """Gets a single command by its unique name.

    :param name: command name (qualified name or alias)
    :returns: a Command object
    """
    rest_name = adapt_name_for_rest(name)
    path = '/mdb/{}/commands{}'.format(self._instance, rest_name)
    proto = mdb_pb2.CommandInfo()
    proto.ParseFromString(self._client.get_proto(path).content)
    return Command(proto)
Gets a single command by its unique name .
88
10
15,813
def list_algorithms(self, page_size=None):
    """Lists the algorithms visible to this client.

    :param page_size: optional maximum number of results per server page
    :returns: a lazy iterator over Algorithm objects
    """
    params = {} if page_size is None else {'limit': page_size}
    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/algorithms'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListAlgorithmsResponse,
        items_key='algorithm',
        item_mapper=Algorithm,
    )
Lists the algorithms visible to this client .
112
9
15,814
def get_algorithm(self, name):
    """Gets a single algorithm by its unique name.

    :param name: algorithm name (qualified name or alias)
    :returns: an Algorithm object
    """
    rest_name = adapt_name_for_rest(name)
    path = '/mdb/{}/algorithms{}'.format(self._instance, rest_name)
    proto = mdb_pb2.AlgorithmInfo()
    proto.ParseFromString(self._client.get_proto(path).content)
    return Algorithm(proto)
Gets a single algorithm by its unique name .
92
10
15,815
def list_buckets(self, instance):
    """List the buckets for an instance.

    The server does not paginate this listing; an iterator is returned
    anyway for parity with the other list_* methods.

    :param instance: instance name
    :returns: an iterator over Bucket objects
    """
    response = self._client.get_proto(path='/buckets/' + instance)
    message = rest_pb2.ListBucketsResponse()
    message.ParseFromString(response.content)
    return iter(Bucket(b, instance, self) for b in getattr(message, 'bucket'))
List the buckets for an instance .
112
7
15,816
def list_objects(self, instance, bucket_name, prefix=None, delimiter=None):
    """List the objects for a bucket.

    :param instance: instance name
    :param bucket_name: bucket to list
    :param prefix: optional object-name prefix filter
    :param delimiter: optional grouping delimiter (e.g. '/')
    :returns: an ObjectListing wrapping the server response
    """
    params = {}
    if prefix is not None:
        params['prefix'] = prefix
    if delimiter is not None:
        params['delimiter'] = delimiter
    response = self._client.get_proto(
        path='/buckets/{}/{}'.format(instance, bucket_name), params=params)
    message = rest_pb2.ListObjectsResponse()
    message.ParseFromString(response.content)
    return ObjectListing(message, instance, bucket_name, self)
List the objects for a bucket .
139
7
15,817
def create_bucket(self, instance, bucket_name):
    """Create a new bucket in the specified instance.

    :param instance: instance name
    :param bucket_name: name of the bucket to create
    """
    request = rest_pb2.CreateBucketRequest()
    request.name = bucket_name
    self._client.post_proto('/buckets/{}'.format(instance),
                            data=request.SerializeToString())
Create a new bucket in the specified instance .
72
9
15,818
def remove_bucket(self, instance, bucket_name):
    """Remove a bucket from the specified instance.

    :param instance: instance name
    :param bucket_name: name of the bucket to delete
    """
    self._client.delete_proto('/buckets/{}/{}'.format(instance, bucket_name))
Remove a bucket from the specified instance .
48
8
15,819
def upload_object(self, instance, bucket_name, object_name, file_obj, content_type=None):
    """Upload an object to a bucket.

    :param instance: instance name
    :param bucket_name: target bucket
    :param object_name: name to store the object under
    :param file_obj: path to a local file, or an already-open binary
        file-like object (anything with a ``read`` method). Passing a
        path preserves the original behavior.
    :param content_type: optional MIME type for the object
    """
    url = '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name)

    def _post(f):
        # Build the multipart payload; include the MIME type only when given.
        if content_type:
            files = {object_name: (object_name, f, content_type)}
        else:
            files = {object_name: (object_name, f)}
        self._client.request(path=url, method='post', files=files)

    if hasattr(file_obj, 'read'):
        # Already an open file-like object; use it directly.
        _post(file_obj)
    else:
        # Treat as a filesystem path (original behavior).
        with open(file_obj, 'rb') as f:
            _post(f)
Upload an object to a bucket .
132
7
15,820
def remove_object(self, instance, bucket_name, object_name):
    """Remove an object from a bucket.

    :param instance: instance name
    :param bucket_name: bucket containing the object
    :param object_name: name of the object to delete
    """
    self._client.delete_proto(
        '/buckets/{}/{}/{}'.format(instance, bucket_name, object_name))
Remove an object from a bucket .
58
7
15,821
def get_now_utc():
    """Return the current time as a timezone-aware UTC datetime.

    Replaces the original hand-rolled ``tzinfo`` subclass with the
    standard-library :data:`datetime.timezone.utc` (available since
    Python 3.2), which behaves identically: zero offset, tzname ``"UTC"``
    and zero DST.

    :returns: aware :class:`datetime.datetime` in UTC
    """
    return datetime.datetime.now(datetime.timezone.utc)
date in UTC ISO format
156
5
15,822
def get(self, position):
    """Gets value at index *position*, or ``None`` when out of range."""
    node = self.head
    index = 0
    while node is not None:
        if index == position:
            return node.val
        if index > position:
            # Negative positions can never match; bail out.
            break
        node = node.next_node
        index += 1
    return None
Gets value at index
57
5
15,823
def insert_first(self, val):
    """Insert *val* at the head of the list; always returns True."""
    # The new node adopts the current head as its successor.
    new_head = Node(val, next_node=self.head)
    self.head = new_head
    return True
Insert in head
28
3
15,824
def insert(self, val, position=0):
    """Insert *val* at index *position*.

    Positions <= 0 insert at the head; positions past the end append to
    the tail. Returns True on success.

    Fixes a crash in the original: inserting at position > 0 into an
    empty list left ``last_node`` as None and then dereferenced it.
    """
    if position <= 0 or self.head is None:
        # Head insertion; also covers the empty list, where there is no
        # predecessor node to link from.
        return self.insert_first(val)
    counter = 0
    last_node = self.head
    current_node = self.head
    while current_node is not None and counter <= position:
        if counter == position:
            # Splice the new node between last_node and current_node.
            last_node.next_node = Node(val, current_node)
            return True
        last_node = current_node
        current_node = current_node.next_node
        counter += 1
    if current_node is None:
        # Ran off the end before reaching position: append as the tail.
        last_node.next_node = Node(val, None)
        return True
Insert in position
130
3
15,825
def remove(self, position):
    """Removes the node at index *position*; True on success."""
    if position <= 0:
        # Head removal is delegated.
        return self.remove_first()
    if position >= self.length() - 1:
        # Tail removal (and anything past it) is delegated.
        return self.remove_last()
    prev = self.head
    node = self.head
    idx = 0
    while node is not None and idx <= position:
        if idx == position:
            # Unlink by bypassing the current node.
            prev.next_node = node.next_node
            return True
        prev, node = node, node.next_node
        idx += 1
    return False
Removes at index
123
4
15,826
def to_lst(self):
    """Walk the list and collect every node's value into a Python list."""
    values = []
    node = self.head
    while node is not None:
        values.append(node.val)
        node = node.next_node
    return values
Cycle all items and puts them in a list
41
10
15,827
def execute(self, func, *args, **kwargs):
    """Apply *func* to every item and return the list of results.

    Extra positional/keyword arguments are forwarded to each call.
    """
    return [func(element, *args, **kwargs) for element in self.to_lst()]
Executes function on each item
44
6
15,828
def from_specification(specification, env_prefix=None, separator='.', parent_names=None):
    """Used to create YapconfItems from a specification dictionary.

    Each item receives its own shallow copy of the ancestor name chain so
    that per-item mutation cannot leak across siblings.
    """
    items = {}
    for item_name, item_info in six.iteritems(specification):
        # Fresh copy per item -- _generate_item may extend the chain.
        names = copy.copy(parent_names) if parent_names else []
        items[item_name] = _generate_item(
            item_name, item_info, env_prefix, separator, names)
    return items
Used to create YapconfItems from a specification dictionary .
97
12
15,829
def update_default(self, new_default, respect_none=False):
    """Update our current default with the new_default.

    A ``None`` new_default only overwrites the existing default when
    *respect_none* is set; otherwise it is ignored.
    """
    if new_default is not None:
        self.default = new_default
    elif respect_none:
        # new_default is necessarily None on this branch.
        self.default = None
Update our current default with the new_default .
50
10
15,830
def migrate_config(self, current_config, config_to_migrate,
                   always_update, update_defaults):
    """Migrate config value in current_config, updating config_to_migrate.

    Searches the old config for this item under any of its possible
    names, then writes the found value into the new config.
    """
    found_value = self._search_config_for_possible_names(current_config)
    self._update_config(config_to_migrate, found_value,
                        always_update, update_defaults)
Migrate config value in current_config updating config_to_migrate .
73
16
15,831
def add_argument(self, parser, bootstrap=False):
    """Add this item as an argument to the given parser.

    Does nothing when the item is not exposed on the CLI.
    """
    if not self.cli_expose:
        return
    parser.add_argument(
        *self._get_argparse_names(parser.prefix_chars),
        **self._get_argparse_kwargs(bootstrap))
Add this item as an argument to the given parser .
76
11
15,832
def get_config_value(self, overrides, skip_environment=False):
    """Get the configuration value from all overrides.

    Falls back to the item default when no override provides a value;
    ``None`` values are returned as-is without conversion/validation.

    :raises YapconfItemNotFound: when no value is found, no default
        exists, and the item is required.
    """
    label, override, key = self._search_overrides(overrides, skip_environment)

    if override is None:
        if self.default is None and self.required:
            raise YapconfItemNotFound(
                'Could not find config value for {0}'.format(self.fq_name),
                self)
        self.logger.debug(
            'Config value not found for {0}, falling back to '
            'default.'.format(self.name))
        value = self.default
    else:
        value = override[key]

    if value is None:
        return value

    # Convert (e.g. string -> typed value) and validate before returning.
    converted_value = self.convert_config_value(value, label)
    self._validate_value(converted_value)
    return converted_value
Get the configuration value from all overrides .
173
9
15,833
def add_argument(self, parser, bootstrap=False):
    """Add boolean item as an argument to the given parser.

    Registers two mutually exclusive flag variants -- one generated with
    default True, one with default False -- then restores the real
    default afterwards.
    """
    saved_default = self.default
    exclusive_group = parser.add_mutually_exclusive_group()
    # Generate the "true" flag first, then the "false" flag (same order
    # as the original implementation).
    for flag_default in (True, False):
        self.default = flag_default
        names = self._get_argparse_names(parser.prefix_chars)
        kwargs = self._get_argparse_kwargs(bootstrap)
        exclusive_group.add_argument(*names, **kwargs)
    self.default = saved_default
Add boolean item as an argument to the given parser .
166
11
15,834
def convert_config_value(self, value, label):
    """Converts all Truthy values to True and Falsy values to False.

    Strings are lower-cased before the lookup; anything in neither set
    raises YapconfValueError.
    """
    if isinstance(value, six.string_types):
        value = value.lower()

    if value in self.TRUTHY_VALUES:
        return True
    if value in self.FALSY_VALUES:
        return False
    raise YapconfValueError(
        "Cowardly refusing to interpret "
        "config value as a boolean. Name: "
        "{0}, Value: {1}".format(self.name, value))
Converts all Truthy values to True and Falsy values to False .
108
16
15,835
def add_argument(self, parser, bootstrap=False):
    """Add list-style item as an argument to the given parser.

    Boolean children get two flag variants (true/false); everything else
    falls through to the base implementation.
    """
    if not self.cli_expose:
        return
    if not isinstance(self.child, YapconfBoolItem):
        super(YapconfListItem, self).add_argument(parser, bootstrap)
        return
    # Bool children: register a "true" variant then a "false" variant,
    # restoring the child's real default afterwards.
    saved_default = self.child.default
    for flag_default in (True, False):
        self.child.default = flag_default
        names = self.child._get_argparse_names(parser.prefix_chars)
        kwargs = self._get_argparse_kwargs(bootstrap)
        parser.add_argument(*names, **kwargs)
    self.child.default = saved_default
Add list - style item as an argument to the given parser .
202
13
15,836
def add_argument(self, parser, bootstrap=False):
    """Add dict-style item as an argument to the given parser.

    Delegates to each child item; no argument is added for the dict
    itself.
    """
    if self.cli_expose:
        for child_item in self.children.values():
            child_item.add_argument(parser, bootstrap)
Add dict - style item as an argument to the given parser .
46
13
15,837
def log_time(func):
    """Decorator that logs when *func* starts, finishes, and how long it took.

    :param func: the callable to wrap
    :return: the wrapped callable (result is passed through unchanged)
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the wrapped function under a Timer and log the duration."""
        name = get_method_name(func)
        timer = Timer()
        log_message(name, "has started")
        with timer:
            result = func(*args, **kwargs)
        elapsed = "{:.3f}".format(timer.elapsed_time())
        log_message(name, "has finished. Execution time:", elapsed, "s")
        return result
    return wrapper
Executes function and logs time
168
6
15,838
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
    """Load an analysis configuration from a file.

    :param yaml: configured ruamel YAML object to parse with
    :param filename: path of the configuration file
    :returns: the parsed configuration
    """
    with open(filename, "r") as config_file:
        return yaml.load(config_file)
Load an analysis configuration from a file .
54
8
15,839
def override_options(config: DictLike, selected_options: Tuple[Any, ...], set_of_possible_options: Tuple[enum.Enum, ...], config_containing_override: DictLike = None) -> DictLike:
    """Determine override options for a particular configuration.

    Pops the "override" block from ``config_containing_override`` (the
    main config by default), resolves which overrides apply to the
    selected options, and writes them into ``config`` in place while
    preserving ruamel.yaml anchors where possible.

    :param config: the configuration to modify in place (and return)
    :param selected_options: options identifying which overrides apply
    :param set_of_possible_options: all possible option enumerations
    :param config_containing_override: where the "override" block lives;
        defaults to ``config`` itself
    :returns: the modified ``config``
    :raises KeyError: when an override names a key absent from config
    :raises ValueError: when an anchored value is neither list nor
        dict nor a basic scalar
    """
    if config_containing_override is None:
        config_containing_override = config
    override_opts = config_containing_override.pop("override")
    override_dict = determine_override_options(selected_options, override_opts, set_of_possible_options)
    logger.debug(f"override_dict: {override_dict}")
    # Set the configuration values to those specified in the override options
    # Cannot just use update() on config because we need to maintain the anchors.
    for k, v in override_dict.items():
        # Check if key is there and if it is not None! (The second part is important)
        if k in config:
            try:
                # If it has an anchor, we know that we want to preserve the type. So we check for the anchor
                # by trying to access it (Note that we don't actually care what the value is - just that it
                # exists). If it fails with an AttributeError, then we know we can just assign the value. If it
                # has an anchor, then we want to preserve the anchor information.
                config[k].anchor
                logger.debug(f"type: {type(config[k])}, k: {k}")
                if isinstance(config[k], list):
                    # Clear out the existing list entries
                    del config[k][:]
                    if isinstance(override_dict[k], (str, int, float, bool)):
                        # We have to treat str carefully because it is an iterable, but it will be expanded as
                        # individual characters if it's treated the same as a list, which is not the desired
                        # behavior! If we wrap it in [], then it will be treated as the only entry in the list
                        # NOTE: We also treat the basic types this way because they will be passed this way if
                        # overriding indirectly with anchors (since the basic scalar types don't yet support
                        # reassignment while maintaining their anchors).
                        config[k].append(override_dict[k])
                    else:
                        # Here we just assign all entries of the list to all entries of override_dict[k]
                        config[k].extend(override_dict[k])
                elif isinstance(config[k], dict):
                    # Clear out the existing entries because we are trying to replace everything
                    # Then we can simply update the dict with our new values
                    config[k].clear()
                    config[k].update(override_dict[k])
                elif isinstance(config[k], (int, float, bool)):
                    # This isn't really very good (since we lose information), but there's nothing that can be done
                    # about it at the moment (Dec 2018)
                    logger.debug("Overwriting YAML anchor object. It is currently unclear how to reassign this value.")
                    config[k] = v
                else:
                    # Raise a value error on all of the cases that we aren't already aware of.
                    raise ValueError(f"Object {k} (type {type(config[k])}) somehow has an anchor, but is something other than a list or dict. Attempting to assign directly to it.")
            except AttributeError:
                # If no anchor, just overwrite the value at this key
                config[k] = v
        else:
            raise KeyError(k, f"Trying to override key \"{k}\" that it is not in the config.")
    return config
Determine override options for a particular configuration .
785
10
15,840
def simplify_data_representations(config: DictLike) -> DictLike:
    """Convert one entry lists to the scalar value.

    Mutates *config* in place and also returns it for convenience.
    """
    for key, value in config.items():
        # Only collapse non-empty single-element lists.
        if value and isinstance(value, list) and len(value) == 1:
            logger.debug("v: {}".format(value))
            config[key] = value[0]
    return config
Convert one entry lists to the scalar value
74
10
15,841
def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
    """Determine iterable values to use to create objects for a configuration.

    For each entry under ``config["iterables"]``: ``False`` skips the
    iterable, ``True`` selects every enum value, and a list selects the
    named enum members (or the raw values when no enum is registered).

    :raises KeyError: when a requested iterable is not a possible one
    :raises TypeError: when a bare string is passed instead of bool/list
    """
    iterables = {}
    requested_iterables = config["iterables"]
    for k, v in requested_iterables.items():
        if k not in possible_iterables:
            raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
        logger.debug(f"k: {k}, v: {v}")
        enum_values = possible_iterables[k]
        # A plain string would be iterated per-character; reject it outright.
        if isinstance(v, str):
            raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
        if v is False:
            # Explicitly skipped by the config.
            continue
        if v is True:
            # Include every possible value from the enum.
            additional_iterable: List[Any] = list(enum_values)
        elif enum_values is None:
            # No enum registered: take the values straight from the config.
            additional_iterable = list(v)
        else:
            # Map each requested name to its enum member.
            additional_iterable = [enum_values[el] for el in v]
        # Store for later
        iterables[k] = additional_iterable
    return iterables
Determine iterable values to use to create objects for a given configuration .
345
16
15,842
def _key_index_iter ( self ) -> Iterator [ Tuple [ str , Any ] ] : for k , v in vars ( self ) . items ( ) : yield k , v
Allows for iteration over the KeyIndex values .
42
9
15,843
def create_key_index_object(key_index_name: str, iterables: Dict[str, Any]) -> Any:
    """Create a KeyIndex class based on the passed attributes.

    Returns a frozen dataclass named *key_index_name* with one field per
    iterable, typed from each iterable's first element, plus support for
    iterating over (field name, value) pairs.

    :raises TypeError: when a bare iterator (exhaustible) is passed
    """
    # Reject bare iterators: each iterable is consumed once below to infer
    # its field type and again later by the caller, so it must be
    # re-iterable. An iterator is its own iterator; a container is not.
    for name, iterable in iterables.items():
        if iter(iterable) == iter(iterable):
            raise TypeError(
                f"Iterable {name} is in iterator which can be exhausted. Please pass the iterable"
                f" in a container that can recreate the iterable. See the comments here for more info."
            )
    # Field types come from the first element of each iterable (iterables
    # are guaranteed non-empty or they would not be listed). The dict order
    # is preserved -- it fixes the positional order of the KeyIndex fields.
    field_specs = [(name, type(next(iter(iterable)))) for name, iterable in iterables.items()]
    KeyIndex = dataclasses.make_dataclass(key_index_name, field_specs, frozen=True)
    # Allow for iteration over the key index values.
    KeyIndex.__iter__ = _key_index_iter
    return KeyIndex
Create a KeyIndex class based on the passed attributes .
404
11
15,844
def create_objects_from_iterables(obj, args: dict, iterables: Dict[str, Any], formatting_options: Dict[str, Any], key_index_name: str = "KeyIndex") -> Tuple[Any, Dict[str, Any], dict]:
    """Create objects for each set of values based on the given arguments.

    Builds one ``obj(**args)`` instance per combination of iterable
    values, keyed by a generated frozen KeyIndex dataclass.

    NOTE: *args* and *formatting_options* are mutated in place (iterable
    values are written into them on every combination).

    :param obj: callable/class constructed once per combination
    :param args: base constructor arguments (strings may contain
        format placeholders)
    :param iterables: name -> iterable of values to combine
    :param formatting_options: values available for string formatting
    :param key_index_name: class name for the generated KeyIndex
    :returns: (KeyIndex class, iterables, {KeyIndex instance: object})
    :raises ValueError: when no objects could be created
    """
    # Setup
    objects = {}
    names = list(iterables)
    logger.debug(f"iterables: {iterables}")
    # Create the key index object, where the name of each field is the name of each iterable.
    KeyIndex = create_key_index_object(
        key_index_name=key_index_name,
        iterables=iterables,
    )
    # ``itertools.product`` produces all possible permutations of the iterables values.
    # NOTE: Product preserves the order of the iterables values, which is important for properly
    #       assigning the values to the ``KeyIndex``.
    for values in itertools.product(*iterables.values()):
        logger.debug(f"Values: {values}")
        # Skip if we don't have a sufficient set of values to create an object.
        if not values:
            continue
        # Add in the values into the arguments and formatting options.
        # NOTE: We don't need a deep copy for the iterable values in the args and formatting options
        #       because the values will be overwritten for each object.
        for name, val in zip(names, values):
            # We want to keep the original value for the arguments.
            args[name] = val
            # Here, we convert the value, regardless of type, into a string that can be displayed.
            formatting_options[name] = str(val)
        # Apply formatting options
        # If we formatted in place, we would need to deepcopy the args to ensure that the iterable dependent
        # values in the formatted values are properly set for each iterable object individually.
        # However, by formatting into new variables, we can avoid a deepcopy, which greatly improves performance!
        # NOTE: We don't need a deep copy do this for iterable value names themselves because they will be
        #       overwritten for each object. They are set in the block above.
        object_args = copy.copy(args)
        logger.debug(f"object_args pre format: {object_args}")
        object_args = apply_formatting_dict(object_args, formatting_options)
        # Print our results for debugging purposes. However, we skip printing the full
        # config because it is quite long
        print_args = {k: v for k, v in object_args.items() if k != "config"}
        print_args["config"] = "..."
        logger.debug(f"Constructing obj \"{obj}\" with args: \"{print_args}\"")
        # Finally create the object.
        objects[KeyIndex(*values)] = obj(**object_args)
    # If nothing has been created at this point, then we are didn't iterating over anything and something
    # has gone wrong.
    if not objects:
        raise ValueError(iterables, "There appear to be no iterables to use in creating objects.")
    return (KeyIndex, iterables, objects)
Create objects for each set of values based on the given arguments .
672
13
15,845
def apply_formatting_dict(obj: Any, formatting: Dict[str, Any]) -> Any:
    """Recursively apply a formatting dict to all strings in a configuration.

    Dicts and lists are rebuilt with every string value formatted;
    numbers, None, and Enum members pass through unchanged. Strings
    containing ``$`` are skipped (presumably LaTeX -- see the inline
    comments).

    :param obj: configuration value (str, dict, list, scalar, Enum, ...)
    :param formatting: placeholder name -> replacement value
    :returns: a new object with formatting applied (input not mutated)
    """
    #logger.debug("Processing object of type {}".format(type(obj)))
    new_obj = obj
    if isinstance(obj, str):
        # Apply the formatting options to the string.
        # We explicitly allow for missing keys. They will be kept so they can be filled later.
        # see: https://stackoverflow.com/a/17215533
        # If a more sophisticated solution is needed,
        # see: https://ashwch.github.io/handling-missing-keys-in-str-format-map.html
        # Note that we can't use format_map because it is python 3.2+ only.
        # The solution below works in py 2/3
        if "$" not in obj:
            new_obj = string.Formatter().vformat(obj, (), formatting_dict(**formatting))
        #else:
        #    logger.debug("Skipping str {} since it appears to be a latex string, which may break the formatting.".format(obj))
    elif isinstance(obj, dict):
        new_obj = {}
        for k, v in obj.items():
            # Using indirect access to ensure that the original object is updated.
            new_obj[k] = apply_formatting_dict(v, formatting)
    elif isinstance(obj, list):
        new_obj = []
        for i, el in enumerate(obj):
            # Using indirect access to ensure that the original object is updated.
            new_obj.append(apply_formatting_dict(el, formatting))
    elif isinstance(obj, int) or isinstance(obj, float) or obj is None:
        # Skip over this, as there is nothing to be done - we just keep the value.
        pass
    elif isinstance(obj, enum.Enum):
        # Skip over this, as there is nothing to be done - we just keep the value.
        # This only occurs when a formatting value has already been transformed
        # into an enumeration.
        pass
    else:
        # This may or may not be expected, depending on the particular value.
        logger.debug(f"Unrecognized obj '{obj}' of type '{type(obj)}'")
    return new_obj
Recursively apply a formatting dict to all strings in a configuration .
493
14
15,846
def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
    """Iterate over an analysis dictionary with selected attributes.

    With no selections, every (key_index, object) pair is yielded.
    Otherwise only entries whose key_index attribute equals the selected
    value for every selector are yielded.
    """
    for key_index, obj in analysis_objects.items():
        if not selections:
            # No filter requested: pass everything through.
            yield key_index, obj
            continue
        matches = all(
            getattr(key_index, selector) == wanted
            for selector, wanted in selections.items()
        )
        if matches:
            yield key_index, obj
Iterate over an analysis dictionary with selected attributes .
143
10
15,847
def iterate_with_selected_objects_in_order ( analysis_objects : Mapping [ Any , Any ] , analysis_iterables : Dict [ str , Sequence [ Any ] ] , selection : Union [ str , Sequence [ str ] ] ) -> Iterator [ List [ Tuple [ Any , Any ] ] ] : # Validation if isinstance ( selection , str ) : selection = [ selection ] # Help out mypy. We don't check if it is a list to allow for other sequences. assert not isinstance ( selection , str ) # We don't want to impact the original analysis iterables when we pop some values below. analysis_iterables = copy . copy ( analysis_iterables ) # Extract the selected iterators from the possible iterators so we can select on them later. # First, we want want each set of iterators to be of the form: # {"selection1": [value1, value2, ...], "selection2": [value3, value4, ...]} selected_iterators = { } for s in selection : selected_iterators [ s ] = analysis_iterables . pop ( s ) logger . debug ( f"Initial analysis_iterables: {analysis_iterables}" ) logger . debug ( f"Initial selected_iterators: {selected_iterators}" ) # Now, we convert them to the form: # [[("selection1", value1), ("selection1", value2)], [("selection2", value3), ("selection2", value4)]] # This allows them to iterated over conveniently via itertools.product(...) selected_iterators = [ [ ( k , v ) for v in values ] for k , values in selected_iterators . items ( ) ] # type: ignore analysis_iterables = [ [ ( k , v ) for v in values ] for k , values in analysis_iterables . items ( ) ] # type: ignore logger . debug ( f"Final analysis_iterables: {analysis_iterables}" ) logger . debug ( f"Final selected_iterators: {selected_iterators}" ) # Useful debug information, but too verbose for standard usage. #logger.debug(f"analysis_iterables product: {list(itertools.product(*analysis_iterables))}") #logger.debug(f"selected_iterators product: {list(itertools.product(*selected_iterators))}") for values in itertools . 
product ( * analysis_iterables ) : selected_analysis_objects = [ ] for selected_values in itertools . product ( * selected_iterators ) : for key_index , obj in analysis_objects . items ( ) : selected_via_analysis_iterables = all ( getattr ( key_index , k ) == v for k , v in values ) selected_via_selected_iterators = all ( getattr ( key_index , k ) == v for k , v in selected_values ) selected_obj = selected_via_analysis_iterables and selected_via_selected_iterators if selected_obj : selected_analysis_objects . append ( ( key_index , obj ) ) logger . debug ( f"Yielding: {selected_analysis_objects}" ) yield selected_analysis_objects
Iterate over an analysis dictionary yielding the selected attributes in order .
703
13
15,848
def _db ( self ) : if not hasattr ( self , "_db_client" ) or getattr ( self , "_db_client" ) is None : self . _db_client = get_db_client ( ) return self . _db_client
Database client for accessing storage .
56
6
15,849
async def filter_new_posts ( self , source_id , post_ids ) : new_ids = [ ] try : db_client = self . _db posts_in_db = await db_client . get_known_posts ( source_id , post_ids ) new_ids = [ p for p in post_ids if p not in posts_in_db ] except Exception as exc : logger . error ( "Error when filtering for new posts {} {}" . format ( source_id , post_ids ) ) logger . exception ( exc ) return new_ids
Filters ist of post_id for new ones .
124
12
15,850
async def get_last_updated ( self , source_id ) : last_updated = await self . _db . get_last_updated ( source_id ) logger . info ( "LAST UPDATED: {} {}" . format ( last_updated , self ) ) return last_updated
Returns latest update - timestamp from storage for source .
63
10
15,851
def clearParameters ( self ) : self . beginRemoveRows ( QtCore . QModelIndex ( ) , 0 , self . rowCount ( ) ) self . model . clear_parameters ( ) self . endRemoveRows ( )
Removes all parameters from model
50
6
15,852
def insertRows ( self , position , rows , parent = QtCore . QModelIndex ( ) ) : self . beginInsertRows ( parent , position , position + rows - 1 ) for i in range ( rows ) : self . model . insertRow ( position ) # self._selectionmap[self._paramid].hintRequested.connect(self.hintRequested) self . endInsertRows ( ) if self . rowCount ( ) == 1 : self . emptied . emit ( False ) return True
Inserts new parameters and emits an emptied False signal
110
10
15,853
def removeRows ( self , position , rows , parent = QtCore . QModelIndex ( ) ) : self . beginRemoveRows ( parent , position , position + rows - 1 ) for i in range ( rows ) : self . model . removeRow ( position ) # cannot purge selection model, or else we have no way of # recovering it when reordering self . endRemoveRows ( ) if self . rowCount ( ) == 0 : self . emptied . emit ( True ) return True
Removes parameters from the model . Emits and emptied True signal if there are no parameters left .
104
20
15,854
def toggleSelection ( self , index , comp ) : self . model . toggleSelection ( index . row ( ) , comp )
Toggles a component in or out of the currently selected parameter s compnents list
28
17
15,855
def parseruninfo ( self ) : # Check if the RunInfo.xml file is provided, otherwise, yield N/A try : runinfo = ElementTree . ElementTree ( file = self . runinfo ) # Get the run id from the for elem in runinfo . iter ( ) : for run in elem : try : self . runid = run . attrib [ 'Id' ] self . runnumber = run . attrib [ 'Number' ] except KeyError : break # pull the text from flowcell and instrument values using the .iter(tag="X") function for elem in runinfo . iter ( tag = "Flowcell" ) : self . flowcell = elem . text for elem in runinfo . iter ( tag = "Instrument" ) : self . instrument = elem . text except IOError : pass # Extract run statistics from either GenerateRunStatistics.xml or indexingQC.txt self . parserunstats ( )
Extracts the flowcell ID as well as the instrument name from RunInfo . xml . If this file is not provided NA values are substituted
205
29
15,856
def get_lines ( self ) : with open ( self . path , "r" ) as data : self . lines = data . readlines ( ) # store data in arrays return self . lines
Gets lines in file
41
5
15,857
def get_matrix ( self ) : data = [ ] with open ( self . path , encoding = self . encoding ) as csv_file : csv_reader = csv . reader ( csv_file , delimiter = "," , quotechar = "\"" ) for row in csv_reader : data . append ( row ) return data
Stores values in array store lines in array
76
9
15,858
def get_dicts ( self ) : reader = csv . DictReader ( open ( self . path , "r" , encoding = self . encoding ) ) for row in reader : if row : yield row
Gets dicts in file
45
6
15,859
def get_by_id ( self , id_code : str ) -> Currency or None : try : return [ _ for _ in self . currencies if _ . id == id_code ] [ 0 ] except IndexError : return None
Get currency by ID
49
4
15,860
def get_stimuli_models ( ) : package_path = os . path . dirname ( __file__ ) mod = '.' . join ( get_stimuli_models . __module__ . split ( '.' ) ) if mod == '__main__' : mod = '' else : mod = mod + '.' module_files = glob . glob ( package_path + os . sep + '[a-zA-Z]*.py' ) module_names = [ os . path . splitext ( os . path . basename ( x ) ) [ 0 ] for x in module_files ] module_paths = [ mod + x for x in module_names ] modules = [ __import__ ( x , fromlist = [ '*' ] ) for x in module_paths ] stimuli = [ ] for module in modules : for name , attr in module . __dict__ . iteritems ( ) : #test if attr is subclass of AbstractStim if type ( attr ) == type and issubclass ( attr , AbstractStimulusComponent ) : # print 'found subclass', name, '!!!' stimuli . append ( attr ) # print stimuli return stimuli
Returns all subclasses of AbstractStimulusComponent in python files in this package
255
16
15,861
def parse_wait_time ( text : str ) -> int : val = RATELIMIT . findall ( text ) if len ( val ) > 0 : try : res = val [ 0 ] if res [ 1 ] == 'minutes' : return int ( res [ 0 ] ) * 60 if res [ 1 ] == 'seconds' : return int ( res [ 0 ] ) except Exception as e : util_logger . warning ( 'Could not parse ratelimit: ' + str ( e ) ) return 1 * 60
Parse the waiting time from the exception
112
8
15,862
def check_comment_depth ( comment : praw . models . Comment , max_depth = 3 ) -> bool : count = 0 while not comment . is_root : count += 1 if count > max_depth : return False comment = comment . parent ( ) return True
Check if comment is in a allowed depth range
57
9
15,863
def get_subs ( subs_file = 'subreddits.txt' , blacklist_file = 'blacklist.txt' ) -> List [ str ] : # Get subs and blacklisted subs subsf = open ( subs_file ) blacklf = open ( blacklist_file ) subs = [ b . lower ( ) . replace ( '\n' , '' ) for b in subsf . readlines ( ) ] blacklisted = [ b . lower ( ) . replace ( '\n' , '' ) for b in blacklf . readlines ( ) ] subsf . close ( ) blacklf . close ( ) # Filter blacklisted subs_filtered = list ( sorted ( set ( subs ) . difference ( set ( blacklisted ) ) ) ) return subs_filtered
Get subs based on a file of subreddits and a file of blacklisted subreddits .
163
16
15,864
def enable_all_links ( ) : for link in client . list_data_links ( instance = 'simulator' ) : client . enable_data_link ( instance = link . instance , link = link . name )
Enable all links .
48
4
15,865
def load_a3m ( fasta , max_gap_fraction = 0.9 ) : mapping = { '-' : 21 , 'A' : 1 , 'B' : 21 , 'C' : 2 , 'D' : 3 , 'E' : 4 , 'F' : 5 , 'G' : 6 , 'H' : 7 , 'I' : 8 , 'K' : 9 , 'L' : 10 , 'M' : 11 , 'N' : 12 , 'O' : 21 , 'P' : 13 , 'Q' : 14 , 'R' : 15 , 'S' : 16 , 'T' : 17 , 'V' : 18 , 'W' : 19 , 'Y' : 20 , 'U' : 21 , 'Z' : 21 , 'X' : 21 , 'J' : 21 } # We want to exclude the lowercase, not ignore the uppercase because of gaps. lowercase = set ( 'abcdefghijklmnopqrstuvwxyz' ) # Figure out the length of the sequence f = open ( fasta ) for line in f : if line . startswith ( '>' ) : continue seq_length = len ( line . strip ( ) ) break else : raise RuntimeError ( 'I cannot find the first sequence' ) f . seek ( 0 ) parsed = [ ] for line in f : if line . startswith ( '>' ) : continue line = line . strip ( ) gap_fraction = line . count ( '-' ) / seq_length if gap_fraction <= max_gap_fraction : parsed . append ( [ mapping . get ( ch , 22 ) for ch in line if ch not in lowercase ] ) return np . array ( parsed , dtype = np . int8 ) . T
load alignment with the alphabet used in GaussDCA
394
11
15,866
def run_plasmid_extractor ( self ) : logging . info ( 'Extracting plasmids' ) # Define the system call extract_command = 'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc' . format ( inf = self . path , outf = self . plasmid_output , plasdb = os . path . join ( self . plasmid_db , 'plasmid_db.fasta' ) , db = self . plasmid_db , cpus = self . cpus ) # Only attempt to extract plasmids if the report doesn't already exist if not os . path . isfile ( self . plasmid_report ) : # Run the system calls out , err = run_subprocess ( extract_command ) # Acquire thread lock, and write the logs to file self . threadlock . acquire ( ) write_to_logfile ( extract_command , extract_command , self . logfile ) write_to_logfile ( out , err , self . logfile ) self . threadlock . release ( )
Create and run the plasmid extractor system call
259
11
15,867
def parse_report ( self ) : logging . info ( 'Parsing Plasmid Extractor outputs' ) # A dictionary to store the parsed excel file in a more readable format nesteddictionary = dict ( ) # Use pandas to read in the CSV file, and convert the pandas data frame to a dictionary (.to_dict()) dictionary = pandas . read_csv ( self . plasmid_report ) . to_dict ( ) # Iterate through the dictionary - each header from the CSV file for header in dictionary : # Sample is the primary key, and value is the value of the cell for that primary key + header combination for sample , value in dictionary [ header ] . items ( ) : # Update the dictionary with the new data try : nesteddictionary [ sample ] . update ( { header : value } ) # Create the nested dictionary if it hasn't been created yet except KeyError : nesteddictionary [ sample ] = dict ( ) nesteddictionary [ sample ] . update ( { header : value } ) # Get the results into the metadata object for sample in self . metadata : # Initialise the plasmid extractor genobject setattr ( sample , self . analysistype , GenObject ( ) ) # Initialise the list of all plasmids sample [ self . analysistype ] . plasmids = list ( ) # Iterate through the dictionary of results for line in nesteddictionary : # Extract the sample name from the dictionary in a manner consistent with the rest of the COWBAT # pipeline e.g. 2014-SEQ-0276_S2_L001 becomes 2014-SEQ-0276 sample_name = nesteddictionary [ line ] [ 'Sample' ] # Use the filer method to extract the name name = list ( filer ( [ sample_name ] ) ) [ 0 ] # Ensure that the names match if name == sample . name : # Append the plasmid name extracted from the dictionary to the list of plasmids sample [ self . analysistype ] . plasmids . append ( nesteddictionary [ line ] [ 'Plasmid' ] ) # Copy the report to the folder containing all reports for the pipeline try : shutil . copyfile ( self . plasmid_report , os . path . join ( self . reportpath , 'plasmidReport.csv' ) ) except IOError : pass
Parse the plasmid extractor report and populate metadata objects
502
13
15,868
def object_clean ( self ) : for sample in self . metadata : try : delattr ( sample [ self . analysistype ] , 'aaidentity' ) delattr ( sample [ self . analysistype ] , 'aaalign' ) delattr ( sample [ self . analysistype ] , 'aaindex' ) delattr ( sample [ self . analysistype ] , 'ntalign' ) delattr ( sample [ self . analysistype ] , 'ntindex' ) delattr ( sample [ self . analysistype ] , 'dnaseq' ) delattr ( sample [ self . analysistype ] , 'blastresults' ) except AttributeError : pass
Remove large attributes from the metadata objects
146
7
15,869
def values ( self ) : if self . ui . hzBtn . isChecked ( ) : fscale = SmartSpinBox . Hz else : fscale = SmartSpinBox . kHz if self . ui . msBtn . isChecked ( ) : tscale = SmartSpinBox . MilliSeconds else : tscale = SmartSpinBox . Seconds return fscale , tscale
Gets the scales that the user chose
88
8
15,870
def insert_trie ( trie , value ) : # aka get_subtrie_or_insert if value in trie : return trie [ value ] multi_check = False for key in tuple ( trie . keys ( ) ) : if len ( value ) > len ( key ) and value . startswith ( key ) : return insert_trie ( trie [ key ] , value ) elif key . startswith ( value ) : # we know the value is not in the trie if not multi_check : trie [ value ] = { } multi_check = True # there can be multiple longer existing prefixes dict_ = trie . pop ( key ) # does not break strie since key<->dict_ remains unchanged trie [ value ] [ key ] = dict_ if value not in trie : trie [ value ] = { } return trie [ value ]
Insert a value into the trie if it is not already contained in the trie . Return the subtree for the value regardless of whether it is a new value or not .
193
36
15,871
def get_valid_cell_indecies ( self ) : return pd . DataFrame ( self ) . groupby ( self . frame_columns ) . apply ( lambda x : list ( x [ 'cell_index' ] ) ) . reset_index ( ) . rename ( columns = { 0 : 'valid' } )
Return a dataframe of images present with valid being a list of cell indecies that can be included
71
20
15,872
def prune_neighbors ( self ) : def _neighbor_check ( neighbors , valid ) : if not neighbors == neighbors : return np . nan valid_keys = set ( valid ) & set ( neighbors . keys ( ) ) d = dict ( [ ( k , v ) for k , v in neighbors . items ( ) if k in valid_keys ] ) return d fixed = self . copy ( ) valid = self . get_valid_cell_indecies ( ) valid = pd . DataFrame ( self ) . merge ( valid , on = self . frame_columns ) . set_index ( self . frame_columns + [ 'cell_index' ] ) valid = valid . apply ( lambda x : _neighbor_check ( x [ 'neighbors' ] , x [ 'valid' ] ) , 1 ) . reset_index ( ) . rename ( columns = { 0 : 'new_neighbors' } ) fixed = fixed . merge ( valid , on = self . frame_columns + [ 'cell_index' ] ) . drop ( columns = 'neighbors' ) . rename ( columns = { 'new_neighbors' : 'neighbors' } ) fixed . microns_per_pixel = self . microns_per_pixel fixed . db = self . db #fixed.loc[:,'neighbors'] = list(new_neighbors) return fixed
If the CellDataFrame has been subsetted some of the cell - cell contacts may no longer be part of the the dataset . This prunes those no - longer existant connections .
308
37
15,873
def to_hdf ( self , path , key , mode = 'a' ) : pd . DataFrame ( self . serialize ( ) ) . to_hdf ( path , key , mode = mode , format = 'table' , complib = 'zlib' , complevel = 9 ) f = h5py . File ( path , 'r+' ) f [ key ] . attrs [ "microns_per_pixel" ] = float ( self . microns_per_pixel ) if self . microns_per_pixel is not None else np . nan f . close ( )
Save the CellDataFrame to an hdf5 file .
130
12
15,874
def phenotypes_to_scored ( self , phenotypes = None , overwrite = False ) : if not self . is_uniform ( ) : raise ValueError ( "inconsistent phenotypes" ) if phenotypes is None : phenotypes = self . phenotypes elif isinstance ( phenotypes , str ) : phenotypes = [ phenotypes ] def _post ( binary , phenotype_label , phenotypes , overwrite ) : d = binary . copy ( ) if len ( set ( phenotypes ) & set ( list ( binary . keys ( ) ) ) ) > 0 and overwrite == False : raise ValueError ( "Error, phenotype already exists as a scored type" ) for label in phenotypes : d [ label ] = 0 if phenotype_label == phenotype_label and phenotype_label in phenotypes : d [ phenotype_label ] = 1 return d output = self . copy ( ) output [ 'scored_calls' ] = output . apply ( lambda x : _post ( x [ 'scored_calls' ] , x [ 'phenotype_label' ] , phenotypes , overwrite ) , 1 ) return output
Add mutually exclusive phenotypes to the scored calls
239
9
15,875
def concat ( self , array_like ) : arr = list ( array_like ) if len ( set ( [ x . microns_per_pixel for x in arr ] ) ) != 1 : raise ValueError ( "Multiple microns per pixel set" ) cdf = CellDataFrame ( pd . concat ( [ pd . DataFrame ( x ) for x in arr ] ) ) cdf . microns_per_pixel = arr [ 0 ] . microns_per_pixel return cdf
Concatonate multiple CellDataFrames
109
8
15,876
def read_hdf ( cls , path , key = None ) : df = pd . read_hdf ( path , key ) df [ 'scored_calls' ] = df [ 'scored_calls' ] . apply ( lambda x : json . loads ( x ) ) df [ 'channel_values' ] = df [ 'channel_values' ] . apply ( lambda x : json . loads ( x ) ) df [ 'regions' ] = df [ 'regions' ] . apply ( lambda x : json . loads ( x ) ) df [ 'phenotype_calls' ] = df [ 'phenotype_calls' ] . apply ( lambda x : json . loads ( x ) ) df [ 'neighbors' ] = df [ 'neighbors' ] . apply ( lambda x : json . loads ( x ) ) df [ 'neighbors' ] = df [ 'neighbors' ] . apply ( lambda x : np . nan if not isinstance ( x , dict ) else dict ( zip ( [ int ( y ) for y in x . keys ( ) ] , x . values ( ) ) ) ) df [ 'frame_shape' ] = df [ 'frame_shape' ] . apply ( lambda x : tuple ( json . loads ( x ) ) ) df = cls ( df ) f = h5py . File ( path , 'r' ) mpp = f [ key ] . attrs [ "microns_per_pixel" ] if not np . isnan ( mpp ) : df . microns_per_pixel = mpp f . close ( ) return df
Read a CellDataFrame from an hdf5 file .
352
12
15,877
def serialize ( self ) : df = self . copy ( ) df [ 'scored_calls' ] = df [ 'scored_calls' ] . apply ( lambda x : json . dumps ( x ) ) df [ 'channel_values' ] = df [ 'channel_values' ] . apply ( lambda x : json . dumps ( x ) ) df [ 'regions' ] = df [ 'regions' ] . apply ( lambda x : json . dumps ( x ) ) df [ 'phenotype_calls' ] = df [ 'phenotype_calls' ] . apply ( lambda x : json . dumps ( x ) ) df [ 'neighbors' ] = df [ 'neighbors' ] . apply ( lambda x : json . dumps ( x ) ) df [ 'frame_shape' ] = df [ 'frame_shape' ] . apply ( lambda x : json . dumps ( x ) ) return df
Convert the data to one that can be saved in h5 structures
202
14
15,878
def contacts ( self , * args , * * kwargs ) : n = Contacts . read_cellframe ( self , prune_neighbors = True ) if 'measured_regions' in kwargs : n . measured_regions = kwargs [ 'measured_regions' ] else : n . measured_regions = self . get_measured_regions ( ) if 'measured_phenotypes' in kwargs : n . measured_phenotypes = kwargs [ 'measured_phenotypes' ] else : n . measured_phenotypes = self . phenotypes n . microns_per_pixel = self . microns_per_pixel return n
Use assess the cell - to - cell contacts recorded in the celldataframe
152
15
15,879
def cartesian ( self , subsets = None , step_pixels = 100 , max_distance_pixels = 150 , * args , * * kwargs ) : n = Cartesian . read_cellframe ( self , subsets = subsets , step_pixels = step_pixels , max_distance_pixels = max_distance_pixels , prune_neighbors = False , * args , * * kwargs ) if 'measured_regions' in kwargs : n . measured_regions = kwargs [ 'measured_regions' ] else : n . measured_regions = self . get_measured_regions ( ) if 'measured_phenotypes' in kwargs : n . measured_phenotypes = kwargs [ 'measured_phenotypes' ] else : n . measured_phenotypes = self . phenotypes n . microns_per_pixel = self . microns_per_pixel return n
Return a class that can be used to create honeycomb plots
213
12
15,880
def counts ( self , * args , * * kwargs ) : n = Counts . read_cellframe ( self , prune_neighbors = False ) if 'measured_regions' in kwargs : n . measured_regions = kwargs [ 'measured_regions' ] else : n . measured_regions = self . get_measured_regions ( ) if 'measured_phenotypes' in kwargs : n . measured_phenotypes = kwargs [ 'measured_phenotypes' ] else : n . measured_phenotypes = self . phenotypes n . microns_per_pixel = self . microns_per_pixel if 'minimum_region_size_pixels' in kwargs : n . minimum_region_size_pixels = kwargs [ 'minimum_region_size_pixels' ] else : n . minimum_region_size_pixels = 1 return n
Return a class that can be used to access count densities
208
12
15,881
def merge_scores ( self , df_addition , reference_markers = 'all' , addition_markers = 'all' , on = [ 'project_name' , 'sample_name' , 'frame_name' , 'cell_index' ] ) : if isinstance ( reference_markers , str ) : reference_markers = self . scored_names elif reference_markers is None : reference_markers = [ ] if isinstance ( addition_markers , str ) : addition_markers = df_addition . scored_names elif addition_markers is None : addition_markers = [ ] df_addition = df_addition . copy ( ) df_addition [ '_key' ] = 1 df = self . merge ( df_addition [ [ 'scored_calls' , '_key' ] + on ] . rename ( columns = { 'scored_calls' : '_addition' } ) , on = on , how = 'left' ) df [ '_sub1' ] = df [ 'scored_calls' ] . apply ( lambda x : dict ( ( k , x [ k ] ) for k in reference_markers ) ) df [ '_sub2' ] = df [ '_addition' ] . apply ( lambda x : dict ( { } ) if x != x else dict ( ( k , x [ k ] ) for k in addition_markers ) # handle NaN where we fail to match properly treat as empty ) # combine the two dictionaries df [ 'scored_calls' ] = df . apply ( lambda x : { * * x [ '_sub1' ] , * * x [ '_sub2' ] } , 1 ) df = df . drop ( columns = [ '_sub1' , '_sub2' , '_addition' ] ) df = df . drop ( columns = '_key' ) . copy ( ) , df [ df [ '_key' ] . isna ( ) ] . drop ( columns = '_key' ) . copy ( ) if self . microns_per_pixel : df [ 0 ] . microns_per_pixel = self . microns_per_pixel if self . microns_per_pixel : df [ 1 ] . microns_per_pixel = self . microns_per_pixel return df
Combine CellDataFrames that differ by score composition
520
10
15,882
def zero_fill_missing_phenotypes ( self ) : if self . is_uniform ( verbose = False ) : return self . copy ( ) output = self . copy ( ) def _do_fill ( d , names ) : old_names = list ( d . keys ( ) ) old_values = list ( d . values ( ) ) missing = set ( names ) - set ( old_names ) return dict ( zip ( old_names + list ( missing ) , old_values + ( [ 0 ] * len ( missing ) ) ) ) ## Need to make these uniform pnames = self . phenotypes output [ 'phenotype_calls' ] = output . apply ( lambda x : _do_fill ( x [ 'phenotype_calls' ] , pnames ) , 1 ) return output
Fill in missing phenotypes and scored types by listing any missing data as negative
173
15
15,883
def drop_scored_calls ( self , names ) : def _remove ( calls , names ) : d = dict ( [ ( k , v ) for k , v in calls . items ( ) if k not in names ] ) return d if isinstance ( names , str ) : names = [ names ] output = self . copy ( ) output [ 'scored_calls' ] = output [ 'scored_calls' ] . apply ( lambda x : _remove ( x , names ) ) return output
Take a name or list of scored call names and drop those from the scored calls
110
16
15,884
def subset ( self , logic , update = False ) : pnames = self . phenotypes snames = self . scored_names data = self . copy ( ) values = [ ] phenotypes = logic . phenotypes if len ( phenotypes ) == 0 : phenotypes = pnames removing = set ( self . phenotypes ) - set ( phenotypes ) for k in phenotypes : if k not in pnames : raise ValueError ( "phenotype must exist in defined" ) temp = data . loc [ data [ 'phenotype_calls' ] . apply ( lambda x : x [ k ] == 1 ) ] . copy ( ) if len ( removing ) > 0 and temp . shape [ 0 ] > 0 : temp [ 'phenotype_calls' ] = temp . apply ( lambda x : dict ( [ ( k , v ) for k , v in x [ 'phenotype_calls' ] . items ( ) if k not in removing ] ) , 1 ) values . append ( temp ) data = pd . concat ( values ) for k , v in logic . scored_calls . items ( ) : if k not in snames : raise ValueError ( "Scored name must exist in defined" ) myfilter = 0 if v == '-' else 1 data = data . loc [ data [ 'scored_calls' ] . apply ( lambda x : x [ k ] == myfilter ) ] data . microns_per_pixel = self . microns_per_pixel if update : data [ 'phenotype_calls' ] = data [ 'phenotype_calls' ] . apply ( lambda x : { logic . label : 1 } ) data . fill_phenotype_label ( inplace = True ) data . db = self . db return data
subset create a specific phenotype based on a logic logic is a SubsetLogic class take union of all the phenotypes listed . If none are listed use all phenotypes . take the intersection of all the scored calls .
379
45
15,885
def collapse_phenotypes ( self , input_phenotype_labels , output_phenotype_label , verbose = True ) : if isinstance ( input_phenotype_labels , str ) : input_phenotype_labels = [ input_phenotype_labels ] bad_phenotypes = set ( input_phenotype_labels ) - set ( self . phenotypes ) if len ( bad_phenotypes ) > 0 : raise ValueError ( "Error phenotype(s) " + str ( bad_phenotypes ) + " are not in the data." ) data = self . copy ( ) if len ( input_phenotype_labels ) == 0 : return data def _swap_in ( d , inputs , output ) : # Get the keys we need to merge together overlap = set ( d . keys ( ) ) . intersection ( inputs ) # if there are none to merge we're done already if len ( overlap ) == 0 : return d keepers = [ ( k , v ) for k , v in d . items ( ) if k not in inputs ] # combine anything thats not a keeper return dict ( keepers + [ ( output_phenotype_label , max ( [ d [ x ] for x in overlap ] ) ) ] ) data [ 'phenotype_calls' ] = data . apply ( lambda x : _swap_in ( x [ 'phenotype_calls' ] , input_phenotype_labels , output_phenotype_label ) , 1 ) def _set_label ( d ) : vals = [ k for k , v in d . items ( ) if v == 1 ] return np . nan if len ( vals ) == 0 else vals [ 0 ] data [ 'phenotype_label' ] = data . apply ( lambda x : _set_label ( x [ 'phenotype_calls' ] ) , 1 ) return data
Rename one or more input phenotypes to a single output phenotype
406
13
15,886
def fill_phenotype_label ( self , inplace = False ) : def _get_phenotype ( d ) : vals = [ k for k , v in d . items ( ) if v == 1 ] return np . nan if len ( vals ) == 0 else vals [ 0 ] if inplace : if self . shape [ 0 ] == 0 : return self self [ 'phenotype_label' ] = self . apply ( lambda x : _get_phenotype ( x [ 'phenotype_calls' ] ) , 1 ) return fixed = self . copy ( ) if fixed . shape [ 0 ] == 0 : return fixed fixed [ 'phenotype_label' ] = fixed . apply ( lambda x : _get_phenotype ( x [ 'phenotype_calls' ] ) , 1 ) return fixed
Set the phenotype_label column according to our rules for mutual exclusion
176
13
15,887
def fill_phenotype_calls ( self , phenotypes = None , inplace = False ) : if phenotypes is None : phenotypes = list ( self [ 'phenotype_label' ] . unique ( ) ) def _get_calls ( label , phenos ) : d = dict ( [ ( x , 0 ) for x in phenos ] ) if label != label : return d # np.nan case d [ label ] = 1 return d if inplace : self [ 'phenotype_calls' ] = self . apply ( lambda x : _get_calls ( x [ 'phenotype_label' ] , phenotypes ) , 1 ) return fixed = self . copy ( ) fixed [ 'phenotype_calls' ] = fixed . apply ( lambda x : _get_calls ( x [ 'phenotype_label' ] , phenotypes ) , 1 ) return fixed
Set the phenotype_calls according to the phenotype names
191
11
15,888
def scored_to_phenotype ( self , phenotypes ) : def _apply_score ( scored_calls , phenotypes ) : present = sorted ( list ( set ( phenotypes ) & set ( scored_calls . keys ( ) ) ) ) total = sum ( [ scored_calls [ x ] for x in present ] ) if total > 1 : raise ValueError ( "You cant extract phenotypes from scores if they are not mutually exclusive" ) if total == 0 : return np . nan for label in present : if scored_calls [ label ] == 1 : return label raise ValueError ( "Should have hit an exit criteria already" ) output = self . copy ( ) output [ 'phenotype_label' ] = output . apply ( lambda x : _apply_score ( x [ 'scored_calls' ] , phenotypes ) , 1 ) # now update the phenotypes with these output [ 'phenotype_calls' ] = output . apply ( lambda x : dict ( [ ( y , 1 if x [ 'phenotype_label' ] == y else 0 ) for y in phenotypes ] ) , 1 ) return output
Convert binary pehnotypes to mutually exclusive phenotypes . If none of the phenotypes are set then phenotype_label becomes nan If any of the phenotypes are multiply set then it throws a fatal error .
244
43
15,889
def issue_and_listen_to_command_history ( ) : def tc_callback ( rec ) : print ( 'TC:' , rec ) command = processor . issue_command ( '/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF' , args = { 'voltage_num' : 1 , } , comment = 'im a comment' ) command . create_command_history_subscription ( on_data = tc_callback )
Listen to command history updates of a single issued command .
99
11
15,890
def get_many2many_table ( table1 , table2 ) : table_name = ( '{}{}__{}' . format ( TABLE_PREFIX , table1 , table2 ) ) return Table ( table_name , Base . metadata , Column ( '{}_id' . format ( table1 ) , Integer , ForeignKey ( '{}{}.id' . format ( TABLE_PREFIX , table1 ) ) ) , Column ( '{}_id' . format ( table2 ) , Integer , ForeignKey ( '{}{}.id' . format ( TABLE_PREFIX , table2 ) ) ) )
Creates a many - to - many table that links the given tables table1 and table2 .
138
20
15,891
async def search ( self , regex ) : coro = self . _loop . run_in_executor ( None , self . _search , regex ) match = await coro return match
Wraps the search for a match in an executor _ and awaits for it .
41
17
15,892
def show_help ( self ) : print ( "Sorry, not well understood." ) print ( "- use" , str ( self . yes_input ) , "to answer 'YES'" ) print ( "- use" , str ( self . no_input ) , "to answer 'NO'" )
Prints to stdout help on how to answer properly
62
11
15,893
def re_ask ( self , with_help = True ) : if with_help : self . show_help ( ) return self . get_answer ( self . last_question )
Re - asks user the last question
39
7
15,894
def get_answer ( self , question ) : self . last_question = str ( question ) . strip ( ) user_answer = input ( self . last_question ) return user_answer . strip ( )
Asks user a question then gets user answer
44
9
15,895
def get_number ( self , question , min_i = float ( "-inf" ) , max_i = float ( "inf" ) , just_these = None ) : try : user_answer = self . get_answer ( question ) user_answer = float ( user_answer ) if min_i < user_answer < max_i : if just_these : if user_answer in just_these : return user_answer exc = "Number cannot be accepted. Just these: " exc += str ( just_these ) raise Exception ( exc ) return user_answer exc = "Number is not within limits. " exc += "Min is " + str ( min_i ) + ". Max is " + str ( max_i ) + "" raise Exception ( exc ) except Exception as exc : print ( str ( exc ) ) return self . get_number ( self . last_question , min_i = min_i , max_i = max_i , just_these = just_these )
Parses answer and gets number
213
7
15,896
def get_list(self, question, splitter=",", at_least=0, at_most=float("inf")):
    """Ask for a list of items separated by *splitter*.

    The reply is split on *splitter*, each item is stripped of
    surrounding whitespace, and the list is accepted only when its
    length lies strictly between *at_least* and *at_most*.  On any
    invalid reply the error is printed and the question is re-asked
    until a valid list is entered.
    """
    try:
        user_answer = self.get_answer(question)  # ask question
        items = [str(item).strip() for item in user_answer.split(splitter)]
        if at_least < len(items) < at_most:
            return items
        exc = "List is not correct. "
        exc += "There must be at least " + str(at_least) + " items, "
        exc += "and at most " + str(at_most) + ". "
        exc += "Use '" + str(splitter) + "' to separate items"
        raise Exception(exc)
    except Exception as exc:
        print(str(exc))
        # BUG FIX: forward the custom splitter on re-ask. Previously the
        # recursive call omitted it, so retries silently split on the
        # default "," even when the caller asked for another separator.
        return self.get_list(self.last_question, splitter=splitter,
                             at_least=at_least, at_most=at_most)
Parses answer and gets list
224
7
15,897
def sanitize_type(raw_type):
    """Strip SAL/driver annotation noise out of *raw_type* and normalize it.

    Removes known annotation tokens, drops the leading underscore on the
    well-known exception struct pointers, and rewrites ``[]`` as ``*``.
    """
    # Pattern order matters: '_Frees_ptr_' fires before 'opt_' so the
    # leftover of '_Frees_ptr_opt_' is still cleaned up.
    noise_patterns = (
        r'__drv_aliasesMem',
        r'__drv_freesMem',
        r'__drv_strictTypeMatch\(\w+\)',
        r'__out_data_source\(\w+\)',
        r'_In_NLS_string_\(\w+\)',
        r'_Frees_ptr_',
        r'_Frees_ptr_opt_',
        r'opt_',
        r'\(Mem\) ',
    )
    cleaned = get_printable(raw_type).strip()
    for pattern in noise_patterns:
        cleaned = re.sub(pattern, '', cleaned).strip()
    if cleaned in ('_EXCEPTION_RECORD *', '_EXCEPTION_POINTERS *'):
        cleaned = cleaned.strip('_')
    return cleaned.replace('[]', '*')
Sanitize the raw type string .
209
8
15,898
def clean_ret_type(ret_type):
    """Normalize a parsed return-type string by removing known artifacts.

    Collapses the duplicated 'LRESULT LRESULT' form and strips a fixed
    set of calling-convention / annotation tokens, logging each removal.
    """
    junk_tokens = (
        'DECLSPEC_NORETURN',
        'NTSYSCALLAPI',
        '__kernel_entry',
        '__analysis_noreturn',
        '_Post_equals_last_error_',
        '_Maybe_raises_SEH_exception_',
        '_CRT_STDIO_INLINE',
        '_ACRTIMP',
    )
    ret_type = get_printable(ret_type).strip()
    if ret_type == 'LRESULT LRESULT':
        ret_type = 'LRESULT'
    for token in junk_tokens:
        if token in ret_type:
            ret_type = ret_type.replace(token, '').strip()
            logging.debug(_('cleaned %s'), token)
    return ret_type
Clean the erroneous parsed return type .
185
8
15,899
async def setup(self):
    """Create the DynamoDB table (and optional control table) if missing.

    Lists the existing tables once, then creates the main table and —
    when ``control_table_name`` is set — the control table for any name
    not already present.  Returns True when at least one table was
    created; False otherwise, including on any error (which is logged
    rather than raised).
    """
    try:
        client = await self.db
        existing = (await client.list_tables())["TableNames"]
        created = False

        if self.table_name not in existing:
            logger.info("Creating DynamoDB table [{}]".format(self.table_name))
            resp = await client.create_table(**self.table_schema)
            status = resp.get("ResponseMetadata", {}).get("HTTPStatusCode")
            if status == 200:
                logger.info("DynamoDB table [{}] successfully created!".format(self.table_name))
                created = True

        if self.control_table_name and self.control_table_name not in existing:
            logger.info("Creating DynamoDB control_table [{}]".format(self.control_table_name))
            resp = await client.create_table(**self.control_table_schema)
            status = resp.get("ResponseMetadata", {}).get("HTTPStatusCode")
            if status == 200:
                logger.info("DynamoDB control table [{}] successfully created!".format(self.control_table_name))
                created = True

        return created
    except Exception as exc:
        logger.error("[DB] Error when setting up DynamoDB.")
        logger.error(exc)
        return False
Sets up the DynamoDB table if it does not already exist .
322
10