positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def read_key_value_lines(path, separator=' ', default_value=''):
    """Read a two-column text file into a key/value dictionary.

    Parameters:
        path (str): Path to the file.
        separator (str): Separator that is used to split key and value.
        default_value (str): If no value is given this value is used.

    Returns:
        dict: A dictionary with first column as key and second as value.
    """
    result = {}
    for fields in read_separated_lines_generator(path, separator, 2):
        # Lines with no columns at all are skipped entirely.
        if not fields:
            continue
        # A missing second column falls back to the default value.
        result[fields[0]] = fields[1] if len(fields) > 1 else default_value
    return result
Reads lines of a text file with two columns as key/value dictionary. Parameters: path (str): Path to the file. separator (str): Separator that is used to split key and value. default_value (str): If no value is given this value is used. Returns: dict: A dictionary with first column as key and second as value.
def get_object(self, queryset=None):
    """Return the object the view is displaying.

    Copied from SingleObjectMixin except that this allows us to look up
    preview objects: active staff users may pass a ``vid`` query parameter
    to fetch a specific version from the ``public`` schema.

    :param queryset: optional queryset to look the object up in; defaults
        to ``self.get_queryset()``.
    :returns: the single matching model instance.
    :raises AttributeError: if neither a pk nor a slug was supplied.
    :raises http.Http404: if no matching object exists.
    """
    schema = manager.get_schema()
    vid = None
    # Staff preview mode: a valid integer ``vid`` switches to the public
    # schema and restricts the queryset to that exact version.
    if self.request.GET.get('vid') and self.request.user.is_staff and \
            self.request.user.is_active:
        try:
            schema = 'public'
            vid = int(self.request.GET.get('vid'))
            queryset = self.model.normal.filter(vid=vid)
        except ValueError:
            # Non-numeric vid: silently ignore and fall back to normal lookup.
            pass
    with manager.SwitchSchema(schema):
        # Use a custom queryset if provided
        if queryset is None:
            queryset = self.get_queryset()
        # Next, try looking up by primary key.
        pk = self.kwargs.get(self.pk_url_kwarg, None)
        slug = self.kwargs.get(self.slug_url_kwarg, None)
        if pk is not None:
            if vid:
                # Preview lookup: the version id uniquely identifies the row.
                queryset = queryset.filter(vid=vid)
            else:
                queryset = queryset.filter(object_id=pk)
        # Next, try looking up by slug.
        elif slug is not None:
            slug_field = self.get_slug_field()
            queryset = queryset.filter(**{slug_field: slug})
        # If none of those are defined, it's an error.
        else:
            raise AttributeError(u"View %s must be called with "
                                 u"either an object pk or a slug."
                                 % self.__class__.__name__)
        try:
            obj = queryset.get()
        except queryset.model.DoesNotExist:
            raise http.Http404(
                u"No %(verbose_name)s found matching the query" %
                {'verbose_name': queryset.model._meta.verbose_name})
        return obj
Returns the object the view is displaying. Copied from SingleObjectMixin except that this allows us to lookup preview objects.
def present(self, path, timeout=0):
    """Return True if there is an entity at *path*."""
    status, payload = self.sendmess(
        MSG_PRESENCE, str2bytez(path), timeout=timeout)
    # A presence reply must carry no payload and a non-positive status code.
    assert status <= 0 and not payload, (status, payload)
    # A negative status signals absence; zero signals presence.
    return status >= 0
returns True if there is an entity at path
def is_Type(tp):
    """Python version independent check if an object is a type.

    For Python 3.7 onwards(?) this is not equivalent to
    ``isinstance(tp, type)`` any more, as that call would return ``False``
    for PEP 484 types. Tested with CPython 2.7, 3.5, 3.6, 3.7 and
    Jython 2.7.1.
    """
    if isinstance(tp, type):
        return True
    # Fall back to typing's own validator, which also accepts PEP 484 types.
    try:
        typing._type_check(tp, '')
    except TypeError:
        return False
    return True
Python version independent check if an object is a type. For Python 3.7 onwards(?) this is not equivalent to ``isinstance(tp, type)`` any more, as that call would return ``False`` for PEP 484 types. Tested with CPython 2.7, 3.5, 3.6, 3.7 and Jython 2.7.1.
def _analyze_function(self):
    """Derive a calling convention for this function from recovered variables.

    Go over the variable information in the variable manager for this
    function, collect its input (register/stack) variables, and try to
    find a :class:`SimCC` that fits them.

    :return: the matching calling convention, or None if variable recovery
        has not been run yet or no convention fits the arguments.
    """
    # SimProcedures and PLT stubs have no recovered variables of their own;
    # for real functions we require variable recovery to have run first.
    if not self._function.is_simprocedure \
            and not self._function.is_plt \
            and not self._variable_manager.has_function_manager(self._function.addr):
        l.warning("Please run variable recovery on %s before analyzing its calling conventions.", repr(self._function))
        return None
    vm = self._variable_manager[self._function.addr]
    input_variables = vm.input_variables()
    # Translate recovered variables into candidate function arguments.
    input_args = self._args_from_vars(input_variables)
    # TODO: properly decide sp_delta
    # Account for the return address pushed on the stack by the call, when
    # the architecture does that.
    sp_delta = self.project.arch.bytes if self.project.arch.call_pushes_ret else 0
    cc = SimCC.find_cc(self.project.arch, list(input_args), sp_delta)
    if cc is None:
        l.warning('_analyze_function(): Cannot find a calling convention that fits the given arguments.')
    return cc
Go over the variable information in variable manager for this function, and return all uninitialized register/stack variables. :return:
def entities(self, name_id):
    """Return all the entities of assertions for a subject, disregarding
    whether the assertion still is valid or not.

    :param name_id: The subject identifier, a NameID instance
    :return: A possibly empty list of entity identifiers
    """
    # The subject's NameID is encoded into the string key used by the db.
    subject_key = code(name_id)
    return [entity_id for entity_id in self._db[subject_key]]
Returns all the entities of assertions for a subject, disregarding whether the assertion still is valid or not. :param name_id: The subject identifier, a NameID instance :return: A possibly empty list of entity identifiers
def index_at_event(self, event):
    """Get the index under the position of the given MouseEvent.

    This implementation takes the indentation into account.

    :param event: the mouse event
    :type event: :class:`QtGui.QMouseEvent`
    :returns: the index
    :rtype: :class:`QtCore.QModelIndex`
    :raises: None
    """
    # Translate the global mouse position into viewport coordinates.
    local_pos = self.viewport().mapFromGlobal(event.globalPos())
    index = self.indexAt(local_pos)
    # Clicks left of (or on) the item's indentation do not count as hits.
    if local_pos.x() <= self.get_total_indentation(index):
        return QtCore.QModelIndex()
    return index
Get the index under the position of the given MouseEvent This implementation takes the indentation into account. :param event: the mouse event :type event: :class:`QtGui.QMouseEvent` :returns: the index :rtype: :class:`QtCore.QModelIndex` :raises: None
def explain_prediction_sklearn(estimator, doc, vec=None, top=None,
                               top_targets=None, target_names=None,
                               targets=None, feature_names=None,
                               feature_re=None, feature_filter=None,
                               vectorized=False):
    """Return an explanation of a scikit-learn estimator.

    Generic fallback: only ``estimator`` and ``doc`` are used here; the
    remaining keyword options are accepted for interface compatibility and
    passed over to the not-supported handler's explanation.
    """
    return explain_prediction_sklearn_not_supported(estimator, doc)
Return an explanation of a scikit-learn estimator
def _is_really_comment(tokens, index):
    """Return True if the token at ``index`` is really a comment.

    A token counts as a comment either when the tokenizer classified it as
    one, or when its content (left-stripped) starts with ``#`` — a comment
    in disguise.

    :param tokens: sequence of tokens exposing ``type`` and ``content``
    :param index: position of the token to inspect
    :return: True when the token is a comment, False otherwise
    """
    if tokens[index].type == TokenType.Comment:
        return True
    # Really a comment in disguise!  Empty content has no first character,
    # so it cannot carry a comment marker.  (The original fell through and
    # implicitly returned None for non-'#' content; now always a bool.)
    content = tokens[index].content.lstrip()
    return bool(content) and content[0] == "#"
Return true if the token at index is really a comment.
def variance_K(K, verbose=False):
    """Estimate the variance explained by the matrix ``K``.

    Computes ``sum((I - (1/n) * ones) * K) / (n - 1)`` with ``n = len(K)``,
    i.e. the sum of the element-wise centred kernel divided by ``n - 1``.

    :param K: square (n x n) array-like matrix (must expose ``.shape``)
    :param verbose: unused; kept for backward compatibility
    :return: the estimated variance as a float
    """
    n = len(K)
    # Element-wise product with the centring matrix, then summed.
    c = SP.sum((SP.eye(n) - (1.0 / n) * SP.ones(K.shape)) * SP.array(K))
    # Return c / (n - 1) directly instead of taking the reciprocal of a
    # scaling factor -- same value, one fewer division.
    return c / (n - 1)
estimate the variance explained by K
def set_image(self, user, image_path):
    """Set a custom image for the game.

    `image_path` should refer to an image file on disk; it is copied to
    this game's per-user custom image location, keeping its extension.
    """
    extension = os.path.splitext(image_path)[1]
    destination = self._custom_image_path(user, extension)
    shutil.copy(image_path, destination)
Sets a custom image for the game. `image_path` should refer to an image file on disk
def _aggregate_config_values(config_values: typing.List[ConfigValue]) -> dict:
    """
    Returns a (sorted) nested dictionary of config values.

    Each value's ``path`` is split on the configured separator; every
    component but the last becomes a nesting level, and the last component
    maps to the value object itself.

    :param config_values: flat list of config values to aggregate
    :type config_values: typing.List[ConfigValue]
    :return: nested plain dict with the ConfigValue objects as leaves
    :rtype: dict
    """
    # Nested defaultdict creates intermediate levels on first access.
    _keys: defaultdict = _nested_default_dict()
    # Sort by name so the resulting structure is built in deterministic order.
    _sorted_values = sorted(config_values, key=lambda x: x.name)
    for value in _sorted_values:
        value_keys = value.path.split(ELIBConfig.config_sep_str)
        this_config_key = _keys
        # Walk/create intermediate levels; the final component holds the value.
        for sub_key in value_keys[:-1]:
            this_config_key = this_config_key[sub_key]
        this_config_key[value_keys[-1]] = value
    return _default_dict_to_dict(_keys)
Returns a (sorted) nested dictionary of config values, one nesting level per path component, with the values as leaves. :param config_values: flat list of config values to aggregate :type config_values: typing.List[ConfigValue] :return: nested plain dict of config values :rtype: dict
def match_filter(template_names, template_list, st, threshold,
                 threshold_type, trig_int, plotvar, plotdir='.',
                 xcorr_func=None, concurrency=None, cores=None,
                 debug=0, plot_format='png', output_cat=False,
                 output_event=True, extract_detections=False,
                 arg_check=True, full_peaks=False, peak_cores=None,
                 **kwargs):
    """
    Main matched-filter detection function.

    Over-arching code to run the correlations of given templates with a
    day of seismic data and output the detections based on a given
    threshold. For a functional example see the tutorials.

    :type template_names: list
    :param template_names:
        List of template names in the same order as template_list
    :type template_list: list
    :param template_list:
        A list of templates of which each template is a
        :class:`obspy.core.stream.Stream` of obspy traces containing
        seismic data and header information.
    :type st: :class:`obspy.core.stream.Stream`
    :param st:
        A Stream object containing all the data available and required for
        the correlations with templates given. For efficiency this should
        contain no excess traces which are not in one or more of the
        templates.  This will now remove excess traces internally, but
        will copy the stream and work on the copy, leaving your input
        stream untouched.
    :type threshold: float
    :param threshold: A threshold value set based on the threshold_type
    :type threshold_type: str
    :param threshold_type:
        The type of threshold to be used, can be MAD, absolute or
        av_chan_corr. See Note on thresholding below.
    :type trig_int: float
    :param trig_int: Minimum gap between detections in seconds.
    :type plotvar: bool
    :param plotvar: Turn plotting on or off
    :type plotdir: str
    :param plotdir:
        Path to plotting folder, plots will be output here, defaults to
        run location.
    :type xcorr_func: str or callable
    :param xcorr_func:
        A str of a registered xcorr function or a callable for
        implementing a custom xcorr function. For more information see:
        :func:`eqcorrscan.utils.correlate.register_array_xcorr`
    :type concurrency: str
    :param concurrency:
        The type of concurrency to apply to the xcorr function. Options
        are 'multithread', 'multiprocess', 'concurrent'. For more details
        see :func:`eqcorrscan.utils.correlate.get_stream_xcorr`
    :type cores: int
    :param cores: Number of cores to use
    :type debug: int
    :param debug:
        Debug output level, the bigger the number, the more the output.
    :type plot_format: str
    :param plot_format: Specify format of output plots if saved
    :type output_cat: bool
    :param output_cat:
        Specifies if matched_filter will output an obspy.Catalog class
        containing events for each detection. Default is False, in which
        case matched_filter will output a list of detection classes, as
        normal.
    :type output_event: bool
    :param output_event:
        Whether to include events in the Detection objects, defaults to
        True, but for large cases you may want to turn this off as Event
        objects can be quite memory intensive.
    :type extract_detections: bool
    :param extract_detections:
        Specifies whether or not to return a list of streams, one stream
        per detection.
    :type arg_check: bool
    :param arg_check:
        Check arguments, defaults to True, but if running in bulk, and you
        are certain of your arguments, then set to False.
    :type full_peaks: bool
    :param full_peaks: See `eqcorrscan.core.findpeaks.find_peaks2_short`.
    :type peak_cores: int
    :param peak_cores:
        Number of processes to use for parallel peak-finding (if different
        to `cores`).

    .. note::
        **Returns:**

        If neither `output_cat` or `extract_detections` are set to `True`,
        then only the list of
        :class:`eqcorrscan.core.match_filter.Detection`'s will be output:

        :return:
            :class:`eqcorrscan.core.match_filter.Detection` detections for
            each detection made.
        :rtype: list

        If `output_cat` is set to `True`, then the
        :class:`obspy.core.event.Catalog` will also be output:

        :return: Catalog containing events for each detection, see above.
        :rtype: :class:`obspy.core.event.Catalog`

        If `extract_detections` is set to `True` then the list of
        :class:`obspy.core.stream.Stream`'s will also be output.

        :return:
            list of :class:`obspy.core.stream.Stream`'s for each
            detection, see above.
        :rtype: list

    .. note::
        If your data contain gaps these must be padded with zeros before
        using this function. The `eqcorrscan.utils.pre_processing`
        functions will provide gap-filled data in the appropriate format.
        Note that if you pad your data with zeros before filtering or
        resampling the gaps will not be all zeros after filtering. This
        will result in the calculation of spurious correlations in the
        gaps.

    .. Note::
        Detections are not corrected for `pre-pick`, the
        detection.detect_time corresponds to the beginning of the earliest
        template channel at detection.

    .. note::
        **Data overlap:**

        Internally this routine shifts and trims the data according to the
        offsets in the template (e.g. if trace 2 starts 2 seconds after
        trace 1 in the template then the continuous data will be shifted
        by 2 seconds to align peak correlations prior to summing).
        Because of this, detections at the start and end of continuous
        data streams **may be missed**.  The maximum time-period that
        might be missing detections is the maximum offset in the template.

        To work around this, if you are conducting matched-filter
        detections through long-duration continuous data, we suggest using
        some overlap (a few seconds, on the order of the maximum offset in
        the templates) in the continous data.  You will then need to
        post-process the detections (which should be done anyway to remove
        duplicates).

    .. note::
        **Thresholding:**

        **MAD** threshold is calculated as the:

        .. math::

            threshold {\\times} (median(abs(cccsum)))

        where :math:`cccsum` is the cross-correlation sum for a given
        template.

        **absolute** threshold is a true absolute threshold based on the
        cccsum value.

        **av_chan_corr** is based on the mean values of single-channel
        cross-correlations assuming all data are present as required for
        the template, e.g:

        .. math::

            av\_chan\_corr\_thresh=threshold \\times (cccsum\ /\ len(template))

        where :math:`template` is a single template from the input and the
        length is the number of channels within this template.

    .. note::
        The output_cat flag will create an
        :class:`obspy.core.event.Catalog` containing one event for each
        :class:`eqcorrscan.core.match_filter.Detection`'s generated by
        match_filter. Each event will contain a number of comments dealing
        with correlation values and channels used for the detection. Each
        channel used for the detection will have a corresponding
        :class:`obspy.core.event.Pick` which will contain time and
        waveform information. **HOWEVER**, the user should note that the
        pick times do not account for the prepick times inherent in each
        template. For example, if a template trace starts 0.1 seconds
        before the actual arrival of that phase, then the pick time
        generated by match_filter for that phase will be 0.1 seconds
        early.

    .. Note::
        xcorr_func can be used as follows:

        .. rubric::xcorr_func argument example

        >>> import obspy
        >>> import numpy as np
        >>> from eqcorrscan.core.match_filter import match_filter
        >>> from eqcorrscan.utils.correlate import time_multi_normxcorr
        >>> # define a custom xcorr function
        >>> def custom_normxcorr(templates, stream, pads, *args, **kwargs):
        ...     # Just to keep example short call other xcorr function
        ...     # in practice you would define your own function here
        ...     print('calling custom xcorr function')
        ...     return time_multi_normxcorr(templates, stream, pads)
        >>> # generate some toy templates and stream
        >>> random = np.random.RandomState(42)
        >>> template = obspy.read()
        >>> stream = obspy.read()
        >>> for num, tr in enumerate(stream):  # iter st and embed templates
        ...     data = tr.data
        ...     tr.data = random.randn(6000) * 5
        ...     tr.data[100: 100 + len(data)] = data
        >>> # call match_filter and ensure the custom function is used
        >>> detections = match_filter(
        ...     template_names=['1'], template_list=[template], st=stream,
        ...     threshold=.5, threshold_type='absolute', trig_int=1,
        ...     plotvar=False,
        ...     xcorr_func=custom_normxcorr)  # doctest:+ELLIPSIS
        calling custom xcorr function...
    """
    from eqcorrscan.utils.plotting import _match_filter_plot
    if arg_check:
        # Check the arguments to be nice - if arguments wrong type the parallel
        # output for the error won't be useful
        if not isinstance(template_names, list):
            raise MatchFilterError('template_names must be of type: list')
        if not isinstance(template_list, list):
            raise MatchFilterError('templates must be of type: list')
        if not len(template_list) == len(template_names):
            raise MatchFilterError('Not the same number of templates as names')
        for template in template_list:
            if not isinstance(template, Stream):
                msg = 'template in template_list must be of type: ' + \
                      'obspy.core.stream.Stream'
                raise MatchFilterError(msg)
        if not isinstance(st, Stream):
            msg = 'st must be of type: obspy.core.stream.Stream'
            raise MatchFilterError(msg)
        if str(threshold_type) not in [str('MAD'), str('absolute'),
                                       str('av_chan_corr')]:
            msg = 'threshold_type must be one of: MAD, absolute, av_chan_corr'
            raise MatchFilterError(msg)
        # All continuous data and all templates must share one sampling rate.
        for tr in st:
            if not tr.stats.sampling_rate == st[0].stats.sampling_rate:
                raise MatchFilterError('Sampling rates are not equal %f: %f' %
                                       (tr.stats.sampling_rate,
                                        st[0].stats.sampling_rate))
        for template in template_list:
            for tr in template:
                if not tr.stats.sampling_rate == st[0].stats.sampling_rate:
                    raise MatchFilterError(
                        'Template sampling rate does not '
                        'match continuous data')
    _spike_test(st)
    if cores is not None:
        parallel = True
    else:
        parallel = False
    # Copy the stream here because we will muck about with it
    stream = st.copy()
    templates = copy.deepcopy(template_list)
    _template_names = copy.deepcopy(template_names)
    # Debug option to confirm that the channel names match those in the
    # templates
    if debug >= 2:
        template_stachan = []
        data_stachan = []
        for template in templates:
            for tr in template:
                if isinstance(tr.data, np.ma.core.MaskedArray):
                    raise MatchFilterError('Template contains masked array,'
                                           ' split first')
                template_stachan.append(tr.stats.station + '.' +
                                        tr.stats.channel)
        for tr in stream:
            data_stachan.append(tr.stats.station + '.' + tr.stats.channel)
        template_stachan = list(set(template_stachan))
        data_stachan = list(set(data_stachan))
        debug_print('I have template info for these stations:\n' +
                    template_stachan.__str__() +
                    '\nI have daylong data for these stations:\n' +
                    data_stachan.__str__(), 3, debug)
    # Perform a check that the continuous data are all the same length
    min_start_time = min([tr.stats.starttime for tr in stream])
    max_end_time = max([tr.stats.endtime for tr in stream])
    longest_trace_length = stream[0].stats.sampling_rate * (
        max_end_time - min_start_time)
    longest_trace_length += 1
    for tr in stream:
        if not tr.stats.npts == longest_trace_length:
            msg = 'Data are not equal length, padding short traces'
            warnings.warn(msg)
            start_pad = np.zeros(int(
                tr.stats.sampling_rate *
                (tr.stats.starttime - min_start_time)))
            end_pad = np.zeros(int(
                tr.stats.sampling_rate *
                (max_end_time - tr.stats.endtime)))
            # In some cases there will be one sample missing when sampling
            # time-stamps are not set consistently between channels, this
            # results in start_pad and end_pad being len==0
            if len(start_pad) == 0 and len(end_pad) == 0:
                debug_print("start and end pad are both zero, padding at one "
                            "end", 2, debug)
                if (tr.stats.starttime - min_start_time) > (
                        max_end_time - tr.stats.endtime):
                    start_pad = np.zeros(
                        int(longest_trace_length - tr.stats.npts))
                else:
                    end_pad = np.zeros(
                        int(longest_trace_length - tr.stats.npts))
            tr.data = np.concatenate([start_pad, tr.data, end_pad])
    # Perform check that all template lengths are internally consistent
    for i, temp in enumerate(template_list):
        if len(set([tr.stats.npts for tr in temp])) > 1:
            msg = ('Template %s contains traces of differing length, this is '
                   'not currently supported' % _template_names[i])
            raise MatchFilterError(msg)
    # NOTE(review): time.clock() was removed in Python 3.8 --
    # time.perf_counter() is the modern equivalent; confirm supported
    # Python versions before changing.
    outtic = time.clock()
    debug_print('Ensuring all template channels have matches in'
                ' continuous data', 2, debug)
    template_stachan = {}
    # Work out what station-channel pairs are in the templates, including
    # duplicate station-channel pairs.  We will use this information to fill
    # all templates with the same station-channel pairs as required by
    # _template_loop.
    for template in templates:
        stachans_in_template = []
        for tr in template:
            stachans_in_template.append(
                (tr.stats.network, tr.stats.station,
                 tr.stats.location, tr.stats.channel))
        stachans_in_template = dict(Counter(stachans_in_template))
        for stachan in stachans_in_template.keys():
            stachans = stachans_in_template[stachan]
            if stachan not in template_stachan.keys():
                template_stachan.update({stachan: stachans})
            elif stachans_in_template[stachan] > template_stachan[stachan]:
                template_stachan.update({stachan: stachans})
    # Remove un-matched channels from templates.
    _template_stachan = copy.deepcopy(template_stachan)
    for stachan in template_stachan.keys():
        if not stream.select(network=stachan[0], station=stachan[1],
                             location=stachan[2], channel=stachan[3]):
            # Remove stachan from list of dictionary of template_stachans
            _template_stachan.pop(stachan)
            # Remove template traces rather than adding NaN data
            for template in templates:
                if template.select(network=stachan[0], station=stachan[1],
                                   location=stachan[2], channel=stachan[3]):
                    for tr in template.select(
                            network=stachan[0], station=stachan[1],
                            location=stachan[2], channel=stachan[3]):
                        template.remove(tr)
                    print('Removing template channel %s.%s.%s.%s due to'
                          ' no matches in continuous data' %
                          (stachan[0], stachan[1], stachan[2], stachan[3]))
    template_stachan = _template_stachan
    # Remove un-needed channels from continuous data.
    for tr in stream:
        if not (tr.stats.network, tr.stats.station, tr.stats.location,
                tr.stats.channel) in template_stachan.keys():
            print('Removing channel in continuous data for %s.%s.%s.%s:'
                  ' no match in template' %
                  (tr.stats.network, tr.stats.station, tr.stats.location,
                   tr.stats.channel))
            stream.remove(tr)
    # Check for duplicate channels
    stachans = [(tr.stats.network, tr.stats.station,
                 tr.stats.location, tr.stats.channel) for tr in stream]
    c_stachans = Counter(stachans)
    for key in c_stachans.keys():
        if c_stachans[key] > 1:
            msg = ('Multiple channels for %s.%s.%s.%s, likely a data issue'
                   % (key[0], key[1], key[2], key[3]))
            raise MatchFilterError(msg)
    # Pad out templates to have all channels
    _templates = []
    used_template_names = []
    for template, template_name in zip(templates, _template_names):
        if len(template) == 0:
            msg = ('No channels matching in continuous data for ' +
                   'template' + template_name)
            warnings.warn(msg)
            continue
        for stachan in template_stachan.keys():
            number_of_channels = len(template.select(
                network=stachan[0], station=stachan[1],
                location=stachan[2], channel=stachan[3]))
            if number_of_channels < template_stachan[stachan]:
                missed_channels = template_stachan[stachan] - \
                    number_of_channels
                # Fill the missing channels with NaN traces so every
                # template ends up with the same channel set.
                nulltrace = Trace()
                nulltrace.stats.update(
                    {'network': stachan[0], 'station': stachan[1],
                     'location': stachan[2], 'channel': stachan[3],
                     'sampling_rate': template[0].stats.sampling_rate,
                     'starttime': template[0].stats.starttime,
                     'not_in_original': True})
                nulltrace.data = np.array([np.NaN] * len(template[0].data),
                                          dtype=np.float32)
                for dummy in range(missed_channels):
                    template += nulltrace
        template.sort()
        _templates.append(template)
        used_template_names.append(template_name)
        # Quick check that this has all worked
        if len(template) != max([len(t) for t in templates]):
            raise MatchFilterError('Internal error forcing same template '
                                   'lengths, report this error.')
    templates = _templates
    _template_names = used_template_names
    debug_print('Starting the correlation run for these data', 2, debug)
    for template in templates:
        debug_print(template.__str__(), 3, debug)
    debug_print(stream.__str__(), 3, debug)
    # Run the (possibly user-supplied) cross-correlation backend.
    multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
    [cccsums, no_chans, chans] = multichannel_normxcorr(
        templates=templates, stream=stream, cores=cores, **kwargs)
    if len(cccsums[0]) == 0:
        raise MatchFilterError('Correlation has not run, zero length cccsum')
    outtoc = time.clock()
    debug_print(' '.join(['Looping over templates and streams took:',
                          str(outtoc - outtic), 's']), 0, debug)
    debug_print('The shape of the returned cccsums is: %s\n'
                'This is from %i templates\nCorrelated with %i channels of '
                'data' % (cccsums.shape, len(templates), len(stream)),
                2, debug)
    detections = []
    if output_cat:
        det_cat = Catalog()
    # Per-template thresholds, per the "Thresholding" note in the docstring.
    if str(threshold_type) == str("absolute"):
        thresholds = [threshold for _ in range(len(cccsums))]
    elif str(threshold_type) == str('MAD'):
        thresholds = [threshold * np.median(np.abs(cccsum))
                      for cccsum in cccsums]
    else:
        thresholds = [threshold * no_chans[i] for i in range(len(cccsums))]
    if peak_cores is None:
        peak_cores = cores
    all_peaks = multi_find_peaks(
        arr=cccsums, thresh=thresholds, debug=debug, parallel=parallel,
        trig_int=int(trig_int * stream[0].stats.sampling_rate),
        full_peaks=full_peaks, cores=peak_cores)
    for i, cccsum in enumerate(cccsums):
        if np.abs(np.mean(cccsum)) > 0.05:
            warnings.warn('Mean is not zero! Check this!')
        # Set up a trace object for the cccsum as this is easier to plot and
        # maintains timing
        if plotvar:
            _match_filter_plot(
                stream=stream, cccsum=cccsum,
                template_names=_template_names,
                rawthresh=thresholds[i], plotdir=plotdir,
                plot_format=plot_format, i=i)
        if debug >= 4:
            np.save(_template_names[i] +
                    stream[0].stats.starttime.datetime.strftime('%Y%j'),
                    cccsum)
            debug_print(
                ' '.join(['Saved the cccsum to:', _template_names[i],
                          stream[0].stats.starttime.datetime.strftime(
                              '%Y%j')]), 4, debug)
        if all_peaks[i]:
            for peak in all_peaks[i]:
                # Convert the peak's sample offset back to an absolute time.
                detecttime = (
                    stream[0].stats.starttime +
                    peak[1] / stream[0].stats.sampling_rate)
                detection = Detection(
                    template_name=_template_names[i],
                    detect_time=detecttime,
                    no_chans=no_chans[i], detect_val=peak[0],
                    threshold=thresholds[i], typeofdet='corr',
                    chans=chans[i], threshold_type=threshold_type,
                    threshold_input=threshold)
                if output_cat or output_event:
                    detection._calculate_event(template_st=templates[i])
                detections.append(detection)
                if output_cat:
                    det_cat.append(detection.event)
    if extract_detections:
        detection_streams = extract_from_stream(stream, detections)
    del stream, templates
    # Return shape depends on the output flags; see the "Returns" note.
    if output_cat and not extract_detections:
        return detections, det_cat
    elif not extract_detections:
        return detections
    elif extract_detections and not output_cat:
        return detections, detection_streams
    else:
        return detections, det_cat, detection_streams
Main matched-filter detection function. Over-arching code to run the correlations of given templates with a day of seismic data and output the detections based on a given threshold. For a functional example see the tutorials. :type template_names: list :param template_names: List of template names in the same order as template_list :type template_list: list :param template_list: A list of templates of which each template is a :class:`obspy.core.stream.Stream` of obspy traces containing seismic data and header information. :type st: :class:`obspy.core.stream.Stream` :param st: A Stream object containing all the data available and required for the correlations with templates given. For efficiency this should contain no excess traces which are not in one or more of the templates. This will now remove excess traces internally, but will copy the stream and work on the copy, leaving your input stream untouched. :type threshold: float :param threshold: A threshold value set based on the threshold_type :type threshold_type: str :param threshold_type: The type of threshold to be used, can be MAD, absolute or av_chan_corr. See Note on thresholding below. :type trig_int: float :param trig_int: Minimum gap between detections in seconds. :type plotvar: bool :param plotvar: Turn plotting on or off :type plotdir: str :param plotdir: Path to plotting folder, plots will be output here, defaults to run location. :type xcorr_func: str or callable :param xcorr_func: A str of a registered xcorr function or a callable for implementing a custom xcorr function. For more information see: :func:`eqcorrscan.utils.correlate.register_array_xcorr` :type concurrency: str :param concurrency: The type of concurrency to apply to the xcorr function. Options are 'multithread', 'multiprocess', 'concurrent'. 
For more details see :func:`eqcorrscan.utils.correlate.get_stream_xcorr` :type cores: int :param cores: Number of cores to use :type debug: int :param debug: Debug output level, the bigger the number, the more the output. :type plot_format: str :param plot_format: Specify format of output plots if saved :type output_cat: bool :param output_cat: Specifies if matched_filter will output an obspy.Catalog class containing events for each detection. Default is False, in which case matched_filter will output a list of detection classes, as normal. :type output_event: bool :param output_event: Whether to include events in the Detection objects, defaults to True, but for large cases you may want to turn this off as Event objects can be quite memory intensive. :type extract_detections: bool :param extract_detections: Specifies whether or not to return a list of streams, one stream per detection. :type arg_check: bool :param arg_check: Check arguments, defaults to True, but if running in bulk, and you are certain of your arguments, then set to False. :type full_peaks: bool :param full_peaks: See `eqcorrscan.core.findpeaks.find_peaks2_short`. :type peak_cores: int :param peak_cores: Number of processes to use for parallel peak-finding (if different to `cores`). .. note:: **Returns:** If neither `output_cat` or `extract_detections` are set to `True`, then only the list of :class:`eqcorrscan.core.match_filter.Detection`'s will be output: :return: :class:`eqcorrscan.core.match_filter.Detection` detections for each detection made. :rtype: list If `output_cat` is set to `True`, then the :class:`obspy.core.event.Catalog` will also be output: :return: Catalog containing events for each detection, see above. :rtype: :class:`obspy.core.event.Catalog` If `extract_detections` is set to `True` then the list of :class:`obspy.core.stream.Stream`'s will also be output. :return: list of :class:`obspy.core.stream.Stream`'s for each detection, see above. :rtype: list .. 
note:: If your data contain gaps these must be padded with zeros before using this function. The `eqcorrscan.utils.pre_processing` functions will provide gap-filled data in the appropriate format. Note that if you pad your data with zeros before filtering or resampling the gaps will not be all zeros after filtering. This will result in the calculation of spurious correlations in the gaps. .. Note:: Detections are not corrected for `pre-pick`, the detection.detect_time corresponds to the beginning of the earliest template channel at detection. .. note:: **Data overlap:** Internally this routine shifts and trims the data according to the offsets in the template (e.g. if trace 2 starts 2 seconds after trace 1 in the template then the continuous data will be shifted by 2 seconds to align peak correlations prior to summing). Because of this, detections at the start and end of continuous data streams **may be missed**. The maximum time-period that might be missing detections is the maximum offset in the template. To work around this, if you are conducting matched-filter detections through long-duration continuous data, we suggest using some overlap (a few seconds, on the order of the maximum offset in the templates) in the continous data. You will then need to post-process the detections (which should be done anyway to remove duplicates). .. note:: **Thresholding:** **MAD** threshold is calculated as the: .. math:: threshold {\\times} (median(abs(cccsum))) where :math:`cccsum` is the cross-correlation sum for a given template. **absolute** threshold is a true absolute threshold based on the cccsum value. **av_chan_corr** is based on the mean values of single-channel cross-correlations assuming all data are present as required for the template, e.g: .. math:: av\_chan\_corr\_thresh=threshold \\times (cccsum\ /\ len(template)) where :math:`template` is a single template from the input and the length is the number of channels within this template. .. 
note:: The output_cat flag will create an :class:`obspy.core.event.Catalog` containing one event for each :class:`eqcorrscan.core.match_filter.Detection`'s generated by match_filter. Each event will contain a number of comments dealing with correlation values and channels used for the detection. Each channel used for the detection will have a corresponding :class:`obspy.core.event.Pick` which will contain time and waveform information. **HOWEVER**, the user should note that the pick times do not account for the prepick times inherent in each template. For example, if a template trace starts 0.1 seconds before the actual arrival of that phase, then the pick time generated by match_filter for that phase will be 0.1 seconds early. .. Note:: xcorr_func can be used as follows: .. rubric::xcorr_func argument example >>> import obspy >>> import numpy as np >>> from eqcorrscan.core.match_filter import match_filter >>> from eqcorrscan.utils.correlate import time_multi_normxcorr >>> # define a custom xcorr function >>> def custom_normxcorr(templates, stream, pads, *args, **kwargs): ... # Just to keep example short call other xcorr function ... # in practice you would define your own function here ... print('calling custom xcorr function') ... return time_multi_normxcorr(templates, stream, pads) >>> # generate some toy templates and stream >>> random = np.random.RandomState(42) >>> template = obspy.read() >>> stream = obspy.read() >>> for num, tr in enumerate(stream): # iter st and embed templates ... data = tr.data ... tr.data = random.randn(6000) * 5 ... tr.data[100: 100 + len(data)] = data >>> # call match_filter ane ensure the custom function is used >>> detections = match_filter( ... template_names=['1'], template_list=[template], st=stream, ... threshold=.5, threshold_type='absolute', trig_int=1, ... plotvar=False, ... xcorr_func=custom_normxcorr) # doctest:+ELLIPSIS calling custom xcorr function...
def is_valid_coll(self, coll):
    """Determine whether a requested collection name exists.

    A collection is valid when the warcserver knows it as either a fixed
    or a dynamic route.

    :param str coll: The name of the collection to check
    :return: True if the collection is valid, False otherwise
    :rtype: bool
    """
    # Removed long-dead commented-out shortcut for the "all" collection;
    # the route lookups below are the single source of truth.
    return (coll in self.warcserver.list_fixed_routes() or
            coll in self.warcserver.list_dynamic_routes())
Determines if the collection name for a request is valid (exists) :param str coll: The name of the collection to check :return: True if the collection is valid, false otherwise :rtype: bool
def extract_single_dist_for_current_platform(self, reqs, dist_key):
    """Resolve a specific distribution from a set of requirements matching the current platform.

    :param list reqs: A list of :class:`PythonRequirement` to resolve.
    :param str dist_key: The value of `distribution.key` to match for a `distribution` from the
                         resolved requirements.
    :return: The single :class:`pkg_resources.Distribution` matching `dist_key`.
    :raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the
             given `dist_key`.
    """
    distributions = self._resolve_distributions_by_platform(reqs, platforms=['current'])
    try:
        # Feed the generator directly; no need to materialize a list first.
        matched_dist = assert_single_element(
            dist
            for _, dists in distributions.items()
            for dist in dists
            if dist.key == dist_key
        )
    except (StopIteration, ValueError) as e:
        # Chain the original error so the underlying resolution failure
        # stays visible in the traceback.
        raise self.SingleDistExtractionError(
            "Exactly one dist was expected to match name {} in requirements {}: {}"
            .format(dist_key, reqs, e)) from e
    return matched_dist
Resolve a specific distribution from a set of requirements matching the current platform. :param list reqs: A list of :class:`PythonRequirement` to resolve. :param str dist_key: The value of `distribution.key` to match for a `distribution` from the resolved requirements. :return: The single :class:`pkg_resources.Distribution` matching `dist_key`. :raises: :class:`self.SingleDistExtractionError` if no dists or multiple dists matched the given `dist_key`.
def bind(self, source=None, destination=None, node=None, edge_title=None,
         edge_label=None, edge_color=None, edge_weight=None,
         point_title=None, point_label=None, point_color=None,
         point_size=None):
    """Relate data attributes to graph structure and visual representation.

    The call is chainable: it never mutates the current Plotter, but
    returns a copy carrying the merged bindings, so both the old and new
    bindings can be used for different graphs.

    :param source: Attribute containing an edge's source ID (string).
    :param destination: Attribute containing an edge's destination ID (string).
    :param node: Attribute containing a node's ID (string).
    :param edge_title: Attribute overriding an edge's minimized label text
        (defaults to source/destination).
    :param edge_label: Attribute overriding an edge's expanded label text.
    :param edge_color: Attribute overriding an edge's color (palette value,
        Color Brewer based).
    :param edge_weight: Attribute overriding edge weight (default 1);
        advanced layout controls relayout edges based on it.
    :param point_title: Attribute overriding a node's minimized label text
        (defaults to the node ID).
    :param point_label: Attribute overriding a node's expanded label text.
    :param point_color: Attribute overriding a node's color (palette value).
    :param point_size: Attribute overriding a node's size (defaults to node
        degree; sizes are normalized and adjusted via semantic zoom).
    :returns: Plotter.
    :rtype: Plotter.

    **Example**
        ::

            import graphistry
            g = graphistry.bind(source='src', destination='dst', node='id')
            g2 = g.bind(point_color='color', point_size='size')

    Later calls inherit any binding they do not override.
    """
    res = copy.copy(self)
    overrides = {
        '_source': source,
        '_destination': destination,
        '_node': node,
        '_edge_title': edge_title,
        '_edge_label': edge_label,
        '_edge_color': edge_color,
        '_edge_weight': edge_weight,
        '_point_title': point_title,
        '_point_label': point_label,
        '_point_color': point_color,
        '_point_size': point_size,
    }
    # Each argument overrides the inherited binding only when truthy,
    # mirroring the original per-attribute ``arg or self._attr`` behavior.
    for attr, value in overrides.items():
        setattr(res, attr, value or getattr(self, attr))
    return res
Relate data attributes to graph structure and visual representation. To facilitate reuse and replayable notebooks, the binding call is chainable. Invocation does not effect the old binding: it instead returns a new Plotter instance with the new bindings added to the existing ones. Both the old and new bindings can then be used for different graphs. :param source: Attribute containing an edge's source ID :type source: String. :param destination: Attribute containing an edge's destination ID :type destination: String. :param node: Attribute containing a node's ID :type node: String. :param edge_title: Attribute overriding edge's minimized label text. By default, the edge source and destination is used. :type edge_title: HtmlString. :param edge_label: Attribute overriding edge's expanded label text. By default, scrollable list of attribute/value mappings. :type edge_label: HtmlString. :param edge_color: Attribute overriding edge's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer. :type edge_color: String. :param edge_weight: Attribute overriding edge weight. Default is 1. Advanced layout controls will relayout edges based on this value. :type edge_weight: String. :param point_title: Attribute overriding node's minimized label text. By default, the node ID is used. :type point_title: HtmlString. :param point_label: Attribute overriding node's expanded label text. By default, scrollable list of attribute/value mappings. :type point_label: HtmlString. :param point_color: Attribute overriding node's color. `See palette definitions <https://graphistry.github.io/docs/legacy/api/0.9.2/api.html#extendedpalette>`_ for values. Based on Color Brewer. :type point_color: Integer. :param point_size: Attribute overriding node's size. By default, uses the node degree. The visualization will normalize point sizes and adjust dynamically using semantic zoom. :type point_size: HtmlString. 
:returns: Plotter. :rtype: Plotter. **Example: Minimal** :: import graphistry g = graphistry.bind() g = g.bind(source='src', destination='dst') **Example: Node colors** :: import graphistry g = graphistry.bind() g = g.bind(source='src', destination='dst', node='id', point_color='color') **Example: Chaining** :: import graphistry g = graphistry.bind(source='src', destination='dst', node='id') g1 = g.bind(point_color='color1', point_size='size1') g.bind(point_color='color1b') g2a = g1.bind(point_color='color2a') g2b = g1.bind(point_color='color2b', point_size='size2b') g3a = g2a.bind(point_size='size3a') g3b = g2b.bind(point_size='size3b') In the above **Chaining** example, all bindings use src/dst/id. Colors and sizes bind to: :: g: default/default g1: color1/size1 g2a: color2a/size1 g2b: color2b/size2b g3a: color2a/size3a g3b: color2b/size3b
def _qrd_solve_full(a, b, ddiag, dtype=float):
    """Solve the equation A^T x = B, D x = 0.

    Parameters:
    a     - an n-by-m array, m >= n
    b     - an m-vector
    ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.)
    dtype - dtype used for the working copies of the inputs (default: float).

    Returns:
    x    - n-vector solving the equation.
    s    - the n-by-n supplementary matrix s.
    pmut - n-element permutation vector defining the permutation matrix P.

    The equations are solved in a least-squares sense if the system is
    rank-deficient. D is a diagonal matrix and hence only its diagonal is in
    fact supplied as an argument. The matrix s is full lower triangular and
    solves the equation

    P^T (A A^T + D D) P = S^T S (needs transposition?)

    where P is the permutation matrix defined by the vector pmut; it puts the
    rows of 'a' in order of nonincreasing rank, so that a[pmut] has its rows
    sorted that way.
    """
    # NOTE: the default used to be the alias ``np.float``, which was removed
    # in NumPy 1.24; the builtin ``float`` is its exact equivalent.
    a = np.asarray(a, dtype)
    b = np.asarray(b, dtype)
    ddiag = np.asarray(ddiag, dtype)

    n, m = a.shape
    assert m >= n
    assert b.shape == (m,)
    assert ddiag.shape == (n,)

    # The computation is straightforward.
    q, r, pmut = _qr_factor_full(a)
    bqt = np.dot(b, q.T)
    x, s = _manual_qrd_solve(r[:, :n], pmut, ddiag, bqt,
                             dtype=dtype, build_s=True)
    return x, s, pmut
Solve the equation A^T x = B, D x = 0. Parameters: a - an n-by-m array, m >= n b - an m-vector ddiag - an n-vector giving the diagonal of D. (The rest of D is 0.) Returns: x - n-vector solving the equation. s - the n-by-n supplementary matrix s. pmut - n-element permutation vector defining the permutation matrix P. The equations are solved in a least-squares sense if the system is rank-deficient. D is a diagonal matrix and hence only its diagonal is in fact supplied as an argument. The matrix s is full lower triangular and solves the equation P^T (A A^T + D D) P = S^T S (needs transposition?) where P is the permutation matrix defined by the vector pmut; it puts the rows of 'a' in order of nonincreasing rank, so that a[pmut] has its rows sorted that way.
def do_unique(environment, value, case_sensitive=False, attribute=None):
    """Returns a list of unique items from the given iterable.

    .. sourcecode:: jinja

        {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
            -> ['foo', 'bar', 'foobar']

    The unique items are yielded in the same order as their first occurrence
    in the iterable passed to the filter.

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Filter objects with unique values for this attribute.
    """
    key_of = make_attrgetter(
        environment,
        attribute,
        postprocess=None if case_sensitive else ignore_case,
    )
    seen = set()
    for item in value:
        marker = key_of(item)
        if marker in seen:
            continue
        seen.add(marker)
        yield item
Returns a list of unique items from the the given iterable. .. sourcecode:: jinja {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }} -> ['foo', 'bar', 'foobar'] The unique items are yielded in the same order as their first occurrence in the iterable passed to the filter. :param case_sensitive: Treat upper and lower case strings as distinct. :param attribute: Filter objects with unique values for this attribute.
def _resolve_sources(self, sources, tables, stage=None, predicate=None):
    """Determine which sources to run from an input of sources and tables.

    :param sources: A collection of source objects, source names, or source vids
    :param tables: A collection of table names
    :param stage: If not None, select only sources from this stage
    :param predicate: If not None, a callable that selects a source to return when True
    :return: The list of selected source objects
    """
    assert sources is None or tables is None

    # Pick the candidate pool: explicit sources win, then table filtering,
    # then everything the bundle knows about.
    if sources:
        candidates = sources if isinstance(sources, (list, tuple)) else [sources]
    elif tables:
        candidates = [s for s in self.sources if s.dest_table_name in tables]
    else:
        candidates = self.sources

    # Names are resolved through self.source(); objects pass through as-is.
    resolved = [self.source(c) if isinstance(c, basestring) else c
                for c in candidates]

    if predicate:
        resolved = [s for s in resolved if predicate(s)]
    if stage:
        resolved = [s for s in resolved if str(s.stage) == str(stage)]

    return resolved
Determine what sources to run from an input of sources and tables :param sources: A collection of source objects, source names, or source vids :param tables: A collection of table names :param stage: If not None, select only sources from this stage :param predicate: If not none, a callable that selects a source to return when True :return:
def system_listMethods(self):
    """system.listMethods() => ['add', 'subtract', 'multiple']

    Returns a list of the methods supported by the server."""
    # list() guards against dict views: on Python 3, dict.keys() can be
    # neither concatenated with a list nor sorted in place, which crashed
    # the original code.
    methods = list(self.funcs.keys())
    if self.instance is not None:
        # Instance can implement _listMethods to return a list of methods
        if hasattr(self.instance, '_listMethods'):
            methods = remove_duplicates(
                methods + self.instance._listMethods()
            )
        # if the instance has a _dispatch method then we don't have
        # enough information to provide a list of methods
        elif not hasattr(self.instance, '_dispatch'):
            methods = remove_duplicates(
                methods + list_public_methods(self.instance)
            )
    methods.sort()
    return methods
system.listMethods() => ['add', 'subtract', 'multiple'] Returns a list of the methods supported by the server.
def get_family_admin_session(self):
    """Gets the ``OsidSession`` associated with the family administrative service.

    return: (osid.relationship.FamilyAdminSession) - a
            ``FamilyAdminSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_family_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_family_admin()`` is ``true``.*
    """
    # Guard: the provider must advertise support before a session is built.
    if not self.supports_family_admin():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        # Hand the provider's proxy and runtime straight through.
        return sessions.FamilyAdminSession(proxy=self._proxy,
                                           runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
Gets the ``OsidSession`` associated with the family administrative service. return: (osid.relationship.FamilyAdminSession) - a ``FamilyAdminSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_family_admin()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_family_admin()`` is ``true``.*
def table(contents, heading=True, colw=None, cwunit='dxa', tblw=0,
          twunit='auto', borders=None, celstyle=None):
    """
    Return a table element based on specified parameters

    @param list contents: A list of lists describing contents. Every item in
                          the list can be a string or a valid XML element
                          itself. It can also be a list. In that case all the
                          listed elements will be merged into the cell.
    @param bool heading:  Tells whether first line should be treated as
                          heading or not
    @param list colw:     list of integer column widths specified in wunitS.
    @param str cwunit:    Unit used for column width:
                            'pct'  : fiftieths of a percent
                            'dxa'  : twentieths of a point
                            'nil'  : no width
                            'auto' : automagically determined
    @param int tblw:      Table width
    @param str twunit:    Unit used for table width. Same possible values as
                          cwunit.
    @param dict borders:  Dictionary defining table border. Supported keys
                          are: 'top', 'left', 'bottom', 'right', 'insideH',
                          'insideV', 'all'. When specified, the 'all' key has
                          precedence over others. Each key must define a dict
                          of border attributes:
                            color : The color of the border, in hex or 'auto'
                            space : The space, measured in points
                            sz    : The size of the border, in eighths of a
                                    point
                            val   : The style of the border, see
                                    http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm
    @param list celstyle: Specify the style for each colum, list of dicts.
                          supported keys:
                          'align' : specify the alignment, see paragraph
                                    documentation.
    @return lxml.etree:   Generated XML etree element
    """
    # `borders` previously defaulted to a shared mutable ``{}``; a ``None``
    # sentinel avoids the mutable-default pitfall with identical behavior.
    if borders is None:
        borders = {}
    table = makeelement('tbl')
    columns = len(contents[0])
    # Table properties
    tableprops = makeelement('tblPr')
    tablestyle = makeelement('tblStyle', attributes={'val': ''})
    tableprops.append(tablestyle)
    tablewidth = makeelement(
        'tblW', attributes={'w': str(tblw), 'type': str(twunit)})
    tableprops.append(tablewidth)
    if borders:
        tableborders = makeelement('tblBorders')
        for b in ['top', 'left', 'bottom', 'right', 'insideH', 'insideV']:
            if b in borders or 'all' in borders:
                # 'all' takes precedence over any per-side definition.
                k = 'all' if 'all' in borders else b
                # str() replaces the Python-2-only unicode() call.
                attrs = dict((a, str(v)) for a, v in borders[k].items())
                tableborders.append(makeelement(b, attributes=attrs))
        tableprops.append(tableborders)
    tablelook = makeelement('tblLook', attributes={'val': '0400'})
    tableprops.append(tablelook)
    table.append(tableprops)
    # Table Grid
    tablegrid = makeelement('tblGrid')
    for i in range(columns):
        attrs = {'w': str(colw[i]) if colw else '2390'}
        tablegrid.append(makeelement('gridCol', attributes=attrs))
    table.append(tablegrid)
    # Heading Row -- only emitted when requested, so heading=False no longer
    # risks appending an empty <tr> carrying just row properties.
    if heading:
        row = makeelement('tr')
        rowprops = makeelement('trPr')
        cnfStyle = makeelement('cnfStyle', attributes={'val': '000000100000'})
        rowprops.append(cnfStyle)
        row.append(rowprops)
        # Renamed loop variable: it used to shadow the `heading` parameter.
        for i, headcontent in enumerate(contents[0]):
            cell = makeelement('tc')
            # Cell properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellwidth = makeelement('tcW', attributes=wattr)
            cellstyle = makeelement('shd', attributes={'val': 'clear',
                                                       'color': 'auto',
                                                       'fill': 'FFFFFF',
                                                       'themeFill': 'text2',
                                                       'themeFillTint': '99'})
            cellprops.append(cellwidth)
            cellprops.append(cellstyle)
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(headcontent, (list, tuple)):
                headcontent = [headcontent]
            for h in headcontent:
                if isinstance(h, etree._Element):
                    cell.append(h)
                else:
                    cell.append(paragraph(h, jc='center'))
            row.append(cell)
        table.append(row)
    # Contents Rows
    for contentrow in contents[1 if heading else 0:]:
        row = makeelement('tr')
        for i, content in enumerate(contentrow):
            cell = makeelement('tc')
            # Properties
            cellprops = makeelement('tcPr')
            if colw:
                wattr = {'w': str(colw[i]), 'type': cwunit}
            else:
                wattr = {'w': '0', 'type': 'auto'}
            cellprops.append(makeelement('tcW', attributes=wattr))
            cell.append(cellprops)
            # Paragraph (Content)
            if not isinstance(content, (list, tuple)):
                content = [content]
            for c in content:
                if isinstance(c, etree._Element):
                    cell.append(c)
                else:
                    if celstyle and 'align' in celstyle[i]:
                        align = celstyle[i]['align']
                    else:
                        align = 'left'
                    cell.append(paragraph(c, jc=align))
            row.append(cell)
        table.append(row)
    return table
Return a table element based on specified parameters @param list contents: A list of lists describing contents. Every item in the list can be a string or a valid XML element itself. It can also be a list. In that case all the listed elements will be merged into the cell. @param bool heading: Tells whether first line should be treated as heading or not @param list colw: list of integer column widths specified in wunitS. @param str cwunit: Unit used for column width: 'pct' : fiftieths of a percent 'dxa' : twentieths of a point 'nil' : no width 'auto' : automagically determined @param int tblw: Table width @param str twunit: Unit used for table width. Same possible values as cwunit. @param dict borders: Dictionary defining table border. Supported keys are: 'top', 'left', 'bottom', 'right', 'insideH', 'insideV', 'all'. When specified, the 'all' key has precedence over others. Each key must define a dict of border attributes: color : The color of the border, in hex or 'auto' space : The space, measured in points sz : The size of the border, in eighths of a point val : The style of the border, see http://www.schemacentral.com/sc/ooxml/t-w_ST_Border.htm @param list celstyle: Specify the style for each colum, list of dicts. supported keys: 'align' : specify the alignment, see paragraph documentation. @return lxml.etree: Generated XML etree element
def cookiejar(name='session'):
    """
    Ready the CookieJar, loading a saved session if available
    @param str name: Basename (without extension) of the session file
    @rtype: cookielib.LWPCookieJar
    """
    log = logging.getLogger('ipsv.common.cookiejar')
    spath = os.path.join(config().get('Paths', 'Data'), '{n}.txt'.format(n=name))
    cj = cookielib.LWPCookieJar(spath)
    log.debug('Attempting to load session file: %s', spath)
    if os.path.exists(spath):
        try:
            cj.load()
            log.info('Successfully loaded a saved session / cookie file')
        except cookielib.LoadError as e:
            # A corrupt session file is non-fatal: fall through with an
            # empty jar. Logger.warn() is a deprecated alias; use warning().
            log.warning('Session / cookie file exists, but could not be loaded',
                        exc_info=e)
    return cj
Ready the CookieJar, loading a saved session if available @rtype: cookielib.LWPCookieJar
def recv(files, dest):
    '''
    Used with salt-cp, pass the files dict, and the destination.

    This function receives small fast copy files from the master via salt-cp.
    It does not work via the CLI.
    '''
    written = {}
    for path, data in six.iteritems(files):
        same_name = os.path.basename(path) == os.path.basename(dest)
        # Resolve the concrete target path for this entry.
        if same_name and not os.path.isdir(dest):
            target = dest
        elif os.path.isdir(dest):
            target = os.path.join(dest, os.path.basename(path))
        elif os.path.isdir(os.path.dirname(dest)):
            target = dest
        else:
            return 'Destination unavailable'
        try:
            with salt.utils.files.fopen(target, 'w+') as fp_:
                fp_.write(data)
            written[target] = True
        except IOError:
            written[target] = False
    return written
Used with salt-cp, pass the files dict, and the destination. This function receives small fast copy files from the master via salt-cp. It does not work via the CLI.
def default_format(self):
    """Human-readable identifier for the user.

    Prefers the full name (first and last) when a first name is set, then
    the username, and finally falls back to the numeric user id rendered
    as a string.
    """
    user = self.user
    if user.first_name is not None:
        return self.full_name
    return user.username if user.username is not None else str(user.id)
Returns full name (first and last) if name is available. If not, returns username if available. If not available too, returns the user id as a string.
def cons(f, mindepth):
    """Yield, per cluster, the per-site count vectors for sites whose summed
    depth meets ``mindepth``.

    :param f: path to a cluster file readable by ``ClustFile``
    :param int mindepth: minimum total read depth required both for a cluster
                         to be considered and for a site to be reported
    """
    for data in ClustFile(f):
        # data is an iterable of (name, seq, nreps) triples; names are unused.
        _, _, nreps = zip(*data)
        # Depth filter on the whole cluster.
        if sum(nreps) < mindepth:
            continue
        # One [sequence, replicate-count] pair per dereplicated read.
        seqstack = [[seq, nrep] for _, seq, nrep in data]
        res = stack(seqstack)
        # Keep the first four counts per site (presumably the four bases --
        # confirm against stack()) at sufficiently deep sites.
        yield [x[:4] for x in res if sum(x[:4]) >= mindepth]
Makes a list of lists of reads at each site
def camelResource(obj):
    """Recursively TitleCase the keys of *obj* in place and return it.

    Some sources from apis return lowerCased where as describe calls
    always return TitleCase; this function turns the former into the
    latter. Non-dict inputs are returned unchanged.
    """
    if not isinstance(obj, dict):
        return obj
    for key in list(obj.keys()):
        value = obj.pop(key)
        # k[:1] (instead of k[0]) tolerates an empty-string key.
        obj[key[:1].upper() + key[1:]] = value
        if isinstance(value, dict):
            camelResource(value)
        elif isinstance(value, list):
            # Explicit loop replaces the old list(map(...)) side-effect idiom.
            for item in value:
                camelResource(item)
    return obj
Some sources from apis return lowerCased where as describe calls always return TitleCase, this function turns the former to the later
def explode_map(self, map_):
    """
    Much faster version of ``pyny.Space.explode()`` method for
    previously locked ``pyny.Space``.

    :param map_: the points, and the same order, that appear at
        ``pyny.Space.get_map()``. There is no need for the index if
        locked.
    :type map_: ndarray (shape=(N, 3))
    :returns: The polygons, the holes and the points.
    :rtype: list

    .. seealso::

        * :func:`get_seed`
        * :func:`get_map`
        * :func:`map2pyny`
        * :func:`map2seed`
    """
    if self.explode_map_schedule is None:
        # Unlocked Space: map_ is an [index, points] pair and the index
        # must be walked to split the points into polygons, holes and
        # stand-alone points.
        index = map_[0]
        points = map_[1] # points
        # Rows whose second index column equals -1 mark stand-alone points.
        k = index[:, 1] == -1
        sop = points[k] # Set of points
        index = index[np.logical_not(k)]
        points = points[np.logical_not(k)]
        # new index: a change in this composite key between consecutive
        # rows marks the boundary between polygons/holes.
        # NOTE(review): index[:, 2] appears twice in the key while
        # index[:, 0] is never used -- looks like a possible typo; confirm
        # against the index layout produced by pyny.Space.get_map().
        index_bool = np.diff(index[:, 2]*1e12
                             + index[:, 1]*1e8
                             + index[:, 2]*1e4).astype(bool)
        # Dissemination loop
        polygons = []
        holes = []
        # Segment end offsets: one past each boundary, plus the final end.
        dif = np.arange(index_bool.shape[0], dtype=int)[index_bool]+1
        dif = np.append(dif, index_bool.shape[0]+1)
        i = 0
        for j in dif:
            # A negative third index column flags a hole, non-negative a
            # polygon; points[i:j] is the current segment.
            if index[i, 2] < 0: # hole
                holes.append(points[i:j, :])
            if index[i, 2] >= 0: # polygon
                polygons.append(points[i:j, :])
            i = j
        return [polygons, holes, sop]
    else:
        # Locked Space: the precomputed schedule holds the row indices of
        # each polygon/hole, so only the raw points are required here.
        # Only points (without index) allowed
        if type(map_) == list:
            points = map_[1]
        else:
            points = map_
        ex = self.explode_map_schedule
        polygons = [points[p, :] for p in ex[0]]
        holes = [points[p, :] for p in ex[1]]
        sop = points[ex[2], :]
        return [polygons, holes, sop]
Much faster version of ``pyny.Space.explode()`` method for previously locked ``pyny.Space``. :param map_: the points, and the same order, that appear at ``pyny.Space.get_map()``. There is no need for the index if locked. :type map_: ndarray (shape=(N, 3)) :returns: The polygons, the holes and the points. :rtype: list .. seealso:: * :func:`get_seed` * :func:`get_map` * :func:`map2pyny` * :func:`map2seed`
def to_cloudformation(self, **kwargs):
    """Returns the Lambda Permission resource allowing S3 to invoke the
    function this event source triggers.

    :param dict kwargs: S3 bucket resource
    :returns: a list of vanilla CloudFormation Resources, to which this S3
              event expands
    :rtype: list
    """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")
    if kwargs.get('bucket') is None:
        raise TypeError("Missing required keyword argument: bucket")
    if kwargs.get('bucket_id') is None:
        raise TypeError("Missing required keyword argument: bucket_id")

    bucket = kwargs['bucket']
    bucket_id = kwargs['bucket_id']

    permission = self._construct_permission(
        function, source_account=ref('AWS::AccountId'))
    if CONDITION in permission.resource_attributes:
        self._depend_on_lambda_permissions_using_tag(bucket, permission)
    else:
        self._depend_on_lambda_permissions(bucket, permission)

    # `bucket` is the dict form of the S3 Bucket resource from the SAM
    # template. When several S3 events share one bucket, each event appends
    # its notification configuration onto this same dict, which is why the
    # existing dict is extended rather than rebuilt.
    #
    # Re-emitting the bucket resource for every event relies on the merge in
    # samtranslator.translator.Translator.translate being "last one wins";
    # the template is walked linearly, so the final merged bucket is correct.
    self._inject_notification_configuration(function, bucket)

    return [permission, S3Bucket.from_dict(bucket_id, bucket)]
Returns the Lambda Permission resource allowing S3 to invoke the function this event source triggers. :param dict kwargs: S3 bucket resource :returns: a list of vanilla CloudFormation Resources, to which this S3 event expands :rtype: list
def clark(self, databasepath):
    """
    Download and set-up the CLARK database using the set_targets.sh script.
    Use defaults of bacteria for database type, and species for taxonomic
    level
    :param databasepath: path to use to save the database
    """
    # Guard clause: without the CLARK scripts there is nothing to do.
    if not self.clarkpath:
        logging.warning('No CLARK scripts detected in $PATH. Cannot download database.')
        return
    logging.info('Downloading CLARK database')
    # Create the folder in which the database is to be stored
    databasepath = self.create_database_folder(databasepath, 'clark')
    # Build the set_targets.sh invocation; --light keeps the download small,
    # as the full database is not required.
    targetcall = ('cd {clarkpath} && ../opt/clark/set_targets.sh {dbpath} '
                  'bacteria --species --light').format(clarkpath=self.clarkpath,
                                                       dbpath=databasepath)
    # Download the database
    self.database_clone(targetcall, databasepath)
Download and set-up the CLARK database using the set_targets.sh script. Use defaults of bacteria for database type, and species for taxonomic level :param databasepath: path to use to save the database
def get_serializer(self, *args, **kwargs):
    """Get an instance of the child serializer.

    Builds the child serializer's init kwargs from the subset of this
    field's kwargs that serializers accept, merged with (and overridden
    by) any kwargs inherited from the parent, then returns a cached
    instance.
    """
    # Plain dict.items() replaces six.iteritems(): identical behavior on
    # Python 3, and this method no longer depends on six.
    init_args = {
        k: v for k, v in self.kwargs.items()
        if k in self.SERIALIZER_KWARGS
    }
    init_args.update(self._inherit_parent_kwargs(kwargs))
    # Dynamic serializers are instantiated in "embedded" mode when requested.
    if self.embed and self._is_dynamic:
        init_args['embed'] = True
    return self._get_cached_serializer(args, init_args)
Get an instance of the child serializer.
def print_meter_record(file_path, rows=5):
    """Output readings for specified number of rows to console.

    :param file_path: path of the NEM file to read
    :param rows: number of trailing readings to print per channel
    """
    meter_data = nr.read_nem_file(file_path)
    print('Header:', meter_data.header)
    print('Transactions:', meter_data.transactions)
    # Walk every NMI and channel, showing only the last `rows` readings.
    for nmi, channels in meter_data.readings.items():
        for channel, readings in channels.items():
            print(nmi, 'Channel', channel)
            for reading in readings[-rows:]:
                print('', reading)
Output readings for specified number of rows to console
def taskRunning(self, *args, **kwargs):
    """
    Task Running Messages

    Whenever a task is claimed by a worker, a run is started on the worker,
    and a message is posted on this exchange.

    This exchange outputs: ``v1/task-running-message.json#``

    Routing keys, in order (all required unless noted):

     * routingKeyKind: always ``'primary'`` for the formalized routing key.
     * taskId: ``taskId`` for the task this message concerns.
     * runId: ``runId`` of the latest run, ``_`` if no run exists.
     * workerGroup: ``workerGroup`` of the latest run, ``_`` if none.
     * workerId: ``workerId`` of the latest run, ``_`` if none.
     * provisionerId: ``provisionerId`` this task is targeted at.
     * workerType: ``workerType`` this task must run on.
     * schedulerId: ``schedulerId`` this task was created by.
     * taskGroupId: ``taskGroupId`` this task was created in.
     * reserved: space reserved for future routing-key entries; always
       match with ``#`` (as the tooling does when unspecified).
    """
    # Single-word routing-key entries, in wire order.
    single_word_entries = [
        'taskId',
        'runId',
        'workerGroup',
        'workerId',
        'provisionerId',
        'workerType',
        'schedulerId',
        'taskGroupId',
    ]
    routing_key = [{
        'constant': 'primary',
        'multipleWords': False,
        'name': 'routingKeyKind',
    }]
    routing_key.extend(
        {'multipleWords': False, 'name': entry}
        for entry in single_word_entries
    )
    # Trailing multi-word placeholder for future entries.
    routing_key.append({'multipleWords': True, 'name': 'reserved'})
    ref = {
        'exchange': 'task-running',
        'name': 'taskRunning',
        'routingKey': routing_key,
        'schema': 'v1/task-running-message.json#',
    }
    return self._makeTopicExchange(ref, *args, **kwargs)
Task Running Messages Whenever a task is claimed by a worker, a run is started on the worker, and a message is posted on this exchange. This exchange outputs: ``v1/task-running-message.json#``This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * taskId: `taskId` for the task this message concerns (required) * runId: `runId` of latest run for the task, `_` if no run is exists for the task. (required) * workerGroup: `workerGroup` of latest run for the task, `_` if no run is exists for the task. (required) * workerId: `workerId` of latest run for the task, `_` if no run is exists for the task. (required) * provisionerId: `provisionerId` this task is targeted at. (required) * workerType: `workerType` this task must run on. (required) * schedulerId: `schedulerId` this task was created by. (required) * taskGroupId: `taskGroupId` this task was created in. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified.
def Presentation(pptx=None):
    """
    Load and return a |Presentation| object.

    *pptx* may be either a path to a ``.pptx`` file (a string) or a
    file-like object. When omitted or ``None``, the built-in default
    presentation "template" is loaded instead.

    Raises ValueError if the opened package is not a PowerPoint file.
    """
    source = _default_pptx_path() if pptx is None else pptx
    main_part = Package.open(source).main_document_part
    if not _is_pptx_package(main_part):
        tmpl = "file '%s' is not a PowerPoint file, content type is '%s'"
        raise ValueError(tmpl % (source, main_part.content_type))
    return main_part.presentation
Return a |Presentation| object loaded from *pptx*, where *pptx* can be either a path to a ``.pptx`` file (a string) or a file-like object. If *pptx* is missing or ``None``, the built-in default presentation "template" is loaded.
def generate(oracle, seq_len, p=0.5, k=1, LRS=0, weight=None):
    """ Generate a sequence by traversing an oracle.

    :param oracle: an indexed vmo object
    :param seq_len: the length of the returned improvisation sequence
    :param p: a float between (0,1) representing the probability of using
        the forward links.
    :param k: the starting improvisation time step in oracle
    :param LRS: the length of minimum longest repeated suffixes allowed to jump
    :param weight:
        None: choose uniformly among all the possible sfx/rsfx given
            current state.
        "max": always choose the sfx/rsfx having the longest LRS.
        "weight": choose sfx/rsfx in a way that favors longer ones than
            shorter ones.
    :return:
        s: a list containing the sequence generated, each element
            represents a state.
        kend: the ending state.
        ktrace: the list of oracle states k visited while generating.
    """
    # Work on copies so traversal never mutates the oracle's link lists.
    trn = oracle.trn[:]
    sfx = oracle.sfx[:]
    lrs = oracle.lrs[:]
    rsfx = oracle.rsfx[:]

    s = []
    ktrace = [1]

    for _i in range(seq_len):
        # generate each state
        if sfx[k] != 0 and sfx[k] is not None:
            if (random.random() < p):
                # copy forward according to transitions
                I = trn[k]
                if len(I) == 0:
                    # if last state, choose a suffix
                    k = sfx[k]
                    ktrace.append(k)
                    I = trn[k]
                sym = I[int(np.floor(random.random() * len(I)))]
                s.append(sym)  # Why (sym-1) before?
                k = sym
                ktrace.append(k)
            else:
                # copy any of the next symbols
                ktrace.append(k)
                _k = k
                k_vec = []
                # collect candidate jump targets via suffix/reverse-suffix links
                k_vec = _find_links(k_vec, sfx, rsfx, _k)
                # only keep candidates whose shared suffix is long enough
                k_vec = [_i for _i in k_vec if lrs[_i] >= LRS]
                lrs_vec = [lrs[_i] for _i in k_vec]
                if len(k_vec) > 0:  # if a possibility found, len(I)
                    if weight == 'weight':
                        # sample an LRS near the maximum, then pick the
                        # candidate whose LRS is (closest to) that value
                        max_lrs = np.amax(lrs_vec)
                        query_lrs = max_lrs - np.floor(random.expovariate(1))
                        if query_lrs in lrs_vec:
                            _tmp = np.where(lrs_vec == query_lrs)[0]
                            _tmp = _tmp[int(
                                np.floor(random.random() * len(_tmp)))]
                            sym = k_vec[_tmp]
                        else:
                            _tmp = np.argmin(abs(
                                np.subtract(lrs_vec, query_lrs)))
                            sym = k_vec[_tmp]
                    elif weight == 'max':
                        sym = k_vec[np.argmax([lrs[_i] for _i in k_vec])]
                    else:
                        sym = k_vec[int(np.floor(random.random() *
                                                 len(k_vec)))]

                    # NOTE(review): when the jump lands on the last state we
                    # follow its suffix instead and do not emit a symbol.
                    if sym == len(sfx) - 1:
                        sym = sfx[sym] + 1
                    else:
                        s.append(sym + 1)
                    k = sym + 1
                    ktrace.append(k)
                else:  # otherwise continue
                    if k < len(sfx) - 1:
                        sym = k + 1
                    else:
                        sym = sfx[k] + 1
                    s.append(sym)
                    k = sym
                    ktrace.append(k)
        else:
            # state has no usable suffix link: just step forward, or wrap
            # through the suffix at the end of the oracle
            if k < len(sfx) - 1:
                s.append(k + 1)
                k += 1
                ktrace.append(k)
            else:
                sym = sfx[k] + 1
                s.append(sym)
                k = sym
                ktrace.append(k)
        # reset to the start once the end of the oracle is reached
        if k >= len(sfx) - 1:
            k = 0
    kend = k
    return s, kend, ktrace
Generate a sequence based on traversing an oracle. :param oracle: an indexed vmo object :param seq_len: the length of the returned improvisation sequence :param p: a float between (0,1) representing the probability of using the forward links. :param k: the starting improvisation time step in oracle :param LRS: the length of minimum longest repeated suffixes allowed to jump :param weight: None: choose uniformly among all the possible sfx/rsfx given current state. "max": always choose the sfx/rsfx having the longest LRS. "weight": choose sfx/rsfx in a way that favors longer ones than shorter ones. :return: s: a list containing the sequence generated, each element represents a state. kend: the ending state. ktrace: the list of oracle states visited while generating.
def remove_builder(cls, builder_name: str):
    """Deregister the builder registered under *builder_name*.

    Also drops the builder from every hook specification. Intended for
    test cleanup only; there is no other reason to call this.
    """
    cls.builders.pop(builder_name, None)
    for spec in cls.hooks.values():
        spec.pop(builder_name, None)
Remove a registered builder `builder_name`. No reason to use this except for tests.
def validlines(self):
    """Return all lines within which Prosodic understood all words.

    A line qualifies when it is neither broken nor flagged to be ignored.
    """
    good = []
    for line in self.lines():
        if line.isBroken() or line.ignoreMe:
            continue
        good.append(line)
    return good
Return all lines within which Prosodic understood all words.
def _check_file_is_under_workingdir(filename, wdir):
    """Raise an error if an input file would be staged outside the working dir.

    :param filename: relative or absolute path of the input file.
    :param wdir: the working directory that must contain the file.
    :return: *filename* resolved against *wdir* (not realpath-normalized).
    :raises exceptions.PathError: when the resolved file escapes *wdir*.
    """
    p = filename
    if not os.path.isabs(p):
        p = os.path.join(wdir, p)
    targetpath = os.path.realpath(p)
    real_wdir = os.path.realpath(wdir)
    # BUG FIX: commonprefix() compares strings character-wise, so e.g.
    # /tmp/work2/file passed the check for wdir /tmp/work. commonpath()
    # compares whole path components instead.
    if os.path.commonpath([real_wdir, targetpath]) != real_wdir:
        raise exceptions.PathError(
            "The subprocess engine does not support input files with absolute paths")
    return p
Raise error if input is being staged to a location not underneath the working dir
def print_duplicate_anchor_information(duplicate_tags):
    """
    Print a report of duplicate AnchorHub tags found during collection.

    :param duplicate_tags: Mapping of file-path strings to lists of
        tuples ``(tag, line_number, generated_anchor)``: the repeated
        AnchorHub tag, the line where the duplicate was found, and the
        generated anchor that first used the tag.
    """
    print("Duplicate anchors specified within file(s)")
    print("Please modify your code to remove duplicates.\r\n")
    for file_path, occurrences in duplicate_tags.items():
        print("File: " + file_path)
        for tag, line_number, anchor in occurrences:
            print("\tLine " + str(line_number) +
                  "\t#" + tag +
                  " :\t" + anchor)
Prints information about duplicate AnchorHub tags found during collection. :param duplicate_tags: Dictionary mapping string file path keys to a list of tuples. The tuples contain the following information, in order: 1. The string AnchorHub tag that was repeated 2. The line in the file that the duplicate was found, as a number 3. The string generated anchor that first used the repeated tag
def remove_tx_rich(self):
    """
    Delete the `c:tx[c:rich]` child element if one is present;
    otherwise do nothing.
    """
    found = self.xpath('c:tx[c:rich]')
    if found:
        self.remove(found[0])
Remove any `c:tx[c:rich]` child, or do nothing if not present.
def create_bag(dir_bag):
    """
    Create a bagit Bag from the files in a directory.

    :param str dir_bag: Directory that contains csv, jsonld, and changelog files.
    :return obj: Bag on success, ``None`` when bag creation failed.
    """
    logger_bagit.info("enter create_bag")
    # if not dir_bag:
    #     dir_bag = os.getcwd()
    metadata = {
        'Name': 'LiPD Project',
        'Reference': 'www.lipds.net',
        'DOI-Resolved': 'True',
    }
    try:
        bag = bagit.make_bag(dir_bag, metadata)
    except FileNotFoundError as e:
        print("Error: directory not found to create bagit")
        logger_bagit.debug("create_bag: FileNotFoundError: failed to create bagit, {}".format(e))
    except Exception as e:
        print("Error: failed to create bagit bag")
        logger_bagit.debug("create_bag: Exception: failed to create bag, {}".format(e))
    else:
        logger_bagit.info("created bag")
        return bag
    return None
Create a Bag out of given files. :param str dir_bag: Directory that contains csv, jsonld, and changelog files. :return obj: Bag
def clone(self, **kwargs): """ Clone Task instance. Reset network_try_count, increase task_try_count. Reset priority attribute if it was not set explicitly. """ # First, create exact copy of the current Task object attr_copy = self.__dict__.copy() if attr_copy.get('grab_config') is not None: del attr_copy['url'] if not attr_copy['priority_set_explicitly']: attr_copy['priority'] = None task = Task(**attr_copy) # Reset some task properties if they have not # been set explicitly in kwargs if 'network_try_count' not in kwargs: task.network_try_count = 0 if 'task_try_count' not in kwargs: task.task_try_count = self.task_try_count + 1 if 'refresh_cache' not in kwargs: task.refresh_cache = False if 'disable_cache' not in kwargs: task.disable_cache = False if kwargs.get('url') is not None and kwargs.get('grab') is not None: raise SpiderMisuseError('Options url and grab could not be ' 'used together') if (kwargs.get('url') is not None and kwargs.get('grab_config') is not None): raise SpiderMisuseError('Options url and grab_config could not ' 'be used together') if (kwargs.get('grab') is not None and kwargs.get('grab_config') is not None): raise SpiderMisuseError('Options grab and grab_config could not ' 'be used together') if kwargs.get('grab'): task.setup_grab_config(kwargs['grab'].dump_config()) del kwargs['grab'] elif kwargs.get('grab_config'): task.setup_grab_config(kwargs['grab_config']) del kwargs['grab_config'] elif kwargs.get('url'): task.url = kwargs['url'] if task.grab_config: task.grab_config['url'] = kwargs['url'] del kwargs['url'] for key, value in kwargs.items(): setattr(task, key, value) task.process_delay_option(None) return task
Clone Task instance. Reset network_try_count, increase task_try_count. Reset priority attribute if it was not set explicitly.
def write_frames(self, frames_out):
    """Write multiple pamqp frames from the current channel.

    :param list frames_out: A list of pamqp frames.
    :return: None
    """
    # Refuse to write when the channel/connection is in an error state.
    self.check_for_errors()
    self._connection.write_frames(self.channel_id, frames_out)
Write multiple pamqp frames from the current channel. :param list frames_out: A list of pamqp frames. :return:
def get_urls(htmlDoc, limit=200):
    '''Extract links to dots from an HTML document.

    :param htmlDoc: HTML document as a string.
    :param limit: maximum number of matching anchors to collect.
    :return: dict mapping the first word of each anchor's text (with
        ``/`` replaced by ``_``) to the third path segment of its href.
    '''
    soup = BeautifulSoup(htmlDoc)
    urls = {}
    matched = 0
    for anchor in soup.findAll('a'):
        if matched >= limit:
            # No need to scan the remaining anchors once the limit is hit.
            break
        href = anchor.get('href')
        # BUG FIX: anchors without an href attribute return None, which
        # previously crashed the 'in' membership test.
        if href is None or 'dots' not in href:
            continue
        key = anchor.text.split(' ')[0].replace('/', '_')
        urls[key] = href.split('/')[2]
        matched += 1
    return urls
takes in html document as string, returns links to dots
def exponential_moving_average(data, period):
    """
    Exponential Moving Average.

    Formula:
    p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 - w)^3 * p3 + ...
    / 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 + ...
    where: w = 2 / (N + 1)
    """
    # Raises when `period` is invalid for `data` (project-level check).
    catch_errors.check_for_period_error(data, period)
    # One EMA per full window ending at idx; indices before the first
    # complete window cannot be computed.
    emas = [exponential_moving_average_helper(
        data[idx - period + 1:idx + 1], period) for idx in range(period - 1, len(data))]
    # Pad the head with placeholder values so the output aligns with `data`.
    emas = fill_for_noncomputable_vals(data, emas)
    return emas
Exponential Moving Average. Formula: p0 + (1 - w) * p1 + (1 - w)^2 * p2 + (1 - w)^3 * p3 +... / 1 + (1 - w) + (1 - w)^2 + (1 - w)^3 +... where: w = 2 / (N + 1)
def sendSync(self, query, *parameters, **options):
    '''Performs a synchronous query against a q service and returns parsed data.

    In typical use case, `query` is the name of the function to call and
    `parameters` are its parameters. When `parameters` list is empty, the
    query can be an arbitrary q expression (e.g. ``0 +/ til 100``).

    Executes a q expression:

    >>> print(q.sendSync('til 10'))
    [0 1 2 3 4 5 6 7 8 9]

    Executes an anonymous q function with parameters:

    >>> print(q.sendSync('{y + til x}', 10, 1))
    [ 1  2  3  4  5  6  7  8  9 10]

    The :func:`.sendSync` is called from the overloaded :func:`.__call__`
    function, which allows a :class:`.QConnection` instance to be called
    as a function:

    >>> print(q('{y + til x}', 10, 1))
    [ 1  2  3  4  5  6  7  8  9 10]

    :Parameters:
     - `query` (`string`) - query to be executed
     - `parameters` (`list` or `None`) - parameters for the query

    :Options:
     - `raw` (`boolean`) - if ``True`` returns raw data chunk instead of
       parsed data, **Default**: ``False``
     - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are
       backed by raw q representation (:class:`.QTemporalList`,
       :class:`.QTemporal`) instances, otherwise are represented as
       `numpy datetime64`/`timedelta64` arrays and atoms,
       **Default**: ``False``
     - `single_char_strings` (`boolean`) - if ``True`` single char Python
       strings are encoded as q strings instead of chars,
       **Default**: ``False``

    :returns: query result parsed to Python data structures

    :raises: :class:`.QConnectionException`, :class:`.QWriterException`,
        :class:`.QReaderException`
    '''
    self.query(MessageType.SYNC, query, *parameters, **options)
    response = self.receive(data_only=False, **options)

    if response.type == MessageType.RESPONSE:
        return response.data

    # An out-of-band message arrived where a response was expected:
    # notify the peer, then surface the protocol violation to the caller.
    self._writer.write(
        QException('nyi: qPython expected response message'),
        MessageType.ASYNC if response.type == MessageType.ASYNC
        else MessageType.RESPONSE)
    # BUG FIX: the '%s' placeholder was never interpolated in the
    # original code, so the offending message type was not reported.
    raise QReaderException(
        'Received message of type: %s where response was expected'
        % response.type)
Performs a synchronous query against a q service and returns parsed data. In typical use case, `query` is the name of the function to call and `parameters` are its parameters. When `parameters` list is empty, the query can be an arbitrary q expression (e.g. ``0 +/ til 100``). Executes a q expression: >>> print(q.sendSync('til 10')) [0 1 2 3 4 5 6 7 8 9] Executes an anonymous q function with a single parameter: >>> print(q.sendSync('{til x}', 10)) [0 1 2 3 4 5 6 7 8 9] Executes an anonymous q function with two parameters: >>> print(q.sendSync('{y + til x}', 10, 1)) [ 1 2 3 4 5 6 7 8 9 10] >>> print(q.sendSync('{y + til x}', *[10, 1])) [ 1 2 3 4 5 6 7 8 9 10] The :func:`.sendSync` is called from the overloaded :func:`.__call__` function. This allows :class:`.QConnection` instance to be called as a function: >>> print(q('{y + til x}', 10, 1)) [ 1 2 3 4 5 6 7 8 9 10] :Parameters: - `query` (`string`) - query to be executed - `parameters` (`list` or `None`) - parameters for the query :Options: - `raw` (`boolean`) - if ``True`` returns raw data chunk instead of parsed data, **Default**: ``False`` - `numpy_temporals` (`boolean`) - if ``False`` temporal vectors are backed by raw q representation (:class:`.QTemporalList`, :class:`.QTemporal`) instances, otherwise are represented as `numpy datetime64`/`timedelta64` arrays and atoms, **Default**: ``False`` - `single_char_strings` (`boolean`) - if ``True`` single char Python strings are encoded as q strings instead of chars, **Default**: ``False`` :returns: query result parsed to Python data structures :raises: :class:`.QConnectionException`, :class:`.QWriterException`, :class:`.QReaderException`
def construct(self, max_message_size, remote_name=None, python_path=None,
              debug=False, connect_timeout=None, profiling=False,
              unidirectional=False, old_router=None, **kwargs):
    """Store connection settings on this stream prior to connecting.

    NOTE(review): the original docstring described fetching a named
    context on the local machine, which does not match this method; it
    only records the supplied options.

    :param max_message_size: maximum size of a single message.
    :param remote_name: label for the remote side; defaults to
        ``get_default_remote_name()`` and must not contain slashes.
    :param python_path: optional interpreter path override.
    :param debug: enable debug mode on the remote.
    :param connect_timeout: optional timeout override (seconds).
    :param profiling: enable profiling on the remote.
    :param unidirectional: restrict routing to one direction.
    :param old_router: accepted for signature compatibility; unused here.
    :raises ValueError: if *remote_name* contains a slash or backslash.
    """
    super(Stream, self).construct(**kwargs)
    self.max_message_size = max_message_size
    if python_path:
        self.python_path = python_path
    if connect_timeout:
        self.connect_timeout = connect_timeout
    if remote_name is None:
        remote_name = get_default_remote_name()
    if '/' in remote_name or '\\' in remote_name:
        raise ValueError('remote_name= cannot contain slashes')
    self.remote_name = remote_name
    self.debug = debug
    self.profiling = profiling
    self.unidirectional = unidirectional
    # NOTE: a duplicate `self.max_message_size = max_message_size`
    # assignment was removed here; the attribute is already set above.
    self.connect_deadline = time.time() + self.connect_timeout
Get the named context running on the local machine, creating it if it does not exist.
def fade_out(self, duration=3):
    """Turns off the light by gradually fading it out.

    :param duration: fade-out duration in seconds (default 3).
    """
    super(RgbLight, self).fade_out(duration)
    # Make sure the light ends fully off once the fade completes.
    self.off()
Turns off the light by gradually fading it out. The optional `duration` parameter allows for control of the fade out duration (in seconds)
async def make_default_options_response(self) -> Response:
    """Build the default response for an OPTIONS request.

    The ``Allow`` header lists the methods permitted for the matched rule.
    """
    adapter = _request_ctx_stack.top.url_adapter
    allow_header = ', '.join(adapter.allowed_methods())
    return self.response_class('', headers={'Allow': allow_header})
This is the default route function for OPTIONS requests.
def launch(self, timeout=2):
    """
    Hierapp instance, with environment dependencies:
      - can be launched within short timeout
      - auto-destroys shortly

    Records wall-clock start/end times and stores the overall verdict in
    ``self.status``.
    """
    self.start_time = time.time()
    self.end_time = time.time()  # placeholder; overwritten on completion below
    instance = self.app.launch(environment=self.env)
    time.sleep(2)  # Instance needs time to appear in the UI
    assert instance.running(timeout=timeout), "Monitor didn't get Active state"
    launched = instance.status == 'Active'
    instance.reschedule_workflow(workflow_name='destroy',
                                 timestamp=self.destroy_interval)
    assert instance.destroyed(timeout=timeout), "Monitor didn't get Destroyed after short time"
    stopped = instance.status == 'Destroyed'
    instance.force_remove()
    self.end_time = time.time()
    # Success only when both the launch and the auto-destroy were observed.
    self.status = launched and stopped
Hierapp instance, with environment dependencies: - can be launched within short timeout - auto-destroys shortly
def find_value_type(global_ns, value_type_str):
    """implementation details

    Resolve *value_type_str* to a declaration/type object by looking it up
    in *global_ns*, handling fundamental types, std::(w)string typedefs,
    and (recursively) const/pointer decorations. Returns ``None`` when the
    name cannot be resolved unambiguously.
    """
    # Normalize to a fully-qualified name for the namespace lookup.
    if not value_type_str.startswith('::'):
        value_type_str = '::' + value_type_str
    found = global_ns.decls(
        name=value_type_str,
        function=lambda decl: not isinstance(decl, calldef.calldef_t),
        allow_empty=True)
    if not found:
        no_global_ns_value_type_str = value_type_str[2:]
        if no_global_ns_value_type_str in cpptypes.FUNDAMENTAL_TYPES:
            # built-in C++ type, e.g. 'int', 'double'
            return cpptypes.FUNDAMENTAL_TYPES[no_global_ns_value_type_str]
        elif type_traits.is_std_string(value_type_str):
            string_ = global_ns.typedef('::std::string')
            return type_traits.remove_declarated(string_)
        elif type_traits.is_std_wstring(value_type_str):
            string_ = global_ns.typedef('::std::wstring')
            return type_traits.remove_declarated(string_)
        else:
            # Strip 'const ' prefix and trailing '*', then retry the
            # lookup recursively on the undecorated name.
            value_type_str = no_global_ns_value_type_str
            has_const = value_type_str.startswith('const ')
            if has_const:
                value_type_str = value_type_str[len('const '):]
            has_pointer = value_type_str.endswith('*')
            if has_pointer:
                value_type_str = value_type_str[:-1]
            found = None
            if has_const or has_pointer:
                found = impl_details.find_value_type(
                    global_ns, value_type_str)
            if not found:
                return None
            else:
                # Re-apply the stripped decorations to the resolved type.
                if isinstance(found, class_declaration.class_types):
                    return cpptypes.declarated_t(found)
                if has_const:
                    return cpptypes.const_t(found)
                if has_pointer:
                    return cpptypes.pointer_t(found)
    # Direct lookup succeeded: accept only an unambiguous single match.
    if len(found) == 1:
        return found[0]
    return None
implementation details
def unsubscribe(self, topic):
    """Unsubscribe from the given topic.

    :param topic: topic to unsubscribe from.
    :return: ``NC.ERR_NO_CONN`` when there is no open socket, otherwise
        the result of sending the UNSUBSCRIBE request.
    """
    if self.sock == NC.INVALID_SOCKET:
        return NC.ERR_NO_CONN
    self.logger.info("UNSUBSCRIBE: %s", topic)
    # Topic names are UTF-8 encoded on the wire.
    return self.send_unsubscribe(False, [utf8encode(topic)])
Unsubscribe to some topic.
def set(self, key, samples, sampling_rate):
    """
    Store samples and sampling-rate under the given key, overwriting any
    existing data.

    Samples must be a 1-D floating-point array (``np.float32``) with
    values in the range -1.0 to 1.0; they are stored as int16.

    Args:
        key (str): A key to store the data for.
        samples (numpy.ndarray): 1-D array of audio samples (np.float32).
        sampling_rate (int): The sampling-rate of the audio samples.

    Note:
        The container has to be opened in advance.
    """
    if not np.issubdtype(samples.dtype, np.floating):
        raise ValueError('Samples are required as np.float32!')
    if samples.ndim > 1:
        raise ValueError('Only single channel supported!')

    self.raise_error_if_not_open()

    # Overwrite semantics: drop any existing dataset for this key.
    if key in self._file:
        del self._file[key]

    int_samples = (samples * MAX_INT16_VALUE).astype(np.int16)
    dataset = self._file.create_dataset(key, data=int_samples)
    dataset.attrs[SAMPLING_RATE_ATTR] = sampling_rate
Set the samples and sampling-rate for the given key. Existing data will be overwritten. The samples have to have ``np.float32`` datatype and values in the range of -1.0 and 1.0. Args: key (str): A key to store the data for. samples (numpy.ndarray): 1-D array of audio samples (np.float32). sampling_rate (int): The sampling-rate of the audio samples. Note: The container has to be opened in advance.
def set_chime_volume(self, volume):
    """Set the device chime volume.

    :param volume: one of [low, medium, high]
    """
    desired = {"desired_state": {"chime_volume": volume}}
    response = self.api_interface.set_device_state(self, desired)
    self._update_state_from_response(response)
:param volume: one of [low, medium, high]
def merge_vertices(self, digits=None):
    """
    Merges vertices which are identical and replace references.

    Parameters
    --------------
    digits : None, or int
      How many digits to consider when merging vertices

    Alters
    -----------
    self.entities : entity.points re- referenced; degenerate entities dropped
    self.vertices : duplicates removed
    """
    if len(self.vertices) == 0:
        return
    if digits is None:
        digits = util.decimal_to_digits(tol.merge * self.scale,
                                        min_digits=1)

    unique, inverse = grouping.unique_rows(self.vertices,
                                           digits=digits)
    self.vertices = self.vertices[unique]

    # BUG FIX: np.bool was deprecated and removed in NumPy >= 1.24;
    # the builtin bool is the equivalent dtype.
    entities_ok = np.ones(len(self.entities), dtype=bool)

    for index, entity in enumerate(self.entities):
        # what kind of entity are we dealing with
        kind = type(entity).__name__

        # entities that don't need runs merged
        # don't screw up control- point- knot relationship
        # NOTE: this is a substring test on the literal, matching
        # 'BSpline', 'Bezier' and 'Text' kinds.
        if kind in 'BSpline Bezier Text':
            entity.points = inverse[entity.points]
            continue

        # if we merged duplicate vertices, the entity may
        # have multiple references to the same vertex
        points = grouping.merge_runs(inverse[entity.points])

        # if there are three points and two are identical fix it
        if kind == 'Line':
            if len(points) == 3 and points[0] == points[-1]:
                points = points[:2]
            elif len(points) < 2:
                # lines need two or more vertices
                entities_ok[index] = False
        elif kind == 'Arc' and len(points) != 3:
            # three point arcs need three points
            entities_ok[index] = False

        # store points in entity
        entity.points = points

    # remove degenerate entities
    self.entities = self.entities[entities_ok]
Merges vertices which are identical and replace references. Parameters -------------- digits : None, or int How many digits to consider when merging vertices Alters ----------- self.entities : entity.points re- referenced self.vertices : duplicates removed
def windowed_statistic(pos, values, statistic, size=None, start=None,
                       stop=None, step=None, windows=None, fill=np.nan):
    """Summarise *values* in windows over a single chromosome/contig.

    Parameters
    ----------
    pos : array_like, int, shape (n_items,)
        Item positions in ascending order, 1-based coordinates.
    values : array_like or tuple of array_like
        Values to summarise. A tuple is sliced per window and unpacked
        into *statistic* as separate arguments.
    statistic : callable
        Function applied to the values falling in each window.
    size, start, stop, step : int, optional
        Window geometry in bases; *step* defaults to *size*
        (non-overlapping windows). Ignored when *windows* is given.
    windows : array_like, int, shape (n_windows, 2), optional
        Explicit (window_start, window_stop) pairs, 1-based; stop
        positions are inclusive.
    fill : object, optional
        Value recorded for windows that contain no items.

    Returns
    -------
    out : ndarray, shape (n_windows,)
        Statistic value per window.
    windows : ndarray, int, shape (n_windows, 2)
        The windows used.
    counts : ndarray, int, shape (n_windows,)
        Number of items per window.
    """
    # positions must be a sorted index
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)

    # positions and values must line up one-to-one
    if isinstance(values, tuple):
        check_equal_length(pos, *values)
    else:
        check_equal_length(pos, values)

    # derive or normalise the window boundaries
    if windows is None:
        windows = position_windows(pos, size, start, stop, step)
    else:
        windows = asarray_ndim(windows, 2)

    out = []
    counts = []
    for lo, hi in window_locations(pos, windows):
        n_in_window = hi - lo
        if not n_in_window:
            # empty window
            result = fill
        elif isinstance(values, tuple):
            # slice each array and pass them as separate arguments
            result = statistic(*(v[lo:hi] for v in values))
        else:
            result = statistic(values[lo:hi])
        out.append(result)
        counts.append(n_in_window)

    return np.asarray(out), windows, np.asarray(counts)
Calculate a statistic from items in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates.. values : array_like, int, shape (n_items,) The values to summarise. May also be a tuple of values arrays, in which case each array will be sliced and passed through to the statistic function as separate arguments. statistic : function The statistic to compute. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- The window stop positions are included within a window. The final window will be truncated to the specified stop position, and so may be smaller than the other windows. Examples -------- Count non-zero (i.e., True) items in non-overlapping windows:: >>> import allel >>> pos = [1, 7, 12, 15, 28] >>> values = [True, False, True, False, False] >>> nnz, windows, counts = allel.windowed_statistic( ... pos, values, statistic=np.count_nonzero, size=10 ... 
) >>> nnz array([1, 1, 0]) >>> windows array([[ 1, 10], [11, 20], [21, 28]]) >>> counts array([2, 2, 1]) Compute a sum over items in half-overlapping windows:: >>> values = [3, 4, 2, 6, 9] >>> x, windows, counts = allel.windowed_statistic( ... pos, values, statistic=np.sum, size=10, step=5, fill=0 ... ) >>> x array([ 7, 12, 8, 0, 9]) >>> windows array([[ 1, 10], [ 6, 15], [11, 20], [16, 25], [21, 28]]) >>> counts array([2, 3, 2, 0, 1])
def iter(self, **kwargs):
    """see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>`

    Attaches this orbit to its propagator if necessary, then delegates.
    """
    prop = self.propagator
    if prop.orbit is not self:
        prop.orbit = self
    return prop.iter(**kwargs)
see :py:meth:`Propagator.iter() <beyond.propagators.base.Propagator.iter>`
def query_str_2_dict(query_str):
    """Parse a URL query string into a dict.

    ``a=123&b=456`` -> ``{'a': '123', 'b': '456'}``

    A parameter with no ``=`` maps to ``''``, and only the first ``=``
    in each pair separates key from value, so values may themselves
    contain ``=`` (the original implementation crashed on the former and
    silently truncated the latter).

    :param query_str: query string; may be empty or None.
    :return: dict of parameter names to values.
    """
    if not query_str:
        return {}
    query_dict = {}
    for pair in query_str.split('&'):
        # partition never raises: a missing '=' yields an empty value
        key, _, value = pair.partition('=')
        query_dict[key] = value
    return query_dict
将查询字符串,转换成字典 a=123&b=456 {'a': '123', 'b': '456'} :param query_str: :return:
def resolve_asset_dependency(self):
    """
    Rewrite every `file` attribute in the asset to an absolute path so
    that merging does not break relative references.
    """
    base = os.path.abspath(self.folder)
    for node in self.asset.findall("./*[@file]"):
        node.set("file", os.path.join(base, node.get("file")))
Converts every file dependency into absolute path so when we merge we don't break things.
def setup(self): """Continue the run process blocking on MasterControlProgram.run""" # If the app was invoked to specified to prepend the path, do so now if self.args.prepend_path: self._prepend_python_path(self.args.prepend_path)
Continue the run process blocking on MasterControlProgram.run
def output_key_name(self, input_key: str, output_hist: Hist, projection_name: str, **kwargs) -> str:
    """Return the key under which the output object should be stored.

    This is a basic placeholder hook: the default implementation ignores
    everything except *projection_name* and is likely to be overridden.

    Args:
        input_key: Key of the input hist in the input dict.
        output_hist: The output histogram.
        projection_name: Projection name for the output histogram.
        kwargs: Projection information dict combined with additional
            arguments passed to the projection function.
    Returns:
        Key under which to store the output; by default the projection name.
    """
    return projection_name
Returns the key under which the output object should be stored. Note: This function is just a basic placeholder which returns the projection name and likely should be overridden. Args: input_key: Key of the input hist in the input dict output_hist: The output histogram projection_name: Projection name for the output histogram kwargs: Projection information dict combined with additional arguments passed to the projection function. Returns: Key under which the output object should be stored. By default, it returns the projection name.
def validate(self, data=None, only=None, exclude=None):
    """
    Run validation over all fields and report whether it succeeded.

    Validated values are retained in ``self.data`` so they can be
    accessed later; errors accumulate in ``self.errors``. This is
    usually the method to call after creating the validator instance.

    :param data: Dictionary of data to validate.
    :param only: List or tuple of fields to validate.
    :param exclude: List or tuple of fields to exclude from validation.
    :return: True if validation was successful. Otherwise False.
    """
    data = data or {}
    only = only or []
    exclude = exclude or []
    self.errors = {}
    self.data = {}

    # Per-field validation.
    for name, field in self._meta.fields.items():
        skipped = name in exclude or (only and name not in only)
        if skipped:
            continue
        try:
            field.validate(name, data)
        except ValidationError as err:
            self.add_error(name, err)
        else:
            self.data[name] = field.value

    # Field-level cleaning, then whole-dict cleaning, each only while
    # no errors have been recorded so far.
    if not self.errors:
        self.clean_fields(self.data)
    if not self.errors:
        try:
            self.data = self.clean(self.data)
        except ValidationError as err:
            self.add_error('__base__', err)

    return not self.errors
Validate the data for all fields and return whether the validation was successful. This method also retains the validated data in ``self.data`` so that it can be accessed later. This is usually the method you want to call after creating the validator instance. :param data: Dictionary of data to validate. :param only: List or tuple of fields to validate. :param exclude: List or tuple of fields to exclude from validation. :return: True if validation was successful. Otherwise False.
def spawn_container(addr, env_cls=Environment, mgr_cls=EnvManager,
                    set_seed=True, *args, **kwargs):
    """Spawn a new environment in a given address as a coroutine.

    Arguments and keyword arguments are passed down to the created
    environment at initialization time.

    If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is
    installed, this function renames the title of the process to start
    with 'creamas' so that the process is easily identifiable, e.g. with
    ``ps -x | grep creamas``.

    :param addr: address for the new environment
    :param env_cls: class of the environment to create
    :param mgr_cls: class of the environment's manager
    :param bool set_seed: if True, re-seed the random number generators
        in the spawned process
    """
    # Try setting the process name to easily recognize the spawned
    # environments with 'ps -x' or 'top'. This is best-effort only,
    # since setproctitle is an optional dependency.
    try:
        import setproctitle as spt
        # BUGFIX: use env_cls.__name__; env_cls is already a class, so
        # env_cls.__class__.__name__ would always yield the metaclass
        # name (usually 'type'), not the environment class name.
        title = 'creamas: {}({})'.format(env_cls.__name__,
                                         _get_base_url(addr))
        spt.setproctitle(title)
    except ImportError:
        # setproctitle not installed; keep the default process title.
        # (Previously a bare "except:" which also hid KeyboardInterrupt.)
        pass

    if set_seed:
        _set_random_seeds()

    task = start(addr, env_cls, mgr_cls, *args, **kwargs)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(task)
Spawn a new environment in a given address as a coroutine. Arguments and keyword arguments are passed down to the created environment at initialization time. If `setproctitle <https://pypi.python.org/pypi/setproctitle>`_ is installed, this function renames the title of the process to start with 'creamas' so that the process is easily identifiable, e.g. with ``ps -x | grep creamas``.
async def create_link_secret(self, label: str) -> None:
    """
    Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the
    current link secret does not already correspond to the input link secret label.

    Raise WalletState if wallet is closed, or any other IndyError causing failure
    to set link secret in wallet.

    :param label: label for link secret; indy-sdk uses label to generate link secret
    """

    LOGGER.debug('Wallet.create_link_secret >>> label: %s', label)

    # A falsy handle means the wallet has not been opened (or was closed).
    if not self.handle:
        LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        await anoncreds.prover_create_master_secret(self.handle, label)
        # Record the label so later calls can tell which link secret is current.
        await self._write_link_secret_label(label)
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:
            # A link secret with this label already exists: not a failure.
            # NOTE(review): despite the log message saying "abstaining",
            # the label record IS refreshed below — confirm which is intended.
            LOGGER.warning(
                'Wallet %s link secret already current: abstaining from updating label record', self.name)
            await self._write_link_secret_label(label)
        else:
            # Any other indy failure is fatal; re-raise for the caller.
            LOGGER.debug(
                'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s',
                self.name,
                x_indy.error_code)
            raise

    LOGGER.debug('Wallet.create_link_secret <<<')
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the current link secret does not already correspond to the input link secret label. Raise WalletState if wallet is closed, or any other IndyError causing failure to set link secret in wallet. :param label: label for link secret; indy-sdk uses label to generate link secret
def export_agg_losses(ekey, dstore):
    """
    Export aggregate losses as one CSV file per tag (realization/statistic).

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the names of the CSV files written by the writer
    """
    dskey = ekey[0]
    oq = dstore['oqparam']
    dt = oq.loss_dt()
    name, value, tags = _get_data(dstore, dskey, oq.hazard_stats().items())
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    expvalue = dstore['exposed_value'].value  # shape (T1, T2, ..., L)
    tagcol = dstore['assetcol/tagcol']
    tagnames = tuple(dstore['oqparam'].aggregate_by)
    header = ('loss_type',) + tagnames + (
        'loss_value', 'exposed_value', 'loss_ratio')
    for r, tag in enumerate(tags):
        rows = []
        # Walk every (loss-type, tag-combination) cell for this tag column.
        for multi_idx, loss in numpy.ndenumerate(value[:, r]):
            # First index is the loss type; the rest index the tag axes.
            l, *tagidxs = multi_idx
            evalue = expvalue[tuple(tagidxs) + (l,)]
            row = tagcol.get_tagvalues(tagnames, tagidxs) + (
                loss, evalue, loss / evalue)
            rows.append((dt.names[l],) + row)
        dest = dstore.build_fname(name, tag, 'csv')
        writer.save(rows, dest, header)
    return writer.getsaved()
:param ekey: export key, i.e. a pair (datastore key, fmt) :param dstore: datastore object
def _run_command(command, targets, options): # type: (str, List[str], List[str]) -> bool """Runs `command` + `targets` + `options` in a subprocess and returns a boolean determined by the process return code. >>> result = run_command('pylint', ['foo.py', 'some_module'], ['-E']) >>> result True :param command: str :param targets: List[str] :param options: List[str] :return: bool """ print('{0}: targets={1} options={2}'.format(command, targets, options)) cmd = [command] + targets + options process = Popen(cmd) process.wait() return bool(process.returncode)
Runs `command` + `targets` + `options` in a subprocess and returns a boolean determined by the process return code. >>> result = _run_command('pylint', ['foo.py', 'some_module'], ['-E']) >>> result True :param command: str :param targets: List[str] :param options: List[str] :return: bool
def validate_attr(resource_attr_id, scenario_id, template_id=None):
    """
    Check that a resource attribute satisfies the requirements of all the types of the
    resource.

    :param resource_attr_id: ID of the resource attribute to validate
    :param scenario_id: ID of the scenario supplying the dataset
    :param template_id: optional template ID to restrict validation to
    :returns: None when the attribute validates; otherwise a JSONObject
        describing the failure (validation errors are reported, not raised)
    """
    # Load the resource scenario together with its resourceattr and dataset
    # in one round trip; .one() raises if the pair is missing or ambiguous.
    rs = db.DBSession.query(ResourceScenario).\
            filter(ResourceScenario.resource_attr_id==resource_attr_id,
                   ResourceScenario.scenario_id==scenario_id).options(
                joinedload_all("resourceattr")).options(
                joinedload_all("dataset")
            ).one()

    error = None
    try:
        _do_validate_resourcescenario(rs, template_id)
    except HydraError as e:
        # Package the failure details instead of propagating the exception.
        error = JSONObject(dict(
            ref_key = rs.resourceattr.ref_key,
            ref_id  = rs.resourceattr.get_resource_id(),
            ref_name = rs.resourceattr.get_resource().get_name(),
            resource_attr_id = rs.resource_attr_id,
            attr_id          = rs.resourceattr.attr.id,
            attr_name        = rs.resourceattr.attr.name,
            dataset_id       = rs.dataset_id,
            scenario_id=scenario_id,
            template_id=template_id,
            error_text=e.args[0]))
    return error
Check that a resource attribute satisfies the requirements of all the types of the resource.
def stop_watch(self):
    """Stop the periodic watch greenlet, and with it the pool itself."""
    watcher = self.greenlet_watch
    if not watcher:
        return
    # Non-blocking kill: do not wait for the greenlet to actually die.
    watcher.kill(block=False)
    self.greenlet_watch = None
Stops the periodic watch greenlet, thus the pool itself
def partition_all(n, iterable):
    """Partition *iterable* into chunks of size *n*; the final chunk may be smaller.

    http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
    """
    it = iter(iterable)
    # iter(callable, sentinel) keeps calling until an empty chunk appears.
    for chunk in iter(lambda: list(itertools.islice(it, n)), []):
        yield chunk
Partition a list into equally sized pieces, including last smaller parts http://stackoverflow.com/questions/5129102/python-equivalent-to-clojures-partition-all
def updateModel(self, X_all, Y_all, X_new, Y_new):
    """
    Updates the model with new observations.

    The model is refit on the full (X_all, Y_all) data rather than
    incrementally; X_new and Y_new are accepted for interface
    compatibility but are not used here.
    """
    if self.model is None:
        self._create_model(X_all, Y_all)
    else:
        self.model.set_XY(X_all, Y_all)

    # WARNING: Even if self.max_iters=0, the hyperparameters are bit modified...
    if self.max_iters > 0:
        # --- update the model maximizing the marginal likelihood.
        if self.optimize_restarts==1:
            self.model.optimize(optimizer=self.optimizer, max_iters = self.max_iters, messages=False, ipython_notebook=False)
        else:
            # Multiple restarts guard against bad local optima of the likelihood.
            self.model.optimize_restarts(num_restarts=self.optimize_restarts, optimizer=self.optimizer, max_iters = self.max_iters, verbose=self.verbose)
Updates the model with new observations.
def triangle(self, params=None, **kwargs):
    """
    Makes a nifty corner plot.

    :param params: parameter names to include in the corner plot;
        defaults to the triple-star parameters (three component masses,
        age, metallicity, distance and extinction).
    :param kwargs: passed through to the parent class implementation.
    """
    if params is None:
        params = ['mass_A', 'mass_B', 'mass_C', 'age', 'feh', 'distance', 'AV']

    super(TripleStarModel, self).triangle(params=params, **kwargs)
Makes a nifty corner plot.
def spectral_clustering(geom, K, eigen_solver = 'dense', random_state = None, solver_kwds = None,
                        renormalize = True, stabalize = True, additional_vectors = 0):
    """
    Spectral clustering to find K clusters using the eigenvectors of a
    matrix derived from a set of similarities S.

    Parameters
    -----------
    geom : Geometry object
        supplies the affinity matrix S (computed if not cached) and the
        graph Laplacian.
    K : integer
        number of K clusters
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' : algorithm will attempt to choose the best method for input data
        'dense' : use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type. This method should be avoided for large problems.
        'arpack' : use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is best to
            try several random seeds in order to check results.
        'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' : AMG requires pyamg to be installed. It can be faster on very large,
            sparse problems, but may also lead to instabilities.
    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.RandomState
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
    renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
        this can improve label quality
    stabalize : (bool) whether or not to compute the (more stable) eigenvectors of
        L = D^-1/2*S*D^-1/2 instead of P = D^-1*S
    additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
        When eigen_solver = 'amg' or 'lobpcg' often if a small number of eigen values is sought the
        largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
        by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
        omitted. The remaining K-1 eigenvectors should be informative.

    Returns
    -------
    labels : array-like, shape (1,n_samples)
    eigen_vectors : the K-1 informative eigenvectors used for clustering
    P : the (shifted) Laplacian matrix that was decomposed
    """
    # Step 1: get similarity matrix
    if geom.affinity_matrix is None:
        S = geom.compute_affinity_matrix()
    else:
        S = geom.affinity_matrix

    # Check for stability method, symmetric solvers require this
    if eigen_solver in ['lobpcg', 'amg']:
        stabalize = True
    if stabalize:
        geom.laplacian_type = 'symmetricnormalized'
        return_lapsym = True
    else:
        geom.laplacian_type = 'randomwalk'
        return_lapsym = False

    # Step 2: get the Laplacian matrix
    P = geom.compute_laplacian_matrix(return_lapsym = return_lapsym)
    # by default the Laplacian is subtracted from the Identify matrix (this step may not be needed)
    P += identity(P.shape[0])

    # Step 3: Compute the top K eigenvectors and drop the first
    if eigen_solver in ['auto', 'amg', 'lobpcg']:
        # Request extra components: iterative solvers may not return the
        # trivial eigenvalue (=1) first; see `additional_vectors` above.
        n_components = 2*int(np.log(P.shape[0]))*K + 1
        n_components += int(additional_vectors)
    else:
        n_components = K
    n_components = min(n_components, P.shape[0])
    (lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components,
                                                   eigen_solver=eigen_solver,
                                                   random_state=random_state,
                                                   drop_first = True,
                                                   solver_kwds=solver_kwds)
    # the first vector is usually uninformative
    if eigen_solver in ['auto', 'lobpcg', 'amg']:
        if np.abs(lambdas[0] - 1) > 1e-4:
            warnings.warn("largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter")
    eigen_vectors = eigen_vectors[:, 1:K]
    lambdas = lambdas[1:K]

    # If stability method chosen, adjust eigenvectors
    if stabalize:
        # Convert symmetric-normalized eigenvectors back to random-walk form.
        w = np.array(geom.laplacian_weights)
        eigen_vectors /= np.sqrt(w[:,np.newaxis])
        eigen_vectors /= np.linalg.norm(eigen_vectors, axis = 0)

    # If renormalize: set each data point to unit length
    if renormalize:
        norms = np.linalg.norm(eigen_vectors, axis=1)
        eigen_vectors /= norms[:,np.newaxis]

    # Step 4: run k-means clustering
    labels =  k_means_clustering(eigen_vectors,K)
    return labels, eigen_vectors, P
Spectral clustering for find K clusters by using the eigenvectors of a matrix which is derived from a set of similarities S. Parameters ----------- S: array-like,shape(n_sample,n_sample) similarity matrix K: integer number of K clusters eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'} 'auto' : algorithm will attempt to choose the best method for input data 'dense' : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. 'arpack' : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. 'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient Method. A preconditioned eigensolver for large symmetric positive definite (SPD) generalized eigenproblems. 'amg' : AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities. random_state : numpy.RandomState or int, optional The generator or seed used to determine the starting vector for arpack iterations. Defaults to numpy.random.RandomState solver_kwds : any additional keyword arguments to pass to the selected eigen_solver renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1 this can improve label quality stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2 instead of P = D^-1*S additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition. When eigen_solver = 'amg' or 'lopcg' often if a small number of eigen values is sought the largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then omitted. 
The remaining K-1 eigenvectors should be informative. Returns ------- labels: array-like, shape (1,n_samples)
def parse_html_list(dictionary, prefix=''):
    """
    Used to support list values in HTML forms.
    Supports lists of primitives and/or dictionaries.

    * List of primitives.

    {
        '[0]': 'abc',
        '[1]': 'def',
        '[2]': 'hij'
    }
        -->
    [
        'abc',
        'def',
        'hij'
    ]

    * List of dictionaries.

    {
        '[0]foo': 'abc',
        '[0]bar': 'def',
        '[1]foo': 'hij',
        '[1]bar': 'klm',
    }
        -->
    [
        {'foo': 'abc', 'bar': 'def'},
        {'foo': 'hij', 'bar': 'klm'}
    ]
    """
    parsed = {}
    pattern = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
    for field, value in dictionary.items():
        match = pattern.match(field)
        if match is None:
            continue
        index_text, key = match.groups()
        index = int(index_text)
        if not key:
            # Bare '[i]' entry: a primitive list element.
            parsed[index] = value
        else:
            current = parsed.get(index)
            if isinstance(current, dict):
                current[key] = value
            else:
                # First keyed entry for this index starts a dict element.
                parsed[index] = MultiValueDict({key: [value]})
    # Emit elements ordered by their numeric index.
    return [parsed[index] for index in sorted(parsed)]
Used to support list values in HTML forms. Supports lists of primitives and/or dictionaries. * List of primitives. { '[0]': 'abc', '[1]': 'def', '[2]': 'hij' } --> [ 'abc', 'def', 'hij' ] * List of dictionaries. { '[0]foo': 'abc', '[0]bar': 'def', '[1]foo': 'hij', '[1]bar': 'klm', } --> [ {'foo': 'abc', 'bar': 'def'}, {'foo': 'hij', 'bar': 'klm'} ]
def set_distance_units(value=np.nan, from_units='mm', to_units='cm'):
    """convert distance into new units

    Parameters:
    ===========
    value: float. value to convert
    from_units: string. Must be 'mm', 'cm' or 'm'
    to_units: string. must be 'mm','cm' or 'm'

    Returns:
    ========
    converted value

    Raises:
    =======
    ValueError if from_units is not a valid unit (see above)
    ValueError if to_units is not a valid unit
    """
    # NOTE: default uses np.nan — the np.NaN alias was removed in NumPy 2.0.
    if from_units == to_units:
        return value

    # Exact conversion coefficients for each (from, to) pair; an explicit
    # table preserves the exact integer/float factors of the original code.
    coefficients = {
        ('cm', 'mm'): 10,
        ('cm', 'm'): 0.01,
        ('mm', 'cm'): 0.1,
        ('mm', 'm'): 0.001,
        ('m', 'mm'): 1000,
        ('m', 'cm'): 100,
    }

    # BUGFIX: an invalid from_units previously raised a misleading
    # "to_units not supported" message.
    if from_units not in ('mm', 'cm', 'm'):
        raise ValueError("from_units not supported ['cm','m','mm']!")
    try:
        coeff = coefficients[(from_units, to_units)]
    except KeyError:
        raise ValueError("to_units not supported ['cm','m','mm']!")

    return coeff * value
convert distance into new units Parameters: =========== value: float. value to convert from_units: string. Must be 'mm', 'cm' or 'm' to_units: string. must be 'mm','cm' or 'm' Returns: ======== converted value Raises: ======= ValueError if from_units is not a valid unit (see above) ValueError if to_units is not a valid unit
def get_email_receivers_of_recurring_per_page(self, recurring_id, per_page=1000, page=1):
    """
    Get one page of the email receivers attached to a recurring.

    :param recurring_id: the recurring id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    # Filter the resource listing down to the given recurring.
    query_params = {'recurring_id': recurring_id}
    return self._get_resource_per_page(
        resource=RECURRING_EMAIL_RECEIVERS,
        per_page=per_page,
        page=page,
        params=query_params,
    )
Get email receivers of recurring per page :param recurring_id: the recurring id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
async def viewreaction(self, ctx, *, reactor : str):
    """Views a specific reaction"""
    # Per-server config: {keyword: {"response": str, "reaction": [emoji, ...]}}
    data = self.config.get(ctx.message.server.id, {})
    keyword = data.get(reactor, {})

    if not keyword:
        await self.bot.responses.failure(message="Reaction '{}' was not found.".format(reactor))
        return

    response = data.get(reactor, {}).get("response", "")
    reacts = data.get(reactor, {}).get("reaction", [])

    # Custom emoji are stored as 'name:id'; wrap them so they render.
    for i, r in enumerate(reacts):
        if ":" in r:
            reacts[i] = "<:" + r + ">"

    # '-' is the placeholder shown when nothing is configured.
    reacts = " ".join(reacts) if reacts else "-"
    response = response if response else "-"

    # NOTE(review): 'string' is built but never used below — presumably
    # superseded by the sectioned embed; confirm before removing.
    string = "Here's what I say to '{reactor}': {response}\n"\
             "I'll react to this message how I react to '{reactor}'.".format(reactor=reactor,response=response)

    await self.bot.responses.full(sections=[{"name": "Response", "value": response},
                                            {"name": "Reactions", "value": reacts, "inline": False}])
Views a specific reaction
def _process_disease_models(self, limit):
    """
    Here we make associations between a disease and the supplied "model".
    In this case it's an allele.
    FIXME consider changing this... are alleles really models?
    Perhaps map these alleles into actual animals/strains or genotypes?

    :param limit: maximum number of rows to process (None for all)
    :return: None
    """
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    raw = '/'.join((self.rawdir, self.files['disease_models']['file']))
    LOG.info("processing disease models")
    line_counter = 0
    geno = Genotype(graph)
    fly_taxon = self.globaltt["Drosophila melanogaster"]

    with gzip.open(raw, 'rb') as f:
        filereader = csv.reader(
            io.TextIOWrapper(f, newline=""), delimiter='\t', quotechar='\"')
        for line in filereader:
            # skip comments
            if re.match(r'#', ''.join(line)) or ''.join(line) == '':
                continue
            (allele_id, allele_symbol, qualifier, doid_label, doid_id,
             evidence_or_interacting_allele, pub_id) = line
            line_counter += 1

            # In test mode only process the whitelisted disease IDs.
            if self.test_mode and self.test_ids['disease'] is not None \
                    and doid_id not in self.test_ids['disease']:
                continue

            rel = None
            allele_id = 'FlyBase:' + allele_id
            if qualifier == 'model of':
                rel = self.globaltt['is model of']
            else:
                # TODO amelorates, exacerbates, and DOES NOT *
                continue

            # Wrap the allele into an experimental model organism/genotype.
            animal_id = geno.make_experimental_model_with_genotype(
                allele_id, allele_symbol, fly_taxon, 'fly')

            assoc = G2PAssoc(graph, self.name, animal_id, doid_id, rel)
            if pub_id != '':
                pub_id = 'FlyBase:'+pub_id
                assoc.add_source(pub_id)

            if evidence_or_interacting_allele == 'inferred from mutant phenotype':
                evidence_id = self.globaltt['mutant phenotype evidence']
                assoc.add_evidence(evidence_id)
            else:
                # Otherwise the column names an interacting allele: keep
                # it as free-text description on the association.
                assoc.set_description(evidence_or_interacting_allele)

            assoc.add_association_to_graph()

            if not self.test_mode and limit is not None and line_counter > limit:
                break

    return
Here we make associations between a disease and the supplied "model". In this case it's an allele. FIXME consider changing this... are alleles really models? Perhaps map these alleles into actual animals/strains or genotypes? :param limit: :return:
def is_writable(path):
    """Check if *path* (a directory) has write access.

    Probes by creating, then immediately discarding, a temporary file
    inside *path*.

    :param path: directory to test
    :return: False when the probe fails with a permission-style error
        (EACCES, EPERM, EROFS) or the directory does not exist (ENOENT);
        True otherwise.
    """
    try:
        testfile = tempfile.TemporaryFile(dir=path)
        testfile.close()
    except OSError as e:
        # BUGFIX: previously only EACCES returned False, so a read-only
        # filesystem (EROFS), EPERM, or a missing directory (ENOENT) was
        # wrongly reported as writable. Other OSErrors keep the original
        # lenient behaviour of returning True.
        if e.errno in (errno.EACCES, errno.EPERM, errno.EROFS, errno.ENOENT):
            return False
    return True
Check if path has write access
def is_name_owner(self, name, sender_script_pubkey):
    """
    Determine whether *sender_script_pubkey* owns the fully-qualified *name*.

    The name must exist and not be revoked or expired at the current block.
    """
    if not self.is_name_registered(name):
        # An unregistered name has no owner at all.
        return False

    return self.get_name_owner(name) == sender_script_pubkey
Given the fully-qualified name and a sender's script pubkey, determine if the sender owns the name. The name must exist and not be revoked or expired at the current block.
def predict(self, u=0): """ Predict next position. Parameters ---------- u : ndarray Optional control vector. If non-zero, it is multiplied by `B` to create the control input into the system. """ # x = Fx + Bu self.x = dot(self.F, self.x) + dot(self.B, u)
Predict next position. Parameters ---------- u : ndarray Optional control vector. If non-zero, it is multiplied by `B` to create the control input into the system.
def check_bam(bam, o):
    """
    Check reads in BAM file for read type and lengths.

    :param str bam: BAM file path.
    :param int o: Number of reads to look at for estimation.
    :return: tuple of (dict mapping read length -> count,
        number of paired alignments among the sampled reads)
    :raises OSError: if samtools is not available on PATH.
    """
    try:
        p = sp.Popen(['samtools', 'view', bam], stdout=sp.PIPE)
        # Count paired alignments
        paired = 0
        read_lengths = defaultdict(int)
        while o > 0:  # Count down number of lines
            line = p.stdout.readline().decode().split("\t")
            flag = int(line[1])
            # SAM column 10 is SEQ; its length is the read length.
            read_lengths[len(line[9])] += 1
            if 1 & flag:  # check decimal flag contains 1 (paired)
                paired += 1
            o -= 1
        # Only a sample was needed; terminate samtools early.
        p.kill()
    except OSError:
        reason = "Note (samtools not in path): For NGS inputs, " \
                 "pep needs samtools to auto-populate " \
                 "'read_length' and 'read_type' attributes; " \
                 "these attributes were not populated."
        raise OSError(reason)

    _LOGGER.debug("Read lengths: {}".format(read_lengths))
    _LOGGER.debug("paired: {}".format(paired))
    return read_lengths, paired
Check reads in BAM file for read type and lengths. :param str bam: BAM file path. :param int o: Number of reads to look at for estimation.
def offset(self):
    """int: offset of the key within the Windows Registry file or None."""
    if not self._registry_key and self._registry:
        # Lazily resolve the backing key on first access.
        self._GetKeyFromRegistry()

    key = self._registry_key
    return key.offset if key else None
int: offset of the key within the Windows Registry file or None.
def drop(self):
    """Drop the table from the database.

    Deletes both the schema and all the contents within it.
    """
    with self.db.lock:
        if self.exists:
            # Warn if another thread may still be using this table.
            self._threading_warn()
            # checkfirst avoids an error if the table vanished meanwhile.
            self.table.drop(self.db.executable, checkfirst=True)
            # Reset the cached reflection so the table is recreated on
            # next use.
            self._table = None
Drop the table from the database. Deletes both the schema and all the contents within it.
def on(self, left_speed, right_speed):
    """
    Start rotating the motors according to ``left_speed`` and ``right_speed`` forever.
    Speeds can be percentages or any SpeedValue implementation.

    This only starts the motion; it returns immediately without waiting.
    """
    (left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed)

    # Set all parameters
    self.left_motor.speed_sp = int(round(left_speed_native_units))
    self.right_motor.speed_sp = int(round(right_speed_native_units))

    # This debug involves disk I/O to pull speed_sp so only uncomment
    # if you need to troubleshoot in more detail.
    # log.debug("%s: on at left-speed %s, right-speed %s" %
    #     (self, self.left_motor.speed_sp, self.right_motor.speed_sp))

    # Start the motors
    self.left_motor.run_forever()
    self.right_motor.run_forever()
Start rotating the motors according to ``left_speed`` and ``right_speed`` forever. Speeds can be percentages or any SpeedValue implementation.
def _make_intersection(edge_info, all_edge_nodes):
    """Convert a description of edges into a curved polygon.

    .. note::

       This is a helper used only by :meth:`.Surface.intersect`.

    Args:
        edge_info (Tuple[Tuple[int, float, float], ...]): Information
            describing each edge in the curved polygon by indicating which
            surface / edge on the surface and then start and end parameters
            along that edge. (See :func:`.ends_to_curve`.)
        all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges
            of the first surface being intersected followed by the nodes of
            the three edges of the second.

    Returns:
        .CurvedPolygon: The intersection corresponding to ``edge_info``.
    """
    edges = []
    for index, start, end in edge_info:
        nodes = all_edge_nodes[index]
        # Clip the full edge down to the [start, end] parameter range.
        new_nodes = _curve_helpers.specialize_curve(nodes, start, end)
        degree = new_nodes.shape[1] - 1
        # _copy=False: new_nodes is freshly created, no defensive copy needed.
        edge = _curve_mod.Curve(new_nodes, degree, _copy=False)
        edges.append(edge)
    # _verify=False: the edges come from a verified intersection process.
    return curved_polygon.CurvedPolygon(
        *edges, metadata=edge_info, _verify=False
    )
Convert a description of edges into a curved polygon. .. note:: This is a helper used only by :meth:`.Surface.intersect`. Args: edge_info (Tuple[Tuple[int, float, float], ...]): Information describing each edge in the curved polygon by indicating which surface / edge on the surface and then start and end parameters along that edge. (See :func:`.ends_to_curve`.) all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges of the first surface being intersected followed by the nodes of the three edges of the second. Returns: .CurvedPolygon: The intersection corresponding to ``edge_info``.
def install_package(self, path, recursive=False):
    # type: (str, bool) -> tuple
    """
    Installs all the modules found in the given package (directory).

    Works like :meth:`~pelix.framework.BundleContext.install_visiting`
    with a visitor that accepts every module found.

    :param path: Path of the package (folder)
    :param recursive: If True, installs the modules found in sub-directories
    :return: A 2-tuple, with the list of installed bundles
             (:class:`~pelix.framework.Bundle`) and the list of the names
             of the modules which import failed.
    :raise ValueError: The given path is invalid
    """
    # Delegate the actual work to the owning framework instance.
    framework = self.__framework
    return framework.install_package(path, recursive)
Installs all the modules found in the given package (directory). It is a utility method working like :meth:`~pelix.framework.BundleContext.install_visiting`, with a visitor accepting every module found. :param path: Path of the package (folder) :param recursive: If True, installs the modules found in sub-directories :return: A 2-tuple, with the list of installed bundles (:class:`~pelix.framework.Bundle`) and the list of the names of the modules which import failed. :raise ValueError: The given path is invalid
def value(self):
    """Returns the positive value to subtract from the total."""
    # A non-zero flat rate takes precedence over the percentage discount.
    if self.flatRate != 0:
        return self.flatRate
    # Percentage discount is taken off the line item's full price.
    return self.lineItem.totalPrice * self.percent
Returns the positive value to subtract from the total.
def parse_url(self):
    """
    Parses a URL of the form:

    - ws://host[:port][path]
    - wss://host[:port][path]
    - ws+unix:///path/to/my.socket

    Populates ``self.scheme``, ``self.host``, ``self.port`` and
    ``self.resource`` from ``self.url`` (and ``self.unix_socket_path``
    for the unix-socket schemes). Does nothing if ``self.url`` is None.
    """
    self.scheme = None
    self.resource = None
    self.host = None
    self.port = None

    if self.url is None:
        return

    scheme, url = self.url.split(":", 1)

    # Reuse the http parser so urlsplit fills hostname/port/path/query.
    parsed = urlsplit(url, scheme="http")
    if parsed.hostname:
        self.host = parsed.hostname
    elif '+unix' in scheme:
        # Unix-domain sockets carry a filesystem path, not a network host.
        self.host = 'localhost'
    else:
        raise ValueError("Invalid hostname from: %s", self.url)

    if parsed.port:
        self.port = parsed.port

    if scheme == "ws":
        if not self.port:
            # NOTE(review): default is 8080 here, not the RFC 6455
            # port 80 — confirm this is intended.
            self.port = 8080
    elif scheme == "wss":
        if not self.port:
            self.port = 443
    elif scheme in ('ws+unix', 'wss+unix'):
        pass
    else:
        raise ValueError("Invalid scheme: %s" % scheme)

    if parsed.path:
        resource = parsed.path
    else:
        resource = "/"

    if '+unix' in scheme:
        # For unix sockets the parsed "path" is the socket file location;
        # the HTTP resource defaults to '/'.
        self.unix_socket_path = resource
        resource = '/'

    if parsed.query:
        resource += "?" + parsed.query

    self.scheme = scheme
    self.resource = resource
Parses a URL of the form: - ws://host[:port][path] - wss://host[:port][path] - ws+unix:///path/to/my.socket
def _get_length_sequences_where(x): """ This method calculates the length of all sub-sequences where the array x is either True or 1. Examples -------- >>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1] >>> _get_length_sequences_where(x) >>> [1, 3, 1, 2] >>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True] >>> _get_length_sequences_where(x) >>> [1, 3, 1, 2] >>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True] >>> _get_length_sequences_where(x) >>> [1, 3, 1, 2] :param x: An iterable containing only 1, True, 0 and False values :return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues contained, the list [0] is returned. """ if len(x) == 0: return [0] else: res = [len(list(group)) for value, group in itertools.groupby(x) if value == 1] return res if len(res) > 0 else [0]
This method calculates the length of all sub-sequences where the array x is either True or 1. Examples -------- >>> x = [0,1,0,0,1,1,1,0,0,1,0,1,1] >>> _get_length_sequences_where(x) >>> [1, 3, 1, 2] >>> x = [0,True,0,0,True,True,True,0,0,True,0,True,True] >>> _get_length_sequences_where(x) >>> [1, 3, 1, 2] >>> x = [0,True,0,0,1,True,1,0,0,True,0,1,True] >>> _get_length_sequences_where(x) >>> [1, 3, 1, 2] :param x: An iterable containing only 1, True, 0 and False values :return: A list with the length of all sub-sequences where the array is either True or False. If no ones or Trues contained, the list [0] is returned.
def _teardown_redundancy_router_gw_connectivity(self, context, router,
                                                router_db, plugging_driver):
    """To be called in update_router() if the router gateway is to change
    BEFORE router has been updated in DB.

    :param context: request context
    :param router: router dict from the update request (checked for HA)
    :param router_db: current DB record of the router
    :param plugging_driver: driver used to tear down gateway port
        connectivity; may be None, in which case only the DB updates and
        notifications are performed
    """
    if not router[ha.ENABLED]:
        # No HA currently enabled so we're done
        return
    e_context = context.elevated()
    # since gateway is about to change the ha group for the current gateway
    # is removed, a new one will be created later
    self._delete_ha_group(e_context, router_db.gw_port_id)
    # teardown connectivity for the gw ports on the redundancy routers
    # and remove those ports as new ones will be created later
    rr_ids = []
    for r_b_db in router_db.redundancy_bindings:
        if plugging_driver is not None:
            plugging_driver.teardown_logical_port_connectivity(
                e_context, r_b_db.redundancy_router.gw_port,
                r_b_db.redundancy_router.hosting_info.hosting_device_id)
        # Clear the gateway (and HA flag) on each redundancy router
        # without emitting per-router notifications.
        self._update_router_no_notify(
            e_context, r_b_db.redundancy_router_id,
            {'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
        rr_ids.append(r_b_db.redundancy_router_id)
    # One batched notification covering all modified redundancy routers.
    self.notify_routers_updated(e_context, rr_ids)
To be called in update_router() if the router gateway is to change BEFORE router has been updated in DB .
def cred_def_id(issuer_did: str, schema_seq_no: int, protocol: Protocol = None) -> str:
    """
    Return credential definition identifier for input issuer DID and schema sequence number.

    Implementation passes to NodePool Protocol.

    :param issuer_did: DID of credential definition issuer
    :param schema_seq_no: schema sequence number
    :param protocol: indy protocol version (None selects Protocol.DEFAULT)
    :return: credential definition identifier
    """
    # Fall back to the default protocol when none (or a falsy one) is given.
    effective_protocol = protocol or Protocol.DEFAULT
    return effective_protocol.cred_def_id(issuer_did, schema_seq_no)
Return credential definition identifier for input issuer DID and schema sequence number. Implementation passes to NodePool Protocol. :param issuer_did: DID of credential definition issuer :param schema_seq_no: schema sequence number :param protocol: indy protocol version :return: credential definition identifier
def install_os(name, **kwargs):
    '''
    Installs the given image on the device. After the installation is complete
    the device is rebooted, if reboot=True is given as a keyworded argument.

    .. code-block:: yaml

        salt://images/junos_image.tgz:
          junos:
            - install_os
            - timeout: 100
            - reboot: True

    Parameters:
      Required
        * path:
          Path where the image file is present on the proxy minion.
      Optional
        * kwargs: keyworded arguments to be given such as timeout, reboot etc
            * timeout:
              Set NETCONF RPC timeout. Can be used to RPCs which take a while
              to execute. (default = 30 seconds)
            * reboot:
              Whether to reboot after installation (default = False)
            * no_copy:
              When True the software package will not be SCP'd to the device.
              (default = False)
    '''
    # Standard salt state return skeleton; result defaults to success.
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    # Delegate the actual install to the junos execution module.
    ret['changes'] = __salt__['junos.install_os'](name, **kwargs)
    return ret
Installs the given image on the device. After the installation is complete the device is rebooted, if reboot=True is given as a keyworded argument. .. code-block:: yaml salt://images/junos_image.tgz: junos: - install_os - timeout: 100 - reboot: True Parameters: Required * path: Path where the image file is present on the pro\ xy minion. Optional * kwargs: keyworded arguments to be given such as timeout, reboot etc * timeout: Set NETCONF RPC timeout. Can be used to RPCs which take a while to execute. (default = 30 seconds) * reboot: Whether to reboot after installation (default = False) * no_copy: When True the software package will not be SCP’d to the device. \ (default = False)
def metadata_and_language_from_option_line(self, line):
    """Parse code-cell options on the given line.

    When the line opens a code cell, ``self.metadata`` is set to a
    dictionary (and, for the primary ``%%`` marker, ``self.language`` is
    set as well).
    """
    if self.start_code_re.match(line):
        # Everything after the '%%' marker carries the cell options.
        option_text = line[line.find('%%') + 2:]
        self.language, self.metadata = self.options_to_metadata(option_text)
    elif self.alternative_start_code_re.match(line):
        # Alternative cell marker: a code cell with no options.
        self.metadata = {}
Parse code options on the given line. When a start of a code cell is found, self.metadata is set to a dictionary.
def get_onsets(text, vowels="aeiou", threshold=0.0002):
    """
    Collect the syllable onsets (consonant clusters that precede a vowel)
    occurring in ``text`` and keep those whose relative frequency exceeds
    ``threshold``.

    Source: Resonances in Middle High German: New Methodologies in Prosody,
    2017, C. L. Hench

    :param text: str list: text to be analysed
    :param vowels: str: valid vowels constituting the syllable
    :param threshold: minimum relative frequency for a valid onset; C. Hench
                      noted that the algorithm produces the best result for
                      an untagged wordset of MHG when retaining onsets which
                      appear in at least 0.02% of the words
    :return: list of onsets, in first-seen order

    Example:
    Let's test it on the opening lines of Nibelungenlied

    >>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen']

    >>> vowels = "aeiouæœôîöü"

    >>> get_onsets(text, vowels=vowels)
    ['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str']

    Of course, this is an insignificant sample, but we could try and see how
    modifying the threshold affects the returned onset:

    >>> get_onsets(text, threshold = 0.05, vowels=vowels)
    ['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n']
    """
    # Guard against empty input: the frequency computation below divides
    # by the number of words.
    if not text:
        return []
    onset_counts = defaultdict(int)
    total_words = len(text)
    for word in text:
        onset = ''
        candidates = []
        for char in word:
            if char not in vowels:
                onset += char
            else:
                # A vowel terminates the current consonant run; only runs
                # that precede a vowel count as onsets (word-final codas
                # are deliberately discarded).
                if onset != '':
                    candidates.append(onset)
                onset = ''
        for candidate in candidates:
            onset_counts[candidate] += 1
    return [onset for onset, count in onset_counts.items()
            if count / total_words > threshold]
Source: Resonances in Middle High German: New Methodologies in Prosody, 2017, C. L. Hench :param text: str list: text to be analysed :param vowels: str: valid vowels constituting the syllable :param threshold: minimum frequency count for valid onset, C. Hench noted that the algorithm produces the best result for an untagged wordset of MHG, when retaining onsets which appear in at least 0.02% of the words Example: Let's test it on the opening lines of Nibelungenlied >>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen'] >>> vowels = "aeiouæœôîöü" >>> get_onsets(text, vowels=vowels) ['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str'] Of course, this is an insignificant sample, but we could try and see how modifying the threshold affects the returned onset: >>> get_onsets(text, threshold = 0.05, vowels=vowels) ['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n']
def concordance_index(event_times, predicted_scores, event_observed=None):
    """
    Calculate the concordance index (C-index) between two series of event
    times: the observed survival times from the data and the predicted
    scores from a model.

    The c-index is the average of how often a model says X is greater than Y
    when, in the observed data, X is indeed greater than Y, with special
    handling for censored values (if Y is censored, it's hard to know if X
    is truly greater than Y).

    The concordance index is a value between 0 and 1 where:

    - 0.5 is the expected result from random predictions,
    - 1.0 is perfect concordance and,
    - 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0)

    Parameters
    ----------
    event_times: iterable
         a length-n iterable of observed survival times.
    predicted_scores: iterable
        a length-n iterable of predicted scores - these could be survival times, or hazards, etc. See https://stats.stackexchange.com/questions/352183/use-median-survival-time-to-calculate-cph-c-statistic/352435#352435
    event_observed: iterable, optional
        a length-n iterable censorship flags, 1 if observed, 0 if not. Default None assumes all observed.

    Returns
    -------
    c-index: float
        a value between 0 and 1.

    References
    -----------
    Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in
    developing models, evaluating assumptions and adequacy, and measuring and
    reducing errors. Statistics in Medicine 1996;15(4):361-87.

    Examples
    --------

    >>> from lifelines.utils import concordance_index
    >>> cph = CoxPHFitter().fit(df, 'T', 'E')
    >>> concordance_index(df['T'], -cph.predict_partial_hazard(df), df['E'])
    """

    def _to_flat_float_array(values):
        # Coerce to a float ndarray and flatten (n, 1) / (1, n) shapes.
        arr = np.asarray(values, dtype=float)
        if arr.ndim == 2 and (arr.shape[0] == 1 or arr.shape[1] == 1):
            arr = arr.ravel()
        return arr

    event_times = _to_flat_float_array(event_times)
    predicted_scores = _to_flat_float_array(predicted_scores)

    if event_times.shape != predicted_scores.shape:
        raise ValueError("Event times and predictions must have the same shape")
    if event_times.ndim != 1:
        raise ValueError("Event times can only be 1-dimensional: (n,)")

    if event_observed is None:
        # No censorship information: treat every event as observed.
        event_observed = np.ones(event_times.shape[0], dtype=float)
    else:
        event_observed = np.asarray(event_observed, dtype=float).ravel()
        if event_observed.shape != event_times.shape:
            raise ValueError("Observed events must be 1-dimensional of same length as event times")

    num_correct, num_tied, num_pairs = _concordance_summary_statistics(event_times, predicted_scores, event_observed)

    return _concordance_ratio(num_correct, num_tied, num_pairs)
Calculates the concordance index (C-index) between two series of event times. The first is the real survival times from the experimental data, and the other is the predicted survival times from a model of some kind. The c-index is the average of how often a model says X is greater than Y when, in the observed data, X is indeed greater than Y. The c-index also handles how to handle censored values (obviously, if Y is censored, it's hard to know if X is truly greater than Y). The concordance index is a value between 0 and 1 where: - 0.5 is the expected result from random predictions, - 1.0 is perfect concordance and, - 0.0 is perfect anti-concordance (multiply predictions with -1 to get 1.0) Parameters ---------- event_times: iterable a length-n iterable of observed survival times. predicted_scores: iterable a length-n iterable of predicted scores - these could be survival times, or hazards, etc. See https://stats.stackexchange.com/questions/352183/use-median-survival-time-to-calculate-cph-c-statistic/352435#352435 event_observed: iterable, optional a length-n iterable censorship flags, 1 if observed, 0 if not. Default None assumes all observed. Returns ------- c-index: float a value between 0 and 1. References ----------- Harrell FE, Lee KL, Mark DB. Multivariable prognostic models: issues in developing models, evaluating assumptions and adequacy, and measuring and reducing errors. Statistics in Medicine 1996;15(4):361-87. Examples -------- >>> from lifelines.utils import concordance_index >>> cph = CoxPHFitter().fit(df, 'T', 'E') >>> concordance_index(df['T'], -cph.predict_partial_hazard(df), df['E'])
def _merge_out_from_infiles(in_files): """Generate output merged file name from set of input files. Handles non-shared filesystems where we don't know output path when setting up split parts. """ fname = os.path.commonprefix([os.path.basename(f) for f in in_files]) while fname.endswith(("-", "_", ".")): fname = fname[:-1] ext = os.path.splitext(in_files[0])[-1] dirname = os.path.dirname(in_files[0]) while dirname.endswith(("split", "merge")): dirname = os.path.dirname(dirname) return os.path.join(dirname, "%s%s" % (fname, ext))
Generate output merged file name from set of input files. Handles non-shared filesystems where we don't know output path when setting up split parts.
def complete(self, default_output=None):
    """Marks this asynchronous Pipeline as complete.

    Args:
      default_output: What value the 'default' output slot should be assigned.

    Raises:
      UnexpectedPipelineError if the slot no longer exists or this method
      was called for a pipeline that is not async.
    """
    # TODO: Enforce that all outputs expected by this async pipeline were
    # filled before this complete() function was called. May require all
    # async functions to declare their outputs upfront.
    # NOTE: 'async' became a reserved keyword in Python 3.7, so the
    # attribute must be read via getattr() rather than as self.async
    # (which no longer parses).
    if not getattr(self, 'async'):
        raise UnexpectedPipelineError(
            'May only call complete() method for asynchronous pipelines.')
    self._context.fill_slot(
        self._pipeline_key, self.outputs.default, default_output)
Marks this asynchronous Pipeline as complete. Args: default_output: What value the 'default' output slot should be assigned. Raises: UnexpectedPipelineError if the slot no longer exists or this method was called for a pipeline that is not async.
def from_csv(cls, path:PathOrStr, folder:PathOrStr=None, label_delim:str=None, csv_labels:PathOrStr='labels.csv',
             valid_pct:float=0.2, fn_col:int=0, label_col:int=1, suffix:str='', delimiter:str=None,
             header:Optional[Union[int,str]]='infer', **kwargs:Any)->'ImageDataBunch':
    "Create from a csv file in `path/csv_labels`."
    path = Path(path)
    # Read the labels csv, then defer all splitting/labelling to `from_df`.
    labels_df = pd.read_csv(path/csv_labels, header=header, delimiter=delimiter)
    return cls.from_df(path, labels_df, folder=folder, label_delim=label_delim, valid_pct=valid_pct,
                       fn_col=fn_col, label_col=label_col, suffix=suffix, **kwargs)
Create from a csv file in `path/csv_labels`.
def _get_proxy_info(self, _=None):
    """Build a ProxyInfo wrapping a socket tunneled through SSH.

    Implements the proxy_info interface from httplib, which requires
    accepting a scheme argument and returning a ProxyInfo object.

    Args:
        _ (None): Ignored. Present only to satisfy the proxy_info spec.

    Returns:
        SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled
            through SSH
    """
    # Parse the fleet endpoint url to decide where the tunnel should go.
    target_host, target_port, target_path = self._endpoint_to_target(self._endpoint)
    # This is called once per request, which lets us keep one ssh
    # connection open and allocate new channels as needed per-request.
    if target_path:
        tunneled_sock = self._ssh_tunnel.forward_unix(path=target_path)
    else:
        tunneled_sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port)
    return SSHTunnelProxyInfo(sock=tunneled_sock)
Generate a ProxyInfo class from a connected SSH transport Args: _ (None): Ignored. This is just here as the ProxyInfo spec requires it. Returns: SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
def run_cell(self, cell, cell_index=0):
    """
    Run a cell, loading its value from the on-disk cache when possible.

    :param cell: cell to run
    :param cell_index: cell index (optional)
    :return: result of the cell execution (possibly loaded from cache)
    """
    # Avoid naming this local 'hash': it would shadow the builtin.
    cell_id = self.cell_hash(cell, cell_index)

    fname_session = '/tmp/pynb-cache-{}-session.dill'.format(cell_id)
    fname_value = '/tmp/pynb-cache-{}-value.dill'.format(cell_id)

    # Short, whitespace-normalized preview of the cell source for logging.
    cell_snippet = str(" ".join(cell.source.split())).strip()[:40]

    if self.disable_cache:
        logging.info('Cell {}: Running: "{}.."'.format(cell_id, cell_snippet))
        return super().run_cell(cell, cell_index)

    if not self.ignore_cache:
        # Cache hit: load the pickled value and remember which session
        # file a later cell should restore from.
        if self.cache_valid and os.path.isfile(fname_session) and os.path.isfile(fname_value):
            logging.info('Cell {}: Loading: "{}.."'.format(cell_id, cell_snippet))
            self.prev_fname_session = fname_session
            with open(fname_value, 'rb') as f:
                value = dill.load(f)
            return value

    # If cache does not exist or is not valid:
    #
    # 1) Invalidate subsequent cell caches
    # 2) Load session from previous cached cell (if existing)
    # 3) Run cell
    # 4) Cache cell session
    # 5) Cache cell value

    logging.info('Cell {}: Running: "{}.."'.format(cell_id, cell_snippet))

    # 1) Invalidate subsequent cell caches
    self.cache_valid = False

    # 2) Load session from previous cached cell (if existing and required)
    if self.prev_fname_session:
        if self.prev_fname_session_loaded != self.prev_fname_session:
            self.session_load(cell_id, self.prev_fname_session)

    # 3) Run cell
    value = super().run_cell(cell, cell_index)

    # We make sure that injected cells do not interfere with the cell index...
    # value[0]['content']['execution_count'] = cell_index

    # 4) Cache cell session
    cached = self.session_dump(cell, cell_id, fname_session)

    # 5) Cache cell value, but only if dumping the session in 4) succeeded.
    if cached:
        self.prev_fname_session_loaded = fname_session
        self.prev_fname_session = fname_session

        logging.debug('Cell {}: dumping value to {}'.format(cell_id, fname_value))

        with open(fname_value, 'wb') as f:
            dill.dump(value, f)

        logging.debug('Cell {}: cached'.format(cell_id))

    return value
Run cell with caching :param cell: cell to run :param cell_index: cell index (optional) :return: result of the cell execution, loaded from cache when available