text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_annotation(data): """Extract names and values of rows and columns. Parameter: data : DataFrame | Panel Returns: col_name, col_values, row_name, row_values """
xlabel = None xvalues = None ylabel = None yvalues = None if hasattr(data, 'minor_axis'): xvalues = data.minor_axis if hasattr(data.minor_axis, 'name'): xlabel = data.minor_axis.name if hasattr(data, 'columns'): xvalues = data.columns if hasattr(data.columns, 'name'): xlabel = data.columns.name if hasattr(data, 'major_axis'): yvalues = data.major_axis if hasattr(data.major_axis, 'name'): ylabel = data.major_axis.name if hasattr(data, 'index'): yvalues = data.index if hasattr(data.index, 'name'): ylabel = data.index.name return xlabel, xvalues, ylabel, yvalues
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_using_this_method(original_sample): """ This function implements a log transformation on the data. """
# Copy the original sample new_sample = original_sample.copy() new_data = new_sample.data # Our transformation goes here new_data['Y2-A'] = log(new_data['Y2-A']) new_data = new_data.dropna() # Removes all NaN entries new_sample.data = new_data return new_sample
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_data(self, **kwargs): ''' Read the datafile specified in Sample.datafile and return the resulting object. Does NOT assign the data to self.data It's advised not to use this method, but instead to access the data through the FCMeasurement.data attribute. ''' meta, data = parse_fcs(self.datafile, **kwargs) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_meta_fields(self, fields, kwargs={}): ''' Return a dictionary of metadata fields ''' fields = to_list(fields) meta = self.get_meta() return {field: meta.get(field) for field in fields}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(self, channel_names, kind='histogram', gates=None, gate_colors=None, gate_lw=1, **kwargs): """Plot the flow cytometry data associated with the sample on the current axis. To produce the plot, follow up with a call to matplotlib's show() function. Parameters {graph_plotFCM_pars} {FCMeasurement_plot_pars} {common_plot_ax} gates : [None, Gate, list of Gate] Gate must be of type {_gate_available_classes}. gate_lw: float | iterable line width to use when drawing gates if float, uses the same line width for all gates if iterable, then cycles between the values kwargs : dict Additional keyword arguments to be passed to graph.plotFCM Returns ------- None : if no data is present plot_output : output of plot command used to draw (e.g., output of hist) Examples -------- """
ax = kwargs.get('ax') channel_names = to_list(channel_names) gates = to_list(gates) plot_output = graph.plotFCM(self.data, channel_names, kind=kind, **kwargs) if gates is not None: if gate_colors is None: gate_colors = cycle(('b', 'g', 'r', 'm', 'c', 'y')) if not isinstance(gate_lw, collections.Iterable): gate_lw = [gate_lw] gate_lw = cycle(gate_lw) for (g, c, lw) in zip(gates, gate_colors, gate_lw): g.plot(ax=ax, ax_channels=channel_names, color=c, lw=lw) return plot_output
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def view(self, channel_names='auto', gates=None, diag_kw={}, offdiag_kw={}, gate_colors=None, **kwargs): """ Generates a matrix of subplots allowing for a quick way to examine how the sample looks in different channels. Parameters channel_names : [list | 'auto'] List of channel names to plot. offdiag_plot : ['histogram' | 'scatter'] Specifies the type of plot for the off-diagonal elements. diag_kw : dict Not implemented Returns axes references """
if channel_names == 'auto': channel_names = list(self.channel_names) def plot_region(channels, **kwargs): if channels[0] == channels[1]: channels = channels[0] kind = 'histogram' self.plot(channels, kind=kind, gates=gates, gate_colors=gate_colors, autolabel=False) channel_list = np.array(list(channel_names), dtype=object) channel_mat = [[(x, y) for x in channel_list] for y in channel_list] channel_mat = DataFrame(channel_mat, columns=channel_list, index=channel_list) kwargs.setdefault('wspace', 0.1) kwargs.setdefault('hspace', 0.1) return plot_ndpanel(channel_mat, plot_region, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def view_interactively(self, backend='wx'): '''Loads the current sample in a graphical interface for drawing gates. Parameters ---------- backend: 'auto' | 'wx' | 'webagg' Specifies which backend should be used to view the sample. ''' if backend == 'auto': if matplotlib.__version__ >= '1.4.3': backend = 'WebAgg' else: backend = 'wx' if backend == 'wx': from FlowCytometryTools.gui.wx_backend import gui elif backend == 'webagg': from FlowCytometryTools.gui.webagg_backend import gui else: raise ValueError('No support for backend {}'.format(backend)) gui.GUILauncher(measurement=self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(self, transform, direction='forward', channels=None, return_all=True, auto_range=True, use_spln=True, get_transformer=False, ID=None, apply_now=True, args=(), **kwargs): """ Applies a transformation to the specified channels. The transformation parameters are shared between all transformed channels. If different parameters need to be applied to different channels, use several calls to `transform`. Parameters {FCMeasurement_transform_pars} ID : hashable | None ID for the resulting collection. If None is passed, the original ID is used. Returns ------- new : FCMeasurement New measurement containing the transformed data. transformer : Transformation The Transformation applied to the input measurement. Only returned if get_transformer=True. Examples -------- {FCMeasurement_transform_examples} """
# Create new measurement new = self.copy() data = new.data channels = to_list(channels) if channels is None: channels = data.columns ## create transformer if isinstance(transform, Transformation): transformer = transform else: if auto_range: # determine transformation range if 'd' in kwargs: warnings.warn( 'Encountered both auto_range=True and user-specified range value in ' 'parameter d.\n Range value specified in parameter d is used.') else: channel_meta = self.channels # the -1 below because the channel numbers begin from 1 instead of 0 # (this is fragile code) ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows() if self.channel_names[i - 1] in channels] if not np.allclose(ranges, ranges[0]): raise Exception("""Not all specified channels have the same data range, therefore they cannot be transformed together.\n HINT: Try transforming one channel at a time. You'll need to provide the name of the channel in the transform.""") if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}: # Hacky fix to make sure that 'd' is provided only # for hlog / tlog transformations kwargs['d'] = np.log10(ranges[0]) transformer = Transformation(transform, direction, args, **kwargs) ## create new data transformed = transformer(data[channels], use_spln) if return_all: new_data = data else: new_data = data.filter(channels) new_data[channels] = transformed ## update new Measurement new.data = new_data if ID is not None: new.ID = ID if get_transformer: return new, transformer else: return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def transform(self, transform, direction='forward', share_transform=True, channels=None, return_all=True, auto_range=True, use_spln=True, get_transformer=False, ID=None, apply_now=True, args=(), **kwargs): ''' Apply transform to each Measurement in the Collection. Return a new Collection with transformed data. {_containers_held_in_memory_warning} Parameters ---------- {FCMeasurement_transform_pars} ID : hashable | None ID for the resulting collection. If None is passed, the original ID is used. Returns ------- new : FCCollection New collection containing the transformed measurements. transformer : Transformation The Transformation applied to the measurements. Only returned if get_transformer=True & share_transform=True. Examples -------- {FCMeasurement_transform_examples} ''' new = self.copy() if share_transform: channel_meta = list(self.values())[0].channels channel_names = list(self.values())[0].channel_names if channels is None: channels = list(channel_names) else: channels = to_list(channels) ## create transformer if isinstance(transform, Transformation): transformer = transform else: if auto_range: # determine transformation range if 'd' in kwargs: warnings.warn('Encountered both auto_range=True and user-specified range ' 'value in parameter d.\n ' 'Range value specified in parameter d is used.') else: # the -1 below because the channel numbers begin from 1 instead of 0 (this is fragile code) ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows() if channel_names[i - 1] in channels] if not np.allclose(ranges, ranges[0]): raise Exception('Not all specified channels have the same ' 'data range, therefore they cannot be ' 'transformed together.') if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}: # Hacky fix to make sure that 'd' is provided only # for hlog / tlog transformations kwargs['d'] = np.log10(ranges[0]) transformer = Transformation(transform, direction, args, **kwargs) if use_spln: xmax = self.apply(lambda x: x[channels].max().max(), 
applyto='data').max().max() xmin = self.apply(lambda x: x[channels].min().min(), applyto='data').min().min() transformer.set_spline(xmin, xmax) ## transform all measurements for k, v in new.items(): new[k] = v.transform(transformer, channels=channels, return_all=return_all, use_spln=use_spln, apply_now=apply_now) else: for k, v in new.items(): new[k] = v.transform(transform, direction=direction, channels=channels, return_all=return_all, auto_range=auto_range, get_transformer=False, use_spln=use_spln, apply_now=apply_now, args=args, **kwargs) if ID is not None: new.ID = ID if share_transform and get_transformer: return new, transformer else: return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def gate(self, gate, ID=None, apply_now=True): ''' Applies the gate to each Measurement in the Collection, returning a new Collection with gated data. {_containers_held_in_memory_warning} Parameters ---------- gate : {_gate_available_classes} ID : [ str, numeric, None] New ID to be given to the output. If None, the ID of the current collection will be used. ''' def func(well): return well.gate(gate, apply_now=apply_now) return self.apply(func, output_format='collection', ID=ID)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def counts(self, ids=None, setdata=False, output_format='DataFrame'): """ Return the counts in each of the specified measurements. Parameters ids : [hashable | iterable of hashables | None] Keys of measurements to get counts of. If None is given get counts of all measurements. setdata : bool Whether to set the data in the Measurement object. Used only if data is not already set. output_format : DataFrame | dict Specifies the output format for that data. Returns ------- [DataFrame | Dictionary] Dictionary keys correspond to measurement keys. """
return self.apply(lambda x: x.counts, ids=ids, setdata=setdata, output_format=output_format)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(self, channel_names, kind='histogram', gates=None, gate_colors=None, ids=None, row_labels=None, col_labels=None, xlim='auto', ylim='auto', autolabel=True, **kwargs): """ Produces a grid plot with each subplot corresponding to the data at the given position. Parameters {FCMeasurement_plot_pars} {graph_plotFCM_pars} {_graph_grid_layout} Returns ------- {_graph_grid_layout_returns} Examples -------- Below, plate is an instance of FCOrderedCollection .. note:: For more details see documentation for FCMeasurement.plot **kwargs passes arguments to both grid_plot and to FCMeasurement.plot. """
## # Note # ------- # The function assumes that grid_plot and FCMeasurement.plot use unique key words. # Any key word arguments that appear in both functions are passed only to grid_plot in the end. ## # Automatically figure out which of the kwargs should # be sent to grid_plot instead of two sample.plot # (May not be a robust solution, we'll see as the code evolves grid_arg_list = inspect.getargspec(OrderedCollection.grid_plot).args grid_plot_kwargs = {'ids': ids, 'row_labels': row_labels, 'col_labels': col_labels} for key, value in list(kwargs.items()): if key in grid_arg_list: kwargs.pop(key) grid_plot_kwargs[key] = value ## # Make sure channel names is a list to make the code simpler below channel_names = to_list(channel_names) ## # Determine data limits for binning # if kind == 'histogram': nbins = kwargs.get('bins', 200) if isinstance(nbins, int): min_list = [] max_list = [] for sample in self: min_list.append(self[sample].data[channel_names].min().values) max_list.append(self[sample].data[channel_names].max().values) min_list = list(zip(*min_list)) max_list = list(zip(*max_list)) bins = [] for i, c in enumerate(channel_names): min_v = min(min_list[i]) max_v = max(max_list[i]) bins.append(np.linspace(min_v, max_v, nbins)) # Check if 1d if len(channel_names) == 1: bins = bins[0] # bins should be an ndarray, not a list of ndarrays kwargs['bins'] = bins ########## # Defining the plotting function that will be used. # At the moment grid_plot handles the labeling # (rather than sample.plot or the base function # in GoreUtilities.graph def plot_sample(sample, ax): return sample.plot(channel_names, ax=ax, gates=gates, gate_colors=gate_colors, colorbar=False, kind=kind, autolabel=False, **kwargs) xlabel, ylabel = None, None if autolabel: cnames = to_list(channel_names) xlabel = cnames[0] if len(cnames) == 2: ylabel = cnames[1] return self.grid_plot(plot_sample, xlim=xlim, ylim=ylim, xlabel=xlabel, ylabel=ylabel, **grid_plot_kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def obj2unicode(obj): """Return a unicode representation of a python object """
if isinstance(obj, unicode_type): return obj elif isinstance(obj, bytes_type): try: return unicode_type(obj, 'utf-8') except UnicodeDecodeError as strerror: sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror)) return unicode_type(obj, 'utf-8', 'replace') else: return unicode_type(obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_chars(self, array): """Set the characters used to draw lines between rows and columns - the array should contain 4 fields: [horizontal, vertical, corner, header] - default is set to: ['-', '|', '+', '='] """
if len(array) != 4: raise ArraySizeError("array should contain 4 characters") array = [ x[:1] for x in [ str(s) for s in array ] ] (self._char_horiz, self._char_vert, self._char_corner, self._char_header) = array return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_header_align(self, array): """Set the desired header alignment - the elements of the array should be either "l", "c" or "r": * "l": column flushed left * "c": column centered * "r": column flushed right """
self._check_row_size(array) self._header_align = array return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_cols_align(self, array): """Set the desired columns alignment - the elements of the array should be either "l", "c" or "r": * "l": column flushed left * "c": column centered * "r": column flushed right """
self._check_row_size(array) self._align = array return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_cols_valign(self, array): """Set the desired columns vertical alignment - the elements of the array should be either "t", "m" or "b": * "t": column aligned on the top of the cell * "m": column aligned on the middle of the cell * "b": column aligned on the bottom of the cell """
self._check_row_size(array) self._valign = array return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_cols_dtype(self, array): """Set the desired columns datatype for the cols. - the elements of the array should be either a callable or any of "a", "t", "f", "e" or "i": * "a": automatic (try to use the most appropriate datatype) * "t": treat as text * "f": treat as float in decimal format * "e": treat as float in exponential format * "i": treat as int * a callable: should return formatted string for any value given - by default, automatic datatyping is used for each column """
self._check_row_size(array) self._dtype = array return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_rows(self, rows, header=True): """Add several rows in the rows stack - The 'rows' argument can be either an iterator returning arrays, or a by-dimensional array - 'header' specifies if the first row should be used as the header of the table """
# nb: don't use 'iter' on by-dimensional arrays, to get a # usable code for python 2.1 if header: if hasattr(rows, '__iter__') and hasattr(rows, 'next'): self.header(rows.next()) else: self.header(rows[0]) rows = rows[1:] for row in rows: self.add_row(row) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def draw(self): """Draw the table - the table is returned as a whole string """
if not self._header and not self._rows: return self._compute_cols_width() self._check_align() out = "" if self._has_border(): out += self._hline() if self._header: out += self._draw_line(self._header, isheader=True) if self._has_header(): out += self._hline_header() length = 0 for row in self._rows: length += 1 out += self._draw_line(row) if self._has_hlines() and length < len(self._rows): out += self._hline() if self._has_border(): out += self._hline() return out[:-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fmt_int(cls, x, **kw): """Integer formatting class-method. - x will be float-converted and then used. """
return str(int(round(cls._to_float(x))))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fmt_float(cls, x, **kw): """Float formatting class-method. - x parameter is ignored. Instead kw-argument f being x float-converted will be used. - precision will be taken from `n` kw-argument. """
n = kw.get('n') return '%.*f' % (n, cls._to_float(x))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fmt_exp(cls, x, **kw): """Exponential formatting class-method. - x parameter is ignored. Instead kw-argument f being x float-converted will be used. - precision will be taken from `n` kw-argument. """
n = kw.get('n') return '%.*e' % (n, cls._to_float(x))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _fmt_auto(cls, x, **kw): """auto formatting class-method."""
f = cls._to_float(x) if abs(f) > 1e8: fn = cls._fmt_exp else: if f - round(f) == 0: fn = cls._fmt_int else: fn = cls._fmt_float return fn(x, **kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _str(self, i, x): """Handles string formatting of cell data i - index of the cell datatype in self._dtype x - cell data to format """
FMT = { 'a':self._fmt_auto, 'i':self._fmt_int, 'f':self._fmt_float, 'e':self._fmt_exp, 't':self._fmt_text, } n = self._precision dtype = self._dtype[i] try: if callable(dtype): return dtype(x) else: return FMT[dtype](x, n=n) except FallbackToText: return self._fmt_text(x)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _hline(self): """Print an horizontal line """
if not self._hline_string: self._hline_string = self._build_hline() return self._hline_string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_hline(self, is_header=False): """Return a string used to separated rows or separate header from rows """
horiz = self._char_horiz if (is_header): horiz = self._char_header # compute cell separator s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()], horiz) # build the line l = s.join([horiz * n for n in self._width]) # add border if needed if self._has_border(): l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz, self._char_corner) else: l += "\n" return l
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _len_cell(self, cell): """Return the width of the cell Special characters are taken into account to return the width of the cell, such like newlines and tabs """
cell_lines = cell.split('\n') maxi = 0 for line in cell_lines: length = 0 parts = line.split('\t') for part, i in zip(parts, list(range(1, len(parts) + 1))): length = length + len(part) if i < len(parts): length = (length//8 + 1) * 8 maxi = max(maxi, length) return maxi
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def region(self, rect): """ Selects a sub-region of the image using the supplied rectangle, x, y, width, height. """
box = (int(rect[0]), int(rect[1]), int(rect[0]) + int(rect[2]), int(rect[1]) + int(rect[3])) if box[2] > self.img.size[0] or box[3] > self.img.size[1]: raise errors.RectangleError("Region out-of-bounds") self.img = self.img.crop(box) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def derive_signature(key, qs): """Derives the signature from the supplied query string using the key."""
key, qs = (key or "", qs or "") return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sign(key, qs): """Signs the query string using the key."""
sig = derive_signature(key, qs) return "%s&%s" % (qs, urlencode([("sig", sig)]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify_signature(key, qs): """Verifies that the signature in the query string is correct."""
unsigned_qs = re.sub(r'&?sig=[^&]*', '', qs) sig = derive_signature(key, unsigned_qs) return urlparse.parse_qs(qs).get("sig", [None])[0] == sig
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def canonical_name(name): """Find the canonical name for the given window in scipy.signal Parameters name : `str` the name of the window you want Returns ------- realname : `str` the name of the window as implemented in `scipy.signal.window` Raises ------- ValueError if ``name`` cannot be resolved to a window function in `scipy.signal` Examples -------- 'hann' 'kaiser' """
if name.lower() == 'planck': # make sure to handle the Planck window return 'planck' try: # use equivalence introduced in scipy 0.16.0 # pylint: disable=protected-access return scipy_windows._win_equiv[name.lower()].__name__ except AttributeError: # old scipy try: return getattr(scipy_windows, name.lower()).__name__ except AttributeError: # no match pass # raise later except KeyError: # no match pass # raise later raise ValueError('no window function in scipy.signal equivalent to %r' % name,)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def recommended_overlap(name, nfft=None): """Returns the recommended fractional overlap for the given window If ``nfft`` is given, the return is in samples Parameters name : `str` the name of the window you are using nfft : `int`, optional the length of the window Returns ------- rov : `float`, `int` the recommended overlap (ROV) for the given window, in samples if ``nfft` is given (`int`), otherwise fractional (`float`) Examples -------- 0.5 85 """
try: name = canonical_name(name) except KeyError as exc: raise ValueError(str(exc)) try: rov = ROV[name] except KeyError: raise ValueError("no recommended overlap for %r window" % name) if nfft: return int(ceil(nfft * rov)) return rov
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def planck(N, nleft=0, nright=0): """Return a Planck taper window. Parameters N : `int` Number of samples in the output window nleft : `int`, optional Number of samples to taper on the left, should be less than `N/2` nright : `int`, optional Number of samples to taper on the right, should be less than `N/2` Returns ------- w : `ndarray` The window, with the maximum value normalized to 1 and at least one end tapered smoothly to 0. Examples -------- To taper 0.1 seconds on both ends of one second of data sampled at 2048 Hz: References .. [1] McKechan, D.J.A., Robinson, C., and Sathyaprakash, B.S. (April 2010). "A tapering window for time-domain templates and simulated signals in the detection of gravitational waves from coalescing compact binaries". Classical and Quantum Gravity 27 (8). :doi:`10.1088/0264-9381/27/8/084020` .. [2] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function#Planck-taper_window """
# construct a Planck taper window w = numpy.ones(N) if nleft: w[0] *= 0 zleft = numpy.array([nleft * (1./k + 1./(k-nleft)) for k in range(1, nleft)]) w[1:nleft] *= expit(-zleft) if nright: w[N-1] *= 0 zright = numpy.array([-nright * (1./(k-nright) + 1./k) for k in range(1, nright)]) w[N-nright:N-1] *= expit(-zright) return w
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bool_env(key, default=False): """Parse an environment variable as a boolean switch `True` is returned if the variable value matches one of the following: - ``'1'`` - ``'y'`` - ``'yes'`` - ``'true'`` The match is case-insensitive (so ``'Yes'`` will match as `True`) Parameters key : `str` the name of the environment variable to find default : `bool` the default return value if the key is not found Returns ------- True if the environment variable matches as 'yes' or similar False otherwise Examples -------- True False False """
try: return os.environ[key].lower() in TRUE except KeyError: return default
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def call(cmd, stdout=PIPE, stderr=PIPE, on_error='raise', **kwargs): """Call out to the shell using `subprocess.Popen` Parameters stdout : `file-like`, optional stream for stdout stderr : `file-like`, optional stderr for stderr on_error : `str`, optional what to do when the command fails, one of - 'ignore' - do nothing - 'warn' - print a warning - 'raise' - raise an exception **kwargs other keyword arguments to pass to `subprocess.Popen` Returns ------- out : `str` the output stream of the command err : `str` the error stream from the command Raises ------ OSError if `cmd` is a `str` (or `shell=True` is passed) and the executable is not found subprocess.CalledProcessError if the command fails otherwise """
if isinstance(cmd, (list, tuple)): cmdstr = ' '.join(cmd) kwargs.setdefault('shell', False) else: cmdstr = str(cmd) kwargs.setdefault('shell', True) proc = Popen(cmd, stdout=stdout, stderr=stderr, **kwargs) out, err = proc.communicate() if proc.returncode: if on_error == 'ignore': pass elif on_error == 'warn': e = CalledProcessError(proc.returncode, cmdstr) warnings.warn(str(e)) else: raise CalledProcessError(proc.returncode, cmdstr) return out.decode('utf-8'), err.decode('utf-8')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_with_columns(func): """Decorate a Table read method to use the ``columns`` keyword """
def wrapper(*args, **kwargs): # parse columns argument columns = kwargs.pop("columns", None) # read table tab = func(*args, **kwargs) # filter on columns if columns is None: return tab return tab[columns] return _safe_wraps(wrapper, func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_with_selection(func): """Decorate a Table read method to apply ``selection`` keyword """
def wrapper(*args, **kwargs): """Execute a function, then apply a selection filter """ # parse selection selection = kwargs.pop('selection', None) or [] # read table tab = func(*args, **kwargs) # apply selection if selection: return filter_table(tab, selection) return tab return _safe_wraps(wrapper, func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decorate_registered_reader( name, data_class=EventTable, columns=True, selection=True, ): """Wrap an existing registered reader to use GWpy's input decorators Parameters name : `str` the name of the registered format data_class : `type`, optional the class for whom the format is registered columns : `bool`, optional use the `read_with_columns` decorator selection : `bool`, optional use the `read_with_selection` decorator """
# look up the currently registered reader for this format
reader = registry.get_reader(name, data_class)
# stack both decorators so the reader understands ``columns`` and
# ``selection`` keywords
wrapped = (  # noqa
    read_with_columns(  # use ``columns``
        read_with_selection(  # use ``selection``
            reader
        ))
)
# re-register, replacing the original reader for this format
return registry.register_reader(name, data_class, wrapped, force=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_from_root(source, treename=None, columns=None, **kwargs): """Read a Table from a ROOT tree """
import root_numpy # parse column filters into tree2array ``selection`` keyword # NOTE: not all filters can be passed directly to root_numpy, so we store # those separately and apply them after-the-fact before returning try: selection = kwargs.pop('selection') except KeyError: # no filters filters = None else: rootfilters = [] filters = [] for col, op_, value in parse_column_filters(selection): try: opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0] except (IndexError, KeyError): # cannot filter with root_numpy filters.append((col, op_, value)) else: # can filter with root_numpy rootfilters.append('{0} {1} {2!r}'.format(col, opstr, value)) kwargs['selection'] = ' && '.join(rootfilters) # pass file name (not path) if not isinstance(source, string_types): source = source.name # find single tree (if only one tree present) if treename is None: trees = root_numpy.list_trees(source) if len(trees) == 1: treename = trees[0] elif not trees: raise ValueError("No trees found in %s" % source) else: raise ValueError("Multiple trees found in %s, please select on " "via the `treename` keyword argument, e.g. " "`treename='events'`. Available trees are: %s." % (source, ', '.join(map(repr, trees)))) # read, filter, and return t = Table(root_numpy.root2array( source, treename, branches=columns, **kwargs )) if filters: return filter_table(t, *filters) return t
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_to_root(table, filename, **kwargs): """Write a Table to a ROOT file """
import root_numpy
# convert to a structured numpy array and hand off to root_numpy
root_numpy.array2root(table.as_array(), filename, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _gps_scale_factory(unit): """Construct a GPSScale for this unit """
# build a GPSScale subclass pinned to the given time ``unit``;
# the scale name is the unit's (long) name pluralised, e.g. 'seconds'
class FixedGPSScale(GPSScale):
    """`GPSScale` for a specific GPS time unit """
    name = str('{0}s'.format(unit.long_names[0] if unit.long_names else
                             unit.names[0]))

    def __init__(self, axis, epoch=None):
        """ """
        # forward to GPSScale with the factory's unit closed over
        super(FixedGPSScale, self).__init__(axis, epoch=epoch, unit=unit)
return FixedGPSScale
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_epoch(self, epoch): """Set the GPS epoch """
if epoch is None: self._epoch = None return if isinstance(epoch, (Number, Decimal)): self._epoch = float(epoch) else: self._epoch = float(to_gps(epoch))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_unit(self, unit): """Set the GPS step scale """
# accept all core time units if unit is None or (isinstance(unit, units.NamedUnit) and unit.physical_type == 'time'): self._unit = unit return # convert float to custom unit in seconds if isinstance(unit, Number): unit = units.Unit(unit * units.second) # otherwise, should be able to convert to a time unit try: unit = units.Unit(unit) except ValueError as exc: # catch annoying plurals try: unit = units.Unit(str(unit).rstrip('s')) except ValueError: raise exc # decompose and check that it's actually a time unit dec = unit.decompose() if dec.bases != [units.second]: raise ValueError("Cannot set GPS unit to %s" % unit) # check equivalent units for other in TIME_UNITS: if other.decompose().scale == dec.scale: self._unit = other return raise ValueError("Unrecognised unit: %s" % unit)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_unit_name(self): """Returns the name of the unit for this GPS scale Note that this returns a simply-pluralised version of the name. """
if not self.unit: return None name = sorted(self.unit.names, key=len)[-1] return '%ss' % name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_non_affine(self, values): """Transform an array of GPS times. This method is designed to filter out transformations that will generate text elements that require exact precision, and use `Decimal` objects to do the transformation, and simple `float` otherwise. """
# normalise scale/epoch: treat unset (None/0) as identity values
scale = self.scale or 1
epoch = self.epoch or 0
values = numpy.asarray(values)
# handle simple or data transformations with floats
if self._parents or (  # part of composite transform (from draw())
        epoch == 0 and  # no large additions
        scale == 1  # no multiplications
):
    return self._transform(values, float(epoch), float(scale))
# otherwise do things carefully (and slowly) with Decimals
# -- ideally this only gets called for transforming tick positions
flat = values.flatten()

def _trans(x):
    # per-element Decimal transform preserves sub-float GPS precision
    return self._transform_decimal(x, epoch, scale)
return numpy.asarray(list(map(_trans, flat))).reshape(values.shape)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING): """Adds a `DeprecationWarning` to a function Parameters func : `callable` the function to decorate with a `DeprecationWarning` warning : `str`, optional the warning to present Notes ----- The final warning message is formatted as ``warning.format(func)`` so you can use attribute references to the function itself. See the default message as an example. """
@wraps(func) def wrapped_func(*args, **kwargs): warnings.warn( DEPRECATED_FUNCTION_WARNING.format(func), category=DeprecationWarning, stacklevel=2, ) return func(*args, **kwargs) return wrapped_func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def return_as(returntype): """Decorator to cast return of function as the given type Parameters returntype : `type` the desired return type of the decorated function """
def return_as(returntype):
    """Decorator to cast the return of a function as the given type

    Parameters
    ----------
    returntype : `type`
        the desired return type of the decorated function

    Raises
    ------
    TypeError, ValueError
        (re-raised) if the cast fails; the message is augmented with the
        function and target-type names
    """
    def decorator(func):
        # @wraps(func) <- we can't use this as normal because it doesn't
        # work on python < 3 for instance methods, see workaround below
        def wrapped(*args, **kwargs):
            result = func(*args, **kwargs)
            try:
                return returntype(result)
            except (TypeError, ValueError) as exc:
                # annotate the failure with context before re-raising
                exc.args = (
                    'failed to cast return from {0} as {1}: {2}'.format(
                        func.__name__, returntype.__name__, str(exc)),
                )
                raise
        try:
            return wraps(func)(wrapped)
        except AttributeError:  # python < 3.0.0
            # fixed: was ``wrapped.__doc__ == func.__doc__`` — a no-op
            # comparison where the docstring copy (assignment) was intended
            wrapped.__doc__ = func.__doc__
            return wrapped
    return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_citations(zid, url='https://zenodo.org/', hits=10, tag_prefix='v'): """Query and format a citations page from Zenodo entries Parameters zid : `int`, `str` the Zenodo ID of the target record url : `str`, optional the base URL of the Zenodo host, defaults to ``https://zenodo.org`` hist : `int`, optional the maximum number of hits to show, default: ``10`` tag_prefix : `str`, optional the prefix for git tags. This is removed to generate the section headers in the output RST Returns ------- rst : `str` an RST-formatted string of DOI badges with URLs """
# query for metadata url = ('{url}/api/records/?' 'page=1&' 'size={hits}&' 'q=conceptrecid:"{id}"&' 'sort=-version&' 'all_versions=True'.format(id=zid, url=url, hits=hits)) metadata = requests.get(url).json() lines = [] for i, hit in enumerate(metadata['hits']['hits']): version = hit['metadata']['version'][len(tag_prefix):] lines.append('-' * len(version)) lines.append(version) lines.append('-' * len(version)) lines.append('') lines.append('.. image:: {badge}\n' ' :target: {doi}'.format(**hit['links'])) if i < hits - 1: lines.append('') return '\n'.join(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(source, channels, start=None, end=None, scaled=None, type=None, series_class=TimeSeries): # pylint: disable=redefined-builtin """Read a dict of series from one or more GWF files Parameters source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of cache file, - `list` of paths. channels : `~gwpy.detector.ChannelList`, `list` a list of channels to read from the source. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional GPS start time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. scaled : `bool`, optional apply slope and bias calibration to ADC data. type : `dict`, optional a `dict` of ``(name, channel-type)`` pairs, where ``channel-type`` can be one of ``'adc'``, ``'proc'``, or ``'sim'``. series_class : `type`, optional the `Series` sub-type to return. Returns ------- data : `~gwpy.timeseries.TimeSeriesDict` or similar a dict of ``(channel, series)`` pairs read from the GWF source(s). """
# parse input source source = file_list(source) # parse type ctype = channel_dict_kwarg(type, channels, (str,)) # read each individually and append out = series_class.DictClass() for i, file_ in enumerate(source): if i == 1: # force data into fresh memory so that append works for name in out: out[name] = numpy.require(out[name], requirements=['O']) # read frame out.append(read_gwf(file_, channels, start=start, end=end, ctype=ctype, scaled=scaled, series_class=series_class), copy=False) return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_gwf(filename, channels, start=None, end=None, scaled=None, ctype=None, series_class=TimeSeries): """Read a dict of series data from a single GWF file Parameters filename : `str` the GWF path from which to read channels : `~gwpy.detector.ChannelList`, `list` a list of channels to read from the source. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional GPS start time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, anything parseable by :func:`~gwpy.time.to_gps` is fine. scaled : `bool`, optional apply slope and bias calibration to ADC data. type : `dict`, optional a `dict` of ``(name, channel-type)`` pairs, where ``channel-type`` can be one of ``'adc'``, ``'proc'``, or ``'sim'``. series_class : `type`, optional the `Series` sub-type to return. Returns ------- data : `~gwpy.timeseries.TimeSeriesDict` or similar a dict of ``(channel, series)`` pairs read from the GWF file. """
# parse kwargs if not start: start = 0 if not end: end = 0 span = Segment(start, end) # open file stream = io_gwf.open_gwf(filename, 'r') nframes = stream.GetNumberOfFrames() # find channels out = series_class.DictClass() # loop over frames in GWF i = 0 while True: this = i i += 1 # read frame try: frame = stream.ReadFrameNSubset(this, 0) except IndexError: if this >= nframes: break raise # check whether we need this frame at all if not _need_frame(frame, start, end): continue # get epoch for this frame epoch = LIGOTimeGPS(*frame.GetGTime()) # and read all the channels for channel in channels: _scaled = _dynamic_scaled(scaled, channel) try: new = _read_channel(stream, this, str(channel), ctype.get(channel, None), epoch, start, end, scaled=_scaled, series_class=series_class) except _Skip: # don't need this frame for this channel continue try: out[channel].append(new) except KeyError: out[channel] = numpy.require(new, requirements=['O']) # if we have all of the data we want, stop now if all(span in out[channel].span for channel in out): break # if any channels weren't read, something went wrong for channel in channels: if channel not in out: msg = "Failed to read {0!r} from {1!r}".format( str(channel), filename) if start or end: msg += ' for {0}'.format(span) raise ValueError(msg) return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _read_channel(stream, num, name, ctype, epoch, start, end, scaled=True, series_class=TimeSeries): """Read a channel from a specific frame in a stream """
# locate the Fr{Adc,Proc,Sim}Data structure for this channel, then
# decode it into a series cropped to [start, end)
data = _get_frdata(stream, num, name, ctype=ctype)
return read_frdata(data, epoch, start, end, scaled=scaled,
                   series_class=series_class)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_frdata(stream, num, name, ctype=None): """Brute force-ish method to return the FrData structure for a channel This saves on pulling the channel type from the TOC """
# try the requested type only, or all three types in turn if unspecified
ctypes = (ctype,) if ctype else ('adc', 'proc', 'sim')
for ctype in ctypes:
    # e.g. stream.ReadFrAdcData / ReadFrProcData / ReadFrSimData
    _reader = getattr(stream, 'ReadFr{0}Data'.format(ctype.title()))
    try:
        return _reader(num, name)
    except IndexError as exc:
        # 'no channel of this type' just means try the next type;
        # any other IndexError is a real failure
        if FRERR_NO_CHANNEL_OF_TYPE.match(str(exc)):
            continue
        raise
raise ValueError("no Fr{{Adc,Proc,Sim}}Data structures with the "
                 "name {0}".format(name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_frdata(frdata, epoch, start, end, scaled=True, series_class=TimeSeries): """Read a series from an `FrData` structure Parameters frdata : `LDAStools.frameCPP.FrAdcData` or similar the data structure to read epoch : `float` the GPS start time of the containing frame (`LDAStools.frameCPP.FrameH.GTime`) start : `float` the GPS start time of the user request end : `float` the GPS end time of the user request scaled : `bool`, optional apply slope and bias calibration to ADC data. series_class : `type`, optional the `Series` sub-type to return. Returns ------- series : `~gwpy.timeseries.TimeSeriesBase` the formatted data series Raises ------ _Skip if this data structure doesn't overlap with the requested ``[start, end)`` interval. """
datastart = epoch + frdata.GetTimeOffset() try: trange = frdata.GetTRange() except AttributeError: # not proc channel trange = 0. # check overlap with user-requested span if (end and datastart >= end) or (trange and datastart + trange < start): raise _Skip() # get scaling try: slope = frdata.GetSlope() bias = frdata.GetBias() except AttributeError: # not FrAdcData slope = None bias = None null_scaling = True else: null_scaling = slope == 1. and bias == 0. out = None for j in range(frdata.data.size()): # we use range(frdata.data.size()) to avoid segfault # related to iterating directly over frdata.data try: new = read_frvect(frdata.data[j], datastart, start, end, name=frdata.GetName(), series_class=series_class) except _Skip: continue # apply ADC scaling (only if interesting; this prevents unnecessary # type-casting errors) if scaled and not null_scaling: new *= slope new += bias if slope is not None: # user has deliberately disabled the ADC calibration, so # the stored engineering unit is not valid, revert to 'counts': new.override_unit('count') if out is None: out = new else: out.append(new) return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_frvect(vect, epoch, start, end, name=None, series_class=TimeSeries): """Read an array from an `FrVect` structure Parameters vect : `LDASTools.frameCPP.FrVect` the frame vector structur to read start : `float` the GPS start time of the request end : `float` the GPS end time of the request epoch : `float` the GPS start time of the containing `FrData` structure name : `str`, optional the name of the output `series_class`; this is also used to ignore ``FrVect`` structures containing other information series_class : `type`, optional the `Series` sub-type to return. Returns ------- series : `~gwpy.timeseries.TimeSeriesBase` the formatted data series Raises ------ _Skip if this vect doesn't overlap with the requested ``[start, end)`` interval, or the name doesn't match. """
# only read FrVect with matching name (or no name set) # frame spec allows for arbitrary other FrVects # to hold other information if vect.GetName() and name and vect.GetName() != name: raise _Skip() # get array arr = vect.GetDataArray() nsamp = arr.size # and dimensions dim = vect.GetDim(0) dx = dim.dx x0 = dim.startX # start and end GPS times of this FrVect dimstart = epoch + x0 dimend = dimstart + nsamp * dx # index of first required sample nxstart = int(max(0., float(start-dimstart)) / dx) # requested start time is after this frame, skip if nxstart >= nsamp: raise _Skip() # index of end sample if end: nxend = int(nsamp - ceil(max(0., float(dimend-end)) / dx)) else: nxend = None if nxstart or nxend: arr = arr[nxstart:nxend] # -- cast as a series # get unit unit = vect.GetUnitY() or None # create array series = series_class(arr, t0=dimstart+nxstart*dx, dt=dx, name=name, channel=name, unit=unit, copy=False) # add information to channel series.channel.sample_rate = series.sample_rate.value series.channel.unit = unit series.channel.dtype = series.dtype return series
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(tsdict, outfile, start=None, end=None, name='gwpy', run=0, compression=257, compression_level=6): """Write data to a GWF file using the frameCPP API """
# set frame header metadata if not start: starts = {LIGOTimeGPS(tsdict[key].x0.value) for key in tsdict} if len(starts) != 1: raise RuntimeError("Cannot write multiple TimeSeries to a single " "frame with different start times, " "please write into different frames") start = list(starts)[0] if not end: ends = {tsdict[key].span[1] for key in tsdict} if len(ends) != 1: raise RuntimeError("Cannot write multiple TimeSeries to a single " "frame with different end times, " "please write into different frames") end = list(ends)[0] duration = end - start start = LIGOTimeGPS(start) ifos = {ts.channel.ifo for ts in tsdict.values() if ts.channel and ts.channel.ifo and hasattr(frameCPP, 'DETECTOR_LOCATION_{0}'.format(ts.channel.ifo))} # create frame frame = io_gwf.create_frame(time=start, duration=duration, name=name, run=run, ifos=ifos) # append channels for i, key in enumerate(tsdict): try: # pylint: disable=protected-access ctype = tsdict[key].channel._ctype or 'proc' except AttributeError: ctype = 'proc' append_to_frame(frame, tsdict[key].crop(start, end), type=ctype, channelid=i) # write frame to file io_gwf.write_frames(outfile, [frame], compression=compression, compression_level=compression_level)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append_to_frame(frame, timeseries, type='proc', channelid=0): # pylint: disable=redefined-builtin """Append data from a `TimeSeries` to a `~frameCPP.FrameH` Parameters frame : `~frameCPP.FrameH` frame object to append to timeseries : `TimeSeries` the timeseries to append type : `str` the type of the channel, one of 'adc', 'proc', 'sim' channelid : `int`, optional the ID of the channel within the group (only used for ADC channels) """
# prefer the channel name, falling back to the series name
if timeseries.channel:
    channel = str(timeseries.channel)
else:
    channel = str(timeseries.name)
# offset of the first sample relative to the frame start time
offset = float(LIGOTimeGPS(timeseries.t0.value) -
               LIGOTimeGPS(*frame.GetGTime()))
# create the data container
if type.lower() == 'adc':
    frdata = frameCPP.FrAdcData(
        channel,
        0,  # channel group
        channelid,  # channel number in group
        16,  # number of bits in ADC
        timeseries.sample_rate.value,  # sample rate
    )
    frdata.SetTimeOffset(offset)
    append = frame.AppendFrAdcData
elif type.lower() == 'proc':
    frdata = frameCPP.FrProcData(
        channel,  # channel name
        str(timeseries.name),  # comment
        frameCPP.FrProcData.TIME_SERIES,  # ID as time-series
        frameCPP.FrProcData.UNKNOWN_SUB_TYPE,  # empty sub-type (fseries)
        offset,  # offset of first sample relative to frame start
        abs(timeseries.span),  # duration of data
        0.,  # heterodyne frequency
        0.,  # phase of heterodyne
        0.,  # frequency range
        0.,  # resolution bandwidth
    )
    append = frame.AppendFrProcData
elif type.lower() == 'sim':
    frdata = frameCPP.FrSimData(
        str(timeseries.channel),  # channel name
        str(timeseries.name),  # comment
        timeseries.sample_rate.value,  # sample rate
        offset,  # time offset of first sample
        0.,  # heterodyne frequency
        0.,  # phase of heterodyne
    )
    append = frame.AppendFrSimData
else:
    # NOTE(review): message has an unbalanced quote — "'adc," should
    # probably read "'adc',"
    raise RuntimeError("Invalid channel type {!r}, please select one of "
                       "'adc, 'proc', or 'sim'".format(type))
# append an FrVect holding the actual samples, then attach the
# container to the frame
frdata.AppendData(create_frvect(timeseries))
append(frdata)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_frvect(timeseries): """Create a `~frameCPP.FrVect` from a `TimeSeries` This method is primarily designed to make writing data to GWF files a bit easier. Parameters timeseries : `TimeSeries` the input `TimeSeries` Returns ------- frvect : `~frameCPP.FrVect` the output `FrVect` """
# create timing dimension dims = frameCPP.Dimension( timeseries.size, timeseries.dx.value, str(timeseries.dx.unit), 0) # create FrVect vect = frameCPP.FrVect( timeseries.name or '', FRVECT_TYPE_FROM_NUMPY[timeseries.dtype.type], 1, dims, str(timeseries.unit)) # populate FrVect and return vect.GetDataArray()[:] = numpy.require(timeseries.value, requirements=['C']) return vect
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bool_segments(array, start=0, delta=1, minlen=1): """Yield segments of consecutive `True` values in a boolean array Parameters array : `iterable` An iterable of boolean-castable values. start : `float` The value of the first sample on the indexed axis (e.g.the GPS start time of the array). delta : `float` The step size on the indexed axis (e.g. sample duration). minlen : `int`, optional The minimum number of consecutive `True` values for a segment. Yields ------ segment : `tuple` ``(start + i * delta, start + (i + n) * delta)`` for a sequence of ``n`` consecutive True values starting at position ``i``. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. The datatype of the values returned will be the larger of the types of ``start`` and ``delta``. Examples -------- [(1, 2), (5, 8), (9, 10)] [(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)] """
# generator: walks the iterable once, yielding (start, end) pairs for
# each run of >= minlen consecutive truthy values; ``i`` tracks the
# current sample index
array = iter(array)
i = 0
while True:
    try:  # get next value
        val = next(array)
    except StopIteration:  # end of array
        return
    if val:  # start of new segment
        n = 1  # count consecutive True
        try:
            while next(array):  # run until segment will end
                n += 1
        except StopIteration:  # have reached the end
            return  # stop
        finally:  # yield segment (including at StopIteration)
            # NOTE: the finally clause runs even on the ``return`` above,
            # so a segment touching the end of the array is still emitted
            if n >= minlen:  # ... if long enough
                yield (start + i * delta, start + (i + n) * delta)
            i += n  # skip past the run we just consumed
    i += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dqflag(self, name=None, minlen=1, dtype=None, round=False, label=None, description=None): """Convert this series into a `~gwpy.segments.DataQualityFlag`. Each contiguous set of `True` values are grouped as a `~gwpy.segments.Segment` running from the GPS time the first found `True`, to the GPS time of the next `False` (or the end of the series) Parameters minlen : `int`, optional minimum number of consecutive `True` values to identify as a `~gwpy.segments.Segment`. This is useful to ignore single bit flips, for example. dtype : `type`, `callable` output segment entry type, can pass either a type for simple casting, or a callable function that accepts a float and returns another numeric type, defaults to the `dtype` of the time index round : `bool`, optional choose to round each `~gwpy.segments.Segment` to its inclusive integer boundaries label : `str`, optional the :attr:`~gwpy.segments.DataQualityFlag.label` for the output flag. description : `str`, optional the :attr:`~gwpy.segments.DataQualityFlag.description` for the output flag. Returns ------- dqflag : `~gwpy.segments.DataQualityFlag` a segment representation of this `StateTimeSeries`, the span defines the `known` segments, while the contiguous `True` sets defined each of the `active` segments """
from ..segments import DataQualityFlag # format dtype if dtype is None: dtype = self.t0.dtype if isinstance(dtype, numpy.dtype): # use callable dtype dtype = dtype.type start = dtype(self.t0.value) dt = dtype(self.dt.value) # build segmentlists (can use simple objects since DQFlag converts) active = _bool_segments(self.value, start, dt, minlen=int(minlen)) known = [tuple(map(dtype, self.span))] # build flag and return out = DataQualityFlag(name=name or self.name, active=active, known=known, label=label or self.name, description=description) if round: return out.round() return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bits(self): """list of `Bits` for this `StateVector` :type: `Bits` """
# lazily build the bit mapping on first access and cache via the setter
try:
    return self._bits
except AttributeError:
    if self.dtype.name.startswith(('uint', 'int')):
        # integer data: default to one generically-named bit per data bit
        nbits = self.itemsize * 8
        self.bits = Bits(['Bit %d' % b for b in range(nbits)],
                         channel=self.channel, epoch=self.epoch)
        return self.bits
    elif hasattr(self.channel, 'bits'):
        # fall back to bit names defined on the channel
        self.bits = self.channel.bits
        return self.bits
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def boolean(self): """A mapping of this `StateVector` to a 2-D array containing all binary bits as booleans, for each time point. """
# lazily build and cache a (nsamples, nbits) boolean Array2D
try:
    return self._boolean
except AttributeError:
    nbits = len(self.bits)
    boolean = numpy.zeros((self.size, nbits), dtype=bool)
    for i, sample in enumerate(self.value):
        # unpack each integer sample into its per-bit boolean row
        boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)]
    self._boolean = Array2D(boolean, name=self.name,
                            x0=self.x0, dx=self.dx, y0=0, dy=1)
    return self.boolean
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_bit_series(self, bits=None): """Get the `StateTimeSeries` for each bit of this `StateVector`. Parameters bits : `list`, optional a list of bit indices or bit names, defaults to all bits Returns ------- bitseries : `StateTimeSeriesDict` a `dict` of `StateTimeSeries`, one for each given bit """
# default to all named (non-empty) bits
if bits is None:
    bits = [b for b in self.bits if b not in {None, ''}]
# resolve each requested bit to its (index, name) pair
bindex = []
for bit in bits:
    try:
        bindex.append((self.bits.index(bit), bit))
    except (IndexError, ValueError) as exc:
        exc.args = ('Bit %r not found in StateVector' % bit,)
        raise
self._bitseries = StateTimeSeriesDict()
for i, bit in bindex:
    # extract bit ``i`` of every sample as a boolean time-series
    self._bitseries[bit] = StateTimeSeries(
        self.value >> i & 1, name=bit, epoch=self.x0.value,
        channel=self.channel, sample_rate=self.sample_rate)
return self._bitseries
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(cls, source, *args, **kwargs): """Read data into a `StateVector` Parameters source : `str`, `list` Source of data, any of the following: - `str` path of single data file, - `str` path of LAL-format cache file, - `list` of paths. channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional GPS end time of required data, defaults to end of data found; any input parseable by `~gwpy.time.to_gps` is fine bits : `list`, optional list of bits names for this `StateVector`, give `None` at any point in the list to mask that bit format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. nproc : `int`, optional, default: `1` number of parallel processes to use, serial process by default. 
gap : `str`, optional how to handle gaps in the cache, one of - 'ignore': do nothing, let the underlying reader method handle it - 'warn': do nothing except print a warning to the screen - 'raise': raise an exception upon finding a gap (default) - 'pad': insert a value to fill the gaps pad : `float`, optional value with which to fill gaps in the source data, only used if gap is not given, or `gap='pad'` is given Examples -------- To read the S6 state vector, with names for all the bits:: >>> sv = StateVector.read( ... 'H-H1_LDAS_C02_L2-968654592-128.gwf', ... 'H1:IFO-SV_STATE_VECTOR', ... bits=['Science mode', 'Conlog OK', 'Locked', ... 'No injections', 'No Excitations'], dtype='uint32') then you can convert these to segments, or read just the interferometer operations bits:: >>> sv = StateVector.read( ... 'H-H1_LDAS_C02_L2-968654592-128.gwf', ... 'H1:IFO-SV_STATE_VECTOR', ... bits=['Science mode', None, 'Locked'], dtype='uint32') Running `to_dqflags` on this example would only give 2 flags, rather than all five. Alternatively the `bits` attribute can be reset after reading, but before any further operations. Notes -----"""
# defer to the inherited (registry-based) series reader machinery
return super(StateVector, cls).read(source, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dqflags(self, bits=None, minlen=1, dtype=float, round=False): """Convert this `StateVector` into a `~gwpy.segments.DataQualityDict` The `StateTimeSeries` for each bit is converted into a `~gwpy.segments.DataQualityFlag` with the bits combined into a dict. Parameters minlen : `int`, optional, default: 1 minimum number of consecutive `True` values to identify as a `Segment`. This is useful to ignore single bit flips, for example. bits : `list`, optional a list of bit indices or bit names to select, defaults to `~StateVector.bits` Returns ------- DataQualityFlag list : `list` a list of `~gwpy.segments.flag.DataQualityFlag` reprensentations for each bit in this `StateVector` See Also -------- :meth:`StateTimeSeries.to_dqflag` for details on the segment representation method for `StateVector` bits """
from ..segments import DataQualityDict out = DataQualityDict() bitseries = self.get_bit_series(bits=bits) for bit, sts in bitseries.items(): out[bit] = sts.to_dqflag(name=bit, minlen=minlen, round=round, dtype=dtype, description=self.bits.description[bit]) return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch(cls, channel, start, end, bits=None, host=None, port=None, verbose=False, connection=None, type=Nds2ChannelType.any()): """Fetch data from NDS into a `StateVector`. Parameters channel : `str`, `~gwpy.detector.Channel` the name of the channel to read, or a `Channel` object. start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS start time of required data, any input parseable by `~gwpy.time.to_gps` is fine end : `~gwpy.time.LIGOTimeGPS`, `float`, `str` GPS end time of required data, any input parseable by `~gwpy.time.to_gps` is fine bits : `Bits`, `list`, optional definition of bits for this `StateVector` host : `str`, optional URL of NDS server to use, defaults to observatory site host port : `int`, optional port number for NDS server query, must be given with `host` verify : `bool`, optional, default: `True` check channels exist in database before asking for data connection : `nds2.connection` open NDS connection to use verbose : `bool`, optional print verbose output about NDS progress type : `int`, optional NDS2 channel type integer dtype : `type`, `numpy.dtype`, `str`, optional identifier for desired output data type """
new = cls.DictClass.fetch( [channel], start, end, host=host, port=port, verbose=verbose, connection=connection)[channel] if bits: new.bits = bits return new
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(self, format='segments', bits=None, **kwargs): """Plot the data for this `StateVector` Parameters format : `str`, optional, default: ``'segments'`` The type of plot to make, either 'segments' to plot the SegmentList for each bit, or 'timeseries' to plot the raw data for this `StateVector` bits : `list`, optional A list of bit indices or bit names, defaults to `~StateVector.bits`. This argument is ignored if ``format`` is not ``'segments'`` **kwargs Other keyword arguments to be passed to either `~gwpy.plot.SegmentAxes.plot` or `~gwpy.plot.Axes.plot`, depending on ``format``. Returns ------- plot : `~gwpy.plot.Plot` output plot object See Also -------- matplotlib.pyplot.figure for documentation of keyword arguments used to create the figure matplotlib.figure.Figure.add_subplot for documentation of keyword arguments used to create the axes gwpy.plot.SegmentAxes.plot_flag for documentation of keyword arguments used in rendering each statevector flag. """
if format == 'timeseries': return super(StateVector, self).plot(**kwargs) if format == 'segments': from ..plot import Plot kwargs.setdefault('xscale', 'auto-gps') return Plot(*self.to_dqflags(bits=bits).values(), projection='segments', **kwargs) raise ValueError("'format' argument must be one of: 'timeseries' or " "'segments'")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resample(self, rate): """Resample this `StateVector` to a new rate Because of the nature of a state-vector, downsampling is done by taking the logical 'and' of all original samples in each new sampling interval, while upsampling is achieved by repeating samples. Parameters rate : `float` rate to which to resample this `StateVector`, must be a divisor of the original sample rate (when downsampling) or a multiple of the original (when upsampling). Returns ------- vector : `StateVector` resampled version of the input `StateVector` """
rate1 = self.sample_rate.value if isinstance(rate, units.Quantity): rate2 = rate.value else: rate2 = float(rate) # upsample if (rate2 / rate1).is_integer(): raise NotImplementedError("StateVector upsampling has not " "been implemented yet, sorry.") # downsample elif (rate1 / rate2).is_integer(): factor = int(rate1 / rate2) # reshape incoming data to one column per new sample newsize = int(self.size / factor) old = self.value.reshape((newsize, self.size // newsize)) # work out number of bits if self.bits: nbits = len(self.bits) else: max_ = self.value.max() nbits = int(ceil(log(max_, 2))) if max_ else 1 bits = range(nbits) # construct an iterator over the columns of the old array itr = numpy.nditer( [old, None], flags=['external_loop', 'reduce_ok'], op_axes=[None, [0, -1]], op_flags=[['readonly'], ['readwrite', 'allocate']]) dtype = self.dtype type_ = self.dtype.type # for each new sample, each bit is logical AND of old samples # bit is ON, for x, y in itr: y[...] = numpy.sum([type_((x >> bit & 1).all() * (2 ** bit)) for bit in bits], dtype=self.dtype) new = StateVector(itr.operands[1], dtype=dtype) new.__metadata_finalize__(self) new._unit = self.unit new.sample_rate = rate2 return new # error for non-integer resampling factors elif rate1 < rate2: raise ValueError("New sample rate must be multiple of input " "series rate if upsampling a StateVector") else: raise ValueError("New sample rate must be divisor of input " "series rate if downsampling a StateVector")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read(cls, source, format=None, coalesce=False, **kwargs): # pylint: disable=redefined-builtin """Read segments from file into a `SegmentList` Parameters filename : `str` path of file to read format : `str`, optional source format identifier. If not given, the format will be detected if possible. See below for list of acceptable formats. coalesce : `bool`, optional if `True` coalesce the segment list before returning, otherwise return exactly as contained in file(s). **kwargs other keyword arguments depend on the format, see the online documentation for details (:ref:`gwpy-segments-io`) Returns ------- segmentlist : `SegmentList` `SegmentList` active and known segments read from file. Notes -----"""
def combiner(listofseglists): """Combine `SegmentList` from each file into a single object """ out = cls(seg for seglist in listofseglists for seg in seglist) if coalesce: return out.coalesce() return out return io_read_multi(combiner, cls, source, format=format, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, target, *args, **kwargs): """Write this `SegmentList` to a file Arguments and keywords depend on the output format, see the online documentation for full details for each format. Parameters target : `str` output filename Notes -----"""
return io_registry.write(self, target, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_from_cwb(source, *args, **kwargs): """Read an `EventTable` from a Coherent WaveBurst ROOT file This function just redirects to the format='root' reader with appropriate defaults. """
return EventTable.read(source, 'waveburst', *args, format='root', **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_backend_mod(name=None): """Returns the imported module for the given backend name Parameters name : `str`, optional the name of the backend, defaults to the current backend. Returns ------- backend_mod: `module` the module as returned by :func:`importlib.import_module` Examples -------- """
if name is None: name = get_backend() backend_name = (name[9:] if name.startswith("module://") else "matplotlib.backends.backend_{}".format(name.lower())) return importlib.import_module(backend_name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _group_axes_data(inputs, separate=None, flat=False): """Determine the number of axes from the input args to this `Plot` Parameters inputs : `list` of array-like data sets A list of data arrays, or a list of lists of data sets sep : `bool`, optional Plot each set of data on a separate `Axes` flat : `bool`, optional Return a flattened list of data objects Returns ------- axesdata : `list` of lists of array-like data A `list` with one element per required `Axes` containing the array-like data sets for those `Axes`, unless ``flat=True`` is given. Notes ----- The logic for this method is as follows: - if a `list` of data arrays are given, and `separate=False`, use 1 `Axes` - if a `list` of data arrays are given, and `separate=True`, use N `Axes, one for each data array - if a nested `list` of data arrays are given, ignore `sep` and use one `Axes` for each group of arrays. Examples -------- [[1, 2]] [[1], [2]] [[1, 2], [3]] """
# determine auto-separation if separate is None and inputs: # if given a nested list of data, multiple axes are required if any(isinstance(x, iterable_types + (dict,)) for x in inputs): separate = True # if data are of different types, default to separate elif not all(type(x) is type(inputs[0]) for x in inputs): # noqa: E721 separate = True # build list of lists out = [] for x in inputs: if isinstance(x, dict): # unwrap dict x = list(x.values()) # new group from iterable, notes: # the iterable is presumed to be a list of independent data # structures, unless its a list of scalars in which case we # should plot them all as one if ( isinstance(x, (KeysView, ValuesView)) or isinstance(x, (list, tuple)) and ( not x or not numpy.isscalar(x[0])) ): out.append(x) # dataset starts a new group elif separate or not out: out.append([x]) # dataset joins current group else: # append input to most recent group out[-1].append(x) if flat: return [s for group in out for s in group] return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _init_axes(self, data, method='plot', xscale=None, sharex=False, sharey=False, geometry=None, separate=None, **kwargs): """Populate this figure with data, creating `Axes` as necessary """
if isinstance(sharex, bool): sharex = "all" if sharex else "none" if isinstance(sharey, bool): sharey = "all" if sharey else "none" # parse keywords axes_kw = {key: kwargs.pop(key) for key in utils.AXES_PARAMS if key in kwargs} # handle geometry and group axes if geometry is not None and geometry[0] * geometry[1] == len(data): separate = True axes_groups = _group_axes_data(data, separate=separate) if geometry is None: geometry = (len(axes_groups), 1) nrows, ncols = geometry if axes_groups and nrows * ncols != len(axes_groups): # mismatching data and geometry raise ValueError("cannot group data into {0} axes with a " "{1}x{2} grid".format(len(axes_groups), nrows, ncols)) # create grid spec gs = GridSpec(nrows, ncols) axarr = numpy.empty((nrows, ncols), dtype=object) # set default labels defxlabel = 'xlabel' not in axes_kw defylabel = 'ylabel' not in axes_kw flatdata = [s for group in axes_groups for s in group] for axis in ('x', 'y'): unit = _common_axis_unit(flatdata, axis=axis) if unit: axes_kw.setdefault('{}label'.format(axis), unit.to_string('latex_inline_dimensional')) # create axes for each group and draw each data object for group, (row, col) in zip_longest( axes_groups, itertools.product(range(nrows), range(ncols)), fillvalue=[]): # create Axes shared_with = {"none": None, "all": axarr[0, 0], "row": axarr[row, 0], "col": axarr[0, col]} axes_kw["sharex"] = shared_with[sharex] axes_kw["sharey"] = shared_with[sharey] axes_kw['xscale'] = xscale if xscale else _parse_xscale(group) ax = axarr[row, col] = self.add_subplot(gs[row, col], **axes_kw) # plot data plot_func = getattr(ax, method) if method in ('imshow', 'pcolormesh'): for obj in group: plot_func(obj, **kwargs) elif group: plot_func(*group, **kwargs) # set default axis labels for axis, share, pos, n, def_ in ( (ax.xaxis, sharex, row, nrows, defxlabel), (ax.yaxis, sharey, col, ncols, defylabel), ): # hide label if shared axis and not bottom left panel if share == 'all' and pos < n - 1: 
axis.set_label_text('') # otherwise set default status else: axis.isDefault_label = def_ return self.axes
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def refresh(self): """Refresh the current figure """
# redraw colorbars first so they pick up colormap/norm changes,
# then repaint the canvas
for cbar in self.colorbars:
    cbar.draw_all()
self.canvas.draw()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def close(self): """Close the plot and release its memory. """
from matplotlib.pyplot import close

# reset scales and clear each Axes (in reverse order) before closing,
# to work around matplotlib/matplotlib#9970
for axes in reversed(self.axes):
    axes.set_xscale('linear')
    axes.set_yscale('linear')
    axes.cla()
close(self)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_axes(self, projection=None): """Find all `Axes`, optionally matching the given projection Parameters projection : `str` name of axes types to return Returns ------- axlist : `list` of `~matplotlib.axes.Axes` """
if projection is None: return self.axes return [ax for ax in self.axes if ax.name == projection.lower()]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def colorbar(self, mappable=None, cax=None, ax=None, fraction=0., label=None, emit=True, **kwargs): """Add a colorbar to the current `Plot` A colorbar must be associated with an `Axes` on this `Plot`, and an existing mappable element (e.g. an image). Parameters mappable : matplotlib data collection Collection against which to map the colouring cax : `~matplotlib.axes.Axes` Axes on which to draw colorbar ax : `~matplotlib.axes.Axes` Axes relative to which to position colorbar fraction : `float`, optional Fraction of original axes to use for colorbar, give `fraction=0` to not resize the original axes at all. emit : `bool`, optional If `True` update all mappables on `Axes` to match the same colouring as the colorbar. **kwargs other keyword arguments to be passed to the :meth:`~matplotlib.figure.Figure.colorbar` Returns ------- cbar : `~matplotlib.colorbar.Colorbar` the newly added `Colorbar` See Also -------- matplotlib.figure.Figure.colorbar matplotlib.colorbar.Colorbar Examples -------- To plot a simple image and add a colorbar: Colorbars can also be generated by directly referencing the parent axes: """
# pre-process kwargs mappable, kwargs = gcbar.process_colorbar_kwargs( self, mappable, ax, cax=cax, fraction=fraction, **kwargs) # generate colour bar cbar = super(Plot, self).colorbar(mappable, **kwargs) self.colorbars.append(cbar) if label: # mpl<1.3 doesn't accept label in Colorbar constructor cbar.set_label(label) # update mappables for this axis if emit: ax = kwargs.pop('ax') norm = mappable.norm cmap = mappable.get_cmap() for map_ in ax.collections + ax.images: map_.set_norm(norm) map_.set_cmap(cmap) return cbar
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_colorbar(self, *args, **kwargs): """DEPRECATED, use `Plot.colorbar` instead """
warnings.warn( "{0}.add_colorbar was renamed {0}.colorbar, this warnings will " "result in an error in the future".format(type(self).__name__), DeprecationWarning) return self.colorbar(*args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_segments_bar(self, segments, ax=None, height=0.14, pad=0.1, sharex=True, location='bottom', **plotargs): """Add a segment bar `Plot` indicating state information. By default, segments are displayed in a thin horizontal set of Axes sitting immediately below the x-axis of the main, similarly to a colorbar. Parameters segments : `~gwpy.segments.DataQualityFlag` A data-quality flag, or `SegmentList` denoting state segments about this Plot ax : `Axes`, optional Specific `Axes` relative to which to position new `Axes`, defaults to :func:`~matplotlib.pyplot.gca()` height : `float, `optional Height of the new axes, as a fraction of the anchor axes pad : `float`, optional Padding between the new axes and the anchor, as a fraction of the anchor axes dimension sharex : `True`, `~matplotlib.axes.Axes`, optional Either `True` to set ``sharex=ax`` for the new segment axes, or an `Axes` to use directly location : `str`, optional Location for new segment axes, defaults to ``'bottom'``, acceptable values are ``'top'`` or ``'bottom'``. **plotargs extra keyword arguments are passed to :meth:`~gwpy.plot.SegmentAxes.plot` """
# get axes to anchor against if not ax: ax = self.gca() # set options for new axes axes_kw = { 'pad': pad, 'add_to_figure': True, 'sharex': ax if sharex is True else sharex or None, 'axes_class': get_projection_class('segments'), } # map X-axis limit from old axes if axes_kw['sharex'] is ax and not ax.get_autoscalex_on(): axes_kw['xlim'] = ax.get_xlim() # if axes uses GPS scaling, copy the epoch as well try: axes_kw['epoch'] = ax.get_epoch() except AttributeError: pass # add new axes if ax.get_axes_locator(): divider = ax.get_axes_locator()._axes_divider else: from mpl_toolkits.axes_grid1 import make_axes_locatable divider = make_axes_locatable(ax) if location not in {'top', 'bottom'}: raise ValueError("Segments can only be positoned at 'top' or " "'bottom'.") segax = divider.append_axes(location, height, **axes_kw) # update anchor axes if axes_kw['sharex'] is ax and location == 'bottom': # map label segax.set_xlabel(ax.get_xlabel()) segax.xaxis.isDefault_label = ax.xaxis.isDefault_label ax.set_xlabel("") # hide ticks on original axes setp(ax.get_xticklabels(), visible=False) # plot segments segax.plot(segments, **plotargs) segax.grid(b=False, which='both', axis='y') segax.autoscale(axis='y', tight=True) return segax
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_hacr_channels(db=None, gps=None, connection=None, **conectkwargs): """Return the names of all channels present in the given HACR database """
# connect if needed if connection is None: if gps is None: gps = from_gps('now') if db is None: db = get_database_names(gps, gps)[0] connection = connect(db=db, **conectkwargs) # query out = query("select channel from job where monitorName = 'chacr'") return [r[0] for r in out]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_hacr_triggers(channel, start, end, columns=HACR_COLUMNS, pid=None, monitor='chacr', selection=None, **connectkwargs): """Fetch a table of HACR triggers in the given interval """
if columns is None: columns = HACR_COLUMNS columns = list(columns) span = Segment(*map(to_gps, (start, end))) # parse selection for SQL query (removing leading 'where ') selectionstr = 'and %s' % format_db_selection(selection, engine=None)[6:] # get database names and loop over each on databases = get_database_names(start, end) rows = [] for db in databases: conn = connect(db, **connectkwargs) cursor = conn.cursor() # find process ID(s) for this channel pids = query("select process_id, gps_start, gps_stop " "from job where monitorName = %r and channel = %r" % (monitor, str(channel)), connection=conn) for p, s, e in pids: # validate this process id if pid is not None and int(p) != int(pid): continue tspan = Segment(float(s), float(e)) if not tspan.intersects(span): continue # execute trigger query q = ('select %s from mhacr where process_id = %d and ' 'gps_start > %s and gps_start < %d %s order by gps_start asc' % (', '.join(columns), int(p), span[0], span[1], selectionstr)) n = cursor.execute(q) if n == 0: continue # get new events, convert to recarray, and append to table rows.extend(cursor.fetchall()) return EventTable(rows=rows, names=columns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(db, host=HACR_DATABASE_SERVER, user=HACR_DATABASE_USER, passwd=HACR_DATABASE_PASSWD): """Connect to the given SQL database """
try: import pymysql except ImportError as e: e.args = ('pymysql is required to fetch HACR triggers',) raise return pymysql.connect(host=host, user=user, passwd=passwd, db=db)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query(querystr, connection=None, **connectkwargs): """Execute a query of the given SQL database """
if connection is None: connection = connect(**connectkwargs) cursor = connection.cursor() cursor.execute(querystr) return cursor.fetchall()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_filter(self, filter_, frequencies=None, dB=True, analog=False, sample_rate=None, **kwargs): """Add a linear time-invariant filter to this BodePlot Parameters filter_ : `~scipy.signal.lti`, `tuple` the filter to plot, either as a `~scipy.signal.lti`, or a `tuple` with the following number and meaning of elements - 2: (numerator, denominator) - 3: (zeros, poles, gain) - 4: (A, B, C, D) frequencies : `numpy.ndarray`, optional list of frequencies (in Hertz) at which to plot dB : `bool`, optional if `True`, display magnitude in decibels, otherwise display amplitude, default: `True` **kwargs any other keyword arguments accepted by :meth:`~matplotlib.axes.Axes.plot` Returns ------- mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>` the lines drawn for the magnitude and phase of the filter. """
if not analog: if not sample_rate: raise ValueError("Must give sample_rate frequency to display " "digital (analog=False) filter") sample_rate = Quantity(sample_rate, 'Hz').value dt = 2 * pi / sample_rate if not isinstance(frequencies, (type(None), int)): frequencies = numpy.atleast_1d(frequencies).copy() frequencies *= dt # parse filter (without digital conversions) _, fcomp = parse_filter(filter_, analog=False) if analog: lti = signal.lti(*fcomp) else: lti = signal.dlti(*fcomp, dt=dt) # calculate frequency response w, mag, phase = lti.bode(w=frequencies) # convert from decibels if not dB: mag = 10 ** (mag / 10.) # draw mline = self.maxes.plot(w, mag, **kwargs)[0] pline = self.paxes.plot(w, phase, **kwargs)[0] return mline, pline
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_frequencyseries(self, spectrum, dB=True, power=False, **kwargs): """Plot the magnitude and phase of a complex-valued `FrequencySeries` Parameters spectrum : `~gwpy.frequencyseries.FrequencySeries` the (complex-valued) `FrequencySeries` to display db : `bool`, optional, default: `True` if `True`, display magnitude in decibels, otherwise display amplitude. power : `bool`, optional, default: `False` give `True` to incidate that ``spectrum`` holds power values, so ``dB = 10 * log(abs(spectrum))``, otherwise ``db = 20 * log(abs(spectrum))``. This argument is ignored if ``db=False``. **kwargs any other keyword arguments accepted by :meth:`~matplotlib.axes.Axes.plot` Returns ------- mag, phase : `tuple` of `lines <matplotlib.lines.Line2D>` the lines drawn for the magnitude and phase of the filter. """
# parse spectrum arguments kwargs.setdefault('label', spectrum.name) # get magnitude mag = numpy.absolute(spectrum.value) if dB: mag = to_db(mag) if not power: mag *= 2. # get phase phase = numpy.angle(spectrum.value, deg=True) # plot w = spectrum.frequencies.value mline = self.maxes.plot(w, mag, **kwargs)[0] pline = self.paxes.plot(w, phase, **kwargs)[0] return mline, pline
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_omega_scan_config(source): """Parse an Omega-scan configuration file into a `ChannelList` Parameters source : `str` path of Omega configuration file to parse Returns ------- channels : `ChannelList` the list of channels (in order) as parsed Raises ------ RuntimeError if this method finds a line it cannot parse sensibly """
out = ChannelList() append = out.append if isinstance(source, FILE_LIKE): close = False else: source = open(source, 'r') close = True try: section = None while True: try: line = next(source) except StopIteration: break if line == '' or line == '\n' or line.startswith('#'): continue elif line.startswith('['): section = line[1:-2] elif line.startswith('{'): append(parse_omega_channel(source, section)) else: raise RuntimeError("Failed to parse Omega config line:\n%s" % line) finally: if close: source.close() return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_omega_channel(fobj, section=None): """Parse a `Channel` from an Omega-scan configuration file Parameters fobj : `file` the open file-like object to parse section : `str` name of section in which this channel should be recorded Returns ------- channel : `Channel` the channel as parsed from this `file` """
params = OrderedDict() while True: line = next(fobj) if line == '}\n': break key, value = line.split(':', 1) params[key.strip().rstrip()] = omega_param(value) out = Channel(params.get('channelName'), sample_rate=params.get('sampleFrequency'), frametype=params.get('frameType'), frequency_range=params.get('searchFrequencyRange')) out.group = section out.params = params return out
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def omega_param(val): """Parse a value from an Omega-scan configuration file This method tries to parse matlab-syntax parameters into a `str`, `float`, or `tuple` """
val = val.strip().rstrip() if val.startswith(('"', "'")): return str(val[1:-1]) if val.startswith('['): return tuple(map(float, val[1:-1].split())) return float(val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write_omega_scan_config(channellist, fobj, header=True): """Write a `ChannelList` to an Omega-pipeline scan configuration file This method is dumb and assumes the channels are sorted in the right order already """
if isinstance(fobj, FILE_LIKE):
    close = False
else:
    fobj = open(fobj, 'w')
    close = True
try:
    # optional file header
    if header:
        print('# Q Scan configuration file', file=fobj)
        print('# Generated with GWpy from a ChannelList', file=fobj)
    group = None
    for channel in channellist:
        # open a new "[section]" whenever the group changes
        if channel.group != group:
            group = channel.group
            print('\n[%s]' % group, file=fobj)
        print("", file=fobj)
        print_omega_channel(channel, file=fobj)
finally:
    if close:
        fobj.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_omega_channel(channel, file=sys.stdout): """Print a `Channel` in Omega-pipeline scan format """
print('{', file=file)
# start from any parameters already attached to the channel
try:
    params = channel.params.copy()
except AttributeError:
    params = OrderedDict()
params.setdefault('channelName', str(channel))
params.setdefault('alwaysPlotFlag', int(params.pop('important', False)))
if channel.frametype:
    params.setdefault('frameType', channel.frametype)
if channel.sample_rate is not None:
    params.setdefault('sampleFrequency',
                      channel.sample_rate.to('Hz').value)
if channel.frequency_range is not None:
    low, high = channel.frequency_range.to('Hz').value
    params.setdefault('searchFrequencyRange', (low, high))
if 'qlow' in params or 'qhigh' in params:
    qlow = params.pop('qlow', 'sqrt(11)')
    qhigh = params.pop('qhigh', 64)
    params.setdefault('searchQRange', (qlow, qhigh))
# validate required keys
for key in ('channelName', 'frameType'):
    if key not in params:
        raise KeyError("No %r defined for %s" % (key, str(channel)))
# write key/value pairs, formatting tuples, whole floats and strings
# the way Omega expects
for key, value in params.items():
    key = '%s:' % str(key)
    if isinstance(value, tuple):
        value = '[%s]' % ' '.join(map(str, value))
    elif isinstance(value, float) and value.is_integer():
        value = int(value)
    elif isinstance(value, str):
        value = repr(value)
    print(' {0: <30} {1}'.format(key, value), file=file)
print('}', file=file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_nds2_name(channel): """Returns the NDS2-formatted name for a channel Understands how to format NDS name strings from `gwpy.detector.Channel` and `nds2.channel` objects """
if hasattr(channel, 'ndsname'): # gwpy.detector.Channel return channel.ndsname if hasattr(channel, 'channel_type'): # nds2.channel return '%s,%s' % (channel.name, channel.channel_type_to_string(channel.channel_type)) return str(channel)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_nds_env(env='NDSSERVER'): """Parse the NDSSERVER environment variable into a list of hosts Parameters env : `str`, optional environment variable name to use for server order, default ``'NDSSERVER'``. The contents of this variable should be a comma-separated list of `host:port` strings, e.g. ``'nds1.server.com:80,nds2.server.com:80'`` Returns ------- hostiter : `list` of `tuple` a list of (unique) ``(str, int)`` tuples for each host:port pair """
hosts = [] for host in os.getenv(env).split(','): try: host, port = host.rsplit(':', 1) except ValueError: port = None else: port = int(port) if (host, port) not in hosts: hosts.append((host, port)) return hosts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connect(host, port=None): """Open an `nds2.connection` to a given host and port Parameters host : `str` name of server with which to connect port : `int`, optional connection port Returns ------- connection : `nds2.connection` a new open connection to the given NDS host """
import nds2 # pylint: disable=no-member # set default port for NDS1 connections (required, I think) if port is None and NDS1_HOSTNAME.match(host): port = 8088 if port is None: return nds2.connection(host) return nds2.connection(host, port)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def auth_connect(host, port=None): """Open an `nds2.connection` handling simple authentication errors This method will catch exceptions related to kerberos authentication, and execute a kinit() for the user before attempting to connect again. Parameters host : `str` name of server with which to connect port : `int`, optional connection port Returns ------- connection : `nds2.connection` a new open connection to the given NDS host """
try: return connect(host, port) except RuntimeError as exc: if 'Request SASL authentication' not in str(exc): raise warnings.warn('Error authenticating against {0}:{1}'.format(host, port), NDSWarning) kinit() return connect(host, port)
def open_connection(func):
    """Decorate a function to create a `nds2.connection` if required
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):  # pylint: disable=missing-docstring
        if kwargs.get('connection', None) is None:
            # no connection given: a host name is then mandatory
            if 'host' not in kwargs:
                raise TypeError("one of `connection` or `host` is required "
                                "to query NDS2 server")
            kwargs['connection'] = auth_connect(kwargs.pop('host'),
                                                kwargs.pop('port', None))
        return func(*args, **kwargs)
    return wrapped_func
def parse_nds2_enums(func):
    """Decorate a function to translate a type string into an integer
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):  # pylint: disable=missing-docstring
        for kwd, enum_ in (('type', Nds2ChannelType), ('dtype', Nds2DataType)):
            given = kwargs.get(kwd, None)
            if given is None:
                # no restriction given: match anything
                kwargs[kwd] = enum_.any()
            elif not isinstance(given, int):
                # translate a name (e.g. 'raw') into its enum value
                kwargs[kwd] = enum_.find(given).value
        return func(*args, **kwargs)
    return wrapped_func
def reset_epoch(func):
    """Wrap a function to reset the epoch when finished

    This is useful for functions that wish to use `connection.set_epoch`.
    """
    @wraps(func)
    def wrapped_func(*args, **kwargs):  # pylint: disable=missing-docstring
        connection = kwargs.get('connection', None)
        if not connection:
            # nothing to restore, just delegate
            return func(*args, **kwargs)
        # remember the current epoch so it can be put back afterwards
        epoch = connection.current_epoch()
        try:
            return func(*args, **kwargs)
        finally:
            if epoch is not None:
                connection.set_epoch(epoch.gps_start, epoch.gps_stop)
    return wrapped_func
def find_channels(channels, connection=None, host=None, port=None,
                  sample_rate=None, type=Nds2ChannelType.any(),
                  dtype=Nds2DataType.any(), unique=False, epoch='ALL'):
    # pylint: disable=unused-argument,redefined-builtin
    """Query an NDS2 server for channel information

    Parameters
    ----------
    channels : `list` of `str`
        list of channel names to query, each can include bash-style globs

    connection : `nds2.connection`, optional
        open NDS2 connection to use for query

    host : `str`, optional
        name of NDS2 server to query, required if ``connection`` is
        not given

    port : `int`, optional
        port number on host to use for NDS2 connection

    sample_rate : `int`, `float`, `tuple`, optional
        a single number, representing a specific sample rate to match,
        or a tuple representing a ``(low, high)`` interval to match

    type : `int`, optional
        the NDS2 channel type to match

    dtype : `int`, optional
        the NDS2 data type to match

    unique : `bool`, optional, default: `False`
        require one (and only one) match per channel

    epoch : `str`, `tuple` of `int`, optional
        the NDS epoch to restrict to, either the name of a known epoch,
        or a 2-tuple of GPS ``[start, stop)`` times

    Returns
    -------
    channels : `list` of `nds2.channel`
        list of NDS2 channel objects

    See also
    --------
    nds2.connection.find_channels
        for documentation on the underlying query method
    """
    # restrict the query to the requested epoch
    if isinstance(epoch, tuple):
        connection.set_epoch(*epoch)
    else:
        connection.set_epoch(epoch or 'All')
    # nds2 wants the sample-rate restriction as a (low, high) pair,
    # or an empty tuple for 'no restriction'
    if sample_rate is None:
        sample_rate = tuple()
    elif isinstance(sample_rate, (int, float)):
        sample_rate = (sample_rate, sample_rate)
    # run one query per (glob) name and collect the results
    matches = []
    for name in _get_nds2_names(channels):
        matches.extend(_find_channel(connection, name, type, dtype,
                                     sample_rate, unique=unique))
    return matches