code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def get_urls(self, controllers=None, prefix_path=''):
    """Collect every valid url (program path only, no args/kwargs) in this
    manifest. A program reachable under two urls contributes both.

    Returns a set of path strings. Nested manifests are walked recursively;
    `controllers` (when given) restricts programs to those tagged with at
    least one matching controller.
    """
    # True when the program shares at least one controller tag with the filter.
    tag_match = lambda program: set(program.controllers) & set(controllers or [])

    found = set()
    for key, value in self.manifest.items():
        path = "%s/%s" % (prefix_path, key)
        if path.endswith('/') and prefix_path:
            path = path[:-1]

        if hasattr(value, 'lower'):
            # string value => redirect entry, always a valid url
            found.add(path)
        elif isinstance(value, Manifest):
            # nested manifest: recurse with this path as the new prefix;
            # '' for 'stacked' root programs.
            sub_prefix = '' if path == '/' else path
            found.update(value.get_urls(controllers=controllers,
                                        prefix_path=sub_prefix))
        elif isinstance(value, Program):
            # normalize to a list so the block below handles both cases
            value = [value]

        if hasattr(value, 'append'):
            # multiple programs defined; keep the ones for this controller tag
            for program in value:
                if not program.controllers or not controllers:
                    # no controllers on the program, or no tag filter given:
                    # always add
                    found.add(path)
                elif tag_match(program):
                    found.add(path)
    return found
Return a list of all valid urls (minus args and kwargs, just the program paths) for this manifest. If a single program has two urls, both will be returned.
Below is the the instruction that describes the task: ### Input: Return a list of all valid urls (minus args and kwargs, just the program paths) for this manifest. If a single program has two urls, both will be returned. ### Response: def get_urls(self, controllers=None, prefix_path=''): """ Return a list of all valid urls (minus args and kwargs, just the program paths) for this manifest. If a single program has two urls, both will be returned. """ tag_match = lambda program: set(program.controllers) & set(controllers or []) urls = set() for key, value in self.manifest.items(): path = "%s/%s" % (prefix_path, key) if path.endswith('/') and prefix_path: path = path[:-1] if hasattr(value, 'lower'): # is a string redirect urls.add(path) elif isinstance(value, Manifest): # is manifest pp = '' if path == '/' else path # for 'stacked' root programs. new_urls = value.get_urls(controllers=controllers, prefix_path=pp) urls.update(new_urls) elif isinstance(value, Program): # make a list so we can iterate through it in the next `if` block value = [value] if hasattr(value, 'append'): # defined is multiple programs, get the one for this controller tag. for program in value: if not program.controllers or not controllers: # no controllers defined on program. Always add. # or no tags defined for this get_urls call. Always add. urls.add(path) elif tag_match(program): urls.add(path) return urls
def closest_pair(arr, give="indicies"):
    """Find the pair(s) of indices corresponding to the closest elements in
    an array.

    If multiple pairs are equally close, every such pair is returned.
    Optionally returns the closest distance itself. Brute-force O(n^2)
    comparison — fine for small arrays, refactor if speed matters.
    - Blaise 2016-02-07

    Parameters
    ----------
    arr : numpy.ndarray
        The array to search.
    give : {'indicies', 'distance'} (optional)
        Toggle return behavior. If 'distance', returns a single float -
        the closest distance itself. Default is indicies.

    Returns
    -------
    list of lists of two tuples
        List containing lists of two tuples: indicies the nearest pair in
        the array.

    >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])
    >>> closest_pair(arr)
    [[(1,), (8,)], [(3,), (4,)]]
    """
    indices = list(np.ndindex(arr.shape))
    pairs = []
    # start from the largest possible distance in the array
    best = arr.max() - arr.min()
    for ia in indices:
        for ib in indices:
            if ia == ib:
                continue
            d = abs(arr[ia] - arr[ib])
            if d == best:
                # avoid recording the mirrored (b, a) duplicate
                if [ib, ia] not in pairs:
                    pairs.append([ia, ib])
            elif d < best:
                best = d
                pairs = [[ia, ib]]
    if give == "indicies":
        return pairs
    elif give == "distance":
        return best
    else:
        raise KeyError("give not recognized in closest_pair")
Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]]
Below is the the instruction that describes the task: ### Input: Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]] ### Response: def closest_pair(arr, give="indicies"): """Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. 
>>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]] """ idxs = [idx for idx in np.ndindex(arr.shape)] outs = [] min_dist = arr.max() - arr.min() for idxa in idxs: for idxb in idxs: if idxa == idxb: continue dist = abs(arr[idxa] - arr[idxb]) if dist == min_dist: if not [idxb, idxa] in outs: outs.append([idxa, idxb]) elif dist < min_dist: min_dist = dist outs = [[idxa, idxb]] if give == "indicies": return outs elif give == "distance": return min_dist else: raise KeyError("give not recognized in closest_pair")
def parse(cls, data):
    """
    Extracts message informations from `data`.

    :param data: received line.
    :type data: unicode
    :return: extracted informations (source, destination, command, args).
    :rtype: tuple(Source, str, str, list)
    :raise: :class:`fatbotslim.irc.NullMessage` if `data` is empty.
    """
    # NOTE(review): the docstring promises NullMessage on empty `data`, but
    # empty input actually raises IndexError at data[0] — confirm upstream
    # guarantees non-empty lines or that the caller translates the error.
    src = u''
    dst = None
    if data[0] == u':':
        # leading ':' marks a prefix: "<source> <rest>"
        src, data = data[1:].split(u' ', 1)
    if u' :' in data:
        # trailing parameter after ' :' — split it off and re-tokenize
        data, trailing = data.split(u' :', 1)
        args = data.split()
        args.extend(trailing.split())
    else:
        args = data.split()
    command = args.pop(0)
    if command in (PRIVMSG, NOTICE):
        dst = args.pop(0)
        if ctcp_re.match(args[0]):
            # CTCP payload is wrapped in \x01 markers
            args = args[0].strip(u'\x01').split()
            command = u'CTCP_' + args.pop(0)
    return Source(src), dst, command, args
Extracts message informations from `data`. :param data: received line. :type data: unicode :return: extracted informations (source, destination, command, args). :rtype: tuple(Source, str, str, list) :raise: :class:`fatbotslim.irc.NullMessage` if `data` is empty.
Below is the the instruction that describes the task: ### Input: Extracts message informations from `data`. :param data: received line. :type data: unicode :return: extracted informations (source, destination, command, args). :rtype: tuple(Source, str, str, list) :raise: :class:`fatbotslim.irc.NullMessage` if `data` is empty. ### Response: def parse(cls, data): """ Extracts message informations from `data`. :param data: received line. :type data: unicode :return: extracted informations (source, destination, command, args). :rtype: tuple(Source, str, str, list) :raise: :class:`fatbotslim.irc.NullMessage` if `data` is empty. """ src = u'' dst = None if data[0] == u':': src, data = data[1:].split(u' ', 1) if u' :' in data: data, trailing = data.split(u' :', 1) args = data.split() args.extend(trailing.split()) else: args = data.split() command = args.pop(0) if command in (PRIVMSG, NOTICE): dst = args.pop(0) if ctcp_re.match(args[0]): args = args[0].strip(u'\x01').split() command = u'CTCP_' + args.pop(0) return Source(src), dst, command, args
def feature_index(*feature_names): '''Returns a index creation function. Returns a valid index ``create`` function for the feature names given. This can be used with the :meth:`Store.define_index` method to create indexes on any combination of features in a feature collection. :type feature_names: list(unicode) :rtype: ``(val -> index val) -> (content_id, FeatureCollection) -> generator of [index val]`` ''' def _(trans, (cid, fc)): for fname in feature_names: feat = fc.get(fname) if feat is None: continue elif isinstance(feat, unicode): yield trans(feat) else: # string counter, sparse/dense vector for val in feat.iterkeys(): yield trans(val) return _
Returns a index creation function. Returns a valid index ``create`` function for the feature names given. This can be used with the :meth:`Store.define_index` method to create indexes on any combination of features in a feature collection. :type feature_names: list(unicode) :rtype: ``(val -> index val) -> (content_id, FeatureCollection) -> generator of [index val]``
Below is the the instruction that describes the task: ### Input: Returns a index creation function. Returns a valid index ``create`` function for the feature names given. This can be used with the :meth:`Store.define_index` method to create indexes on any combination of features in a feature collection. :type feature_names: list(unicode) :rtype: ``(val -> index val) -> (content_id, FeatureCollection) -> generator of [index val]`` ### Response: def feature_index(*feature_names): '''Returns a index creation function. Returns a valid index ``create`` function for the feature names given. This can be used with the :meth:`Store.define_index` method to create indexes on any combination of features in a feature collection. :type feature_names: list(unicode) :rtype: ``(val -> index val) -> (content_id, FeatureCollection) -> generator of [index val]`` ''' def _(trans, (cid, fc)): for fname in feature_names: feat = fc.get(fname) if feat is None: continue elif isinstance(feat, unicode): yield trans(feat) else: # string counter, sparse/dense vector for val in feat.iterkeys(): yield trans(val) return _
def connect_model(self, model):
    """Link the Database to the Model instance.

    In case a new database is created from scratch, ``connect_model``
    creates Trace objects for all tallyable pymc objects defined in
    `model`. If the database is being loaded from an existing file,
    ``connect_model`` restore the objects trace to their stored value.

    :Parameters:
    model : pymc.Model instance
        An instance holding the pymc objects defining a statistical
        model (stochastics, deterministics, data, ...)

    :raises AttributeError: if `model` is not a pymc.Model instance.
    """
    # Changed this to allow non-Model models. -AP
    if not isinstance(model, pymc.Model):
        raise AttributeError('Not a Model instance.')
    self.model = model

    if hasattr(self, '_state_'):
        # Restore the state of the Model from an existing Database.
        # The `load` method will have already created the Trace objects.
        names = set()
        for morenames in self.trace_names:
            names.update(morenames)
        for name, fun in six.iteritems(model._funs_to_tally):
            if name in self._traces:
                self._traces[name]._getfunc = fun
                names.remove(name)
        if len(names) > 0:
            raise RuntimeError(
                "Some objects from the database have not been assigned a getfunc: %s"
                % ', '.join(names))
    else:
        # Create a fresh new state. This is now taken care of in initialize.
        for name, fun in six.iteritems(model._funs_to_tally):
            # object-dtype results need the generic TraceObject container
            if np.array(fun()).dtype is np.dtype('object'):
                self._traces[name] = TraceObject(name, getfunc=fun, db=self)
            else:
                self._traces[name] = Trace(name, getfunc=fun, db=self)
Link the Database to the Model instance. In case a new database is created from scratch, ``connect_model`` creates Trace objects for all tallyable pymc objects defined in `model`. If the database is being loaded from an existing file, ``connect_model`` restore the objects trace to their stored value. :Parameters: model : pymc.Model instance An instance holding the pymc objects defining a statistical model (stochastics, deterministics, data, ...)
Below is the the instruction that describes the task: ### Input: Link the Database to the Model instance. In case a new database is created from scratch, ``connect_model`` creates Trace objects for all tallyable pymc objects defined in `model`. If the database is being loaded from an existing file, ``connect_model`` restore the objects trace to their stored value. :Parameters: model : pymc.Model instance An instance holding the pymc objects defining a statistical model (stochastics, deterministics, data, ...) ### Response: def connect_model(self, model): """Link the Database to the Model instance. In case a new database is created from scratch, ``connect_model`` creates Trace objects for all tallyable pymc objects defined in `model`. If the database is being loaded from an existing file, ``connect_model`` restore the objects trace to their stored value. :Parameters: model : pymc.Model instance An instance holding the pymc objects defining a statistical model (stochastics, deterministics, data, ...) """ # Changed this to allow non-Model models. -AP if isinstance(model, pymc.Model): self.model = model else: raise AttributeError('Not a Model instance.') # Restore the state of the Model from an existing Database. # The `load` method will have already created the Trace objects. if hasattr(self, '_state_'): names = set() for morenames in self.trace_names: names.update(morenames) for name, fun in six.iteritems(model._funs_to_tally): if name in self._traces: self._traces[name]._getfunc = fun names.remove(name) if len(names) > 0: raise RuntimeError( "Some objects from the database have not been assigned a getfunc: %s" % ', '.join(names)) # Create a fresh new state. This is now taken care of in initialize. else: for name, fun in six.iteritems(model._funs_to_tally): if np.array(fun()).dtype is np.dtype('object'): self._traces[ name] = TraceObject( name, getfunc=fun, db=self) else: self._traces[name] = Trace(name, getfunc=fun, db=self)
def lightcurve_flux_measures(ftimes, fmags, ferrs, magsarefluxes=False):
    '''This calculates percentiles and percentile ratios of the flux.

    Parameters
    ----------
    ftimes,fmags,ferrs : np.array
        The input mag/flux time-series with all non-finite elements removed.

    magsarefluxes : bool
        If the `fmags` array actually contains fluxes, will not convert
        `mags` to fluxes before calculating the percentiles.

    Returns
    -------
    dict
        A dict with all of the light curve flux percentiles and percentile
        ratios calculated, or None if there are fewer than 10 detections.
    '''
    ndet = len(fmags)
    if ndet <= 9:
        LOGERROR('not enough detections in this magseries '
                 'to calculate flux measures')
        return None

    # convert mags to fluxes unless the input is already in flux units
    fluxes = fmags if magsarefluxes else 10.0**(-0.4*fmags)

    flux_median = npmedian(fluxes)
    # largest absolute flux relative to the median flux
    flux_percent_amplitude = npmax(npabs(fluxes))/flux_median

    flux_pcts = nppercentile(
        fluxes,
        [5.0, 10, 17.5, 25, 32.5, 40, 60, 67.5, 75, 82.5, 90, 95]
    )

    # spreads between symmetric percentile pairs (95-5, 90-10, ...)
    frat_595    = flux_pcts[11] - flux_pcts[0]
    frat_1090   = flux_pcts[10] - flux_pcts[1]
    frat_175825 = flux_pcts[9]  - flux_pcts[2]
    frat_2575   = flux_pcts[8]  - flux_pcts[3]
    frat_325675 = flux_pcts[7]  - flux_pcts[4]
    frat_4060   = flux_pcts[6]  - flux_pcts[5]

    # flux percentile ratios, each spread normalized by the 5-95 spread
    ratio_mid20 = frat_4060/frat_595
    ratio_mid35 = frat_325675/frat_595
    ratio_mid50 = frat_2575/frat_595
    ratio_mid65 = frat_175825/frat_595
    ratio_mid80 = frat_1090/frat_595

    # ratio of the 5-95 spread to the median flux, expressed in magnitudes
    percent_difference_flux_percentile = frat_595/flux_median
    percentile_magdiff = -2.5*nplog10(percent_difference_flux_percentile)

    return {
        'flux_median': flux_median,
        'flux_percent_amplitude': flux_percent_amplitude,
        'flux_percentiles': flux_pcts,
        'flux_percentile_ratio_mid20': ratio_mid20,
        'flux_percentile_ratio_mid35': ratio_mid35,
        'flux_percentile_ratio_mid50': ratio_mid50,
        'flux_percentile_ratio_mid65': ratio_mid65,
        'flux_percentile_ratio_mid80': ratio_mid80,
        'percent_difference_flux_percentile': percentile_magdiff,
    }
This calculates percentiles and percentile ratios of the flux. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. magsarefluxes : bool If the `fmags` array actually contains fluxes, will not convert `mags` to fluxes before calculating the percentiles. Returns ------- dict A dict with all of the light curve flux percentiles and percentile ratios calculated.
Below is the the instruction that describes the task: ### Input: This calculates percentiles and percentile ratios of the flux. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. magsarefluxes : bool If the `fmags` array actually contains fluxes, will not convert `mags` to fluxes before calculating the percentiles. Returns ------- dict A dict with all of the light curve flux percentiles and percentile ratios calculated. ### Response: def lightcurve_flux_measures(ftimes, fmags, ferrs, magsarefluxes=False): '''This calculates percentiles and percentile ratios of the flux. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. magsarefluxes : bool If the `fmags` array actually contains fluxes, will not convert `mags` to fluxes before calculating the percentiles. Returns ------- dict A dict with all of the light curve flux percentiles and percentile ratios calculated. 
''' ndet = len(fmags) if ndet > 9: # get the fluxes if magsarefluxes: series_fluxes = fmags else: series_fluxes = 10.0**(-0.4*fmags) series_flux_median = npmedian(series_fluxes) # get the percent_amplitude for the fluxes series_flux_percent_amplitude = ( npmax(npabs(series_fluxes))/series_flux_median ) # get the flux percentiles series_flux_percentiles = nppercentile( series_fluxes, [5.0,10,17.5,25,32.5,40,60,67.5,75,82.5,90,95] ) series_frat_595 = ( series_flux_percentiles[-1] - series_flux_percentiles[0] ) series_frat_1090 = ( series_flux_percentiles[-2] - series_flux_percentiles[1] ) series_frat_175825 = ( series_flux_percentiles[-3] - series_flux_percentiles[2] ) series_frat_2575 = ( series_flux_percentiles[-4] - series_flux_percentiles[3] ) series_frat_325675 = ( series_flux_percentiles[-5] - series_flux_percentiles[4] ) series_frat_4060 = ( series_flux_percentiles[-6] - series_flux_percentiles[5] ) # calculate the flux percentile ratios series_flux_percentile_ratio_mid20 = series_frat_4060/series_frat_595 series_flux_percentile_ratio_mid35 = series_frat_325675/series_frat_595 series_flux_percentile_ratio_mid50 = series_frat_2575/series_frat_595 series_flux_percentile_ratio_mid65 = series_frat_175825/series_frat_595 series_flux_percentile_ratio_mid80 = series_frat_1090/series_frat_595 # calculate the ratio of F595/median flux series_percent_difference_flux_percentile = ( series_frat_595/series_flux_median ) series_percentile_magdiff = -2.5*nplog10( series_percent_difference_flux_percentile ) return { 'flux_median':series_flux_median, 'flux_percent_amplitude':series_flux_percent_amplitude, 'flux_percentiles':series_flux_percentiles, 'flux_percentile_ratio_mid20':series_flux_percentile_ratio_mid20, 'flux_percentile_ratio_mid35':series_flux_percentile_ratio_mid35, 'flux_percentile_ratio_mid50':series_flux_percentile_ratio_mid50, 'flux_percentile_ratio_mid65':series_flux_percentile_ratio_mid65, 'flux_percentile_ratio_mid80':series_flux_percentile_ratio_mid80, 
'percent_difference_flux_percentile':series_percentile_magdiff, } else: LOGERROR('not enough detections in this magseries ' 'to calculate flux measures') return None
def init_dict(data, index, columns, dtype=None):
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Builds a BlockManager (via arrays_to_mgr) from a dict of columns,
    filling columns missing from `data` with NaN of a suitable dtype when
    an explicit `columns` index is supplied.
    """
    if columns is not None:
        from pandas.core.series import Series
        arrays = Series(data, index=columns, dtype=object)
        data_names = arrays.index
        missing = arrays.isnull()

        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            if dtype is None or np.issubdtype(dtype, np.flexible):
                # GH#1783
                nan_dtype = object
            else:
                nan_dtype = dtype
            fill = construct_1d_arraylike_from_scalar(np.nan, len(index),
                                                      nan_dtype)
            arrays.loc[missing] = [fill] * missing.sum()
    else:
        keys = com.dict_keys_to_ordered_list(data)
        columns = data_names = Index(keys)
        # GH#24096 need copy to be deep for datetime64tz case
        # TODO: See if we can avoid these copies
        arrays = [data[k] if not is_datetime64tz_dtype(data[k])
                  else data[k].copy(deep=True)
                  for k in keys]
    return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases.
Below is the the instruction that describes the task: ### Input: Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. ### Response: def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """ if columns is not None: from pandas.core.series import Series arrays = Series(data, index=columns, dtype=object) data_names = arrays.index missing = arrays.isnull() if index is None: # GH10856 # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): if dtype is None or np.issubdtype(dtype, np.flexible): # GH#1783 nan_dtype = object else: nan_dtype = dtype val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) arrays.loc[missing] = [val] * missing.sum() else: keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) # GH#24096 need copy to be deep for datetime64tz case # TODO: See if we can avoid these copies arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else data[k].copy(deep=True) for k in keys] return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def filter_data_frame(self, data_frame, centre=False, keep_cols=['anno']):
    """
    This method filters a data frame signal as suggested in [1]. First
    step is to high pass filter the data frame using a butter Butterworth
    digital and analog filter
    (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html).
    Then the method filter the data frame along one-dimension using a
    digital filter.
    (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html)

    :param data_frame: the data frame
    :param centre: when True, de-mean each column and zero it out up to the
        first positive crossing
    :param keep_cols: columns copied through unfiltered from `data_frame`
    """
    apply_filter = lambda col: butter_lowpass_filter(
        col.values,
        self.sampling_frequency,
        cutoff=self.cutoff_frequency,
        order=self.filter_order)

    filtered = data_frame.apply(apply_filter, 0)
    # we don't need to filter the time difference
    # filtered.td = data_frame.td
    logging.debug("filtered whole dataframe!")

    # I need to fix this as I am losing some important information
    # one idea would be to look at where the sign changes (first and second
    # peak) and keep that information aswell.
    if centre:
        # de-mean each column
        filtered -= filtered.mean()
        for col in filtered:
            first_positive = np.argwhere(filtered[col] > 0)[0][0]
            filtered[col][:first_positive] = 0

    # No python3 support :(
    # if {*keep_cols}.issubset(filtered.columns):
    for c in keep_cols:
        if c not in filtered.columns:
            # NOTE(review): bare return -> None when a keep column is
            # missing; presumably deliberate, but callers get None silently
            # — confirm this is the intended contract.
            return
    filtered[keep_cols] = data_frame[keep_cols]
    return filtered
This method filters a data frame signal as suggested in [1]. First step is to high pass filter the data frame using a butter Butterworth digital and analog filter (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html). Then the method filter the data frame along one-dimension using a digital filter. (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html) :param data_frame: the data frame :param str cutoff_frequency: The path to load data from :param str filter_order: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
Below is the the instruction that describes the task: ### Input: This method filters a data frame signal as suggested in [1]. First step is to high pass filter the data frame using a butter Butterworth digital and analog filter (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html). Then the method filter the data frame along one-dimension using a digital filter. (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html) :param data_frame: the data frame :param str cutoff_frequency: The path to load data from :param str filter_order: format of the file. Default is CloudUPDRS. Set to mpower for mpower data. ### Response: def filter_data_frame(self, data_frame, centre=False, keep_cols=['anno']): """ This method filters a data frame signal as suggested in [1]. First step is to high pass filter the data frame using a butter Butterworth digital and analog filter (https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.signal.butter.html). Then the method filter the data frame along one-dimension using a digital filter. (https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html) :param data_frame: the data frame :param str cutoff_frequency: The path to load data from :param str filter_order: format of the file. Default is CloudUPDRS. Set to mpower for mpower data. """ b_f = lambda x: butter_lowpass_filter(x.values, self.sampling_frequency, cutoff=self.cutoff_frequency, order=self.filter_order) filtered_data_frame = data_frame.apply(b_f, 0) # we don't need to filter the time difference # filtered_data_frame.td = data_frame.td logging.debug("filtered whole dataframe!") # I need to fix this as I am losing some important information # one idea would be to look at where the sign changes (first and second peak) # and keep that information aswell. 
if centre: # de-mean filtered_data_frame -= filtered_data_frame.mean() for col in filtered_data_frame: first_zero_crossing = np.argwhere(filtered_data_frame[col] > 0)[0][0] filtered_data_frame[col][:first_zero_crossing] = 0 # No python3 support :( # if {*keep_cols}.issubset(filtered_data_frame.columns): for c in keep_cols: if c not in filtered_data_frame.columns: return filtered_data_frame[keep_cols] = data_frame[keep_cols] return filtered_data_frame
def execute(self, triple_map, output, **kwargs):
    """Method executes mapping between JSON source and output RDF

    Args:
    -----
        triple_map: SimpleNamespace
        output: RDF graph the generated triples are added to

    Returns a list of the subjects generated, one per iterated row.
    """
    subjects = []
    iterator_expr = str(triple_map.logicalSource.iterator)
    json_object = kwargs.get('obj', self.source)

    # Removes '.' as a generic iterator, replace with '@'
    if iterator_expr == ".":
        rows = [None, ]
    else:
        path = jsonpath_ng.parse(iterator_expr)
        rows = [match.value for match in path.find(json_object)][0]

    for row in rows:
        subject = self.generate_term(term_map=triple_map.subjectMap, **kwargs)
        for pred_obj_map in triple_map.predicateObjectMap:
            predicate = pred_obj_map.predicate

            if pred_obj_map.template is not None:
                # template-derived object
                output.add((subject,
                            predicate,
                            self.generate_term(term_map=pred_obj_map,
                                               **kwargs)))

            if pred_obj_map.parentTriplesMap is not None:
                # nested triples map handled by the parent machinery
                self.__handle_parents__(
                    output,
                    parent_map=pred_obj_map.parentTriplesMap,
                    subject=subject,
                    predicate=predicate,
                    obj=row,
                    **kwargs)

            if pred_obj_map.reference is not None:
                # JSONPath reference into the current row
                ref_path = jsonpath_ng.parse(str(pred_obj_map.reference))
                for obj in [m.value for m in ref_path.find(row)]:
                    if rdflib.term._is_valid_uri(obj):
                        rdf_obj = rdflib.URIRef(str(obj))
                    else:
                        rdf_obj = rdflib.Literal(str(obj))
                    output.add((subject, predicate, rdf_obj))

            if pred_obj_map.constant is not None:
                output.add((subject, predicate, pred_obj_map.constant))
        subjects.append(subject)
    return subjects
Method executes mapping between JSON source and output RDF Args: ----- triple_map: SimpleNamespace
Below is the the instruction that describes the task: ### Input: Method executes mapping between JSON source and output RDF Args: ----- triple_map: SimpleNamespace ### Response: def execute(self, triple_map, output, **kwargs): """Method executes mapping between JSON source and output RDF Args: ----- triple_map: SimpleNamespace """ subjects = [] logical_src_iterator = str(triple_map.logicalSource.iterator) json_object = kwargs.get('obj', self.source) # Removes '.' as a generic iterator, replace with '@' if logical_src_iterator == ".": results = [None,] else: json_path_exp = jsonpath_ng.parse(logical_src_iterator) results = [r.value for r in json_path_exp.find(json_object)][0] for row in results: subject = self.generate_term(term_map=triple_map.subjectMap, **kwargs) for pred_obj_map in triple_map.predicateObjectMap: predicate = pred_obj_map.predicate if pred_obj_map.template is not None: output.add(( subject, predicate, self.generate_term(term_map=pred_obj_map, **kwargs))) if pred_obj_map.parentTriplesMap is not None: self.__handle_parents__( output, parent_map=pred_obj_map.parentTriplesMap, subject=subject, predicate=predicate, obj=row, **kwargs) if pred_obj_map.reference is not None: ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference)) found_objects = [r.value for r in ref_exp.find(row)] for obj in found_objects: if rdflib.term._is_valid_uri(obj): rdf_obj = rdflib.URIRef(str(obj)) else: rdf_obj = rdflib.Literal(str(obj)) output.add((subject, predicate, rdf_obj)) if pred_obj_map.constant is not None: output.add((subject, predicate, pred_obj_map.constant)) subjects.append(subject) return subjects
def check_secure(): """Check request, return False if using SSL or local connection.""" if this.request.is_secure(): return True # using SSL elif this.request.META['REMOTE_ADDR'] in [ 'localhost', '127.0.0.1', ]: return True # localhost raise MeteorError(403, 'Authentication refused without SSL.')
Check request, return False if using SSL or local connection.
Below is the the instruction that describes the task: ### Input: Check request, return False if using SSL or local connection. ### Response: def check_secure(): """Check request, return False if using SSL or local connection.""" if this.request.is_secure(): return True # using SSL elif this.request.META['REMOTE_ADDR'] in [ 'localhost', '127.0.0.1', ]: return True # localhost raise MeteorError(403, 'Authentication refused without SSL.')
def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ databases = set() databases.update(self._plain_db.keys()) for _, namespace in self._regex_map: database_name, _ = namespace.source_name.split(".", 1) if "*" in database_name: return [] databases.add(database_name) return list(databases)
Return the databases we want to include, or empty list for all.
Below is the the instruction that describes the task: ### Input: Return the databases we want to include, or empty list for all. ### Response: def get_included_databases(self): """Return the databases we want to include, or empty list for all. """ databases = set() databases.update(self._plain_db.keys()) for _, namespace in self._regex_map: database_name, _ = namespace.source_name.split(".", 1) if "*" in database_name: return [] databases.add(database_name) return list(databases)
def convert_to_float_list(value): """ Converts a comma separate string to a list :param value: the format must be 1.2,-3.5 (commas with no space) :type value: String :returns: List :example: >>> convert_to_integer_list('003,003,004,004') [1.2, -3.5] """ if isinstance(value, list) or value is None: return value else: s = re.findall('([-+]?\d*\.\d+|\d+|[-+]?\d+)', value) for k, v in enumerate(s): try: s[k] = float(v) except ValueError: pass return s
Converts a comma separate string to a list :param value: the format must be 1.2,-3.5 (commas with no space) :type value: String :returns: List :example: >>> convert_to_integer_list('003,003,004,004') [1.2, -3.5]
Below is the the instruction that describes the task: ### Input: Converts a comma separate string to a list :param value: the format must be 1.2,-3.5 (commas with no space) :type value: String :returns: List :example: >>> convert_to_integer_list('003,003,004,004') [1.2, -3.5] ### Response: def convert_to_float_list(value): """ Converts a comma separate string to a list :param value: the format must be 1.2,-3.5 (commas with no space) :type value: String :returns: List :example: >>> convert_to_integer_list('003,003,004,004') [1.2, -3.5] """ if isinstance(value, list) or value is None: return value else: s = re.findall('([-+]?\d*\.\d+|\d+|[-+]?\d+)', value) for k, v in enumerate(s): try: s[k] = float(v) except ValueError: pass return s
def check(self): """ Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise. """ for character, followers in self.items(): for follower in followers: if follower not in self: return False return True
Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise.
Below is the the instruction that describes the task: ### Input: Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise. ### Response: def check(self): """ Check that this table is complete, that is, every character of this table can be followed by a new character. :return: True if the table is complete, False otherwise. """ for character, followers in self.items(): for follower in followers: if follower not in self: return False return True
def invoke_shell(self, locs, banner): """ Invokes the appropriate flavor of the python shell. Falls back on the native python shell if the requested flavor (ipython, bpython,etc) is not installed. """ shell = self.SHELLS[self.args.shell] try: shell().invoke(locs, banner) except ImportError as e: warn(( "%s is not installed, `%s`, " "falling back to native shell") % (self.args.shell, e), RuntimeWarning ) if shell == NativePythonShell: raise NativePythonShell().invoke(locs, banner)
Invokes the appropriate flavor of the python shell. Falls back on the native python shell if the requested flavor (ipython, bpython,etc) is not installed.
Below is the the instruction that describes the task: ### Input: Invokes the appropriate flavor of the python shell. Falls back on the native python shell if the requested flavor (ipython, bpython,etc) is not installed. ### Response: def invoke_shell(self, locs, banner): """ Invokes the appropriate flavor of the python shell. Falls back on the native python shell if the requested flavor (ipython, bpython,etc) is not installed. """ shell = self.SHELLS[self.args.shell] try: shell().invoke(locs, banner) except ImportError as e: warn(( "%s is not installed, `%s`, " "falling back to native shell") % (self.args.shell, e), RuntimeWarning ) if shell == NativePythonShell: raise NativePythonShell().invoke(locs, banner)
def _F(self, x, p): """ solution of the projection integal (kappa) arctanh / arctan function :param x: r/Rs :param p: r_core / Rs :return: """ prefactor = 0.5 * (1 + p ** 2) ** -1 * p if isinstance(x, np.ndarray): inds0 = np.where(x * p == 1) inds1 = np.where(x * p < 1) inds2 = np.where(x * p > 1) func = np.ones_like(x) func[inds0] = self._u(x[inds0]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds0]) ** -1)) func[inds1] = self._u(x[inds1]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds1]) ** -1)) - \ (2 * p * self._g(x[inds1], p) ** -1 * np.arctanh(self._g(x[inds1], p))) func[inds2] = self._u(x[inds2]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds2]) ** -1)) - \ (2 * p * self._f(x[inds2], p) ** -1 * np.arctan(self._f(x[inds2], p))) return prefactor * func else: if x * p == 1: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) elif x * p < 1: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \ (2 * p * self._g(x, p) ** -1 * np.arctanh(self._g(x, p))) else: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \ (2 * p * self._f(x, p) ** -1 * np.arctan(self._f(x, p))) return prefactor * func
solution of the projection integal (kappa) arctanh / arctan function :param x: r/Rs :param p: r_core / Rs :return:
Below is the the instruction that describes the task: ### Input: solution of the projection integal (kappa) arctanh / arctan function :param x: r/Rs :param p: r_core / Rs :return: ### Response: def _F(self, x, p): """ solution of the projection integal (kappa) arctanh / arctan function :param x: r/Rs :param p: r_core / Rs :return: """ prefactor = 0.5 * (1 + p ** 2) ** -1 * p if isinstance(x, np.ndarray): inds0 = np.where(x * p == 1) inds1 = np.where(x * p < 1) inds2 = np.where(x * p > 1) func = np.ones_like(x) func[inds0] = self._u(x[inds0]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds0]) ** -1)) func[inds1] = self._u(x[inds1]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds1]) ** -1)) - \ (2 * p * self._g(x[inds1], p) ** -1 * np.arctanh(self._g(x[inds1], p))) func[inds2] = self._u(x[inds2]) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x[inds2]) ** -1)) - \ (2 * p * self._f(x[inds2], p) ** -1 * np.arctan(self._f(x[inds2], p))) return prefactor * func else: if x * p == 1: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) elif x * p < 1: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \ (2 * p * self._g(x, p) ** -1 * np.arctanh(self._g(x, p))) else: func = self._u(x) ** -1 * (np.pi + 2 * p * np.arctanh(self._u(x) ** -1)) - \ (2 * p * self._f(x, p) ** -1 * np.arctan(self._f(x, p))) return prefactor * func
def _add_q(self, q_object): """Add a Q-object to the current filter.""" self._criteria = self._criteria._combine(q_object, q_object.connector)
Add a Q-object to the current filter.
Below is the the instruction that describes the task: ### Input: Add a Q-object to the current filter. ### Response: def _add_q(self, q_object): """Add a Q-object to the current filter.""" self._criteria = self._criteria._combine(q_object, q_object.connector)
def optimize_auto(self,max_iters=10000,verbose=True): """ Optimize the model parameters through a pre-defined protocol. :param int max_iters: the maximum number of iterations. :param boolean verbose: print the progress of optimization or not. """ self.Z.fix(warning=False) self.kern.fix(warning=False) self.kern_row.fix(warning=False) self.Zr.fix(warning=False) self.Xr.fix(warning=False) self.optimize(max_iters=int(0.1*max_iters),messages=verbose) self.unfix() self.optimize(max_iters=max_iters,messages=verbose)
Optimize the model parameters through a pre-defined protocol. :param int max_iters: the maximum number of iterations. :param boolean verbose: print the progress of optimization or not.
Below is the the instruction that describes the task: ### Input: Optimize the model parameters through a pre-defined protocol. :param int max_iters: the maximum number of iterations. :param boolean verbose: print the progress of optimization or not. ### Response: def optimize_auto(self,max_iters=10000,verbose=True): """ Optimize the model parameters through a pre-defined protocol. :param int max_iters: the maximum number of iterations. :param boolean verbose: print the progress of optimization or not. """ self.Z.fix(warning=False) self.kern.fix(warning=False) self.kern_row.fix(warning=False) self.Zr.fix(warning=False) self.Xr.fix(warning=False) self.optimize(max_iters=int(0.1*max_iters),messages=verbose) self.unfix() self.optimize(max_iters=max_iters,messages=verbose)
def region_size(im): r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. """ if im.dtype == bool: im = spim.label(im)[0] counts = sp.bincount(im.flatten()) counts[0] = 0 chords = counts[im] return chords
r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``.
Below is the the instruction that describes the task: ### Input: r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. ### Response: def region_size(im): r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``. """ if im.dtype == bool: im = spim.label(im)[0] counts = sp.bincount(im.flatten()) counts[0] = 0 chords = counts[im] return chords
def values(self) -> typing.Dict[str, str]: """The field values of this object's name as a dictionary in the form of {field: value}.""" return {k: v for k, v in self._items if v is not None}
The field values of this object's name as a dictionary in the form of {field: value}.
Below is the the instruction that describes the task: ### Input: The field values of this object's name as a dictionary in the form of {field: value}. ### Response: def values(self) -> typing.Dict[str, str]: """The field values of this object's name as a dictionary in the form of {field: value}.""" return {k: v for k, v in self._items if v is not None}
def send_caught_exception_stack_proceeded(self, thread): """Sends that some thread was resumed and is no longer showing an exception trace. """ thread_id = get_thread_id(thread) int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id) self.post_internal_command(int_cmd, thread_id) self.process_internal_commands()
Sends that some thread was resumed and is no longer showing an exception trace.
Below is the the instruction that describes the task: ### Input: Sends that some thread was resumed and is no longer showing an exception trace. ### Response: def send_caught_exception_stack_proceeded(self, thread): """Sends that some thread was resumed and is no longer showing an exception trace. """ thread_id = get_thread_id(thread) int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id) self.post_internal_command(int_cmd, thread_id) self.process_internal_commands()
def add_route(self, view: View, path: str, exact: bool = True) -> None: """Add a view to the app. Parameters ---------- view : View path : str exact : bool, optional """ if path[0] != '/': path = '/' + path for route in self._routes: assert path != route.path, 'Cannot use the same path twice' self._routes.append(Route(view=view, path=path, exact=exact)) self.app.add_url_rule( path, path[1:], lambda: render_template('bowtie.html', title=self.title) )
Add a view to the app. Parameters ---------- view : View path : str exact : bool, optional
Below is the the instruction that describes the task: ### Input: Add a view to the app. Parameters ---------- view : View path : str exact : bool, optional ### Response: def add_route(self, view: View, path: str, exact: bool = True) -> None: """Add a view to the app. Parameters ---------- view : View path : str exact : bool, optional """ if path[0] != '/': path = '/' + path for route in self._routes: assert path != route.path, 'Cannot use the same path twice' self._routes.append(Route(view=view, path=path, exact=exact)) self.app.add_url_rule( path, path[1:], lambda: render_template('bowtie.html', title=self.title) )
def restore(self): """ This method constructs the restoring beam and then adds the convolution to the residual. """ clean_beam, beam_params = beam_fit(self.psf_data, self.cdelt1, self.cdelt2) if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): self.restored = np.fft.fftshift(np.fft.irfft2(np.fft.rfft2(conv.pad_array(self.model))*np.fft.rfft2(clean_beam))) self.restored = self.restored[self.dirty_data_shape[0]/2:-self.dirty_data_shape[0]/2, self.dirty_data_shape[1]/2:-self.dirty_data_shape[1]/2] else: self.restored = np.fft.fftshift(np.fft.irfft2(np.fft.rfft2(self.model)*np.fft.rfft2(clean_beam))) self.restored += self.residual self.restored = self.restored.astype(np.float32) return beam_params
This method constructs the restoring beam and then adds the convolution to the residual.
Below is the the instruction that describes the task: ### Input: This method constructs the restoring beam and then adds the convolution to the residual. ### Response: def restore(self): """ This method constructs the restoring beam and then adds the convolution to the residual. """ clean_beam, beam_params = beam_fit(self.psf_data, self.cdelt1, self.cdelt2) if np.all(np.array(self.psf_data_shape)==2*np.array(self.dirty_data_shape)): self.restored = np.fft.fftshift(np.fft.irfft2(np.fft.rfft2(conv.pad_array(self.model))*np.fft.rfft2(clean_beam))) self.restored = self.restored[self.dirty_data_shape[0]/2:-self.dirty_data_shape[0]/2, self.dirty_data_shape[1]/2:-self.dirty_data_shape[1]/2] else: self.restored = np.fft.fftshift(np.fft.irfft2(np.fft.rfft2(self.model)*np.fft.rfft2(clean_beam))) self.restored += self.residual self.restored = self.restored.astype(np.float32) return beam_params
def containing_triangle(self, xi, yi): """ Returns indices of the triangles containing xi yi Parameters ---------- xi : float / array of floats, shape (l,) Cartesian coordinates in the x direction yi : float / array of floats, shape (l,) Cartesian coordinates in the y direction Returns ------- tri_indices: array of ints, shape (l,) Notes ----- The simplices are found as cartesian.Triangulation.simplices[tri_indices] """ p = self._permutation pts = np.column_stack([xi, yi]) sorted_simplices = np.sort(self._simplices, axis=1) triangles = [] for pt in pts: t = _tripack.trfind(3, pt[0], pt[1], self._x, self._y, self.lst, self.lptr, self.lend) tri = np.sort(t) - 1 triangles.extend(np.where(np.all(p[sorted_simplices]==p[tri], axis=1))[0]) return np.array(triangles).ravel()
Returns indices of the triangles containing xi yi Parameters ---------- xi : float / array of floats, shape (l,) Cartesian coordinates in the x direction yi : float / array of floats, shape (l,) Cartesian coordinates in the y direction Returns ------- tri_indices: array of ints, shape (l,) Notes ----- The simplices are found as cartesian.Triangulation.simplices[tri_indices]
Below is the the instruction that describes the task: ### Input: Returns indices of the triangles containing xi yi Parameters ---------- xi : float / array of floats, shape (l,) Cartesian coordinates in the x direction yi : float / array of floats, shape (l,) Cartesian coordinates in the y direction Returns ------- tri_indices: array of ints, shape (l,) Notes ----- The simplices are found as cartesian.Triangulation.simplices[tri_indices] ### Response: def containing_triangle(self, xi, yi): """ Returns indices of the triangles containing xi yi Parameters ---------- xi : float / array of floats, shape (l,) Cartesian coordinates in the x direction yi : float / array of floats, shape (l,) Cartesian coordinates in the y direction Returns ------- tri_indices: array of ints, shape (l,) Notes ----- The simplices are found as cartesian.Triangulation.simplices[tri_indices] """ p = self._permutation pts = np.column_stack([xi, yi]) sorted_simplices = np.sort(self._simplices, axis=1) triangles = [] for pt in pts: t = _tripack.trfind(3, pt[0], pt[1], self._x, self._y, self.lst, self.lptr, self.lend) tri = np.sort(t) - 1 triangles.extend(np.where(np.all(p[sorted_simplices]==p[tri], axis=1))[0]) return np.array(triangles).ravel()
def _get_env_list(obj, env): """Creates the list of environments to read :param obj: the settings instance :param env: settings env default='DYNACONF' :return: a list of working environments """ # add the [default] env env_list = [obj.get("DEFAULT_ENV_FOR_DYNACONF")] # compatibility with older versions that still uses [dynaconf] as # [default] env global_env = obj.get("ENVVAR_PREFIX_FOR_DYNACONF") or "DYNACONF" if global_env not in env_list: env_list.append(global_env) # add the current env if obj.current_env and obj.current_env not in env_list: env_list.append(obj.current_env) # add a manually set env if env and env not in env_list: env_list.append(env) # add the [global] env env_list.append("GLOBAL") return [env.lower() for env in env_list]
Creates the list of environments to read :param obj: the settings instance :param env: settings env default='DYNACONF' :return: a list of working environments
Below is the the instruction that describes the task: ### Input: Creates the list of environments to read :param obj: the settings instance :param env: settings env default='DYNACONF' :return: a list of working environments ### Response: def _get_env_list(obj, env): """Creates the list of environments to read :param obj: the settings instance :param env: settings env default='DYNACONF' :return: a list of working environments """ # add the [default] env env_list = [obj.get("DEFAULT_ENV_FOR_DYNACONF")] # compatibility with older versions that still uses [dynaconf] as # [default] env global_env = obj.get("ENVVAR_PREFIX_FOR_DYNACONF") or "DYNACONF" if global_env not in env_list: env_list.append(global_env) # add the current env if obj.current_env and obj.current_env not in env_list: env_list.append(obj.current_env) # add a manually set env if env and env not in env_list: env_list.append(env) # add the [global] env env_list.append("GLOBAL") return [env.lower() for env in env_list]
def estimate_B( xray_table, vhe_table, photon_energy_density=0.261 * u.eV / u.cm ** 3 ): """ Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter. 
""" xray = validate_data_table(xray_table, sed=False) vhe = validate_data_table(vhe_table, sed=False) xray_lum = trapz_loglog(xray["flux"] * xray["energy"], xray["energy"]) vhe_lum = trapz_loglog(vhe["flux"] * vhe["energy"], vhe["energy"]) uph = (photon_energy_density.to("erg/cm3")).value B0 = ( np.sqrt((xray_lum / vhe_lum).decompose().value * 8 * np.pi * uph) * u.G ).to("uG") return B0
Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter.
Below is the the instruction that describes the task: ### Input: Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter. ### Response: def estimate_B( xray_table, vhe_table, photon_energy_density=0.261 * u.eV / u.cm ** 3 ): """ Estimate magnetic field from synchrotron to Inverse Compton luminosity ratio Estimate the magnetic field from the ratio of X-ray to gamma-ray emission according to: .. 
math:: \\frac{L_\mathrm{xray}}{L_\gamma} = \\frac{u_\mathrm{B}}{u_\mathrm{ph}} = \\frac{B^2}{ 8 \pi u_\mathrm{ph}} where :math:`L_\mathrm{xray}` is the X-ray luminosity, :math:`L_\gamma` is the gamma-ray luminosity, and :math:`u_\mathrm{ph}` is the seed photon field energy density. Note that this assumes that the ratio of observed fluxes is equal to the ratio of bolometric synchrotron and IC luminosities, and that IC proceeds in the Thomson regims. This assumption is safe as long as the X-ray and gamma-ray emission contain the bulk of the bolometric emission (i.e., the peak in the SED is in the X-ray and gamma-ray observed bands). Even if the assumption does not hold, this is a good starting point for the magnetic field when doing simultaneous X-ray and gamma-ray spectral fits. Parameters ---------- xray_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the X-ray spectrum. vhe_table : :class:`~astropy.table.Table` Data table (see :ref:`dataformat` for details on the format) containing the HE/VHE gamma-ray spectrum. photon_energy_density : :class:`~astropy.units.Quantity` float, optional Energy density of the seed photon field for IC emission. Defaults to 0.261 eV/cm3, the energy density of the CMB. Returns ------- B : :class:`~astropy.units.Quantity` float Estimate of the magnetic flux density at the emitter. """ xray = validate_data_table(xray_table, sed=False) vhe = validate_data_table(vhe_table, sed=False) xray_lum = trapz_loglog(xray["flux"] * xray["energy"], xray["energy"]) vhe_lum = trapz_loglog(vhe["flux"] * vhe["energy"], vhe["energy"]) uph = (photon_energy_density.to("erg/cm3")).value B0 = ( np.sqrt((xray_lum / vhe_lum).decompose().value * 8 * np.pi * uph) * u.G ).to("uG") return B0
def save_device_info(self): """Save all device information to the device info file.""" if self._workdir is not None: devices = [] for addr in self._devices: device = self._devices.get(addr) if not device.address.is_x10: aldb = {} for mem in device.aldb: rec = device.aldb[mem] if rec: aldbRec = {'memory': mem, 'control_flags': rec.control_flags.byte, 'group': rec.group, 'address': rec.address.id, 'data1': rec.data1, 'data2': rec.data2, 'data3': rec.data3} aldb[mem] = aldbRec deviceInfo = {'address': device.address.id, 'cat': device.cat, 'subcat': device.subcat, 'product_key': device.product_key, 'aldb_status': device.aldb.status.value, 'aldb': aldb} devices.append(deviceInfo) asyncio.ensure_future(self._write_saved_device_info(devices), loop=self._loop)
Save all device information to the device info file.
Below is the the instruction that describes the task: ### Input: Save all device information to the device info file. ### Response: def save_device_info(self): """Save all device information to the device info file.""" if self._workdir is not None: devices = [] for addr in self._devices: device = self._devices.get(addr) if not device.address.is_x10: aldb = {} for mem in device.aldb: rec = device.aldb[mem] if rec: aldbRec = {'memory': mem, 'control_flags': rec.control_flags.byte, 'group': rec.group, 'address': rec.address.id, 'data1': rec.data1, 'data2': rec.data2, 'data3': rec.data3} aldb[mem] = aldbRec deviceInfo = {'address': device.address.id, 'cat': device.cat, 'subcat': device.subcat, 'product_key': device.product_key, 'aldb_status': device.aldb.status.value, 'aldb': aldb} devices.append(deviceInfo) asyncio.ensure_future(self._write_saved_device_info(devices), loop=self._loop)
def networkCoAuthor(self, detailedInfo = False, weighted = True, dropNonJournals = False, count = True, useShortNames = False, citeProfile = False): """Creates a coauthorship network for the RecordCollection. # Parameters _detailedInfo_ : `optional [bool or iterable[WOS tag Strings]]` > Default `False`, if `True` all nodes will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['PY', 'TI', 'SO', 'VL', 'BP']`. > If _detailedInfo_ is an iterable (that evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attributes. > For each of the selected tags an attribute will be added to the node using the values of those tags on the first `Record` encountered. **Warning** iterating over `RecordCollection` objects is not deterministic the first `Record` will not always be same between runs. The node will be given attributes with the names of the WOS tags for each of the selected tags. The attributes will contain strings of containing the values (with commas removed), if multiple values are encountered they will be comma separated. > Note: _detailedInfo_ is not identical to the _detailedCore_ argument of [Recordcollection.networkCoCitation()](#metaknowledge.RecordCollection.networkCoCitation) or [Recordcollection.networkCitation()](#metaknowledge.RecordCollection.networkCitation) _weighted_ : `optional [bool]` > Default `True`, whether the edges are weighted. If `True` the edges are weighted by the number of co-authorships. _dropNonJournals_ : `optional [bool]` > Default `False`, whether to drop authors from non-journals _count_ : `optional [bool]` > Default `True`, causes the number of occurrences of a node to be counted # Returns `Networkx Graph` > A networkx graph with author names as nodes and collaborations as edges. 
""" grph = nx.Graph() pcount = 0 progArgs = (0, "Starting to make a co-authorship network") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} if bool(detailedInfo): try: infoVals = [] for tag in detailedInfo: infoVals.append(normalizeToTag(tag)) except TypeError: infoVals = ['year', 'title', 'journal', 'volume', 'beginningPage'] def attributeMaker(Rec): attribsDict = {} for val in infoVals: recVal = Rec.get(val) if isinstance(recVal, list): attribsDict[val] = ', '.join((str(v).replace(',', '') for v in recVal)) else: attribsDict[val] = str(recVal).replace(',', '') if count: attribsDict['count'] = 1 if citeProfile: attribsDict['citeProfile'] = {} return attribsDict else: if count: if citeProfile: attributeMaker = lambda x: {'count' : 1, 'citeProfile' : {}} else: attributeMaker = lambda x: {'count' : 1} else: if citeProfile: attributeMaker = lambda x: {'citeProfile' : {}} else: attributeMaker = lambda x: {} with _ProgressBar(*progArgs, **progKwargs) as PBar: for R in self: if PBar: pcount += 1 PBar.updateVal(pcount/ len(self), "Analyzing: " + str(R)) if dropNonJournals and not R.createCitation().isJournal(): continue if useShortNames: authsList = R.get('authorsShort', []) else: authsList = R.get('authorsFull', []) if authsList: authsList = list(authsList) detailedInfo = attributeMaker(R) if citeProfile: citesLst = R.get('citations', []) for i, auth1 in enumerate(authsList): if auth1 not in grph: grph.add_node(auth1, **detailedInfo.copy()) elif count: grph.node[auth1]['count'] += 1 if citeProfile: for c in citesLst: try: grph.node[auth1]['citeProfile'][c] += 1 except KeyError: grph.node[auth1]['citeProfile'][c] = 1 for auth2 in authsList[i + 1:]: if auth2 not in grph: grph.add_node(auth2, **detailedInfo.copy()) elif count: grph.node[auth2]['count'] += 1 if citeProfile: for c in citesLst: try: grph.node[auth2]['citeProfile'][c] += 1 except KeyError: grph.node[auth2]['citeProfile'][c] = 1 if grph.has_edge(auth1, auth2) and 
weighted: grph.edges[auth1, auth2]['weight'] += 1 elif weighted: grph.add_edge(auth1, auth2, weight = 1) else: grph.add_edge(auth1, auth2) if citeProfile: if PBar: PBar.updateVal(.99, "Extracting citation profiles") previous = {} for n, dat in grph.nodes(data = True): previous[n] = dat #zip(*l) undoes zip(l1, l2) try: cites, counts = zip(*dat['citeProfile'].items()) except ValueError: cites, counts = [], [] dat['citeProfileCites'] = '|'.join((str(c) for c in cites)) dat['citeProfileCounts'] = '|'.join((str(c) for c in counts)) del dat['citeProfile'] if PBar: PBar.finish("Done making a co-authorship network from {}".format(self)) return grph
Creates a coauthorship network for the RecordCollection. # Parameters _detailedInfo_ : `optional [bool or iterable[WOS tag Strings]]` > Default `False`, if `True` all nodes will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['PY', 'TI', 'SO', 'VL', 'BP']`. > If _detailedInfo_ is an iterable (that evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attributes. > For each of the selected tags an attribute will be added to the node using the values of those tags on the first `Record` encountered. **Warning** iterating over `RecordCollection` objects is not deterministic the first `Record` will not always be same between runs. The node will be given attributes with the names of the WOS tags for each of the selected tags. The attributes will contain strings of containing the values (with commas removed), if multiple values are encountered they will be comma separated. > Note: _detailedInfo_ is not identical to the _detailedCore_ argument of [Recordcollection.networkCoCitation()](#metaknowledge.RecordCollection.networkCoCitation) or [Recordcollection.networkCitation()](#metaknowledge.RecordCollection.networkCitation) _weighted_ : `optional [bool]` > Default `True`, whether the edges are weighted. If `True` the edges are weighted by the number of co-authorships. _dropNonJournals_ : `optional [bool]` > Default `False`, whether to drop authors from non-journals _count_ : `optional [bool]` > Default `True`, causes the number of occurrences of a node to be counted # Returns `Networkx Graph` > A networkx graph with author names as nodes and collaborations as edges.
Below is the the instruction that describes the task: ### Input: Creates a coauthorship network for the RecordCollection. # Parameters _detailedInfo_ : `optional [bool or iterable[WOS tag Strings]]` > Default `False`, if `True` all nodes will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['PY', 'TI', 'SO', 'VL', 'BP']`. > If _detailedInfo_ is an iterable (that evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attributes. > For each of the selected tags an attribute will be added to the node using the values of those tags on the first `Record` encountered. **Warning** iterating over `RecordCollection` objects is not deterministic the first `Record` will not always be same between runs. The node will be given attributes with the names of the WOS tags for each of the selected tags. The attributes will contain strings of containing the values (with commas removed), if multiple values are encountered they will be comma separated. > Note: _detailedInfo_ is not identical to the _detailedCore_ argument of [Recordcollection.networkCoCitation()](#metaknowledge.RecordCollection.networkCoCitation) or [Recordcollection.networkCitation()](#metaknowledge.RecordCollection.networkCitation) _weighted_ : `optional [bool]` > Default `True`, whether the edges are weighted. If `True` the edges are weighted by the number of co-authorships. _dropNonJournals_ : `optional [bool]` > Default `False`, whether to drop authors from non-journals _count_ : `optional [bool]` > Default `True`, causes the number of occurrences of a node to be counted # Returns `Networkx Graph` > A networkx graph with author names as nodes and collaborations as edges. ### Response: def networkCoAuthor(self, detailedInfo = False, weighted = True, dropNonJournals = False, count = True, useShortNames = False, citeProfile = False): """Creates a coauthorship network for the RecordCollection. 
# Parameters _detailedInfo_ : `optional [bool or iterable[WOS tag Strings]]` > Default `False`, if `True` all nodes will be given info strings composed of information from the Record objects themselves. This is Equivalent to passing the list: `['PY', 'TI', 'SO', 'VL', 'BP']`. > If _detailedInfo_ is an iterable (that evaluates to `True`) of WOS Tags (or long names) The values of those tags will be used to make the info attributes. > For each of the selected tags an attribute will be added to the node using the values of those tags on the first `Record` encountered. **Warning** iterating over `RecordCollection` objects is not deterministic the first `Record` will not always be same between runs. The node will be given attributes with the names of the WOS tags for each of the selected tags. The attributes will contain strings of containing the values (with commas removed), if multiple values are encountered they will be comma separated. > Note: _detailedInfo_ is not identical to the _detailedCore_ argument of [Recordcollection.networkCoCitation()](#metaknowledge.RecordCollection.networkCoCitation) or [Recordcollection.networkCitation()](#metaknowledge.RecordCollection.networkCitation) _weighted_ : `optional [bool]` > Default `True`, whether the edges are weighted. If `True` the edges are weighted by the number of co-authorships. _dropNonJournals_ : `optional [bool]` > Default `False`, whether to drop authors from non-journals _count_ : `optional [bool]` > Default `True`, causes the number of occurrences of a node to be counted # Returns `Networkx Graph` > A networkx graph with author names as nodes and collaborations as edges. 
""" grph = nx.Graph() pcount = 0 progArgs = (0, "Starting to make a co-authorship network") if metaknowledge.VERBOSE_MODE: progKwargs = {'dummy' : False} else: progKwargs = {'dummy' : True} if bool(detailedInfo): try: infoVals = [] for tag in detailedInfo: infoVals.append(normalizeToTag(tag)) except TypeError: infoVals = ['year', 'title', 'journal', 'volume', 'beginningPage'] def attributeMaker(Rec): attribsDict = {} for val in infoVals: recVal = Rec.get(val) if isinstance(recVal, list): attribsDict[val] = ', '.join((str(v).replace(',', '') for v in recVal)) else: attribsDict[val] = str(recVal).replace(',', '') if count: attribsDict['count'] = 1 if citeProfile: attribsDict['citeProfile'] = {} return attribsDict else: if count: if citeProfile: attributeMaker = lambda x: {'count' : 1, 'citeProfile' : {}} else: attributeMaker = lambda x: {'count' : 1} else: if citeProfile: attributeMaker = lambda x: {'citeProfile' : {}} else: attributeMaker = lambda x: {} with _ProgressBar(*progArgs, **progKwargs) as PBar: for R in self: if PBar: pcount += 1 PBar.updateVal(pcount/ len(self), "Analyzing: " + str(R)) if dropNonJournals and not R.createCitation().isJournal(): continue if useShortNames: authsList = R.get('authorsShort', []) else: authsList = R.get('authorsFull', []) if authsList: authsList = list(authsList) detailedInfo = attributeMaker(R) if citeProfile: citesLst = R.get('citations', []) for i, auth1 in enumerate(authsList): if auth1 not in grph: grph.add_node(auth1, **detailedInfo.copy()) elif count: grph.node[auth1]['count'] += 1 if citeProfile: for c in citesLst: try: grph.node[auth1]['citeProfile'][c] += 1 except KeyError: grph.node[auth1]['citeProfile'][c] = 1 for auth2 in authsList[i + 1:]: if auth2 not in grph: grph.add_node(auth2, **detailedInfo.copy()) elif count: grph.node[auth2]['count'] += 1 if citeProfile: for c in citesLst: try: grph.node[auth2]['citeProfile'][c] += 1 except KeyError: grph.node[auth2]['citeProfile'][c] = 1 if grph.has_edge(auth1, auth2) and 
weighted: grph.edges[auth1, auth2]['weight'] += 1 elif weighted: grph.add_edge(auth1, auth2, weight = 1) else: grph.add_edge(auth1, auth2) if citeProfile: if PBar: PBar.updateVal(.99, "Extracting citation profiles") previous = {} for n, dat in grph.nodes(data = True): previous[n] = dat #zip(*l) undoes zip(l1, l2) try: cites, counts = zip(*dat['citeProfile'].items()) except ValueError: cites, counts = [], [] dat['citeProfileCites'] = '|'.join((str(c) for c in cites)) dat['citeProfileCounts'] = '|'.join((str(c) for c in counts)) del dat['citeProfile'] if PBar: PBar.finish("Done making a co-authorship network from {}".format(self)) return grph
def announcements_view(request): ''' The view of manager announcements. ''' page_name = "Manager Announcements" userProfile = UserProfile.objects.get(user=request.user) announcement_form = None manager_positions = Manager.objects.filter(incumbent=userProfile) if manager_positions: announcement_form = AnnouncementForm( request.POST if "post_announcement" in request.POST else None, profile=userProfile, ) if announcement_form and announcement_form.is_valid(): announcement_form.save(request) return HttpResponseRedirect(reverse('managers:announcements')) # A pseudo-dictionary, actually a list with items of form: # (announcement, announcement_pin_form) announcements_dict = list() for a in Announcement.objects.filter(pinned=True): pin_form = None if (a.manager.incumbent == userProfile) or request.user.is_superuser: pin_form = PinForm( request.POST if "pin-{0}".format(a.pk) else None, instance=a, ) if pin_form.is_valid(): pin_form.save() return HttpResponseRedirect(reverse('managers:announcements')) announcements_dict.append((a, pin_form)) # Oldest genesis of an pinned announcement to be displayed. within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE) for a in Announcement.objects.filter(pinned=False, post_date__gte=within_life): pin_form = None if request.user.is_superuser or (a.manager.incumbent == userProfile): pin_form = PinForm( request.POST if "pin-{0}".format(a.pk) else None, instance=a, ) announcements_dict.append((a, pin_form)) return render_to_response('announcements.html', { 'page_name': page_name, 'manager_positions': manager_positions, 'announcements_dict': announcements_dict, 'announcement_form': announcement_form, }, context_instance=RequestContext(request))
The view of manager announcements.
Below is the the instruction that describes the task: ### Input: The view of manager announcements. ### Response: def announcements_view(request): ''' The view of manager announcements. ''' page_name = "Manager Announcements" userProfile = UserProfile.objects.get(user=request.user) announcement_form = None manager_positions = Manager.objects.filter(incumbent=userProfile) if manager_positions: announcement_form = AnnouncementForm( request.POST if "post_announcement" in request.POST else None, profile=userProfile, ) if announcement_form and announcement_form.is_valid(): announcement_form.save(request) return HttpResponseRedirect(reverse('managers:announcements')) # A pseudo-dictionary, actually a list with items of form: # (announcement, announcement_pin_form) announcements_dict = list() for a in Announcement.objects.filter(pinned=True): pin_form = None if (a.manager.incumbent == userProfile) or request.user.is_superuser: pin_form = PinForm( request.POST if "pin-{0}".format(a.pk) else None, instance=a, ) if pin_form.is_valid(): pin_form.save() return HttpResponseRedirect(reverse('managers:announcements')) announcements_dict.append((a, pin_form)) # Oldest genesis of an pinned announcement to be displayed. within_life = now() - timedelta(hours=settings.ANNOUNCEMENT_LIFE) for a in Announcement.objects.filter(pinned=False, post_date__gte=within_life): pin_form = None if request.user.is_superuser or (a.manager.incumbent == userProfile): pin_form = PinForm( request.POST if "pin-{0}".format(a.pk) else None, instance=a, ) announcements_dict.append((a, pin_form)) return render_to_response('announcements.html', { 'page_name': page_name, 'manager_positions': manager_positions, 'announcements_dict': announcements_dict, 'announcement_form': announcement_form, }, context_instance=RequestContext(request))
def modified_files(root, tracked_only=False, commit=None): """Returns a list of files that has been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status. """ assert os.path.isabs(root), "Root has to be absolute, got: %s" % root command = ['hg', 'status'] if commit: command.append('--change=%s' % commit) # Convert to unicode and split status_lines = subprocess.check_output(command).decode('utf-8').split( os.linesep) modes = ['M', 'A'] if not tracked_only: modes.append(r'\?') modes_str = '|'.join(modes) modified_file_status = utils.filter_lines( status_lines, r'(?P<mode>%s) (?P<filename>.+)' % modes_str, groups=('filename', 'mode')) return dict((os.path.join(root, filename), mode) for filename, mode in modified_file_status)
Returns a list of files that has been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status.
Below is the the instruction that describes the task: ### Input: Returns a list of files that has been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status. ### Response: def modified_files(root, tracked_only=False, commit=None): """Returns a list of files that has been modified since the last commit. Args: root: the root of the repository, it has to be an absolute path. tracked_only: exclude untracked files when True. commit: SHA1 of the commit. If None, it will get the modified files in the working copy. Returns: a dictionary with the modified files as keys, and additional information as value. In this case it adds the status returned by hg status. """ assert os.path.isabs(root), "Root has to be absolute, got: %s" % root command = ['hg', 'status'] if commit: command.append('--change=%s' % commit) # Convert to unicode and split status_lines = subprocess.check_output(command).decode('utf-8').split( os.linesep) modes = ['M', 'A'] if not tracked_only: modes.append(r'\?') modes_str = '|'.join(modes) modified_file_status = utils.filter_lines( status_lines, r'(?P<mode>%s) (?P<filename>.+)' % modes_str, groups=('filename', 'mode')) return dict((os.path.join(root, filename), mode) for filename, mode in modified_file_status)
def compare(self, node, prev_value=None, prev_index=None): """Raises :exc:`TestFailed` if the node is not matched with `prev_value` or `prev_index`. """ if prev_value is not None and node.value != prev_value or \ prev_index is not None and node.index != prev_index: raise TestFailed(index=self.index)
Raises :exc:`TestFailed` if the node is not matched with `prev_value` or `prev_index`.
Below is the instruction that describes the task:
### Input:
Raises :exc:`TestFailed` if the node is not matched with `prev_value`
or `prev_index`.
### Response:
def compare(self, node, prev_value=None, prev_index=None):
        """Raises :exc:`TestFailed` if the node is not matched with `prev_value`
        or `prev_index`.
        """
        if prev_value is not None and node.value != prev_value or \
           prev_index is not None and node.index != prev_index:
            raise TestFailed(index=self.index)
def read_pdb(pdbfname, as_string=False): """Reads a given PDB file and returns a Pybel Molecule.""" pybel.ob.obErrorLog.StopLogging() # Suppress all OpenBabel warnings if os.name != 'nt': # Resource module not available for Windows maxsize = resource.getrlimit(resource.RLIMIT_STACK)[-1] resource.setrlimit(resource.RLIMIT_STACK, (min(2 ** 28, maxsize), maxsize)) sys.setrecursionlimit(10 ** 5) # increase Python recoursion limit return readmol(pdbfname, as_string=as_string)
Reads a given PDB file and returns a Pybel Molecule.
Below is the instruction that describes the task:
### Input:
Reads a given PDB file and returns a Pybel Molecule.
### Response:
def read_pdb(pdbfname, as_string=False):
    """Reads a given PDB file and returns a Pybel Molecule."""
    pybel.ob.obErrorLog.StopLogging()  # Suppress all OpenBabel warnings
    if os.name != 'nt':  # Resource module not available for Windows
        maxsize = resource.getrlimit(resource.RLIMIT_STACK)[-1]
        resource.setrlimit(resource.RLIMIT_STACK, (min(2 ** 28, maxsize), maxsize))
    sys.setrecursionlimit(10 ** 5)  # increase Python recoursion limit
    return readmol(pdbfname, as_string=as_string)
def make_specified_size_gctoo(og_gctoo, num_entries, dim): """ Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified. """ assert dim in ["row", "col"], "dim specified must be either 'row' or 'col'" dim_index = 0 if "row" == dim else 1 assert num_entries <= og_gctoo.data_df.shape[dim_index], ("number of entries must be smaller than dimension being " "subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}".format( num_entries, dim, dim_index, og_gctoo.data_df.shape[dim_index])) if dim == "col": columns = [x for x in og_gctoo.data_df.columns.values] numpy.random.shuffle(columns) columns = columns[0:num_entries] rows = og_gctoo.data_df.index.values else: rows = [x for x in og_gctoo.data_df.index.values] numpy.random.shuffle(rows) rows = rows[0:num_entries] columns = og_gctoo.data_df.columns.values new_data_df = og_gctoo.data_df.loc[rows, columns] new_row_meta = og_gctoo.row_metadata_df.loc[rows] new_col_meta = og_gctoo.col_metadata_df.loc[columns] logger.debug( "after slice - new_col_meta.shape: {} new_row_meta.shape: {}".format(new_col_meta.shape, new_row_meta.shape)) # make & return new gctoo instance new_gctoo = GCToo.GCToo(data_df=new_data_df, row_metadata_df=new_row_meta, col_metadata_df=new_col_meta) return new_gctoo
Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified.
Below is the the instruction that describes the task: ### Input: Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified. ### Response: def make_specified_size_gctoo(og_gctoo, num_entries, dim): """ Subsets a GCToo instance along either rows or columns to obtain a specified size. Input: - og_gctoo (GCToo): a GCToo instance - num_entries (int): the number of entries to keep - dim (str): the dimension along which to subset. Must be "row" or "col" Output: - new_gctoo (GCToo): the GCToo instance subsetted as specified. """ assert dim in ["row", "col"], "dim specified must be either 'row' or 'col'" dim_index = 0 if "row" == dim else 1 assert num_entries <= og_gctoo.data_df.shape[dim_index], ("number of entries must be smaller than dimension being " "subsetted - num_entries: {} dim: {} dim_index: {} og_gctoo.data_df.shape[dim_index]: {}".format( num_entries, dim, dim_index, og_gctoo.data_df.shape[dim_index])) if dim == "col": columns = [x for x in og_gctoo.data_df.columns.values] numpy.random.shuffle(columns) columns = columns[0:num_entries] rows = og_gctoo.data_df.index.values else: rows = [x for x in og_gctoo.data_df.index.values] numpy.random.shuffle(rows) rows = rows[0:num_entries] columns = og_gctoo.data_df.columns.values new_data_df = og_gctoo.data_df.loc[rows, columns] new_row_meta = og_gctoo.row_metadata_df.loc[rows] new_col_meta = og_gctoo.col_metadata_df.loc[columns] logger.debug( "after slice - new_col_meta.shape: {} new_row_meta.shape: {}".format(new_col_meta.shape, new_row_meta.shape)) # make & return new gctoo instance new_gctoo = GCToo.GCToo(data_df=new_data_df, row_metadata_df=new_row_meta, col_metadata_df=new_col_meta) return new_gctoo
def truncate(self, app_label, schema_editor, models): """Truncate tables.""" for model_name in models: model = '%s_%s' % (app_label, model_name) schema_editor.execute( 'TRUNCATE TABLE %s RESTART IDENTITY CASCADE' % ( model.lower(), ), )
Truncate tables.
Below is the instruction that describes the task:
### Input:
Truncate tables.
### Response:
def truncate(self, app_label, schema_editor, models):
        """Truncate tables."""
        for model_name in models:
            model = '%s_%s' % (app_label, model_name)
            schema_editor.execute(
                'TRUNCATE TABLE %s RESTART IDENTITY CASCADE' % (
                    model.lower(),
                ),
            )
def __feed_backend_arthur(self, repo): """ Feed Ocean with backend data collected from arthur redis queue""" # Always get pending items from arthur for all data sources self.__feed_arthur() tag = self.backend_tag(repo) logger.debug("Arthur items available for %s", self.arthur_items.keys()) logger.debug("Getting arthur items for %s.", tag) if tag in self.arthur_items: logger.debug("Found items for %s.", tag) while self.arthur_items[tag]: yield self.arthur_items[tag].pop()
Feed Ocean with backend data collected from arthur redis queue
Below is the the instruction that describes the task: ### Input: Feed Ocean with backend data collected from arthur redis queue ### Response: def __feed_backend_arthur(self, repo): """ Feed Ocean with backend data collected from arthur redis queue""" # Always get pending items from arthur for all data sources self.__feed_arthur() tag = self.backend_tag(repo) logger.debug("Arthur items available for %s", self.arthur_items.keys()) logger.debug("Getting arthur items for %s.", tag) if tag in self.arthur_items: logger.debug("Found items for %s.", tag) while self.arthur_items[tag]: yield self.arthur_items[tag].pop()
def get_asset_composition_design_session(self, proxy): """Gets the session for creating asset compositions. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetCompositionDesignSession) - an ``AssetCompositionDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_composition_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_composition_design()`` is ``true``.* """ if not self.supports_asset_composition_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.AssetCompositionDesignSession(proxy=proxy, runtime=self._runtime)
Gets the session for creating asset compositions. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetCompositionDesignSession) - an ``AssetCompositionDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_composition_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_composition_design()`` is ``true``.*
Below is the the instruction that describes the task: ### Input: Gets the session for creating asset compositions. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetCompositionDesignSession) - an ``AssetCompositionDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_composition_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_composition_design()`` is ``true``.* ### Response: def get_asset_composition_design_session(self, proxy): """Gets the session for creating asset compositions. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetCompositionDesignSession) - an ``AssetCompositionDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_asset_composition_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_asset_composition_design()`` is ``true``.* """ if not self.supports_asset_composition_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.AssetCompositionDesignSession(proxy=proxy, runtime=self._runtime)
def extension (network, session, version, scn_extension, start_snapshot, end_snapshot, **kwargs): """ Function that adds an additional network to the existing network container. The new network can include every PyPSA-component (e.g. buses, lines, links). To connect it to the existing network, transformers are needed. All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table. The scn_name in the tables have to be labled with 'extension_' + scn_name (e.g. 'extension_nep2035'). Until now, the tables include three additional scenarios: 'nep2035_confirmed': all new lines and needed transformers planed in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA) 'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2 'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming electrical-neighbours Belgium and Norway Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035' Parameters ----- network : The existing network container (e.g. 
scenario 'NEP 2035') session : session-data overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_') start_snapshot, end_snapshot: Simulation time Returns ------ network : Network container including existing and additional network """ if version is None: ormcls_prefix = 'EgoGridPfHvExtension' else: ormcls_prefix = 'EgoPfHvExtension' # Adding overlay-network to existing network scenario = NetworkScenario(session, version = version, prefix=ormcls_prefix, method=kwargs.get('method', 'lopf'), start_snapshot=start_snapshot, end_snapshot=end_snapshot, scn_name='extension_' + scn_extension) network = scenario.build_network(network) # Allow lossless links to conduct bidirectional network.links.loc[network.links.efficiency == 1.0, 'p_min_pu'] = -1 # Set coordinates for new buses extension_buses = network.buses[network.buses.scn_name == 'extension_' + scn_extension] for idx, row in extension_buses.iterrows(): wkt_geom = to_shape(row['geom']) network.buses.loc[idx, 'x'] = wkt_geom.x network.buses.loc[idx, 'y'] = wkt_geom.y return network
Function that adds an additional network to the existing network container. The new network can include every PyPSA-component (e.g. buses, lines, links). To connect it to the existing network, transformers are needed. All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table. The scn_name in the tables have to be labled with 'extension_' + scn_name (e.g. 'extension_nep2035'). Until now, the tables include three additional scenarios: 'nep2035_confirmed': all new lines and needed transformers planed in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA) 'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2 'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming electrical-neighbours Belgium and Norway Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035' Parameters ----- network : The existing network container (e.g. scenario 'NEP 2035') session : session-data overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_') start_snapshot, end_snapshot: Simulation time Returns ------ network : Network container including existing and additional network
Below is the the instruction that describes the task: ### Input: Function that adds an additional network to the existing network container. The new network can include every PyPSA-component (e.g. buses, lines, links). To connect it to the existing network, transformers are needed. All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table. The scn_name in the tables have to be labled with 'extension_' + scn_name (e.g. 'extension_nep2035'). Until now, the tables include three additional scenarios: 'nep2035_confirmed': all new lines and needed transformers planed in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA) 'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2 'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming electrical-neighbours Belgium and Norway Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035' Parameters ----- network : The existing network container (e.g. scenario 'NEP 2035') session : session-data overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_') start_snapshot, end_snapshot: Simulation time Returns ------ network : Network container including existing and additional network ### Response: def extension (network, session, version, scn_extension, start_snapshot, end_snapshot, **kwargs): """ Function that adds an additional network to the existing network container. The new network can include every PyPSA-component (e.g. buses, lines, links). To connect it to the existing network, transformers are needed. All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table. The scn_name in the tables have to be labled with 'extension_' + scn_name (e.g. 'extension_nep2035'). 
Until now, the tables include three additional scenarios: 'nep2035_confirmed': all new lines and needed transformers planed in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA) 'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2 'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming electrical-neighbours Belgium and Norway Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035' Parameters ----- network : The existing network container (e.g. scenario 'NEP 2035') session : session-data overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_') start_snapshot, end_snapshot: Simulation time Returns ------ network : Network container including existing and additional network """ if version is None: ormcls_prefix = 'EgoGridPfHvExtension' else: ormcls_prefix = 'EgoPfHvExtension' # Adding overlay-network to existing network scenario = NetworkScenario(session, version = version, prefix=ormcls_prefix, method=kwargs.get('method', 'lopf'), start_snapshot=start_snapshot, end_snapshot=end_snapshot, scn_name='extension_' + scn_extension) network = scenario.build_network(network) # Allow lossless links to conduct bidirectional network.links.loc[network.links.efficiency == 1.0, 'p_min_pu'] = -1 # Set coordinates for new buses extension_buses = network.buses[network.buses.scn_name == 'extension_' + scn_extension] for idx, row in extension_buses.iterrows(): wkt_geom = to_shape(row['geom']) network.buses.loc[idx, 'x'] = wkt_geom.x network.buses.loc[idx, 'y'] = wkt_geom.y return network
def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( opts=__opts__, provider=__active_provider_name__ or __virtualname__, aliases=__virtual_aliases__, required_keys=('personal_access_token',) )
Return the first configured instance.
Below is the instruction that describes the task: ### Input: Return the first configured instance. ### Response: def get_configured_provider(): ''' Return the first configured instance. ''' return config.is_provider_configured( opts=__opts__, provider=__active_provider_name__ or __virtualname__, aliases=__virtual_aliases__, required_keys=('personal_access_token',) )
def chroma(chromagram, times, fs, **kwargs): """Reverse synthesis of a chromagram (semitone matrix) Parameters ---------- chromagram : np.ndarray, shape=(12, times.shape[0]) Chromagram matrix, where each row represents a semitone [C->Bb] i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to ``times[j + 1]`` times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2) Either the start time of each column in the chromagram, or the time interval corresponding to each column. fs : int Sampling rate to synthesize audio data at kwargs Additional keyword arguments to pass to :func:`mir_eval.sonify.time_frequency` Returns ------- output : np.ndarray Synthesized chromagram """ # We'll just use time_frequency with a Shepard tone-gram # To create the Shepard tone-gram, we copy the chromagram across 7 octaves n_octaves = 7 # starting from C2 base_note = 24 # and weight each octave by a normal distribution # The normal distribution has mean 72 (one octave above middle C) # and std 6 (one half octave) mean = 72 std = 6 notes = np.arange(12*n_octaves) + base_note shepard_weight = np.exp(-(notes - mean)**2./(2.*std**2.)) # Copy the chromagram matrix vertically n_octaves times gram = np.tile(chromagram.T, n_octaves).T # This fixes issues if the supplied chromagram is int type gram = gram.astype(float) # Apply Sheppard weighting gram *= shepard_weight.reshape(-1, 1) # Compute frequencies frequencies = 440.0*(2.0**((notes - 69)/12.0)) return time_frequency(gram, frequencies, times, fs, **kwargs)
Reverse synthesis of a chromagram (semitone matrix) Parameters ---------- chromagram : np.ndarray, shape=(12, times.shape[0]) Chromagram matrix, where each row represents a semitone [C->Bb] i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to ``times[j + 1]`` times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2) Either the start time of each column in the chromagram, or the time interval corresponding to each column. fs : int Sampling rate to synthesize audio data at kwargs Additional keyword arguments to pass to :func:`mir_eval.sonify.time_frequency` Returns ------- output : np.ndarray Synthesized chromagram
Below is the the instruction that describes the task: ### Input: Reverse synthesis of a chromagram (semitone matrix) Parameters ---------- chromagram : np.ndarray, shape=(12, times.shape[0]) Chromagram matrix, where each row represents a semitone [C->Bb] i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to ``times[j + 1]`` times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2) Either the start time of each column in the chromagram, or the time interval corresponding to each column. fs : int Sampling rate to synthesize audio data at kwargs Additional keyword arguments to pass to :func:`mir_eval.sonify.time_frequency` Returns ------- output : np.ndarray Synthesized chromagram ### Response: def chroma(chromagram, times, fs, **kwargs): """Reverse synthesis of a chromagram (semitone matrix) Parameters ---------- chromagram : np.ndarray, shape=(12, times.shape[0]) Chromagram matrix, where each row represents a semitone [C->Bb] i.e., ``chromagram[3, j]`` is the magnitude of D# from ``times[j]`` to ``times[j + 1]`` times: np.ndarray, shape=(len(chord_labels),) or (len(chord_labels), 2) Either the start time of each column in the chromagram, or the time interval corresponding to each column. 
fs : int Sampling rate to synthesize audio data at kwargs Additional keyword arguments to pass to :func:`mir_eval.sonify.time_frequency` Returns ------- output : np.ndarray Synthesized chromagram """ # We'll just use time_frequency with a Shepard tone-gram # To create the Shepard tone-gram, we copy the chromagram across 7 octaves n_octaves = 7 # starting from C2 base_note = 24 # and weight each octave by a normal distribution # The normal distribution has mean 72 (one octave above middle C) # and std 6 (one half octave) mean = 72 std = 6 notes = np.arange(12*n_octaves) + base_note shepard_weight = np.exp(-(notes - mean)**2./(2.*std**2.)) # Copy the chromagram matrix vertically n_octaves times gram = np.tile(chromagram.T, n_octaves).T # This fixes issues if the supplied chromagram is int type gram = gram.astype(float) # Apply Sheppard weighting gram *= shepard_weight.reshape(-1, 1) # Compute frequencies frequencies = 440.0*(2.0**((notes - 69)/12.0)) return time_frequency(gram, frequencies, times, fs, **kwargs)
def _get_nets_arin(self, *args, **kwargs): """ Deprecated. This will be removed in a future release. """ from warnings import warn warn('Whois._get_nets_arin() has been deprecated and will be ' 'removed. You should now use Whois.get_nets_arin().') return self.get_nets_arin(*args, **kwargs)
Deprecated. This will be removed in a future release.
Below is the instruction that describes the task: ### Input: Deprecated. This will be removed in a future release. ### Response: def _get_nets_arin(self, *args, **kwargs): """ Deprecated. This will be removed in a future release. """ from warnings import warn warn('Whois._get_nets_arin() has been deprecated and will be ' 'removed. You should now use Whois.get_nets_arin().') return self.get_nets_arin(*args, **kwargs)
def get_tables(self): """ Adds tables to the network. Example ------- >>> writer = UAIWriter(model) >>> writer.get_tables() """ if isinstance(self.model, BayesianModel): cpds = self.model.get_cpds() cpds.sort(key=lambda x: x.variable) tables = [] for cpd in cpds: values = list(map(str, cpd.values.ravel())) tables.append(values) return tables elif isinstance(self.model, MarkovModel): factors = self.model.get_factors() tables = [] for factor in factors: values = list(map(str, factor.values.ravel())) tables.append(values) return tables else: raise TypeError("Model must be an instance of Markov or Bayesian model.")
Adds tables to the network. Example ------- >>> writer = UAIWriter(model) >>> writer.get_tables()
Below is the instruction that describes the task: ### Input: Adds tables to the network. Example ------- >>> writer = UAIWriter(model) >>> writer.get_tables() ### Response: def get_tables(self): """ Adds tables to the network. Example ------- >>> writer = UAIWriter(model) >>> writer.get_tables() """ if isinstance(self.model, BayesianModel): cpds = self.model.get_cpds() cpds.sort(key=lambda x: x.variable) tables = [] for cpd in cpds: values = list(map(str, cpd.values.ravel())) tables.append(values) return tables elif isinstance(self.model, MarkovModel): factors = self.model.get_factors() tables = [] for factor in factors: values = list(map(str, factor.values.ravel())) tables.append(values) return tables else: raise TypeError("Model must be an instance of Markov or Bayesian model.")
def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None, offset=0): """Fill an HDF5 file with incoming images from a socket. Parameters ---------- socket : :class:`zmq.Socket` PULL socket on which to receive images. hdf5_file : :class:`h5py.File` instance HDF5 file handle to which to write. Assumes `features`, `targets` and `filenames` already exist and have first dimension larger than `sum(images_per_class)`. num_expected : int The number of items we expect to be sent over the socket. shuffle_seed : int or sequence, optional Seed for a NumPy random number generator that permutes the images on disk. offset : int, optional The offset in the HDF5 datasets at which to start writing received examples. Defaults to 0. """ with progress_bar('images', maxval=num_expected) as pb: if shuffle_seed is None: index_gen = iter(xrange(num_expected)) else: rng = numpy.random.RandomState(shuffle_seed) index_gen = iter(rng.permutation(num_expected)) for i, num in enumerate(index_gen): image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE) image_data = numpy.fromstring(socket.recv(), dtype='uint8') _write_to_hdf5(hdf5_file, num + offset, image_filename, image_data, class_index) pb.update(i + 1)
Fill an HDF5 file with incoming images from a socket. Parameters ---------- socket : :class:`zmq.Socket` PULL socket on which to receive images. hdf5_file : :class:`h5py.File` instance HDF5 file handle to which to write. Assumes `features`, `targets` and `filenames` already exist and have first dimension larger than `sum(images_per_class)`. num_expected : int The number of items we expect to be sent over the socket. shuffle_seed : int or sequence, optional Seed for a NumPy random number generator that permutes the images on disk. offset : int, optional The offset in the HDF5 datasets at which to start writing received examples. Defaults to 0.
Below is the the instruction that describes the task: ### Input: Fill an HDF5 file with incoming images from a socket. Parameters ---------- socket : :class:`zmq.Socket` PULL socket on which to receive images. hdf5_file : :class:`h5py.File` instance HDF5 file handle to which to write. Assumes `features`, `targets` and `filenames` already exist and have first dimension larger than `sum(images_per_class)`. num_expected : int The number of items we expect to be sent over the socket. shuffle_seed : int or sequence, optional Seed for a NumPy random number generator that permutes the images on disk. offset : int, optional The offset in the HDF5 datasets at which to start writing received examples. Defaults to 0. ### Response: def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None, offset=0): """Fill an HDF5 file with incoming images from a socket. Parameters ---------- socket : :class:`zmq.Socket` PULL socket on which to receive images. hdf5_file : :class:`h5py.File` instance HDF5 file handle to which to write. Assumes `features`, `targets` and `filenames` already exist and have first dimension larger than `sum(images_per_class)`. num_expected : int The number of items we expect to be sent over the socket. shuffle_seed : int or sequence, optional Seed for a NumPy random number generator that permutes the images on disk. offset : int, optional The offset in the HDF5 datasets at which to start writing received examples. Defaults to 0. """ with progress_bar('images', maxval=num_expected) as pb: if shuffle_seed is None: index_gen = iter(xrange(num_expected)) else: rng = numpy.random.RandomState(shuffle_seed) index_gen = iter(rng.permutation(num_expected)) for i, num in enumerate(index_gen): image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE) image_data = numpy.fromstring(socket.recv(), dtype='uint8') _write_to_hdf5(hdf5_file, num + offset, image_filename, image_data, class_index) pb.update(i + 1)
def formfield(self, form_class=None, choices_form_class=None, **kwargs): """ Returns a django.forms.Field instance for this database Field. """ defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) choices = [BLANK_CHOICE_DASH, ] if include_blank else [] choices.extend([ ( x.name, getattr(x, 'verbose_name', x.name) or x.name, getattr(x, 'help_text', None) or None ) for x in self.choices_class.constants() ]) defaults['choices'] = choices defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial'): del kwargs[k] defaults.update(kwargs) form_class = choices_form_class or ChoicesFormField return form_class(**defaults)
Returns a django.forms.Field instance for this database Field.
Below is the the instruction that describes the task: ### Input: Returns a django.forms.Field instance for this database Field. ### Response: def formfield(self, form_class=None, choices_form_class=None, **kwargs): """ Returns a django.forms.Field instance for this database Field. """ defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) choices = [BLANK_CHOICE_DASH, ] if include_blank else [] choices.extend([ ( x.name, getattr(x, 'verbose_name', x.name) or x.name, getattr(x, 'help_text', None) or None ) for x in self.choices_class.constants() ]) defaults['choices'] = choices defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial'): del kwargs[k] defaults.update(kwargs) form_class = choices_form_class or ChoicesFormField return form_class(**defaults)
def sample_hull(hull,domain,isDomainFinite): """sample_hull: Sample the upper hull Input: hull - hull structure (see setup_hull for a definition of this) domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? Output: a sample from the hull History: 2009-05-21 - Written - Bovy """ u= stats.uniform.rvs() #Find largest zs[jj] such that scum[jj] < u #The first bin is a special case if hull[5][0] >= u: if hull[3][0] == 0: if isDomainFinite[0]: thissample= domain[0]+u/hull[5][0]*(hull[4][0]-domain[0]) else: thissample= 100000000 #Throw some kind of error else: thissample= hull[4][0]+1./hull[3][0]*m.log(1.-hull[3][0]*hull[0]*(hull[5][0]-u)/m.exp(hull[6][0])) else: if len(hull[5]) == 1: indx= 0 else: indx= 1 while indx < len(hull[5]) and hull[5][indx] < u: indx= indx+1 indx= indx-1 if m.fabs(hull[3][indx+1]) == 0: if indx != (len(hull[5])-1): thissample= hull[4][indx]+(u-hull[5][indx])/(hull[5][indx+1]-hull[5][indx])*(hull[4][indx+1]-hull[4][indx]) else: if isDomainFinite[1]: thissample= hull[4][indx]+(u-hull[5][indx])/(1.-hull[5][indx])*(domain[1]-hull[4][indx]) else: thissample= 100000 #Throw some kind of error else: thissample= hull[4][indx]+1./hull[3][indx+1]*m.log(1.+hull[3][indx+1]*hull[0]*(u-hull[5][indx])/m.exp(hull[6][indx])) return thissample
sample_hull: Sample the upper hull Input: hull - hull structure (see setup_hull for a definition of this) domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? Output: a sample from the hull History: 2009-05-21 - Written - Bovy
Below is the the instruction that describes the task: ### Input: sample_hull: Sample the upper hull Input: hull - hull structure (see setup_hull for a definition of this) domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? Output: a sample from the hull History: 2009-05-21 - Written - Bovy ### Response: def sample_hull(hull,domain,isDomainFinite): """sample_hull: Sample the upper hull Input: hull - hull structure (see setup_hull for a definition of this) domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? Output: a sample from the hull History: 2009-05-21 - Written - Bovy """ u= stats.uniform.rvs() #Find largest zs[jj] such that scum[jj] < u #The first bin is a special case if hull[5][0] >= u: if hull[3][0] == 0: if isDomainFinite[0]: thissample= domain[0]+u/hull[5][0]*(hull[4][0]-domain[0]) else: thissample= 100000000 #Throw some kind of error else: thissample= hull[4][0]+1./hull[3][0]*m.log(1.-hull[3][0]*hull[0]*(hull[5][0]-u)/m.exp(hull[6][0])) else: if len(hull[5]) == 1: indx= 0 else: indx= 1 while indx < len(hull[5]) and hull[5][indx] < u: indx= indx+1 indx= indx-1 if m.fabs(hull[3][indx+1]) == 0: if indx != (len(hull[5])-1): thissample= hull[4][indx]+(u-hull[5][indx])/(hull[5][indx+1]-hull[5][indx])*(hull[4][indx+1]-hull[4][indx]) else: if isDomainFinite[1]: thissample= hull[4][indx]+(u-hull[5][indx])/(1.-hull[5][indx])*(domain[1]-hull[4][indx]) else: thissample= 100000 #Throw some kind of error else: thissample= hull[4][indx]+1./hull[3][indx+1]*m.log(1.+hull[3][indx+1]*hull[0]*(u-hull[5][indx])/m.exp(hull[6][indx])) return thissample
def _register_webhook(self, webhook_url, events): """Register webhook.""" response = self._request( MINUT_WEBHOOKS_URL, request_type='POST', json={ 'url': webhook_url, 'events': events, }, ) return response
Register webhook.
Below is the instruction that describes the task: ### Input: Register webhook. ### Response: def _register_webhook(self, webhook_url, events): """Register webhook.""" response = self._request( MINUT_WEBHOOKS_URL, request_type='POST', json={ 'url': webhook_url, 'events': events, }, ) return response
def closeConnection(self): """close current serial port connection""" print '%s call closeConnection' % self.port try: if self.handle: self.handle.close() self.handle = None except Exception, e: ModuleHelper.WriteIntoDebugLogger("closeConnection() Error: " + str(e))
close current serial port connection
Below is the instruction that describes the task: ### Input: close current serial port connection ### Response: def closeConnection(self): """close current serial port connection""" print '%s call closeConnection' % self.port try: if self.handle: self.handle.close() self.handle = None except Exception, e: ModuleHelper.WriteIntoDebugLogger("closeConnection() Error: " + str(e))
def unpack(self, token, **kwargs): """ Unpacks a JWT into its parts and base64 decodes the parts individually :param token: The JWT :param kwargs: A possible empty set of claims to verify the header against. """ if isinstance(token, str): try: token = token.encode("utf-8") except UnicodeDecodeError: pass part = split_token(token) self.b64part = part self.part = [b64d(p) for p in part] self.headers = json.loads(as_unicode(self.part[0])) for key,val in kwargs.items(): if not val and key in self.headers: continue try: _ok = self.verify_header(key,val) except KeyError: raise else: if not _ok: raise HeaderError( 'Expected "{}" to be "{}", was "{}"'.format( key, val, self.headers[key])) return self
Unpacks a JWT into its parts and base64 decodes the parts individually :param token: The JWT :param kwargs: A possible empty set of claims to verify the header against.
Below is the the instruction that describes the task: ### Input: Unpacks a JWT into its parts and base64 decodes the parts individually :param token: The JWT :param kwargs: A possible empty set of claims to verify the header against. ### Response: def unpack(self, token, **kwargs): """ Unpacks a JWT into its parts and base64 decodes the parts individually :param token: The JWT :param kwargs: A possible empty set of claims to verify the header against. """ if isinstance(token, str): try: token = token.encode("utf-8") except UnicodeDecodeError: pass part = split_token(token) self.b64part = part self.part = [b64d(p) for p in part] self.headers = json.loads(as_unicode(self.part[0])) for key,val in kwargs.items(): if not val and key in self.headers: continue try: _ok = self.verify_header(key,val) except KeyError: raise else: if not _ok: raise HeaderError( 'Expected "{}" to be "{}", was "{}"'.format( key, val, self.headers[key])) return self
def import_file(source, use_32bit_registry=False): ''' Import registry settings from a Windows ``REG`` file by invoking ``REG.EXE``. .. versionadded:: 2018.3.0 Args: source (str): The full path of the ``REG`` file. This can be either a local file path or a URL type supported by salt (e.g. ``salt://salt_master_path``) use_32bit_registry (bool): If the value of this parameter is ``True`` then the ``REG`` file will be imported into the Windows 32 bit registry. Otherwise the Windows 64 bit registry will be used. Returns: bool: True if successful, otherwise an error is raised Raises: ValueError: If the value of ``source`` is an invalid path or otherwise causes ``cp.cache_file`` to return ``False`` CommandExecutionError: If ``reg.exe`` exits with a non-0 exit code CLI Example: .. code-block:: bash salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg ''' cache_path = __salt__['cp.cache_file'](source) if not cache_path: error_msg = "File/URL '{0}' probably invalid.".format(source) raise ValueError(error_msg) if use_32bit_registry: word_sz_txt = "32" else: word_sz_txt = "64" cmd = 'reg import "{0}" /reg:{1}'.format(cache_path, word_sz_txt) cmd_ret_dict = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = cmd_ret_dict['retcode'] if retcode != 0: raise CommandExecutionError( 'reg.exe import failed', info=cmd_ret_dict ) return True
Import registry settings from a Windows ``REG`` file by invoking ``REG.EXE``. .. versionadded:: 2018.3.0 Args: source (str): The full path of the ``REG`` file. This can be either a local file path or a URL type supported by salt (e.g. ``salt://salt_master_path``) use_32bit_registry (bool): If the value of this parameter is ``True`` then the ``REG`` file will be imported into the Windows 32 bit registry. Otherwise the Windows 64 bit registry will be used. Returns: bool: True if successful, otherwise an error is raised Raises: ValueError: If the value of ``source`` is an invalid path or otherwise causes ``cp.cache_file`` to return ``False`` CommandExecutionError: If ``reg.exe`` exits with a non-0 exit code CLI Example: .. code-block:: bash salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg
Below is the the instruction that describes the task: ### Input: Import registry settings from a Windows ``REG`` file by invoking ``REG.EXE``. .. versionadded:: 2018.3.0 Args: source (str): The full path of the ``REG`` file. This can be either a local file path or a URL type supported by salt (e.g. ``salt://salt_master_path``) use_32bit_registry (bool): If the value of this parameter is ``True`` then the ``REG`` file will be imported into the Windows 32 bit registry. Otherwise the Windows 64 bit registry will be used. Returns: bool: True if successful, otherwise an error is raised Raises: ValueError: If the value of ``source`` is an invalid path or otherwise causes ``cp.cache_file`` to return ``False`` CommandExecutionError: If ``reg.exe`` exits with a non-0 exit code CLI Example: .. code-block:: bash salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg ### Response: def import_file(source, use_32bit_registry=False): ''' Import registry settings from a Windows ``REG`` file by invoking ``REG.EXE``. .. versionadded:: 2018.3.0 Args: source (str): The full path of the ``REG`` file. This can be either a local file path or a URL type supported by salt (e.g. ``salt://salt_master_path``) use_32bit_registry (bool): If the value of this parameter is ``True`` then the ``REG`` file will be imported into the Windows 32 bit registry. Otherwise the Windows 64 bit registry will be used. Returns: bool: True if successful, otherwise an error is raised Raises: ValueError: If the value of ``source`` is an invalid path or otherwise causes ``cp.cache_file`` to return ``False`` CommandExecutionError: If ``reg.exe`` exits with a non-0 exit code CLI Example: .. 
code-block:: bash salt machine1 reg.import_file salt://win/printer_config/110_Canon/postinstall_config.reg ''' cache_path = __salt__['cp.cache_file'](source) if not cache_path: error_msg = "File/URL '{0}' probably invalid.".format(source) raise ValueError(error_msg) if use_32bit_registry: word_sz_txt = "32" else: word_sz_txt = "64" cmd = 'reg import "{0}" /reg:{1}'.format(cache_path, word_sz_txt) cmd_ret_dict = __salt__['cmd.run_all'](cmd, python_shell=True) retcode = cmd_ret_dict['retcode'] if retcode != 0: raise CommandExecutionError( 'reg.exe import failed', info=cmd_ret_dict ) return True
def sync_month_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier num_months = self._num_months(start_date, end_date) first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1) metric_key_date_range = self._get_weekly_date_range( first_of_month, relativedelta(months=num_months)) month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count()) #generate a list of first_of_month's in between the start date and the end date months_to_update = list(itertools.islice(month_date_generator, num_months)) for uid in unique_identifier: for single_metric in metric: for month in months_to_update: _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1]) month_counter = sum([value for key, value in series_results.items()]) hash_key_monthly = self._get_weekly_metric_key(uid, month) monthly_metric_name = self._get_monthly_metric_name(single_metric, month) with self._analytics_backend.map() as conn: conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end
Below is the the instruction that describes the task: ### Input: Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end ### Response: def sync_month_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. 
:param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier num_months = self._num_months(start_date, end_date) first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1) metric_key_date_range = self._get_weekly_date_range( first_of_month, relativedelta(months=num_months)) month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count()) #generate a list of first_of_month's in between the start date and the end date months_to_update = list(itertools.islice(month_date_generator, num_months)) for uid in unique_identifier: for single_metric in metric: for month in months_to_update: _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1]) month_counter = sum([value for key, value in series_results.items()]) hash_key_monthly = self._get_weekly_metric_key(uid, month) monthly_metric_name = self._get_monthly_metric_name(single_metric, month) with self._analytics_backend.map() as conn: conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
def _purge_jobs(timestamp): ''' Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cursor: try: sql = 'delete from jids where jid in (select distinct jid from salt_returns where alter_time < %s)' cursor.execute(sql, (timestamp,)) cursor.execute('COMMIT') except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err try: sql = 'delete from salt_returns where alter_time < %s' cursor.execute(sql, (timestamp,)) cursor.execute('COMMIT') except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err try: sql = 'delete from salt_events where alter_time < %s' cursor.execute(sql, (timestamp,)) cursor.execute('COMMIT') except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err return True
Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return:
Below is the the instruction that describes the task: ### Input: Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return: ### Response: def _purge_jobs(timestamp): ''' Purge records from the returner tables. :param job_age_in_seconds: Purge jobs older than this :return: ''' with _get_serv() as cursor: try: sql = 'delete from jids where jid in (select distinct jid from salt_returns where alter_time < %s)' cursor.execute(sql, (timestamp,)) cursor.execute('COMMIT') except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err try: sql = 'delete from salt_returns where alter_time < %s' cursor.execute(sql, (timestamp,)) cursor.execute('COMMIT') except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err try: sql = 'delete from salt_events where alter_time < %s' cursor.execute(sql, (timestamp,)) cursor.execute('COMMIT') except psycopg2.DatabaseError as err: error = err.args sys.stderr.write(six.text_type(error)) cursor.execute("ROLLBACK") raise err return True
def show_command(endpoint_id, rule_id): """ Executor for `globus endpoint permission show` """ client = get_client() rule = client.get_endpoint_acl_rule(endpoint_id, rule_id) formatted_print( rule, text_format=FORMAT_TEXT_RECORD, fields=( ("Rule ID", "id"), ("Permissions", "permissions"), ("Shared With", _shared_with_keyfunc), ("Path", "path"), ), )
Executor for `globus endpoint permission show`
Below is the the instruction that describes the task: ### Input: Executor for `globus endpoint permission show` ### Response: def show_command(endpoint_id, rule_id): """ Executor for `globus endpoint permission show` """ client = get_client() rule = client.get_endpoint_acl_rule(endpoint_id, rule_id) formatted_print( rule, text_format=FORMAT_TEXT_RECORD, fields=( ("Rule ID", "id"), ("Permissions", "permissions"), ("Shared With", _shared_with_keyfunc), ("Path", "path"), ), )
def append(self, key, value=MARKER, replace=True): ''' Append the item to the metadata. ''' return self.add_item(key, value, replace=replace)
Append the item to the metadata.
Below is the instruction that describes the task: ### Input: Append the item to the metadata. ### Response: def append(self, key, value=MARKER, replace=True): ''' Append the item to the metadata. ''' return self.add_item(key, value, replace=replace)
def ray_trace(self, origin, end_point, first_point=False, plot=False, off_screen=False): """ Performs a single ray trace calculation given a mesh and a line segment defined by an origin and end_point. Parameters ---------- origin : np.ndarray or list Start of the line segment. end_point : np.ndarray or list End of the line segment. first_point : bool, optional Returns intersection of first point only. plot : bool, optional Plots ray trace results off_screen : bool, optional Plots off screen. Used for unit testing. Returns ------- intersection_points : np.ndarray Location of the intersection points. Empty array if no intersections. intersection_cells : np.ndarray Indices of the intersection cells. Empty array if no intersections. """ points = vtk.vtkPoints() cell_ids = vtk.vtkIdList() code = self.obbTree.IntersectWithLine(np.array(origin), np.array(end_point), points, cell_ids) intersection_points = vtk_to_numpy(points.GetData()) if first_point and intersection_points.shape[0] >= 1: intersection_points = intersection_points[0] intersection_cells = [] if intersection_points.any(): if first_point: ncells = 1 else: ncells = cell_ids.GetNumberOfIds() for i in range(ncells): intersection_cells.append(cell_ids.GetId(i)) intersection_cells = np.array(intersection_cells) if plot: plotter = vtki.Plotter(off_screen=off_screen) plotter.add_mesh(self, label='Test Mesh') segment = np.array([origin, end_point]) plotter.add_lines(segment, 'b', label='Ray Segment') plotter.add_mesh(intersection_points, 'r', point_size=10, label='Intersection Points') plotter.add_legend() plotter.add_axes() plotter.show() return intersection_points, intersection_cells
Performs a single ray trace calculation given a mesh and a line segment defined by an origin and end_point. Parameters ---------- origin : np.ndarray or list Start of the line segment. end_point : np.ndarray or list End of the line segment. first_point : bool, optional Returns intersection of first point only. plot : bool, optional Plots ray trace results off_screen : bool, optional Plots off screen. Used for unit testing. Returns ------- intersection_points : np.ndarray Location of the intersection points. Empty array if no intersections. intersection_cells : np.ndarray Indices of the intersection cells. Empty array if no intersections.
Below is the the instruction that describes the task: ### Input: Performs a single ray trace calculation given a mesh and a line segment defined by an origin and end_point. Parameters ---------- origin : np.ndarray or list Start of the line segment. end_point : np.ndarray or list End of the line segment. first_point : bool, optional Returns intersection of first point only. plot : bool, optional Plots ray trace results off_screen : bool, optional Plots off screen. Used for unit testing. Returns ------- intersection_points : np.ndarray Location of the intersection points. Empty array if no intersections. intersection_cells : np.ndarray Indices of the intersection cells. Empty array if no intersections. ### Response: def ray_trace(self, origin, end_point, first_point=False, plot=False, off_screen=False): """ Performs a single ray trace calculation given a mesh and a line segment defined by an origin and end_point. Parameters ---------- origin : np.ndarray or list Start of the line segment. end_point : np.ndarray or list End of the line segment. first_point : bool, optional Returns intersection of first point only. plot : bool, optional Plots ray trace results off_screen : bool, optional Plots off screen. Used for unit testing. Returns ------- intersection_points : np.ndarray Location of the intersection points. Empty array if no intersections. intersection_cells : np.ndarray Indices of the intersection cells. Empty array if no intersections. 
""" points = vtk.vtkPoints() cell_ids = vtk.vtkIdList() code = self.obbTree.IntersectWithLine(np.array(origin), np.array(end_point), points, cell_ids) intersection_points = vtk_to_numpy(points.GetData()) if first_point and intersection_points.shape[0] >= 1: intersection_points = intersection_points[0] intersection_cells = [] if intersection_points.any(): if first_point: ncells = 1 else: ncells = cell_ids.GetNumberOfIds() for i in range(ncells): intersection_cells.append(cell_ids.GetId(i)) intersection_cells = np.array(intersection_cells) if plot: plotter = vtki.Plotter(off_screen=off_screen) plotter.add_mesh(self, label='Test Mesh') segment = np.array([origin, end_point]) plotter.add_lines(segment, 'b', label='Ray Segment') plotter.add_mesh(intersection_points, 'r', point_size=10, label='Intersection Points') plotter.add_legend() plotter.add_axes() plotter.show() return intersection_points, intersection_cells
def increment(method): """ Static method used to increment the depth of a context belonging to 'method' :param function method: A method with a context :rtype: caliendo.hooks.Context :returns: The context instance for the method. """ if not hasattr(method, '__context'): raise ContextException("Method does not have context!") ctxt = getattr(method, '__context') ctxt.enter() return ctxt
Static method used to increment the depth of a context belonging to 'method' :param function method: A method with a context :rtype: caliendo.hooks.Context :returns: The context instance for the method.
Below is the instruction that describes the task: ### Input: Static method used to increment the depth of a context belonging to 'method' :param function method: A method with a context :rtype: caliendo.hooks.Context :returns: The context instance for the method. ### Response: def increment(method): """ Static method used to increment the depth of a context belonging to 'method' :param function method: A method with a context :rtype: caliendo.hooks.Context :returns: The context instance for the method. """ if not hasattr(method, '__context'): raise ContextException("Method does not have context!") ctxt = getattr(method, '__context') ctxt.enter() return ctxt
def _remap_cortex_out(cortex_out, region, out_file): """Remap coordinates in local cortex variant calls to the original global region. """ def _remap_vcf_line(line, contig, start): parts = line.split("\t") if parts[0] == "" or parts[1] == "": return None parts[0] = contig try: parts[1] = str(int(parts[1]) + start) except ValueError: raise ValueError("Problem in {0} with \n{1}".format( cortex_out, parts)) return "\t".join(parts) def _not_filtered(line): parts = line.split("\t") return parts[6] == "PASS" contig, start, _ = region start = int(start) with open(cortex_out) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("##fileDate"): pass elif line.startswith("#"): out_handle.write(line) elif _not_filtered(line): update_line = _remap_vcf_line(line, contig, start) if update_line: out_handle.write(update_line)
Remap coordinates in local cortex variant calls to the original global region.
Below is the the instruction that describes the task: ### Input: Remap coordinates in local cortex variant calls to the original global region. ### Response: def _remap_cortex_out(cortex_out, region, out_file): """Remap coordinates in local cortex variant calls to the original global region. """ def _remap_vcf_line(line, contig, start): parts = line.split("\t") if parts[0] == "" or parts[1] == "": return None parts[0] = contig try: parts[1] = str(int(parts[1]) + start) except ValueError: raise ValueError("Problem in {0} with \n{1}".format( cortex_out, parts)) return "\t".join(parts) def _not_filtered(line): parts = line.split("\t") return parts[6] == "PASS" contig, start, _ = region start = int(start) with open(cortex_out) as in_handle: with open(out_file, "w") as out_handle: for line in in_handle: if line.startswith("##fileDate"): pass elif line.startswith("#"): out_handle.write(line) elif _not_filtered(line): update_line = _remap_vcf_line(line, contig, start) if update_line: out_handle.write(update_line)
def normalize_alleles_left(ref, start, stop, alleles, bound, ref_step, shuffle=True): """ Normalize loci by removing extraneous reference padding >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2) shuffled_alleles(start=1, stop=2, alleles='A') """ normalized_alleles = namedtuple('shuffled_alleles', 'start stop alleles') if len(alleles) < 2: return normalized_alleles(start, stop, alleles) # STEP 1: Trim common suffix trimmed, alleles = trim_common_suffixes(alleles) stop -= trimmed # STEP 2: Trim common prefix trimmed, alleles = trim_common_prefixes(alleles) start += trimmed # assert bound <= start,'start={:d}, left bound={:d}'.format(start, bound) # STEP 3: While a null allele exists, left shuffle by prepending alleles # with reference and trimming common suffixes while shuffle and '' in alleles and start > bound: step = min(ref_step, start - bound) r = ref[start - step:start].upper() new_alleles = [r + a for a in alleles] trimmed, new_alleles = trim_common_suffixes(new_alleles) if not trimmed: break start -= trimmed stop -= trimmed if trimmed == step: alleles = new_alleles else: left = step - trimmed alleles = [a[left:] for a in new_alleles] break return normalized_alleles(start, stop, tuple(alleles))
Normalize loci by removing extraneous reference padding >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2) shuffled_alleles(start=1, stop=2, alleles='A')
Below is the the instruction that describes the task: ### Input: Normalize loci by removing extraneous reference padding >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2) shuffled_alleles(start=1, stop=2, alleles='A') ### Response: def normalize_alleles_left(ref, start, stop, alleles, bound, ref_step, shuffle=True): """ Normalize loci by removing extraneous reference padding >>> normalize_alleles_left('A', 1, 2, 'A', 1, 2) shuffled_alleles(start=1, stop=2, alleles='A') """ normalized_alleles = namedtuple('shuffled_alleles', 'start stop alleles') if len(alleles) < 2: return normalized_alleles(start, stop, alleles) # STEP 1: Trim common suffix trimmed, alleles = trim_common_suffixes(alleles) stop -= trimmed # STEP 2: Trim common prefix trimmed, alleles = trim_common_prefixes(alleles) start += trimmed # assert bound <= start,'start={:d}, left bound={:d}'.format(start, bound) # STEP 3: While a null allele exists, left shuffle by prepending alleles # with reference and trimming common suffixes while shuffle and '' in alleles and start > bound: step = min(ref_step, start - bound) r = ref[start - step:start].upper() new_alleles = [r + a for a in alleles] trimmed, new_alleles = trim_common_suffixes(new_alleles) if not trimmed: break start -= trimmed stop -= trimmed if trimmed == step: alleles = new_alleles else: left = step - trimmed alleles = [a[left:] for a in new_alleles] break return normalized_alleles(start, stop, tuple(alleles))
def do_help(self, arg): """ Show help on all commands. """ print(self.response_prompt, file=self.stdout) return cmd.Cmd.do_help(self, arg)
Show help on all commands.
Below is the instruction that describes the task: ### Input: Show help on all commands. ### Response: def do_help(self, arg): """ Show help on all commands. """ print(self.response_prompt, file=self.stdout) return cmd.Cmd.do_help(self, arg)
def set_canonical_host(self, canonical_host): """ Set host and port from a canonical host string as for the Host HTTP header specification. """ parts = canonical_host.lower().split(":") self.host = parts[0] if len(parts) > 1 and parts[1]: self.port = int(parts[1]) else: self.port = None
Set host and port from a canonical host string as for the Host HTTP header specification.
Below is the instruction that describes the task: ### Input: Set host and port from a canonical host string as for the Host HTTP header specification. ### Response: def set_canonical_host(self, canonical_host): """ Set host and port from a canonical host string as for the Host HTTP header specification. """ parts = canonical_host.lower().split(":") self.host = parts[0] if len(parts) > 1 and parts[1]: self.port = int(parts[1]) else: self.port = None
def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None): """ Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file. """ require_string(path, "Path to MAF") n_basic_columns = len(MAF_COLUMN_NAMES) # pylint: disable=no-member # pylint gets confused by read_csv df = pandas.read_csv( path, comment="#", sep="\t", low_memory=False, skip_blank_lines=True, header=0, encoding=encoding) if len(df.columns) < n_basic_columns: error_message = ( "Too few columns in MAF file %s, expected %d but got %d : %s" % ( path, n_basic_columns, len(df.columns), df.columns)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) # check each pair of expected/actual column names to make sure they match for expected, actual in zip(MAF_COLUMN_NAMES, df.columns): if expected != actual: # MAFs in the wild have capitalization differences in their # column names, normalize them to always use the names above if expected.lower() == actual.lower(): # using DataFrame.rename in Python 2.7.x doesn't seem to # work for some files, possibly because Pandas treats # unicode vs. str columns as different? df[expected] = df[actual] del df[actual] else: error_message = ( "Expected column %s but got %s" % (expected, actual)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) return df
Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file.
Below is the the instruction that describes the task: ### Input: Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file. ### Response: def load_maf_dataframe(path, nrows=None, raise_on_error=True, encoding=None): """ Load the guaranteed columns of a TCGA MAF file into a DataFrame Parameters ---------- path : str Path to MAF file nrows : int Optional limit to number of rows loaded raise_on_error : bool Raise an exception upon encountering an error or log an error encoding : str, optional Encoding to use for UTF when reading MAF file. """ require_string(path, "Path to MAF") n_basic_columns = len(MAF_COLUMN_NAMES) # pylint: disable=no-member # pylint gets confused by read_csv df = pandas.read_csv( path, comment="#", sep="\t", low_memory=False, skip_blank_lines=True, header=0, encoding=encoding) if len(df.columns) < n_basic_columns: error_message = ( "Too few columns in MAF file %s, expected %d but got %d : %s" % ( path, n_basic_columns, len(df.columns), df.columns)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) # check each pair of expected/actual column names to make sure they match for expected, actual in zip(MAF_COLUMN_NAMES, df.columns): if expected != actual: # MAFs in the wild have capitalization differences in their # column names, normalize them to always use the names above if expected.lower() == actual.lower(): # using DataFrame.rename in Python 2.7.x doesn't seem to # work for some files, possibly because Pandas treats # unicode vs. str columns as different? 
df[expected] = df[actual] del df[actual] else: error_message = ( "Expected column %s but got %s" % (expected, actual)) if raise_on_error: raise ValueError(error_message) else: logging.warn(error_message) return df
def find_element_by_partial_link_text(self, link_text): """ Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_partial_link_text('Sign') """ return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_partial_link_text('Sign')
Below is the the instruction that describes the task: ### Input: Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_partial_link_text('Sign') ### Response: def find_element_by_partial_link_text(self, link_text): """ Finds an element by a partial match of its link text. :Args: - link_text: The text of the element to partially match on. :Returns: - WebElement - the element if it was found :Raises: - NoSuchElementException - if the element wasn't found :Usage: :: element = driver.find_element_by_partial_link_text('Sign') """ return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def get_all_rotated_notes(notes): """ Get all rotated notes get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]] :type notes: list[str] :rtype: list[list[str]] """ notes_list = [] for x in range(len(notes)): notes_list.append(notes[x:] + notes[:x]) return notes_list
Get all rotated notes get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]] :type notes: list[str] :rtype: list[list[str]]
Below is the instruction that describes the task: ### Input: Get all rotated notes get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]] :type notes: list[str] :rtype: list[list[str]] ### Response: def get_all_rotated_notes(notes): """ Get all rotated notes get_all_rotated_notes([1,3,5]) -> [[1,3,5],[3,5,1],[5,1,3]] :type notes: list[str] :rtype: list[list[str]] """ notes_list = [] for x in range(len(notes)): notes_list.append(notes[x:] + notes[:x]) return notes_list
def create(self, request, *args, **kwargs): """ To create new push hook issue **POST** against */api/hooks-push/* as an authenticated user. You should specify list of event_types or event_groups. Example of a request: .. code-block:: http POST /api/hooks-push/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "event_types": ["resource_start_succeeded"], "event_groups": ["users"], "type": "Android" } You may temporarily disable hook without deleting it by issuing following **PATCH** request against hook URL: .. code-block:: javascript { "is_active": "false" } """ return super(PushHookViewSet, self).create(request, *args, **kwargs)
To create new push hook issue **POST** against */api/hooks-push/* as an authenticated user. You should specify list of event_types or event_groups. Example of a request: .. code-block:: http POST /api/hooks-push/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "event_types": ["resource_start_succeeded"], "event_groups": ["users"], "type": "Android" } You may temporarily disable hook without deleting it by issuing following **PATCH** request against hook URL: .. code-block:: javascript { "is_active": "false" }
Below is the the instruction that describes the task: ### Input: To create new push hook issue **POST** against */api/hooks-push/* as an authenticated user. You should specify list of event_types or event_groups. Example of a request: .. code-block:: http POST /api/hooks-push/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "event_types": ["resource_start_succeeded"], "event_groups": ["users"], "type": "Android" } You may temporarily disable hook without deleting it by issuing following **PATCH** request against hook URL: .. code-block:: javascript { "is_active": "false" } ### Response: def create(self, request, *args, **kwargs): """ To create new push hook issue **POST** against */api/hooks-push/* as an authenticated user. You should specify list of event_types or event_groups. Example of a request: .. code-block:: http POST /api/hooks-push/ HTTP/1.1 Content-Type: application/json Accept: application/json Authorization: Token c84d653b9ec92c6cbac41c706593e66f567a7fa4 Host: example.com { "event_types": ["resource_start_succeeded"], "event_groups": ["users"], "type": "Android" } You may temporarily disable hook without deleting it by issuing following **PATCH** request against hook URL: .. code-block:: javascript { "is_active": "false" } """ return super(PushHookViewSet, self).create(request, *args, **kwargs)
def end_of_paragraph(self, count=1, after=False): """ Return the end of the current paragraph. (Relative cursor position.) """ def match_func(text): return not text or text.isspace() line_index = self.find_next_matching_line(match_func=match_func, count=count) if line_index: add = 0 if after else 1 return max(0, self.get_cursor_down_position(count=line_index) - add) else: return len(self.text_after_cursor)
Return the end of the current paragraph. (Relative cursor position.)
Below is the instruction that describes the task: ### Input: Return the end of the current paragraph. (Relative cursor position.) ### Response: def end_of_paragraph(self, count=1, after=False): """ Return the end of the current paragraph. (Relative cursor position.) """ def match_func(text): return not text or text.isspace() line_index = self.find_next_matching_line(match_func=match_func, count=count) if line_index: add = 0 if after else 1 return max(0, self.get_cursor_down_position(count=line_index) - add) else: return len(self.text_after_cursor)
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): """ Given an event_source dictionary, create the object and add the event source. """ event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) # TODO: Detect changes in config and refine exists algorithm if not dry: if not event_source_obj.status(funk): event_source_obj.add(funk) return 'successful' if event_source_obj.status(funk) else 'failed' else: return 'exists' return 'dryrun'
Given an event_source dictionary, create the object and add the event source.
Below is the instruction that describes the task: ### Input: Given an event_source dictionary, create the object and add the event source. ### Response: def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): """ Given an event_source dictionary, create the object and add the event source. """ event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) # TODO: Detect changes in config and refine exists algorithm if not dry: if not event_source_obj.status(funk): event_source_obj.add(funk) return 'successful' if event_source_obj.status(funk) else 'failed' else: return 'exists' return 'dryrun'
def _insert_optional_roles(cursor, model, ident): """Inserts the optional roles if values for the optional roles exist. """ optional_roles = [ # (<metadata-attr>, <db-role-id>,), ('translators', 4,), ('editors', 5,), ] for attr, role_id in optional_roles: roles = model.metadata.get(attr) if not roles: # Bail out, no roles for this type. continue usernames = [parse_user_uri(x['id']) for x in roles] cursor.execute("""\ INSERT INTO moduleoptionalroles (module_ident, roleid, personids) VALUES (%s, %s, %s)""", (ident, role_id, usernames,))
Inserts the optional roles if values for the optional roles exist.
Below is the the instruction that describes the task: ### Input: Inserts the optional roles if values for the optional roles exist. ### Response: def _insert_optional_roles(cursor, model, ident): """Inserts the optional roles if values for the optional roles exist. """ optional_roles = [ # (<metadata-attr>, <db-role-id>,), ('translators', 4,), ('editors', 5,), ] for attr, role_id in optional_roles: roles = model.metadata.get(attr) if not roles: # Bail out, no roles for this type. continue usernames = [parse_user_uri(x['id']) for x in roles] cursor.execute("""\ INSERT INTO moduleoptionalroles (module_ident, roleid, personids) VALUES (%s, %s, %s)""", (ident, role_id, usernames,))
def modify(db=None, sql=None): ''' Issue an SQL query to sqlite3 (with no return data), usually used to modify the database in some way (insert, delete, create, etc) CLI Example: .. code-block:: bash salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);' ''' cur = _connect(db) if not cur: return False cur.execute(sql) return True
Issue an SQL query to sqlite3 (with no return data), usually used to modify the database in some way (insert, delete, create, etc) CLI Example: .. code-block:: bash salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);'
Below is the instruction that describes the task: ### Input: Issue an SQL query to sqlite3 (with no return data), usually used to modify the database in some way (insert, delete, create, etc) CLI Example: .. code-block:: bash salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);' ### Response: def modify(db=None, sql=None): ''' Issue an SQL query to sqlite3 (with no return data), usually used to modify the database in some way (insert, delete, create, etc) CLI Example: .. code-block:: bash salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);' ''' cur = _connect(db) if not cur: return False cur.execute(sql) return True
def _set_residue_map(self): """ map each residue to the corresponding molecule. """ self.map_residue_to_mol = {} lookup = {} for idx, mol in enumerate(self.mols): if not mol.formula in lookup: mol.translate_sites(indices=range(len(mol)), vector=-mol.center_of_mass) lookup[mol.formula] = mol.copy() self.map_residue_to_mol["ml{}".format(idx + 1)] = lookup[mol.formula]
map each residue to the corresponding molecule.
Below is the instruction that describes the task: ### Input: map each residue to the corresponding molecule. ### Response: def _set_residue_map(self): """ map each residue to the corresponding molecule. """ self.map_residue_to_mol = {} lookup = {} for idx, mol in enumerate(self.mols): if not mol.formula in lookup: mol.translate_sites(indices=range(len(mol)), vector=-mol.center_of_mass) lookup[mol.formula] = mol.copy() self.map_residue_to_mol["ml{}".format(idx + 1)] = lookup[mol.formula]
def __get_activities(self, search): """returns list of activities for autocomplete, activity names converted to lowercase""" query = """ SELECT a.name AS name, b.name AS category FROM activities a LEFT JOIN categories b ON coalesce(b.id, -1) = a.category_id LEFT JOIN facts f ON a.id = f.activity_id WHERE deleted IS NULL AND a.search_name LIKE ? ESCAPE '\\' GROUP BY a.id ORDER BY max(f.start_time) DESC, lower(a.name) LIMIT 50 """ search = search.lower() search = search.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_') activities = self.fetchall(query, ('%s%%' % search, )) return activities
returns list of activities for autocomplete, activity names converted to lowercase
Below is the the instruction that describes the task: ### Input: returns list of activities for autocomplete, activity names converted to lowercase ### Response: def __get_activities(self, search): """returns list of activities for autocomplete, activity names converted to lowercase""" query = """ SELECT a.name AS name, b.name AS category FROM activities a LEFT JOIN categories b ON coalesce(b.id, -1) = a.category_id LEFT JOIN facts f ON a.id = f.activity_id WHERE deleted IS NULL AND a.search_name LIKE ? ESCAPE '\\' GROUP BY a.id ORDER BY max(f.start_time) DESC, lower(a.name) LIMIT 50 """ search = search.lower() search = search.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_') activities = self.fetchall(query, ('%s%%' % search, )) return activities
def remove_out_of_bounds_bins(df, chromosome_size): # type: (pd.DataFrame, int) -> pd.DataFrame """Remove all reads that were shifted outside of the genome endpoints.""" # The dataframe is empty and contains no bins out of bounds if "Bin" not in df: return df df = df.drop(df[df.Bin > chromosome_size].index) return df.drop(df[df.Bin < 0].index)
Remove all reads that were shifted outside of the genome endpoints.
Below is the the instruction that describes the task: ### Input: Remove all reads that were shifted outside of the genome endpoints. ### Response: def remove_out_of_bounds_bins(df, chromosome_size): # type: (pd.DataFrame, int) -> pd.DataFrame """Remove all reads that were shifted outside of the genome endpoints.""" # The dataframe is empty and contains no bins out of bounds if "Bin" not in df: return df df = df.drop(df[df.Bin > chromosome_size].index) return df.drop(df[df.Bin < 0].index)
def main(argv=None): """ Run wake on lan as a CLI application. """ parser = argparse.ArgumentParser( description='Wake one or more computers using the wake on lan' ' protocol.') parser.add_argument( 'macs', metavar='mac address', nargs='+', help='The mac addresses or of the computers you are trying to wake.') parser.add_argument( '-i', metavar='ip', default=BROADCAST_IP, help='The ip address of the host to send the magic packet to.' ' (default {})'.format(BROADCAST_IP)) parser.add_argument( '-p', metavar='port', type=int, default=DEFAULT_PORT, help='The port of the host to send the magic packet to (default 9)') args = parser.parse_args(argv) send_magic_packet(*args.macs, ip_address=args.i, port=args.p)
Run wake on lan as a CLI application.
Below is the the instruction that describes the task: ### Input: Run wake on lan as a CLI application. ### Response: def main(argv=None): """ Run wake on lan as a CLI application. """ parser = argparse.ArgumentParser( description='Wake one or more computers using the wake on lan' ' protocol.') parser.add_argument( 'macs', metavar='mac address', nargs='+', help='The mac addresses or of the computers you are trying to wake.') parser.add_argument( '-i', metavar='ip', default=BROADCAST_IP, help='The ip address of the host to send the magic packet to.' ' (default {})'.format(BROADCAST_IP)) parser.add_argument( '-p', metavar='port', type=int, default=DEFAULT_PORT, help='The port of the host to send the magic packet to (default 9)') args = parser.parse_args(argv) send_magic_packet(*args.macs, ip_address=args.i, port=args.p)
def deflections_from_grid(self, grid): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ return self.grid_to_grid_cartesian(grid=grid, radius=np.full(grid.shape[0], 2.0 * self.einstein_radius_rescaled))
Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on.
Below is the the instruction that describes the task: ### Input: Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. ### Response: def deflections_from_grid(self, grid): """ Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. """ return self.grid_to_grid_cartesian(grid=grid, radius=np.full(grid.shape[0], 2.0 * self.einstein_radius_rescaled))
def equivalent_release_for_product(self, product): """ Returns the release for a specified product with the same channel and major version with the highest minor version, or None if no such releases exist """ releases = self._default_manager.filter( version__startswith=self.major_version() + '.', channel=self.channel, product=product).order_by('-version') if not getattr(settings, 'DEV', False): releases = releases.filter(is_public=True) if releases: return sorted( sorted(releases, reverse=True, key=lambda r: len(r.version.split('.'))), reverse=True, key=lambda r: r.version.split('.')[1])[0]
Returns the release for a specified product with the same channel and major version with the highest minor version, or None if no such releases exist
Below is the the instruction that describes the task: ### Input: Returns the release for a specified product with the same channel and major version with the highest minor version, or None if no such releases exist ### Response: def equivalent_release_for_product(self, product): """ Returns the release for a specified product with the same channel and major version with the highest minor version, or None if no such releases exist """ releases = self._default_manager.filter( version__startswith=self.major_version() + '.', channel=self.channel, product=product).order_by('-version') if not getattr(settings, 'DEV', False): releases = releases.filter(is_public=True) if releases: return sorted( sorted(releases, reverse=True, key=lambda r: len(r.version.split('.'))), reverse=True, key=lambda r: r.version.split('.')[1])[0]
def configure_scraper(self, scraper_config): """ Configures a PrometheusScaper object with query credentials :param scraper: valid PrometheusScaper object :param endpoint: url that will be scraped """ endpoint = scraper_config['prometheus_url'] scraper_config.update( { 'ssl_ca_cert': self._ssl_verify, 'ssl_cert': self._ssl_cert, 'ssl_private_key': self._ssl_private_key, 'extra_headers': self.headers(endpoint) or {}, } )
Configures a PrometheusScaper object with query credentials :param scraper: valid PrometheusScaper object :param endpoint: url that will be scraped
Below is the the instruction that describes the task: ### Input: Configures a PrometheusScaper object with query credentials :param scraper: valid PrometheusScaper object :param endpoint: url that will be scraped ### Response: def configure_scraper(self, scraper_config): """ Configures a PrometheusScaper object with query credentials :param scraper: valid PrometheusScaper object :param endpoint: url that will be scraped """ endpoint = scraper_config['prometheus_url'] scraper_config.update( { 'ssl_ca_cert': self._ssl_verify, 'ssl_cert': self._ssl_cert, 'ssl_private_key': self._ssl_private_key, 'extra_headers': self.headers(endpoint) or {}, } )
def create_folder(self, dir_name: str, parent_dir_id: str) -> str: """ Create folder into Google Drive :param dir_name: :param parent_dir_name: :return: """ service = self.__get_service() file_metadata = { 'name': dir_name, 'mimeType': 'application/vnd.google-apps.folder', 'parents': [parent_dir_id], } folder = service.files() \ .create(body=file_metadata, fields='id') \ .execute() return folder.get('id')
Create folder into Google Drive :param dir_name: :param parent_dir_name: :return:
Below is the the instruction that describes the task: ### Input: Create folder into Google Drive :param dir_name: :param parent_dir_name: :return: ### Response: def create_folder(self, dir_name: str, parent_dir_id: str) -> str: """ Create folder into Google Drive :param dir_name: :param parent_dir_name: :return: """ service = self.__get_service() file_metadata = { 'name': dir_name, 'mimeType': 'application/vnd.google-apps.folder', 'parents': [parent_dir_id], } folder = service.files() \ .create(body=file_metadata, fields='id') \ .execute() return folder.get('id')
def replace_namespaced_replication_controller_status(self, name, namespace, body, **kwargs): """ replace status of the specified ReplicationController This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replication_controller_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ReplicationController (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ReplicationController body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1ReplicationController If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_replication_controller_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_replication_controller_status_with_http_info(name, namespace, body, **kwargs) return data
replace status of the specified ReplicationController This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replication_controller_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ReplicationController (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ReplicationController body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1ReplicationController If the method is called asynchronously, returns the request thread.
Below is the the instruction that describes the task: ### Input: replace status of the specified ReplicationController This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replication_controller_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ReplicationController (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ReplicationController body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1ReplicationController If the method is called asynchronously, returns the request thread. ### Response: def replace_namespaced_replication_controller_status(self, name, namespace, body, **kwargs): """ replace status of the specified ReplicationController This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replication_controller_status(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ReplicationController (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1ReplicationController body: (required) :param str pretty: If 'true', then the output is pretty printed. 
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1ReplicationController If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_replication_controller_status_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_replication_controller_status_with_http_info(name, namespace, body, **kwargs) return data
def check_origin(self): """Do some simple checking of the zone's origin. @raises dns.zone.NoSOA: there is no SOA RR @raises dns.zone.NoNS: there is no NS RRset @raises KeyError: there is no origin node """ if self.relativize: name = dns.name.empty else: name = self.origin if self.get_rdataset(name, dns.rdatatype.SOA) is None: raise NoSOA if self.get_rdataset(name, dns.rdatatype.NS) is None: raise NoNS
Do some simple checking of the zone's origin. @raises dns.zone.NoSOA: there is no SOA RR @raises dns.zone.NoNS: there is no NS RRset @raises KeyError: there is no origin node
Below is the the instruction that describes the task: ### Input: Do some simple checking of the zone's origin. @raises dns.zone.NoSOA: there is no SOA RR @raises dns.zone.NoNS: there is no NS RRset @raises KeyError: there is no origin node ### Response: def check_origin(self): """Do some simple checking of the zone's origin. @raises dns.zone.NoSOA: there is no SOA RR @raises dns.zone.NoNS: there is no NS RRset @raises KeyError: there is no origin node """ if self.relativize: name = dns.name.empty else: name = self.origin if self.get_rdataset(name, dns.rdatatype.SOA) is None: raise NoSOA if self.get_rdataset(name, dns.rdatatype.NS) is None: raise NoNS
def save(self): """ Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation. """ headers = {} headers.setdefault('Content-Type', 'application/json') if not self.exists(): self.create() return put_resp = self.r_session.put( self.document_url, data=self.json(), headers=headers ) put_resp.raise_for_status() data = response_to_json_dict(put_resp) super(Document, self).__setitem__('_rev', data['rev']) return
Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation.
Below is the the instruction that describes the task: ### Input: Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation. ### Response: def save(self): """ Saves changes made to the locally cached Document object's data structures to the remote database. If the document does not exist remotely then it is created in the remote database. If the object does exist remotely then the document is updated remotely. In either case the locally cached Document object is also updated accordingly based on the successful response of the operation. """ headers = {} headers.setdefault('Content-Type', 'application/json') if not self.exists(): self.create() return put_resp = self.r_session.put( self.document_url, data=self.json(), headers=headers ) put_resp.raise_for_status() data = response_to_json_dict(put_resp) super(Document, self).__setitem__('_rev', data['rev']) return
def preferred_mmol(code): """ Get mmol number of preferred biological assembly as listed in the PDBe. Notes ----- First checks for code in mmols.json. If code not yet in this json dictionary, uses requests module to scrape the PDBE for the preferred mmol number. Parameters ---------- code : str A PDB code. Returns ------- mmol : int mmol number of preferred assembly. Raises ------ TypeError If 'mmol number' scraped is not an integer. """ # If preferred mmol number is already known, return it if code in mmols_numbers.keys(): mmol = mmols_numbers[code][1] return mmol elif is_obsolete(code): raise ValueError('Obsolete PDB code {0}'.format(code)) # Otherwise, use requests to scrape the PDBE. else: url_string = "http://www.ebi.ac.uk/pdbe/entry/pdb/{0}/analysis".format(code) r = requests.get(url_string) if not r.ok: raise IOError("Could not get to url {0}".format(url_string)) r_content = r.text ass = re.findall('Assembly\s\d+\s\(preferred\)', r_content) if len(ass) != 1: # To catch a strange error in the pdbe where preferred assembly is not numbered. See for example # http://www.ebi.ac.uk/pdbe/entry/pdb/7msi/analysis ass = re.findall('Assembly\s+\(preferred\)', r_content) if len(ass) == 1: return 1 obs = re.findall('Entry has been obsoleted and replaced by another entry \(OBS\)', r_content) if len(obs) == 1: rep = re.findall('by entry <a href="/pdbe/entry/pdb/\w{4}', r_content) if len(rep) == 1: rep = rep[0][-4:] raise IOError("{0} is obsolete and has been replaced by {1}.".format(code, rep)) raise ValueError("More than one match to preferred assembly") mmol = ass[0].split()[1] try: mmol = int(mmol) except TypeError: raise TypeError("Unexpected match: non-integer mmol") return mmol
Get mmol number of preferred biological assembly as listed in the PDBe. Notes ----- First checks for code in mmols.json. If code not yet in this json dictionary, uses requests module to scrape the PDBE for the preferred mmol number. Parameters ---------- code : str A PDB code. Returns ------- mmol : int mmol number of preferred assembly. Raises ------ TypeError If 'mmol number' scraped is not an integer.
Below is the the instruction that describes the task: ### Input: Get mmol number of preferred biological assembly as listed in the PDBe. Notes ----- First checks for code in mmols.json. If code not yet in this json dictionary, uses requests module to scrape the PDBE for the preferred mmol number. Parameters ---------- code : str A PDB code. Returns ------- mmol : int mmol number of preferred assembly. Raises ------ TypeError If 'mmol number' scraped is not an integer. ### Response: def preferred_mmol(code): """ Get mmol number of preferred biological assembly as listed in the PDBe. Notes ----- First checks for code in mmols.json. If code not yet in this json dictionary, uses requests module to scrape the PDBE for the preferred mmol number. Parameters ---------- code : str A PDB code. Returns ------- mmol : int mmol number of preferred assembly. Raises ------ TypeError If 'mmol number' scraped is not an integer. """ # If preferred mmol number is already known, return it if code in mmols_numbers.keys(): mmol = mmols_numbers[code][1] return mmol elif is_obsolete(code): raise ValueError('Obsolete PDB code {0}'.format(code)) # Otherwise, use requests to scrape the PDBE. else: url_string = "http://www.ebi.ac.uk/pdbe/entry/pdb/{0}/analysis".format(code) r = requests.get(url_string) if not r.ok: raise IOError("Could not get to url {0}".format(url_string)) r_content = r.text ass = re.findall('Assembly\s\d+\s\(preferred\)', r_content) if len(ass) != 1: # To catch a strange error in the pdbe where preferred assembly is not numbered. 
See for example # http://www.ebi.ac.uk/pdbe/entry/pdb/7msi/analysis ass = re.findall('Assembly\s+\(preferred\)', r_content) if len(ass) == 1: return 1 obs = re.findall('Entry has been obsoleted and replaced by another entry \(OBS\)', r_content) if len(obs) == 1: rep = re.findall('by entry <a href="/pdbe/entry/pdb/\w{4}', r_content) if len(rep) == 1: rep = rep[0][-4:] raise IOError("{0} is obsolete and has been replaced by {1}.".format(code, rep)) raise ValueError("More than one match to preferred assembly") mmol = ass[0].split()[1] try: mmol = int(mmol) except TypeError: raise TypeError("Unexpected match: non-integer mmol") return mmol
def _float_check(self, attribute_array, value, irow, key): '''Checks if value is valid float, appends to array if valid, appends nan if not''' value = value.strip(' ') try: if value: attribute_array = np.hstack([attribute_array, float(value)]) else: attribute_array = np.hstack([attribute_array, np.nan]) except: print(irow, key) msg = 'Input file format error at line: %d' % (irow + 2) msg += ' key: %s' % (key) raise ValueError(msg) return attribute_array
Checks if value is valid float, appends to array if valid, appends nan if not
Below is the the instruction that describes the task: ### Input: Checks if value is valid float, appends to array if valid, appends nan if not ### Response: def _float_check(self, attribute_array, value, irow, key): '''Checks if value is valid float, appends to array if valid, appends nan if not''' value = value.strip(' ') try: if value: attribute_array = np.hstack([attribute_array, float(value)]) else: attribute_array = np.hstack([attribute_array, np.nan]) except: print(irow, key) msg = 'Input file format error at line: %d' % (irow + 2) msg += ' key: %s' % (key) raise ValueError(msg) return attribute_array
def transform_dataframe(self, dataframe): """ Unstack the dataframe so header fields are across the top. """ dataframe.columns.name = "" for i in range(len(self.get_header_fields())): dataframe = dataframe.unstack() # Remove blank rows / columns dataframe = dataframe.dropna( axis=0, how='all' ).dropna( axis=1, how='all' ) return dataframe
Unstack the dataframe so header fields are across the top.
Below is the the instruction that describes the task: ### Input: Unstack the dataframe so header fields are across the top. ### Response: def transform_dataframe(self, dataframe): """ Unstack the dataframe so header fields are across the top. """ dataframe.columns.name = "" for i in range(len(self.get_header_fields())): dataframe = dataframe.unstack() # Remove blank rows / columns dataframe = dataframe.dropna( axis=0, how='all' ).dropna( axis=1, how='all' ) return dataframe
def add(self, child, min_occurs=1): """Add a child node. @param child: The schema for the child node. @param min_occurs: The minimum number of times the child node must occur, if C{None} is given the default is 1. """ if not min_occurs in (0, 1): raise RuntimeError("Unexpected min bound for node schema") self.children[child.tag] = child self.children_min_occurs[child.tag] = min_occurs return child
Add a child node. @param child: The schema for the child node. @param min_occurs: The minimum number of times the child node must occur, if C{None} is given the default is 1.
Below is the the instruction that describes the task: ### Input: Add a child node. @param child: The schema for the child node. @param min_occurs: The minimum number of times the child node must occur, if C{None} is given the default is 1. ### Response: def add(self, child, min_occurs=1): """Add a child node. @param child: The schema for the child node. @param min_occurs: The minimum number of times the child node must occur, if C{None} is given the default is 1. """ if not min_occurs in (0, 1): raise RuntimeError("Unexpected min bound for node schema") self.children[child.tag] = child self.children_min_occurs[child.tag] = min_occurs return child
def boxplot(df, plot_mean=False, plot_ids=None, title=None, xlabel=None, ylabel=None): """ Plot boxplots Plot the boxplots of a dataframe in time Parameters ---------- df: Pandas Dataframe Every collumn is a timeseries plot_mean: bool Wether or not to plot the means plot_ids: [str] List of id's to plot Returns ------- matplotlib figure """ df = df.applymap(float) description = df.apply(pd.DataFrame.describe, axis=1) # plot plt = plot_style() plt.boxplot(df) #plt.setp(bp['boxes'], color='black') #plt.setp(bp['whiskers'], color='black') if plot_ids is not None: for id in plot_ids: if id in df.columns: plt.scatter(x=range(1, len(df) + 1), y=df[id], label=str(id)) if plot_mean: plt.scatter(x=range(1, len(df) + 1), y=description['mean'], label="Mean", color='k', s=30, marker='+') ax = plt.gca() ax.set_xticklabels(df.index) #plt.xticks(rotation=45) plt.legend() if title is not None: plt.title(title) if xlabel is not None: plt.xlabel(xlabel) if ylabel is not None: plt.ylabel(ylabel) return plt.gcf()
Plot boxplots Plot the boxplots of a dataframe in time Parameters ---------- df: Pandas Dataframe Every collumn is a timeseries plot_mean: bool Wether or not to plot the means plot_ids: [str] List of id's to plot Returns ------- matplotlib figure
Below is the the instruction that describes the task: ### Input: Plot boxplots Plot the boxplots of a dataframe in time Parameters ---------- df: Pandas Dataframe Every collumn is a timeseries plot_mean: bool Wether or not to plot the means plot_ids: [str] List of id's to plot Returns ------- matplotlib figure ### Response: def boxplot(df, plot_mean=False, plot_ids=None, title=None, xlabel=None, ylabel=None): """ Plot boxplots Plot the boxplots of a dataframe in time Parameters ---------- df: Pandas Dataframe Every collumn is a timeseries plot_mean: bool Wether or not to plot the means plot_ids: [str] List of id's to plot Returns ------- matplotlib figure """ df = df.applymap(float) description = df.apply(pd.DataFrame.describe, axis=1) # plot plt = plot_style() plt.boxplot(df) #plt.setp(bp['boxes'], color='black') #plt.setp(bp['whiskers'], color='black') if plot_ids is not None: for id in plot_ids: if id in df.columns: plt.scatter(x=range(1, len(df) + 1), y=df[id], label=str(id)) if plot_mean: plt.scatter(x=range(1, len(df) + 1), y=description['mean'], label="Mean", color='k', s=30, marker='+') ax = plt.gca() ax.set_xticklabels(df.index) #plt.xticks(rotation=45) plt.legend() if title is not None: plt.title(title) if xlabel is not None: plt.xlabel(xlabel) if ylabel is not None: plt.ylabel(ylabel) return plt.gcf()
def send_notifications(self, notification_type, *args): """ Fires off the notification for the specific event. Uses var args to pass in a arbitrary list of parameter according to which notification type was fired. Args: notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes) args: variable list of arguments to the callback. """ if notification_type in self.notifications: for notification_id, callback in self.notifications[notification_type]: try: callback(*args) except: self.logger.exception('Problem calling notify callback!')
Fires off the notification for the specific event. Uses var args to pass in a arbitrary list of parameter according to which notification type was fired. Args: notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes) args: variable list of arguments to the callback.
Below is the the instruction that describes the task: ### Input: Fires off the notification for the specific event. Uses var args to pass in a arbitrary list of parameter according to which notification type was fired. Args: notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes) args: variable list of arguments to the callback. ### Response: def send_notifications(self, notification_type, *args): """ Fires off the notification for the specific event. Uses var args to pass in a arbitrary list of parameter according to which notification type was fired. Args: notification_type: Type of notification to fire (String from .helpers.enums.NotificationTypes) args: variable list of arguments to the callback. """ if notification_type in self.notifications: for notification_id, callback in self.notifications[notification_type]: try: callback(*args) except: self.logger.exception('Problem calling notify callback!')
def assets(self): """ Provides access to asset management methods. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets :return: :class:`EnvironmentAssetsProxy <contentful_management.environment_assets_proxy.EnvironmentAssetsProxy>` object. :rtype: contentful.environment_assets_proxy.EnvironmentAssetsProxy Usage: >>> environment_assets_proxy = environment.assets() <EnvironmentAssetsProxy space_id="cfexampleapi" environment_id="master"> """ return EnvironmentAssetsProxy(self._client, self.space.id, self.id)
Provides access to asset management methods. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets :return: :class:`EnvironmentAssetsProxy <contentful_management.environment_assets_proxy.EnvironmentAssetsProxy>` object. :rtype: contentful.environment_assets_proxy.EnvironmentAssetsProxy Usage: >>> environment_assets_proxy = environment.assets() <EnvironmentAssetsProxy space_id="cfexampleapi" environment_id="master">
Below is the the instruction that describes the task: ### Input: Provides access to asset management methods. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets :return: :class:`EnvironmentAssetsProxy <contentful_management.environment_assets_proxy.EnvironmentAssetsProxy>` object. :rtype: contentful.environment_assets_proxy.EnvironmentAssetsProxy Usage: >>> environment_assets_proxy = environment.assets() <EnvironmentAssetsProxy space_id="cfexampleapi" environment_id="master"> ### Response: def assets(self): """ Provides access to asset management methods. API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/assets :return: :class:`EnvironmentAssetsProxy <contentful_management.environment_assets_proxy.EnvironmentAssetsProxy>` object. :rtype: contentful.environment_assets_proxy.EnvironmentAssetsProxy Usage: >>> environment_assets_proxy = environment.assets() <EnvironmentAssetsProxy space_id="cfexampleapi" environment_id="master"> """ return EnvironmentAssetsProxy(self._client, self.space.id, self.id)
def print_upper_triangular_matrix_as_complete(matrix): """Prints a CVRP data dict upper triangular matrix as a normal matrix Doesn't print headers. Arguments --------- matrix : dict Description """ for i in sorted(matrix.keys()): for j in sorted(matrix.keys()): a, b = i, j if a > b: a, b = b, a print(matrix[a][b], end=' ') print()
Prints a CVRP data dict upper triangular matrix as a normal matrix Doesn't print headers. Arguments --------- matrix : dict Description
Below is the the instruction that describes the task: ### Input: Prints a CVRP data dict upper triangular matrix as a normal matrix Doesn't print headers. Arguments --------- matrix : dict Description ### Response: def print_upper_triangular_matrix_as_complete(matrix): """Prints a CVRP data dict upper triangular matrix as a normal matrix Doesn't print headers. Arguments --------- matrix : dict Description """ for i in sorted(matrix.keys()): for j in sorted(matrix.keys()): a, b = i, j if a > b: a, b = b, a print(matrix[a][b], end=' ') print()
def argument(self, key=None): """ Get the value of a command argument. """ if key is None: return self._args.arguments() return self._args.argument(key)
Get the value of a command argument.
Below is the instruction that describes the task: ### Input: Get the value of a command argument. ### Response: def argument(self, key=None): """ Get the value of a command argument. """ if key is None: return self._args.arguments() return self._args.argument(key)
def find_once(self, locator): """ Find wrapper to run a single find @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @type find_all: bool @param find_all: should I find all elements, or just one? @rtype: WebElementWrapper or list[WebElementWrapper] @return: Either a single WebElementWrapper, or a list of WebElementWrappers """ params = [] params.append(self.driver_wrapper.find_attempts) params.append(self.driver_wrapper.implicit_wait) self.driver_wrapper.find_attempts = 1 self.driver_wrapper.implicit_wait = 0 result = self.driver_wrapper._find_immediately(locator, self.element) # restore the original params self.driver_wrapper.implicit_wait = params.pop() self.driver_wrapper.find_attempts = params.pop() return result
Find wrapper to run a single find @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @type find_all: bool @param find_all: should I find all elements, or just one? @rtype: WebElementWrapper or list[WebElementWrapper] @return: Either a single WebElementWrapper, or a list of WebElementWrappers
Below is the the instruction that describes the task: ### Input: Find wrapper to run a single find @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @type find_all: bool @param find_all: should I find all elements, or just one? @rtype: WebElementWrapper or list[WebElementWrapper] @return: Either a single WebElementWrapper, or a list of WebElementWrappers ### Response: def find_once(self, locator): """ Find wrapper to run a single find @type locator: webdriverwrapper.support.locator.Locator @param locator: locator used in search @type find_all: bool @param find_all: should I find all elements, or just one? @rtype: WebElementWrapper or list[WebElementWrapper] @return: Either a single WebElementWrapper, or a list of WebElementWrappers """ params = [] params.append(self.driver_wrapper.find_attempts) params.append(self.driver_wrapper.implicit_wait) self.driver_wrapper.find_attempts = 1 self.driver_wrapper.implicit_wait = 0 result = self.driver_wrapper._find_immediately(locator, self.element) # restore the original params self.driver_wrapper.implicit_wait = params.pop() self.driver_wrapper.find_attempts = params.pop() return result
def process(self, metric): """ Process a metric by sending it to Librato """ path = metric.getCollectorPath() path += '.' path += metric.getMetricPath() if self.config['apply_metric_prefix']: path = metric.getPathPrefix() + '.' + path if self.include_reg.match(path): if metric.metric_type == 'GAUGE': m_type = 'gauge' else: m_type = 'counter' self.queue.add(path, # name float(metric.value), # value type=m_type, source=metric.host, measure_time=metric.timestamp) self.current_n_measurements += 1 else: self.log.debug("LibratoHandler: Skip %s, no include_filters match", path) if (self.current_n_measurements >= self.queue_max_size or time.time() >= self.queue_max_timestamp): self.log.debug("LibratoHandler: Sending batch size: %d", self.current_n_measurements) self._send()
Process a metric by sending it to Librato
Below is the the instruction that describes the task: ### Input: Process a metric by sending it to Librato ### Response: def process(self, metric): """ Process a metric by sending it to Librato """ path = metric.getCollectorPath() path += '.' path += metric.getMetricPath() if self.config['apply_metric_prefix']: path = metric.getPathPrefix() + '.' + path if self.include_reg.match(path): if metric.metric_type == 'GAUGE': m_type = 'gauge' else: m_type = 'counter' self.queue.add(path, # name float(metric.value), # value type=m_type, source=metric.host, measure_time=metric.timestamp) self.current_n_measurements += 1 else: self.log.debug("LibratoHandler: Skip %s, no include_filters match", path) if (self.current_n_measurements >= self.queue_max_size or time.time() >= self.queue_max_timestamp): self.log.debug("LibratoHandler: Sending batch size: %d", self.current_n_measurements) self._send()
def query_json(json_content, query, delimiter='.'): """ Do an xpath-like query with json_content. Args: json_content (dict/list/string): content to be queried. query (str): query string. delimiter (str): delimiter symbol. Returns: str: queried result. Examples: >>> json_content = { "ids": [1, 2, 3, 4], "person": { "name": { "first_name": "Leo", "last_name": "Lee", }, "age": 29, "cities": ["Guangzhou", "Shenzhen"] } } >>> >>> query_json(json_content, "person.name.first_name") >>> Leo >>> >>> query_json(json_content, "person.name.first_name.0") >>> L >>> >>> query_json(json_content, "person.cities.0") >>> Guangzhou """ raise_flag = False response_body = u"response body: {}\n".format(json_content) try: for key in query.split(delimiter): if isinstance(json_content, (list, basestring)): json_content = json_content[int(key)] elif isinstance(json_content, dict): json_content = json_content[key] else: logger.log_error( "invalid type value: {}({})".format(json_content, type(json_content))) raise_flag = True except (KeyError, ValueError, IndexError): raise_flag = True if raise_flag: err_msg = u"Failed to extract! => {}\n".format(query) err_msg += response_body logger.log_error(err_msg) raise exceptions.ExtractFailure(err_msg) return json_content
Do an xpath-like query with json_content. Args: json_content (dict/list/string): content to be queried. query (str): query string. delimiter (str): delimiter symbol. Returns: str: queried result. Examples: >>> json_content = { "ids": [1, 2, 3, 4], "person": { "name": { "first_name": "Leo", "last_name": "Lee", }, "age": 29, "cities": ["Guangzhou", "Shenzhen"] } } >>> >>> query_json(json_content, "person.name.first_name") >>> Leo >>> >>> query_json(json_content, "person.name.first_name.0") >>> L >>> >>> query_json(json_content, "person.cities.0") >>> Guangzhou
Below is the the instruction that describes the task: ### Input: Do an xpath-like query with json_content. Args: json_content (dict/list/string): content to be queried. query (str): query string. delimiter (str): delimiter symbol. Returns: str: queried result. Examples: >>> json_content = { "ids": [1, 2, 3, 4], "person": { "name": { "first_name": "Leo", "last_name": "Lee", }, "age": 29, "cities": ["Guangzhou", "Shenzhen"] } } >>> >>> query_json(json_content, "person.name.first_name") >>> Leo >>> >>> query_json(json_content, "person.name.first_name.0") >>> L >>> >>> query_json(json_content, "person.cities.0") >>> Guangzhou ### Response: def query_json(json_content, query, delimiter='.'): """ Do an xpath-like query with json_content. Args: json_content (dict/list/string): content to be queried. query (str): query string. delimiter (str): delimiter symbol. Returns: str: queried result. Examples: >>> json_content = { "ids": [1, 2, 3, 4], "person": { "name": { "first_name": "Leo", "last_name": "Lee", }, "age": 29, "cities": ["Guangzhou", "Shenzhen"] } } >>> >>> query_json(json_content, "person.name.first_name") >>> Leo >>> >>> query_json(json_content, "person.name.first_name.0") >>> L >>> >>> query_json(json_content, "person.cities.0") >>> Guangzhou """ raise_flag = False response_body = u"response body: {}\n".format(json_content) try: for key in query.split(delimiter): if isinstance(json_content, (list, basestring)): json_content = json_content[int(key)] elif isinstance(json_content, dict): json_content = json_content[key] else: logger.log_error( "invalid type value: {}({})".format(json_content, type(json_content))) raise_flag = True except (KeyError, ValueError, IndexError): raise_flag = True if raise_flag: err_msg = u"Failed to extract! => {}\n".format(query) err_msg += response_body logger.log_error(err_msg) raise exceptions.ExtractFailure(err_msg) return json_content
def remove_line(self, section, line): """Remove all instances of a line. Returns: int: the number of lines removed """ try: s = self._get_section(section, create=False) except KeyError: # No such section, skip. return 0 return s.remove(line)
Remove all instances of a line. Returns: int: the number of lines removed
Below is the instruction that describes the task: ### Input: Remove all instances of a line. Returns: int: the number of lines removed ### Response: def remove_line(self, section, line): """Remove all instances of a line. Returns: int: the number of lines removed """ try: s = self._get_section(section, create=False) except KeyError: # No such section, skip. return 0 return s.remove(line)
def save_df_output( df_output: pd.DataFrame, freq_s: int = 3600, site: str = '', path_dir_save: Path = Path('.'),)->list: '''save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files ''' list_path_save = [] list_group = df_output.columns.get_level_values('group').unique() list_grid = df_output.index.get_level_values('grid').unique() for grid in list_grid: for group in list_group: df_output_grid_group = df_output\ .loc[grid, group]\ .dropna(how='all', axis=0) # save output at the runtime frequency (usually 5 min) # 'DailyState' group will be save a daily frequency path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) # resample output if freq_s is different from runtime freq (usually 5 min) freq_save = pd.Timedelta(freq_s, 's') # resample `df_output` at `freq_save` df_rsmp = resample_output(df_output, freq_save) # 'DailyState' group will be dropped in `resample_output` as resampling is not needed df_rsmp = df_rsmp.drop(columns='DailyState') list_group = df_rsmp.columns.get_level_values('group').unique() list_grid = df_rsmp.index.get_level_values('grid').unique() # save output at the resampling frequency for grid in list_grid: for group in list_group: df_output_grid_group = df_rsmp.loc[grid, 
group] path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) return list_path_save
save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files
Below is the the instruction that describes the task: ### Input: save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files ### Response: def save_df_output( df_output: pd.DataFrame, freq_s: int = 3600, site: str = '', path_dir_save: Path = Path('.'),)->list: '''save supy output dataframe to txt files Parameters ---------- df_output : pd.DataFrame output dataframe of supy simulation freq_s : int, optional output frequency in second (the default is 3600, which indicates the a txt with hourly values) path_dir_save : Path, optional directory to save txt files (the default is '.', which the current working directory) site : str, optional site code used for filename (the default is '', which indicates no site name prepended to the filename) path_runcontrol : str or anything that can be parsed as `Path`, optional path to SUEWS 'RunControl.nml' file (the default is None, which indicates necessary saving options should be specified via other parameters) Returns ------- list a list of `Path` objects for saved txt files ''' list_path_save = [] list_group = df_output.columns.get_level_values('group').unique() list_grid = df_output.index.get_level_values('grid').unique() for grid in list_grid: for group in list_group: df_output_grid_group = df_output\ .loc[grid, group]\ 
.dropna(how='all', axis=0) # save output at the runtime frequency (usually 5 min) # 'DailyState' group will be save a daily frequency path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) # resample output if freq_s is different from runtime freq (usually 5 min) freq_save = pd.Timedelta(freq_s, 's') # resample `df_output` at `freq_save` df_rsmp = resample_output(df_output, freq_save) # 'DailyState' group will be dropped in `resample_output` as resampling is not needed df_rsmp = df_rsmp.drop(columns='DailyState') list_group = df_rsmp.columns.get_level_values('group').unique() list_grid = df_rsmp.index.get_level_values('grid').unique() # save output at the resampling frequency for grid in list_grid: for group in list_group: df_output_grid_group = df_rsmp.loc[grid, group] path_save = save_df_grid_group( df_output_grid_group, grid, group, site=site, dir_save=path_dir_save) list_path_save.append(path_save) return list_path_save
def add_file(self, path, parent=None, tree=TreeType.SOURCE_ROOT, target_name=None, force=True, file_options=FileOptions()): """ Adds a file to the project, taking care of the type of the file and creating additional structures depending on the file type. For instance, frameworks will be linked, embedded and search paths will be adjusted automatically. Header file will be added to the headers sections, but not compiled, whereas the source files will be added to the compilation phase. :param path: Path to the file to be added :param parent: Parent group to be added under :param tree: Tree where the path is relative to :param target_name: Target name or list of target names where the file should be added (none for every target) :param force: Add the file without checking if the file already exists :param file_options: FileOptions object to be used during the addition of the file to the project. :return: a list of elements that were added to the project successfully as PBXBuildFile objects """ results = [] # if it's not forced to add the file stop if the file already exists. 
if not force: for section in self.objects.get_sections(): for obj in self.objects.get_objects_in_section(section): if u'path' in obj and ProjectFiles._path_leaf(path) == ProjectFiles._path_leaf(obj.path): return [] file_ref, abs_path, path, tree, expected_build_phase = self._add_file_reference(path, parent, tree, force, file_options) if path is None or tree is None: return None # no need to create the build_files, done if not file_options.create_build_files: return results # create build_files for the targets results.extend(self._create_build_files(file_ref, target_name, expected_build_phase, file_options)) # special case for the frameworks and libraries to update the search paths if tree != TreeType.SOURCE_ROOT or abs_path is None: return results # the path is absolute and it's outside the scope of the project for linking purposes library_path = os.path.join(u'$(SRCROOT)', os.path.split(file_ref.path)[0]) if os.path.isfile(abs_path): self.add_library_search_paths([library_path], recursive=False) else: self.add_framework_search_paths([library_path, u'$(inherited)'], recursive=False) return results
Adds a file to the project, taking care of the type of the file and creating additional structures depending on the file type. For instance, frameworks will be linked, embedded and search paths will be adjusted automatically. Header file will be added to the headers sections, but not compiled, whereas the source files will be added to the compilation phase. :param path: Path to the file to be added :param parent: Parent group to be added under :param tree: Tree where the path is relative to :param target_name: Target name or list of target names where the file should be added (none for every target) :param force: Add the file without checking if the file already exists :param file_options: FileOptions object to be used during the addition of the file to the project. :return: a list of elements that were added to the project successfully as PBXBuildFile objects
Below is the the instruction that describes the task: ### Input: Adds a file to the project, taking care of the type of the file and creating additional structures depending on the file type. For instance, frameworks will be linked, embedded and search paths will be adjusted automatically. Header file will be added to the headers sections, but not compiled, whereas the source files will be added to the compilation phase. :param path: Path to the file to be added :param parent: Parent group to be added under :param tree: Tree where the path is relative to :param target_name: Target name or list of target names where the file should be added (none for every target) :param force: Add the file without checking if the file already exists :param file_options: FileOptions object to be used during the addition of the file to the project. :return: a list of elements that were added to the project successfully as PBXBuildFile objects ### Response: def add_file(self, path, parent=None, tree=TreeType.SOURCE_ROOT, target_name=None, force=True, file_options=FileOptions()): """ Adds a file to the project, taking care of the type of the file and creating additional structures depending on the file type. For instance, frameworks will be linked, embedded and search paths will be adjusted automatically. Header file will be added to the headers sections, but not compiled, whereas the source files will be added to the compilation phase. :param path: Path to the file to be added :param parent: Parent group to be added under :param tree: Tree where the path is relative to :param target_name: Target name or list of target names where the file should be added (none for every target) :param force: Add the file without checking if the file already exists :param file_options: FileOptions object to be used during the addition of the file to the project. 
:return: a list of elements that were added to the project successfully as PBXBuildFile objects """ results = [] # if it's not forced to add the file stop if the file already exists. if not force: for section in self.objects.get_sections(): for obj in self.objects.get_objects_in_section(section): if u'path' in obj and ProjectFiles._path_leaf(path) == ProjectFiles._path_leaf(obj.path): return [] file_ref, abs_path, path, tree, expected_build_phase = self._add_file_reference(path, parent, tree, force, file_options) if path is None or tree is None: return None # no need to create the build_files, done if not file_options.create_build_files: return results # create build_files for the targets results.extend(self._create_build_files(file_ref, target_name, expected_build_phase, file_options)) # special case for the frameworks and libraries to update the search paths if tree != TreeType.SOURCE_ROOT or abs_path is None: return results # the path is absolute and it's outside the scope of the project for linking purposes library_path = os.path.join(u'$(SRCROOT)', os.path.split(file_ref.path)[0]) if os.path.isfile(abs_path): self.add_library_search_paths([library_path], recursive=False) else: self.add_framework_search_paths([library_path, u'$(inherited)'], recursive=False) return results
def command_py2to3(args): """ Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources. """ from lib2to3.main import main sys.exit(main("lib2to3.fixes", args=args.sources))
Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources.
Below is the instruction that describes the task: ### Input: Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources. ### Response: def command_py2to3(args): """ Apply '2to3' tool (Python2 to Python3 conversion tool) to Python sources. """ from lib2to3.main import main sys.exit(main("lib2to3.fixes", args=args.sources))
def get_static_dependencies(self, dependencies=None, include_beta=None): """Resolves the project -> dependencies section of cumulusci.yml to convert dynamic github dependencies into static dependencies by inspecting the referenced repositories Keyword arguments: :param dependencies: a list of dependencies to resolve :param include_beta: when true, return the latest github release, even if pre-release; else return the latest stable release """ if not dependencies: dependencies = self.project__dependencies if not dependencies: return [] static_dependencies = [] for dependency in dependencies: if "github" not in dependency: static_dependencies.append(dependency) else: static = self.process_github_dependency( dependency, include_beta=include_beta ) static_dependencies.extend(static) return static_dependencies
Resolves the project -> dependencies section of cumulusci.yml to convert dynamic github dependencies into static dependencies by inspecting the referenced repositories Keyword arguments: :param dependencies: a list of dependencies to resolve :param include_beta: when true, return the latest github release, even if pre-release; else return the latest stable release
Below is the the instruction that describes the task: ### Input: Resolves the project -> dependencies section of cumulusci.yml to convert dynamic github dependencies into static dependencies by inspecting the referenced repositories Keyword arguments: :param dependencies: a list of dependencies to resolve :param include_beta: when true, return the latest github release, even if pre-release; else return the latest stable release ### Response: def get_static_dependencies(self, dependencies=None, include_beta=None): """Resolves the project -> dependencies section of cumulusci.yml to convert dynamic github dependencies into static dependencies by inspecting the referenced repositories Keyword arguments: :param dependencies: a list of dependencies to resolve :param include_beta: when true, return the latest github release, even if pre-release; else return the latest stable release """ if not dependencies: dependencies = self.project__dependencies if not dependencies: return [] static_dependencies = [] for dependency in dependencies: if "github" not in dependency: static_dependencies.append(dependency) else: static = self.process_github_dependency( dependency, include_beta=include_beta ) static_dependencies.extend(static) return static_dependencies
def buildout(directory='.', config='buildout.cfg', parts=None, runas=None, env=(), buildout_ver=None, test_release=False, distribute=None, new_st=None, offline=False, newest=False, python=sys.executable, debug=False, verbose=False, onlyif=None, unless=None, use_vt=False, loglevel=None): ''' Run buildout in a directory. directory directory to execute in config buildout config to use parts specific buildout parts to run runas user used to run buildout as env environment variables to set when running buildout_ver force a specific buildout version (1 | 2) test_release buildout accept test release new_st Forcing use of setuptools >= 0.7 distribute use distribute over setuptools if possible offline does buildout run offline python python to use debug run buildout with -D debug flag onlyif Only execute cmd if statement on the host return 0 unless Do not execute cmd if statement on the host return 0 newest run buildout in newest mode verbose run buildout in verbose mode (-vvvvv) use_vt Use the new salt VT to stream output [experimental] CLI Example: .. code-block:: bash salt '*' buildout.buildout /srv/mybuildout ''' LOG.info('Running buildout in {0} ({1})'.format(directory, config)) # pylint: disable=str-format-in-logging boot_ret = bootstrap(directory, config=config, buildout_ver=buildout_ver, test_release=test_release, offline=offline, new_st=new_st, env=env, runas=runas, distribute=distribute, python=python, use_vt=use_vt, loglevel=loglevel) buildout_ret = run_buildout(directory=directory, config=config, parts=parts, offline=offline, newest=newest, runas=runas, env=env, verbose=verbose, debug=debug, use_vt=use_vt, loglevel=loglevel) # signal the decorator or our return return _merge_statuses([boot_ret, buildout_ret])
Run buildout in a directory. directory directory to execute in config buildout config to use parts specific buildout parts to run runas user used to run buildout as env environment variables to set when running buildout_ver force a specific buildout version (1 | 2) test_release buildout accept test release new_st Forcing use of setuptools >= 0.7 distribute use distribute over setuptools if possible offline does buildout run offline python python to use debug run buildout with -D debug flag onlyif Only execute cmd if statement on the host return 0 unless Do not execute cmd if statement on the host return 0 newest run buildout in newest mode verbose run buildout in verbose mode (-vvvvv) use_vt Use the new salt VT to stream output [experimental] CLI Example: .. code-block:: bash salt '*' buildout.buildout /srv/mybuildout
Below is the the instruction that describes the task: ### Input: Run buildout in a directory. directory directory to execute in config buildout config to use parts specific buildout parts to run runas user used to run buildout as env environment variables to set when running buildout_ver force a specific buildout version (1 | 2) test_release buildout accept test release new_st Forcing use of setuptools >= 0.7 distribute use distribute over setuptools if possible offline does buildout run offline python python to use debug run buildout with -D debug flag onlyif Only execute cmd if statement on the host return 0 unless Do not execute cmd if statement on the host return 0 newest run buildout in newest mode verbose run buildout in verbose mode (-vvvvv) use_vt Use the new salt VT to stream output [experimental] CLI Example: .. code-block:: bash salt '*' buildout.buildout /srv/mybuildout ### Response: def buildout(directory='.', config='buildout.cfg', parts=None, runas=None, env=(), buildout_ver=None, test_release=False, distribute=None, new_st=None, offline=False, newest=False, python=sys.executable, debug=False, verbose=False, onlyif=None, unless=None, use_vt=False, loglevel=None): ''' Run buildout in a directory. directory directory to execute in config buildout config to use parts specific buildout parts to run runas user used to run buildout as env environment variables to set when running buildout_ver force a specific buildout version (1 | 2) test_release buildout accept test release new_st Forcing use of setuptools >= 0.7 distribute use distribute over setuptools if possible offline does buildout run offline python python to use debug run buildout with -D debug flag onlyif Only execute cmd if statement on the host return 0 unless Do not execute cmd if statement on the host return 0 newest run buildout in newest mode verbose run buildout in verbose mode (-vvvvv) use_vt Use the new salt VT to stream output [experimental] CLI Example: .. 
code-block:: bash salt '*' buildout.buildout /srv/mybuildout ''' LOG.info('Running buildout in {0} ({1})'.format(directory, config)) # pylint: disable=str-format-in-logging boot_ret = bootstrap(directory, config=config, buildout_ver=buildout_ver, test_release=test_release, offline=offline, new_st=new_st, env=env, runas=runas, distribute=distribute, python=python, use_vt=use_vt, loglevel=loglevel) buildout_ret = run_buildout(directory=directory, config=config, parts=parts, offline=offline, newest=newest, runas=runas, env=env, verbose=verbose, debug=debug, use_vt=use_vt, loglevel=loglevel) # signal the decorator or our return return _merge_statuses([boot_ret, buildout_ret])
def _named_tuple_converter(tuple_type): # type: (Type[Tuple]) -> _AggregateConverter """Return an _AggregateConverter for named tuples of the given type.""" def _from_dict(dict_value): if dict_value: return tuple_type(**dict_value) # Cannot construct a namedtuple value from an empty dictionary return None def _to_dict(value): if value: return value._asdict() return {} converter = _AggregateConverter(from_dict=_from_dict, to_dict=_to_dict) return converter
Return an _AggregateConverter for named tuples of the given type.
Below is the instruction that describes the task: ### Input: Return an _AggregateConverter for named tuples of the given type. ### Response: def _named_tuple_converter(tuple_type): # type: (Type[Tuple]) -> _AggregateConverter """Return an _AggregateConverter for named tuples of the given type.""" def _from_dict(dict_value): if dict_value: return tuple_type(**dict_value) # Cannot construct a namedtuple value from an empty dictionary return None def _to_dict(value): if value: return value._asdict() return {} converter = _AggregateConverter(from_dict=_from_dict, to_dict=_to_dict) return converter
def set_attribute(self, name, value): """ Default handler for those not explicitly defined """ if value is True: self.widget.set(name, name) elif value is False: del self.widget.attrib[name] else: self.widget.set(name, str(value))
Default handler for those not explicitly defined
Below is the instruction that describes the task: ### Input: Default handler for those not explicitly defined ### Response: def set_attribute(self, name, value): """ Default handler for those not explicitly defined """ if value is True: self.widget.set(name, name) elif value is False: del self.widget.attrib[name] else: self.widget.set(name, str(value))
def timestr2time(time_str):
    '''Parse a time string into a ``datetime.time``.

    The format is guessed, so the string must match one of the formats
    listed in VALID_TIME_FORMATS_TEXT.

    Args:
        time_str (str): textual representation of a time of day.
    Returns:
        datetime.time object
    Raises:
        ValueError: if the input string does not have a valid format.
    '''
    # Only digits and colons may appear anywhere in the string.
    if not set(time_str) <= set('0123456789:'):
        raise ValueError('Illegal character in time string')

    colons = time_str.count(':')
    if colons == 2:
        hour, minute, second = time_str.split(':')
    elif colons == 1:
        hour, minute = time_str.split(':')
        second = '00'
    elif len(time_str) == 6:
        # Compact HHMMSS form: split by fixed offsets.
        hour, minute, second = time_str[:2], time_str[2:4], time_str[4:]
    else:
        raise ValueError('Time format not recognised. {}'.format(
            VALID_TIME_FORMATS_TEXT))

    if not (len(minute) == 2 and len(second) == 2):
        raise ValueError('m and s must be 2 digits')

    try:
        return datetime.time(int(hour), int(minute), int(second))
    except ValueError:
        # Out-of-range components (e.g. hour 99) surface here.
        raise ValueError('Invalid time {}. {}'.format(time_str,
                                                      VALID_TIME_FORMATS_TEXT))
Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a date Returns: datetime.time object Raises: ValueError if the input string does not have a valid format.
Below is the the instruction that describes the task: ### Input: Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a date Returns: datetime.time object Raises: ValueError if the input string does not have a valid format. ### Response: def timestr2time(time_str): ''' Turns a string into a datetime.time object. This will only work if the format can be "guessed", so the string must have one of the formats from VALID_TIME_FORMATS_TEXT. Args: time_str (str) a string that represents a date Returns: datetime.time object Raises: ValueError if the input string does not have a valid format. ''' if any(c not in '0123456789:' for c in time_str): raise ValueError('Illegal character in time string') if time_str.count(':') == 2: h, m, s = time_str.split(':') elif time_str.count(':') == 1: h, m = time_str.split(':') s = '00' elif len(time_str) == 6: h = time_str[:2] m = time_str[2:4] s = time_str[4:] else: raise ValueError('Time format not recognised. {}'.format( VALID_TIME_FORMATS_TEXT)) if len(m) == 2 and len(s) == 2: mins = int(m) sec = int(s) else: raise ValueError('m and s must be 2 digits') try: return datetime.time(int(h), mins, sec) except ValueError: raise ValueError('Invalid time {}. {}'.format(time_str, VALID_TIME_FORMATS_TEXT))
def p_qualifierType_2(p):
    """qualifierType_2 : ':' dataType
                       | ':' dataType defaultValue
                       """
    # NOTE: the docstring above is the PLY grammar rule and must not be
    # edited casually.  The default value only exists in the longer
    # production (len(p) == 4 means three symbols were matched).
    default = p[3] if len(p) == 4 else None
    p[0] = (p[2], False, None, default)
qualifierType_2 : ':' dataType | ':' dataType defaultValue
Below is the the instruction that describes the task: ### Input: qualifierType_2 : ':' dataType | ':' dataType defaultValue ### Response: def p_qualifierType_2(p): """qualifierType_2 : ':' dataType | ':' dataType defaultValue """ dv = None if len(p) == 4: dv = p[3] p[0] = (p[2], False, None, dv)
def _encode_penman(self, g, top=None):
    """
    Walk graph g and find a spanning dag, then serialize the result.

    First, depth-first traversal of preferred orientations (whether
    true or inverted) to create graph p. If any triples remain, select
    the first remaining triple whose source in the dispreferred
    orientation exists in p, where 'first' is determined by the order
    of inserted nodes (i.e. a topological sort). Add this triple, then
    repeat the depth-first traversal of preferred orientations from its
    target. Repeat until no triples remain, or raise an error if there
    are no candidates in the dispreferred orientation (which likely
    means the graph is disconnected).
    """
    if top is None:
        top = g.top
    remaining = set(g.triples())
    variables = g.variables()
    # Index every triple twice: under the node it hangs from in its
    # preferred orientation (slot 0) and, flipped, under the opposite
    # node (slot 1, the dispreferred orientation).
    store = defaultdict(lambda: ([], []))  # (preferred, dispreferred)
    for t in g.triples():
        if t.inverted:
            store[t.target][0].append(t)
            store[t.source][1].append(Triple(*t, inverted=False))
        else:
            store[t.source][0].append(t)
            store[t.target][1].append(Triple(*t, inverted=True))
    p = defaultdict(list)  # the spanning dag: node -> attached triples
    topolist = [top]  # nodes in insertion order (topological sort proxy)
    def _update(t):
        # Move t from `remaining` into the spanning dag; return the
        # target node when traversal should continue from it, else None.
        src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
        p[src].append(t)
        remaining.remove(t)
        # Only recurse into variable targets; TYPE_REL triples are
        # node labels, not edges to descend.
        if tgt in variables and t.relation != self.TYPE_REL:
            topolist.append(tgt)
            return tgt
        return None
    def _explore_preferred(src):
        # Depth-first walk over preferred-orientation triples from src.
        ts = store.get(src, ([], []))[0]
        for t in ts:
            if t in remaining:
                tgt = _update(t)
                if tgt is not None:
                    _explore_preferred(tgt)
        ts[:] = []  # clear explored list
    _explore_preferred(top)
    while remaining:
        # Preferred orientations are exhausted; fall back to flipped
        # (dispreferred) triples, favoring earlier-inserted nodes.
        flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
        for fc in flip_candidates:
            fc[:] = [c for c in fc if c in remaining]  # clear superfluous
        if not any(len(fc) > 0 for fc in flip_candidates):
            raise EncodeError('Invalid graph; possibly disconnected.')
        c = next(c for fc in flip_candidates for c in fc)
        tgt = _update(c)
        if tgt is not None:
            _explore_preferred(tgt)
    return self._layout(p, top, 0, set())
Walk graph g and find a spanning dag, then serialize the result. First, depth-first traversal of preferred orientations (whether true or inverted) to create graph p. If any triples remain, select the first remaining triple whose source in the dispreferred orientation exists in p, where 'first' is determined by the order of inserted nodes (i.e. a topological sort). Add this triple, then repeat the depth-first traversal of preferred orientations from its target. Repeat until no triples remain, or raise an error if there are no candidates in the dispreferred orientation (which likely means the graph is disconnected).
Below is the the instruction that describes the task: ### Input: Walk graph g and find a spanning dag, then serialize the result. First, depth-first traversal of preferred orientations (whether true or inverted) to create graph p. If any triples remain, select the first remaining triple whose source in the dispreferred orientation exists in p, where 'first' is determined by the order of inserted nodes (i.e. a topological sort). Add this triple, then repeat the depth-first traversal of preferred orientations from its target. Repeat until no triples remain, or raise an error if there are no candidates in the dispreferred orientation (which likely means the graph is disconnected). ### Response: def _encode_penman(self, g, top=None): """ Walk graph g and find a spanning dag, then serialize the result. First, depth-first traversal of preferred orientations (whether true or inverted) to create graph p. If any triples remain, select the first remaining triple whose source in the dispreferred orientation exists in p, where 'first' is determined by the order of inserted nodes (i.e. a topological sort). Add this triple, then repeat the depth-first traversal of preferred orientations from its target. Repeat until no triples remain, or raise an error if there are no candidates in the dispreferred orientation (which likely means the graph is disconnected). 
""" if top is None: top = g.top remaining = set(g.triples()) variables = g.variables() store = defaultdict(lambda: ([], [])) # (preferred, dispreferred) for t in g.triples(): if t.inverted: store[t.target][0].append(t) store[t.source][1].append(Triple(*t, inverted=False)) else: store[t.source][0].append(t) store[t.target][1].append(Triple(*t, inverted=True)) p = defaultdict(list) topolist = [top] def _update(t): src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2]) p[src].append(t) remaining.remove(t) if tgt in variables and t.relation != self.TYPE_REL: topolist.append(tgt) return tgt return None def _explore_preferred(src): ts = store.get(src, ([], []))[0] for t in ts: if t in remaining: tgt = _update(t) if tgt is not None: _explore_preferred(tgt) ts[:] = [] # clear explored list _explore_preferred(top) while remaining: flip_candidates = [store.get(v, ([],[]))[1] for v in topolist] for fc in flip_candidates: fc[:] = [c for c in fc if c in remaining] # clear superfluous if not any(len(fc) > 0 for fc in flip_candidates): raise EncodeError('Invalid graph; possibly disconnected.') c = next(c for fc in flip_candidates for c in fc) tgt = _update(c) if tgt is not None: _explore_preferred(tgt) return self._layout(p, top, 0, set())
def get_previous_month(self):
    """Return the (start, end) date range covering the previous full month."""
    # Stepping one day back from the start of the current month lands
    # on the last day of the previous month.
    last_day = utils.get_month_start() - relativedelta(days=1)
    end = utils.to_datetime(last_day)
    return utils.get_month_start(end), end
Returns date range for the previous full month.
Below is the the instruction that describes the task: ### Input: Returns date range for the previous full month. ### Response: def get_previous_month(self): """Returns date range for the previous full month.""" end = utils.get_month_start() - relativedelta(days=1) end = utils.to_datetime(end) start = utils.get_month_start(end) return start, end