code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def _add(self, xer, primary, type):
    """
    Private method for adding a descriptor from the event loop.

    It takes care of adding it if new or modifying it if already added
    for another state (read -> read/write for example).

    :param xer: the descriptor (reader/writer) to track
    :param primary: dict mapping descriptors to their notifiers
    :param type: notifier type constant forwarded to TwistedSocketNotifier
    """
    # Only create a notifier for descriptors not tracked yet.
    # NOTE(review): the "modify existing" case mentioned above is not
    # visible in this snippet -- presumably handled by the notifier itself.
    if xer not in primary:
        primary[xer] = TwistedSocketNotifier(None, self, xer, type)
Private method for adding a descriptor from the event loop. It takes care of adding it if new or modifying it if already added for another state (read -> read/write for example).
def _logged_in_successful(data): """ Test the login status from the returned communication of the server. :param data: bytes received from server during login :type data: list of bytes :return boolean, True when you are logged in. """ if re.match(r'^:(testserver\.local|tmi\.twitch\.tv)' r' NOTICE \* :' r'(Login unsuccessful|Error logging in)*$', data.strip()): return False else: return True
Test the login status from the returned communication of the server. :param data: bytes received from server during login :type data: list of bytes :return boolean, True when you are logged in.
async def FinishActions(self, results):
    '''
    results : typing.Sequence[~ActionExecutionResult]
    Returns -> typing.Sequence[~ErrorResult]
    '''
    # Build the Uniter facade RPC message (version 5) in one literal.
    msg = {
        'type': 'Uniter',
        'request': 'FinishActions',
        'version': 5,
        'params': {'results': results},
    }
    return await self.rpc(msg)
results : typing.Sequence[~ActionExecutionResult] Returns -> typing.Sequence[~ErrorResult]
def fmap(order, aij, bij, x, y):
    """Evaluate the 2D polynomial transformation.

    u = sum[i=0:order]( sum[j=0:i]( a_ij * x**(i - j) * y**j ))
    v = sum[i=0:order]( sum[j=0:i]( b_ij * x**(i - j) * y**j ))

    Parameters
    ----------
    order : int
        Order of the polynomial transformation.
    aij : numpy array
        Polynomial coefficients corresponding to a_ij.
    bij : numpy array
        Polynomial coefficients corresponding to b_ij.
    x : numpy array or float
        X coordinate values where the transformation is computed.
        Note that these values correspond to array indices.
    y : numpy array or float
        Y coordinate values where the transformation is computed.
        Note that these values correspond to array indices.

    Returns
    -------
    u : numpy array or float
        U coordinate values.
    v : numpy array or float
        V coordinate values.
    """
    # BUG FIX: use float accumulators. x/y are typically integer index
    # arrays, and an integer zeros_like() made the in-place '+=' below
    # fail (same-kind casting) or truncate the float coefficient products.
    u = np.zeros_like(x, dtype=float)
    v = np.zeros_like(y, dtype=float)
    # aij/bij are flattened in the order (i, j) with j <= i.
    k = 0
    for i in range(order + 1):
        for j in range(i + 1):
            u += aij[k] * (x ** (i - j)) * (y ** j)
            v += bij[k] * (x ** (i - j)) * (y ** j)
            k += 1
    return u, v
Evaluate the 2D polynomial transformation. u = sum[i=0:order]( sum[j=0:j]( a_ij * x**(i - j) * y**j )) v = sum[i=0:order]( sum[j=0:j]( b_ij * x**(i - j) * y**j )) Parameters ---------- order : int Order of the polynomial transformation. aij : numpy array Polynomial coefficents corresponding to a_ij. bij : numpy array Polynomial coefficents corresponding to b_ij. x : numpy array or float X coordinate values where the transformation is computed. Note that these values correspond to array indices. y : numpy array or float Y coordinate values where the transformation is computed. Note that these values correspond to array indices. Returns ------- u : numpy array or float U coordinate values. v : numpy array or float V coordinate values.
def register_magics(store_name='_ampl_cells', ampl_object=None):
    """
    Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.

    Args:
        store_name: Name of the store where ``%%ampl`` cells will be stored.
        ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
    """
    # imported lazily so the module works outside IPython as well
    from IPython.core.magic import (
        Magics, magics_class, cell_magic, line_magic
    )

    @magics_class
    class StoreAMPL(Magics):
        def __init__(self, shell=None, **kwargs):
            Magics.__init__(self, shell=shell, **kwargs)
            self._store = []
            # expose the store in the user namespace under `store_name`
            shell.user_ns[store_name] = self._store

        @cell_magic
        def ampl(self, line, cell):
            """Store the cell in the store"""
            self._store.append(cell)

        @cell_magic
        def ampl_eval(self, line, cell):
            """Evaluate the cell"""
            ampl_object.eval(cell)

        @line_magic
        def get_ampl(self, line):
            """Retrieve the store"""
            return self._store

    # attach the magics to the running IPython shell
    get_ipython().register_magics(StoreAMPL)
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl cells`` will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
def timelimit(timeout):
    """Decorator that aborts the wrapped call after ``timeout`` seconds.

    (borrowed from web.py; modernized for Python 3)

    The decorated function runs in a daemon thread. If it has not finished
    within ``timeout`` seconds a ``TimeoutError`` is raised in the caller;
    any exception raised by the function is re-raised with its original
    traceback.

    :param timeout: maximum number of seconds to wait for the call
    """
    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    # daemon so a hung call cannot block interpreter exit
                    self.daemon = True  # setDaemon() is deprecated
                    self.start()

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except Exception:
                        # captured for re-raise in the calling thread
                        self.error = sys.exc_info()

            c = Dispatch()
            c.join(timeout)
            if c.is_alive():  # isAlive() was removed in Python 3.9
                # FIX: Python 2 `raise TimeoutError, '...'` syntax
                raise TimeoutError('took too long')
            if c.error:
                # FIX: Python 2 two-arg raise; keep the worker's traceback
                raise c.error[1].with_traceback(c.error[2])
            return c.result
        return _2
    return _1
borrowed from web.py
def to_df_CSV(self, tempfile: str=None, tempkeep: bool=False, **kwargs) -> 'pd.DataFrame':
    """
    Export this SAS Data Set to a Pandas Data Frame via CSV file.

    :param tempfile: [optional] OS path of a file to hold the local CSV;
        default is a temporary file that is cleaned up afterwards
    :param tempkeep: when you supply your own tempfile=, controls whether
        that file is cleaned up after use
    :param kwargs: forwarded unchanged to ``to_df``
    :return: Pandas data frame
    :rtype: 'pd.DataFrame'
    """
    # Thin wrapper: delegate to the generic exporter with the CSV method.
    frame = self.to_df(method='CSV', tempfile=tempfile, tempkeep=tempkeep, **kwargs)
    return frame
Export this SAS Data Set to a Pandas Data Frame via CSV file :param tempfile: [optional] an OS path for a file to use for the local CSV file; default is a temporary file that's cleaned up :param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it :param kwargs: :return: Pandas data frame :rtype: 'pd.DataFrame'
def getvalue(x):
    """Return the single value of x or raise TypeError if more than one value.

    :param x: a possibly-repeating value container (see ``isrepeating``).
    :raises TypeError: when ``x`` holds more than one value.
    :return: the first value yielded by ``getvalues(x)``; implicitly
        None when ``x`` is empty.
    """
    if isrepeating(x):
        raise TypeError(
            "Ambiguous call to getvalue for %r which has more than one value."
            % x)

    # return the first (and only) value; falls through to None if empty
    for value in getvalues(x):
        return value
Return the single value of x or raise TypeError if more than one value.
def get_annotated_lines(self):
    """Helper function that returns lines with extra information.

    Wraps every source line in a ``Line`` object, marks the lines that
    belong to the current frame's function (``in_frame``) and flags the
    currently executing line (``current``).
    """
    lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]

    # find function definition and mark lines
    if hasattr(self.code, 'co_firstlineno'):
        lineno = self.code.co_firstlineno - 1
        # walk backwards until a line matching a def/lambda pattern is found
        while lineno > 0:
            if _funcdef_re.match(lines[lineno].code):
                break
            lineno -= 1
        try:
            # length (in lines) of the block starting at the definition
            offset = len(inspect.getblock([x.code + '\n' for x in lines[lineno:]]))
        except TokenError:
            # tokenizer failed (e.g. incomplete source); mark nothing
            offset = 0
        for line in lines[lineno:lineno + offset]:
            line.in_frame = True

    # mark current line; IndexError guards against out-of-range lineno
    try:
        lines[self.lineno - 1].current = True
    except IndexError:
        pass
    return lines
Helper function that returns lines with extra information.
def tweakback(drzfile, input=None, origwcs = None,
              newname = None, wcsname = None,
              extname='SCI', force=False, verbose=False):
    """
    Apply WCS solution recorded in drizzled file to distorted input images
    (``_flt.fits`` files) used to create the drizzled file.

    This task relies on the original WCS and updated WCS to be recorded in
    the drizzled image's header as the last 2 alternate WCSs.

    Parameters
    ----------
    drzfile : str (Default = '')
        filename of undistorted image which contains the new WCS
        and WCS prior to being updated

    newname : str (Default = None)
        Value of ``WCSNAME`` to be used to label the updated solution in the
        output (eq., ``_flt.fits``) files.  If left blank or None, it will
        default to using the current ``WCSNAME`` value from the input drzfile.

    input : str (Default = '')
        filenames of distorted images to be updated using new WCS
        from 'drzfile'.  These can be provided either as an ``@-file``,
        a comma-separated list of filenames or using wildcards.

        .. note:: A blank value will indicate that the task should derive the
           filenames from the 'drzfile' itself, if possible. The filenames will
           be derived from the ``D*DATA`` keywords written out by
           ``AstroDrizzle``. If they can not be found, the task will quit.

    origwcs : str (Default = None)
        Value of ``WCSNAME`` keyword prior to the drzfile image being updated
        by ``TweakReg``.  If left blank or None, it will default to using the
        second to last ``WCSNAME*`` keyword value found in the header.

    wcsname : str (Default = None)
        Value of WCSNAME for updated solution written out by ``TweakReg`` as
        specified by the ``wcsname`` parameter from ``TweakReg``.  If this is
        left blank or None, it will default to the current ``WCSNAME`` value
        from the input drzfile.

    extname : str (Default = 'SCI')
        Name of extension in ``input`` files to be updated with new WCS

    force : bool (Default = False)
        This parameter specifies whether or not to force an update of the WCS
        even though WCS already exists with this solution or ``wcsname``.

    verbose : bool (Default = False)
        This parameter specifies whether or not to print out additional
        messages during processing.

    Notes
    -----
    The algorithm used by this function is based on linearization of the exact
    compound operator that converts input image coordinates to the
    coordinates (in the input image) that would result in alignment with the
    new drizzled image WCS.

    If no input distorted files are specified as input, this task will attempt
    to generate the list of filenames from the drizzled input file's own
    header.

    EXAMPLES
    --------
    An image named ``acswfc_mos2_drz.fits`` was created from 4 images using
    astrodrizzle. This drizzled image was then aligned to another image using
    tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``.
    The new WCS can then be used to update each of the 4 images that were
    combined to make up this drizzled image using:

    >>> from drizzlepac import tweakback
    >>> tweakback.tweakback('acswfc_mos2_drz.fits')

    If the same WCS should be applied to a specific set of images, those images
    can be updated using:

    >>> tweakback.tweakback('acswfc_mos2_drz.fits',
    ...                     input='img_mos2a_flt.fits,img_mos2e_flt.fits')

    See Also
    --------
    stwcs.wcsutil.altwcs: Alternate WCS implementation

    """
    print("TweakBack Version {:s}({:s}) started at: {:s}\n"
          .format(__version__,__version_date__,util._ptime()[0]))

    # Interpret input list/string into list of filename(s)
    fltfiles = parseinput.parseinput(input)[0]

    if fltfiles is None or len(fltfiles) == 0:
        # try to extract the filenames from the drizzled file's header
        fltfiles = extract_input_filenames(drzfile)
        if fltfiles is None:
            print('*'*60)
            print('*')
            print('* ERROR:')
            print('* No input filenames found! ')
            print('* Please specify "fltfiles" or insure that input drizzled')
            print('* image contains D*DATA keywords. ')
            print('*')
            print('*'*60)
            raise ValueError

    if not isinstance(fltfiles,list):
        fltfiles = [fltfiles]

    sciext = determine_extnum(drzfile, extname='SCI')
    scihdr = fits.getheader(drzfile, ext=sciext, memmap=False)

    ### Step 1: Read in updated and original WCS solutions
    # determine keys for all alternate WCS solutions in drizzled image header
    wkeys = wcsutil.altwcs.wcskeys(drzfile, ext=sciext)
    wnames = wcsutil.altwcs.wcsnames(drzfile, ext=sciext)
    if not util.is_blank(newname):
        final_name = newname
    else:
        final_name = wnames[wkeys[-1]]

    # Read in HSTWCS objects for final,updated WCS and previous WCS from
    # from drizzled image header
    # The final solution also serves as reference WCS when using updatehdr
    if not util.is_blank(wcsname):
        for k in wnames:
            if wnames[k] == wcsname:
                wcskey = k
                break
    else:
        wcskey = wkeys[-1]
    # NOTE(review): `wcskey` computed above is never used -- the final WCS
    # is always read with wkeys[-1]; confirm whether this is intentional.
    # NOTE(review): the `force` parameter is not referenced in this body.
    final_wcs = wcsutil.HSTWCS(drzfile, ext=sciext, wcskey=wkeys[-1])

    if not util.is_blank(origwcs):
        for k in wnames:
            if wnames[k] == origwcs:
                orig_wcskey = k
                orig_wcsname = origwcs
                break
    else:
        orig_wcsname,orig_wcskey = determine_orig_wcsname(scihdr,wnames,wkeys)

    orig_wcs = wcsutil.HSTWCS(drzfile,ext=sciext,wcskey=orig_wcskey)

    # read in RMS values reported for new solution
    crderr1kw = 'CRDER1'+wkeys[-1]
    crderr2kw = 'CRDER2'+wkeys[-1]

    if crderr1kw in scihdr:
        crderr1 = fits.getval(drzfile, crderr1kw, ext=sciext, memmap=False)
    else:
        crderr1 = 0.0

    if crderr2kw in scihdr:
        crderr2 = fits.getval(drzfile, crderr2kw, ext=sciext, memmap=False)
    else:
        crderr2 = 0.0

    del scihdr

    ### Step 2: Apply solution to input file headers
    for fname in fltfiles:
        logstr = "....Updating header for {:s}...".format(fname)
        if verbose:
            print("\n{:s}\n".format(logstr))
        else:
            log.info(logstr)

        # reset header WCS keywords to original (OPUS generated) values
        imhdulist = fits.open(fname, mode='update', memmap=False)
        extlist = get_ext_list(imhdulist, extname='SCI')
        if not extlist:
            extlist = [0]

        # insure that input PRIMARY WCS has been archived before overwriting
        # with new solution
        wcsutil.altwcs.archiveWCS(imhdulist, extlist, reusekey=True)

        # Process MEF images...
        for ext in extlist:
            logstr = "Processing {:s}[{:s}]".format(imhdulist.filename(),
                                                    ext2str(ext))
            if verbose:
                print("\n{:s}\n".format(logstr))
            else:
                log.info(logstr)
            chip_wcs = wcsutil.HSTWCS(imhdulist, ext=ext)

            update_chip_wcs(chip_wcs, orig_wcs, final_wcs,
                            xrms=crderr1, yrms = crderr2)

            # Update FITS file with newly updated WCS for this chip
            extnum = imhdulist.index(imhdulist[ext])
            updatehdr.update_wcs(imhdulist, extnum, chip_wcs,
                                 wcsname=final_name, reusename=False,
                                 verbose=verbose)
        imhdulist.close()
Apply WCS solution recorded in drizzled file to distorted input images (``_flt.fits`` files) used to create the drizzled file. This task relies on the original WCS and updated WCS to be recorded in the drizzled image's header as the last 2 alternate WCSs. Parameters ---------- drzfile : str (Default = '') filename of undistorted image which contains the new WCS and WCS prior to being updated newname : str (Default = None) Value of ``WCSNAME`` to be used to label the updated solution in the output (eq., ``_flt.fits``) files. If left blank or None, it will default to using the current ``WCSNAME`` value from the input drzfile. input : str (Default = '') filenames of distorted images to be updated using new WCS from 'drzfile'. These can be provided either as an ``@-file``, a comma-separated list of filenames or using wildcards. .. note:: A blank value will indicate that the task should derive the filenames from the 'drzfile' itself, if possible. The filenames will be derived from the ``D*DATA`` keywords written out by ``AstroDrizzle``. If they can not be found, the task will quit. origwcs : str (Default = None) Value of ``WCSNAME`` keyword prior to the drzfile image being updated by ``TweakReg``. If left blank or None, it will default to using the second to last ``WCSNAME*`` keyword value found in the header. wcsname : str (Default = None) Value of WCSNAME for updated solution written out by ``TweakReg`` as specified by the `wcsname` parameter from ``TweakReg``. If this is left blank or `None`, it will default to the current ``WCSNAME`` value from the input drzfile. extname : str (Default = 'SCI') Name of extension in `input` files to be updated with new WCS force : bool (Default = False) This parameters specified whether or not to force an update of the WCS even though WCS already exists with this solution or `wcsname`? verbose : bool (Default = False) This parameter specifies whether or not to print out additional messages during processing. 
Notes ----- The algorithm used by this function is based on linearization of the exact compound operator that converts input image coordinates to the coordinates (in the input image) that would result in alignment with the new drizzled image WCS. If no input distorted files are specified as input, this task will attempt to generate the list of filenames from the drizzled input file's own header. EXAMPLES -------- An image named ``acswfc_mos2_drz.fits`` was created from 4 images using astrodrizzle. This drizzled image was then aligned to another image using tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``. The new WCS can then be used to update each of the 4 images that were combined to make up this drizzled image using: >>> from drizzlepac import tweakback >>> tweakback.tweakback('acswfc_mos2_drz.fits') If the same WCS should be applied to a specific set of images, those images can be updated using: >>> tweakback.tweakback('acswfc_mos2_drz.fits', ... input='img_mos2a_flt.fits,img_mos2e_flt.fits') See Also -------- stwcs.wcsutil.altwcs: Alternate WCS implementation
def failed_login_limit_reached(self):
    """Return True when the failed-login count has reached the limit (10)."""
    login_limit = 10
    # True only when a failed-login count exists and meets the limit;
    # bool() normalizes the short-circuit result (None/0 -> False).
    return bool(self.failed_logins and self.failed_logins >= login_limit)
A boolean method to check for failed login limit being reached
def addarchive(self, name):
    """
    Add (i.e. copy) the contents of another tarball to this one.

    :param name: File path to the tar archive.
    :type name: unicode | str
    """
    with tarfile.open(name, 'r') as st:
        for member in st.getmembers():
            # copy each member's header and data stream into our archive;
            # NOTE(review): extractfile() returns None for non-regular
            # members (directories, links) -- confirm addfile handles that.
            self.tarfile.addfile(member, st.extractfile(member.name))
Add (i.e. copy) the contents of another tarball to this one. :param name: File path to the tar archive. :type name: unicode | str
def _filter(request, object_, tags=None, more=False, orderby='created'):
    """Filters Piece objects from self based on filters, search, and range

    :param tags: List of tag IDs (and/or search strings) to filter
    :type tags: list
    :param more: Returns more of the same filtered set of images based
        on session range
    :type more: bool
    :return: JsonResponse wrapping the serialized, filtered objects
    """
    res = Result()
    models = QUERY_MODELS

    idDict = {}
    objDict = {}
    data = {}
    modelmap = {}
    length = 75  # page size

    # -- Get all IDs for each model
    for m in models:
        modelmap[m.model_class()] = m.model
        if object_:
            idDict[m.model] = m.model_class().objects.filter(gallery=object_)
        else:
            idDict[m.model] = m.model_class().objects.all()
        if idDict[m.model] is None:
            continue

        if tags:
            for bucket in tags:
                searchQuery = ""
                o = None
                for item in bucket:
                    if item == 0:
                        # -- filter by tagless
                        # NOTE(review): this annotate() result is discarded;
                        # the annotation is re-applied below when `o` is used
                        idDict[m.model].annotate(num_tags=Count('tags'))
                        if not o:
                            o = Q()
                        o |= Q(num_tags__lte=1)
                        break
                    elif isinstance(item, six.integer_types):
                        # -- filter by tag
                        if not o:
                            o = Q()
                        o |= Q(tags__id=item)
                    else:
                        # -- add to search string
                        searchQuery += item + ' '
                        if not HAYSTACK:
                            if not o:
                                o = Q()
                            # -- use a basic search
                            o |= Q(title__icontains=item)
                if HAYSTACK and searchQuery != "":
                    # -- once all tags have been filtered, filter by search
                    searchIDs = search(searchQuery, m.model_class())
                    if searchIDs:
                        if not o:
                            o = Q()
                        o |= Q(id__in=searchIDs)
                if o:
                    # -- apply the filters
                    idDict[m.model] = idDict[m.model].annotate(num_tags=Count('tags')).filter(o)
                else:
                    idDict[m.model] = idDict[m.model].none()

        # -- Get all ids of filtered objects, this will be a very fast query
        idDict[m.model] = list(idDict[m.model].order_by('-{}'.format(orderby)).values_list('id', flat=True))
        lastid = request.session.get('last_{}'.format(m.model), 0)
        if not idDict[m.model]:
            continue
        if not more:
            lastid = idDict[m.model][0]
        index = idDict[m.model].index(lastid)
        if more and lastid != 0:
            index += 1
        idDict[m.model] = idDict[m.model][index:index + length]

        # -- perform the main query to retrieve the objects we want
        objDict[m.model] = m.model_class().objects.filter(id__in=idDict[m.model])
        objDict[m.model] = objDict[m.model].select_related('author').prefetch_related('tags').order_by('-{}'.format(orderby))
        objDict[m.model] = list(objDict[m.model])

    # -- combine and sort all objects by date
    # BUG FIX: dict.values() is a view on Python 3 and cannot be indexed;
    # wrap it in list() before taking the first (and only) element.
    if len(models) > 1:
        objects = _sortObjects(orderby, **objDict)
    else:
        objects = list(objDict.values())[0]
    objects = objects[:length]

    # -- Find out last ids so subsequent "more" requests can resume
    lastids = {}
    for obj in objects:
        lastids['last_{}'.format(modelmap[obj.__class__])] = obj.id
    for key, value in lastids.items():
        request.session[key] = value

    # -- serialize objects
    for i in objects:
        res.append(i.json())

    data['count'] = len(objects)
    if settings.DEBUG:
        data['queries'] = connection.queries

    res.value = data
    return JsonResponse(res.asDict())
Filters Piece objects from self based on filters, search, and range :param tags: List of tag IDs to filter :type tags: list :param more -- bool, Returns more of the same filtered set of images based on session range return list, Objects filtered
def post(self):
    """Register a new model (models).

    Reads JSON metadata from the request body, stores it in the handler's
    database under a freshly generated uuid, and writes that uuid back as
    the JSON response.
    """
    self.set_header("Content-Type", "application/json")
    # generate a unique key for the new model record
    key = uuid.uuid4().hex
    metadata = json.loads(self.request.body.decode())
    metadata["uuid"] = key
    self.database[key] = metadata
    # respond with the key so clients can reference the model later
    result = json.dumps({"uuid": key})
    self.write(result)
Register a new model (models)
def find_embedding(elt, embedding=None):
    """Try to get elt embedding elements.

    :param embedding: embedding element. Must have a module.
    :return: a list of [module [,class]*] embedding elements which define elt.
    :rtype: list
    """
    result = []  # result is empty in the worst case

    # start to get module
    module = getmodule(elt)

    if module is not None:  # if module exists

        visited = set()  # cache to avoid to visit twice same element

        if embedding is None:
            embedding = module

        # list of compounds elements which construct the path to elt
        compounds = [embedding]

        while compounds:  # while compounds elements exist
            # get last compound
            last_embedding = compounds[-1]
            # stop to iterate on compounds when last embedding is elt
            if last_embedding == elt:
                result = compounds  # result is compounds
                break

            else:
                # search among embedded elements
                for name in dir(last_embedding):
                    # get embedded element
                    embedded = getattr(last_embedding, name)

                    try:  # check if embedded has already been visited
                        if embedded not in visited:
                            visited.add(embedded)  # set it as visited

                        else:
                            continue

                    except TypeError:
                        # unhashable objects cannot be cached; visit anyway
                        pass

                    else:
                        # get embedded module
                        embedded_module = getmodule(embedded)
                        # and compare it with elt module
                        if embedded_module is module:
                            # add embedded to compounds
                            compounds.append(embedded)
                            # end the second loop (depth-first descent)
                            break

                else:
                    # remove last element if no compound element is found
                    compounds.pop(-1)

    return result
Try to get elt embedding elements. :param embedding: embedding element. Must have a module. :return: a list of [module [,class]*] embedding elements which define elt. :rtype: list
def _plot_estimate(
    cls,
    estimate=None,
    confidence_intervals=None,
    loc=None,
    iloc=None,
    show_censors=False,
    censor_styles=None,
    ci_legend=False,
    ci_force_lines=False,
    ci_alpha=0.25,
    ci_show=True,
    at_risk_counts=False,
    **kwargs
):
    """
    Plots a pretty figure of {0}.{1}

    Matplotlib plot arguments can be passed in inside the kwargs, plus

    Parameters
    -----------
    show_censors: bool
        place markers at censorship events. Default: False
    censor_styles: bool
        If show_censors, this dictionary will be passed into the plot call.
    ci_alpha: bool
        the transparency level of the confidence interval. Default: 0.3
    ci_force_lines: bool
        force the confidence intervals to be line plots (versus default
        shaded areas). Default: False
    ci_show: bool
        show confidence intervals. Default: True
    ci_legend: bool
        if ci_force_lines is True, this is a boolean flag to add the lines'
        labels to the legend. Default: False
    at_risk_counts: bool
        show group sizes at time points. See function ``add_at_risk_counts``
        for details. Default: False
    loc: slice
        specify a time-based subsection of the curves to plot, ex:

        >>> model.plot(loc=slice(0.,10.))

        will plot the time values between t=0. and t=10.
    iloc: slice
        specify a location-based subsection of the curves to plot, ex:

        >>> model.plot(iloc=slice(0,10))

        will plot the first 10 time points.

    Returns
    -------
    ax:
        a pyplot axis object
    """
    # bundle all plotting state (axis, colour, label, estimate frame, ...)
    plot_estimate_config = PlotEstimateConfig(
        cls, estimate, confidence_intervals, loc, iloc, show_censors, censor_styles, **kwargs
    )

    dataframe_slicer = create_dataframe_slicer(iloc, loc)

    if show_censors and cls.event_table["censored"].sum() > 0:
        # default censor marker style, overridable via censor_styles
        cs = {"marker": "+", "ms": 12, "mew": 1}
        cs.update(plot_estimate_config.censor_styles)
        times = dataframe_slicer(cls.event_table.loc[(cls.event_table["censored"] > 0)]).index.values.astype(float)
        v = cls.predict(times)
        plot_estimate_config.ax.plot(times, v, linestyle="None", color=plot_estimate_config.colour, **cs)

    # main estimate curve; rename() consumes 'label' from kwargs exactly once
    dataframe_slicer(plot_estimate_config.estimate_).rename(
        columns=lambda _: plot_estimate_config.kwargs.pop("label")
    ).plot(**plot_estimate_config.kwargs)

    # plot confidence intervals
    if ci_show:
        if ci_force_lines:
            dataframe_slicer(plot_estimate_config.confidence_interval_).plot(
                linestyle="-",
                linewidth=1,
                color=[plot_estimate_config.colour],
                legend=ci_legend,
                drawstyle=plot_estimate_config.kwargs["drawstyle"],
                ax=plot_estimate_config.ax,
                alpha=0.6,
            )
        else:
            x = dataframe_slicer(plot_estimate_config.confidence_interval_).index.values.astype(float)
            lower = dataframe_slicer(plot_estimate_config.confidence_interval_.filter(like="lower")).values[:, 0]
            upper = dataframe_slicer(plot_estimate_config.confidence_interval_.filter(like="upper")).values[:, 0]

            # translate matplotlib drawstyle into fill_between's step kind
            if plot_estimate_config.kwargs["drawstyle"] == "default":
                step = None
            elif plot_estimate_config.kwargs["drawstyle"].startswith("step"):
                step = plot_estimate_config.kwargs["drawstyle"].replace("steps-", "")
            # NOTE(review): `step` is unbound if drawstyle is neither
            # "default" nor "step*" -- confirm upstream guarantees this.

            plot_estimate_config.ax.fill_between(
                x, lower, upper, alpha=ci_alpha, color=plot_estimate_config.colour, linewidth=1.0, step=step
            )

    if at_risk_counts:
        add_at_risk_counts(cls, ax=plot_estimate_config.ax)

    return plot_estimate_config.ax
Plots a pretty figure of {0}.{1} Matplotlib plot arguments can be passed in inside the kwargs, plus Parameters ----------- show_censors: bool place markers at censorship events. Default: False censor_styles: bool If show_censors, this dictionary will be passed into the plot call. ci_alpha: bool the transparency level of the confidence interval. Default: 0.3 ci_force_lines: bool force the confidence intervals to be line plots (versus default shaded areas). Default: False ci_show: bool show confidence intervals. Default: True ci_legend: bool if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False at_risk_counts: bool show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False loc: slice specify a time-based subsection of the curves to plot, ex: >>> model.plot(loc=slice(0.,10.)) will plot the time values between t=0. and t=10. iloc: slice specify a location-based subsection of the curves to plot, ex: >>> model.plot(iloc=slice(0,10)) will plot the first 10 time points. Returns ------- ax: a pyplot axis object
def start_container(self):
    """Add a node to the tree that represents the start of a container.

    Until end_container is called, any nodes added through add_scalar_value
    or start_container will be children of this new node.
    """
    # remember the length of the container we are leaving, and start
    # counting the new one from zero
    self.__container_lengths.append(self.current_container_length)
    self.current_container_length = 0
    # the new node becomes a child of the current node; push the current
    # node so end_container can restore it later
    child = _Node()
    self.__container_node.add_child(child)
    self.__container_nodes.append(self.__container_node)
    self.__container_node = child
Add a node to the tree that represents the start of a container. Until end_container is called, any nodes added through add_scalar_value or start_container will be children of this new node.
def complete_task_from_id(self, task_id):
    """
    Runs the task with the given id.

    :type  task_id: integer
    :param task_id: The id of the Task object.
    """
    # guard clause: a missing id is a caller error
    if task_id is None:
        raise WorkflowException(self.spec, 'task_id is None')
    # run the first task in the tree whose id matches
    for candidate in self.task_tree:
        if candidate.id == task_id:
            return candidate.complete()
    raise WorkflowException(
        self.spec,
        'A task with the given task_id (%s) was not found' % task_id)
Runs the task with the given id. :type task_id: integer :param task_id: The id of the Task object.
def get_previous_next_published(self, date):
    """
    Returns a dict of the next and previous date periods
    with published entries.

    Keys are 'year', 'month', 'week', 'day'; each value is a 2-item list
    ``[previous, next]`` of ``datetime.date`` objects (or None). The result
    is memoized on ``self.previous_next``.
    """
    previous_next = getattr(self, 'previous_next', None)

    if previous_next is None:
        # normalized period starts for the requested date
        date_year = datetime(date.year, 1, 1)
        date_month = datetime(date.year, date.month, 1)
        date_day = datetime(date.year, date.month, date.day)
        date_next_week = date_day + timedelta(weeks=1)
        previous_next = {'year': [None, None],
                         'week': [None, None],
                         'month': [None, None],
                         'day': [None, None]}
        # all distinct publication days, ascending
        dates = self.get_queryset().datetimes(
            'publication_date', 'day', order='ASC')
        for d in dates:
            d_year = datetime(d.year, 1, 1)
            d_month = datetime(d.year, d.month, 1)
            d_day = datetime(d.year, d.month, d.day)
            # ascending order means earlier dates keep overwriting the
            # "previous" slot, and the first later date fills "next"
            if d_year < date_year:
                previous_next['year'][0] = d_year.date()
            elif d_year > date_year and not previous_next['year'][1]:
                previous_next['year'][1] = d_year.date()
            if d_month < date_month:
                previous_next['month'][0] = d_month.date()
            elif d_month > date_month and not previous_next['month'][1]:
                previous_next['month'][1] = d_month.date()
            if d_day < date_day:
                previous_next['day'][0] = d_day.date()
                # week slots hold the Monday of the relevant week
                previous_next['week'][0] = d_day.date() - timedelta(
                    days=d_day.weekday())
            elif d_day > date_day and not previous_next['day'][1]:
                previous_next['day'][1] = d_day.date()
            if d_day > date_next_week and not previous_next['week'][1]:
                previous_next['week'][1] = d_day.date() - timedelta(
                    days=d_day.weekday())
        # memoize for subsequent calls
        setattr(self, 'previous_next', previous_next)
    return previous_next
Returns a dict of the next and previous date periods with published entries.
def initialize(self):
    '''
    Initialize the handler before requests are called.

    Lazily attaches a shared EventListener to the application, builds the
    salt client dispatch table, and creates a CkMinions matcher -- each
    only once per process.
    '''
    if not hasattr(self.application, 'event_listener'):
        log.debug('init a listener')
        self.application.event_listener = EventListener(
            self.application.mod_opts,
            self.application.opts,
        )

    if not hasattr(self, 'saltclients'):
        local_client = salt.client.get_local_client(mopts=self.application.opts)
        self.saltclients = {
            # not the actual client we'll use.. but its what we'll use to get args
            'local': local_client.run_job_async,
            'local_async': local_client.run_job_async,
            'runner': salt.runner.RunnerClient(opts=self.application.opts).cmd_async,
            # empty, since we use the same client as `runner`
            'runner_async': None,
        }

    if not hasattr(self, 'ckminions'):
        self.ckminions = salt.utils.minions.CkMinions(self.application.opts)
Initialize the handler before requests are called
def change_quantiles(x, ql, qh, isabs, f_agg):
    """
    First fixes a corridor given by the quantiles ql and qh of the
    distribution of x. Then calculates the average, absolute value of
    consecutive changes of the series x inside this corridor.

    Think about selecting a corridor on the y-Axis and only calculating the
    mean of the absolute change of the time series inside this corridor.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param ql: the lower quantile of the corridor
    :type ql: float
    :param qh: the higher quantile of the corridor
    :type qh: float
    :param isabs: should the absolute differences be taken?
    :type isabs: bool
    :param f_agg: the aggregator function that is applied to the differences in the bin
    :type f_agg: str, name of a numpy function (e.g. mean, var, std, median)
    :return: the value of this feature
    :return type: float
    :raises ValueError: if ql >= qh
    """
    if ql >= qh:
        # BUG FIX: the ValueError was constructed but never raised, so an
        # invalid corridor silently fell through to the computation below.
        raise ValueError("ql={} should be lower than qh={}".format(ql, qh))

    div = np.diff(x)
    if isabs:
        div = np.abs(div)
    # All values that originate from the corridor between the quantiles
    # ql and qh will have the category 0, others will be np.NaN
    try:
        bin_cat = pd.qcut(x, [ql, qh], labels=False)
        bin_cat_0 = bin_cat == 0
    except ValueError:
        # Occurs when ql and qh are effectively equal, e.g. x is not long
        # enough or is too categorical
        return 0

    # We only count changes that start and end inside the corridor
    ind = (bin_cat_0 & _roll(bin_cat_0, 1))[1:]
    if sum(ind) == 0:
        return 0
    else:
        ind_inside_corridor = np.where(ind == 1)
        aggregator = getattr(np, f_agg)
        return aggregator(div[ind_inside_corridor])
First fixes a corridor given by the quantiles ql and qh of the distribution of x. Then calculates the average, absolute value of consecutive changes of the series x inside this corridor. Think about selecting a corridor on the y-Axis and only calculating the mean of the absolute change of the time series inside this corridor. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param ql: the lower quantile of the corridor :type ql: float :param qh: the higher quantile of the corridor :type qh: float :param isabs: should the absolute differences be taken? :type isabs: bool :param f_agg: the aggregator function that is applied to the differences in the bin :type f_agg: str, name of a numpy function (e.g. mean, var, std, median) :return: the value of this feature :return type: float
def revokeSystemPermission(self, login, user, perm):
    """
    Revoke a system-level permission from a user (synchronous Thrift RPC wrapper).

    Parameters:
     - login
     - user
     - perm
    """
    # Send the request frame, then block until the matching response arrives.
    self.send_revokeSystemPermission(login, user, perm)
    self.recv_revokeSystemPermission()
Parameters: - login - user - perm
def T6(word, rules):
    '''If a VVV-sequence contains a long vowel, insert a syllable boundary
    between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an],
    [mää.yt.te].'''
    offset = 0

    try:
        # Split off the first syllable; T6 then operates on ``rest`` only.
        # Raises ValueError when the word has no '.' yet.
        WORD, rest = tuple(word.split('.', 1))

        for vvv in long_vowel_sequences(rest):
            i = vvv.start(2)
            vvv = vvv.group(2)
            # Place the boundary after the long vowel if it leads the VVV
            # sequence, otherwise after the first vowel.
            i += (2 if phon.is_long(vvv[:2]) else 1) + offset
            rest = rest[:i] + '.' + rest[i:]
            offset += 1  # each inserted '.' shifts later match positions right

    except ValueError:
        # No pre-existing boundary: operate on the whole word instead.
        WORD = word

        for vvv in long_vowel_sequences(WORD):
            i = vvv.start(2) + 2
            WORD = WORD[:i] + '.' + WORD[i:]

    try:
        WORD += '.' + rest
    except UnboundLocalError:
        # ``rest`` only exists when the original word already had a boundary.
        pass

    rules += ' T6' if word != WORD else ''

    return WORD, rules
If a VVV-sequence contains a long vowel, insert a syllable boundary between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
def sky_fraction(self):
    """
    Fraction of the sky covered by the MOC, as a float in [0, 1].
    """
    # Number of HEALPix cells filled at the deepest resolution.
    n_filled = self._best_res_pixels().size
    # Total number of cells at depth ``max_order``: 12 * 4**max_order
    # (the original spelled this as the bit shift 3 << (2*(max_order+1))).
    n_total = 12 * (4 ** self.max_order)
    return n_filled / float(n_total)
Sky fraction covered by the MOC
def models(self):
    '''
    Generator yielding (model, schema) tuples to create on AWS, in
    dependency order.
    '''
    # Resolve the dependency graph once, then repeatedly pull out a model
    # with no outstanding dependencies until none remain.
    remaining = self._build_all_dependencies()
    model = self._get_model_without_dependencies(remaining)
    while model:
        yield (model, self._models().get(model))
        model = self._get_model_without_dependencies(remaining)
generator to return the tuple of model and its schema to create on aws.
def tokenize(self, string):
    '''
    Maps a string to an iterator over tokens. In other words: [char] -> [token]
    '''
    # Build a fresh PLY lexer per call so tokenization is re-entrant.
    new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
    # latest_newline tracks the offset of the most recent '\n', used below to
    # compute a column number for each token.
    new_lexer.latest_newline = 0
    new_lexer.string_value = None
    new_lexer.input(string)
    while True:
        t = new_lexer.token()
        if t is None:
            break
        t.col = t.lexpos - new_lexer.latest_newline
        yield t
    if new_lexer.string_value is not None:
        # A leftover string_value means a quoted literal was opened but never
        # closed before the input ended.
        raise JsonPathLexerError('Unexpected EOF in string literal or identifier')
Maps a string to an iterator over tokens. In other words: [char] -> [token]
def fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction=0):
    '''
    Return offset and scale factor to scale ``x``, ``y`` columns of
    :data:`df_points` to fill :data:`bounding_box` while maintaining aspect
    ratio.

    Arguments
    ---------
    df_points : pandas.DataFrame
        A frame with at least the columns ``x`` and ``y``, containing one row
        per point.
    bounding_box: pandas.Series
        A `pandas.Series` containing numeric `width` and `height` values.
    padding_fraction : float
        Fraction of padding to add around points.

    Returns
    -------
    (offset, scale) : (pandas.Series, float)
        Offset translation and scale required to fit all points in
        :data:`df_points` to fill :data:`bounding_box` while maintaining
        aspect ratio.

        :data:`offset` contains ``x`` and ``y`` values for the offset.
    '''
    # Extent of the point cloud, expressed like a bounding box.
    points_extent = pd.Series([df_points.x.max(), df_points.y.max()],
                              index=['width', 'height'])

    # Shrink the usable area symmetrically by the padding fraction.
    fill_scale = 1 - 2 * padding_fraction
    assert(fill_scale > 0)

    padded_scale = scale_to_fit_a_in_b(points_extent, bounding_box) * fill_scale
    # Center the scaled points within the bounding box.
    offset = (bounding_box - points_extent * padded_scale) * .5
    offset.index = ['x', 'y']
    return offset, padded_scale
Return offset and scale factor to scale ``x``, ``y`` columns of :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- (offset, scale) : (pandas.Series, float) Offset translation and scale required to fit all points in :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. :data:`offset` contains ``x`` and ``y`` values for the offset.
def organize(dirs, config, run_info_yaml, sample_names=None, is_cwl=False,
             integrations=None):
    """Organize run information from a passed YAML file or the Galaxy API.

    Creates the high level structure used for subsequent processing.

    sample_names is a list of samples to include from the overall file, for cases
    where we are running multiple pipelines from the same configuration file.
    """
    from bcbio.pipeline import qcsummary
    if integrations is None:
        integrations = {}
    logger.info("Using input YAML configuration: %s" % run_info_yaml)
    assert run_info_yaml and os.path.exists(run_info_yaml), \
        "Did not find input sample YAML file: %s" % run_info_yaml
    run_details = _run_info_from_yaml(dirs, run_info_yaml, config, sample_names,
                                      is_cwl=is_cwl, integrations=integrations)
    remote_retriever = None
    # Let any configured integration (e.g. a remote data source) augment the
    # run details; the last matching integration becomes the retriever.
    for iname, retriever in integrations.items():
        if iname in config:
            run_details = retriever.add_remotes(run_details, config[iname])
            remote_retriever = retriever
    out = []
    for item in run_details:
        item["dirs"] = dirs
        if "name" not in item:
            item["name"] = ["", item["description"]]
        elif isinstance(item["name"], six.string_types):
            description = "%s-%s" % (item["name"], clean_name(item["description"]))
            item["name"] = [item["name"], description]
            item["description"] = description
        # add algorithm details to configuration, avoid double specification
        item["resources"] = _add_remote_resources(item["resources"])
        item["config"] = config_utils.update_w_custom(config, item)
        item.pop("algorithm", None)
        item = add_reference_resources(item, remote_retriever)
        item["config"]["algorithm"]["qc"] = qcsummary.get_qc_tools(item)
        item["config"]["algorithm"]["vcfanno"] = vcfanno.find_annotations(item, remote_retriever)
        # Create temporary directories and make absolute, expanding environmental variables
        tmp_dir = tz.get_in(["config", "resources", "tmp", "dir"], item)
        if tmp_dir:
            # if no environmental variables, make and normalize the directory
            # otherwise we normalize later in distributed.transaction:
            if os.path.expandvars(tmp_dir) == tmp_dir:
                tmp_dir = utils.safe_makedir(os.path.expandvars(tmp_dir))
            tmp_dir = genome.abs_file_paths(tmp_dir, do_download=not integrations)
            item["config"]["resources"]["tmp"]["dir"] = tmp_dir
        out.append(item)
    out = _add_provenance(out, dirs, config, not is_cwl)
    return out
Organize run information from a passed YAML file or the Galaxy API. Creates the high level structure used for subsequent processing. sample_names is a list of samples to include from the overall file, for cases where we are running multiple pipelines from the same configuration file.
def normalize_full_name_true(decl):
    """
    Cached variant of normalize

    Args:
        decl (declaration.declaration_t): the declaration

    Returns:
        str: normalized name
    """
    cache = decl.cache
    # Compute lazily and memoize on the declaration's cache object.
    if cache.normalized_full_name_true is None:
        full = declaration_utils.full_name(decl, with_defaults=True)
        cache.normalized_full_name_true = normalize(full)
    return cache.normalized_full_name_true
Cached variant of normalize Args: decl (declaration.declaration_t): the declaration Returns: str: normalized name
def _set_default_resource_names(self): """ Generate names for resources based on the running_instance_id. """ self.ip_config_name = ''.join([ self.running_instance_id, '-ip-config' ]) self.nic_name = ''.join([self.running_instance_id, '-nic']) self.public_ip_name = ''.join([self.running_instance_id, '-public-ip'])
Generate names for resources based on the running_instance_id.
def _match_filenames_w_dfs(filenames, lo_dfs):
    """
    Match a list of filenames to their data frame counterparts, returning the
    matching data frames.

    :param list filenames: Filenames of data frames to retrieve
    :param dict lo_dfs: All data frames
    :return dict: Filenames and data frames (filtered)
    """
    logger_dataframes.info("enter match_filenames_w_dfs")
    matched = {}
    for name in filenames:
        try:
            # chronData is checked first; a filename present in both sections
            # resolves to its chronData frame.
            if name in lo_dfs["chronData"]:
                matched[name] = lo_dfs["chronData"][name]
            elif name in lo_dfs["paleoData"]:
                matched[name] = lo_dfs["paleoData"][name]
        except KeyError:
            # One of the section keys is missing entirely; skip this filename.
            logger_dataframes.info("filter_dfs: KeyError: missing data frames keys")
    logger_dataframes.info("exit match_filenames_w_dfs")
    return matched
Match a list of filenames to their data frame counterparts. Return data frames :param list filenames: Filenames of data frames to retrieve :param dict lo_dfs: All data frames :return dict: Filenames and data frames (filtered)
def click(self):
    """
    click extension

    Lazily builds a :class:`ClickExtension` bound to this config on first
    access, then returns the cached instance on subsequent calls.

    Returns:
        ClickExtension
    """
    if self._click_extension is None:
        # Imported here so the extension module is only loaded when used.
        from .click_ext import ClickExtension

        self._click_extension = ClickExtension(
            config=self
        )

    return self._click_extension
click extension Returns: ClickExtension
def register(self, resource, endpoint):
    '''
    This method registers a resource with the router and connects all
    receivers to their respective signals

    :param resource: The resource class to register
    :type resource: A subclass of ``Resource`` class
    :param endpoint: the name of the resource's endpoint as it appears in the URL
    :type endpoint: str
    :raises ValueError: if ``resource`` is not a subclass of ``Resource``
    '''
    if not issubclass(resource, Resource):
        # Fixed grammar of the error message ("Not and instance" -> "Not an instance")
        raise ValueError('Not an instance of ``Resource`` subclass')

    # register resource
    self._registry[endpoint] = resource

    # connect signal receivers
    resource.connect_signal_receivers()
This method registers a resource with the router and connects all
receivers to their respective signals

:param resource: The resource class to register
:type resource: A subclass of ``Resource`` class
:param endpoint: the name of the resource's endpoint as it appears in the URL
:type endpoint: str
def maybe_show_asm(showasm, tokens):
    """
    Show the asm based on the showasm flag (or file object), writing to the
    appropriate stream depending on the type of the flag.

    :param showasm: Flag which determines whether the ingested code is
                    written to sys.stdout or not. (It is also to pass a file
                    like object, into which the asm will be written).
    :param tokens: The asm tokens to show.
    """
    if not showasm:
        return
    # A file-like flag is written to directly; any other truthy flag
    # falls back to stdout.
    stream = showasm if hasattr(showasm, 'write') else sys.stdout
    for token in tokens:
        stream.write(str(token))
        stream.write('\n')
Show the asm based on the showasm flag (or file object), writing to the appropriate stream depending on the type of the flag. :param showasm: Flag which determines whether the ingested code is written to sys.stdout or not. (It is also to pass a file like object, into which the asm will be written). :param tokens: The asm tokens to show.
def _cutoff(self, coeffs, vscale): """ Compute cutoff index after which the coefficients are deemed negligible. """ bnd = self._threshold(vscale) inds = np.nonzero(abs(coeffs) >= bnd) if len(inds[0]): N = inds[0][-1] else: N = 0 return N+1
Compute cutoff index after which the coefficients are deemed negligible.
def size_as_bytes(size_, prefix):
    """
    Convert a size with a binary SI prefix into a byte count.

    >>> size_as_bytes(7.5, 'T')
    8246337208320
    """
    normalized = prefix.upper()
    assert normalized in si_prefixes
    # Position in si_prefixes determines the power of 1024 (K -> 1, M -> 2, ...).
    power = si_prefixes.index(normalized) + 1
    return int(size_ * 1024.0 ** power)
>>> size_as_bytes(7.5, 'T') 8246337208320
def remove_from_group(self, group, user):
    """
    Remove a user from a group

    :type user: str
    :param user: User's email
    :type group: str
    :param group: Group name
    :rtype: dict
    :return: an empty dictionary
    """
    payload = {'group': group, 'user': user}
    return self.post('removeUserFromGroup', payload)
Remove a user from a group :type user: str :param user: User's email :type group: str :param group: Group name :rtype: dict :return: an empty dictionary
def QA_util_date_stamp(date):
    """
    Convert a date string like '2018-01-01' into a float timestamp of the
    same kind returned by ``time.time()``.

    :param date: str formatted as YYYY-MM-DD (only the first 10 characters
        are used)
    :return: float timestamp
    """
    # Truncate to the date portion, then parse and convert to epoch seconds.
    text = str(date)[0:10]
    parsed = time.strptime(text, '%Y-%m-%d')
    return time.mktime(parsed)
字符串 '2018-01-01' 转变成 float 类型时间 类似 time.time() 返回的类型 :param date: 字符串str -- 格式必须是 2018-01-01 ,长度10 :return: 类型float
def installed(name, env=None, saltenv='base', user=None):
    """
    Installs a single package, list of packages (comma separated) or packages
    in a requirements.txt

    Checks if the package is already in the environment. The check occurs
    here so `conda list` and `pip freeze` are only needed once.

    name
        name of the package(s) or path to the requirements.txt
    env : None
        environment name or path where to put the new environment
        if None (default) will use the default conda environment
        (`~/anaconda/bin`)
    saltenv : 'base'
        Salt environment. Useful when the name is a file using the salt file
        system (e.g. `salt://.../reqs.txt`)
    user
        The user under which to run the commands
    """
    ret = {'name': name, 'changes': {}, 'comment': '', 'result': True}

    # Build the list of requested packages: from a requirements file (local
    # path or salt:// URL), or from a comma-separated string.
    packages = []
    if os.path.exists(name) or name.startswith('salt://'):
        if name.startswith('salt://'):
            lines = __salt__['cp.get_file_str'](name, saltenv)
            lines = lines.split('\n')
        else:
            # Context manager guarantees the file handle is closed
            # (the previous implementation used open()/close() manually).
            with open(name, mode='r') as f:
                lines = f.readlines()
        for line in lines:
            line = line.strip()
            if line != '' and not line.startswith('#'):
                line = line.split('#')[0].strip()  # Remove inline comments
                packages.append(line)
    else:
        packages = [pkg.strip() for pkg in name.split(',')]

    conda_list = __salt__['conda.list'](env=env, user=user)

    def extract_info(package):
        """Split a requirement like ``pkg``, ``pkg==1.0``, ``pkg>=1.0`` or
        ``pkg>1.0`` into (name, version).

        Checks ``>=`` before ``>`` so the two-character operator is matched
        first.  BUG FIX: the previous implementation used ``split('>=')[1]``
        in the ``>`` branch, raising IndexError on inputs like ``pkg>1.0``.
        """
        for separator in ('==', '>=', '>'):
            if separator in package:
                pkgname, _, pkgversion = package.partition(separator)
                return pkgname, pkgversion
        return package, ''

    installed, failed, old = 0, 0, 0
    for package in packages:
        pkgname, pkgversion = extract_info(package)
        # `conda list` output pads the name column to 26 characters before the
        # version, so reproduce that layout for the membership test.
        conda_pkgname = pkgname + ' ' * (26 - len(pkgname)) + pkgversion
        if conda_pkgname not in conda_list:
            installation = __salt__['conda.install'](package, env=env,
                                                     user=user)
            if installation['retcode'] == 0:
                ret['changes'][package] = 'installed'
                installed += 1
            else:
                ret['changes'][package] = installation
                failed += 1
        else:
            old += 1

    # Summarize the outcome; any failure marks the state as unsuccessful.
    comments = []
    if installed > 0:
        comments.append('{0} installed'.format(installed))
    if failed > 0:
        ret['result'] = False
        comments.append('{0} failed'.format(failed))
    if old > 0:
        comments.append('{0} already installed'.format(old))
    ret['comment'] = ', '.join(comments)
    return ret
Installs a single package, list of packages (comma separated) or packages in a requirements.txt

Checks if the package is already in the environment. The check occurs here so `conda list` and `pip freeze` are only needed once.

name
    name of the package(s) or path to the requirements.txt
env : None
    environment name or path where to put the new environment
    if None (default) will use the default conda environment (`~/anaconda/bin`)
saltenv : 'base'
    Salt environment. Useful when the name is a file using the salt file system (e.g. `salt://.../reqs.txt`)
user
    The user under which to run the commands
def set_chain_info(self, chain_id, chain_name, num_groups):
    """Set the chain information.

    :param chain_id: the asym chain id from mmCIF
    :param chain_name: the auth chain id from mmCIF
    :param num_groups: the number of groups this chain has
    """
    # Record the three parallel per-chain attributes in lockstep.
    for target, value in ((self.chain_id_list, chain_id),
                          (self.chain_name_list, chain_name),
                          (self.groups_per_chain, num_groups)):
        target.append(value)
Set the chain information. :param chain_id: the asym chain id from mmCIF :param chain_name: the auth chain id from mmCIF :param num_groups: the number of groups this chain has
def count_flag_reads(self, file_name, flag, paired_end):
    """
    Counts the number of reads with the specified flag.

    :param str file_name: name of reads file
    :param str flag: sam flag value to be read
    :param bool paired_end: This parameter is ignored; samtools automatically
        correctly responds depending on the data in the bamfile. We leave the
        option here just for consistency, since all the other counting
        functions require the parameter. This makes it easier to swap
        counting functions during pipeline development.
    """
    # -c: count only; -f: require the given flag bits.
    options = " -c -f{}".format(flag)
    if file_name.endswith("sam"):
        # SAM (text) input needs the explicit -S switch.
        options += " -S"
    return self.samtools_view(file_name, param=options)
Counts the number of reads with the specified flag. :param str file_name: name of reads file :param str flag: sam flag value to be read :param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending on the data in the bamfile. We leave the option here just for consistency, since all the other counting functions require the parameter. This makes it easier to swap counting functions during pipeline development.
def generate_random_id(size=6, chars=string.ascii_uppercase + string.digits):
    """Generate a random identifier of ``size`` characters drawn from ``chars``."""
    picks = (random.choice(chars) for _ in range(size))
    return "".join(picks)
Generate random id numbers.
def class_get_trait_help(cls, trait, inst=None):
    """Get the help string for a single trait.

    If `inst` is given, its current trait values will be used in place of
    the class default.
    """
    assert inst is None or isinstance(inst, cls)
    lines = []
    # Header advertises the command-line flag form: --Class.trait=<Type>
    header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__)
    lines.append(header)
    if inst is not None:
        lines.append(indent('Current: %r' % getattr(inst, trait.name), 4))
    else:
        try:
            dvr = repr(trait.get_default_value())
        except Exception:
            dvr = None  # ignore defaults we can't construct
        if dvr is not None:
            if len(dvr) > 64:
                # Truncate long default reprs to keep help output readable.
                dvr = dvr[:61] + '...'
            lines.append(indent('Default: %s' % dvr, 4))
    if 'Enum' in trait.__class__.__name__:
        # include Enum choices
        lines.append(indent('Choices: %r' % (trait.values,)))

    help = trait.get_metadata('help')
    if help is not None:
        help = '\n'.join(wrap_paragraphs(help, 76))
        lines.append(indent(help, 4))
    return '\n'.join(lines)
Get the help string for a single trait. If `inst` is given, it's current trait values will be used in place of the class default.
def fold_string(input_string, max_width):
    """
    Fold a string within a maximum width.

    Parameters:

        input_string:
            The string of data to go into the cell
        max_width:
            Maximum width of cell. Data is folded into multiple lines to
            fit into this width.

    Return:
        String representing the folded string
    """
    # Non-strings and strings already within the width pass through unchanged.
    if isinstance(input_string, six.string_types) and len(input_string) > max_width:
        return textwrap.fill(input_string, max_width)
    return input_string
Fold a string within a maximum width. Parameters: input_string: The string of data to go into the cell max_width: Maximum width of cell. Data is folded into multiple lines to fit into this width. Return: String representing the folded string
def get_sun_times(dates, lon, lat, time_zone):
    """Computes the times of sunrise, solar noon, and sunset for each day.

    Parameters
    ----
    dates: datetime
    lon : longitude in DecDeg
    lat : latitude in DecDeg
    time_zone : timezone

    Returns
    ----
    DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
    """
    df = pd.DataFrame(index=dates,
                      columns=['sunrise', 'sunnoon', 'sunset', 'daylength'])
    doy = np.array([(d - d.replace(day=1, month=1)).days + 1
                    for d in df.index])  # day of year

    # Day angle and declination after Bourges (1985):
    day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346))
    declination = np.deg2rad(
        0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b)
        + 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b)
        - 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b))

    # Equation of time with day angle after Spencer (1971):
    day_angle_s = 2 * np.pi * (doy - 1) / 365.
    eq_time = 12. / np.pi * (
        0.000075
        + 0.001868 * np.cos(day_angle_s) - 0.032077 * np.sin(day_angle_s)
        - 0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s))

    # Solar-time offset of this longitude from the time zone's meridian.
    standard_meridian = time_zone * 15.
    delta_lat_time = (lon - standard_meridian) * 24. / 360.

    # Hour angle at sunrise/sunset; |arg| > 1 means the sun never crosses
    # the horizon that day (handled below).
    omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination)
    omega_nul = np.arccos(omega_nul_arg)
    sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time
    sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time

    # as an approximation, solar noon is independent of the below mentioned
    # cases:
    sunnoon = 12. * (1.) - delta_lat_time - eq_time

    # $kf 2015-11-13: special case midnight sun and polar night
    # CASE 1: MIDNIGHT SUN
    # set sunrise and sunset to values that would yield the maximum day
    # length even though this a crude assumption
    pos = omega_nul_arg < -1
    sunrise[pos] = sunnoon[pos] - 12
    sunset[pos] = sunnoon[pos] + 12

    # CASE 2: POLAR NIGHT
    # set sunrise and sunset to values that would yield the minimum day
    # length even though this a crude assumption
    pos = omega_nul_arg > 1
    sunrise[pos] = sunnoon[pos]
    sunset[pos] = sunnoon[pos]

    daylength = sunset - sunrise

    # adjust if required: wrap times that fell outside the 0-24 hour range
    sunrise[sunrise < 0] += 24
    sunset[sunset > 24] -= 24

    df.sunrise = sunrise
    df.sunnoon = sunnoon
    df.sunset = sunset
    df.daylength = daylength
    return df
Computes the times of sunrise, solar noon, and sunset for each day. Parameters ---- dates: datetime lat : latitude in DecDeg lon : longitude in DecDeg time_zone : timezone Returns ---- DataFrame: [sunrise, sunnoon, sunset, day length] in dec hours
def open_external_editor(filename=None, sql=None):
    """
    Open external editor, wait for the user to type in their query,
    return the query.

    :return: tuple (query, message); ``message`` is an error string when the
        file could not be read, otherwise None.
    """
    message = None
    # Only the first whitespace-separated token of ``filename`` is used.
    filename = filename.strip().split(' ', 1)[0] if filename else None

    sql = sql or ''
    MARKER = '# Type your query above this line.\n'

    # Populate the editor buffer with the partial sql (if available) and a
    # placeholder comment.
    query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
                       filename=filename, extension='.sql')

    if filename:
        try:
            # When editing a named file, re-read it to pick up the saved text.
            with open(filename, encoding='utf-8') as f:
                query = f.read()
        except IOError:
            message = 'Error reading file: %s.' % filename

    if query is not None:
        # Strip the placeholder comment and trailing newlines.
        query = query.split(MARKER, 1)[0].rstrip('\n')
    else:
        # Don't return None for the caller to deal with.
        # Empty string is ok.
        query = sql

    return (query, message)
Open external editor, wait for the user to type in their query, return the query.

:return: tuple of (query, message), with the edited query as the first element and an error message (or None) as the second.
def script_post_save(model, os_path, contents_manager, **kwargs):
    """convert notebooks to Python script after save with nbconvert

    replaces `ipython notebook --script`
    """
    from nbconvert.exporters.script import ScriptExporter

    # Only notebook saves trigger export; other content types are ignored.
    if model['type'] != 'notebook':
        return

    global _script_exporter
    # Build the exporter once and reuse it for subsequent saves.
    if _script_exporter is None:
        _script_exporter = ScriptExporter(parent=contents_manager)

    log = contents_manager.log

    base, ext = os.path.splitext(os_path)
    # py_fname = base + '.py'
    script, resources = _script_exporter.from_filename(os_path)
    # The output extension comes from the exporter resources; fall back to .txt.
    script_fname = base + resources.get('output_extension', '.txt')
    log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
    with io.open(script_fname, 'w', encoding='utf-8') as f:
        f.write(script)
convert notebooks to Python script after save with nbconvert replaces `ipython notebook --script`
def _default_arguments(self, obj): """Return the list of default arguments of obj if it is callable, or empty list otherwise.""" if not (inspect.isfunction(obj) or inspect.ismethod(obj)): # for classes, check for __init__,__new__ if inspect.isclass(obj): obj = (getattr(obj,'__init__',None) or getattr(obj,'__new__',None)) # for all others, check if they are __call__able elif hasattr(obj, '__call__'): obj = obj.__call__ # XXX: is there a way to handle the builtins ? try: args,_,_1,defaults = inspect.getargspec(obj) if defaults: return args[-len(defaults):] except TypeError: pass return []
Return the list of default arguments of obj if it is callable, or empty list otherwise.
def _pairwise_chisq(self):
    """Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray.

    Returns a list of square and symmetric matrices of test statistics
    for the null hypothesis that each vector along *axis* is equal to
    each other.
    """
    return [
        self._chi_squared(
            mr_subvar_proportions,
            self._margin[idx],
            # normalize the opposite-axis margin into proportions
            self._opposite_axis_margin[idx]
            / np.sum(self._opposite_axis_margin[idx]),
        )
        for (idx, mr_subvar_proportions) in enumerate(self._proportions)
    ]
Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a list of square and symmetric matrices of test statistics for the null hypothesis that each vector along *axis* is equal to each other.
def get_updated_data(self, old_data: Dict[str, LinkItem]) -> Dict[str, LinkItem]:
    """
    Get links who needs to be downloaded by comparing old and the new data.

    :param old_data: old data
    :type old_data: Dict[str, ~unidown.plugin.link_item.LinkItem]
    :return: data which is newer or dont exist in the old one
    :rtype: Dict[str, ~unidown.plugin.link_item.LinkItem]
    """
    # Nothing fetched this run -> nothing to compare against.
    if not self.download_data:
        return {}
    new_link_item_dict = {}
    for link, link_item in tqdm(self.download_data.items(), desc="Compare with save",
                                unit="item", leave=True, mininterval=1, ncols=100,
                                disable=dynamic_data.DISABLE_TQDM):
        # TODO: add methode to log lost items, which are in old but not in new
        # if link in new_link_item_dict:  # TODO: is ever false, since its the key of a dict: move to the right place
        #     self.log.warning("Duplicate: " + link + " - " + new_link_item_dict[link] + " : " + link_item)

        # if the new_data link does not exists in old_data or new_data time is newer
        if (link not in old_data) or (link_item.time > old_data[link].time):
            new_link_item_dict[link] = link_item
    return new_link_item_dict
Get links who needs to be downloaded by comparing old and the new data. :param old_data: old data :type old_data: Dict[str, ~unidown.plugin.link_item.LinkItem] :return: data which is newer or dont exist in the old one :rtype: Dict[str, ~unidown.plugin.link_item.LinkItem]
def find_holes(db_module, db, table_name, column_name, _range, filter=None):
    """
    FIND HOLES IN A DENSE COLUMN OF INTEGERS
    RETURNS A LIST OF {"min": min, "max": max} OBJECTS
    """
    if not filter:
        filter = {"match_all": {}}

    _range = wrap(_range)
    params = {
        "min": _range.min,
        "max": _range.max - 1,
        "column_name": db_module.quote_column(column_name),
        "table_name": db_module.quote_column(table_name),
        "filter": esfilter2sqlwhere(filter)
    }

    # Overall span actually present in the table: (min value, max value + 1).
    min_max = db.query("""
        SELECT min({{column_name}}) `min`, max({{column_name}})+1 `max`
        FROM {{table_name}} a
        WHERE a.{{column_name}} BETWEEN {{min}} AND {{max}} AND {{filter}}
    """, params)[0]

    db.execute("SET @last={{min}}-1", {"min": _range.min})
    # Gap detection without a self-join: @last carries the previous value,
    # and any step of more than 1 marks a hole (prev+1 .. current).
    ranges = db.query("""
        SELECT prev_rev+1 `min`, curr_rev `max`
        FROM (
            SELECT
                a.{{column_name}}-@last diff,
                @last prev_rev,
                @last:=a.{{column_name}} curr_rev
            FROM {{table_name}} a
            WHERE a.{{column_name}} BETWEEN {{min}} AND {{max}} AND {{filter}}
            ORDER BY a.{{column_name}}
        ) a
        WHERE diff>1
    """, params)

    if ranges:
        # Trailing gap between the table's max and the requested range end.
        ranges.append({"min": min_max.max, "max": _range.max})
    else:
        if min_max.min:
            # No interior holes: report the leading and trailing gaps.
            ranges.append({"min": _range.min, "max": min_max.min})
            ranges.append({"min": min_max.max, "max": _range.max})
        else:
            # Table empty in this span: the whole range is a hole.
            ranges.append(_range)

    return ranges
FIND HOLES IN A DENSE COLUMN OF INTEGERS
RETURNS A LIST OF {"min": min, "max": max} OBJECTS
def get_search_results(portal_type=None, uid=None, **kw): """Search the catalog and return the results :returns: Catalog search results :rtype: iterable """ # If we have an UID, return the object immediately if uid is not None: logger.info("UID '%s' found, returning the object immediately" % uid) return u.to_list(get_object_by_uid(uid)) # allow to search search for the Plone Site with portal_type include_portal = False if u.to_string(portal_type) == "Plone Site": include_portal = True # The request may contain a list of portal_types, e.g. # `?portal_type=Document&portal_type=Plone Site` if "Plone Site" in u.to_list(req.get("portal_type")): include_portal = True # Build and execute a catalog query results = search(portal_type=portal_type, uid=uid, **kw) if include_portal: results = list(results) + u.to_list(get_portal()) return results
Search the catalog and return the results :returns: Catalog search results :rtype: iterable
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
                       bins=251, annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
    """
    if config["hier"]:
        # Hierarchical references: multiple annotation levels per track.
        ref_times, ref_labels, ref_levels = \
            msaf.io.read_hier_references(
                ref_file, annotation_id=annotator_id,
                exclude_levels=["segment_salami_function"])
    else:
        # Flat references from a JAMS file for the requested annotator.
        jam = jams.load(ref_file, validate=False)
        ann = jam.search(namespace='segment_.*')[annotator_id]
        ref_inter, ref_labels = ann.to_interval_values()

    # Read estimations with correct configuration
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)

    # Compute the results and return
    logging.info("Evaluating %s" % os.path.basename(est_file))
    if config["hier"]:
        # Hierarchical
        assert len(est_inter) == len(est_labels), "Same number of levels " \
            "are required in the boundaries and labels for the hierarchical " \
            "evaluation."
        est_times = []
        est_labels = []

        # Sort based on how many segments per level
        est_inter = sorted(est_inter, key=lambda level: len(level))

        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Align the times
        utils.align_end_hierarchies(est_times, ref_times, thres=1)

        # To intervals
        est_hier = [utils.times_to_intervals(times) for times in est_times]
        ref_hier = [utils.times_to_intervals(times) for times in ref_times]

        # Compute evaluations
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)

        # Track id is the estimation file name without its extension
        # (assumes a 5-character suffix such as ".jams" — TODO confirm).
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat
        return compute_results(ref_inter, est_inter, ref_labels, est_labels,
                               bins, est_file)
Computes the results by using the ground truth dataset identified by the annotator parameter. Return ------ results : dict Dictionary of the results (see function compute_results).
def validate_twilio_signature(func=None, backend_name='twilio-backend'):
    """View decorator to validate requests from Twilio per
    http://www.twilio.com/docs/security.

    Works both bare (``@validate_twilio_signature``) and with arguments
    (``@validate_twilio_signature(backend_name=...)``); rejected requests
    get a 400 response.
    """
    def _dec(view_func):
        @functools.wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # URLconf kwargs may override the decorator's backend choice.
            backend = kwargs.get('backend_name', backend_name)
            config = settings.INSTALLED_BACKENDS[backend]['config']
            validator = RequestValidator(config['auth_token'])
            signature = request.META.get('HTTP_X_TWILIO_SIGNATURE', '')
            url = request.build_absolute_uri()
            body = {}
            if request.method == 'POST':
                body = request.POST
            # Backends may opt out of validation via their config.
            require_validation = config.get('validate', True)
            if validator.validate(url, body, signature) or not require_validation:
                return view_func(request, *args, **kwargs)
            else:
                return HttpResponseBadRequest()
        return _wrapped_view
    if func is None:
        return _dec
    else:
        return _dec(func)
View decorator to validate requests from Twilio per http://www.twilio.com/docs/security.
def import_url(self, caseSensitiveNetworkCollectionKeys=None,
               caseSensitiveNetworkKeys=None, dataTypeList=None,
               DataTypeTargetForNetworkCollection=None,
               DataTypeTargetForNetworkList=None,
               delimiters=None, delimitersForDataList=None,
               firstRowAsColumnNames=None,
               KeyColumnForMapping=None, KeyColumnForMappingNetworkList=None,
               keyColumnIndex=None, newTableName=None, startLoadRow=None,
               TargetNetworkCollection=None, TargetNetworkList=None, url=None,
               WhereImportTable=None, verbose=None):
    """Import a table (or network) from a URL.

    Similar to Import Table, this uses a long list of input parameters to
    specify the attributes of the table, the mapping keys, and the
    destination table for the input.

    :param caseSensitiveNetworkCollectionKeys (string, optional): Whether
        capitalization is considered in matching and sorting
    :param caseSensitiveNetworkKeys (string, optional): Whether
        capitalization is considered in matching and sorting
    :param dataTypeList (string, optional): List of column data types ordered
        by column index (e.g. "string,int,long,double,boolean,intlist" or
        just "s,i,l,d,b,il")
    :param DataTypeTargetForNetworkCollection (string, optional): Whether to
        import the data as Node, Edge, or Network Table Columns
    :param DataTypeTargetForNetworkList (string, optional): The data type of
        the targets
    :param delimiters (string, optional): The delimiters that separate
        columns in the table.
    :param delimitersForDataList (string, optional): The delimiters between
        elements of list columns in the table.
    :param firstRowAsColumnNames (string, optional): Set to true if the first
        imported row contains column names.
    :param KeyColumnForMapping (string, optional): The network column used as
        the merge key
    :param KeyColumnForMappingNetworkList (string, optional): The network
        column used as the merge key
    :param keyColumnIndex (string, optional): The column holding the key
        values matched against the key values in the network.
    :param newTableName (string, optional): The title of the new table
    :param startLoadRow (string, optional): The first row of the input table
        to load; allows skipping headers that are not part of the import.
    :param TargetNetworkCollection (string, optional): The network collection
        to use for the table import
    :param TargetNetworkList (string, optional): The networks into which the
        table is imported
    :param url (string): The URL of the file or resource providing the table
        or network to be imported.
    :param WhereImportTable (string, optional): What network(s) the imported
        table will be associated with (if any): a Network Collection,
        Selected networks, or an unassigned table.
    """
    # Keep name/value pairs together so the two parallel lists handed to
    # set_param() cannot drift out of sync.
    arg_pairs = [
        ('caseSensitiveNetworkCollectionKeys', caseSensitiveNetworkCollectionKeys),
        ('caseSensitiveNetworkKeys', caseSensitiveNetworkKeys),
        ('dataTypeList', dataTypeList),
        ('DataTypeTargetForNetworkCollection', DataTypeTargetForNetworkCollection),
        ('DataTypeTargetForNetworkList', DataTypeTargetForNetworkList),
        ('delimiters', delimiters),
        ('delimitersForDataList', delimitersForDataList),
        ('firstRowAsColumnNames', firstRowAsColumnNames),
        ('KeyColumnForMapping', KeyColumnForMapping),
        ('KeyColumnForMappingNetworkList', KeyColumnForMappingNetworkList),
        ('keyColumnIndex', keyColumnIndex),
        ('newTableName', newTableName),
        ('startLoadRow', startLoadRow),
        ('TargetNetworkCollection', TargetNetworkCollection),
        ('TargetNetworkList', TargetNetworkList),
        ('url', url),
        ('WhereImportTable', WhereImportTable),
    ]
    PARAMS = set_param([name for name, _ in arg_pairs],
                       [value for _, value in arg_pairs])
    return api(url=self.__url + "/import url", PARAMS=PARAMS,
               method="POST", verbose=verbose)
These values will be used to match with the key values in the network. :param newTableName (string, optional): The title of the new table :param startLoadRow (string, optional): The first row of the input table to load. This allows the skipping of headers that are not part of the import. :param TargetNetworkCollection (string, optional): The network collection t o use for the table import :param TargetNetworkList (string, optional): The list of networks into whic h the table is imported :param url (string): The URL of the file or resource that provides the tabl e or network to be imported. :param WhereImportTable (string, optional): Determines what network(s) the imported table will be associated with (if any). A table can be impo rted into a Network Collection, Selected networks or to an unassigne d table. """ PARAMS=set_param(['caseSensitiveNetworkCollectionKeys',\ 'caseSensitiveNetworkKeys','dataTypeList','DataTypeTargetForNetworkCollection',\ 'DataTypeTargetForNetworkList','delimiters','delimitersForDataList',\ 'firstRowAsColumnNames','KeyColumnForMapping','KeyColumnForMappingNetworkList',\ 'keyColumnIndex','newTableName','startLoadRow','TargetNetworkCollection',\ 'TargetNetworkList','url','WhereImportTable'],[caseSensitiveNetworkCollectionKeys,\ caseSensitiveNetworkKeys,dataTypeList,DataTypeTargetForNetworkCollection,\ DataTypeTargetForNetworkList,delimiters,delimitersForDataList,\ firstRowAsColumnNames,KeyColumnForMapping,KeyColumnForMappingNetworkList,\ keyColumnIndex,newTableName,startLoadRow,TargetNetworkCollection,\ TargetNetworkList,url,WhereImportTable]) response=api(url=self.__url+"/import url", PARAMS=PARAMS, method="POST", verbose=verbose) return response
Similar to Import Table this uses a long list of input parameters to specify
the attributes of the table, the mapping keys, and the destination table for
the input.

:param caseSensitiveNetworkCollectionKeys (string, optional): Determines whether capitalization is considered in matching and sorting
:param caseSensitiveNetworkKeys (string, optional): Determines whether capitalization is considered in matching and sorting
:param dataTypeList (string, optional): List of column data types ordered by column index (e.g. "string,int,long,double,boolean,intlist" or just "s,i,l,d,b,il")
:param DataTypeTargetForNetworkCollection (string, optional): Select whether to import the data as Node Table Columns, Edge Table Columns, or Network Table Columns
:param DataTypeTargetForNetworkList (string, optional): The data type of the targets
:param delimiters (string, optional): The list of delimiters that separate columns in the table.
:param delimitersForDataList (string, optional): The delimiters between elements of list columns in the table.
:param firstRowAsColumnNames (string, optional): If the first imported row contains column names, set this to true.
:param KeyColumnForMapping (string, optional): The column in the network to use as the merge key
:param KeyColumnForMappingNetworkList (string, optional): The column in the network to use as the merge key
:param keyColumnIndex (string, optional): The column that contains the key values for this import. These values will be used to match with the key values in the network.
:param newTableName (string, optional): The title of the new table
:param startLoadRow (string, optional): The first row of the input table to load. This allows the skipping of headers that are not part of the import.
:param TargetNetworkCollection (string, optional): The network collection to use for the table import
:param TargetNetworkList (string, optional): The list of networks into which the table is imported
:param url (string): The URL of the file or resource that provides the table or network to be imported.
:param WhereImportTable (string, optional): Determines what network(s) the imported table will be associated with (if any). A table can be imported into a Network Collection, Selected networks or to an unassigned table.
def get_texts_and_labels(sentence_chunk):
    """Split a newline-delimited, tab-separated sentence chunk.

    Each non-empty line is a token record; the first tab field is the token
    text and the last tab field is its label.

    Returns a (texts, labels) pair of parallel lists.
    """
    texts, labels = [], []
    for line in sentence_chunk.split('\n'):
        line = line.strip()
        if not line:
            continue
        fields = line.split('\t')
        texts.append(fields[0].strip())
        labels.append(fields[-1].strip())
    return texts, labels
Given a sentence chunk, extract original texts and labels.
def format_dateaxis(subplot, freq, index):
    """
    Pretty-format the date (x) axis.

    Major and minor ticks are automatically set for the frequency of the
    current underlying series.  As dynamic mode is activated by default,
    changing the x-axis limits intelligently repositions the ticks.
    """
    # Index-specific formatting.  Note: DatetimeIndex does not use this
    # interface -- it goes through matplotlib.dates directly.
    if isinstance(index, ABCPeriodIndex):
        # Install a locator/formatter pair for both the major and the
        # minor axis.
        for minor in (False, True):
            locator = TimeSeries_DateLocator(freq, dynamic_mode=True,
                                             minor_locator=minor,
                                             plot_obj=subplot)
            formatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
                                                 minor_locator=minor,
                                                 plot_obj=subplot)
            if minor:
                subplot.xaxis.set_minor_locator(locator)
                subplot.xaxis.set_minor_formatter(formatter)
            else:
                subplot.xaxis.set_major_locator(locator)
                subplot.xaxis.set_major_formatter(formatter)

        # x and y coordinate readout in the interactive toolbar.
        subplot.format_coord = functools.partial(_format_coord, freq)
    elif isinstance(index, ABCTimedeltaIndex):
        subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter())
    else:
        raise TypeError('index type not supported')

    pylab.draw_if_interactive()
Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks.
def getActiveAxes(self):
    """Return the indices of the axes whose menu entry is checked."""
    return [index for index, axis_id in enumerate(self._axisId)
            if self._menu.IsChecked(axis_id)]
Return a list of the selected axes.
def add_param(self, param_key, param_val):
    """Append a [key, value] pair to ``self.params``.

    The special key ``'__success_test'`` is additionally mirrored into
    ``self.success``.
    """
    pair = [param_key, param_val]
    self.params.append(pair)
    if param_key == '__success_test':
        self.success = param_val
adds parameters as key value pairs
def find_elements_by_class_name(self, name):
    """Find all elements carrying the given CSS class name.

    :Args:
     - name: The class name of the elements to find.

    :Returns:
     - list of WebElement - a list with elements if any was found.  An
       empty list if not

    :Usage:
        ::

            elements = driver.find_elements_by_class_name('foo')
    """
    # Thin convenience wrapper around the generic locator entry point.
    return self.find_elements(value=name, by=By.CLASS_NAME)
Finds elements by class name. :Args: - name: The class name of the elements to find. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_class_name('foo')
def build_plan(description, graph, targets=None, reverse=False):
    """Builds a plan from a list of steps.

    Args:
        description (str): an arbitrary string to describe the plan.
        graph (:class:`Graph`): a list of :class:`Graph` to execute.
        targets (list): an optional list of step names to filter the graph
            to. If provided, only these steps, and their transitive
            dependencies will be executed. If no targets are specified,
            every node in the graph will be executed.
        reverse (bool): If provided, the graph will be walked in reverse
            order (dependencies last).

    Returns:
        Plan: the plan wrapping the (possibly transposed and filtered)
        graph.
    """
    # If we want to execute the plan in reverse (e.g. Destroy), transpose
    # the graph so dependencies are walked last.
    if reverse:
        graph = graph.transposed()

    # If we only want to build specific targets, filter the graph.  The
    # comprehension preserves the original semantics: node names are
    # collected per requested target, in target order.
    if targets:
        nodes = [step.name
                 for target in targets
                 for step in graph.steps.values()
                 if step.name == target]
        graph = graph.filtered(nodes)

    return Plan(description=description, graph=graph)
Builds a plan from a list of steps. Args: description (str): an arbitrary string to describe the plan. graph (:class:`Graph`): a list of :class:`Graph` to execute. targets (list): an optional list of step names to filter the graph to. If provided, only these steps, and their transitive dependencies will be executed. If no targets are specified, every node in the graph will be executed. reverse (bool): If provided, the graph will be walked in reverse order (dependencies last).
async def _set_annotations(entity_tag, annotations, connection):
    """Set annotations on the specified entity.

    :param annotations map[string]string: the annotations as key/value pairs.
    """
    # TODO: ensure annotations is dict with only string keys and values.
    log.debug('Updating annotations on %s', entity_tag)
    facade = client.AnnotationsFacade.from_connection(connection)
    return await facade.Set([
        client.EntityAnnotations(entity=entity_tag, annotations=annotations),
    ])
Set annotations on the specified entity. :param annotations map[string]string: the annotations as key/value pairs.
def dict(self, **kwargs):
    """Return this object's fields as a plain dictionary.

    Note: the method deliberately shares its name with the ``dict``
    builtin; inside the body ``dict(...)`` still resolves to the builtin
    because the class-level name is not visible from method scope.

    :param kwargs: extra key/value pairs merged into the result.  A key
        colliding with one of the fixed keys raises ``TypeError``
        (duplicate keyword argument).
    """
    return dict(
        time = self.timestamp,
        serial_number = self.serial_number,
        value = self.value,
        battery = self.battery,
        supervision = self.supervision,
        **kwargs
    )
Dictionary representation.
def get_desc2nts(self, **kws_usr):
    """Return grouped, sorted namedtuples in either format: flat, sections."""
    # Forward only the keyword arguments the formatter understands
    # (e.g. hdrgo_prt, section_prt, top_n, use_sections).
    accepted = {}
    for key, val in kws_usr.items():
        if key in self.keys_nts:
            accepted[key] = val
    return self.get_desc2nts_fnc(**accepted)
Return grouped, sorted namedtuples in either format: flat, sections.
def cced(self, user, include=None):
    """Retrieve the tickets this user is cc'd into.

    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    :param user: User object or id
    """
    endpoint = self.endpoint.cced
    return self._query_zendesk(endpoint, 'ticket', id=user, include=include)
Retrieve the tickets this user is cc'd into. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id
def _NormalizeTime(self, time):
    """Normalize a time to be an int measured in microseconds.

    Accepts an RDFDatetime (converted to microseconds since the epoch),
    a Duration (its microsecond count), or anything int()-coercible,
    which is assumed to already be in microseconds.
    """
    if isinstance(time, rdfvalue.RDFDatetime):
        return time.AsMicrosecondsSinceEpoch()
    if isinstance(time, rdfvalue.Duration):
        return time.microseconds
    # Plain numbers (or numeric strings) pass through as microseconds.
    return int(time)
Normalize a time to be an int measured in microseconds.
def parse_config_file(config_path, verb=3):
    """Parse provided json to get configuration

    Empty default json:
    {
      "testfiles": [],
      "breakfailed": true,
      "onlyfailed": false,
      "verb": 3,
      "dump": 0,
      "crc": true,
      "scapy": "scapy",
      "preexec": {},
      "global_preexec": "",
      "outputfile": null,
      "local": true,
      "format": "ansi",
      "num": null,
      "modules": [],
      "kw_ok": [],
      "kw_ko": []
    }

    :param config_path: path to the JSON configuration file.
    :param verb: verbosity level; > 2 logs the loaded file to stderr.
    :returns: a Bunch with one attribute per configuration option.
    """
    import json

    # BUG FIX: json.load() no longer accepts an ``encoding`` keyword
    # (deprecated since 3.1, removed in Python 3.9) -- pass the encoding
    # to open() instead.  The unused ``unicodedata`` import was dropped.
    with open(config_path, encoding="utf8") as config_file:
        data = json.load(config_file)
        if verb > 2:
            print("### Loaded config file", config_path, file=sys.stderr)

    return Bunch(testfiles=data.get("testfiles", []),
                 breakfailed=data.get("breakfailed", True),
                 remove_testfiles=data.get("remove_testfiles", []),
                 onlyfailed=data.get("onlyfailed", False),
                 verb=data.get("verb", 3),
                 dump=data.get("dump", 0),
                 crc=data.get("crc", 1),
                 scapy=data.get("scapy", "scapy"),
                 preexec=data.get("preexec", {}),
                 global_preexec=data.get("global_preexec", ""),
                 outfile=data.get("outputfile", sys.stdout),
                 local=data.get("local", False),
                 num=data.get("num", None),
                 modules=data.get("modules", []),
                 kw_ok=data.get("kw_ok", []),
                 kw_ko=data.get("kw_ko", []),
                 format=data.get("format", "ansi"))
Parse provided json to get configuration Empty default json: { "testfiles": [], "breakfailed": true, "onlyfailed": false, "verb": 3, "dump": 0, "crc": true, "scapy": "scapy", "preexec": {}, "global_preexec": "", "outputfile": null, "local": true, "format": "ansi", "num": null, "modules": [], "kw_ok": [], "kw_ko": [] }
def set_attribute(self, name, value):
    """Default handler for attributes not explicitly defined.

    ``True`` writes the attribute with its own name as the value (XML
    boolean convention), ``False`` removes the attribute entirely, and
    any other value is stringified.
    """
    widget = self.widget
    if value is False:
        del widget.attrib[name]
    elif value is True:
        widget.set(name, name)
    else:
        widget.set(name, str(value))
Default handler for those not explicitly defined
def run_powerflow(self, session, method='onthefly', export_pypsa=False,
                  debug=False):
    """Performs power flow calculation for all MV grids

    Args:
        session : sqlalchemy.orm.session.Session
            Database session
        method: str
            Specify export method
            If method='db' grid data will be exported to database
            If method='onthefly' grid data will be passed to PyPSA
            directly (default)
        export_pypsa: bool
            If True PyPSA networks will be exported as csv to
            output/debug/grid/<MV-GRID_NAME>/
        debug: bool, defaults to False
            If True, information is printed during process
    """
    if method == 'db':
        # Database export starts from a clean slate.
        pypsa_io.delete_powerflow_tables(session)

    if method in ('db', 'onthefly'):
        for grid_district in self.mv_grid_districts():
            export_pypsa_dir = (repr(grid_district.mv_grid)
                                if export_pypsa else None)
            grid_district.mv_grid.run_powerflow(
                session,
                method=method,
                export_pypsa_dir=export_pypsa_dir,
                debug=debug)
Performs power flow calculation for all MV grids Args: session : sqlalchemy.orm.session.Session Database session method: str Specify export method If method='db' grid data will be exported to database If method='onthefly' grid data will be passed to PyPSA directly (default) export_pypsa: bool If True PyPSA networks will be exported as csv to output/debug/grid/<MV-GRID_NAME>/ debug: bool, defaults to False If True, information is printed during process
def scroll_event(self, widget, event):
    """Handle a mouse-wheel turn over the widget (and possibly trackpad
    finger scrolling).  Adjust the method signature as appropriate for
    the toolkit callback.
    """
    x, y = event.x, event.y
    self.last_win_x, self.last_win_y = x, y

    # Number of degrees of scroll and direction of scroll, both floats in
    # the 0-359.999 range; this template does not compute them yet.
    num_degrees = 0
    direction = 0
    self.logger.debug("scroll deg=%f direction=%f" % (
        num_degrees, direction))

    data_x, data_y = self.check_cursor_location()

    return self.make_ui_callback('scroll', direction, num_degrees,
                                 data_x, data_y)
Called when a mouse is turned in the widget (and maybe for finger scrolling in the trackpad). Adjust method signature as appropriate for callback.
def _delete_file(configurator, path): """ remove file and remove it's directories if empty """ path = os.path.join(configurator.target_directory, path) os.remove(path) try: os.removedirs(os.path.dirname(path)) except OSError: pass
remove file and remove it's directories if empty
def statexml2pdb(topology, state, output=None):
    """
    Given an OpenMM xml file containing the state of the simulation,
    generate a PDB snapshot for easy visualization.
    """
    restart = Restart.from_xml(state)
    system = SystemHandler.load(topology, positions=restart.positions)
    # Default the snapshot name to the topology file with a .pdb suffix.
    if output is None:
        output = topology + '.pdb'
    system.write_pdb(output)
Given an OpenMM xml file containing the state of the simulation, generate a PDB snapshot for easy visualization.
def release(self, connection: Connection):
    '''Put a connection back in the pool.

    Coroutine.
    '''
    assert not self._closed

    key = connection.key
    _logger.debug('Check in %s', key)
    yield from self._host_pools[key].release(connection)

    # Trim the pool; force a cleanup pass when we are over capacity.
    yield from self.clean(force=self.count() > self._max_count)
Put a connection back in the pool. Coroutine.
def define_simulation_graph(batch_env, algo_cls, config): """Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes. """ # pylint: disable=unused-variable step = tf.Variable(0, False, dtype=tf.int32, name='global_step') is_training = tf.placeholder(tf.bool, name='is_training') should_log = tf.placeholder(tf.bool, name='should_log') do_report = tf.placeholder(tf.bool, name='do_report') force_reset = tf.placeholder(tf.bool, name='force_reset') algo = algo_cls(batch_env, step, is_training, should_log, config) done, score, summary = tools.simulate( batch_env, algo, should_log, force_reset) message = 'Graph contains {} trainable variables.' tf.logging.info(message.format(tools.count_weights())) # pylint: enable=unused-variable return tools.AttrDict(locals())
Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes.
def filter(self, *args, **kwargs):
    """
    Works just like the default Manager's :func:`filter` method, but you
    can pass an additional keyword argument named ``path`` specifying the
    full **path of the folder whose immediate child objects** you want to
    retrieve, e.g. ``"path/to/folder"``.
    """
    path_given = 'path' in kwargs
    if path_given:
        # Translate the friendly ``path`` kwarg into real filter args.
        kwargs = self.get_filter_args_with_path(False, **kwargs)
    parent = super(FileNodeManager, self)
    return parent.filter(*args, **kwargs)
Works just like the default Manager's :func:`filter` method, but you can pass an additional keyword argument named ``path`` specifying the full **path of the folder whose immediate child objects** you want to retrieve, e.g. ``"path/to/folder"``.
def getheaderAnim(self, im):
    """getheaderAnim(im)

    Get the GIF89a animation header for image *im*.
    Replacement for PIL's ``getheader()[0]``.
    """
    width, height = im.size
    parts = [b'GIF89a',
             intToBin(width),
             intToBin(height),
             b'\x87\x00\x00']
    return b''.join(parts)
getheaderAnim(im) Get animation header. To replace PILs getheader()[0]
def publish_minions(self):
    '''
    Publishes minions as a list of dicts.
    '''
    log.debug('in publish minions')
    minions = {}
    log.debug('starting loop')
    for minion_id, minion_info in six.iteritems(self.minions):
        log.debug(minion_id)
        # Copy the info dict and tag it with its own id.
        entry = dict(minion_info)
        entry['id'] = minion_id
        minions[minion_id] = entry
    log.debug('ended loop')

    self.handler.write_message(
        salt.utils.json.dumps({'minions': minions}) + str('\n\n'))
Publishes minions as a list of dicts.
def make_tx_signatures(txs_to_sign, privkey_list, pubkey_list):
    """
    Loops through txs_to_sign and makes signatures using privkey_list and
    pubkey_list.

    Not sure what privkeys and pubkeys to supply?  Use get_input_addresses()
    to return a list of addresses.  Matching those addresses to keys is up
    to you and how you store your private keys.

    Note that if spending multisig funds the process is significantly more
    complicated.  Each tx_to_sign must be signed by *each* private key.
    In a 2-of-3 transaction, two of [privkey1, privkey2, privkey3] must
    sign each tx_to_sign.

    http://dev.blockcypher.com/#multisig-transactions
    """
    assert len(privkey_list) == len(pubkey_list) == len(txs_to_sign)
    # in the event of multiple inputs using the same pub/privkey,
    # that privkey should be included multiple times

    signatures = []
    for tx_to_sign, privkey, pubkey in zip(txs_to_sign, privkey_list,
                                           pubkey_list):
        signature = der_encode_sig(
            *ecdsa_raw_sign(tx_to_sign.rstrip(' \t\r\n\0'), privkey))
        err_msg = 'Bad Signature: sig %s for tx %s with pubkey %s' % (
            signature,
            tx_to_sign,
            pubkey,
        )
        assert ecdsa_raw_verify(tx_to_sign, der_decode_sig(signature),
                                pubkey), err_msg
        signatures.append(signature)
    return signatures
Loops through txs_to_sign and makes signatures using privkey_list and pubkey_list Not sure what privkeys and pubkeys to supply? Use get_input_addresses() to return a list of addresses. Matching those addresses to keys is up to you and how you store your private keys. A future version of this library may handle this for you, but it is not trivial. Note that if spending multisig funds the process is significantly more complicated. Each tx_to_sign must be signed by *each* private key. In a 2-of-3 transaction, two of [privkey1, privkey2, privkey3] must sign each tx_to_sign http://dev.blockcypher.com/#multisig-transactions
def recursively_save_dict_contents_to_group(h5file, path, dic):
    """
    Recursively write a nested dictionary into an h5py file/group.

    Parameters
    ----------
    h5file: h5py file to be written to
    path: path within h5py file to saved dictionary (e.g. '/' or '/grp/')
    dic: python dictionary to be converted to hdf5 format

    Raises
    ------
    ValueError
        If a value is of a type that cannot be stored.
    """
    # Types that can be stored directly as a dataset.  GENERALIZATION:
    # builtin int/float are now accepted alongside np.int64/np.float64 --
    # rejecting them while accepting their numpy equivalents was an
    # oversight (h5py stores both the same way).
    storable = (np.ndarray, np.int64, np.float64, int, float,
                str, bytes, tuple, list)
    for key, item in dic.items():
        if isinstance(item, dict):
            # Nested dict -> nested group, one path segment deeper.
            recursively_save_dict_contents_to_group(
                h5file, path + key + '/', item)
        elif isinstance(item, storable):
            h5file[path + str(key)] = item
        else:
            raise ValueError('Cannot save %s type' % type(item))
Parameters ---------- h5file: h5py file to be written to path: path within h5py file to saved dictionary dic: python dictionary to be converted to hdf5 format
def clean(ctx):
    """Clean Sphinx build products.

    Use this command to clean out build products after a failed build, or
    in preparation for running a build from a clean state.

    This command removes the following directories from the
    ``pipelines_lsst_io`` directory:

    - ``_build`` (the Sphinx build itself)
    - ``modules`` (symlinks to the module doc directories of Stack packages)
    - ``packages`` (symlinks to the package doc directories of Stack
      packages)
    - ``py-api`` (pages created by automodapi for the Python API reference)
    """
    logger = logging.getLogger(__name__)

    root = ctx.obj['root_project_dir']
    for name in ('py-api', '_build', 'modules', 'packages'):
        target = os.path.join(root, name)
        if not os.path.isdir(target):
            logger.debug('Did not clean up %r (missing)', target)
            continue
        shutil.rmtree(target)
        logger.debug('Cleaned up %r', target)
Clean Sphinx build products. Use this command to clean out build products after a failed build, or in preparation for running a build from a clean state. This command removes the following directories from the ``pipelines_lsst_io`` directory: - ``_build`` (the Sphinx build itself) - ``modules`` (symlinks to the module doc directories of Stack packages) - ``packages`` (symlinks to the package doc directories of Stack packages) - ``py-api`` (pages created by automodapi for the Python API reference)
def directives(entrystream, type=None):
    """
    Pull directives out of the specified entry stream.

    :param entrystream: a stream of entries
    :param type: retrieve only directives of the specified type; set to
                 :code:`None` to retrieve all directives
    """
    for directive in entry_type_filter(entrystream, tag.Directive):
        # Skip entries that do not match the requested type filter.
        if type and type != directive.type:
            continue
        yield directive
Pull directives out of the specified entry stream. :param entrystream: a stream of entries :param type: retrieve only directives of the specified type; set to :code:`None` to retrieve all directives
def as_unicode(s, encoding='utf-8'):
    """Force conversion of given string to unicode type.

    Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x.
    If the string is already in unicode, then no conversion is done and
    the same string is returned.

    Parameters
    ----------
    s: str or bytes (Python3), str or unicode (Python2)
        The string to convert to unicode.
    encoding: str
        The encoding of the input string (default: utf-8)

    Raises
    ------
    ValueError
        In case an input of invalid type was passed to the function.

    Returns
    -------
    ``str`` for Python3 or ``unicode`` for Python 2.
    """
    if isinstance(s, six.binary_type):
        return s.decode(encoding)
    if isinstance(s, six.text_type):
        return s
    raise ValueError('Can only convert types {0} and {1}'.format(six.text_type, six.binary_type))
Force conversion of given string to unicode type. Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x . If the string is already in unicode, then no conversion is done and the same string is returned. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to unicode. encoding: str The encoding of the input string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``str`` for Python3 or ``unicode`` for Python 2.
def block_ip(ip_address):
    """Mark the given IP address as blocked in redis.

    No-op when no address is supplied or when IP lockout is disabled.
    """
    # Nothing to do without an address, or when lockout is disabled.
    if not ip_address or config.DISABLE_IP_LOCKOUT:
        return
    key = get_ip_blocked_cache_key(ip_address)
    cooloff = config.COOLOFF_TIME
    if cooloff:
        # Expire the block after the configured cool-off period.
        REDIS_SERVER.set(key, 'blocked', cooloff)
    else:
        REDIS_SERVER.set(key, 'blocked')
    send_ip_block_signal(ip_address)
given the ip, block it
def add_section(self, section):
    """Add a section inside this Element.

    The section must be a subclass of SubSection; this lets the sections
    be composed into a tree.

    :param section: the SubSection instance to register under its name.
    :returns: the section that was added, for chaining.
    :raises TypeError: if *section* is not a SubSection subclass instance.
    """
    if not issubclass(section.__class__, SubSection):
        raise TypeError("Argument should be a subclass of SubSection, \
not :" + str(section.__class__))
    self.sections[section.name] = section
    return section
You can add section inside a Element, the section must be a subclass of SubSection. You can use this class to represent a tree.
def datetime_utc_to_local(utc): """ An ugly hack to convert naive :std:`datetime.datetime` object containing UTC time to a naive :std:`datetime.datetime` object with local time. It seems standard Python 2.3 library doesn't provide any better way to do that. """ # pylint: disable-msg=C0103 ts = time.time() cur = datetime.datetime.fromtimestamp(ts) cur_utc = datetime.datetime.utcfromtimestamp(ts) offset = cur - cur_utc t = utc d = datetime.timedelta(hours = 2) while d > _MINUTE: local = t + offset tm = local.timetuple() tm = tm[0:8] + (0, ) ts = time.mktime(tm) u = datetime.datetime.utcfromtimestamp(ts) diff = u - utc if diff < _MINUTE and diff > -_MINUTE: break if diff > _NULLDELTA: offset -= d else: offset += d d //= 2 return local
An ugly hack to convert naive :std:`datetime.datetime` object containing UTC time to a naive :std:`datetime.datetime` object with local time. It seems standard Python 2.3 library doesn't provide any better way to do that.
def find_max(self, predicate, max_=None):
    """
    Return the largest item in or under this node that satisfies
    *predicate*.
    """
    # When this node qualifies, it is the best candidate so far and any
    # larger qualifying value must be in the greater subtree; otherwise
    # only the lesser subtree can qualify.
    if predicate(self.value):
        best = self.value
        child = self._greater
    else:
        best = max_
        child = self._lesser
    if child is None:
        return best
    return child.find_max(predicate, best)
Return the largest item in or under this node that satisfies *predicate*.
def _parse_doc(doc):
    """Parse a docstring.

    Parse a docstring and extract three components; headline, description,
    and map of arguments to help texts.

    Args:
      doc: docstring.

    Returns:
      a dictionary.
    """
    lines = doc.split("\n")

    # Everything before the first section keyword is the description.
    descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines))
    if len(descriptions) >= 3:
        description = "{0}\n\n{1}".format(
            lines[0], textwrap.dedent("\n".join(descriptions[2:])))
    else:
        description = lines[0]

    # Slice out the argument section: skip to its keyword, then take until
    # the next section keyword.
    args = list(itertools.takewhile(
        _checker(_KEYWORDS_OTHERS),
        itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines)))

    argmap = {}
    for pair in args[1:]:
        pieces = [v.strip() for v in pair.split(":")]
        if len(pieces) >= 2:
            argmap[pieces[0]] = ":".join(pieces[1:])

    return dict(headline=descriptions[0], description=description,
                args=argmap)
Parse a docstring. Parse a docstring and extract three components; headline, description, and map of arguments to help texts. Args: doc: docstring. Returns: a dictionary.
def process_from_webservice(id_val, id_type='pmcid', source='pmc',
                            with_grounding=True):
    """Return an output from RLIMS-p for the given PubMed ID or PMC ID.

    Parameters
    ----------
    id_val : str
        A PMCID, with the prefix PMC, or pmid, with no prefix, of the
        paper to be "read".
    id_type : str
        Either 'pmid' or 'pmcid'. The default is 'pmcid'.
    source : str
        Either 'pmc' or 'medline', whether you want pmc fulltext or
        medline abstracts.
    with_grounding : bool
        The RLIMS-P web service provides two endpoints, one pre-grounded,
        the other not so much. The grounded endpoint returns far less
        content, and may perform some grounding that can be handled by
        the grounding mapper.

    Returns
    -------
    :py:class:`indra.sources.rlimsp.processor.RlimspProcessor`
        An RlimspProcessor which contains a list of extracted INDRA
        Statements in its statements attribute.
    """
    # The grounded endpoint lives under a ".normed" path segment.
    template = '%s.normed/%s/%s' if with_grounding else '%s/%s/%s'
    resp = requests.get(RLIMSP_URL + template % (source, id_type, id_val))
    if resp.status_code != 200:
        raise RLIMSP_Error("Bad status code: %d - %s"
                           % (resp.status_code, resp.reason))

    rp = RlimspProcessor(resp.json())
    rp.extract_statements()
    return rp
Return an output from RLIMS-p for the given PubMed ID or PMC ID. Parameters ---------- id_val : str A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to be "read". id_type : str Either 'pmid' or 'pmcid'. The default is 'pmcid'. source : str Either 'pmc' or 'medline', whether you want pmc fulltext or medline abstracts. with_grounding : bool The RLIMS-P web service provides two endpoints, one pre-grounded, the other not so much. The grounded endpoint returns far less content, and may perform some grounding that can be handled by the grounding mapper. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute.
def write_ln(self, *text, sep=' '):
    """
    Append *text* to the buffer as a full line.

    :param text: fragments joined by ``sep``
    :param sep: separator placed between fragments
    :return: self, for call chaining
    """
    # Make sure we start on a fresh line before appending.
    needs_newline = self.text and self.text[-1] != '\n'
    if needs_newline:
        self.text += '\n'
    self.text += markdown.text(*text, sep) + '\n'
    return self
Write line :param text: :param sep: :return:
def format_symbol(symbol):
    """Return a well formatted Hermann-Mauguin symbol.

    Formats the symbol as expected by the database, by correcting the
    case and adding missing or removing duplicated spaces: letters are
    upper-cased only in the leading position and separated by spaces on
    both sides, digits are kept adjacent, and '-' / '/' stay attached to
    the character that follows them. Any other character is silently
    dropped (as before).

    Parameters
    ----------
    symbol : str
        Hermann-Mauguin symbol in free-form spacing and case,
        e.g. ``'p21/c'``.

    Returns
    -------
    str
        The normalized symbol, e.g. ``'P 21 / c'``. An empty or
        whitespace-only input yields ``''``.
    """
    s = symbol.strip()
    if not s:
        # Previously this raised IndexError on s[0]; return '' instead.
        return ''
    # First character (the lattice letter) upper case, the rest lower.
    s = s[0].upper() + s[1:].lower()
    fixed = []
    for c in s:
        if c.isalpha():
            fixed.append(' ' + c + ' ')
        elif c.isspace():
            fixed.append(' ')
        elif c.isdigit():
            fixed.append(c)
        elif c in '-/':
            # '-' and '/' bind to the following character, so only a
            # leading space is inserted.
            fixed.append(' ' + c)
    # Collapse any duplicated whitespace produced above.
    return ' '.join(''.join(fixed).split())
Returns well formatted Hermann-Mauguin symbol as expected by the database, by correcting the case and adding missing or removing duplicated spaces.
def get_edges(self):
    """
    Returns the edges of the network

    Each edge is a ``[parent, child]`` pair derived from the
    ``variable_parents`` mapping.

    Examples
    --------
    >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
    >>> reader.get_edges()
    [['family-out', 'light-on'], ['family-out', 'dog-out'],
     ['bowel-problem', 'dog-out'], ['dog-out', 'hear-bark']]
    """
    edges = []
    for child, parents in self.variable_parents.items():
        for parent in parents:
            edges.append([parent, child])
    return edges
Returns the edges of the network Examples -------- >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml") >>> reader.get_edges() [['family-out', 'light-on'], ['family-out', 'dog-out'], ['bowel-problem', 'dog-out'], ['dog-out', 'hear-bark']]
def roll_sparse(x, shift, axis=0):
    '''Roll a sparse matrix along an axis, like ``numpy.roll``.

    Parameters
    ----------
    x : scipy.sparse.spmatrix or np.ndarray
        The sparse matrix input
    shift : int
        The number of positions to roll the specified axis
    axis : (0, 1, -1)
        The axis along which to roll.

    Returns
    -------
    x_rolled : same type as `x`
        The rolled matrix, with the same format as `x`

    Raises
    ------
    ParameterError
        If ``axis`` is not one of ``(0, 1, -1)``.

    See Also
    --------
    numpy.roll

    Examples
    --------
    >>> # Generate a random sparse binary matrix
    >>> X = scipy.sparse.lil_matrix(np.random.randint(0, 2, size=(5,5)))
    >>> X_roll = roll_sparse(X, 2, axis=0)  # Roll by 2 on the first axis
    >>> X_dense_r = roll_sparse(X.toarray(), 2, axis=0)  # Equivalent dense roll
    >>> np.allclose(X_roll, X_dense_r.toarray())
    True
    '''
    # Dense (or any non-sparse) input is delegated straight to numpy.
    if not scipy.sparse.isspmatrix(x):
        return np.roll(x, shift, axis=axis)

    if axis not in (0, 1, -1):
        raise ParameterError('axis must be one of (0, 1, -1)')

    # Reducing shift modulo the axis length handles both negative shifts
    # and shifts larger than the axis.
    shift = np.mod(shift, x.shape[axis])
    if shift == 0:
        return x.copy()

    out_format = x.format
    # Column-compressed storage slices efficiently along axis 0;
    # row-compressed storage along axis 1 / -1 (axis was validated above).
    x = x.tocsc() if axis == 0 else x.tocsr()

    # Assemble into a lil matrix, which supports slice assignment.
    rolled = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)

    src = [slice(None)] * x.ndim
    dst = [slice(None)] * x.ndim

    # The leading part of the input lands after the shift point...
    src[axis], dst[axis] = slice(0, -shift), slice(shift, None)
    rolled[tuple(dst)] = x[tuple(src)]

    # ...and the trailing part wraps around to the front.
    src[axis], dst[axis] = slice(-shift, None), slice(0, shift)
    rolled[tuple(dst)] = x[tuple(src)]

    return rolled.asformat(out_format)
Sparse matrix roll This operation is equivalent to ``numpy.roll``, but operates on sparse matrices. Parameters ---------- x : scipy.sparse.spmatrix or np.ndarray The sparse matrix input shift : int The number of positions to roll the specified axis axis : (0, 1, -1) The axis along which to roll. Returns ------- x_rolled : same type as `x` The rolled matrix, with the same format as `x` See Also -------- numpy.roll Examples -------- >>> # Generate a random sparse binary matrix >>> X = scipy.sparse.lil_matrix(np.random.randint(0, 2, size=(5,5))) >>> X_roll = roll_sparse(X, 2, axis=0) # Roll by 2 on the first axis >>> X_dense_r = roll_sparse(X.toarray(), 2, axis=0) # Equivalent dense roll >>> np.allclose(X_roll, X_dense_r.toarray()) True
def set(self, field, value):
    """
    Sets the value of an app field.

    :param str field: The name of the app field. Trying to set
        immutable fields ``uuid`` or ``key`` will raise a ValueError.
    :param value: The new value of the app field.
    :raises: ValueError
    """
    # Note: this method name shadows the builtin ``set``; kept for
    # backward compatibility with existing callers.
    immutable = {
        'uuid': 'uuid cannot be set',
        'key': "key cannot be set. Use 'reset_key' method",
    }
    if field in immutable:
        raise ValueError(immutable[field])
    self.data[field] = value
Sets the value of an app field. :param str field: The name of the app field. Trying to set immutable fields ``uuid`` or ``key`` will raise a ValueError. :param value: The new value of the app field. :raises: ValueError
def copy_children(self, foreign_id, existing_node):
    ''' Initiates copying of tree, with existing_node acting as root '''
    url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id)
    self.log(
        ACTION, "Copying Children",
        {"existing node type": str(type(existing_node))})
    # TODO: create a robust wrapper around this functionality
    try:
        self.log(ACTION, "Requesting Data", {"url": url})
        content = json.loads(requests.get(url).content)
        self.log(SUCCESS, "Data Fetched Successfully", {"url": url})
        child_ids = content["meta"]["main_language_children"]
        if not child_ids:
            self.log(SUCCESS, "No children to copy")
            return
        for child_id in child_ids:
            # Recursively copy each main-language child under the
            # existing root node.
            self.copy_page_and_children(
                foreign_id=child_id,
                parent_id=existing_node.id,
                depth=1)
    except Exception as e:
        # Best-effort: failures are logged rather than propagated.
        self.log(ERROR, "Copying Children", {"url": url, "exception": e})
Initiates copying of tree, with existing_node acting as root
def nodes(self):
    """
    Lazily computes and caches the node positions on first request
    when no explicit node information was supplied.
    """
    cached = self._nodes
    if cached is None:
        cached = layout_nodes(self, only_nodes=True)
        self._nodes = cached
    return cached
Computes the node positions the first time they are requested if no explicit node information was supplied.
def add_noise(Y, sigma, rng=None):
    """Return ``Y`` with zero-mean Gaussian noise added.

    Parameters
    ----------
    Y : numpy.ndarray
        Input array; a new array is returned, ``Y`` is not modified.
    sigma : float
        Standard deviation of the noise.
    rng : numpy.random.Generator or numpy.random.RandomState, optional
        Source of randomness, for reproducible noise. When omitted, the
        global numpy random state is used (the original behaviour).

    Returns
    -------
    numpy.ndarray
        ``Y + noise``, same shape as ``Y``.
    """
    if rng is None:
        # Fall back to the legacy global state for backward compatibility.
        rng = np.random
    return Y + rng.normal(0, sigma, Y.shape)
Adds noise to Y
def _cell_to_python(cell):
    """Convert a PyOpenXL's `Cell` object to the corresponding Python object.

    Empty cells become ``None``, boolean formulas become ``bool``, cells
    with date/datetime number formats become trimmed strings, numeric
    percent-formatted cells are rendered as percentage strings, ``None``
    values become ``""`` and everything else passes through unchanged.
    """
    if type(cell) is EmptyCell:
        return None

    dtype = cell.data_type
    raw = cell.value

    # Boolean formulas are stored as formula strings.
    if dtype == "f" and raw == "=TRUE()":
        return True
    if dtype == "f" and raw == "=FALSE()":
        return False

    fmt = cell.number_format.lower()
    if fmt == "yyyy-mm-dd":
        # Drop the zero time component of a date-only cell.
        return str(raw).split(" 00:00:00")[0]
    if fmt == "yyyy-mm-dd hh:mm:ss":
        # Drop fractional seconds from a datetime cell.
        return str(raw).split(".")[0]

    if cell.number_format.endswith("%") and isinstance(raw, Number):
        # Render percent-formatted numbers exactly, via Decimal.
        return "{:%}".format(Decimal(str(raw)))

    if raw is None:
        return ""
    return raw
Convert a PyOpenXL's `Cell` object to the corresponding Python object.
def median(self, axis=None, skipna=None, level=None, numeric_only=None,
           **kwargs):
    """Computes median across the DataFrame.

    Args:
        axis (int): The axis to take the median on.
        skipna (bool): True to skip NA values, false otherwise.

    Returns:
        The median of the DataFrame. (Pandas series)
    """
    # Default to axis 0 when unspecified.
    resolved_axis = 0 if axis is None else self._get_axis_number(axis)
    if numeric_only is not None and not numeric_only:
        # numeric_only=False was requested explicitly: ensure all dtypes
        # can participate before delegating.
        self._validate_dtypes(numeric_only=True)
    reduced = self._query_compiler.median(
        axis=resolved_axis,
        skipna=skipna,
        level=level,
        numeric_only=numeric_only,
        **kwargs
    )
    return self._reduce_dimension(reduced)
Computes median across the DataFrame. Args: axis (int): The axis to take the median on. skipna (bool): True to skip NA values, false otherwise. Returns: The median of the DataFrame. (Pandas series)
def config(self, show_row_hdrs=True, show_col_hdrs=True,
           show_col_hdr_in_cell=False, auto_resize=True):
    """
    Override the in-class params:
    @param show_row_hdrs : show row headers
    @param show_col_hdrs : show column headers
    @param show_col_hdr_in_cell : embed column header in each cell
    @param auto_resize : auto resize according to the size of terminal
    """
    self.show_row_hdrs = show_row_hdrs
    self.show_col_hdrs = show_col_hdrs
    self.show_col_hdr_in_cell = show_col_hdr_in_cell
    # Bug fix: auto_resize was accepted and documented but never stored.
    self.auto_resize = auto_resize
Override the in-class params: @param show_row_hdrs : show row headers @param show_col_hdrs : show column headers @param show_col_hdr_in_cell : embed column header in each cell @param auto_resize : auto resize according to the size of terminal
def slice(index, template):
    """Slice a template based on its positional argument

    Arguments:
        index (int): Position at which to slice
        template (str): Template to slice

    Raises:
        ValueError: If ``{index}`` does not occur in ``template``.

    Example:
        >>> slice(0, "{cwd}/{0}/assets/{1}/{2}")
        '{cwd}/{0}'
        >>> slice(1, "{cwd}/{0}/assets/{1}/{2}")
        '{cwd}/{0}/assets/{1}'
    """
    # NOTE: shadows the builtin ``slice``; the name is kept for
    # backward compatibility.
    # Bug fix: the previous pattern "^.*{[%i]}" put the index inside a
    # regex character class, so multi-digit indices broke (index 10
    # matched "{1}" or "{0}" instead of "{10}"). Escape the literal
    # marker instead; ``.*`` stays greedy, so the slice still extends to
    # the last occurrence of the marker.
    pattern = "^.*%s" % re.escape("{%d}" % index)
    try:
        return re.match(pattern, template).group()
    except AttributeError:
        raise ValueError("Index %i not found in template: %s"
                         % (index, template))
Slice a template based on its positional argument

    Arguments:
        index (int): Position at which to slice
        template (str): Template to slice

    Example:
        >>> slice(0, "{cwd}/{0}/assets/{1}/{2}")
        '{cwd}/{0}'
        >>> slice(1, "{cwd}/{0}/assets/{1}/{2}")
        '{cwd}/{0}/assets/{1}'