code: string, lengths 75 to 104k
docstring: string, lengths 1 to 46.9k
def parse_args(self):
    """ Parse our arguments. """
    # compile the parser
    self._compile()
    # clear the args
    self.args = None
    self._self_event('before_parse', 'parse', *sys.argv[1:], **{})
    # list commands/subcommands in argv
    cmds = [cmd for cmd in sys.argv[1:] if not cmd.startswith("-")]
    if (len(cmds) > 0 and not utils.check_help() and self.default_cmd
            and cmds[0] not in self.commands):
        # if we have at least one command which is not a help command,
        # a default command is set, and the first command in the arguments
        # is not a known command, insert the default command as the second
        # argument (actually the first command)
        sys.argv.insert(1, self.default_cmd)
    # let's parse the arguments
    self.args = self.parser.parse_args()
    # set up the output
    if self.args:
        # if we have some arguments
        if self.add_output and self.args.output is not None:
            # If add_output is True and we have an output file,
            # set up the encoding
            self.encoding = self.args.encoding
            if self.args.encoding.lower() == 'raw':
                # if we have passed a raw encoding we will write directly
                # to the output file.
                self._output = open(self.args.output, self.args.write_mode)
            else:
                # else we will use the codecs module to write to the
                # output file.
                import codecs
                self._output = codecs.open(
                    self.args.output, self.args.write_mode,
                    encoding=self.args.encoding)
    if self._cfg_factory:
        # if we have a config factory, set up the config file with the
        # right param
        self.cfg_file = self.args.config
    # now it is parsed
    self._is_parsed = True
    return self
Parse our arguments.
def filter(self, criteria):
    """
    Return a filtered version of this list following the given criteria,
    which can be a string (take only courses where this attribute is
    truthy) or a function which takes a course and returns a boolean.
    """
    if isinstance(criteria, (str, unicode)):
        _criteria = criteria
        criteria = lambda x: x.get(_criteria)
    return CoursesList(filter(criteria, self))
Return a filtered version of this list following the given criteria, which can be a string (take only courses where this attribute is truthy) or a function which takes a course and returns a boolean.
def state_args(id_, state, high):
    '''
    Return a set of the arguments passed to the named state
    '''
    args = set()
    if id_ not in high:
        return args
    if state not in high[id_]:
        return args
    for item in high[id_][state]:
        if not isinstance(item, dict):
            continue
        if len(item) != 1:
            continue
        args.add(next(iter(item)))
    return args
Return a set of the arguments passed to the named state
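A minimal usage sketch for state_args. The nested 'high' layout below is a hypothetical example of Salt-style compiled high data, not taken from the source: single-key dicts are argument entries, anything else is skipped.

high = {
    'apache': {
        'pkg': [{'name': 'httpd'}, {'version': '2.4'}, 'installed'],
    },
}
print(state_args('apache', 'pkg', high))   # {'name', 'version'}
print(state_args('nginx', 'pkg', high))    # set() -- unknown id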
def rewrite_elife_datasets_json(json_content, doi):
    """ this does the work of rewriting elife datasets json """
    # Add dates in bulk
    elife_dataset_dates = []
    elife_dataset_dates.append(("10.7554/eLife.00348", "used", "dataro17", u"2010"))
    elife_dataset_dates.append(("10.7554/eLife.01179", "used", "dataro4", u"2016"))
    elife_dataset_dates.append(("10.7554/eLife.01603", "used", "dataro2", u"2012"))
    elife_dataset_dates.append(("10.7554/eLife.02304", "used", "dataro15", u"2005"))
    elife_dataset_dates.append(("10.7554/eLife.02935", "used", "dataro2", u"2014"))
    elife_dataset_dates.append(("10.7554/eLife.03583", "used", "dataro5", u"2013"))
    if doi in map(lambda dataset: dataset[0], elife_dataset_dates):
        for (match_doi, used_or_generated, id, dataset_date) in elife_dataset_dates:
            if doi == match_doi:
                if json_content.get(used_or_generated):
                    for dataset in json_content[used_or_generated]:
                        if dataset.get("id") and dataset["id"] == id:
                            if not dataset.get("date"):
                                dataset["date"] = dataset_date
    # Continue with individual article JSON rewriting
    if doi == "10.7554/eLife.01311":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] in ["dataro3", "dataro4", "dataro5"]:
                    if not dataset.get("date"):
                        dataset["date"] = u"2012"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Duke"}]
                if dataset.get("id") and dataset["id"] == "dataro6":
                    if not dataset.get("date"):
                        dataset["date"] = u"2011"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "FlyBase"}]
                if dataset.get("id") and dataset["id"] == "dataro7":
                    if not dataset.get("date"):
                        dataset["date"] = u"2011"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Baylor College of Medicine (BCM)"}]
                if dataset.get("id") and dataset["id"] in ["dataro8", "dataro9"]:
                    if not dataset.get("date"):
                        dataset["date"] = u"2012"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "University of California, Berkeley"}]
    if doi == "10.7554/eLife.01440":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "EnsemblMetazoa"}]
    if doi == "10.7554/eLife.01535":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if dataset.get("date") and dataset.get("date") == "2000, 2005":
                        dataset["date"] = u"2000"
    if doi == "10.7554/eLife.02304":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro11":
                    if not dataset.get("title"):
                        dataset["title"] = u"T.gondii LDH1 ternary complex with APAD+ and oxalate"
    if doi == "10.7554/eLife.03574":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if not dataset.get("date"):
                        dataset["date"] = u"2006"
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Riley,M."}, {"type": "group", "name": "Abe,T."},
                            {"type": "group", "name": "Arnaud,M.B."}, {"type": "group", "name": "Berlyn,M.K."},
                            {"type": "group", "name": "Blattner,F.R."}, {"type": "group", "name": "Chaudhuri,R.R."},
                            {"type": "group", "name": "Glasner,J.D."}, {"type": "group", "name": "Horiuchi,T."},
                            {"type": "group", "name": "Keseler,I.M."}, {"type": "group", "name": "Kosuge,T."},
                            {"type": "group", "name": "Mori,H."}, {"type": "group", "name": "Perna,N.T."},
                            {"type": "group", "name": "Plunkett,G. III"}, {"type": "group", "name": "Rudd,K.E."},
                            {"type": "group", "name": "Serres,M.H."}, {"type": "group", "name": "Thomas,G.H."},
                            {"type": "group", "name": "Thomson,N.R."}, {"type": "group", "name": "Wishart,D."},
                            {"type": "group", "name": "Wanner,B.L."}]
    if doi == "10.7554/eLife.03676":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro4":
                    if not dataset.get("date"):
                        dataset["date"] = u"2013"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Human Gene Sequencing Center"}]
    if doi == "10.7554/eLife.03971":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Vanderperre B."}]
    if doi == "10.7554/eLife.04660":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if dataset.get("date") and dataset.get("date") == "2014-2015":
                        dataset["date"] = u"2014"
    if doi == "10.7554/eLife.06421":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if dataset.get("date") and dataset.get("date") == "NA":
                        dataset["date"] = u"2006"
    if doi == "10.7554/eLife.08445":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "data-ro1":
                    if not dataset.get("date"):
                        dataset["date"] = u"2006"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "BDTNP SELEX"}]
    if doi == "10.7554/eLife.08916":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if dataset.get("date") and dataset.get("date") == "2008, updated 2014":
                        dataset["date"] = u"2008"
                if dataset.get("id") and dataset["id"] == "dataro3":
                    if dataset.get("date") and dataset.get("date") == "2013, updated 2014":
                        dataset["date"] = u"2013"
    if doi == "10.7554/eLife.08955":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Kurdistani S"},
                            {"type": "group", "name": "Marrban C"},
                            {"type": "group", "name": "Su T"}]
    if doi == "10.7554/eLife.09207":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Prostate Cancer Genome Sequencing Project"}]
    if doi == "10.7554/eLife.10607":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "data-ro4":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Authors"}]
    if doi == "10.7554/eLife.10670":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "data-ro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "HIVdb"}]
    # Add dates, authors, other details
    if doi == "10.7554/eLife.10856":
        if json_content.get("generated"):
            datasets_authors_for_10856 = [
                {"type": "group", "name": "Dagdas YF"}, {"type": "group", "name": "Belhaj K"},
                {"type": "group", "name": "Maqbool A"}, {"type": "group", "name": "Chaparro-Garcia A"},
                {"type": "group", "name": "Pandey P"}, {"type": "group", "name": "Petre B"},
                {"type": "group", "name": "Tabassum N"}, {"type": "group", "name": "Cruz-Mireles N"},
                {"type": "group", "name": "Hughes RK"}, {"type": "group", "name": "Sklenar J"},
                {"type": "group", "name": "Win J"}, {"type": "group", "name": "Menke F"},
                {"type": "group", "name": "Findlay K"}, {"type": "group", "name": "Banfield MJ"},
                {"type": "group", "name": "Kamoun S"}, {"type": "group", "name": "Bozkurt TO"}]
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro7":
                    if not dataset.get("date"):
                        dataset["date"] = u"2016"
                    if not dataset.get("title"):
                        dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
                    if not dataset.get("authors"):
                        dataset["authors"] = datasets_authors_for_10856
                    if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
                        dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151098/"
                if dataset.get("id") and dataset["id"] == "dataro8":
                    if not dataset.get("date"):
                        dataset["date"] = u"2015"
                    if not dataset.get("title"):
                        dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
                    if not dataset.get("authors"):
                        dataset["authors"] = datasets_authors_for_10856
                    if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
                        dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151096/"
                if dataset.get("id") and dataset["id"] == "dataro9":
                    if not dataset.get("authors"):
                        dataset["authors"] = datasets_authors_for_10856
    if doi == "10.7554/eLife.10877":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("title"):
                        dataset["title"] = u"Oct4 ChIP-Seq at G1 and G2/M phase of cell cycle in mouse embryonic stem cells"
    if doi == "10.7554/eLife.10921":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Floor SN"},
                            {"type": "group", "name": "Doudna JA"}]
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Sidrauski C"}, {"type": "group", "name": "McGeachy A"},
                            {"type": "group", "name": "Ingolia N"}, {"type": "group", "name": "Walter P"}]
    if doi == "10.7554/eLife.11117":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro14":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Authors"}]
    if doi == "10.7554/eLife.12204":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Rhodes DR"}, {"type": "group", "name": "Kalyana-Sundaram S"},
                            {"type": "group", "name": "Mahavisno V"}, {"type": "group", "name": "Varambally R"},
                            {"type": "group", "name": "Yu J"}, {"type": "group", "name": "Briggs BB"},
                            {"type": "group", "name": "Barrette TR"}, {"type": "group", "name": "Anstet MJ"},
                            {"type": "group", "name": "Kincead-Beal C"}, {"type": "group", "name": "Kulkarni P"},
                            {"type": "group", "name": "Varambally S"}, {"type": "group", "name": "Ghosh D"},
                            {"type": "group", "name": "Chinnaiyan AM."}]
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Gaspar C"}, {"type": "group", "name": "Cardoso J"},
                            {"type": "group", "name": "Franken P"}, {"type": "group", "name": "Molenaar L"},
                            {"type": "group", "name": "Morreau H"}, {"type": "group", "name": "Möslein G"},
                            {"type": "group", "name": "Sampson J"}, {"type": "group", "name": "Boer JM"},
                            {"type": "group", "name": "de Menezes RX"}, {"type": "group", "name": "Fodde R."}]
                if dataset.get("id") and dataset["id"] == "dataro3":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Graudens E"}, {"type": "group", "name": "Boulanger V"},
                            {"type": "group", "name": "Mollard C"}, {"type": "group", "name": "Mariage-Samson R"},
                            {"type": "group", "name": "Barlet X"}, {"type": "group", "name": "Grémy G"},
                            {"type": "group", "name": "Couillault C"}, {"type": "group", "name": "Lajémi M"},
                            {"type": "group", "name": "Piatier-Tonneau D"}, {"type": "group", "name": "Zaborski P"},
                            {"type": "group", "name": "Eveno E"}, {"type": "group", "name": "Auffray C"},
                            {"type": "group", "name": "Imbeaud S."}]
                if dataset.get("id") and dataset["id"] == "dataro4":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Hong Y"}, {"type": "group", "name": "Downey T"},
                            {"type": "group", "name": "Eu KW"}, {"type": "group", "name": "Koh PK"},
                            {"type": "group", "name": "Cheah PY"}]
                if dataset.get("id") and dataset["id"] == "dataro5":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Kaiser S"}, {"type": "group", "name": "Park YK"},
                            {"type": "group", "name": "Franklin JL"}, {"type": "group", "name": "Halberg RB"},
                            {"type": "group", "name": "Yu M"}, {"type": "group", "name": "Jessen WJ"},
                            {"type": "group", "name": "Freudenberg J"}, {"type": "group", "name": "Chen X"},
                            {"type": "group", "name": "Haigis K"}, {"type": "group", "name": "Jegga AG"},
                            {"type": "group", "name": "Kong S"}, {"type": "group", "name": "Sakthivel B"},
                            {"type": "group", "name": "Xu H"}, {"type": "group", "name": "Reichling T"},
                            {"type": "group", "name": "Azhar M"}, {"type": "group", "name": "Boivin GP"},
                            {"type": "group", "name": "Roberts RB"}, {"type": "group", "name": "Bissahoyo AC"},
                            {"type": "group", "name": "Gonzales F"}, {"type": "group", "name": "Bloom GC"},
                            {"type": "group", "name": "Eschrich S"}, {"type": "group", "name": "Carter SL"},
                            {"type": "group", "name": "Aronow JE"}, {"type": "group", "name": "Kleimeyer J"},
                            {"type": "group", "name": "Kleimeyer M"}, {"type": "group", "name": "Ramaswamy V"},
                            {"type": "group", "name": "Settle SH"}, {"type": "group", "name": "Boone B"},
                            {"type": "group", "name": "Levy S"}, {"type": "group", "name": "Graff JM"},
                            {"type": "group", "name": "Doetschman T"}, {"type": "group", "name": "Groden J"},
                            {"type": "group", "name": "Dove WF"}, {"type": "group", "name": "Threadgill DW"},
                            {"type": "group", "name": "Yeatman TJ"}, {"type": "group", "name": "Coffey RJ Jr"},
                            {"type": "group", "name": "Aronow BJ."}]
                if dataset.get("id") and dataset["id"] == "dataro6":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Muzny DM et al"}]
                if dataset.get("id") and dataset["id"] == "dataro7":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Skrzypczak M"}, {"type": "group", "name": "Goryca K"},
                            {"type": "group", "name": "Rubel T"}, {"type": "group", "name": "Paziewska A"},
                            {"type": "group", "name": "Mikula M"}, {"type": "group", "name": "Jarosz D"},
                            {"type": "group", "name": "Pachlewski J"}, {"type": "group", "name": "Oledzki J"},
                            {"type": "group", "name": "Ostrowski J."}]
                if dataset.get("id") and dataset["id"] == "dataro8":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Cancer Genome Atlas Network"}]
    if doi == "10.7554/eLife.12876":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Department of Human Genetics, University of Utah"}]
    if doi == "10.7554/eLife.13195":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "Microbial Ecology Group, Colorado State University"}]
    if doi == "10.7554/eLife.14158":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "data-ro1":
                    if not dataset.get("title"):
                        dataset["title"] = u"Bacterial initiation protein"
                if dataset.get("id") and dataset["id"] == "data-ro2":
                    if not dataset.get("title"):
                        dataset["title"] = u"Bacterial initiation protein in complex with Phage inhibitor protein"
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "dataro3":
                    if not dataset.get("date"):
                        dataset["date"] = u"2007"
    if doi == "10.7554/eLife.14243":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro2":
                    if not dataset.get("authors"):
                        dataset["authors"] = [
                            {"type": "group", "name": "Tramantano M"}, {"type": "group", "name": "Sun L"},
                            {"type": "group", "name": "Au C"}, {"type": "group", "name": "Labuz D"},
                            {"type": "group", "name": "Liu Z"}, {"type": "group", "name": "Chou M"},
                            {"type": "group", "name": "Shen C"}, {"type": "group", "name": "Luk E"}]
    if doi == "10.7554/eLife.16078":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if dataset.get("date") and dataset.get("date") == "current manuscript":
                        dataset["date"] = u"2016"
    if doi == "10.7554/eLife.17082":
        if json_content.get("used"):
            for dataset in json_content["used"]:
                if dataset.get("id") and dataset["id"] == "data-ro4":
                    if not dataset.get("date"):
                        dataset["date"] = u"2012"
                if dataset.get("id") and dataset["id"] == "data-ro5":
                    if not dataset.get("date"):
                        dataset["date"] = u"2014"
                if dataset.get("id") and dataset["id"] == "data-ro6":
                    if not dataset.get("date"):
                        dataset["date"] = u"2014"
                    if not dataset.get("authors"):
                        dataset["authors"] = [{"type": "group", "name": "The Cancer Genome Atlas (TCGA)"}]
    if doi == "10.7554/eLife.17473":
        if json_content.get("generated"):
            for dataset in json_content["generated"]:
                if dataset.get("id") and dataset["id"] == "dataro1":
                    if dataset.get("date") and dataset.get("date").startswith("Release date"):
                        dataset["date"] = u"2016"
    return json_content
this does the work of rewriting elife datasets json
def prepend(self, other, inplace=True, pad=None, gap=None, resize=True):
    """Connect another series onto the start of the current one.

    Parameters
    ----------
    other : `Series`
        another series of the same type as this one
    inplace : `bool`, optional
        perform operation in-place, modifying current series, otherwise
        copy data and return new series, default: `True`

        .. warning::

           `inplace` prepend bypasses the reference check in
           `numpy.ndarray.resize`, so be careful to only use this for
           arrays that haven't been sharing their memory!

    pad : `float`, optional
        value with which to pad discontiguous series, by default gaps
        will result in a `ValueError`.
    gap : `str`, optional
        action to perform if there's a gap between the other series and
        this one. One of

        - ``'raise'`` - raise a `ValueError`
        - ``'ignore'`` - remove gap and join data
        - ``'pad'`` - pad gap with zeros

        If `pad` is given and is not `None`, the default is ``'pad'``,
        otherwise ``'raise'``.
    resize : `bool`, optional
        resize this array to accommodate new data, otherwise shift the
        old data to the left (potentially falling off the start) and put
        the new data in at the end, default: `True`.

    Returns
    -------
    series : `TimeSeries`
        time-series containing joined data sets
    """
    out = other.append(self, inplace=False, gap=gap, pad=pad, resize=resize)
    if inplace:
        self.resize(out.shape, refcheck=False)
        self[:] = out[:]
        self.x0 = out.x0.copy()
        del out
        return self
    return out
Connect another series onto the start of the current one.

Parameters
----------
other : `Series`
    another series of the same type as this one
inplace : `bool`, optional
    perform operation in-place, modifying current series, otherwise copy
    data and return new series, default: `True`

    .. warning::

       `inplace` prepend bypasses the reference check in
       `numpy.ndarray.resize`, so be careful to only use this for arrays
       that haven't been sharing their memory!

pad : `float`, optional
    value with which to pad discontiguous series, by default gaps will
    result in a `ValueError`.
gap : `str`, optional
    action to perform if there's a gap between the other series and this
    one. One of

    - ``'raise'`` - raise a `ValueError`
    - ``'ignore'`` - remove gap and join data
    - ``'pad'`` - pad gap with zeros

    If `pad` is given and is not `None`, the default is ``'pad'``,
    otherwise ``'raise'``.
resize : `bool`, optional
    resize this array to accommodate new data, otherwise shift the old
    data to the left (potentially falling off the start) and put the new
    data in at the end, default: `True`.

Returns
-------
series : `TimeSeries`
    time-series containing joined data sets
def save_imgs(x, fname):
    """Helper method to save a grid of images to a PNG file.

    Args:
      x: A numpy array of shape [n_images, height, width].
      fname: The filename to write to (including extension).
    """
    n = x.shape[0]
    fig = figure.Figure(figsize=(n, 1), frameon=False)
    canvas = backend_agg.FigureCanvasAgg(fig)
    for i in range(n):
        ax = fig.add_subplot(1, n, i+1)
        ax.imshow(x[i].squeeze(),
                  interpolation="none",
                  cmap=cm.get_cmap("binary"))
        ax.axis("off")
    canvas.print_figure(fname, format="png")
    print("saved %s" % fname)
Helper method to save a grid of images to a PNG file.

Args:
    x: A numpy array of shape [n_images, height, width].
    fname: The filename to write to (including extension).
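A minimal usage sketch for save_imgs. It assumes the function's module binds `figure`, `backend_agg`, and `cm` from matplotlib as imported below; the data and output filename are hypothetical.

import numpy as np
from matplotlib import figure, cm
from matplotlib.backends import backend_agg

x = np.random.rand(4, 28, 28)   # four fake 28x28 grayscale images
save_imgs(x, 'grid.png')        # writes a 4-wide strip to grid.png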
def pelix_infos(self):
    """
    Basic information about the Pelix framework instance
    """
    framework = self.__context.get_framework()
    return {
        "version": framework.get_version(),
        "properties": framework.get_properties(),
    }
Basic information about the Pelix framework instance
def attribute_labels(self, attribute_id, params=None):
    """
    Gets the security labels from an attribute

    Yields: Security label json
    """
    if params is None:
        params = {}
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])

    for al in self.tc_requests.attribute_labels(
        self.api_type,
        self.api_sub_type,
        self.unique_id,
        attribute_id,
        owner=self.owner,
        params=params,
    ):
        yield al
Gets the security labels from an attribute

Yields: Security label json
def object_merge(old, new, unique=False):
    """
    Recursively merge two data structures.

    :param unique: When set to True existing list items are not set.
    """
    if isinstance(old, list) and isinstance(new, list):
        if old == new:
            return
        for item in old[::-1]:
            if unique and item in new:
                continue
            new.insert(0, item)
    if isinstance(old, dict) and isinstance(new, dict):
        for key, value in old.items():
            if key not in new:
                new[key] = value
            else:
                object_merge(value, new[key])
Recursively merge two data structures. :param unique: When set to True existing list items are not set.
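A short usage sketch for object_merge; the values are made up. Note that the merge mutates `new` in place and `unique` only affects the top-level list merge.

old = ['x', 'y']
new = ['y', 'z']
object_merge(old, new, unique=True)
print(new)   # ['x', 'y', 'z'] -- 'y' is not duplicated

defaults = {'host': 'localhost', 'port': 8080}
cfg = {'port': 9090}
object_merge(defaults, cfg)
print(cfg)   # {'port': 9090, 'host': 'localhost'}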
def _scan(positions):
    """get the region inside the vector with more expression"""
    scores = []
    # slide a 17-position window along the vector in steps of 5,
    # scoring each window against the flanking regions
    for start in range(0, len(positions) - 17, 5):
        end = start + 17
        scores.append(_enrichment(positions[start:end],
                                  positions[:start],
                                  positions[end:]))
    return scores
get the region inside the vector with more expression
def square_root(n, epsilon=0.001):
    """Return square root of n, with maximum absolute error epsilon"""
    guess = n / 2
    # Newton's method: repeatedly average the guess with n / guess
    while abs(guess * guess - n) > epsilon:
        guess = (guess + (n / guess)) / 2
    return guess
Return square root of n, with maximum absolute error epsilon
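A quick usage sketch. Note that epsilon bounds the error of guess**2 against n, not of the guess against the true root.

print(square_root(2))          # 1.41421..., with |guess**2 - 2| < 0.001
print(square_root(9, 1e-9))    # ~3.0, with a much tighter tolerance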
def create_effect(self, label: str, name: str, *args, **kwargs) -> Effect:
    """
    Create an effect instance adding it to the internal effects dictionary
    using the label as key.

    Args:
        label (str): The unique label for the effect instance
        name (str): Name or full python path to the effect class we want
            to instantiate
        args: Positional arguments to the effect initializer
        kwargs: Keyword arguments to the effect initializer

    Returns:
        The newly created Effect instance
    """
    effect_cls = effects.find_effect_class(name)
    effect = effect_cls(*args, **kwargs)
    effect._label = label

    if label in self._effects:
        raise ValueError("An effect with label '{}' already exists".format(label))

    self._effects[label] = effect
    return effect
Create an effect instance adding it to the internal effects dictionary using the label as key.

Args:
    label (str): The unique label for the effect instance
    name (str): Name or full python path to the effect class we want to instantiate
    args: Positional arguments to the effect initializer
    kwargs: Keyword arguments to the effect initializer

Returns:
    The newly created Effect instance
def recursively_register_child_states(self, state):
    """
    A function that registers recursively all child states of a state

    :param state: the state whose children are to be registered
    :return:
    """
    self.logger.info("Execution status observer add new state {}".format(state))
    if isinstance(state, ContainerState):
        state.add_observer(self, "add_state",
                           notify_after_function=self.on_add_state)
        for state in list(state.states.values()):
            self.recursively_register_child_states(state)
            state.add_observer(
                self, "state_execution_status",
                notify_after_function=self.on_state_execution_status_changed_after)
    if isinstance(state, LibraryState):
        self.recursively_register_child_states(state.state_copy)
        state.add_observer(
            self, "state_execution_status",
            notify_after_function=self.on_state_execution_status_changed_after)
A function that registers recursively all child states of a state

:param state: the state whose children are to be registered
:return:
def _send_packet(self, data):
    """
    Send packet to client.
    """
    if self._closed:
        return

    data = json.dumps(data)

    def send():
        try:
            yield From(self.pipe_connection.write(data))
        except BrokenPipeError:
            self.detach_and_close()

    ensure_future(send())
Send packet to client.
def make_zigzag(points, num_cols):
    """ Converts linear sequence of points into a zig-zag shape.

    This function is designed to create input for the visualization
    software. It orders the points to draw a zig-zag shape which enables
    generating properly connected lines without any scanlines. Please see
    the below sketch on the functionality of the ``num_cols`` parameter::

             num cols
        <-=============->
        ------->>-------|
        |------<<-------|
        |------>>-------|
        -------<<-------|

    Please note that this function does not check the ordering of the
    input points, i.e. whether they have already been processed into a
    zig-zag shape.

    :param points: list of points to be ordered
    :type points: list
    :param num_cols: number of elements in a row which the zig-zag is generated
    :type num_cols: int
    :return: re-ordered points
    :rtype: list
    """
    new_points = []
    points_size = len(points)
    forward = True
    idx = 0
    rev_idx = -1
    while idx < points_size:
        if forward:
            new_points.append(points[idx])
        else:
            new_points.append(points[rev_idx])
            rev_idx -= 1
        idx += 1
        if idx % num_cols == 0:
            forward = not forward
            rev_idx = idx + num_cols - 1
    return new_points
Converts linear sequence of points into a zig-zag shape.

This function is designed to create input for the visualization software.
It orders the points to draw a zig-zag shape which enables generating
properly connected lines without any scanlines. Please see the below
sketch on the functionality of the ``num_cols`` parameter::

         num cols
    <-=============->
    ------->>-------|
    |------<<-------|
    |------>>-------|
    -------<<-------|

Please note that this function does not check the ordering of the input
points, i.e. whether they have already been processed into a zig-zag
shape.

:param points: list of points to be ordered
:type points: list
:param num_cols: number of elements in a row which the zig-zag is generated
:type num_cols: int
:return: re-ordered points
:rtype: list
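A quick usage sketch: reorder a flattened 3x3 grid so that every second row runs backwards.

pts = list(range(9))          # a 3x3 grid, flattened row by row
print(make_zigzag(pts, 3))    # [0, 1, 2, 5, 4, 3, 6, 7, 8]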
def TypeFactory(v):
    """Ensure `v` is a valid Type.

    This function is used to convert user-specified types into internal
    types for the verification engine. It allows Type subclasses, Type
    subclass instances, Python type, and user-defined classes to be
    passed. Returns an instance of the type of `v`.

    Users should never access this function directly.
    """
    if v is None:
        return Nothing()
    elif issubclass(type(v), Type):
        return v
    elif issubclass(v, Type):
        return v()
    elif issubclass(type(v), type):
        return Generic(v)
    else:
        raise InvalidTypeError("Invalid type %s" % v)
Ensure `v` is a valid Type. This function is used to convert user-specified types into internal types for the verification engine. It allows Type subclasses, Type subclass instances, Python type, and user-defined classes to be passed. Returns an instance of the type of `v`. Users should never access this function directly.
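A usage sketch showing the three dispatch branches, assuming the module's own Nothing, Generic, and Type classes are in scope (and that Generic is itself a Type subclass):

t1 = TypeFactory(None)   # -> a Nothing() instance
t2 = TypeFactory(int)    # -> Generic(int), wrapping a plain Python type
t3 = TypeFactory(t2)     # -> t2 itself, already a Type instance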
def get_an_int(self, arg, msg_on_error, min_value=None, max_value=None):
    """Like cmdfns.get_an_int(), but if there's a stack frame use that
    in evaluation."""
    ret_value = self.get_int_noerr(arg)
    if ret_value is None:
        if msg_on_error:
            self.errmsg(msg_on_error)
        else:
            self.errmsg('Expecting an integer, got: %s.' % str(arg))
        return None
    if min_value and ret_value < min_value:
        self.errmsg('Expecting integer value to be at least %d, got: %d.'
                    % (min_value, ret_value))
        return None
    elif max_value and ret_value > max_value:
        self.errmsg('Expecting integer value to be at most %d, got: %d.'
                    % (max_value, ret_value))
        return None
    return ret_value
Like cmdfns.get_an_int(), but if there's a stack frame use that in evaluation.
def valid_project(self):
    """Handle an invalid active project."""
    try:
        path = self.projects.get_active_project_path()
    except AttributeError:
        return

    if bool(path):
        if not self.projects.is_valid_project(path):
            if path:
                QMessageBox.critical(
                    self,
                    _('Error'),
                    _("<b>{}</b> is no longer a valid Spyder project! "
                      "Since it is the current active project, it will "
                      "be closed automatically.").format(path))
                self.projects.close_project()
Handle an invalid active project.
def line_ball_intersection(start_points, end_points, center, radius):
    """
    Compute the length of the intersection of a line segment with a ball.

    Parameters
    ----------
    start_points : (n,3) float, list of points in space
    end_points : (n,3) float, list of points in space
    center : (3,) float, the sphere center
    radius : float, the sphere radius

    Returns
    -------
    lengths: (n,) float, the lengths.
    """
    # We solve for the intersection of |x-c|**2 = r**2 and
    # x = o + dL. This yields
    # d = (-l.(o-c) +- sqrt[ l.(o-c)**2 - l.l((o-c).(o-c) - r**2) ]) / l.l

    L = end_points - start_points
    oc = start_points - center  # o-c
    r = radius
    ldotl = np.einsum('ij, ij->i', L, L)  # l.l
    ldotoc = np.einsum('ij, ij->i', L, oc)  # l.(o-c)
    ocdotoc = np.einsum('ij, ij->i', oc, oc)  # (o-c).(o-c)
    discrims = ldotoc**2 - ldotl * (ocdotoc - r**2)

    # If discriminant is non-positive, then we have zero length
    lengths = np.zeros(len(start_points))

    # Otherwise we solve for the solns with d2 > d1.
    m = discrims > 0  # mask
    d1 = (-ldotoc[m] - np.sqrt(discrims[m])) / ldotl[m]
    d2 = (-ldotoc[m] + np.sqrt(discrims[m])) / ldotl[m]

    # Line segment means we have 0 <= d <= 1
    d1 = np.clip(d1, 0, 1)
    d2 = np.clip(d2, 0, 1)

    # Length is |o + d2 l - o + d1 l| = (d2 - d1) |l|
    lengths[m] = (d2 - d1) * np.sqrt(ldotl[m])

    return lengths
Compute the length of the intersection of a line segment with a ball.

Parameters
----------
start_points : (n,3) float, list of points in space
end_points : (n,3) float, list of points in space
center : (3,) float, the sphere center
radius : float, the sphere radius

Returns
-------
lengths: (n,) float, the lengths.
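A worked check with made-up inputs: one segment through a unit ball at the origin (the chord is the full diameter), one segment that misses it.

import numpy as np

starts = np.array([[-2.0, 0.0, 0.0], [5.0, 5.0, 5.0]])
ends = np.array([[2.0, 0.0, 0.0], [6.0, 5.0, 5.0]])
print(line_ball_intersection(starts, ends, np.zeros(3), 1.0))  # [2. 0.]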
def functional(self):
    """All required enzymes for reaction are functional.

    Returns
    -------
    bool
        True if the gene-protein-reaction (GPR) rule is fulfilled for
        this reaction, or if reaction is not associated to a model,
        otherwise False.
    """
    if self._model:
        tree, _ = parse_gpr(self.gene_reaction_rule)
        return eval_gpr(tree, {gene.id for gene in self.genes
                               if not gene.functional})
    return True
All required enzymes for reaction are functional.

Returns
-------
bool
    True if the gene-protein-reaction (GPR) rule is fulfilled for this
    reaction, or if reaction is not associated to a model, otherwise
    False.
def html_elem(e, ct, withtype=False):
    """
    Format a result element as an HTML table cell.
      @param e (list): a pair \c (value,type)
      @param ct (str): cell type (th or td)
      @param withtype (bool): add an additional cell with the element type
    """
    # Header cell
    if ct == 'th':
        return '<th>{0}</th><th>{1}</th>'.format(*e) if withtype \
            else '<th>{}</th>'.format(e)
    # Content cell
    if e[1] in ('uri', 'URIRef'):
        html = u'<{0} class=val><a href="{1}" target="_other">{2}</a></{0}>'.format(
            ct, e[0], escape(e[0]))
    else:
        html = u'<{0} class=val>{1}</{0}>'.format(ct, escape(e[0]))
    # Create the optional cell for the type
    if withtype:
        html += u'<{0} class=typ>{1}</{0}>'.format(ct, e[1])
    return html
Format a result element as an HTML table cell.

@param e (list): a pair \c (value,type)
@param ct (str): cell type (th or td)
@param withtype (bool): add an additional cell with the element type
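A usage sketch with hypothetical values, assuming `escape` resolves to the standard library's html.escape in the function's module:

print(html_elem(('42', 'literal'), 'td', withtype=True))
# <td class=val>42</td><td class=typ>literal</td>
print(html_elem(('http://example.org/x', 'uri'), 'td'))
# <td class=val><a href="http://example.org/x" target="_other">http://example.org/x</a></td>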
def _new_extension(name, value, critical=0, issuer=None, _pyfree=1):
    '''
    Create new X509_Extension. This is required because M2Crypto doesn't
    support getting the publickeyidentifier from the issuer to create the
    authoritykeyidentifier extension.
    '''
    if name == 'subjectKeyIdentifier' and \
            value.strip('0123456789abcdefABCDEF:') != '':
        raise salt.exceptions.SaltInvocationError('value must be precomputed hash')

    # ensure name and value are bytes
    name = salt.utils.stringutils.to_str(name)
    value = salt.utils.stringutils.to_str(value)

    try:
        ctx = M2Crypto.m2.x509v3_set_nconf()
        _fix_ctx(ctx, issuer)
        if ctx is None:
            raise MemoryError(
                'Not enough memory when creating a new X509 extension')
        x509_ext_ptr = M2Crypto.m2.x509v3_ext_conf(None, ctx, name, value)
        lhash = None
    except AttributeError:
        lhash = M2Crypto.m2.x509v3_lhash()  # pylint: disable=no-member
        ctx = M2Crypto.m2.x509v3_set_conf_lhash(lhash)  # pylint: disable=no-member
        # ctx not zeroed
        _fix_ctx(ctx, issuer)
        x509_ext_ptr = M2Crypto.m2.x509v3_ext_conf(lhash, ctx, name, value)  # pylint: disable=no-member
        # ctx,lhash freed

    if x509_ext_ptr is None:
        raise M2Crypto.X509.X509Error(
            "Cannot create X509_Extension with name '{0}' and value '{1}'".format(name, value))
    x509_ext = M2Crypto.X509.X509_Extension(x509_ext_ptr, _pyfree)
    x509_ext.set_critical(critical)
    return x509_ext
Create new X509_Extension. This is required because M2Crypto doesn't support getting the publickeyidentifier from the issuer to create the authoritykeyidentifier extension.
def _fw_policy_create(self, drvr_name, data, cache):
    """Firewall Policy create routine.

    This function updates its local cache with policy parameters.
    It checks if the local cache has information about the rules
    associated with the policy. If not, it means a restart has happened.
    It retrieves the rules associated with the policy by calling
    OpenStack APIs and calls the rule create internal routine.
    """
    policy = {}
    fw_policy = data.get('firewall_policy')
    tenant_id = fw_policy.get('tenant_id')
    LOG.info("Creating policy for tenant %s", tenant_id)
    policy_id = fw_policy.get('id')
    policy_name = fw_policy.get('name')
    pol_rule_dict = fw_policy.get('firewall_rules')
    if tenant_id not in self.fwid_attr:
        self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
    policy['name'] = policy_name
    policy['rule_dict'] = pol_rule_dict
    self.fwid_attr[tenant_id].store_policy(policy_id, policy)
    if not cache:
        self._check_create_fw(tenant_id, drvr_name)
        self.tenant_db.store_policy_tenant(policy_id, tenant_id)
    for rule in pol_rule_dict:
        rule_id = rule
        if not self.fwid_attr[tenant_id].is_rule_present(rule_id):
            rule_data = self.os_helper.get_fw_rule(rule_id)
            if rule_data is not None:
                self.fw_rule_create(rule_data, cache=cache)
Firewall Policy create routine.

This function updates its local cache with policy parameters. It checks if the local cache has information about the rules associated with the policy. If not, it means a restart has happened. It retrieves the rules associated with the policy by calling OpenStack APIs and calls the rule create internal routine.
def _call_command_in_repo(comm, repo, log, fail=False, log_flag=True):
    """Use `subprocess` to call a command in a certain (repo) directory.

    Logs the output (both `stderr` and `stdout`) to the log, and checks
    the return codes to make sure they're valid. Raises error if not.

    Raises
    ------
    exception `subprocess.CalledProcessError`: if the command fails
    """
    if log_flag:
        log.debug("Running '{}'.".format(" ".join(comm)))
    process = subprocess.Popen(
        comm, cwd=repo, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = process.communicate()
    if stderr is not None:
        err_msg = stderr.decode('ascii').strip().splitlines()
        for em in err_msg:
            log.error(em)
    if stdout is not None:
        out_msg = stdout.decode('ascii').strip().splitlines()
        for om in out_msg:
            log.warning(om)
    # Raises an error if the command failed.
    if fail:
        if process.returncode:
            raise subprocess.CalledProcessError(process.returncode, comm)
    return
Use `subprocess` to call a command in a certain (repo) directory.

Logs the output (both `stderr` and `stdout`) to the log, and checks the
return codes to make sure they're valid. Raises error if not.

Raises
------
exception `subprocess.CalledProcessError`: if the command fails
def connect(self, dests=[], name=None, id='', props={}):
    '''Connect this port to other ports.

    After the connection has been made, a delayed reparse of the
    connections for this and the destination port will be triggered.

    @param dests A list of the destination Port objects. Must be provided.
    @param name The name of the connection. If None, a suitable default
        will be created based on the names of the two ports.
    @param id The ID of this connection. If None, one will be generated
        by the RTC implementation.
    @param props Properties of the connection. Required values depend on
        the type of the two ports being connected.
    @raises IncompatibleDataPortConnectionPropsError, FailedToConnectError

    '''
    with self._mutex:
        if self.porttype == 'DataInPort' or self.porttype == 'DataOutPort':
            for prop in props:
                if prop in self.properties:
                    if props[prop] not in [x.strip() for x in self.properties[prop].split(',')] and \
                            'any' not in self.properties[prop].lower():
                        # Invalid property selected
                        raise exceptions.IncompatibleDataPortConnectionPropsError
                for d in dests:
                    if prop in d.properties:
                        if props[prop] not in [x.strip() for x in d.properties[prop].split(',')] and \
                                'any' not in d.properties[prop].lower():
                            # Invalid property selected
                            raise exceptions.IncompatibleDataPortConnectionPropsError
        if not name:
            name = self.name + '_'.join([d.name for d in dests])
        props = utils.dict_to_nvlist(props)
        profile = RTC.ConnectorProfile(name, id,
                                       [self._obj] + [d._obj for d in dests],
                                       props)
        return_code, profile = self._obj.connect(profile)
        if return_code != RTC.RTC_OK:
            raise exceptions.FailedToConnectError(return_code)
        self.reparse_connections()
        for d in dests:
            d.reparse_connections()
Connect this port to other ports.

After the connection has been made, a delayed reparse of the connections
for this and the destination port will be triggered.

@param dests A list of the destination Port objects. Must be provided.
@param name The name of the connection. If None, a suitable default will
    be created based on the names of the two ports.
@param id The ID of this connection. If None, one will be generated by
    the RTC implementation.
@param props Properties of the connection. Required values depend on the
    type of the two ports being connected.
@raises IncompatibleDataPortConnectionPropsError, FailedToConnectError
def prefix_iter(self, ns_uri):
    """Gets an iterator over the prefixes for the given namespace."""
    ni = self.__lookup_uri(ns_uri)
    return iter(ni.prefixes)
Gets an iterator over the prefixes for the given namespace.
def summary_df(df_in, **kwargs):
    """Make a pandas DataFrame of the mean and std devs of an array of
    results, including the uncertainties on the values.

    This is similar to pandas.DataFrame.describe but also includes
    estimates of the numerical uncertainties.

    The output DataFrame has multiindex levels:

    'calculation type': mean and standard deviations of the data.
    'result type': value and uncertainty for each quantity.

    calculation type    result type    column_1    column_2    ...
    mean                value
    mean                uncertainty
    std                 value
    std                 uncertainty

    Parameters
    ----------
    df_in: pandas DataFrame
    true_values: array
        Analytical values if known for comparison with mean. Used to
        calculate root mean squared errors (RMSE).
    include_true_values: bool, optional
        Whether or not to include true values in the output DataFrame.
    include_rmse: bool, optional
        Whether or not to include root-mean-squared-errors in the output
        DataFrame.

    Returns
    -------
    df: MultiIndex DataFrame
    """
    true_values = kwargs.pop('true_values', None)
    include_true_values = kwargs.pop('include_true_values', False)
    include_rmse = kwargs.pop('include_rmse', False)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    if true_values is not None:
        assert true_values.shape[0] == df_in.shape[1], (
            'There should be one true value for every column! '
            'true_values.shape=' + str(true_values.shape) + ', '
            'df_in.shape=' + str(df_in.shape))
    # make the data frame
    df = pd.DataFrame([df_in.mean(axis=0), df_in.std(axis=0, ddof=1)],
                      index=['mean', 'std'])
    if include_true_values:
        assert true_values is not None
        df.loc['true values'] = true_values
    # Make index categorical to allow sorting
    df.index = pd.CategoricalIndex(df.index.values, ordered=True,
                                   categories=['true values', 'mean',
                                               'std', 'rmse'],
                                   name='calculation type')
    # add uncertainties
    num_cals = df_in.shape[0]
    mean_unc = df.loc['std'] / np.sqrt(num_cals)
    std_unc = df.loc['std'] * np.sqrt(1 / (2 * (num_cals - 1)))
    df['result type'] = pd.Categorical(['value'] * df.shape[0], ordered=True,
                                       categories=['value', 'uncertainty'])
    df.set_index(['result type'], drop=True, append=True, inplace=True)
    df.loc[('mean', 'uncertainty'), :] = mean_unc.values
    df.loc[('std', 'uncertainty'), :] = std_unc.values
    if include_rmse:
        assert true_values is not None, \
            'Need to input true values for RMSE!'
        rmse, rmse_unc = rmse_and_unc(df_in.values, true_values)
        df.loc[('rmse', 'value'), :] = rmse
        df.loc[('rmse', 'uncertainty'), :] = rmse_unc
    # Ensure correct row order by sorting
    df.sort_index(inplace=True)
    # Cast calculation type index back from categorical to string to allow
    # adding new calculation types
    df.set_index(
        [df.index.get_level_values('calculation type').astype(str),
         df.index.get_level_values('result type')],
        inplace=True)
    return df
Make a pandas DataFrame of the mean and std devs of an array of results,
including the uncertainties on the values.

This is similar to pandas.DataFrame.describe but also includes estimates
of the numerical uncertainties.

The output DataFrame has multiindex levels:

'calculation type': mean and standard deviations of the data.
'result type': value and uncertainty for each quantity.

calculation type    result type    column_1    column_2    ...
mean                value
mean                uncertainty
std                 value
std                 uncertainty

Parameters
----------
df_in: pandas DataFrame
true_values: array
    Analytical values if known for comparison with mean. Used to
    calculate root mean squared errors (RMSE).
include_true_values: bool, optional
    Whether or not to include true values in the output DataFrame.
include_rmse: bool, optional
    Whether or not to include root-mean-squared-errors in the output
    DataFrame.

Returns
-------
df: MultiIndex DataFrame
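A minimal usage sketch with synthetic data (the column names and seed are made up):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
results = pd.DataFrame(rng.normal(size=(100, 2)), columns=['a', 'b'])
print(summary_df(results))
# Rows ('mean', 'value'), ('mean', 'uncertainty'), ('std', 'value') and
# ('std', 'uncertainty'), with one column per input column.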
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range
    and/or out of shoulders range.

             min                   max
    warn     min                   max     warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the
        result is out of range and `False` if it is in range. The second
        value is `True` if the result is out of shoulder range and
        `False` if it is in shoulder range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if not api.is_floatable(result):
        # Result is empty/None or not a valid number
        return False, False

    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all of
    # them implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based
    #   on the specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was
    #   created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis,), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    result_range = ResultsRangeDict(result_range)

    # The assignment of result as default fallback for min and max
    # guarantees the result will be in range also if no min/max values
    # are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    in_range = False
    min_operator = result_range.min_operator
    if min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    max_operator = result_range.max_operator
    if in_range:
        if max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max
    # have been defined, no shoulders must be considered for this
    # analysis. Thus, use specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
Checks if the result for the analysis passed in is out of range and/or
out of shoulders range.

         min                   max
warn     min                   max     warn
·········|---------------|=====================|---------------|·········
----- out-of-range -----><----- in-range ------><----- out-of-range -----
         <-- shoulder --><----- in-range ------><-- shoulder -->

:param brain_or_object: A single catalog brain or content object
:param result: Tentative result. If None, use the analysis result
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: Tuple of two elements. The first value is `True` if the result
    is out of range and `False` if it is in range. The second value is
    `True` if the result is out of shoulder range and `False` if it is
    in shoulder range
:rtype: (bool, bool)
def _parallel_receive_loop(self, seconds_to_wait):
    """Run the receiving in parallel."""
    sleep(seconds_to_wait)
    with self._lock:
        self._number_of_threads_receiving_messages += 1
    try:
        with self._lock:
            if self.state.is_waiting_for_start():
                self.start()
        while True:
            with self.lock:
                if self.state.is_connection_closed():
                    return
            self.receive_message()
    finally:
        with self._lock:
            self._number_of_threads_receiving_messages -= 1
Run the receiving in parallel.
def notice(txt, color=False):
    "print notice"
    if color:
        txt = config.Col.WARNING + txt + config.Col.ENDC
    print(txt)
print notice
def __generate_really(self, prop_set):
    """ Generates the main target with the given property set
    and returns a list whose first element is a property_set object
    containing usage_requirements of the generated target and whose
    other elements are the generated virtual targets. It's possible
    that no targets are generated.
    """
    assert isinstance(prop_set, property_set.PropertySet)
    best_alternative = self.__select_alternatives(prop_set, debug=0)
    self.best_alternative = best_alternative

    if not best_alternative:
        # FIXME: revive.
        # self.__select_alternatives(prop_set, debug=1)
        self.manager_.errors()(
            "No best alternative for '%s'.\n" % (self.full_name(),))

    result = best_alternative.generate(prop_set)

    # Now return virtual targets for the only alternative
    return result
Generates the main target with the given property set and returns a list whose first element is a property_set object containing usage_requirements of the generated target and whose other elements are the generated virtual targets. It's possible that no targets are generated.
def n_exec_stmt(self, node):
    """
    exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
    exec_stmt ::= expr exprlist EXEC_STMT
    """
    self.write(self.indent, 'exec ')
    self.preorder(node[0])
    if not node[1][0].isNone():
        sep = ' in '
        for subnode in node[1]:
            self.write(sep)
            sep = ", "
            self.preorder(subnode)
    self.println()
    self.prune()
exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
exec_stmt ::= expr exprlist EXEC_STMT
def assert_same(self, first, second):
    """
    Compares two items for identity. The items can be either single
    values or lists of values. When comparing lists, identity obtains
    when the two lists have the same number of elements and the element
    at each position in one list is identical to the element at the same
    position in the other list.

    This method is meant to be used for comparing lists of DOM nodes. It
    would also work with lists of booleans, integers, and similar
    primitive types, but is pointless in such cases. Also note that this
    method cannot meaningfully compare lists of lists or lists of
    dictionaries, since the objects that would be part of the list would
    be created anew by Selenium's marshalling procedure. Hence, in these
    cases, the assertion would always fail.

    :param first: The first item to compare.
    :type first: :class:`selenium.webdriver.remote.webelement.WebElement`
        or array of
        :class:`selenium.webdriver.remote.webelement.WebElement`.
    :param second: The second item to compare.
    :type second: :class:`selenium.webdriver.remote.webelement.WebElement`
        or array of
        :class:`selenium.webdriver.remote.webelement.WebElement`.
    :raises: :class:`AssertionError` when unequal.
    """
    if not isinstance(first, list):
        first = [first]
    if not isinstance(second, list):
        second = [second]
    if not self.driver.execute_script("""
    var first = arguments[0];
    var second = arguments[1];
    if (first.length != second.length)
        return false;
    for (var i = 0; i < first.length; ++i)
        if (first[i] !== second[i])
            return false;
    return true;
    """, first, second):
        raise AssertionError("unequal")
Compares two items for identity. The items can be either single values
or lists of values. When comparing lists, identity obtains when the two
lists have the same number of elements and the element at each position
in one list is identical to the element at the same position in the
other list.

This method is meant to be used for comparing lists of DOM nodes. It
would also work with lists of booleans, integers, and similar primitive
types, but is pointless in such cases. Also note that this method cannot
meaningfully compare lists of lists or lists of dictionaries, since the
objects that would be part of the list would be created anew by
Selenium's marshalling procedure. Hence, in these cases, the assertion
would always fail.

:param first: The first item to compare.
:type first: :class:`selenium.webdriver.remote.webelement.WebElement` or
    array of :class:`selenium.webdriver.remote.webelement.WebElement`.
:param second: The second item to compare.
:type second: :class:`selenium.webdriver.remote.webelement.WebElement` or
    array of :class:`selenium.webdriver.remote.webelement.WebElement`.
:raises: :class:`AssertionError` when unequal.
def disassemble(self, *, transforms=None) -> Iterator[Instruction]:
    """
    Disassembles this method, yielding an iterable of
    :class:`~jawa.util.bytecode.Instruction` objects.
    """
    if transforms is None:
        if self.cf.classloader:
            transforms = self.cf.classloader.bytecode_transforms
        else:
            transforms = []
    transforms = [self._bind_transform(t) for t in transforms]

    with io.BytesIO(self._code) as code:
        ins_iter = iter(lambda: read_instruction(code, code.tell()), None)
        for ins in ins_iter:
            for transform in transforms:
                ins = transform(ins)
            yield ins
Disassembles this method, yielding an iterable of :class:`~jawa.util.bytecode.Instruction` objects.
def do_execute(self):
    """
    The actual execution of the actor.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    if isinstance(self.input.payload, Instances):
        inst = None
        data = self.input.payload
    elif isinstance(self.input.payload, Instance):
        inst = self.input.payload
        data = inst.dataset
    index = str(self.resolve_option("index"))
    unset = bool(self.resolve_option("unset"))
    if unset:
        data.no_class()
    else:
        if index == "first":
            data.class_is_first()
        elif index == "last":
            data.class_is_last()
        else:
            data.class_index = int(index) - 1
    if inst is None:
        self._output.append(Token(data))
    else:
        self._output.append(Token(inst))
    return None
The actual execution of the actor.

:return: None if successful, otherwise error message
:rtype: str
def serialize_artifact_json_blobs(artifacts):
    """
    Ensure that JSON artifact blobs passed as dicts are converted to JSON
    """
    for artifact in artifacts:
        blob = artifact['blob']
        if (artifact['type'].lower() == 'json' and
                not isinstance(blob, str)):
            artifact['blob'] = json.dumps(blob)

    return artifacts
Ensure that JSON artifact blobs passed as dicts are converted to JSON
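A short usage sketch with made-up artifact records; only dict blobs of JSON-typed artifacts are serialized, the rest pass through untouched.

artifacts = [
    {'type': 'json', 'blob': {'passed': 3, 'failed': 0}},
    {'type': 'text', 'blob': 'plain log output'},
]
serialize_artifact_json_blobs(artifacts)
print(artifacts[0]['blob'])   # '{"passed": 3, "failed": 0}' -- now a string
print(artifacts[1]['blob'])   # 'plain log output' -- unchanged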
def calc_spectrum(signal, rate):
    """Return the spectrum and frequency indexes for real-valued input signal"""
    npts = len(signal)
    padto = 1 << (npts - 1).bit_length()  # pad to the next power of two
    # print 'length of signal {}, pad to {}'.format(npts, padto)
    npts = padto
    sp = np.fft.rfft(signal, n=padto) / npts
    # print('sp len ', len(sp))
    freq = np.arange((npts / 2) + 1) / (npts / rate)
    # print('freq len ', len(freq))
    return freq, abs(sp)
Return the spectrum and frequency indexes for real-valued input signal
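A quick usage sketch: feed in a pure tone and read the dominant frequency back off the spectrum.

import numpy as np

rate = 1000.0                          # 1 kHz sampling rate
t = np.arange(0, 1, 1 / rate)
signal = np.sin(2 * np.pi * 50 * t)    # a pure 50 Hz tone
freq, sp = calc_spectrum(signal, rate)
print(freq[np.argmax(sp)])             # ~50 Hz, binned to the padded FFT grid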
def session_to_epoch(timestamp):
    """ converts Synergy Timestamp for session to UTC zone seconds since epoch """
    utc_timetuple = datetime.strptime(timestamp, SYNERGY_SESSION_PATTERN) \
        .replace(tzinfo=None).utctimetuple()
    return calendar.timegm(utc_timetuple)
converts Synergy Timestamp for session to UTC zone seconds since epoch
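A usage sketch. SYNERGY_SESSION_PATTERN is a module constant not shown here, so the strptime pattern below is a hypothetical stand-in and may differ from the real one.

from datetime import datetime
import calendar

# Hypothetical pattern -- the real SYNERGY_SESSION_PATTERN is defined
# elsewhere in the module and may differ.
SYNERGY_SESSION_PATTERN = '%Y%m%d%H%M%S'

print(session_to_epoch('20240101000000'))   # 1704067200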
def predict(self, dataset, missing_value_action='auto'):
    """
    Predict the target column of the given dataset.

    The target column is provided during
    :func:`~turicreate.random_forest_regression.create`. If the target
    column is in the `dataset` it will be ignored.

    Parameters
    ----------
    dataset : SFrame
        A dataset that has the same columns that were used during
        training. If the target column exists in ``dataset`` it will be
        ignored while making predictions.

    missing_value_action : str, optional
        Action to perform when missing values are encountered. Can be
        one of:

        - 'auto': By default the model will treat missing value as is.
        - 'impute': Proceed with evaluation by filling in the missing
          values with the mean of the training data. Missing values are
          also imputed if an entire column of data is missing during
          evaluation.
        - 'error': Do not proceed with evaluation and terminate with an
          error message.

    Returns
    -------
    out : SArray
        Predicted target value for each example (i.e. row) in the dataset.

    See Also
    --------
    create, predict

    Examples
    --------
    >>> m.predict(testdata)
    """
    return super(RandomForestRegression, self).predict(
        dataset,
        output_type='margin',
        missing_value_action=missing_value_action)
Predict the target column of the given dataset. The target column is provided during :func:`~turicreate.random_forest_regression.create`. If the target column is in the `dataset` it will be ignored. Parameters ---------- dataset : SFrame A dataset that has the same columns that were used during training. If the target column exists in ``dataset`` it will be ignored while making predictions. missing_value_action : str, optional Action to perform when missing values are encountered. Can be one of: - 'auto': By default the model will treat missing value as is. - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'error': Do not proceed with evaluation and terminate with an error message. Returns ------- out : SArray Predicted target value for each example (i.e. row) in the dataset. See Also ---------- create, predict Examples -------- >>> m.predict(testdata)
def preprocessor(dom): "Removes unwanted parts of DOM." options = { "processing_instructions": False, "remove_unknown_tags": False, "safe_attrs_only": False, "page_structure": False, "annoying_tags": False, "frames": False, "meta": False, "links": False, "javascript": False, "scripts": True, "comments": True, "style": True, "embedded": True, "forms": True, "kill_tags": ("head",), } cleaner = Cleaner(**options) return cleaner.clean_html(dom)
Removes unwanted parts of DOM.
def _timer(self, state_transition_event=None): """Timer loop used to keep track of the time while roasting or cooling. If the time remaining reaches zero, the roaster will call the supplied state transition function or the roaster will be set to the idle state.""" while not self._teardown.value: state = self.get_roaster_state() if(state == 'roasting' or state == 'cooling'): time.sleep(1) self.total_time += 1 if(self.time_remaining > 0): self.time_remaining -= 1 else: if(state_transition_event is not None): state_transition_event.set() else: self.idle() else: time.sleep(0.01)
Timer loop used to keep track of the time while roasting or cooling. If the time remaining reaches zero, the roaster will call the supplied state transition function or the roaster will be set to the idle state.
def dmag_magic(in_file="measurements.txt", dir_path=".", input_dir_path="", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", plot_by="loc", LT="AF", norm=True, XLP="", save_plots=True, fmt="svg"): """ plots intensity decay curves for demagnetization experiments Parameters ---------- in_file : str, default "measurements.txt" dir_path : str output directory, default "." input_dir_path : str input file directory (if different from dir_path), default "" spec_file : str input specimen file name, default "specimens.txt" samp_file: str input sample file name, default "samples.txt" site_file : str input site file name, default "sites.txt" loc_file : str input location file name, default "locations.txt" plot_by : str [spc, sam, sit, loc] (specimen, sample, site, location), default "loc" LT : str lab treatment [T, AF, M], default AF norm : bool normalize by NRM magnetization, default True XLP : str exclude specific lab protocols, (for example, method codes like LP-PI) default "" save_plots : bool plot and save non-interactively, default True fmt : str ["png", "svg", "pdf", "jpg"], default "svg" Returns --------- type - Tuple : (True or False indicating if conversion was successful, file name(s) written) """ dir_path = os.path.realpath(dir_path) if not input_dir_path: input_dir_path = dir_path input_dir_path = os.path.realpath(input_dir_path) # format plot_key name_dict = {'loc': 'location', 'sit': 'site', 'sam': 'sample', 'spc': 'specimen'} if plot_by not in name_dict.values(): try: plot_key = name_dict[plot_by] except KeyError: print('Unrecognized plot_by {}, falling back to plot by location'.format(plot_by)) plot_key = "location" else: plot_key = plot_by # figure out what kind of experiment LT = "LT-" + LT + "-Z" print('LT', LT) if LT == "LT-T-Z": units, dmag_key = 'K', 'treat_temp' elif LT == "LT-AF-Z": units, dmag_key = 'T', 'treat_ac_field' elif LT == 'LT-M-Z': units, dmag_key = 'J', 'treat_mw_energy' else: units = 'U' # init FIG = {} # plot dictionary FIG['demag'] = 1 # demag is figure 1 # create contribution and add required headers fnames = {"specimens": spec_file, "samples": samp_file, 'sites': site_file, 'locations': loc_file} if not os.path.exists(pmag.resolve_file_name(in_file, input_dir_path)): print('-E- Could not find {}'.format(in_file)) return False, [] contribution = cb.Contribution(input_dir_path, single_file=in_file, custom_filenames=fnames) file_type = list(contribution.tables.keys())[0] print(len(contribution.tables['measurements'].df), ' records read from ', in_file) # add plot_key into measurements table if plot_key not in contribution.tables['measurements'].df.columns: #contribution.propagate_name_down(plot_key, 'measurements') contribution.propagate_location_to_measurements() data_container = contribution.tables[file_type] # pare down to only records with useful data # grab records that have the requested code data_slice = data_container.get_records_for_code(LT) # and don't have the offending code data = data_container.get_records_for_code(XLP, incl=False, use_slice=True, sli=data_slice, strict_match=False) # make sure quality is in the dataframe if 'quality' not in data.columns: data['quality'] = 'g' # get intensity key and make sure intensity data is not blank intlist = ['magn_moment', 'magn_volume', 'magn_mass'] IntMeths = [col_name for col_name in data.columns if col_name in intlist] # get rid of any entirely blank intensity columns for col_name in IntMeths: if not data[col_name].any(): data.drop(col_name, axis=1,
inplace=True) IntMeths = [col_name for col_name in data.columns if col_name in intlist] if len(IntMeths) == 0: print('-E- No intensity headers found') return False, [] int_key = IntMeths[0] # plot first intensity method found - normalized to initial value anyway - doesn't matter which used data = data[data[int_key].notnull()] # make list of individual plots # by default, will be by location_name plotlist = data[plot_key].unique() plotlist.sort() pmagplotlib.plot_init(FIG['demag'], 5, 5) last_plot = False # iterate through and plot the data for plot in plotlist: if plot == plotlist[-1]: last_plot = True plot_data = data[data[plot_key] == plot].copy() if not save_plots: print(plot, 'plotting by: ', plot_key) if len(plot_data) > 2: title = plot spcs = [] spcs = plot_data['specimen'].unique() for spc in spcs: INTblock = [] spec_data = plot_data[plot_data['specimen'] == spc] for ind, rec in spec_data.iterrows(): INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, rec['quality']]) if len(INTblock) > 2: pmagplotlib.plot_mag(FIG['demag'], INTblock, title, 0, units, norm) if save_plots: files = {} for key in list(FIG.keys()): if pmagplotlib.isServer: files[key] = title + '_' + LT + '.' + fmt incl_dir = False else: # if not server, include directory in output path files[key] = os.path.join(dir_path, title + '_' + LT + '.' + fmt) incl_dir = True pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir) else: pmagplotlib.draw_figs(FIG) prompt = " S[a]ve to save plot, [q]uit, Return to continue: " ans = input(prompt) if ans == 'q': return True, [] if ans == "a": files = {} for key in list(FIG.keys()): if pmagplotlib.isServer: files[key] = title + '_' + LT + '.' + fmt incl_dir = False else: # if not server, include directory in output path files[key] = os.path.join(dir_path, title + '_' + LT + '.' + fmt) incl_dir = True pmagplotlib.save_plots(FIG, files, incl_directory=incl_dir) pmagplotlib.clearFIG(FIG['demag']) if last_plot: return True, []
plots intensity decay curves for demagnetization experiments Parameters ---------- in_file : str, default "measurements.txt" dir_path : str output directory, default "." input_dir_path : str input file directory (if different from dir_path), default "" spec_file : str input specimen file name, default "specimens.txt" samp_file: str input sample file name, default "samples.txt" site_file : str input site file name, default "sites.txt" loc_file : str input location file name, default "locations.txt" plot_by : str [spc, sam, sit, loc] (specimen, sample, site, location), default "loc" LT : str lab treatment [T, AF, M], default AF norm : bool normalize by NRM magnetization, default True XLP : str exclude specific lab protocols, (for example, method codes like LP-PI) default "" save_plots : bool plot and save non-interactively, default True fmt : str ["png", "svg", "pdf", "jpg"], default "svg" Returns --------- type - Tuple : (True or False indicating if conversion was successful, file name(s) written)
def skus_get(self, product_id, session): '''taobao.fenxiao.product.skus.get SKU query interface: queries the SKUs of a product''' request = TOPRequest('taobao.fenxiao.product.skus.get') request['product_id'] = product_id self.create(self.execute(request, session), fields=['skus','total_results'], models={'skus':FenxiaoSku}) return self.skus
taobao.fenxiao.product.skus.get SKU query interface: queries the SKUs of a product
def info(self): """Supplemental description of the list, with length and type""" itext = self.class_info if self.prop.info: itext += ' (each item is {})'.format(self.prop.info) if self.max_length is None and self.min_length is None: return itext if self.max_length is None: lentext = 'length >= {}'.format(self.min_length) elif self.max_length == self.min_length: lentext = 'length of {}'.format(self.min_length) else: lentext = 'length between {mn} and {mx}'.format( mn='0' if self.min_length is None else self.min_length, mx=self.max_length, ) return '{} with {}'.format(itext, lentext)
Supplemental description of the list, with length and type
def save_dict(self, key: str, my_dict: dict, hierarchical: bool = False): """Store the specified dictionary at the specified key.""" for _key, _value in my_dict.items(): if isinstance(_value, dict): if not hierarchical: self._db.hmset(key, {_key: json.dumps(_value)}) else: self.save_dict(key + ':' + _key, _value, hierarchical) elif isinstance(_value, list): if not hierarchical: self._db.hmset(key, {_key: str(_value)}) else: print('saving list at ', key + ':' + _key) self._db.lpush(key + ':' + _key, *_value[::-1]) elif isinstance(_value, bool): self._db.hmset(key, {_key: str(_value)}) else: self._db.hmset(key, {_key: _value})
Store the specified dictionary at the specified key.
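A usage sketch under the assumption that the class wraps a redis-py client at self._db; `ConfigDb` is a hypothetical wrapper name:

db = ConfigDb()  # hypothetical class exposing save_dict over a redis-py client
db.save_dict('config', {'db': {'host': 'localhost'}, 'ports': [6379, 6380]},
             hierarchical=True)
# hierarchical=True flattens nesting into colon-separated keys:
#   hash 'config:db'    -> {'host': 'localhost'}
#   list 'config:ports' -> [6379, 6380] (lpush of the reversed list keeps order)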
def main(argv=None): """Main function to run benchmarks. @param argv: command-line arguments. @return: exit code (0 is OK). """ import getopt # default values test_read = None test_write = None n = 3 # number of repeat if argv is None: argv = sys.argv[1:] try: opts, args = getopt.getopt(argv, 'hn:rw', []) for o,a in opts: if o == '-h': print(HELP) return 0 elif o == '-n': n = int(a) elif o == '-r': test_read = True elif o == '-w': test_write = True if args: raise getopt.GetoptError('Arguments are not used.') except getopt.GetoptError: msg = sys.exc_info()[1] # current exception txt = str(msg) print(txt) return 1 if (test_read, test_write) == (None, None): test_read = test_write = True m = Measure(n, test_read, test_write) m.measure_all() m.print_report() return 0
Main function to run benchmarks. @param argv: command-line arguments. @return: exit code (0 is OK).
def create_record(self, dns_type, name, content, **kwargs): """ Create a dns record :param dns_type: record type, e.g. 'A' or 'CNAME' :param name: record name (hostname) :param content: record content, e.g. the target IP address :param kwargs: optional 'ttl' (seconds) and 'proxied' (bool) settings :return: the created record data """ data = { 'type': dns_type, 'name': name, 'content': content } if kwargs.get('ttl') and kwargs['ttl'] != 1: data['ttl'] = kwargs['ttl'] if kwargs.get('proxied') is True: data['proxied'] = True else: data['proxied'] = False content = self.request( self.api_url + self.zone['id'] + '/dns_records', 'post', data=data ) print('DNS record successfully created') return content['result']
Create a dns record :param dns_type: record type, e.g. 'A' or 'CNAME' :param name: record name (hostname) :param content: record content, e.g. the target IP address :param kwargs: optional 'ttl' (seconds) and 'proxied' (bool) settings :return: the created record data
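A hedged call sketch; the client class name and zone setup are assumptions, since only the method body is shown:

dns = CloudFlareClient(api_token='...')  # hypothetical client exposing create_record
record = dns.create_record('A', 'www.example.com', '203.0.113.10',
                           ttl=300, proxied=True)
print(record['id'])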
def _get_distance_term(self, C, rrup, backarc): """ Returns the distance scaling term, which varies depending on whether the site is in the forearc or the backarc """ # Geometric attenuation function distance_scale = -np.log10(np.sqrt(rrup ** 2 + 3600.0)) # Anelastic attenuation in the backarc distance_scale[backarc] += (C["c2"] * rrup[backarc]) # Anelastic Attenuation in the forearc idx = np.logical_not(backarc) distance_scale[idx] += (C["c1"] * rrup[idx]) return distance_scale
Returns the distance scaling term, which varies depending on whether the site is in the forearc or the backarc
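A worked numpy sketch of the same scaling, with illustrative (not published) coefficients:

import numpy as np

C = {"c1": -0.0031, "c2": -0.0054}               # illustrative values only
rrup = np.array([50.0, 100.0, 200.0])            # rupture distances in km
backarc = np.array([False, True, False])
scale = -np.log10(np.sqrt(rrup ** 2 + 3600.0))   # geometric attenuation
scale[backarc] += C["c2"] * rrup[backarc]        # backarc anelastic term
scale[~backarc] += C["c1"] * rrup[~backarc]      # forearc anelastic term
print(scale)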
def welch(timeseries, segmentlength, noverlap=None, scheme=None, **kwargs): """Calculate a PSD using Welch's method with a mean average Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. scheme : `pycbc.scheme.Scheme`, optional processing scheme in which to execute FFT, default: `None` **kwargs other keyword arguments to pass to :func:`pycbc.psd.welch` Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- pycbc.psd.welch """ from pycbc.psd import welch as pycbc_welch # default to 'standard' welch kwargs.setdefault('avg_method', 'mean') # default to 50% overlap, as documented if noverlap is None: noverlap = segmentlength // 2 # get scheme if scheme is None: scheme = null_context() # generate pycbc FrequencySeries with scheme: pycbc_fseries = pycbc_welch(timeseries.to_pycbc(copy=False), seg_len=segmentlength, seg_stride=segmentlength-noverlap, **kwargs) # return GWpy FrequencySeries fseries = FrequencySeries.from_pycbc(pycbc_fseries, copy=False) fseries.name = timeseries.name fseries.override_unit(scale_timeseries_unit( timeseries.unit, scaling='density')) return fseries
Calculate a PSD using Welch's method with a mean average Parameters ---------- timeseries : `~gwpy.timeseries.TimeSeries` input `TimeSeries` data. segmentlength : `int` number of samples in single average. noverlap : `int` number of samples to overlap between segments, defaults to 50%. scheme : `pycbc.scheme.Scheme`, optional processing scheme in which to execute FFT, default: `None` **kwargs other keyword arguments to pass to :func:`pycbc.psd.welch` Returns ------- spectrum : `~gwpy.frequencyseries.FrequencySeries` average power `FrequencySeries` See also -------- pycbc.psd.welch
def mkdir(dirname, overwrite=False): """ Wraps around os.mkdir(), but checks for existence first. """ if op.isdir(dirname): if overwrite: shutil.rmtree(dirname) os.mkdir(dirname) logging.debug("Overwrite folder `{0}`.".format(dirname)) else: return False # Nothing is changed else: try: os.mkdir(dirname) except OSError: # intermediate directories are missing; create the full path os.makedirs(dirname) logging.debug("`{0}` not found. Creating new.".format(dirname)) return True
Wraps around os.mkdir(), but checks for existence first.
def posterior_to_xarray(self): """Extract posterior samples from fit.""" posterior = self.posterior # filter posterior_predictive and log_likelihood posterior_predictive = self.posterior_predictive if posterior_predictive is None: posterior_predictive = [] elif isinstance(posterior_predictive, str): posterior_predictive = [posterior_predictive] log_likelihood = self.log_likelihood if not isinstance(log_likelihood, str): log_likelihood = [] else: log_likelihood = [log_likelihood] ignore = posterior_predictive + log_likelihood + ["lp__"] data = get_draws(posterior, ignore=ignore) return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
Extract posterior samples from fit.
def get_logexp(a=1, b=0, a2=None, b2=None, backend=None): """ Utility function for use with :func:`symmetricsys`. Creates a pair of callbacks for logarithmic transformation (including scaling and shifting): ``u = ln(a*x + b)``. Parameters ---------- a : number Scaling (forward). b : number Shift (forward). a2 : number Scaling (backward). b2 : number Shift (backward). backend : module, optional Math backend exposing ``log`` and ``exp`` (defaults to sympy). Returns ------- Pair of callbacks. """ if a2 is None: a2 = a if b2 is None: b2 = b if backend is None: import sympy as backend return (lambda x: backend.log(a*x + b), lambda x: (backend.exp(x) - b2)/a2)
Utility function for use with :func:`symmetricsys`. Creates a pair of callbacks for logarithmic transformation (including scaling and shifting): ``u = ln(a*x + b)``. Parameters ---------- a : number Scaling (forward). b : number Shift (forward). a2 : number Scaling (backward). b2 : number Shift (backward). backend : module, optional Math backend exposing ``log`` and ``exp`` (defaults to sympy). Returns ------- Pair of callbacks.
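A round-trip check of the returned callbacks with the default sympy backend:

import sympy

fw, bw = get_logexp(a=2, b=1)
x = sympy.Symbol('x', positive=True)
u = fw(x)                                # log(2*x + 1)
assert sympy.simplify(bw(u) - x) == 0    # backward callback inverts the forward one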
def reset(self): """ Empties all internal storage containers """ super(MorseSmaleComplex, self).reset() self.base_partitions = {} self.merge_sequence = {} self.persistences = [] self.min_indices = [] self.max_indices = [] # State properties self.persistence = 0.
Empties all internal storage containers
def get_genes( path_or_buffer, valid_biotypes, chunksize=10000, chromosome_pattern=None, #chromosome_pattern=r'(?:\d\d?|MT|X|Y)$', only_manual=False, remove_duplicates=True, sort_by='name'): """Get all genes of a specific biotype from an Ensembl GTF file. Parameters ---------- path_or_buffer : str or buffer The GTF file (either the file path or a buffer). valid_biotypes : set of str The set of biotypes to include (e.g., "protein_coding"). chromosome_pattern : str, optional Regular expression specifying valid chromosomes. [None] only_manual : bool, optional Whether to exclude annotations with source "ensembl", which are based only on an automatic annotation pipeline. [False] remove_duplicates : bool, optional Whether to remove duplicate annotations, i.e. those with different Ensembl IDs for the same gene (only applies to protein-coding genes). [True] sort_by : str, optional How to sort the genes. One of: - 'name': Genes are ordered alphabetically by their name - 'position': Genes are sorted by their position in the genome. Genes are first sorted by chromosome, then by their starting base pair position on the chromosome. - 'position_fancy': Like 'position', but attempts to sort the chromosomes in a more logical order than strictly alphabetically. This currently works for human and mouse genomes. - 'none': The order from the GTF file is retained. Default: 'name' Returns ------- `pandas.DataFrame` Table with rows corresponding to the genes found. Notes ----- Annotation sources and redundant gene annotations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ According to the Ensembl website (1), the Ensembl gene annotation GTF files for human, mouse, zebrafish, rat and pig essentially contain two sets of annotations: One set consists of all annotations with the "ensembl" source annotation (column 2). These annotations are the product of the automated Ensembl "genebuild" pipeline. The other set consists of genes that are manually annotated by the HAVANA team (source "havana"), some of which have been merged with the automatic annotations (source "ensembl_havana"). There seems to be no overlap between genes annotated with "havana" and "ensembl_havana" sources. However, there are a few genes for which only annotations with source "ensembl" exist. Our policy is therefore to prefer annotations with source "ensembl_havana" and "havana" over those with source "ensembl", and to only keep annotations with source "ensembl" if there are no manually curated alternative annotations. A special case is represented by mitochondrial genes, which always have the source "insdc". (1) see http://www.ensembl.org/Help/Faq?id=152 Removal of duplicates ~~~~~~~~~~~~~~~~~~~~~ Unfortunately, the Ensembl gene annotations contain duplicates for a handful of genes. For example, for MATR3, there are ENSG00000015479 and ENSG00000280987, both of type "ensembl_havana". There seems to be no clear criterion by which we could rationally and automatically choose one ID over the other, at least based on information contained in the GTF file. We therefore remove duplicates according to the following policy: - For genes on '+' strand, keep the gene with the left-most starting position. - For genes on '-' strand, keep the gene with the right-most starting position. (In case the starting positions are equal, we keep the one that occurs first in the GTF file.) We would like to use the pandas.DataFrame.drop_duplicates() function for this.
So we're temporarily reordering genes using their signed position, and then we're using the original index (position) to restore the original order. """ chrompat = None if chromosome_pattern is not None: chrompat = re.compile(chromosome_pattern) # make sure this is a set valid_biotypes = set(valid_biotypes) c = 0 num_lines = 0 num_chunks = 0 t0 = time.time() reader = pd.read_csv(path_or_buffer, encoding='ascii', sep='\t', header=None, comment='#', dtype={0: str}, chunksize=chunksize) # "insdc" is required to catch the mitochondrial protein-coding genes valid_sources = set(['ensembl_havana', 'havana', 'insdc']) if not only_manual: # we also accept annotations with source "ensembl", which are the # product of an automated annotation pipeline valid_sources.add('ensembl') excluded_chromosomes = set() # parse GTF file and keep specific information data = [] for j, df in enumerate(reader): num_chunks += 1 num_lines += (df.shape[0]) # select rows of type "gene" sel = (df.iloc[:, 2] == 'gene') for i, row in df.loc[sel].iterrows(): # parse attribute in 9th column attr = gtf.parse_attributes(row[8].lstrip(' ')) # check if biotype is valid biotype = attr['gene_biotype'] if biotype not in valid_biotypes: continue chrom = str(row[0]) source = row[1] if chrompat is not None: match = chrompat.match(chrom) if match is None: excluded_chromosomes.add(chrom) continue c += 1 # extract gene ID and gene name ensembl_id = attr['gene_id'] try: gene_name = attr['gene_name'] except KeyError: # no gene name, so we'll use the ID as the name gene_name = ensembl_id # We define the position to be the index of the 5'-most base of the gene, # according its orientation on the chromosome (DNA sequences are always represented 5'->3'). # We encode the strand as the sign of the index # ("+" strand = positive sign, "-" strand = negative sign). 
if row[6] == '+': pos = int(row[3])-1 elif row[6] == '-': pos = -(int(row[4])-1) else: raise ValueError('Invalid strand information: %s' % str(row[6])) length = abs(int(row[4]) - int(row[3])) + 1 #data.append([gene_name, ensembl_id, chrom, pos, length, # source, biotype]) data.append([ensembl_id, gene_name, chrom, pos, length, biotype, source]) t1 = time.time() header = ['ensembl_id', 'name', 'chromosome', 'position', 'length', 'type', 'source'] df = pd.DataFrame(columns=header, data=data) if 'protein_coding' in valid_biotypes: if only_manual: # exclude protein-coding genes that are based on # automatic annotation (source "ensembl") sel = ((df['type'] == 'protein_coding') & (df['source'] == 'ensembl')) df = df.loc[~sel] else: # make sure we only keep protein-coding genes with source "ensembl" # if no manual annotations are available sel_pc = df['type'] == 'protein_coding' sel_ensembl = ((df['source'] == 'ensembl') & sel_pc) sel_manual = ((df['source'] != 'ensembl') & sel_pc) redundant_ensembl_genes = set(df.loc[sel_ensembl, 'name'].values) \ & set(df.loc[sel_manual, 'name'].values) sel_redund = sel_ensembl & df['name'].isin(redundant_ensembl_genes) num_genes_before = df.shape[0] df = df.loc[~sel_redund] num_genes_after = df.shape[0] _LOGGER.info('Removed %d protein-coding genes with source ' '"ensembl" that also had manual annotations.', num_genes_before - num_genes_after) if remove_duplicates: # remove duplicate annotations (two or more Ensembl IDs for the # same gene) num_genes_before = df.shape[0] sel_pc = df['type'] == 'protein_coding' df_sel = df.loc[sel_pc].copy() # sort by signed position value, # in order to make sure we keep the most "upstream" annotation in # the next step df_sel.sort_values('position', kind='mergesort', inplace=True) # remove duplicates by keeping the first occurrence #df.drop_duplicates(['chromosome', 'name'], inplace=True) df_sel.drop_duplicates('name', inplace=True) # combine protein-coding genes and non-protein-coding genes again df = pd.concat([df_sel, df.loc[~sel_pc]]) # restore original order using the numeric index df.sort_index(inplace=True) num_genes_after = df.shape[0] _LOGGER.info('Removed %d duplicate protein-coding gene entries', num_genes_before - num_genes_after) else: # print names of genes with duplicate IDs sel = df['type'] == 'protein_coding' counts = df.loc[sel]['name'].value_counts() sel = counts > 1 if sel.sum() > 0: _LOGGER.info('Protein-coding genes with multiple Ensembl IDs:' '%s', ', '.join(['%s (%d)' % (k, v) for k, v in counts[sel].items()])) if sort_by == 'name': # sort alphabetically by gene name df.sort_values(['name'], kind='mergesort', inplace=True) elif sort_by in ['position', 'position_fancy']: # sort first by chromosome, then by absolute position df_sort = pd.concat([df['chromosome'], df['position'].abs()], axis=1) df_sort = df_sort.sort_values(['chromosome', 'position'], kind='mergesort') df = df.loc[df_sort.index] if sort_by == 'position_fancy': # Perform "fancy" positional sorting. Numbered chromosomes # are ordered numerically, and followed by the X, Y, and MT # chromosomes.
def transform_chrom(chrom): """Helper function to obtain specific sort order.""" try: c = int(chrom) except: if chrom in ['X', 'Y']: return chrom elif chrom == 'MT': return '_MT' # sort to the end else: return '__' + chrom # sort to the very end else: # make sure numbered chromosomes are sorted numerically return '%02d' % c chrom_for_sorting = df['chromosome'].apply(transform_chrom) a = chrom_for_sorting.argsort(kind='mergesort') df = df.iloc[a] _LOGGER.info('Performed fancy sorting of chromosomes.') # set index to ensembl ID df.set_index('ensembl_id', inplace=True) _LOGGER.info('Read %d lines (in %d chunks).', num_lines, num_chunks) _LOGGER.info('Found %d valid gene entries.', c) _LOGGER.info('Final number of unique genes: %d', df.shape[0]) _LOGGER.info('Parsing time: %.1f s', t1-t0) # additional statistics all_chromosomes = list(df['chromosome'].unique()) _LOGGER.info('Valid chromosomes (%d): %s', len(all_chromosomes), ', '.join(all_chromosomes)) _LOGGER.info('Excluded chromosomes (%d): %s', len(excluded_chromosomes), ', '.join(sorted(excluded_chromosomes))) _LOGGER.info('Sources:') for i, c in df['source'].value_counts().iteritems(): _LOGGER.info('- %s: %d', i, c) _LOGGER.info('Gene types:') for i, c in df['type'].value_counts().iteritems(): _LOGGER.info('- %s: %d', i, c) return df
Get all genes of a specific biotype from an Ensembl GTF file. Parameters ---------- path_or_buffer : str or buffer The GTF file (either the file path or a buffer). valid_biotypes : set of str The set of biotypes to include (e.g., "protein_coding"). chromosome_pattern : str, optional Regular expression specifying valid chromosomes. [None] only_manual : bool, optional Whether to exclude annotations with source "ensembl", which are based only on an automatic annotation pipeline. [False] remove_duplicates : bool, optional Whether to remove duplicate annotations, i.e. those with different Ensembl IDs for the same gene (only applies to protein-coding genes). [True] sort_by : str, optional How to sort the genes. One of: - 'name': Genes are ordered alphabetically by their name - 'position': Genes are sorted by their position in the genome. Genes are first sorted by chromosome, then by their starting base pair position on the chromosome. - 'position_fancy': Like 'position', but attempts to sort the chromosomes in a more logical order than strictly alphabetically. This currently works for human and mouse genomes. - 'none': The order from the GTF file is retained. Default: 'name' Returns ------- `pandas.DataFrame` Table with rows corresponding to the genes found. Notes ----- Annotation sources and redundant gene annotations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ According to the Ensembl website (1), the Ensembl gene annotation GTF files for human, mouse, zebrafish, rat and pig essentially contain two sets of annotations: One set consists of all annotations with the "ensembl" source annotation (column 2). These annotations are the product of the automated Ensembl "genebuild" pipeline. The other set consists of genes that are manually annotated by the HAVANA team (source "havana"), some of which have been merged with the automatic annotations (source "ensembl_havana"). There seems to be no overlap between genes annotated with "havana" and "ensembl_havana" sources. However, there are a few genes for which only annotations with source "ensembl" exist. Our policy is therefore to prefer annotations with source "ensembl_havana" and "havana" over those with source "ensembl", and to only keep annotations with source "ensembl" if there are no manually curated alternative annotations. A special case is represented by mitochondrial genes, which always have the source "insdc". (1) see http://www.ensembl.org/Help/Faq?id=152 Removal of duplicates ~~~~~~~~~~~~~~~~~~~~~ Unfortunately, the Ensembl gene annotations contain duplicates for a handful of genes. For example, for MATR3, there are ENSG00000015479 and ENSG00000280987, both of type "ensembl_havana". There seems to be no clear criterion by which we could rationally and automatically choose one ID over the other, at least based on information contained in the GTF file. We therefore remove duplicates according to the following policy: - For genes on '+' strand, keep the gene with the left-most starting position. - For genes on '-' strand, keep the gene with the right-most starting position. (In case the starting positions are equal, we keep the one that occurs first in the GTF file.) We would like to use the pandas.DataFrame.drop_duplicates() function for this. So we're temporarily reordering genes using their signed position, and then we're using the original index (position) to restore the original order.
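A hedged usage sketch of the function above; the GTF path is illustrative:

genes = get_genes(
    'Homo_sapiens.GRCh38.gtf.gz',             # illustrative path to an Ensembl GTF
    valid_biotypes={'protein_coding'},
    chromosome_pattern=r'(?:\d\d?|MT|X|Y)$',  # keep only primary chromosomes
    sort_by='position_fancy')
print(genes[['name', 'chromosome', 'position', 'length']].head())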
def get_internal_call_graph(fpath, with_doctests=False): """ CommandLine: python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/ibeis/ibeis/init/main_helpers.py --show python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/dtool/dtool/depcache_table.py --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> import utool as ut >>> fpath = ut.get_argval('--modpath', default='.') >>> with_doctests = ut.get_argflag('--with_doctests') >>> G = get_internal_call_graph(fpath, with_doctests) >>> ut.quit_if_noshow() >>> import plottool as pt >>> pt.qt4ensure() >>> pt.show_nx(G, fontsize=8, as_directed=False) >>> z = pt.zoom_factory() >>> p = pt.pan_factory() >>> ut.show_if_requested() """ import utool as ut fpath = ut.truepath(fpath) sourcecode = ut.readfrom(fpath) self = ut.BaronWraper(sourcecode) G = self.internal_call_graph(with_doctests=with_doctests) return G
CommandLine: python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/ibeis/ibeis/init/main_helpers.py --show python -m utool.util_inspect get_internal_call_graph --show --modpath=~/code/dtool/dtool/depcache_table.py --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> import utool as ut >>> fpath = ut.get_argval('--modpath', default='.') >>> with_doctests = ut.get_argflag('--with_doctests') >>> G = get_internal_call_graph(fpath, with_doctests) >>> ut.quit_if_noshow() >>> import plottool as pt >>> pt.qt4ensure() >>> pt.show_nx(G, fontsize=8, as_directed=False) >>> z = pt.zoom_factory() >>> p = pt.pan_factory() >>> ut.show_if_requested()
def run_preassembly_duplicate(preassembler, beliefengine, **kwargs): """Run deduplication stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of unique statements. """ logger.info('Combining duplicates on %d statements...' % len(preassembler.stmts)) dump_pkl = kwargs.get('save') stmts_out = preassembler.combine_duplicates() beliefengine.set_prior_probs(stmts_out) logger.info('%d unique statements' % len(stmts_out)) if dump_pkl: dump_statements(stmts_out, dump_pkl) return stmts_out
Run deduplication stage of preassembly on a list of statements. Parameters ---------- preassembler : indra.preassembler.Preassembler A Preassembler instance beliefengine : indra.belief.BeliefEngine A BeliefEngine instance. save : Optional[str] The name of a pickle file to save the results (stmts_out) into. Returns ------- stmts_out : list[indra.statements.Statement] A list of unique statements.
def task_done(self) -> None: """Indicate that a formerly enqueued task is complete. Used by queue consumers. For each `.get` used to fetch a task, a subsequent call to `.task_done` tells the queue that the processing on the task is complete. If a `.join` is blocking, it resumes when all items have been processed; that is, when every `.put` is matched by a `.task_done`. Raises `ValueError` if called more times than `.put`. """ if self._unfinished_tasks <= 0: raise ValueError("task_done() called too many times") self._unfinished_tasks -= 1 if self._unfinished_tasks == 0: self._finished.set()
Indicate that a formerly enqueued task is complete. Used by queue consumers. For each `.get` used to fetch a task, a subsequent call to `.task_done` tells the queue that the processing on the task is complete. If a `.join` is blocking, it resumes when all items have been processed; that is, when every `.put` is matched by a `.task_done`. Raises `ValueError` if called more times than `.put`.
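The contract mirrors the standard-library queue, so a stdlib sketch illustrates the protocol:

import queue
import threading

q = queue.Queue()

def worker():
    while True:
        item = q.get()
        print('processed', item)   # stand-in for real work
        q.task_done()              # lets q.join() unblock once all work is done

threading.Thread(target=worker, daemon=True).start()
for i in range(5):
    q.put(i)
q.join()  # returns only after every put() has a matching task_done()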
def entity_to_bulk(entities, resource_type_parent): """Convert Single TC Entity to Bulk format. .. Attention:: This method is subject to frequent changes Args: entities (dictionary): TC Entity to be converted to Bulk. resource_type_parent (string): The resource parent type of the tc_data provided. Returns: (dictionary): A dictionary representing TC Bulk format. """ if not isinstance(entities, list): entities = [entities] bulk_array = [] for e in entities: bulk = {'type': e.get('type'), 'ownerName': e.get('ownerName')} if resource_type_parent in ['Group', 'Task', 'Victim']: bulk['name'] = e.get('value') elif resource_type_parent in ['Indicator']: bulk['confidence'] = e.get('confidence') bulk['rating'] = e.get('rating') bulk['summary'] = e.get('value') bulk_array.append(bulk) if len(bulk_array) == 1: return bulk_array[0] return bulk_array
Convert Single TC Entity to Bulk format. .. Attention:: This method is subject to frequent changes Args: entities (dictionary): TC Entity to be converted to Bulk. resource_type_parent (string): The resource parent type of the tc_data provided. Returns: (dictionary): A dictionary representing TC Bulk format.
def get_default_config(self): """ Return the default config for the handler """ config = super(StatsdHandler, self).get_default_config() config.update({ 'host': '', 'port': 1234, 'batch': 1, }) return config
Return the default config for the handler
def getEmpTraitCorrCoef(self): """ Returns the empirical trait correlation matrix """ cov = self.getEmpTraitCovar() stds=SP.sqrt(cov.diagonal())[:,SP.newaxis] RV = cov/stds/stds.T return RV
Returns the empirical trait correlation matrix
def uniquetwig(self, ps=None): """ see also :meth:`twig` Determine the shortest (more-or-less) twig which will point to this single Parameter in a given parent :class:`ParameterSet` :parameter ps: :class:`ParameterSet` in which the returned uniquetwig will point to this Parameter. If not provided or None this will default to the parent :class:`phoebe.frontend.bundle.Bundle`, if available. :return: uniquetwig :rtype: str """ if ps is None: ps = self._bundle if ps is None: return self.twig return ps._uniquetwig(self.twig)
see also :meth:`twig` Determine the shortest (more-or-less) twig which will point to this single Parameter in a given parent :class:`ParameterSet` :parameter ps: :class:`ParameterSet` in which the returned uniquetwig will point to this Parameter. If not provided or None this will default to the parent :class:`phoebe.frontend.bundle.Bundle`, if available. :return: uniquetwig :rtype: str
def VerifyStructure(self, parser_mediator, lines): """Verify that this file is a SkyDrive log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise. """ try: structure = self._SDF_HEADER.parseString(lines) except pyparsing.ParseException: logger.debug('Not a SkyDrive log file') return False try: dfdatetime_time_elements.TimeElementsInMilliseconds( time_elements_tuple=structure.header_date_time) except ValueError: logger.debug( 'Not a SkyDrive log file, invalid date and time: {0!s}'.format( structure.header_date_time)) return False return True
Verify that this file is a SkyDrive log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. lines (str): one or more lines from the text file. Returns: bool: True if this is the correct parser, False otherwise.
def calculate_normals(vertices): """Return Nx3 normal array from Nx3 vertex array.""" verts = np.array(vertices, dtype=float) normals = np.zeros_like(verts) for start, end in pairwise(np.arange(0, verts.shape[0] + 1, 3)): vecs = np.vstack((verts[start + 1] - verts[start], verts[start + 2] - verts[start])) # Get triangle of vertices and calculate 2-1 and 3-1 vecs /= np.linalg.norm(vecs, axis=1, keepdims=True) # normalize vectors normal = np.cross(*vecs) # normal is the cross products of vectors. normals[start:end, :] = normal / np.linalg.norm(normal) return normals
Return Nx3 normal array from Nx3 vertex array.
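A quick check of the function above (it relies on a module-level pairwise helper not shown here): one triangle in the z=0 plane should yield the +z unit normal for each of its three vertices.

import numpy as np

tri = [(0, 0, 0), (1, 0, 0), (0, 1, 0)]   # counter-clockwise in the z=0 plane
print(calculate_normals(tri))
# [[0. 0. 1.]
#  [0. 0. 1.]
#  [0. 0. 1.]]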
def filter_objects_by_section(self, rels, section): """Build a queryset containing all objects in the section subtree.""" subtree = section.get_descendants(include_self=True) kwargs_list = [{'%s__in' % rel.field.name: subtree} for rel in rels] q = Q(**kwargs_list[0]) for kwargs in kwargs_list[1:]: q |= Q(**kwargs) return self.get_manager(get_item_model_class()).filter(q).distinct()
Build a queryset containing all objects in the section subtree.
def image_exists(self, image_name, tag='latest'): """ :param image_name: name of the image to look up :param tag: tag to check for, defaults to 'latest' :return: True if the image exists in the registry with the given tag, False otherwise """ code, image = self.image_tags(image_name) if code != httplib.OK: return False tag = tag.lower() return any(x.lower() == tag for x in image.tags)
:param image_name: name of the image to look up :param tag: tag to check for, defaults to 'latest' :return: True if the image exists in the registry with the given tag, False otherwise
def _points(self, x_pos): """ Convert given data values into drawable points (x, y) and interpolated points if interpolate option is specified """ for serie in self.all_series: serie.points = [(x_pos[i], v) for i, v in enumerate(serie.values)] if serie.points and self.interpolate: serie.interpolated = self._interpolate(x_pos, serie.values) else: serie.interpolated = []
Convert given data values into drawable points (x, y) and interpolated points if interpolate option is specified
def schema(self, shex: Optional[Union[str, ShExJ.Schema]]) -> None: """ Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema. :param shex: Schema """ self.pfx = None if shex is not None: if isinstance(shex, ShExJ.Schema): self._schema = shex else: shext = shex.strip() loader = SchemaLoader() if ('\n' in shex or '\r' in shex) or shext[0] in '#<_: ': self._schema = loader.loads(shex) else: self._schema = loader.load(shex) if isinstance(shex, str) else shex if self._schema is None: raise ValueError("Unable to parse shex file") self.pfx = PrefixLibrary(loader.schema_text)
Set the schema to be used. Schema can either be a ShExC or ShExJ string or a pre-parsed schema. :param shex: Schema
def _syscal_write_electrode_coords(fid, spacing, N): """helper function that writes out electrode positions to a file descriptor Parameters ---------- fid: file descriptor data is written here spacing: float spacing of electrodes N: int number of electrodes """ fid.write('# X Y Z\n') for i in range(0, N): fid.write('{0} {1} {2} {3}\n'.format(i + 1, i * spacing, 0, 0))
helper function that writes out electrode positions to a file descriptor Parameters ---------- fid: file descriptor data is written here spacing: float spacing of electrodes N: int number of electrodes
def add_named_metadata(self, name, element=None): """ Add a named metadata node to the module, if it doesn't exist, or return the existing node. If *element* is given, it will append a new element to the named metadata node. If *element* is a sequence of values (rather than a metadata value), a new unnamed node will first be created. Example:: module.add_named_metadata("llvm.ident", ["llvmlite/1.0"]) """ if name in self.namedmetadata: nmd = self.namedmetadata[name] else: nmd = self.namedmetadata[name] = values.NamedMetaData(self) if element is not None: if not isinstance(element, values.Value): element = self.add_metadata(element) if not isinstance(element.type, types.MetaDataType): raise TypeError("wrong type for metadata element: got %r" % (element,)) nmd.add(element) return nmd
Add a named metadata node to the module, if it doesn't exist, or return the existing node. If *element* is given, it will append a new element to the named metadata node. If *element* is a sequence of values (rather than a metadata value), a new unnamed node will first be created. Example:: module.add_named_metadata("llvm.ident", ["llvmlite/1.0"])
def getMugshot(self): """ Return the L{Mugshot} associated with this L{Person}, or an unstored L{Mugshot} pointing at a placeholder mugshot image. """ mugshot = self.store.findUnique( Mugshot, Mugshot.person == self, default=None) if mugshot is not None: return mugshot return Mugshot.placeholderForPerson(self)
Return the L{Mugshot} associated with this L{Person}, or an unstored L{Mugshot} pointing at a placeholder mugshot image.
def _dispatch_handler(args, cell, parser, handler, cell_required=False, cell_prohibited=False): """ Makes sure cell magics include cell and line magics don't, before dispatching to handler. Args: args: the parsed arguments from the magic line. cell: the contents of the cell, if any. parser: the argument parser for <cmd>; used for error message. handler: the handler to call if the cell present/absent check passes. cell_required: True for cell magics, False for line magics that can't be cell magics. cell_prohibited: True for line magics, False for cell magics that can't be line magics. Returns: The result of calling the handler. Raises: Exception if the invocation is not valid. """ if cell_prohibited: if cell and len(cell.strip()): parser.print_help() raise Exception( 'Additional data is not supported with the %s command.' % parser.prog) return handler(args) if cell_required and not cell: parser.print_help() raise Exception('The %s command requires additional data' % parser.prog) return handler(args, cell)
Makes sure cell magics include cell and line magics don't, before dispatching to handler. Args: args: the parsed arguments from the magic line. cell: the contents of the cell, if any. parser: the argument parser for <cmd>; used for error message. handler: the handler to call if the cell present/absent check passes. cell_required: True for cell magics, False for line magics that can't be cell magics. cell_prohibited: True for line magics, False for cell magics that can't be line magics. Returns: The result of calling the handler. Raises: Exception if the invocation is not valid.
def validate(self, strict=True): """ check if this Swagger API is valid or not. :param bool strict: when in strict mode, an exception will be raised if not valid. :return: validation errors :rtype: list of tuple(where, type, msg). """ result = self._validate() if strict and len(result): for r in result: logger.error(r) raise errs.ValidationError('this Swagger App contains {0} validation error(s).'.format(len(result))) return result
check if this Swagger API is valid or not. :param bool strict: when in strict mode, an exception will be raised if not valid. :return: validation errors :rtype: list of tuple(where, type, msg).
def level_grouper(text, getreffs, level=None, groupby=20): """ Alternative to level_chunker: groups levels together at the latest level :param text: Text object :param getreffs: GetValidReff query callback :param level: Level of citation to retrieve :param groupby: Number of level to groupby :return: Automatically curated references """ if level is None or level > len(text.citation): level = len(text.citation) references = [ref.split(":")[-1] for ref in getreffs(level=level)] _refs = OrderedDict() for key in references: k = ".".join(key.split(".")[:level-1]) if k not in _refs: _refs[k] = [] _refs[k].append(key) del k return [ ( join_or_single(ref[0], ref[-1]), join_or_single(ref[0], ref[-1]) ) for sublist in _refs.values() for ref in [ sublist[i:i+groupby] for i in range(0, len(sublist), groupby) ] ]
Alternative to level_chunker: groups levels together at the latest level :param text: Text object :param getreffs: GetValidReff query callback :param level: Level of citation to retrieve :param groupby: Number of level to groupby :return: Automatically curated references
def _write(self, session, openFile, replaceParamFile=None): """ ProjectFileEvent Write to File Method """ openFile.write( text( yaml.dump([evt.as_yml() for evt in self.events.order_by(ProjectFileEvent.name, ProjectFileEvent.subfolder)] ) ) )
ProjectFileEvent Write to File Method
def forwards(apps, schema_editor): """ Create initial recurrence rules. """ RecurrenceRule = apps.get_model('icekit_events', 'RecurrenceRule') for description, recurrence_rule in RULES: RecurrenceRule.objects.get_or_create( description=description, defaults=dict(recurrence_rule=recurrence_rule), )
Create initial recurrence rules.
def marker_tags(self, iid): """Generator for all the tags of a certain marker""" tags = self._markers[iid]["tags"] for tag in tags: yield tag
Generator for all the tags of a certain marker
def F_beta(self, beta): """ Calculate FBeta score. :param beta: beta parameter :type beta : float :return: FBeta score for classes as dict """ try: F_dict = {} for i in self.TP.keys(): F_dict[i] = F_calc( TP=self.TP[i], FP=self.FP[i], FN=self.FN[i], beta=beta) return F_dict except Exception: return {}
Calculate FBeta score. :param beta: beta parameter :type beta : float :return: FBeta score for classes as dict
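For reference, a standalone F-beta from precision and recall; presumably what the module's F_calc computes per class:

def f_beta_score(tp, fp, fn, beta):
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    b2 = beta ** 2
    return (1 + b2) * precision * recall / (b2 * precision + recall)

print(f_beta_score(tp=45, fp=5, fn=10, beta=2))  # 0.833..., weights recall over precision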
def line(self, p1, p2, resolution=1): """Resolve the points to make a line between two points.""" xdiff = max(p1.x, p2.x) - min(p1.x, p2.x) ydiff = max(p1.y, p2.y) - min(p1.y, p2.y) xdir = [-1, 1][int(p1.x <= p2.x)] ydir = [-1, 1][int(p1.y <= p2.y)] r = int(round(max(xdiff, ydiff))) if r == 0: return for i in range((r + 1) * resolution): x = p1.x y = p1.y if xdiff: x += (float(i) * xdiff) / r * xdir / resolution if ydiff: y += (float(i) * ydiff) / r * ydir / resolution yield Point((x, y))
Resolve the points to make a line between two points.
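A sketch of iterating the generator above; the Point stand-in matches how the method builds points from an (x, y) tuple, and `canvas` is a placeholder for whatever object defines line():

class Point:
    # minimal stand-in: the project's Point is constructed from an (x, y) pair
    def __init__(self, xy):
        self.x, self.y = xy

pts = list(canvas.line(Point((0, 0)), Point((3, 1))))
print([(p.x, round(p.y, 2)) for p in pts])
# [(0.0, 0.0), (1.0, 0.33), (2.0, 0.67), (3.0, 1.0)]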
def is_terminal(self): """True if this result will stop the test.""" return (self.raised_exception or self.is_timeout or self.phase_result == openhtf.PhaseResult.STOP)
True if this result will stop the test.
def update_search_space(self, search_space): """ Update search space definition in tuner by search_space in parameters. Will be called when the experiment is first set up or when the search space is updated in the WebUI. Parameters ---------- search_space : dict """ self.json = search_space search_space_instance = json2space(self.json) rstate = np.random.RandomState() trials = hp.Trials() domain = hp.Domain(None, search_space_instance, pass_expr_memo_ctrl=None) algorithm = self._choose_tuner(self.algorithm_name) self.rval = hp.FMinIter(algorithm, domain, trials, max_evals=-1, rstate=rstate, verbose=0) self.rval.catch_eval_exceptions = False
Update search space definition in tuner by search_space in parameters. Will be called when the experiment is first set up or when the search space is updated in the WebUI. Parameters ---------- search_space : dict
def bytes2str_in_dicts(inp # type: Union[MutableMapping[Text, Any], MutableSequence[Any], Any] ): # type: (...) -> Union[Text, MutableSequence[Any], MutableMapping[Text, Any]] """ Convert any byte strings present to unicode strings, in place. Input is a dict of nested dicts and lists. """ # if input is dict, recursively call for each value if isinstance(inp, MutableMapping): for k in inp: inp[k] = bytes2str_in_dicts(inp[k]) return inp # if list, iterate through list and fn call # for all its elements if isinstance(inp, MutableSequence): for idx, value in enumerate(inp): inp[idx] = bytes2str_in_dicts(value) return inp # if value is bytes, return decoded string elif isinstance(inp, bytes): return inp.decode('utf-8') # otherwise return the element itself return inp
Convert any byte strings present to unicode strings, in place. Input is a dict of nested dicts and lists.
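A short demonstration of the recursion over mixed containers:

data = {'name': b'alice', 'tags': [b'a', 'b'], 'meta': {'id': b'42'}}
bytes2str_in_dicts(data)
print(data)
# {'name': 'alice', 'tags': ['a', 'b'], 'meta': {'id': '42'}}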
def _get_address_translations_table(address_translations): """Returns a formatted table to print address translations. :param List[dict] address_translations: List of address translations. :return Table: Formatted table for address translation output. """ table = formatting.Table(['id', 'static IP address', 'static IP address id', 'remote IP address', 'remote IP address id', 'note']) for address_translation in address_translations: table.add_row([address_translation.get('id', ''), address_translation.get('internalIpAddressRecord', {}) .get('ipAddress', ''), address_translation.get('internalIpAddressId', ''), address_translation.get('customerIpAddressRecord', {}) .get('ipAddress', ''), address_translation.get('customerIpAddressId', ''), address_translation.get('notes', '')]) return table
Returns a formatted table to print address translations. :param List[dict] address_translations: List of address translations. :return Table: Formatted table for address translation output.
def replicate_directory_tree(input_dir, output_dir): """ _replicate_directory_tree_ clone dir structure under input_dir into output dir All subdirs beneath input_dir will be created under output_dir :param input_dir: path to dir tree to be cloned :param output_dir: path to new dir where dir structure will be created """ def transplant_dir(target, dirname): x = dirname.replace(input_dir, target) if not os.path.exists(x): LOGGER.info('Creating: {}'.format(x)) os.makedirs(x) dir_visitor( input_dir, functools.partial(transplant_dir, output_dir) )
_replicate_directory_tree_ clone dir structure under input_dir into output dir All subdirs beneath input_dir will be created under output_dir :param input_dir: path to dir tree to be cloned :param output_dir: path to new dir where dir structure will be created
def stream_url(self): '''stream for this song - not re-encoded''' path = '/Audio/{}/universal'.format(self.id) return self.connector.get_url(path, userId=self.connector.userid, MaxStreamingBitrate=140000000, Container='opus', TranscodingContainer='opus', AudioCodec='opus', MaxSampleRate=48000, PlaySessionId=1496213367201 #TODO no hard code )
stream for this song - not re-encoded
def send(self, send_email=True): """Marks the invoice as sent in Holvi If send_email is False then the invoice is *not* automatically emailed to the recipient and you must take care of sending the invoice yourself. """ url = str(self.api.base_url + '{code}/status/').format(code=self.code) # six.u messes this up payload = { 'mark_as_sent': True, 'send_email': send_email, } stat = self.api.connection.make_put(url, payload)
Marks the invoice as sent in Holvi If send_email is False then the invoice is *not* automatically emailed to the recipient and you must take care of sending the invoice yourself.
def get_body(self, msg): """ Extracts and returns the decoded body from an EmailMessage object""" body = "" charset = "" if msg.is_multipart(): for part in msg.walk(): ctype = part.get_content_type() cdispo = str(part.get('Content-Disposition')) # skip any text/plain (txt) attachments if ctype == 'text/plain' and 'attachment' not in cdispo: body = part.get_payload(decode=True) # decode charset = part.get_content_charset() break # not multipart - i.e. plain text, no attachments, keeping fingers crossed else: body = msg.get_payload(decode=True) charset = msg.get_content_charset() # fall back to UTF-8 when no charset is declared; pass through non-bytes bodies if isinstance(body, bytes): return body.decode(charset or 'utf-8') return body
Extracts and returns the decoded body from an EmailMessage object
def fetchall_sp(s, p): """ fetch all values of property p for subject s """ query = """ SELECT * WHERE {{ <{s}> <{p}> ?x }} """.format(s=s, p=prefixmap[p]) bindings = run_sparql(query) rows = [r['x']['value'] for r in bindings] return rows
fetch all values of property p for subject s
def obj_to_grid(self, file_path=None, delim=None, tab=None, quote_numbers=True, quote_empty_str=False): """ This will return a str of a grid table. :param file_path: path to data file, defaults to self's contents if left alone :param delim: dict of delimiters, defaults to obj_to_str's method :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: string representing the grid formation of the relevant data """ div_delims = {"top": ['top left corner', 'top intersect', 'top edge', 'top right corner'], "divide": ['left major intersect', 'internal major intersect', 'bottom edge', 'right major intersect'], "middle": ['left intersect', 'internal intersect', 'internal horizontal edge', 'right intersect'], "bottom": ['bottom left intersect', 'bottom intersect', 'bottom edge', 'bottom right corner']} delim = delim if delim else {} for tag in self.FANCY.keys(): delim[tag] = delim[tag] if tag in delim.keys() \ else self.FANCY[tag] tab = self.tab if tab is None else tab list_of_list, column_widths = self.get_data_and_shared_column_widths( data_kwargs=dict(quote_numbers=quote_numbers, quote_empty_str=quote_empty_str), width_kwargs=dict(padding=0, pad_last_column=True)) ret = [[cell.ljust(column_widths[i]) for i, cell in enumerate(row)] for row in list_of_list] grid_row = {} for key in div_delims.keys(): draw = div_delims[key] grid_row[key] = delim[draw[0]] grid_row[key] += delim[draw[1]].join( [delim[draw[2]] * width for width in column_widths]) grid_row[key] += delim[draw[3]] ret = [delim['left edge'] + delim['internal vertical edge'].join(row) + delim['right edge'] for row in ret] header = [grid_row["top"], ret[0], grid_row["divide"]] body = [[row, grid_row["middle"]] for row in ret[1:]] body = [item for pair in body for item in pair][:-1] ret = header + body + [grid_row["bottom"]] ret = tab + (u'\n' + tab).join(ret) self._save_file(file_path, ret) return ret
This will return a str of a grid table. :param file_path: path to data file, defaults to self's contents if left alone :param delim: dict of delimiters, defaults to obj_to_str's method :param tab: string of offset of the table :param quote_numbers: bool if True will quote numbers that are strings :param quote_empty_str: bool if True will quote empty strings :return: string representing the grid formation of the relevant data
def list_nodes_full(call=None): ''' List devices, with all available information. CLI Example: .. code-block:: bash salt-cloud -F salt-cloud --full-query salt-cloud -f list_nodes_full packet-provider ''' if call == 'action': raise SaltCloudException( 'The list_nodes_full function must be called with -f or --function.' ) ret = {} for device in get_devices_by_token(): ret[device.hostname] = device.__dict__ return ret
List devices, with all available information. CLI Example: .. code-block:: bash salt-cloud -F salt-cloud --full-query salt-cloud -f list_nodes_full packet-provider
def validate_url(url): """ Auxiliary method to validate a URL :param url: The URL to be validated :type url: string :returns: True if the url is valid :rtype: bool """ scheme = url.split('://')[0].lower() if scheme not in url_schemes: return False if not bool(url_regex.search(url)): return False return True
Auxiliary method to validate a URL :param url: The URL to be validated :type url: string :returns: True if the url is valid :rtype: bool
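A usage sketch; note that acceptance depends on the module-level url_schemes list and url_regex, which are not shown here:

for url in ('https://example.com/a?b=1', 'ftp://host/file.txt', 'not a url'):
    print(url, '->', validate_url(url))
# 'https://...' passes only if 'https' is in url_schemes and the regex matches;
# 'not a url' fails the scheme check ('not a url'.split('://')[0] is the whole string)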
def _run_sbgenomics(args): """Run CWL on SevenBridges platform and Cancer Genomics Cloud. """ assert not args.no_container, "Seven Bridges runs require containers" main_file, json_file, project_name = _get_main_and_json(args.directory) flags = [] cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file] _run_tool(cmd)
Run CWL on SevenBridges platform and Cancer Genomics Cloud.
def pcapname(dev): """Get the device pcap name by device name or Scapy NetworkInterface """ if isinstance(dev, NetworkInterface): if dev.is_invalid(): return None return dev.pcap_name try: return IFACES.dev_from_name(dev).pcap_name except ValueError: return IFACES.dev_from_pcapname(dev).pcap_name
Get the device pcap name by device name or Scapy NetworkInterface
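pcapname resolves an interface first by its OS-level name and only falls back to a lookup by pcap name when that raises ValueError. The toy table below makes that fallback visible; nothing here is Scapy's actual IFACES or NetworkInterface implementation, and the interface data is invented.

# Standalone sketch of the lookup-with-fallback pattern.
class FakeIfaceTable:
    def __init__(self, interfaces):
        self._by_name = {i["name"]: i for i in interfaces}
        self._by_pcapname = {i["pcap_name"]: i for i in interfaces}

    def dev_from_name(self, name):
        try:
            return self._by_name[name]
        except KeyError:
            raise ValueError("unknown interface name %r" % name)

    def dev_from_pcapname(self, pcap_name):
        return self._by_pcapname[pcap_name]

ifaces = FakeIfaceTable([{"name": "eth0", "pcap_name": r"\Device\NPF_{1234}"}])

def lookup_pcapname(dev):
    try:
        return ifaces.dev_from_name(dev)["pcap_name"]
    except ValueError:
        return ifaces.dev_from_pcapname(dev)["pcap_name"]

print(lookup_pcapname("eth0"))                 # found by friendly name
print(lookup_pcapname(r"\Device\NPF_{1234}"))  # found by pcap name via fallback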
def override_options(config: DictLike, selected_options: Tuple[Any, ...], set_of_possible_options: Tuple[enum.Enum, ...], config_containing_override: DictLike = None) -> DictLike: """ Determine override options for a particular configuration. The options are determined by searching following the order specified in selected_options. For the example config, .. code-block:: yaml config: value: 3 override: 2.76: track: value: 5 value will be assigned the value 5 if we are at 2.76 TeV with a track bias, regardless of the event activity or leading hadron bias. The order of this configuration is specified by the order of the selected_options passed. The above example configuration is from the jet-hadron analysis. Since anchors aren't kept for scalar values, if you want to override an anchored value, you need to specify it as a single value in a list (or dict, but list is easier). After the anchor values propagate, single element lists can be converted into scalar values using ``simplify_data_representations()``. Args: config: The dict-like configuration from ruamel.yaml which should be overridden. selected_options: The selected analysis options. They will be checked in the order with which they are passed, so make certain that it matches the order in the configuration file! set_of_possible_options (tuple of enums): Possible options for the override value categories. config_containing_override: The dict-like config containing the override options in a map called "override". If it is not specified, it will look for it in the main config. Returns: dict-like object: The updated configuration """ if config_containing_override is None: config_containing_override = config override_opts = config_containing_override.pop("override") override_dict = determine_override_options(selected_options, override_opts, set_of_possible_options) logger.debug(f"override_dict: {override_dict}") # Set the configuration values to those specified in the override options # Cannot just use update() on config because we need to maintain the anchors. for k, v in override_dict.items(): # Check if key is there and if it is not None! (The second part is important) if k in config: try: # If it has an anchor, we know that we want to preserve the type. So we check for the anchor # by trying to access it (Note that we don't actually care what the value is - just that it # exists). If it fails with an AttributeError, then we know we can just assign the value. If it # has an anchor, then we want to preserve the anchor information. config[k].anchor logger.debug(f"type: {type(config[k])}, k: {k}") if isinstance(config[k], list): # Clear out the existing list entries del config[k][:] if isinstance(override_dict[k], (str, int, float, bool)): # We have to treat str carefully because it is an iterable, but it will be expanded as # individual characters if it's treated the same as a list, which is not the desired # behavior! If we wrap it in [], then it will be treated as the only entry in the list # NOTE: We also treat the basic types this way because they will be passed this way if # overriding indirectly with anchors (since the basic scalar types don't yet support # reassignment while maintaining their anchors). 
config[k].append(override_dict[k]) else: # Here we just assign all entries of the list to all entries of override_dict[k] config[k].extend(override_dict[k]) elif isinstance(config[k], dict): # Clear out the existing entries because we are trying to replace everything # Then we can simply update the dict with our new values config[k].clear() config[k].update(override_dict[k]) elif isinstance(config[k], (int, float, bool)): # This isn't really very good (since we lose information), but there's nothing that can be done # about it at the moment (Dec 2018) logger.debug("Overwriting YAML anchor object. It is currently unclear how to reassign this value.") config[k] = v else: # Raise a value error on all of the cases that we aren't already aware of. raise ValueError(f"Object {k} (type {type(config[k])}) somehow has an anchor, but is something other than a list or dict. Attempting to assign directly to it.") except AttributeError: # If no anchor, just overwrite the value at this key config[k] = v else: raise KeyError(k, f"Trying to override key \"{k}\" that it is not in the config.") return config
Determine override options for a particular configuration. The options are determined by searching following the order specified in selected_options. For the example config, .. code-block:: yaml config: value: 3 override: 2.76: track: value: 5 value will be assigned the value 5 if we are at 2.76 TeV with a track bias, regardless of the event activity or leading hadron bias. The order of this configuration is specified by the order of the selected_options passed. The above example configuration is from the jet-hadron analysis. Since anchors aren't kept for scalar values, if you want to override an anchored value, you need to specify it as a single value in a list (or dict, but list is easier). After the anchor values propagate, single element lists can be converted into scalar values using ``simplify_data_representations()``. Args: config: The dict-like configuration from ruamel.yaml which should be overridden. selected_options: The selected analysis options. They will be checked in the order with which they are passed, so make certain that it matches the order in the configuration file! set_of_possible_options (tuple of enums): Possible options for the override value categories. config_containing_override: The dict-like config containing the override options in a map called "override". If it is not specified, it will look for it in the main config. Returns: dict-like object: The updated configuration
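The anchor-preserving gymnastics in the try block are easier to see with a tiny round-trip example. This is a standalone illustration of ruamel.yaml's behaviour, not a call into override_options itself, and the YAML snippet and key names are invented for the example.

import sys
from ruamel.yaml import YAML

yaml = YAML(typ="rt")  # round-trip mode keeps anchors and aliases
doc = yaml.load(
    "shared: &shared [1, 2, 3]\n"
    "first: *shared\n"
    "second: *shared\n"
)

# In-place mutation keeps the anchored object, so the aliases still point at it.
del doc["shared"][:]
doc["shared"].extend([7, 8])
yaml.dump(doc, sys.stdout)
# prints roughly:
#   shared: &shared [7, 8]
#   first: *shared
#   second: *shared

# Reassigning instead, e.g. doc["shared"] = [7, 8], would replace the anchored
# object entirely and detach "shared" from the aliases.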
def get_phone_numbers(self): """ : returns: dict of type and phone number list :rtype: dict(str, list(str)) """ phone_dict = {} for child in self.vcard.getChildren(): if child.name == "TEL": # phone types type = helpers.list_to_string( self._get_types_for_vcard_object(child, "voice"), ", ") if type not in phone_dict: phone_dict[type] = [] # phone value # # vcard version 4.0 allows URI scheme "tel" in phone attribute value # Doc: https://tools.ietf.org/html/rfc6350#section-6.4.1 # example: TEL;VALUE=uri;PREF=1;TYPE="voice,home":tel:+1-555-555-5555;ext=5555 if child.value.lower().startswith("tel:"): # cut off the "tel:" uri prefix phone_dict[type].append(child.value[4:]) else: # free text field phone_dict[type].append(child.value) # sort phone number lists for number_list in phone_dict.values(): number_list.sort() return phone_dict
:returns: dict mapping phone number type to a list of phone numbers :rtype: dict(str, list(str))
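A small usage sketch assuming self.vcard is a vobject component, which the getChildren()/name/value access pattern suggests but does not prove. The vCard text is made up; it shows both a plain TEL value and the RFC 6350 "tel:" URI form (formally a vCard 4.0 feature) that the method strips.

import vobject

card = vobject.readOne(
    "BEGIN:VCARD\r\n"
    "VERSION:3.0\r\n"
    "FN:Jane Doe\r\n"
    "TEL;TYPE=home:+1-555-000-1111\r\n"
    "TEL;TYPE=work:tel:+1-555-000-2222\r\n"
    "END:VCARD\r\n"
)

for child in card.getChildren():
    if child.name == "TEL":
        value = child.value
        if value.lower().startswith("tel:"):
            value = value[4:]  # drop the URI scheme, keep the number
        print(child.params.get("TYPE"), value)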
def match_resource_id(self, resource_id, match): """Sets the resource ``Id`` for this query. arg: resource_id (osid.id.Id): a resource ``Id`` arg: match (boolean): ``true`` if a positive match, ``false`` for a negative match raise: NullArgument - ``resource_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if not isinstance(resource_id, Id): raise errors.InvalidArgument() self._add_match('resourceId', str(resource_id), match)
Sets the resource ``Id`` for this query. arg: resource_id (osid.id.Id): a resource ``Id`` arg: match (boolean): ``true`` if a positive match, ``false`` for a negative match raise: NullArgument - ``resource_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
def delete_file(self, sass_filename, sass_fileurl): """ Delete a *.css file, but only if it has been generated through a SASS/SCSS file. """ if self.use_static_root: destpath = os.path.join(self.static_root, os.path.splitext(sass_fileurl)[0] + '.css') else: destpath = os.path.splitext(sass_filename)[0] + '.css' if os.path.isfile(destpath): os.remove(destpath) self.processed_files.append(sass_filename) if self.verbosity > 1: self.stdout.write("Deleted '{0}'\n".format(destpath))
Delete a *.css file, but only if it has been generated through a SASS/SCSS file.
def deserialize_logical(self, node): """ Reads the logical tag from the given node, returns a Condition object. node -- the xml node (xml.dom.minidom.Node) """ term1_attrib = node.getAttribute('left-field') term1_value = node.getAttribute('left-value') op = node.nodeName.lower() term2_attrib = node.getAttribute('right-field') term2_value = node.getAttribute('right-value') if op not in _op_map: _exc('Invalid operator') if term1_attrib != '' and term1_value != '': _exc('Both, left-field and left-value attributes found') elif term1_attrib == '' and term1_value == '': _exc('left-field or left-value attribute required') elif term1_value != '': left = term1_value else: left = operators.Attrib(term1_attrib) if term2_attrib != '' and term2_value != '': _exc('Both, right-field and right-value attributes found') elif term2_attrib == '' and term2_value == '': _exc('right-field or right-value attribute required') elif term2_value != '': right = term2_value else: right = operators.Attrib(term2_attrib) return _op_map[op](left, right)
Reads the logical tag from the given node, returns a Condition object. node -- the xml node (xml.dom.minidom.Node)
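The node it consumes is a single XML element whose tag names the operator and whose attributes name either a literal value or an attribute reference on each side. The toy version below makes that convention concrete; the operator tag name, the op map, and the returned tuples are invented stand-ins for the real _op_map and Condition classes.

from xml.dom import minidom

_demo_op_map = {
    "equal": lambda left, right: ("==", left, right),
    "greater-than": lambda left, right: (">", left, right),
}

def demo_deserialize(node):
    op = node.nodeName.lower()
    # each side is either a literal *-value or a reference to another field
    left = node.getAttribute("left-value") or ("attrib:" + node.getAttribute("left-field"))
    right = node.getAttribute("right-value") or ("attrib:" + node.getAttribute("right-field"))
    return _demo_op_map[op](left, right)

doc = minidom.parseString('<equal left-field="status" right-value="done"/>')
print(demo_deserialize(doc.documentElement))  # ('==', 'attrib:status', 'done')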
def _create_model(model, ident, **params): """ Create a model by cloning and then setting params """ with log_errors(pdb=True): model = clone(model).set_params(**params) return model, {"model_id": ident, "params": params, "partial_fit_calls": 0}
Create a model by cloning and then setting params
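A short usage sketch of the clone-then-set_params pattern. It assumes `clone` here is scikit-learn's sklearn.base.clone, which the partial_fit bookkeeping suggests but the snippet does not show; the estimator and parameter values are chosen only for illustration.

from sklearn.base import clone
from sklearn.linear_model import SGDClassifier

base = SGDClassifier(alpha=1e-4)
tuned = clone(base).set_params(alpha=1e-2, penalty="l1")

# clone() returns an unfitted estimator with the same constructor params,
# so the copy can be reconfigured without touching the original.
print(base.alpha, tuned.alpha)  # 0.0001 0.01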
def _set_mpls_traffic_lsps(self, v, load=False): """ Setter method for mpls_traffic_lsps, mapped from YANG variable /telemetry/profile/mpls_traffic_lsp/mpls_traffic_lsps (list) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_traffic_lsps is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_traffic_lsps() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("mpls_traffic_lsp_name",mpls_traffic_lsps.mpls_traffic_lsps, yang_name="mpls-traffic-lsps", rest_name="lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-lsp-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficlsp', u'cli-suppress-mode': None, u'alt-name': u'lsp', u'info': u'MPLS Stats profile by LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-lsps", rest_name="lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficlsp', u'cli-suppress-mode': None, u'alt-name': u'lsp', u'info': u'MPLS Stats profile by LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """mpls_traffic_lsps must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("mpls_traffic_lsp_name",mpls_traffic_lsps.mpls_traffic_lsps, yang_name="mpls-traffic-lsps", rest_name="lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-lsp-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficlsp', u'cli-suppress-mode': None, u'alt-name': u'lsp', u'info': u'MPLS Stats profile by LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-lsps", rest_name="lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficlsp', u'cli-suppress-mode': None, u'alt-name': u'lsp', u'info': u'MPLS Stats profile by LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", }) self.__mpls_traffic_lsps = t if hasattr(self, '_set'): self._set()
Setter method for mpls_traffic_lsps, mapped from YANG variable /telemetry/profile/mpls_traffic_lsp/mpls_traffic_lsps (list) If this variable is read-only (config: false) in the source YANG file, then _set_mpls_traffic_lsps is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_mpls_traffic_lsps() directly.
def half_duration(self): """Half of the duration of the current interval.""" if self._interval is not None: a, b = self._interval return (b - a) * .5 else: return self.interval_duration * .5
Half of the duration of the current interval.