Dataset schema:
_id — string (length 2–7)
title — string (length 1–88)
partition — string (3 classes)
text — string (length 31–13.1k)
language — string (1 class)
meta_information — dict
q13700
default_links_factory_with_additional
train
def default_links_factory_with_additional(additional_links): """Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. """ def factory(pid, **kwargs): links = default_links_factory(pid) for link in
python
{ "resource": "" }
q13701
geolocation_sort
train
def geolocation_sort(field_name, argument, unit, mode=None, distance_type=None): """Sort field factory for geo-location based sorting. :param argument: Name of URL query string field to parse pin location from. Multiple locations can be provided. Each location can be either a string "latitude,longitude" or a geohash. :param unit: Distance unit (e.g. km). :param mode: Sort mode (avg, min, max). :param distance_type: Distance calculation mode. :returns: Function that returns geolocation sort field. """ def inner(asc): locations = request.values.getlist(argument, type=str) field = { '_geo_distance': {
python
{ "resource": "" }
q13702
eval_field
train
def eval_field(field, asc): """Evaluate a field for sorting purpose. :param field: Field definition (string, dict or callable). :param asc: ``True`` if order is ascending, ``False`` if descending. :returns: Dictionary with the sort field query. """ if isinstance(field, dict): if asc: return field else: # Field should only have one key and must have an order subkey. field = copy.deepcopy(field) key = list(field.keys())[0]
python
{ "resource": "" }
q13703
default_sorter_factory
train
def default_sorter_factory(search, index): """Default sort query factory. :param search: Search object. :param index: Index to search in. :returns: Tuple of (search, URL arguments). """ sort_arg_name = 'sort' urlfield = request.values.get(sort_arg_name, '', type=str) # Get default sorting if sort is not specified. if not urlfield: # cast to six.text_type to handle unicodes in Python 2 has_query = request.values.get('q', type=six.text_type) urlfield = current_app.config['RECORDS_REST_DEFAULT_SORT'].get( index, {}).get('query' if has_query else 'noquery', '') # Parse sort argument key, asc = parse_sort_field(urlfield) # Get sort options
python
{ "resource": "" }
q13704
RecordMetadataSchemaJSONV1.inject_pid
train
def inject_pid(self, data): """Inject context PID in the RECID field.""" # Remove already deserialized "pid" field
python
{ "resource": "" }
q13705
BasicNode.get_render
train
def get_render(self, context): """ Returns a `Context` object with all the necessary stuff for rendering the form :param context: `django.template.Context` variable holding the context for the node `self.form` and `self.helper` are resolved into real Python objects resolving them from the `context`. The `actual_form` can be a form or a formset. If it's a formset, `is_formset` is set to True. If the helper has a layout we use it, for rendering the form or the formset's forms. """ actual_form = self.form.resolve(context) attrs = {} if self.helper is not None: helper = self.helper.resolve(context) if not isinstance(helper, FormHelper): raise TypeError('helper object provided to uni_form tag must be a uni_form.helpers.FormHelper object.') attrs = helper.get_attributes() else: helper = None # We get the response dictionary is_formset = isinstance(actual_form, BaseFormSet) response_dict = self.get_response_dict(attrs, context, is_formset) # If we have a helper's layout we use it, for the form or the formset's forms if helper and
python
{ "resource": "" }
q13706
FormHelper.get_attributes
train
def get_attributes(self): """ Used by the uni_form_tags to get helper attributes """ items = {} items['form_method'] = self.form_method.strip() items['form_tag'] = self.form_tag items['form_style'] = self.form_style.strip() if self.form_action: items['form_action'] = self.form_action.strip() if self.form_id: items['id'] = self.form_id.strip() if self.form_class: items['class'] = self.form_class.strip() if self.inputs:
python
{ "resource": "" }
q13707
sequence_unique.add_exp
train
def add_exp(self,gr,exp): """Function to add the counts for each sample :param gr: name of the sample :param exp: counts of sample **gr** :returns: dict
python
{ "resource": "" }
q13708
MakeJoint
train
def MakeJoint(pmf1, pmf2): """Joint distribution of values from pmf1 and pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: Joint pmf of value pairs """ joint = Joint() for
python
{ "resource": "" }
q13709
MakeHistFromList
train
def MakeHistFromList(t, name=''): """Makes a histogram from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this histogram
python
{ "resource": "" }
q13710
MakePmfFromList
train
def MakePmfFromList(t, name=''): """Makes a PMF from an unsorted sequence of values. Args: t: sequence of numbers name: string name for this PMF Returns: Pmf object """
python
{ "resource": "" }
q13711
MakePmfFromDict
train
def MakePmfFromDict(d, name=''): """Makes a PMF from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this
python
{ "resource": "" }
q13712
MakePmfFromItems
train
def MakePmfFromItems(t, name=''): """Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs name: string name for this PMF
python
{ "resource": "" }
q13713
MakePmfFromHist
train
def MakePmfFromHist(hist, name=None): """Makes a normalized PMF from a Hist object. Args: hist: Hist object name: string name Returns: Pmf object """ if name is None:
python
{ "resource": "" }
q13714
MakePmfFromCdf
train
def MakePmfFromCdf(cdf, name=None): """Makes a normalized Pmf from a Cdf object. Args: cdf: Cdf object name: string name for the new Pmf Returns: Pmf object """ if name is None:
python
{ "resource": "" }
q13715
MakeMixture
train
def MakeMixture(metapmf, name='mix'): """Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. name: string name
python
{ "resource": "" }
q13716
MakeUniformPmf
train
def MakeUniformPmf(low, high, n): """Make a uniform Pmf. low: lowest value (inclusive) high: highest value (inclusive) n: number of values """
python
{ "resource": "" }
q13717
MakeCdfFromPmf
train
def MakeCdfFromPmf(pmf, name=None): """Makes a CDF from a Pmf object. Args: pmf: Pmf.Pmf object name: string name for the data. Returns: Cdf object """
python
{ "resource": "" }
q13718
MakeSuiteFromList
train
def MakeSuiteFromList(t, name=''): """Makes a suite from an unsorted sequence of values. Args: t: sequence of numbers
python
{ "resource": "" }
q13719
MakeSuiteFromHist
train
def MakeSuiteFromHist(hist, name=None): """Makes a normalized suite from a Hist object. Args: hist: Hist object name: string name Returns: Suite object """ if name is None:
python
{ "resource": "" }
q13720
MakeSuiteFromDict
train
def MakeSuiteFromDict(d, name=''): """Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values
python
{ "resource": "" }
q13721
MakeSuiteFromCdf
train
def MakeSuiteFromCdf(cdf, name=None): """Makes a normalized Suite from a Cdf object. Args: cdf: Cdf object name: string name for the new Suite Returns: Suite object """ if name is None:
python
{ "resource": "" }
q13722
Percentile
train
def Percentile(pmf, percentage): """Computes a percentile of a given Pmf. percentage: float 0-100 """ p = percentage / 100.0
python
{ "resource": "" }
q13723
CredibleInterval
train
def CredibleInterval(pmf, percentage=90): """Computes a credible interval for a given distribution. If percentage=90, computes the 90% CI. Args: pmf: Pmf object representing a posterior distribution percentage: float between 0 and 100 Returns: sequence of two floats, low
python
{ "resource": "" }
q13724
PmfProbLess
train
def PmfProbLess(pmf1, pmf2): """Probability that a value from pmf1 is less than a value from pmf2. Args: pmf1: Pmf object pmf2: Pmf object Returns: float probability """ total = 0.0 for v1,
python
{ "resource": "" }
q13725
SampleSum
train
def SampleSum(dists, n): """Draws a sample of sums from a list of distributions. dists: sequence of Pmf or Cdf objects n: sample size returns:
python
{ "resource": "" }
q13726
EvalGaussianPdf
train
def EvalGaussianPdf(x, mu, sigma): """Computes the unnormalized PDF of the normal distribution. x: value mu: mean sigma: standard deviation
python
{ "resource": "" }
q13727
EvalBinomialPmf
train
def EvalBinomialPmf(k, n, p): """Evaluates the binomial pmf. Returns the probability of k successes in n trials with
python
{ "resource": "" }
q13728
EvalPoissonPmf
train
def EvalPoissonPmf(k, lam): """Computes the Poisson PMF. k: number of events lam: parameter lambda in events per unit time returns: float probability """ # don't use the scipy function (yet).
python
{ "resource": "" }
q13729
MakePoissonPmf
train
def MakePoissonPmf(lam, high, step=1): """Makes a PMF discrete approx to a Poisson distribution. lam: parameter lambda in events per unit time high: upper bound of the Pmf returns: normalized Pmf """ pmf = Pmf() for k
python
{ "resource": "" }
q13730
MakeExponentialPmf
train
def MakeExponentialPmf(lam, high, n=200): """Makes a PMF discrete approx to an exponential distribution. lam: parameter lambda in events per unit time high: upper bound n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf()
python
{ "resource": "" }
q13731
GaussianCdfInverse
train
def GaussianCdfInverse(p, mu=0, sigma=1): """Evaluates the inverse CDF of the gaussian distribution. See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function Args: p: float mu: mean parameter
python
{ "resource": "" }
q13732
LogBinomialCoef
train
def LogBinomialCoef(n, k): """Computes the log of the binomial coefficient. http://math.stackexchange.com/questions/64716/ approximating-the-logarithm-of-the-binomial-coefficient n: number of trials
python
{ "resource": "" }
q13733
Interpolator.Lookup
train
def Lookup(self, x): """Looks up x and returns the corresponding value of y."""
python
{ "resource": "" }
q13734
Interpolator.Reverse
train
def Reverse(self, y): """Looks up y and returns the corresponding value of x."""
python
{ "resource": "" }
q13735
_DictWrapper.InitMapping
train
def InitMapping(self, values): """Initializes with a map from value to probability.
python
{ "resource": "" }
q13736
_DictWrapper.InitPmf
train
def InitPmf(self, values): """Initializes with a Pmf. values: Pmf object """
python
{ "resource": "" }
q13737
_DictWrapper.Copy
train
def Copy(self, name=None): """Returns a copy. Make a shallow copy of d. If you want a deep copy of d, use copy.deepcopy on the whole object. Args: name: string name for the new Hist
python
{ "resource": "" }
q13738
_DictWrapper.Scale
train
def Scale(self, factor): """Multiplies the values by a factor. factor: what to multiply by Returns: new object """ new = self.Copy() new.d.clear()
python
{ "resource": "" }
q13739
_DictWrapper.Log
train
def Log(self, m=None): """Log transforms the probabilities. Removes values with probability 0. Normalizes so that the largest logprob is 0. """ if self.log: raise ValueError("Pmf/Hist already under a log transform") self.log = True
python
{ "resource": "" }
q13740
_DictWrapper.Exp
train
def Exp(self, m=None): """Exponentiates the probabilities. m: how much to shift the ps before exponentiating If m is None, normalizes so that the largest prob is 1. """ if not self.log: raise ValueError("Pmf/Hist
python
{ "resource": "" }
q13741
Hist.IsSubset
train
def IsSubset(self, other): """Checks whether the values in this histogram are a subset of the values in the given histogram.""" for val, freq in self.Items():
python
{ "resource": "" }
q13742
Hist.Subtract
train
def Subtract(self, other): """Subtracts the values in the given histogram from this histogram."""
python
{ "resource": "" }
q13743
Pmf.ProbGreater
train
def ProbGreater(self, x): """Probability that a sample from this Pmf exceeds x. x: number
python
{ "resource": "" }
q13744
Pmf.Normalize
train
def Normalize(self, fraction=1.0): """Normalizes this PMF so the sum of all probs is fraction. Args: fraction: what the total should be after normalization Returns: the total probability before normalizing """ if self.log: raise ValueError("Pmf is under a log transform") total = self.Total() if total == 0.0: raise ValueError('total probability is zero.')
python
{ "resource": "" }
q13745
Pmf.Random
train
def Random(self): """Chooses a random element from this PMF. Returns: float value from the Pmf """ if len(self.d) == 0: raise ValueError('Pmf contains no values.') target = random.random() total = 0.0 for
python
{ "resource": "" }
q13746
Pmf.Mean
train
def Mean(self): """Computes the mean of a PMF. Returns: float mean """ mu = 0.0 for x, p in
python
{ "resource": "" }
q13747
Pmf.Var
train
def Var(self, mu=None): """Computes the variance of a PMF. Args: mu: the point around which the variance is computed; if omitted, computes the mean Returns: float variance """
python
{ "resource": "" }
q13748
Pmf.MaximumLikelihood
train
def MaximumLikelihood(self): """Returns the value with the highest probability. Returns: float probability
python
{ "resource": "" }
q13749
Pmf.AddPmf
train
def AddPmf(self, other): """Computes the Pmf of the sum of values drawn from self and other. other: another Pmf returns: new Pmf """ pmf = Pmf()
python
{ "resource": "" }
q13750
Pmf.AddConstant
train
def AddConstant(self, other): """Computes the Pmf of the sum a constant and values from self. other: a number returns: new Pmf """ pmf = Pmf()
python
{ "resource": "" }
q13751
Joint.Marginal
train
def Marginal(self, i, name=''): """Gets the marginal distribution of the indicated variable. i: index of the variable we want Returns: Pmf """ pmf = Pmf(name=name)
python
{ "resource": "" }
q13752
Joint.Conditional
train
def Conditional(self, i, j, val, name=''): """Gets the conditional distribution of the indicated variable. Distribution of vs[i], conditioned on vs[j] = val. i: index of the variable we want j: which variable is conditioned on val: the value the jth variable has to have
python
{ "resource": "" }
q13753
Joint.MaxLikeInterval
train
def MaxLikeInterval(self, percentage=90): """Returns the maximum-likelihood credible interval. If percentage=90, computes a 90% CI containing the values with the highest likelihoods. percentage: float between 0 and 100 Returns: list of values from the suite """ interval = [] total = 0 t = [(prob, val) for val, prob in self.Items()]
python
{ "resource": "" }
q13754
Cdf.Copy
train
def Copy(self, name=None): """Returns a copy of this Cdf. Args: name: string name for the new Cdf """ if name is None:
python
{ "resource": "" }
q13755
Cdf.Shift
train
def Shift(self, term): """Adds a term to the xs. term: how much to add """
python
{ "resource": "" }
q13756
Cdf.Scale
train
def Scale(self, factor): """Multiplies the xs by a factor. factor: what to multiply by """
python
{ "resource": "" }
q13757
Cdf.Mean
train
def Mean(self): """Computes the mean of a CDF. Returns: float mean """ old_p = 0 total = 0.0 for x, new_p in zip(self.xs, self.ps):
python
{ "resource": "" }
q13758
Cdf.CredibleInterval
train
def CredibleInterval(self, percentage=90): """Computes the central credible interval. If percentage=90, computes the 90% CI. Args: percentage: float between 0 and 100 Returns: sequence of two
python
{ "resource": "" }
q13759
Cdf.Render
train
def Render(self): """Generates a sequence of points suitable for plotting. An empirical CDF is a step function; linear interpolation can be misleading. Returns: tuple of (xs, ps)
python
{ "resource": "" }
q13760
Suite.LogUpdate
train
def LogUpdate(self, data): """Updates a suite of hypotheses based on new data. Modifies the suite directly; if you want to keep the original, make a copy. Note: unlike Update, LogUpdate does not normalize. Args:
python
{ "resource": "" }
q13761
Suite.UpdateSet
train
def UpdateSet(self, dataset): """Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want
python
{ "resource": "" }
q13762
Suite.Print
train
def Print(self): """Prints the hypotheses and their probabilities.""" for
python
{ "resource": "" }
q13763
Suite.MakeOdds
train
def MakeOdds(self): """Transforms from probabilities to odds. Values with prob=0 are removed. """ for hypo, prob in self.Items(): if prob:
python
{ "resource": "" }
q13764
Suite.MakeProbs
train
def MakeProbs(self): """Transforms from odds to probabilities.""" for
python
{ "resource": "" }
q13765
Pdf.MakePmf
train
def MakePmf(self, xs, name=''): """Makes a discrete version of this Pdf, evaluated at xs. xs: equally-spaced sequence of values Returns: new Pmf """ pmf = Pmf(name=name)
python
{ "resource": "" }
q13766
Beta.Update
train
def Update(self, data): """Updates a Beta distribution. data: pair of int (heads, tails) """
python
{ "resource": "" }
q13767
Beta.Sample
train
def Sample(self, n): """Generates a random sample from this distribution.
python
{ "resource": "" }
q13768
Beta.MakePmf
train
def MakePmf(self, steps=101, name=''): """Returns a Pmf of this distribution. Note: Normally, we just evaluate the PDF at a sequence of points and treat the probability density as a probability mass. But if alpha or beta is less than one, we have to be more careful because the PDF goes to infinity at x=0 and x=1. In that case we evaluate the CDF and compute differences. """ if self.alpha < 1 or self.beta < 1:
python
{ "resource": "" }
q13769
Beta.MakeCdf
train
def MakeCdf(self, steps=101): """Returns the CDF of this distribution.""" xs = [i / (steps -
python
{ "resource": "" }
q13770
Dirichlet.Update
train
def Update(self, data): """Updates a Dirichlet distribution. data: sequence of observations, in order corresponding to params
python
{ "resource": "" }
q13771
Dirichlet.Random
train
def Random(self): """Generates a random variate from this distribution. Returns: normalized vector of fractions """
python
{ "resource": "" }
q13772
Dirichlet.Likelihood
train
def Likelihood(self, data): """Computes the likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float probability """ m = len(data)
python
{ "resource": "" }
q13773
Dirichlet.LogLikelihood
train
def LogLikelihood(self, data): """Computes the log likelihood of the data. Selects a random vector of probabilities from this distribution. Returns: float log probability """ m = len(data)
python
{ "resource": "" }
q13774
Dirichlet.MarginalBeta
train
def MarginalBeta(self, i): """Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object
python
{ "resource": "" }
q13775
Dirichlet.PredictivePmf
train
def PredictivePmf(self, xs, name=''): """Makes a predictive distribution. xs: values to go into the Pmf Returns: Pmf
python
{ "resource": "" }
q13776
_get_ann
train
def _get_ann(dbs, features): """ Gives format to annotation for html table output """ value = "" for db,
python
{ "resource": "" }
q13777
make_profile
train
def make_profile(data, out_dir, args): """ Make data report for each cluster """ safe_dirs(out_dir) main_table = [] header = ['id', 'ann'] n = len(data[0]) bar = ProgressBar(maxval=n) bar.start() bar.update(0) for itern, c in enumerate(data[0]): bar.update(itern) logger.debug("creating cluster: {}".format(c)) safe_dirs(os.path.join(out_dir, c)) valid, ann, pos_structure = _single_cluster(c, data, os.path.join(out_dir, c, "maps.tsv"), args)
python
{ "resource": "" }
q13778
_expand
train
def _expand(dat, counts, start, end): """ expand the same counts from start to end """ for pos in range(start, end):
python
{ "resource": "" }
q13779
_convert_to_df
train
def _convert_to_df(in_file, freq, raw_file): """ convert data frame into table with pandas """ dat = defaultdict(Counter) if isinstance(in_file, (str, unicode)): with open(in_file) as in_handle: for line in in_handle: cols = line.strip().split("\t") counts = freq[cols[3]] dat = _expand(dat, counts, int(cols[1]), int(cols[2])) else: if raw_file: out_handle = open(raw_file, "w") for name in in_file: counts = freq[name]
python
{ "resource": "" }
q13780
_make
train
def _make(c): """ create html from template, adding figure, annotation and sequences counts """ ann = defaultdict(list) for pos in c['ann']: for db in pos: ann[db] += list(pos[db]) logger.debug(ann)
python
{ "resource": "" }
q13781
_single_cluster
train
def _single_cluster(c, data, out_file, args): """ Map sequences on precursors and create expression profile """ valid, ann = 0, 0 raw_file = None freq = defaultdict() [freq.update({s.keys()[0]: s.values()[0]}) for s in data[0][c]['freq']] names = [s.keys()[0] for s in data[0][c]['seqs']] seqs = [s.values()[0] for s in data[0][c]['seqs']] loci = data[0][c]['loci'] if loci[0][3] - loci[0][2] > 500: logger.info("locus bigger > 500 nt, skipping: %s" % loci) return valid, ann, {} if not file_exists(out_file): if args.razer: logger.debug("map with razer all sequences to all loci %s " % loci)
python
{ "resource": "" }
q13782
read_cluster
train
def read_cluster(data, id=1): """Read json cluster and populate as cluster class""" cl = cluster(1) # seqs = [s.values()[0] for s in data['seqs']] names = [s.keys()[0] for s in data['seqs']]
python
{ "resource": "" }
q13783
write_data
train
def write_data(data, out_file): """write json file from seqcluster cluster"""
python
{ "resource": "" }
q13784
get_sequences_from_cluster
train
def get_sequences_from_cluster(c1, c2, data): """get all sequences from two clusters""" seqs1 = data[c1]['seqs'] seqs2 = data[c2]['seqs'] seqs = list(set(seqs1 + seqs2)) names = [] for s in seqs: if s in seqs1 and s in seqs2:
python
{ "resource": "" }
q13785
map_to_precursors
train
def map_to_precursors(seqs, names, loci, out_file, args): """map sequences to precursors with razers3""" with make_temp_directory() as temp: pre_fasta = os.path.join(temp, "pre.fa") seqs_fasta = os.path.join(temp, "seqs.fa") out_sam = os.path.join(temp, "out.sam") pre_fasta = get_loci_fasta(loci, pre_fasta, args.ref) out_precursor_file = out_file.replace("tsv", "fa") seqs_fasta = get_seqs_fasta(seqs, names, seqs_fasta)
python
{ "resource": "" }
q13786
precursor_sequence
train
def precursor_sequence(loci, reference): """Get sequence from genome""" region = "%s\t%s\t%s\t.\t.\t%s" % (loci[1], loci[2], loci[3], loci[4]) precursor =
python
{ "resource": "" }
q13787
map_to_precursors_on_fly
train
def map_to_precursors_on_fly(seqs, names, loci, args): """map sequences to precursors with franpr algorithm to avoid writing to disk""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = pyMatch.Match(precursor, str(s), 1, 3)
python
{ "resource": "" }
q13788
map_to_precursor_biopython
train
def map_to_precursor_biopython(seqs, names, loci, args): """map the sequences using biopython package""" precursor = precursor_sequence(loci, args.ref).upper() dat = dict() for s, n in itertools.izip(seqs, names): res = _align(str(s), precursor)
python
{ "resource": "" }
q13789
get_seqs_fasta
train
def get_seqs_fasta(seqs, names, out_fa): """get fasta from sequences""" with open(out_fa, 'w') as fa_handle: for s, n in itertools.izip(seqs, names):
python
{ "resource": "" }
q13790
get_loci_fasta
train
def get_loci_fasta(loci, out_fa, ref): """get fasta from precursor""" if not find_cmd("bedtools"): raise ValueError("bedtools not installed") with make_temp_directory() as temp: bed_file = os.path.join(temp, "file.bed") for nc, loci in loci.iteritems(): for l in loci: with open(bed_file, 'w') as bed_handle: logger.debug("get_fasta: loci %s" % l)
python
{ "resource": "" }
q13791
read_alignment
train
def read_alignment(out_sam, loci, seqs, out_file): """read which seqs map to which loci and return a tab separated file""" hits = defaultdict(list) with open(out_file, "w") as out_handle: samfile = pysam.Samfile(out_sam, "r") for a in samfile.fetch(): if not a.is_unmapped: nm = int([t[1] for t in a.tags if t[0] == "NM"][0]) a = makeBED(a) if not a: continue
python
{ "resource": "" }
q13792
_download_mirbase
train
def _download_mirbase(args, version="CURRENT"): """ Download files from mirbase """ if not args.hairpin or not args.mirna: logger.info("Working with version %s" % version) hairpin_fn = op.join(op.abspath(args.out), "hairpin.fa.gz") mirna_fn = op.join(op.abspath(args.out), "miRNA.str.gz") if not file_exists(hairpin_fn): cmd_h = "wget ftp://mirbase.org/pub/mirbase/%s/hairpin.fa.gz -O %s && gunzip -f !$" % (version,
python
{ "resource": "" }
q13793
_make_unique
train
def _make_unique(name, idx): """Make name unique in case only counts there""" p = re.compile(".[aA-zZ]+_x[0-9]+") if p.match(name): tags = name[1:].split("_x")
python
{ "resource": "" }
q13794
_filter_seqs
train
def _filter_seqs(fn): """Convert names of sequences to unique ids""" out_file = op.splitext(fn)[0] + "_unique.fa" idx = 0 if not file_exists(out_file): with open(out_file, 'w') as out_handle: with open(fn) as in_handle: for line in in_handle: if line.startswith("@") or line.startswith(">"): fixed_name
python
{ "resource": "" }
q13795
_read_precursor
train
def _read_precursor(precursor, sps): """ Load precursor file for that species """ hairpin = defaultdict(str) name = None with open(precursor) as in_handle: for line in in_handle: if line.startswith(">"):
python
{ "resource": "" }
q13796
_read_gtf
train
def _read_gtf(gtf): """ Load GTF file with precursor positions on genome """ if not gtf: return gtf db = defaultdict(list) with open(gtf) as in_handle: for line in in_handle: if line.startswith("#"):
python
{ "resource": "" }
q13797
_coord
train
def _coord(sequence, start, mirna, precursor, iso): """ Define t5 and t3 isomirs """ dif = abs(mirna[0] - start) if start < mirna[0]: iso.t5 = sequence[:dif].upper() elif start > mirna[0]: iso.t5 = precursor[mirna[0] - 1:mirna[0] - 1 + dif].lower() elif start == mirna[0]: iso.t5 = "NA" if dif > 4: logger.debug("start > 3 %s %s %s %s %s" % (start, len(sequence), dif, mirna, iso.format())) return None end = start + (len(sequence) - len(iso.add)) - 1 dif = abs(mirna[1] - end) if iso.add: sequence = sequence[:-len(iso.add)] # if dif > 3: # return None
python
{ "resource": "" }
q13798
_realign
train
def _realign(seq, precursor, start): """ The actual fn that will realign the sequence """ error = set() pattern_addition = [[1, 1, 0], [1, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 1, 1]] for pos in range(0, len(seq)): if seq[pos] != precursor[(start + pos)]: error.add(pos) subs, add = [], [] for e in error: if e < len(seq) - 3: subs.append([e, seq[e], precursor[start + e]]) pattern, error_add = [], [] for e in range(len(seq) - 3, len(seq)): if e in error:
python
{ "resource": "" }
q13799
_clean_hits
train
def _clean_hits(reads): """ Select only best matches """ new_reads = defaultdict(realign) for r in reads: world = {} sc = 0 for p in reads[r].precursors: world[p] = reads[r].precursors[p].get_score(len(reads[r].sequence)) if sc < world[p]: sc = world[p] new_reads[r] = reads[r] for p in world: logger.debug("score %s %s
python
{ "resource": "" }