text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def muc_request_voice(self):
"""
Request voice (participant role) in the room and wait for the request
to be sent.
The participant role allows occupants to send messages while the room
is in moderated mode.
There is no guarantee that the request will be granted. To detect that
voice has been granted, observe the :meth:`on_role_change` signal.
.. versionadded:: 0.8
"""
msg = aioxmpp.Message(
to=self._mucjid,
type_=aioxmpp.MessageType.NORMAL
)
data = aioxmpp.forms.Data(
aioxmpp.forms.DataType.SUBMIT,
)
data.fields.append(
aioxmpp.forms.Field(
type_=aioxmpp.forms.FieldType.HIDDEN,
var="FORM_TYPE",
values=["http://jabber.org/protocol/muc#request"],
),
)
data.fields.append(
aioxmpp.forms.Field(
type_=aioxmpp.forms.FieldType.LIST_SINGLE,
var="muc#role",
values=["participant"],
)
)
msg.xep0004_data.append(data)
yield from self.service.client.send(msg) | [
"def",
"muc_request_voice",
"(",
"self",
")",
":",
"msg",
"=",
"aioxmpp",
".",
"Message",
"(",
"to",
"=",
"self",
".",
"_mucjid",
",",
"type_",
"=",
"aioxmpp",
".",
"MessageType",
".",
"NORMAL",
")",
"data",
"=",
"aioxmpp",
".",
"forms",
".",
"Data",
... | 28.195122 | 20.97561 |
def create(cls, ip_version, datacenter, bandwidth, vm=None, vlan=None,
ip=None, background=False):
""" Create a public ip and attach it if vm is given. """
return Iface.create(ip_version, datacenter, bandwidth, vlan, vm, ip,
background) | [
"def",
"create",
"(",
"cls",
",",
"ip_version",
",",
"datacenter",
",",
"bandwidth",
",",
"vm",
"=",
"None",
",",
"vlan",
"=",
"None",
",",
"ip",
"=",
"None",
",",
"background",
"=",
"False",
")",
":",
"return",
"Iface",
".",
"create",
"(",
"ip_versi... | 58.2 | 13.8 |
def _field_name_from_uri(self, uri):
"""helper, returns the name of an attribute (without namespace prefix)
"""
# TODO - should use graph API
uri = str(uri)
parts = uri.split('#')
if len(parts) == 1:
return uri.split('/')[-1] or uri
return parts[-1] | [
"def",
"_field_name_from_uri",
"(",
"self",
",",
"uri",
")",
":",
"# TODO - should use graph API",
"uri",
"=",
"str",
"(",
"uri",
")",
"parts",
"=",
"uri",
".",
"split",
"(",
"'#'",
")",
"if",
"len",
"(",
"parts",
")",
"==",
"1",
":",
"return",
"uri",
... | 34.333333 | 7.555556 |
def get_release_id(self, package_name: str, version: str) -> bytes:
"""
Returns the 32 byte identifier of a release for the given package name and version,
if they are available on the current registry.
"""
validate_package_name(package_name)
validate_package_version(version)
self._validate_set_registry()
return self.registry._get_release_id(package_name, version) | [
"def",
"get_release_id",
"(",
"self",
",",
"package_name",
":",
"str",
",",
"version",
":",
"str",
")",
"->",
"bytes",
":",
"validate_package_name",
"(",
"package_name",
")",
"validate_package_version",
"(",
"version",
")",
"self",
".",
"_validate_set_registry",
... | 46.888889 | 14 |
def percentile(arr, percent):
"""
Calculate the given percentile of arr.
"""
arr = sorted(arr)
index = (len(arr) - 1) * percent
floor = math.floor(index)
ceil = math.ceil(index)
if floor == ceil:
return arr[int(index)]
low_value = arr[int(floor)] * (ceil - index)
high_value = arr[int(ceil)] * (index - floor)
return low_value + high_value | [
"def",
"percentile",
"(",
"arr",
",",
"percent",
")",
":",
"arr",
"=",
"sorted",
"(",
"arr",
")",
"index",
"=",
"(",
"len",
"(",
"arr",
")",
"-",
"1",
")",
"*",
"percent",
"floor",
"=",
"math",
".",
"floor",
"(",
"index",
")",
"ceil",
"=",
"mat... | 29.153846 | 8.692308 |
def parse_genes(transcripts):
"""Parse transcript information and get the gene information from there.
Use hgnc_id as identifier for genes and ensembl transcript id to identify transcripts
Args:
transcripts(iterable(dict))
Returns:
genes (list(dict)): A list with dictionaries that represents genes
"""
# Dictionary to group the transcripts by hgnc_id
genes_to_transcripts = {}
# List with all genes and there transcripts
genes = []
hgvs_identifier = None
canonical_transcript = None
exon = None
# Group all transcripts by gene
for transcript in transcripts:
# Check what hgnc_id a transcript belongs to
hgnc_id = transcript['hgnc_id']
hgnc_symbol = transcript['hgnc_symbol']
if (transcript['is_canonical'] and transcript.get('coding_sequence_name')):
hgvs_identifier = transcript.get('coding_sequence_name')
canonical_transcript = transcript['transcript_id']
exon = transcript['exon']
# If there is a identifier we group the transcripts under gene
if hgnc_id:
if hgnc_id in genes_to_transcripts:
genes_to_transcripts[hgnc_id].append(transcript)
else:
genes_to_transcripts[hgnc_id] = [transcript]
else:
if hgnc_symbol:
if hgnc_symbol in genes_to_transcripts:
genes_to_transcripts[hgnc_symbol].append(transcript)
else:
genes_to_transcripts[hgnc_symbol] = [transcript]
# We need to find out the most severe consequence in all transcripts
# and save in what transcript we found it
# Loop over all genes
for gene_id in genes_to_transcripts:
# Get the transcripts for a gene
gene_transcripts = genes_to_transcripts[gene_id]
# This will be a consequece from SO_TERMS
most_severe_consequence = None
# Set the most severe score to infinity
most_severe_rank = float('inf')
# The most_severe_transcript is a dict
most_severe_transcript = None
most_severe_region = None
most_severe_sift = None
most_severe_polyphen = None
# Loop over all transcripts for a gene to check which is most severe
for transcript in gene_transcripts:
hgnc_id = transcript['hgnc_id']
hgnc_symbol = transcript['hgnc_symbol']
# Loop over the consequences for a transcript
for consequence in transcript['functional_annotations']:
# Get the rank based on SO_TERM
# Lower rank is worse
new_rank = SO_TERMS[consequence]['rank']
if new_rank < most_severe_rank:
# If a worse consequence is found, update the parameters
most_severe_rank = new_rank
most_severe_consequence = consequence
most_severe_transcript = transcript
most_severe_sift = transcript['sift_prediction']
most_severe_polyphen = transcript['polyphen_prediction']
most_severe_region = SO_TERMS[consequence]['region']
gene = {
'transcripts': gene_transcripts,
'most_severe_transcript': most_severe_transcript,
'most_severe_consequence': most_severe_consequence,
'most_severe_sift': most_severe_sift,
'most_severe_polyphen': most_severe_polyphen,
'hgnc_id': hgnc_id,
'hgnc_symbol': hgnc_symbol,
'region_annotation': most_severe_region,
'hgvs_identifier': transcript['coding_sequence_name'],
'canonical_transcript': transcript['transcript_id'],
'exon': transcript['exon'],
}
genes.append(gene)
return genes | [
"def",
"parse_genes",
"(",
"transcripts",
")",
":",
"# Dictionary to group the transcripts by hgnc_id",
"genes_to_transcripts",
"=",
"{",
"}",
"# List with all genes and there transcripts",
"genes",
"=",
"[",
"]",
"hgvs_identifier",
"=",
"None",
"canonical_transcript",
"=",
... | 38.767677 | 19 |
def _intermediary_to_dot(tables, relationships):
""" Returns the dot source representing the database in a string. """
t = '\n'.join(t.to_dot() for t in tables)
r = '\n'.join(r.to_dot() for r in relationships)
return '{}\n{}\n{}\n}}'.format(GRAPH_BEGINNING, t, r) | [
"def",
"_intermediary_to_dot",
"(",
"tables",
",",
"relationships",
")",
":",
"t",
"=",
"'\\n'",
".",
"join",
"(",
"t",
".",
"to_dot",
"(",
")",
"for",
"t",
"in",
"tables",
")",
"r",
"=",
"'\\n'",
".",
"join",
"(",
"r",
".",
"to_dot",
"(",
")",
"... | 55 | 8.4 |
def searchTriples(expnums,ccd):
"""Given a list of exposure numbers, find all the KBOs in that set of exposures"""
import MOPfits,os
import MOPdbaccess
if len(expnums)!=3:
return(-1)
### Some program Constants
proc_file = open("proc-these-files","w")
proc_file.write("# Files to be planted and searched\n")
proc_file.write("# image fwhm plant\n")
import string
import os.path
filenames=[]
import pyfits
for expnum in expnums:
try:
mysql=MOPdbaccess.connect('bucket','cfhls','MYSQL')
bucket=mysql.cursor()
except:
raise TaskError, "mysql failed"
bucket.execute("SELECT obs_iq_refccd FROM exposure WHERE expnum=%s" , (expnum, ) )
row=bucket.fetchone()
mysql.close()
fwhm=row[0]
if not fwhm > 0:
fwhm=1.0
if int(ccd)<18:
cutout="[-*,-*]"
else:
cutout=None
filename=MOPfits.adGet(str(expnum)+"p",extno=int(ccd),cutout=cutout)
if not os.access(filename,os.R_OK):
raise TaskError, 'adGet Failed'
filename=os.path.splitext(filename)
filenames.append(filename[0])
proc_file.write("%s %f %s \n" % ( filename[0], fwhm/0.183, "no"))
proc_file.flush()
proc_file.close()
command="find.pl -p '' -d ./ "
try:
os.system(command)
except:
raise TaskErorr, "execute find"
file_extens=[
"cands.comb",
"measure3.cands.astrom",
"measure3.WARNING",
"measure3.astrom.scatter"]
if not os.access("find.OK",os.R_OK):
raise TaskError, "find failed"
astrom=filenames[0]+".measure3.cands.astrom"
if os.access(astrom,os.R_OK):
return(1)
else:
return(0) | [
"def",
"searchTriples",
"(",
"expnums",
",",
"ccd",
")",
":",
"import",
"MOPfits",
",",
"os",
"import",
"MOPdbaccess",
"if",
"len",
"(",
"expnums",
")",
"!=",
"3",
":",
"return",
"(",
"-",
"1",
")",
"### Some program Constants",
"proc_file",
"=",
"open",
... | 24.611111 | 21.555556 |
def as_dict(self):
"""
Json-serializable dict representation.
"""
d = {"vasp_version": self.vasp_version,
"has_vasp_completed": True,
"nsites": len(self.final_structure)}
comp = self.final_structure.composition
d["unit_cell_formula"] = comp.as_dict()
d["reduced_cell_formula"] = Composition(comp.reduced_formula).as_dict()
d["pretty_formula"] = comp.reduced_formula
symbols = [s.split()[1] for s in self.potcar_symbols]
symbols = [re.split(r"_", s)[0] for s in symbols]
d["is_hubbard"] = self.is_hubbard
d["hubbards"] = self.hubbards
unique_symbols = sorted(list(set(self.atomic_symbols)))
d["elements"] = unique_symbols
d["nelements"] = len(unique_symbols)
d["run_type"] = self.run_type
vin = {"incar": {k: v for k, v in self.incar.items()},
"crystal": self.final_structure.as_dict(),
"kpoints": self.kpoints.as_dict()}
actual_kpts = [{"abc": list(self.actual_kpoints[i]),
"weight": self.actual_kpoints_weights[i]}
for i in range(len(self.actual_kpoints))]
vin["kpoints"]["actual_points"] = actual_kpts
vin["potcar"] = [s.split(" ")[1] for s in self.potcar_symbols]
vin["potcar_spec"] = self.potcar_spec
vin["potcar_type"] = [s.split(" ")[0] for s in self.potcar_symbols]
vin["parameters"] = {k: v for k, v in self.parameters.items()}
vin["lattice_rec"] = self.lattice_rec.as_dict()
d["input"] = vin
vout = {"crystal": self.final_structure.as_dict(),
"efermi": self.efermi}
if self.eigenvalues:
eigen = defaultdict(dict)
for spin, values in self.eigenvalues.items():
for i, v in enumerate(values):
eigen[i][str(spin)] = v
vout["eigenvalues"] = eigen
(gap, cbm, vbm, is_direct) = self.eigenvalue_band_properties
vout.update(dict(bandgap=gap, cbm=cbm, vbm=vbm,
is_gap_direct=is_direct))
if self.projected_eigenvalues:
peigen = []
for i in range(len(eigen)):
peigen.append({})
for spin, v in self.projected_eigenvalues.items():
for kpoint_index, vv in enumerate(v):
if str(spin) not in peigen[kpoint_index]:
peigen[kpoint_index][str(spin)] = vv
vout['projected_eigenvalues'] = peigen
d['output'] = vout
return jsanitize(d, strict=True) | [
"def",
"as_dict",
"(",
"self",
")",
":",
"d",
"=",
"{",
"\"vasp_version\"",
":",
"self",
".",
"vasp_version",
",",
"\"has_vasp_completed\"",
":",
"True",
",",
"\"nsites\"",
":",
"len",
"(",
"self",
".",
"final_structure",
")",
"}",
"comp",
"=",
"self",
"... | 43.016393 | 16.229508 |
def handle_property(self, obj):
"""Handle a property event.
This function will set an attribute on an object if the event requires
it.
:param obj: A :py:class:`~turberfield.dialogue.model.Model.Property`
object.
:return: The supplied object.
"""
if obj.object is not None:
try:
setattr(obj.object, obj.attr, obj.val)
except AttributeError as e:
self.log.error(". ".join(getattr(e, "args", e) or e))
try:
print(
"{t.dim}{obj.object._name}.{obj.attr} = {obj.val!s}{t.normal}".format(
obj=obj, t=self.terminal
),
end="\n" * 2,
file=self.terminal.stream
)
except AttributeError as e:
self.log.error(". ".join(getattr(e, "args", e) or e))
return obj | [
"def",
"handle_property",
"(",
"self",
",",
"obj",
")",
":",
"if",
"obj",
".",
"object",
"is",
"not",
"None",
":",
"try",
":",
"setattr",
"(",
"obj",
".",
"object",
",",
"obj",
".",
"attr",
",",
"obj",
".",
"val",
")",
"except",
"AttributeError",
"... | 34.407407 | 19.814815 |
def revoke(self):
"""Revoke the current tokens then empty all stored tokens
This returns nothing since the endpoint return HTTP/200
whatever the result is...
Currently not working with JWT, left here for compatibility.
"""
if not self.refresh_token and not self.access_token:
raise AttributeError('No access/refresh token are defined.')
if self.refresh_token:
data = {
'token_type_hint': 'refresh_token',
'token': self.refresh_token,
}
else:
data = {
'token_type_hint': 'access_token',
'token': self.access_token,
}
request_data = self.__prepare_token_request(data, self.oauth_revoke)
self._session.post(**request_data)
self.access_token = None
self.refresh_token = None
self.token_expiry = None | [
"def",
"revoke",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"refresh_token",
"and",
"not",
"self",
".",
"access_token",
":",
"raise",
"AttributeError",
"(",
"'No access/refresh token are defined.'",
")",
"if",
"self",
".",
"refresh_token",
":",
"data",
"... | 34.333333 | 18.851852 |
def routeDefault(self, request, year=None):
"""Route a request to the default calendar view."""
eventsView = request.GET.get('view', self.default_view)
if eventsView in ("L", "list"):
return self.serveUpcoming(request)
elif eventsView in ("W", "weekly"):
return self.serveWeek(request, year)
else:
return self.serveMonth(request, year) | [
"def",
"routeDefault",
"(",
"self",
",",
"request",
",",
"year",
"=",
"None",
")",
":",
"eventsView",
"=",
"request",
".",
"GET",
".",
"get",
"(",
"'view'",
",",
"self",
".",
"default_view",
")",
"if",
"eventsView",
"in",
"(",
"\"L\"",
",",
"\"list\"",... | 44.777778 | 8.888889 |
def ols_covariance(self):
""" Creates OLS estimate of the covariance matrix
Returns
----------
The OLS estimate of the covariance matrix
"""
Y = np.array([reg[self.lags:reg.shape[0]] for reg in self.data])
return (1.0/(Y[0].shape[0]))*np.dot(self.residuals(Y),np.transpose(self.residuals(Y))) | [
"def",
"ols_covariance",
"(",
"self",
")",
":",
"Y",
"=",
"np",
".",
"array",
"(",
"[",
"reg",
"[",
"self",
".",
"lags",
":",
"reg",
".",
"shape",
"[",
"0",
"]",
"]",
"for",
"reg",
"in",
"self",
".",
"data",
"]",
")",
"return",
"(",
"1.0",
"/... | 35.8 | 24.5 |
def summary_table(pairs, key_header, descr_header="Description", width=78):
"""
List of one-liner strings containing a reStructuredText summary table
for the given pairs ``(name, object)``.
"""
from .lazy_text import rst_table, small_doc
max_width = width - max(len(k) for k, v in pairs)
table = [(k, small_doc(v, max_width=max_width)) for k, v in pairs]
return rst_table(table, (key_header, descr_header)) | [
"def",
"summary_table",
"(",
"pairs",
",",
"key_header",
",",
"descr_header",
"=",
"\"Description\"",
",",
"width",
"=",
"78",
")",
":",
"from",
".",
"lazy_text",
"import",
"rst_table",
",",
"small_doc",
"max_width",
"=",
"width",
"-",
"max",
"(",
"len",
"... | 46 | 13.777778 |
def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
"""
Process the MuTect vcf for accepted calls.
:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str
"""
mutect_vcf = job.fileStore.readGlobalFile(mutect_vcf)
with open(mutect_vcf, 'r') as infile, open(mutect_vcf + 'mutect_parsed.tmp', 'w') as outfile:
for line in infile:
line = line.strip()
if line.startswith('#'):
print(line, file=outfile)
continue
line = line.split('\t')
if line[6] != 'REJECT':
print('\t'.join(line), file=outfile)
return outfile.name | [
"def",
"process_mutect_vcf",
"(",
"job",
",",
"mutect_vcf",
",",
"work_dir",
",",
"univ_options",
")",
":",
"mutect_vcf",
"=",
"job",
".",
"fileStore",
".",
"readGlobalFile",
"(",
"mutect_vcf",
")",
"with",
"open",
"(",
"mutect_vcf",
",",
"'r'",
")",
"as",
... | 38.363636 | 17.363636 |
def set_error(self, error_shortmsg: str, error_longmsg: str):
""" Set the stage to error and add a message """
LOG.error(f"Update session: error in stage {self._stage.name}: "
f"{error_shortmsg}: {error_longmsg}")
self._error = Value(error_shortmsg, error_longmsg)
self.set_stage(Stages.ERROR) | [
"def",
"set_error",
"(",
"self",
",",
"error_shortmsg",
":",
"str",
",",
"error_longmsg",
":",
"str",
")",
":",
"LOG",
".",
"error",
"(",
"f\"Update session: error in stage {self._stage.name}: \"",
"f\"{error_shortmsg}: {error_longmsg}\"",
")",
"self",
".",
"_error",
... | 56.333333 | 15 |
def LogoPlot(sites, datatype, data, plotfile, nperline,
numberevery=10, allowunsorted=False, ydatamax=1.01,
overlay=None, fix_limits={}, fixlongname=False,
overlay_cmap=None, ylimits=None, relativestackheight=1,
custom_cmap='jet', map_metric='kd', noseparator=False,
underlay=False, scalebar=False):
"""Create sequence logo showing amino-acid or nucleotide preferences.
The heights of each letter is equal to the preference of
that site for that amino acid or nucleotide.
Note that stop codons may or may not be included in the logo
depending on whether they are present in *pi_d*.
CALLING VARIABLES:
* *sites* is a list of all of the sites that are being included
in the logo, as strings. They must be in natural sort or an error
will be raised **unless** *allowunsorted* is *True*. The sites
in the plot are ordered in the same arrangement
listed in *sites*. These should be **strings**, not integers.
* *datatype* should be one of the following strings:
* 'prefs' for preferences
* 'diffprefs' for differential preferences
* 'diffsel' for differential selection
* *data* is a dictionary that has a key for every entry in
*sites*. For every site *r* in *sites*, *sites[r][x]*
is the value for character *x*.
Preferences must sum to one; differential preferences to zero.
All sites must have the same set of characters. The characters
must be the set of nucleotides or amino acids with or without
stop codons.
* *plotfile* is a string giving the name of the created PDF file
of the logo plot.
It must end in the extension ``.pdf``.
* *nperline* is the number of sites per line. Often 40 to 80 are good values.
* *numberevery* is specifies how frequently we put labels for the sites on
x-axis.
* *allowunsorted* : if *True* then we allow the entries in *sites* to
**not** be sorted. This means that the logo plot will **not** have
sites in sorted order.
* *ydatamax* : meaningful only if *datatype* is 'diffprefs'. In this case, it gives
the maximum that the logo stacks extend in the positive and negative directions.
Cannot be smaller than the maximum extent of the differential preferences.
* *ylimits*: is **mandatory** if *datatype* is 'diffsel', and meaningless
otherwise. It is *(ymin, ymax)* where *ymax > 0 > ymin*, and gives extent
of the data in the positive and negative directions. Must encompass the
actual maximum and minimum of the data.
* *overlay* : make overlay bars that indicate other properties for
the sites. If you set to something other than `None`, it should be
a list giving one to three properties. Each property is a tuple:
*(prop_d, shortname, longname)* where:
- *prop_d* is a dictionary keyed by site numbers that are in *sites*.
For each *r* in *sites*, *prop_d[r]* gives the value of the property,
or if there is no entry in *prop_d* for *r*, then the property
is undefined and is colored white. Properties can either be:
* continuous: in this case, all of the values should be numbers.
* discrete : in this case, all of the values should be strings.
While in practice, if you have more than a few discrete
categories (different strings), the plot will be a mess.
- *shortname* : short name for the property; will not format well
if more than 4 or 5 characters.
- *longname* : longer name for property used on axes label. Can be the
same as *shortname* if you don't need a different long name.
- In the special case where both *shortname* and *longname* are
the string `wildtype`, then rather than an overlay bar we
right the one-character wildtype identity in `prop_d` for each
site.
* *fix_limits* is only meaningful if *overlay* is being used. In this case, for any
*shortname* in *overlay* that also keys an entry in *fix_limits*, we use
*fix_limits[shortname]* to set the limits for *shortname*. Specifically,
*fix_limits[shortname]* should be the 2-tuple *(ticks, ticknames)*. *ticks*
should be a list of tick locations (numbers) and *ticknames* should be a list of
the corresponding tick label for that tick.
* If *fixlongname* is *True*, then we use the *longname* in *overlay* exactly as written;
otherwise we add a parenthesis indicating the *shortname* for which this *longname*
stands.
* *overlay_cmap* can be the name of a valid *matplotlib.colors.Colormap*, such as the
string *jet* or *bwr*. Otherwise, it can be *None* and a (hopefully) good choice will
be made for you.
* *custom_cmap* can be the name of a valid *matplotlib.colors.Colormap* which will be
used to color amino-acid one-letter codes in the logoplot by the *map_metric* when
either 'kd' or 'mw' is used as *map_metric*.
* *relativestackheight* indicates how high the letter stack is relative to
the default. The default is multiplied by this number, so make it > 1
for a higher letter stack.
* *map_metric* specifies the amino-acid property metric used to map colors to amino-acid
letters. Valid options are 'kd' (Kyte-Doolittle hydrophobicity scale, default), 'mw'
(molecular weight), 'functionalgroup' (functional groups: small, nucleophilic, hydrophobic,
aromatic, basic, acidic, and amide), and 'charge' (charge at neutral pH). If 'charge' is used, then the
argument for *custom_cmap* will no longer be meaningful, since 'charge' uses its own
blue/black/red colormapping. Similarly, 'functionalgroup' uses its own colormapping.
* *noseparator* is only meaningful if *datatype* is 'diffsel' or 'diffprefs'.
If it set to *True*, then we do **not** print a black horizontal line to
separate positive and negative values.
* *underlay* if `True` then make an underlay rather than an overlay.
* *scalebar*: show a scale bar. If `False`, no scale bar shown. Otherwise
should be a 2-tuple of `(scalebarlen, scalebarlabel)`. Currently only
works when data is `diffsel`.
"""
assert datatype in ['prefs', 'diffprefs', 'diffsel'], "Invalid datatype {0}".format(datatype)
# check data, and get characters
assert sites, "No sites specified"
assert set(sites) == set(data.keys()), "Not a match between sites and the keys of data"
characters = list(data[sites[0]].keys())
aas = sorted(AA_TO_INDEX.keys())
if set(characters) == set(NT_TO_INDEX.keys()):
alphabet_type = 'nt'
elif set(characters) == set(aas) or set(characters) == set(aas + ['*']):
alphabet_type = 'aa'
else:
raise ValueError("Invalid set of characters in data. Does not specify either nucleotides or amino acids:\n%s" % str(characters))
for r in sites:
if set(data[r].keys()) != set(characters):
raise ValueError("Not all sites in data have the same set of characters")
firstblankchar = 'B' # character for first blank space for diffprefs / diffsel
assert firstblankchar not in characters, "firstblankchar in characters"
lastblankchar = 'b' # character for last blank space for diffprefs / diffsel
assert lastblankchar not in characters, "lastblankchar in characters"
separatorchar = '-' # separates positive and negative for diffprefs / diffsel
assert separatorchar not in characters, "lastblankchar in characters"
if noseparator:
separatorheight = 0
else:
separatorheight = 0.02 # height of separator as frac of total for diffprefs / diffsel
if os.path.splitext(plotfile)[1].lower() != '.pdf':
raise ValueError("plotfile must end in .pdf: %s" % plotfile)
if os.path.isfile(plotfile):
os.remove(plotfile) # remove existing plot
if not allowunsorted:
sorted_sites = natsort.natsorted([r for r in sites])
if sorted_sites != sites:
raise ValueError("sites is not properly sorted")
# Following are specifications of weblogo sizing taken from its documentation
stackwidth = 9.5 # stack width in points, not default size of 10.8, but set to this in weblogo call below
barheight = 5.5 # height of bars in points if using overlay
barspacing = 2.0 # spacing between bars in points if using overlay
stackaspectratio = 4.4 # ratio of stack height:width, doesn't count part going over maximum value of 1
assert relativestackheight > 0, "relativestackheight must be > 0"
stackaspectratio *= relativestackheight
if overlay:
if len(overlay) > 3:
raise ValueError("overlay cannot have more than 3 entries")
ymax = (stackaspectratio * stackwidth + len(overlay) * (barspacing + barheight)) / float(stackaspectratio * stackwidth)
aspectratio = ymax * stackaspectratio # effective aspect ratio for full range
else:
ymax = 1.0
aspectratio = stackaspectratio
rmargin = 11.5 # right margin in points, fixed by weblogo
stackheightmargin = 16 # margin between stacks in points, fixed by weblogo
showscalebar = False
try:
# write data into transfacfile (a temporary file)
(fd, transfacfile) = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
ordered_alphabets = {} # keyed by site index (0, 1, ...) with values ordered lists for characters from bottom to top
if datatype == 'prefs':
chars_for_string = characters
f.write('ID ID\nBF BF\nP0 %s\n' % ' '.join(chars_for_string))
for (isite, r) in enumerate(sites):
f.write('%d %s\n' % (isite, ' '.join([str(data[r][x]) for x in characters])))
pi_r = [(data[r][x], x) for x in characters]
pi_r.sort()
ordered_alphabets[isite] = [tup[1] for tup in pi_r] # order from smallest to biggest
elif datatype == 'diffprefs':
chars_for_string = characters + [firstblankchar, lastblankchar, separatorchar]
ydatamax *= 2.0 # maximum possible range of data, multiply by two for range
f.write('ID ID\nBF BF\nP0 %s\n' % ' '.join(chars_for_string))
for (isite, r) in enumerate(sites):
positivesum = sum([data[r][x] for x in characters if data[r][x] > 0]) + separatorheight / 2.0
negativesum = sum([data[r][x] for x in characters if data[r][x] < 0]) - separatorheight / 2.0
if abs(positivesum + negativesum) > 1.0e-3:
raise ValueError("Differential preferences sum of %s is not close to zero for site %s" % (positivesum + negativesum, r))
if 2.0 * positivesum > ydatamax:
raise ValueError("You need to increase ydatamax: the total differential preferences sum to more than the y-axis limits. Right now, ydatamax is %.3f while the total differential preferences are %.3f" % (ydatamax, 2.0 * positivesum))
f.write('%d' % isite)
deltapi_r = []
for x in characters:
deltapi_r.append((data[r][x], x))
f.write(' %s' % (abs(data[r][x]) / float(ydatamax)))
deltapi_r.sort()
firstpositiveindex = 0
while deltapi_r[firstpositiveindex][0] < 0:
firstpositiveindex += 1
ordered_alphabets[isite] = [firstblankchar] + [tup[1] for tup in deltapi_r[ : firstpositiveindex]] + [separatorchar] + [tup[1] for tup in deltapi_r[firstpositiveindex : ]] + [lastblankchar] # order from most negative to most positive with blank characters and separators
f.write(' %g %g %g\n' % (0.5 * (ydatamax + 2.0 * negativesum) / ydatamax, 0.5 * (ydatamax + 2.0 * negativesum) / ydatamax, separatorheight)) # heights for blank charactors and separators
elif datatype == 'diffsel':
assert ylimits, "You must specify ylimits if using diffsel"
(dataymin, dataymax) = ylimits
assert dataymax > 0 > dataymin, "Invalid ylimits of {0}".format(ylimits)
yextent = float(dataymax - dataymin)
separatorheight *= yextent
chars_for_string = characters + [firstblankchar, lastblankchar, separatorchar]
f.write('ID ID\nBF BF\nP0 {0}\n'.format(' '.join(chars_for_string)))
for (isite, r) in enumerate(sites):
positivesum = sum([data[r][x] for x in characters if data[r][x] > 0]) + separatorheight / 2.0
negativesum = sum([data[r][x] for x in characters if data[r][x] < 0]) - separatorheight / 2.0
assert positivesum <= dataymax, "Data exceeds ylimits in positive direction"
assert negativesum >= dataymin, "Data exceeds ylimits in negative direction"
f.write('{0}'.format(isite))
diffsel_r = []
for x in characters:
diffsel_r.append((data[r][x], x))
f.write(' {0}'.format(abs(data[r][x]) / yextent))
diffsel_r.sort()
firstpositiveindex = 0
while diffsel_r[firstpositiveindex][0] < 0:
firstpositiveindex += 1
ordered_alphabets[isite] = [firstblankchar] + [tup[1] for tup in diffsel_r[ : firstpositiveindex]] + [separatorchar] + [tup[1] for tup in diffsel_r[firstpositiveindex : ]] + [lastblankchar] # order from most negative to most positive with blank characters and separators
f.write(' %g %g %g\n' % ((negativesum - dataymin) / yextent, (dataymax - positivesum) / yextent, separatorheight / yextent)) # heights for blank charactors and separators
# height of one unit on y-axis in points
heightofone = stackwidth * stackaspectratio / yextent
assert heightofone > 0
if scalebar:
showscalebar = (heightofone * scalebar[0], scalebar[1])
else:
raise ValueError("Invalid datatype of %s" % datatype)
f.close()
# create web logo
charstring = ''.join(chars_for_string)
assert len(charstring) == len(chars_for_string), "Length of charstring doesn't match length of chars_for_string. Do you have unallowable multi-letter characters?\n%s" % (str(chars_for_string))
logoprior = weblogolib.parse_prior('equiprobable', charstring, 0)
motif = _my_Motif.read_transfac(open(transfacfile), charstring)
logodata = weblogolib.LogoData.from_counts(motif.alphabet, motif, logoprior)
logo_options = weblogolib.LogoOptions()
logo_options.fineprint = None
logo_options.stacks_per_line = nperline
logo_options.stack_aspect_ratio = aspectratio
logo_options.stack_width = stackwidth
logo_options.unit_name = 'probability'
logo_options.show_yaxis = False
logo_options.yaxis_scale = ymax
if alphabet_type == 'aa':
map_functions = {'kd':KyteDoolittleColorMapping,
'mw': MWColorMapping,
'charge' : ChargeColorMapping,
'functionalgroup':FunctionalGroupColorMapping}
map_fcn = map_functions[map_metric]
(cmap, colormapping, mapper) = map_fcn(maptype=custom_cmap)
elif alphabet_type == 'nt':
colormapping = {}
colormapping['A'] = '#008000'
colormapping['T'] = '#FF0000'
colormapping['C'] = '#0000FF'
colormapping['G'] = '#FFA500'
else:
raise ValueError("Invalid alphabet_type %s" % alphabet_type)
colormapping[firstblankchar] = colormapping[lastblankchar] = '#000000' # black, but color doesn't matter as modified weblogo code replaces with empty space
colormapping[separatorchar] = '#000000' # black
color_scheme = weblogolib.colorscheme.ColorScheme()
for x in chars_for_string:
if hasattr(color_scheme, 'rules'):
color_scheme.rules.append(weblogolib.colorscheme.SymbolColor(x, colormapping[x], "'%s'" % x))
else:
# this part is needed for weblogo 3.4
color_scheme.groups.append(weblogolib.colorscheme.ColorGroup(x, colormapping[x], "'%s'" % x))
logo_options.color_scheme = color_scheme
logo_options.annotate = [{True:r, False:''}[0 == isite % numberevery] for (isite, r) in enumerate(sites)]
logoformat = weblogolib.LogoFormat(logodata, logo_options)
# _my_pdf_formatter is modified from weblogo version 3.4 source code
# to allow custom ordering of the symbols.
pdf = _my_pdf_formatter(logodata, logoformat, ordered_alphabets)
with open(plotfile, 'wb') as f:
f.write(pdf)
assert os.path.isfile(plotfile), "Failed to find expected plotfile %s" % plotfile
finally:
# close if still open
try:
f.close()
except:
pass
# remove temporary file
if os.path.isfile(transfacfile):
os.remove(transfacfile)
# now build the overlay
if overlay or showscalebar:
try:
(fdoverlay, overlayfile) = tempfile.mkstemp(suffix='.pdf')
(fdmerged, mergedfile) = tempfile.mkstemp(suffix='.pdf')
foverlay = os.fdopen(fdoverlay, 'wb')
foverlay.close() # close, but we still have the path overlayfile...
fmerged = os.fdopen(fdmerged, 'wb')
logoheight = stackwidth * stackaspectratio + stackheightmargin
LogoOverlay(sites, overlayfile, overlay, nperline, sitewidth=stackwidth, rmargin=rmargin, logoheight=logoheight, barheight=barheight, barspacing=barspacing, fix_limits=fix_limits, fixlongname=fixlongname, overlay_cmap=overlay_cmap, underlay=underlay, scalebar=showscalebar)
plotfile_f = open(plotfile, 'rb')
plot = PyPDF2.PdfFileReader(plotfile_f).getPage(0)
overlayfile_f = open(overlayfile, 'rb')
overlaypdf = PyPDF2.PdfFileReader(overlayfile_f).getPage(0)
xshift = overlaypdf.artBox[2] - plot.artBox[2]
yshift = (barheight + barspacing) * len(overlay) - 0.5 * barspacing
overlaypdf.mergeTranslatedPage(plot, xshift,
yshift * int(underlay), expand=True)
overlaypdf.compressContentStreams()
output = PyPDF2.PdfFileWriter()
output.addPage(overlaypdf)
output.write(fmerged)
fmerged.close()
shutil.move(mergedfile, plotfile)
finally:
try:
plotfile_f.close()
except:
pass
try:
overlayfile_f.close()
except:
pass
try:
foverlay.close()
except:
pass
try:
fmerged.close()
except:
pass
for fname in [overlayfile, mergedfile]:
if os.path.isfile(fname):
os.remove(fname) | [
"def",
"LogoPlot",
"(",
"sites",
",",
"datatype",
",",
"data",
",",
"plotfile",
",",
"nperline",
",",
"numberevery",
"=",
"10",
",",
"allowunsorted",
"=",
"False",
",",
"ydatamax",
"=",
"1.01",
",",
"overlay",
"=",
"None",
",",
"fix_limits",
"=",
"{",
... | 53.580737 | 29.8017 |
def locked_put(self, credentials):
        """Store *credentials* in the Django datastore.

        Looks up (creating if necessary) the row whose ``self.key_name``
        column equals ``self.key_value``, writes the credentials onto its
        ``self.property_name`` attribute, and saves the row.

        Args:
            credentials: Credentials, the credentials to store.
        """
        lookup = {self.key_name: self.key_value}
        entity, _ = self.model_class.objects.get_or_create(**lookup)
        setattr(entity, self.property_name, credentials)
        entity.save()
"def",
"locked_put",
"(",
"self",
",",
"credentials",
")",
":",
"entity",
",",
"_",
"=",
"self",
".",
"model_class",
".",
"objects",
".",
"get_or_create",
"(",
"*",
"*",
"{",
"self",
".",
"key_name",
":",
"self",
".",
"key_value",
"}",
")",
"setattr",
... | 32.545455 | 17.818182 |
def _serialize_scalar_from_string_representation_factory(type_name, types, str_func=str):
    """Build a serializer that renders an Ion scalar via its string form.

    Args:
        type_name (str): The name of the Ion type (used to name the function).
        types (Union[Sequence[type],type]): Python types the value must match.
        str_func (Optional[Callable]): Conversion function; defaults to ``str``.

    Returns:
        function: Serializer turning scalar events of this type into Ion text bytes.
    """
    def serialize(ion_event):
        # Validate first so a bad value raises before any text is produced.
        validate_scalar_value(ion_event.value, types)
        return six.b(str_func(ion_event.value))

    serialize.__name__ = '_serialize_' + type_name
    return serialize
"def",
"_serialize_scalar_from_string_representation_factory",
"(",
"type_name",
",",
"types",
",",
"str_func",
"=",
"str",
")",
":",
"def",
"serialize",
"(",
"ion_event",
")",
":",
"value",
"=",
"ion_event",
".",
"value",
"validate_scalar_value",
"(",
"value",
",... | 42.470588 | 23.470588 |
def url(self):
        """:class:`str`: The URL to the Tibia.com page of the house.

        ``None`` when either the house id or world is missing.
        """
        if self.id and self.world:
            return self.get_url(self.id, self.world)
        return None
"def",
"url",
"(",
"self",
")",
":",
"return",
"self",
".",
"get_url",
"(",
"self",
".",
"id",
",",
"self",
".",
"world",
")",
"if",
"self",
".",
"id",
"and",
"self",
".",
"world",
"else",
"None"
] | 56.333333 | 23.333333 |
def smart_search_pool(self):
        """ Perform a smart pool search.

            The "smart" search function tries to extract a query from
            a text string. This query is then passed to the search_pool
            function, which performs the search.

            Reads the query string and optional search options from the
            request JSON body and returns a JSON-encoded result. On a
            backend NipapError a JSON error object is returned instead.
        """

        search_options = {}
        # Copy only the recognized options from the request body.
        for opt in ('query_id', 'max_result', 'offset'):
            if opt in request.json:
                search_options[opt] = request.json[opt]

        try:
            result = Pool.smart_search(request.json['query_string'],
                search_options
                )
            # Remove error key in result from backend as it interferes with the
            # error handling of the web interface.
            # TODO: Reevaluate how to deal with different types of errors; soft
            # errors like query string parser errors and hard errors like lost
            # database.
            del result['error']
        except NipapError as e:
            # BUG FIX: 'except NipapError, e' is Python-2-only syntax; the
            # 'as' form works on Python 2.6+ and Python 3.
            return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})

        return json.dumps(result, cls=NipapJSONEncoder)
"def",
"smart_search_pool",
"(",
"self",
")",
":",
"search_options",
"=",
"{",
"}",
"if",
"'query_id'",
"in",
"request",
".",
"json",
":",
"search_options",
"[",
"'query_id'",
"]",
"=",
"request",
".",
"json",
"[",
"'query_id'",
"]",
"if",
"'max_result'",
... | 38.71875 | 22.6875 |
def start(self):
        """Start discovering and listening to connections.

        Raises:
            NSQException: if the producer has already been closed.
        """
        if self._state == CLOSED:
            raise NSQException('producer already closed')

        if self.is_running:
            # warning() is the non-deprecated spelling of warn() (same behavior)
            self.logger.warning('producer already started')
            return

        self.logger.debug('starting producer...')
        self._state = RUNNING

        # Open a TCP connection to every configured nsqd ("host:port" strings).
        for address in self.nsqd_tcp_addresses:
            address, port = address.split(':')
            self.connect_to_nsqd(address, int(port))
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_state",
"==",
"CLOSED",
":",
"raise",
"NSQException",
"(",
"'producer already closed'",
")",
"if",
"self",
".",
"is_running",
":",
"self",
".",
"logger",
".",
"warn",
"(",
"'producer already started... | 32.6 | 17.6 |
def get_hfs_accounts(netid):
    """
    Return a restclients.models.hfs.HfsAccounts object for the given uwnetid.
    """
    response = get_resource(ACCOUNTS_URL.format(uwnetid=netid))
    return _object_from_json(response)
"def",
"get_hfs_accounts",
"(",
"netid",
")",
":",
"url",
"=",
"ACCOUNTS_URL",
".",
"format",
"(",
"uwnetid",
"=",
"netid",
")",
"response",
"=",
"get_resource",
"(",
"url",
")",
"return",
"_object_from_json",
"(",
"response",
")"
] | 33 | 8.714286 |
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps.

  Builds the evaluation graph (dataset inputs, inference logits, top-1 and
  top-5 prediction ops), creates a Saver over the exponential-moving-average
  shadow variables, then repeatedly delegates one evaluation pass to
  _eval_once (defined elsewhere in this file), sleeping
  FLAGS.eval_interval_secs between passes unless FLAGS.run_once is set.

  Args:
    dataset: Dataset object providing the evaluation images and labels.
  """
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, labels = image_processing.inputs(dataset)
    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes() + 1
    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits, _ = inception.inference(images, num_classes)
    # Calculate predictions.
    top_1_op = tf.nn.in_top_k(logits, labels, 1)
    top_5_op = tf.nn.in_top_k(logits, labels, 5)
    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()
    # Attach the graph definition so TensorBoard can display it.
    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                           graph_def=graph_def)
    # Evaluate forever (or once), one checkpoint pass per iteration.
    while True:
      _eval_once(saver, summary_writer, top_1_op, top_5_op, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
"def",
"evaluate",
"(",
"dataset",
")",
":",
"with",
"tf",
".",
"Graph",
"(",
")",
".",
"as_default",
"(",
")",
":",
"# Get images and labels from the dataset.",
"images",
",",
"labels",
"=",
"image_processing",
".",
"inputs",
"(",
"dataset",
")",
"# Number of... | 38.111111 | 20.277778 |
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
    """
    Coerce value to bytes.

    ``None`` passes through unchanged; byte-like values are copied to
    ``bytes``; text is encoded with *charset*/*errors*; anything else raises
    ``TypeError``.

    >>> a = coerce_to_bytes('hello')
    >>> assert isinstance(a, bytes)
    >>> a = coerce_to_bytes(b'hello')
    >>> assert isinstance(a, bytes)
    >>> a = coerce_to_bytes(None)
    >>> assert a is None
    >>> coerce_to_bytes(object())
    Traceback (most recent call last):
        ...
    TypeError: Cannot coerce to bytes
    """
    if x is None:
        return None
    # Pick the byte-like / text types for the running interpreter.
    if sys.version_info[0] == 2:  # pragma: nocover
        byte_like = (bytes, bytearray, buffer)
        text_type = unicode
    else:  # pragma: nocover
        byte_like = (bytes, bytearray, memoryview)
        text_type = str
    if isinstance(x, byte_like):
        return bytes(x)
    if isinstance(x, text_type):
        return x.encode(charset, errors)
    raise TypeError('Cannot coerce to bytes')
"def",
"coerce_to_bytes",
"(",
"x",
",",
"charset",
"=",
"sys",
".",
"getdefaultencoding",
"(",
")",
",",
"errors",
"=",
"'strict'",
")",
":",
"PY2",
"=",
"sys",
".",
"version_info",
"[",
"0",
"]",
"==",
"2",
"if",
"PY2",
":",
"# pragma: nocover",
"if"... | 30.909091 | 12.545455 |
def intersect_with(self, polygon):
        """
        Calculates the intersection between the polygons in this surface
        and another polygon, in the z=0 projection, relying on the
        ``shapely.Polygon.intersects()``/``intersection()`` methods.

        :param polygon: Polygon to intersect with the Surface.
        :type polygon: pyny.Polygon
        :returns: Intersection contours (z=0) keyed by the index of the
            surface polygon they come from.
        :rtype: dict of ndarrays
        """
        shapely_target = polygon.get_shapely()
        result = {}
        for index, member in enumerate(self):
            shapely_member = member.get_shapely()
            if shapely_target.intersects(shapely_member):
                overlap = shapely_target.intersection(shapely_member)
                # Drop the duplicated closing vertex of the exterior ring.
                result[index] = np.array(list(overlap.exterior.coords))[:-1]
        return result
"def",
"intersect_with",
"(",
"self",
",",
"polygon",
")",
":",
"intersections",
"=",
"{",
"}",
"for",
"i",
",",
"poly",
"in",
"enumerate",
"(",
"self",
")",
":",
"if",
"polygon",
".",
"get_shapely",
"(",
")",
".",
"intersects",
"(",
"poly",
".",
"ge... | 45.130435 | 20.086957 |
def resample(self,N,**kwargs):
        """Draw ``N`` random samples from the double-gaussian distribution.

        Values below ``mu`` use half-normals of width ``siglo``; values
        above use width ``sighi``.  Each sample picks a side with
        probability proportional to that side's width.
        """
        # Keep the original order of RNG calls (lo, hi, uniform).
        below = self.mu - np.absolute(rand.normal(size=N) * self.siglo)
        above = self.mu + np.absolute(rand.normal(size=N) * self.sighi)
        u = rand.random(size=N)
        p_hi = float(self.sighi) / (self.sighi + self.siglo)
        use_hi = (u < p_hi)
        use_lo = (u >= p_hi)
        samples = np.zeros(N)
        samples[use_hi] = above[use_hi]
        samples[use_lo] = below[use_lo]
        return samples
"def",
"resample",
"(",
"self",
",",
"N",
",",
"*",
"*",
"kwargs",
")",
":",
"lovals",
"=",
"self",
".",
"mu",
"-",
"np",
".",
"absolute",
"(",
"rand",
".",
"normal",
"(",
"size",
"=",
"N",
")",
"*",
"self",
".",
"siglo",
")",
"hivals",
"=",
... | 35.714286 | 18.642857 |
def list_nodes_full(call=None):
    '''
    Return a list of the instances that are on the provider.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -F my-qingcloud
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f or --function.'
        )

    zone = _get_specified_zone()

    params = {
        'action': 'DescribeInstances',
        'zone': zone,
        'status': ['pending', 'running', 'stopped', 'suspended'],
    }
    items = query(params=params)
    log.debug('Total %s instances found in zone %s', items['total_count'], zone)

    result = {}
    if items['total_count'] == 0:
        return result

    # Merge the normalized salt-cloud fields into each raw node record,
    # keyed by instance id.
    for node in items['instance_set']:
        normalized_node = _show_normalized_node(node)
        node.update(normalized_node)
        result[node['instance_id']] = node

    # Strip any ':driver' suffix from the active provider name.
    provider = __active_provider_name__ or 'qingcloud'
    if ':' in provider:
        provider = provider.split(':')[0]

    __opts__['update_cachedir'] = True
    __utils__['cloud.cache_node_list'](result, provider, __opts__)

    return result
"def",
"list_nodes_full",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes_full function must be called with -f or --function.'",
")",
"zone",
"=",
"_get_specified_zone",
"(",
")",
"params",
... | 24.173913 | 23.652174 |
def levinson_durbin(acdata, order=None):
  """
  Solve the Yule-Walker linear system of equations.
  They're given by:
  .. math::
      R . a = r
  where :math:`R` is a simmetric Toeplitz matrix where each element are lags
  from the given autocorrelation list. :math:`R` and :math:`r` are defined
  (Python indexing starts with zero and slices don't include the last
  element):
  .. math::
      R[i][j] = acdata[abs(j - i)]
      r = acdata[1 : order + 1]
  Parameters
  ----------
  acdata :
    Autocorrelation lag list, commonly the ``acorr`` function output.
  order :
    The order of the resulting ZFilter object. Defaults to
    ``len(acdata) - 1``.
  Returns
  -------
  A FIR filter, as a ZFilter object. The mean squared error over the given
  data (variance of the white noise) is in its "error" attribute.
  See Also
  --------
  acorr:
    Calculate the autocorrelation of a given block.
  lpc :
    Calculate the Linear Predictive Coding (LPC) coefficients.
  parcor :
    Partial correlation coefficients (PARCOR), or reflection coefficients,
    relative to the lattice implementation of a filter, obtained by reversing
    the Levinson-Durbin algorithm.
  Examples
  --------
  >>> data = [2, 2, 0, 0, -1, -1, 0, 0, 1, 1]
  >>> acdata = acorr(data)
  >>> acdata
  [12, 6, 0, -3, -6, -3, 0, 2, 4, 2]
  >>> ldfilt = levinson_durbin(acorr(data), 3)
  >>> ldfilt
  1 - 0.625 * z^-1 + 0.25 * z^-2 + 0.125 * z^-3
  >>> ldfilt.error # Squared! See lpc for more information about this
  7.875
  Notes
  -----
  The Levinson-Durbin algorithm used to solve the equations needs
  :math:`O(order^2)` floating point operations.
  """
  if order is None:
    order = len(acdata) - 1
  elif order >= len(acdata):
    # Not enough autocorrelation lags for the requested order: zero-pad.
    acdata = Stream(acdata).append(0).take(order + 1)
  # Inner product for filters based on above statistics
  # <a, b>_R with R the Toeplitz autocorrelation matrix; operates on the
  # numerator coefficient lists of the ZFilter operands.
  def inner(a, b): # Be careful, this depends on acdata !!!
    return sum(acdata[abs(i-j)] * ai * bj
               for i, ai in enumerate(a.numlist)
               for j, bj in enumerate(b.numlist)
              )
  try:
    A = ZFilter(1)
    # Order-recursive update: at step m, B is A time-reversed and delayed
    # (the backward predictor); subtracting A's projection onto B enforces
    # orthogonality under the inner product above.
    # NOTE(review): 'xrange' here relies on the library's own py2/py3 compat
    # layer being in scope — do not change to 'range' in isolation.
    for m in xrange(1, order + 1):
      B = A(1 / z) * z ** -m
      A -= inner(A, z ** -m) / inner(B, B) * B
  except ZeroDivisionError:
    # inner(B, B) == 0 means the recursion cannot continue.
    raise ParCorError("Can't find next PARCOR coefficient")
  # Prediction error power (variance of the residual white noise).
  A.error = inner(A, A)
  return A
"def",
"levinson_durbin",
"(",
"acdata",
",",
"order",
"=",
"None",
")",
":",
"if",
"order",
"is",
"None",
":",
"order",
"=",
"len",
"(",
"acdata",
")",
"-",
"1",
"elif",
"order",
">=",
"len",
"(",
"acdata",
")",
":",
"acdata",
"=",
"Stream",
"(",
... | 26.235294 | 24.423529 |
def execute_go_cmd(self, cmd, gopath=None, args=None, env=None,
                   workunit_factory=None, workunit_name=None, workunit_labels=None, **kwargs):
    """Runs a Go command that is optionally targeted to a Go workspace.

    If a `workunit_factory` is supplied the command will run in a work unit context.

    :param string cmd: Go command to execute, e.g. 'test' for `go test`
    :param string gopath: An optional $GOPATH which points to a valid Go workspace from which to run
                          the command.
    :param list args: An optional list of arguments and flags to pass to the Go command.
    :param dict env: A custom environment to launch the Go command in. If `None` the current
                     environment is used.
    :param workunit_factory: An optional callable that can produce a `WorkUnit` context
    :param string workunit_name: An optional name for the work unit; defaults to the `cmd`
    :param list workunit_labels: An optional sequence of labels for the work unit.
    :param kwargs: Keyword arguments to pass through to `subprocess.Popen`.
    :returns: A tuple of the exit code and the go command that was run.
    :rtype: (int, :class:`GoDistribution.GoCommand`)
    """
    go_cmd = self.create_go_cmd(cmd, gopath=gopath, args=args)
    if workunit_factory is None:
      # BUG FIX: `env` was previously ignored on this path even though it is a
      # documented parameter; pass it through like the workunit branch does.
      # NOTE(review): this branch returns only the exit code while the branch
      # below returns (returncode, go_cmd) as documented — preserved as-is for
      # backward compatibility; confirm with callers before unifying.
      return go_cmd.spawn(env=env, **kwargs).wait()
    else:
      name = workunit_name or cmd
      labels = [WorkUnitLabel.TOOL] + (workunit_labels or [])
      with workunit_factory(name=name, labels=labels, cmd=str(go_cmd)) as workunit:
        process = go_cmd.spawn(env=env,
                               stdout=workunit.output('stdout'),
                               stderr=workunit.output('stderr'),
                               **kwargs)
        returncode = process.wait()
        workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
        return returncode, go_cmd
"def",
"execute_go_cmd",
"(",
"self",
",",
"cmd",
",",
"gopath",
"=",
"None",
",",
"args",
"=",
"None",
",",
"env",
"=",
"None",
",",
"workunit_factory",
"=",
"None",
",",
"workunit_name",
"=",
"None",
",",
"workunit_labels",
"=",
"None",
",",
"*",
"*"... | 57.363636 | 25.969697 |
def hash(self):
        """Generate a deterministic hex digest for this object.

        Hashes the pandas object (including its index) row-wise, then folds
        the per-row hashes into a single MD5 hex string.
        """
        row_hashes = hash_pandas_object(self, index=True)
        return hashlib.md5(row_hashes.values.tobytes()).hexdigest()
"def",
"hash",
"(",
"self",
")",
":",
"h",
"=",
"hash_pandas_object",
"(",
"self",
",",
"index",
"=",
"True",
")",
"return",
"hashlib",
".",
"md5",
"(",
"h",
".",
"values",
".",
"tobytes",
"(",
")",
")",
".",
"hexdigest",
"(",
")"
] | 39.25 | 12.75 |
def get_person_from_legacy_format(profile_record):
    """
    Given a whole profile, convert it into zone-file ("Person") format.
    In the full profile JSON, this method operates on the 'data_record'
    object.

    @profile_record is a dict that contains the legacy profile data.

    Return a dict with the zone-file formatting.

    Raises ValueError if @profile_record is not a legacy profile.
    """
    if not is_profile_in_legacy_format(profile_record):
        raise ValueError("Not a legacy profile")

    profile = profile_record
    try:
        # Normalize to plain JSON-compatible types; keep the original object
        # if it cannot be serialized.
        profile = json.loads(json.dumps(profile))
    except ValueError:
        pass

    # Text types: (str, unicode) on Python 2, just str on Python 3.
    # (has_key() and bare 'unicode' were Python-2-only; this works on both.)
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)

    images = []
    accounts = []

    profile_data = {
        "@type": "Person"
    }

    if isinstance(profile.get("name"), dict) and "formatted" in profile["name"]:
        profile_data["name"] = profile["name"]["formatted"]

    if "bio" in profile:
        profile_data["description"] = profile["bio"]

    if isinstance(profile.get("location"), dict) and "formatted" in profile["location"]:
        profile_data["address"] = {
            "@type": "PostalAddress",
            "addressLocality": profile["location"]["formatted"]
        }

    # avatar and cover become ImageObject entries with the same shape.
    for image_name in ["avatar", "cover"]:
        if isinstance(profile.get(image_name), dict) and "url" in profile[image_name]:
            images.append({
                "@type": "ImageObject",
                "name": image_name,
                "contentUrl": profile[image_name]["url"]
            })

    if len(images) > 0:
        profile_data["image"] = images

    if isinstance(profile.get("website"), string_types):
        profile_data["website"] = [{
            "@type": "WebSite",
            "url": profile["website"]
        }]

    for service_name in ["twitter", "facebook", "github"]:
        if service_name in profile:
            accounts.append(
                format_account(service_name, profile[service_name])
            )

    if isinstance(profile.get("bitcoin"), dict) and "address" in profile["bitcoin"]:
        accounts.append({
            "@type": "Account",
            "role": "payment",
            "service": "bitcoin",
            "identifier": profile["bitcoin"]["address"]
        })

    if "auth" in profile:
        # BUG FIX: the original checked `type(profile["auth"]) == dict` and then
        # indexed element [0]; "auth" in legacy profiles is a *list* of dicts,
        # so the dict check could never match JSON-decoded data.
        if len(profile["auth"]) > 0 and isinstance(profile["auth"], list):
            if "publicKeychain" in profile["auth"][0]:
                accounts.append({
                    "@type": "Account",
                    "role": "key",
                    "service": "bip32",
                    "identifier": profile["auth"][0]["publicKeychain"]
                })

    if isinstance(profile.get("pgp"), dict) and "url" in profile["pgp"] \
            and "fingerprint" in profile["pgp"]:
        accounts.append({
            "@type": "Account",
            "role": "key",
            "service": "pgp",
            "identifier": profile["pgp"]["fingerprint"],
            "contentUrl": profile["pgp"]["url"]
        })

    profile_data["account"] = accounts

    return profile_data
"def",
"get_person_from_legacy_format",
"(",
"profile_record",
")",
":",
"if",
"not",
"is_profile_in_legacy_format",
"(",
"profile_record",
")",
":",
"raise",
"ValueError",
"(",
"\"Not a legacy profile\"",
")",
"profile",
"=",
"profile_record",
"try",
":",
"profile",
... | 31.428571 | 19.904762 |
def connect_to_database(host=None, port=None, connect=False, **kwargs):
    """
    Explicitly open the application's database connection; if this is never
    called, a connection is created the first time it is needed.  Accepts
    arguments identical to ``pymongo.MongoClient.__init__``.

    @param host: the hostname to connect to
    @param port: the port to connect to
    @param connect: if True, immediately begin connecting to MongoDB in the
        background; otherwise connect on the first operation
    """
    return CONNECTION.connect(host=host, port=port, connect=connect, **kwargs)
"def",
"connect_to_database",
"(",
"host",
"=",
"None",
",",
"port",
"=",
"None",
",",
"connect",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"CONNECTION",
".",
"connect",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"conne... | 46.076923 | 18.076923 |
def write_csv_header(mol, csv_writer):
    """
    Write the csv header row: 'id', 'status', then one column per property
    label of *mol*.
    """
    header = ['id', 'status']
    header.extend(mol.properties.keys())
    csv_writer.writerow(header)
"def",
"write_csv_header",
"(",
"mol",
",",
"csv_writer",
")",
":",
"# create line list where line elements for writing will be stored",
"line",
"=",
"[",
"]",
"# ID",
"line",
".",
"append",
"(",
"'id'",
")",
"# status",
"line",
".",
"append",
"(",
"'status'",
")"... | 18.142857 | 21.333333 |
def load_accounts(extra_path=None, load_user=True):
    """Load the yaml accounts file, recording where and when it was loaded.

    :param extra_path: extra directory to search for the accounts file
    :param load_user: whether to search the user's configuration location
    :return: An `AttrDict`, or None when no accounts file can be found
    """
    from os.path import getmtime

    try:
        accts_file = find_config_file(ACCOUNTS_FILE, extra_path=extra_path, load_user=load_user)
    except ConfigurationError:
        accts_file = None

    if accts_file is None or not os.path.exists(accts_file):
        return None

    config = AttrDict()
    config.update_yaml(accts_file)

    if 'accounts' not in config:
        # NOTE(review): initializes `remotes`, not `accounts` — kept exactly
        # as the original behaved; confirm intent before "fixing".
        config.remotes = AttrDict()

    config.accounts.loaded = [accts_file, getmtime(accts_file)]

    return config
"def",
"load_accounts",
"(",
"extra_path",
"=",
"None",
",",
"load_user",
"=",
"True",
")",
":",
"from",
"os",
".",
"path",
"import",
"getmtime",
"try",
":",
"accts_file",
"=",
"find_config_file",
"(",
"ACCOUNTS_FILE",
",",
"extra_path",
"=",
"extra_path",
"... | 24.884615 | 22.461538 |
def delete_message(queue, region, receipthandle, opts=None, user=None):
    '''
    Delete one or more messages from a queue in a region

    queue
        The name of the queue to delete messages from

    region
        Region where SQS queues exists

    receipthandle
        The ReceiptHandle of the message to delete. The ReceiptHandle
        is obtained in the return from receive_message

    opts : None
        Any additional options to add to the command line

    user : None
        Run as a user other than what the minion runs as

    CLI Example:

    .. code-block:: bash

        salt '*' aws_sqs.delete_message <sqs queue> <region> receipthandle='<sqs ReceiptHandle>'

    .. versionadded:: 2014.7.0
    '''
    queues = list_queues(region, opts, user)
    url_map = _parse_queue_list(queues)
    if queue not in url_map:
        log.info('"%s" queue does not exist.', queue)
        return False

    # The command output was previously bound to an unused variable; we only
    # care that the command ran.
    _run_aws('delete-message', region, opts, user,
             receipthandle=receipthandle, queue=url_map[queue])
    return True
"def",
"delete_message",
"(",
"queue",
",",
"region",
",",
"receipthandle",
",",
"opts",
"=",
"None",
",",
"user",
"=",
"None",
")",
":",
"queues",
"=",
"list_queues",
"(",
"region",
",",
"opts",
",",
"user",
")",
"url_map",
"=",
"_parse_queue_list",
"("... | 26.973684 | 26.447368 |
def dailysummary(start_date=None, end_date=None, return_format=None):
    """Returns daily summary totals of targets, attacks and sources. Limit to
    30 days at a time. (Query 2002-01-01 to present)

    In the return data:

    Sources: Distinct source IP addresses the packets originate from.
    Targets: Distinct target IP addresses the packets were sent to.
    Reports: Number of packets reported.

    :param start_date: string or datetime.date(), default is today
    :param end_date: string or datetime.date(), default is today
    """
    def as_path_part(value):
        # Accept either a datetime-like object or a preformatted string.
        try:
            return value.strftime("%Y-%m-%d")
        except AttributeError:
            return value

    if not start_date:
        # default today
        start_date = datetime.datetime.now()

    uri = '/'.join(['dailysummary', as_path_part(start_date)])
    if end_date:
        uri = '/'.join([uri, as_path_part(end_date)])
    return _get(uri, return_format)
"def",
"dailysummary",
"(",
"start_date",
"=",
"None",
",",
"end_date",
"=",
"None",
",",
"return_format",
"=",
"None",
")",
":",
"uri",
"=",
"'dailysummary'",
"if",
"not",
"start_date",
":",
"# default today",
"start_date",
"=",
"datetime",
".",
"datetime",
... | 33.931034 | 20.068966 |
def plugin_info(self):
        """
        Property for accessing :class:`PluginInfoManager` instance, which is
        used to manage pipeline configurations.

        The manager is created lazily on first access and cached thereafter.

        :rtype: yagocd.resources.plugin_info.PluginInfoManager
        """
        if self._plugin_info_manager is not None:
            return self._plugin_info_manager
        self._plugin_info_manager = PluginInfoManager(session=self._session)
        return self._plugin_info_manager
"def",
"plugin_info",
"(",
"self",
")",
":",
"if",
"self",
".",
"_plugin_info_manager",
"is",
"None",
":",
"self",
".",
"_plugin_info_manager",
"=",
"PluginInfoManager",
"(",
"session",
"=",
"self",
".",
"_session",
")",
"return",
"self",
".",
"_plugin_info_ma... | 43 | 22.333333 |
def read_string(self, len):
        """Read a string of the given length from the packet and advance
        the read offset past it.

        (The parameter is named ``len`` for backward compatibility even
        though it shadows the builtin.)
        """
        fmt = '!' + str(len) + 's'
        size = struct.calcsize(fmt)
        (value,) = struct.unpack(fmt, self.data[self.offset:self.offset + size])
        self.offset += size
        return value
"def",
"read_string",
"(",
"self",
",",
"len",
")",
":",
"format",
"=",
"'!'",
"+",
"str",
"(",
"len",
")",
"+",
"'s'",
"length",
"=",
"struct",
".",
"calcsize",
"(",
"format",
")",
"info",
"=",
"struct",
".",
"unpack",
"(",
"format",
",",
"self",
... | 39.125 | 8.625 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'tooling') and self.tooling is not None:
_dict['tooling'] = self.tooling._to_dict()
if hasattr(self, 'disambiguation') and self.disambiguation is not None:
_dict['disambiguation'] = self.disambiguation._to_dict()
if hasattr(
self,
'human_agent_assist') and self.human_agent_assist is not None:
_dict['human_agent_assist'] = self.human_agent_assist
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'tooling'",
")",
"and",
"self",
".",
"tooling",
"is",
"not",
"None",
":",
"_dict",
"[",
"'tooling'",
"]",
"=",
"self",
".",
"tooling",
".",
"_to_d... | 47.416667 | 22.666667 |
def get_associated_profiles(self):
        """
        Gets the URIs of profiles which are using an Ethernet network.

        Returns:
            list: URIs of the associated profiles.
        """
        endpoint = "{}/associatedProfiles".format(self.data['uri'])
        return self._helper.do_get(endpoint)
"def",
"get_associated_profiles",
"(",
"self",
")",
":",
"uri",
"=",
"\"{}/associatedProfiles\"",
".",
"format",
"(",
"self",
".",
"data",
"[",
"'uri'",
"]",
")",
"return",
"self",
".",
"_helper",
".",
"do_get",
"(",
"uri",
")"
] | 31.846154 | 23.692308 |
def downloadSessionImages(server, filename=None, height=150, width=150,
                          opacity=100, saturation=100):  # pragma: no cover
    """ Helper to download a bif image or thumb.url from plex.server.sessions.

        Parameters:
            server: PlexServer instance whose sessions() are inspected.
            filename (str): default to None,
            height (int): Height of the image.
            width (int): width of the image.
            opacity (int): Opacity of the resulting image (possibly deprecated).
            saturation (int): Saturating of the resulting image.

        Returns:
            {'<username>': {'filepath': '<filepath>', 'url': 'http://<url>'}, ...}
    """
    info = {}
    for media in server.sessions():
        url = None
        for part in media.iterParts():
            if media.thumb:
                url = media.thumb
            if part.indexes:  # always use bif images if available.
                url = '/library/parts/%s/indexes/%s/%s' % (part.id, part.indexes.lower(), media.viewOffset)
        if url:
            if filename is None:
                # NOTE(review): filename is only generated for the first
                # session and then reused — preserved from the original.
                prettyname = media._prettyfilename()
                filename = 'session_transcode_%s_%s_%s' % (media.usernames[0], prettyname, int(time.time()))
            url = server.transcodeImage(url, height, width, opacity, saturation)
            filepath = download(url, filename=filename)
            # BUG FIX: the result was previously stored under the literal key
            # 'username', so multiple sessions clobbered each other; key by
            # the actual user name as the docstring describes.
            info[media.usernames[0]] = {'filepath': filepath, 'url': url}
    return info
"def",
"downloadSessionImages",
"(",
"server",
",",
"filename",
"=",
"None",
",",
"height",
"=",
"150",
",",
"width",
"=",
"150",
",",
"opacity",
"=",
"100",
",",
"saturation",
"=",
"100",
")",
":",
"# pragma: no cover",
"info",
"=",
"{",
"}",
"for",
"... | 45.83871 | 22.516129 |
def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs):
    """Perform conversion for :ref:`supported flux units <synphot-flux-units>`.
    Parameters
    ----------
    wavelengths : array-like or `~astropy.units.quantity.Quantity`
        Wavelength values. If not a Quantity, assumed to be in
        Angstrom.
    fluxes : array-like or `~astropy.units.quantity.Quantity`
        Flux values. If not a Quantity, assumed to be in PHOTLAM.
    out_flux_unit : str or `~astropy.units.core.Unit`
        Output flux unit.
    area : float or `~astropy.units.quantity.Quantity`
        Area that fluxes cover. If not a Quantity, assumed to be in
        :math:`cm^{2}`. This value *must* be provided for conversions involving
        OBMAG and count, otherwise it is not needed.
    vegaspec : `~synphot.spectrum.SourceSpectrum`
        Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`.
        This is *only* used for conversions involving VEGAMAG.
    Returns
    -------
    out_flux : `~astropy.units.quantity.Quantity`
        Converted flux values.
    Raises
    ------
    astropy.units.core.UnitsError
        Conversion failed.
    synphot.exceptions.SynphotError
        Area or Vega spectrum is not given when needed.
    """
    # Bare arrays are assumed to already be in PHOTLAM.
    if not isinstance(fluxes, u.Quantity):
        fluxes = fluxes * PHOTLAM
    out_flux_unit = validate_unit(out_flux_unit)
    out_flux_unit_name = out_flux_unit.to_string()
    in_flux_unit_name = fluxes.unit.to_string()
    # No conversion necessary
    if in_flux_unit_name == out_flux_unit_name:
        return fluxes
    # physical_type is 'unknown' for units astropy cannot convert directly
    # (handled below via the private _convert_flux helper).
    in_flux_type = fluxes.unit.physical_type
    out_flux_type = out_flux_unit.physical_type
    # Wavelengths must Quantity
    if not isinstance(wavelengths, u.Quantity):
        wavelengths = wavelengths * u.AA
    eqv = u.spectral_density(wavelengths)
    # Use built-in astropy equivalencies
    try:
        out_flux = fluxes.to(out_flux_unit, eqv)
    # Use PHOTLAM as in-between unit
    except u.UnitConversionError:
        # Convert input unit to PHOTLAM
        if fluxes.unit == PHOTLAM:
            flux_photlam = fluxes
        elif in_flux_type != 'unknown':
            flux_photlam = fluxes.to(PHOTLAM, eqv)
        else:
            # Non-standard input unit: delegate (may need area/vegaspec kwargs).
            flux_photlam = _convert_flux(
                wavelengths, fluxes, PHOTLAM, **kwargs)
        # Convert PHOTLAM to output unit
        if out_flux_unit == PHOTLAM:
            out_flux = flux_photlam
        elif out_flux_type != 'unknown':
            out_flux = flux_photlam.to(out_flux_unit, eqv)
        else:
            # Non-standard output unit: delegate (may need area/vegaspec kwargs).
            out_flux = _convert_flux(
                wavelengths, flux_photlam, out_flux_unit, **kwargs)
    return out_flux
"def",
"convert_flux",
"(",
"wavelengths",
",",
"fluxes",
",",
"out_flux_unit",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"fluxes",
",",
"u",
".",
"Quantity",
")",
":",
"fluxes",
"=",
"fluxes",
"*",
"PHOTLAM",
"out_flux_unit",
"=... | 31.542169 | 19.301205 |
def parse_search_url(url):
    """Parses a search URL into a config dict.

    Depending on the URL scheme the result contains keys among
    ``ENGINE``, ``URL``, ``INDEX_NAME`` and ``PATH``.
    """
    config = {}

    url = urlparse.urlparse(url)

    # Remove query strings.
    path = url.path[1:]
    path = path.split('?', 2)[0]

    if url.scheme in SEARCH_SCHEMES:
        config["ENGINE"] = SEARCH_SCHEMES[url.scheme]
    if url.scheme in USES_URL:
        config["URL"] = urlparse.urlunparse(("http",) + url[1:])

    if url.scheme in USES_INDEX:
        if path.endswith("/"):
            path = path[:-1]

        split = path.rsplit("/", 1)

        if len(split) > 1:
            # BUG FIX: `path = split[:-1]` left a *list* bound to `path`,
            # which breaks urlunparse below; join the leading components
            # back into a string.
            path = "/".join(split[:-1])
            index = split[-1]
        else:
            path = ""
            index = split[0]

        config.update({
            "URL": urlparse.urlunparse(("http",) + url[1:2] + (path,) + url[3:]),
            "INDEX_NAME": index,
        })

    if url.scheme in USES_PATH:
        config.update({
            "PATH": path,
        })

    return config
"def",
"parse_search_url",
"(",
"url",
")",
":",
"config",
"=",
"{",
"}",
"url",
"=",
"urlparse",
".",
"urlparse",
"(",
"url",
")",
"# Remove query strings.",
"path",
"=",
"url",
".",
"path",
"[",
"1",
":",
"]",
"path",
"=",
"path",
".",
"split",
"("... | 21.731707 | 21.829268 |
def from_ic50(ic50, max_ic50=50000.0):
    """
    Convert ic50s to regression targets in the range [0.0, 1.0].

    An IC50 of ``max_ic50`` maps to 0.0 and an IC50 of 1.0 maps to 1.0;
    values outside that span are clamped.

    Parameters
    ----------
    ic50 : numpy.array of float
    max_ic50 : float

    Returns
    -------
    numpy.array of float
    """
    rescaled = 1.0 - numpy.log(ic50) / numpy.log(max_ic50)
    return numpy.minimum(1.0, numpy.maximum(0.0, rescaled))
"def",
"from_ic50",
"(",
"ic50",
",",
"max_ic50",
"=",
"50000.0",
")",
":",
"x",
"=",
"1.0",
"-",
"(",
"numpy",
".",
"log",
"(",
"ic50",
")",
"/",
"numpy",
".",
"log",
"(",
"max_ic50",
")",
")",
"return",
"numpy",
".",
"minimum",
"(",
"1.0",
",",... | 20.294118 | 20.176471 |
def get_help_width():
  """Returns the integer width of help lines that is used in TextWrap."""
  no_tty = not sys.stdout.isatty()
  if no_tty or termios is None or fcntl is None:
    return _DEFAULT_HELP_WIDTH
  try:
    winsize = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234')
    _, columns = struct.unpack('hh', winsize)
    # Emacs mode returns 0 columns; any value below 40 is treated as
    # unreasonable and falls back to the COLUMNS environment variable.
    if columns >= _MIN_HELP_WIDTH:
      return columns
    # Returning an int default is fine: int(int) is a no-op.
    return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH))
  except (TypeError, IOError, struct.error):
    return _DEFAULT_HELP_WIDTH
"def",
"get_help_width",
"(",
")",
":",
"if",
"not",
"sys",
".",
"stdout",
".",
"isatty",
"(",
")",
"or",
"termios",
"is",
"None",
"or",
"fcntl",
"is",
"None",
":",
"return",
"_DEFAULT_HELP_WIDTH",
"try",
":",
"data",
"=",
"fcntl",
".",
"ioctl",
"(",
... | 40.25 | 17.1875 |
def declare_vars(self, d):
        """Declare every (name, value) pair found in the dictionary *d*."""
        for name, value in d.items():
            self.declare_var(name, value)
"def",
"declare_vars",
"(",
"self",
",",
"d",
")",
":",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
":",
"self",
".",
"declare_var",
"(",
"k",
",",
"v",
")"
] | 38.5 | 7.5 |
def read_bits(self, num):
"""Read ``num`` number of bits from the stream
:num: number of bits to read
:returns: a list of ``num`` bits, or an empty list if EOF has been reached
"""
if num > len(self._bits):
needed = num - len(self._bits)
num_bytes = int(math.ceil(needed / 8.0))
read_bytes = self._stream.read(num_bytes)
for bit in bytes_to_bits(read_bytes):
self._bits.append(bit)
res = []
while len(res) < num and len(self._bits) > 0:
res.append(self._bits.popleft())
return res | [
"def",
"read_bits",
"(",
"self",
",",
"num",
")",
":",
"if",
"num",
">",
"len",
"(",
"self",
".",
"_bits",
")",
":",
"needed",
"=",
"num",
"-",
"len",
"(",
"self",
".",
"_bits",
")",
"num_bytes",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"need... | 31.894737 | 17.315789 |
def expand_brackets(s):
"""Remove whitespace and expand all brackets."""
s = ''.join(s.split())
while True:
start = s.find('(')
if start == -1:
break
count = 1 # Number of hanging open brackets
p = start + 1
while p < len(s):
if s[p] == '(':
count += 1
if s[p] == ')':
count -= 1
if not count:
break
p += 1
if count:
raise ValueError("Unbalanced parenthesis in '{0}'.".format(s))
if start == 0 or s[start - 1] != '*':
s = s[0:start] + s[start + 1:p] + s[p + 1:]
else:
m = BRACKET_RE.search(s)
if m:
factor = int(m.group('factor'))
matchstart = m.start('factor')
s = s[0:matchstart] + (factor - 1) * (s[start + 1:p] + ',') + s[start + 1:p] + s[p + 1:]
else:
raise ValueError("Failed to parse '{0}'.".format(s))
return s | [
"def",
"expand_brackets",
"(",
"s",
")",
":",
"s",
"=",
"''",
".",
"join",
"(",
"s",
".",
"split",
"(",
")",
")",
"while",
"True",
":",
"start",
"=",
"s",
".",
"find",
"(",
"'('",
")",
"if",
"start",
"==",
"-",
"1",
":",
"break",
"count",
"="... | 33.366667 | 18.366667 |
def source_uris(self):
"""The fully-qualified URIs that point to your data in Google Cloud Storage.
Each URI can contain one '*' wildcard character and it must come after the 'bucket' name."""
return [x.path for x in luigi.task.flatten(self.input())] | [
"def",
"source_uris",
"(",
"self",
")",
":",
"return",
"[",
"x",
".",
"path",
"for",
"x",
"in",
"luigi",
".",
"task",
".",
"flatten",
"(",
"self",
".",
"input",
"(",
")",
")",
"]"
] | 54.2 | 16.6 |
def __IsInitialized(self):
""" Returns true if IAM user initialization has completed. """
is_initialized = False
iam_id = self.GetAccessKeyId()
if iam_id:
if core.CirrusAccessIdMetadata(self.s3, iam_id).IsInitialized():
is_initialized = True
return is_initialized | [
"def",
"__IsInitialized",
"(",
"self",
")",
":",
"is_initialized",
"=",
"False",
"iam_id",
"=",
"self",
".",
"GetAccessKeyId",
"(",
")",
"if",
"iam_id",
":",
"if",
"core",
".",
"CirrusAccessIdMetadata",
"(",
"self",
".",
"s3",
",",
"iam_id",
")",
".",
"I... | 37.25 | 13.75 |
def delete(id):
"""Delete a post.
Ensures that the post exists and that the logged in user is the
author of the post.
"""
post = get_post(id)
db.session.delete(post)
db.session.commit()
return redirect(url_for("blog.index")) | [
"def",
"delete",
"(",
"id",
")",
":",
"post",
"=",
"get_post",
"(",
"id",
")",
"db",
".",
"session",
".",
"delete",
"(",
"post",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"redirect",
"(",
"url_for",
"(",
"\"blog.index\"",
")",
"... | 24.8 | 15.8 |
def read(cls, proto):
""" capnp deserialization method for the anomaly likelihood object
:param proto: (Object) capnp proto object specified in
nupic.regions.anomaly_likelihood.capnp
:returns: (Object) the deserialized AnomalyLikelihood object
"""
# pylint: disable=W0212
anomalyLikelihood = object.__new__(cls)
anomalyLikelihood._iteration = proto.iteration
anomalyLikelihood._historicalScores = collections.deque(
maxlen=proto.historicWindowSize)
for i, score in enumerate(proto.historicalScores):
anomalyLikelihood._historicalScores.append((i, score.value,
score.anomalyScore))
if proto.distribution.name: # is "" when there is no distribution.
anomalyLikelihood._distribution = dict()
anomalyLikelihood._distribution['distribution'] = dict()
anomalyLikelihood._distribution['distribution']["name"] = proto.distribution.name
anomalyLikelihood._distribution['distribution']["mean"] = proto.distribution.mean
anomalyLikelihood._distribution['distribution']["variance"] = proto.distribution.variance
anomalyLikelihood._distribution['distribution']["stdev"] = proto.distribution.stdev
anomalyLikelihood._distribution["movingAverage"] = {}
anomalyLikelihood._distribution["movingAverage"]["windowSize"] = proto.distribution.movingAverage.windowSize
anomalyLikelihood._distribution["movingAverage"]["historicalValues"] = []
for value in proto.distribution.movingAverage.historicalValues:
anomalyLikelihood._distribution["movingAverage"]["historicalValues"].append(value)
anomalyLikelihood._distribution["movingAverage"]["total"] = proto.distribution.movingAverage.total
anomalyLikelihood._distribution["historicalLikelihoods"] = []
for likelihood in proto.distribution.historicalLikelihoods:
anomalyLikelihood._distribution["historicalLikelihoods"].append(likelihood)
else:
anomalyLikelihood._distribution = None
anomalyLikelihood._probationaryPeriod = proto.probationaryPeriod
anomalyLikelihood._learningPeriod = proto.learningPeriod
anomalyLikelihood._reestimationPeriod = proto.reestimationPeriod
# pylint: enable=W0212
return anomalyLikelihood | [
"def",
"read",
"(",
"cls",
",",
"proto",
")",
":",
"# pylint: disable=W0212",
"anomalyLikelihood",
"=",
"object",
".",
"__new__",
"(",
"cls",
")",
"anomalyLikelihood",
".",
"_iteration",
"=",
"proto",
".",
"iteration",
"anomalyLikelihood",
".",
"_historicalScores"... | 51.363636 | 28.295455 |
def build_specfile_sections(spec):
""" Builds the sections of a rpm specfile.
"""
str = ""
mandatory_sections = {
'DESCRIPTION' : '\n%%description\n%s\n\n', }
str = str + SimpleTagCompiler(mandatory_sections).compile( spec )
optional_sections = {
'DESCRIPTION_' : '%%description -l %s\n%s\n\n',
'CHANGELOG' : '%%changelog\n%s\n\n',
'X_RPM_PREINSTALL' : '%%pre\n%s\n\n',
'X_RPM_POSTINSTALL' : '%%post\n%s\n\n',
'X_RPM_PREUNINSTALL' : '%%preun\n%s\n\n',
'X_RPM_POSTUNINSTALL' : '%%postun\n%s\n\n',
'X_RPM_VERIFY' : '%%verify\n%s\n\n',
# These are for internal use but could possibly be overridden
'X_RPM_PREP' : '%%prep\n%s\n\n',
'X_RPM_BUILD' : '%%build\n%s\n\n',
'X_RPM_INSTALL' : '%%install\n%s\n\n',
'X_RPM_CLEAN' : '%%clean\n%s\n\n',
}
# Default prep, build, install and clean rules
# TODO: optimize those build steps, to not compile the project a second time
if 'X_RPM_PREP' not in spec:
spec['X_RPM_PREP'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"' + '\n%setup -q'
if 'X_RPM_BUILD' not in spec:
spec['X_RPM_BUILD'] = '[ ! -e "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && mkdir "$RPM_BUILD_ROOT"'
if 'X_RPM_INSTALL' not in spec:
spec['X_RPM_INSTALL'] = 'scons --install-sandbox="$RPM_BUILD_ROOT" "$RPM_BUILD_ROOT"'
if 'X_RPM_CLEAN' not in spec:
spec['X_RPM_CLEAN'] = '[ -n "$RPM_BUILD_ROOT" -a "$RPM_BUILD_ROOT" != / ] && rm -rf "$RPM_BUILD_ROOT"'
str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )
return str | [
"def",
"build_specfile_sections",
"(",
"spec",
")",
":",
"str",
"=",
"\"\"",
"mandatory_sections",
"=",
"{",
"'DESCRIPTION'",
":",
"'\\n%%description\\n%s\\n\\n'",
",",
"}",
"str",
"=",
"str",
"+",
"SimpleTagCompiler",
"(",
"mandatory_sections",
")",
".",
"compile... | 39.837209 | 25.906977 |
def fetch(elastic, backend, limit=None, search_after_value=None, scroll=True):
""" Fetch the items from raw or enriched index """
logging.debug("Creating a elastic items generator.")
elastic_scroll_id = None
search_after = search_after_value
while True:
if scroll:
rjson = get_elastic_items(elastic, elastic_scroll_id, limit)
else:
rjson = get_elastic_items_search(elastic, search_after, limit)
if rjson and "_scroll_id" in rjson:
elastic_scroll_id = rjson["_scroll_id"]
if rjson and "hits" in rjson:
if not rjson["hits"]["hits"]:
break
for hit in rjson["hits"]["hits"]:
item = hit['_source']
if 'sort' in hit:
search_after = hit['sort']
try:
backend._fix_item(item)
except Exception:
pass
yield item
else:
logging.error("No results found from %s", elastic.index_url)
break
return | [
"def",
"fetch",
"(",
"elastic",
",",
"backend",
",",
"limit",
"=",
"None",
",",
"search_after_value",
"=",
"None",
",",
"scroll",
"=",
"True",
")",
":",
"logging",
".",
"debug",
"(",
"\"Creating a elastic items generator.\"",
")",
"elastic_scroll_id",
"=",
"No... | 31.088235 | 19.970588 |
def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False,
root=None):
'''
Build a systemctl command line. Treat unit names without one
of the valid suffixes as a service.
'''
ret = []
if systemd_scope \
and salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
ret.extend(['systemd-run', '--scope'])
ret.append('systemctl')
if no_block:
ret.append('--no-block')
if root:
ret.extend(['--root', root])
if isinstance(action, six.string_types):
action = shlex.split(action)
ret.extend(action)
if name is not None:
ret.append(_canonical_unit_name(name))
if 'status' in ret:
ret.extend(['-n', '0'])
return ret | [
"def",
"_systemctl_cmd",
"(",
"action",
",",
"name",
"=",
"None",
",",
"systemd_scope",
"=",
"False",
",",
"no_block",
"=",
"False",
",",
"root",
"=",
"None",
")",
":",
"ret",
"=",
"[",
"]",
"if",
"systemd_scope",
"and",
"salt",
".",
"utils",
".",
"s... | 32.708333 | 16.875 |
def logger(function):
"""Decorate passed in function and log message to module logger."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
"""Wrap function."""
sep = kwargs.get('sep', ' ')
end = kwargs.get('end', '') # do not add newline by default
out = sep.join([repr(x) for x in args])
out = out + end
_LOGGER.debug(out)
return function(*args, **kwargs)
return wrapper | [
"def",
"logger",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrap function.\"\"\"",
"sep",
"=",
"kwargs",
".",
"get",
"(",
"'sep'",
",",
... | 36.75 | 10.666667 |
def _find_types(pkgs):
'''Form a package names list, find prefixes of packages types.'''
return sorted({pkg.split(':', 1)[0] for pkg in pkgs
if len(pkg.split(':', 1)) == 2}) | [
"def",
"_find_types",
"(",
"pkgs",
")",
":",
"return",
"sorted",
"(",
"{",
"pkg",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"0",
"]",
"for",
"pkg",
"in",
"pkgs",
"if",
"len",
"(",
"pkg",
".",
"split",
"(",
"':'",
",",
"1",
")",
")",
"==",... | 49.25 | 18.25 |
def push(self, obj):
"""Prepend an element to the beginnging of the list.
Parameters
----------
obj : KQMLObject or str
If a string is passed, it is instantiated as a
KQMLToken before being added to the list.
"""
if isinstance(obj, str):
obj = KQMLToken(obj)
self.data.insert(0, obj) | [
"def",
"push",
"(",
"self",
",",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"str",
")",
":",
"obj",
"=",
"KQMLToken",
"(",
"obj",
")",
"self",
".",
"data",
".",
"insert",
"(",
"0",
",",
"obj",
")"
] | 30.416667 | 14 |
def find_connection_file(filename, profile=None):
"""find a connection file, and return its absolute path.
The current working directory and the profile's security
directory will be searched for the file if it is not given by
absolute path.
If profile is unspecified, then the current running application's
profile will be used, or 'default', if not run from IPython.
If the argument does not match an existing file, it will be interpreted as a
fileglob, and the matching file in the profile's security dir with
the latest access time will be used.
Parameters
----------
filename : str
The connection file or fileglob to search for.
profile : str [optional]
The name of the profile to use when searching for the connection file,
if different from the current IPython session or 'default'.
Returns
-------
str : The absolute path of the connection file.
"""
from IPython.core.application import BaseIPythonApplication as IPApp
try:
# quick check for absolute path, before going through logic
return filefind(filename)
except IOError:
pass
if profile is None:
# profile unspecified, check if running from an IPython app
if IPApp.initialized():
app = IPApp.instance()
profile_dir = app.profile_dir
else:
# not running in IPython, use default profile
profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), 'default')
else:
# find profiledir by profile name:
profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile)
security_dir = profile_dir.security_dir
try:
# first, try explicit name
return filefind(filename, ['.', security_dir])
except IOError:
pass
# not found by full name
if '*' in filename:
# given as a glob already
pat = filename
else:
# accept any substring match
pat = '*%s*' % filename
matches = glob.glob( os.path.join(security_dir, pat) )
if not matches:
raise IOError("Could not find %r in %r" % (filename, security_dir))
elif len(matches) == 1:
return matches[0]
else:
# get most recent match, by access time:
return sorted(matches, key=lambda f: os.stat(f).st_atime)[-1] | [
"def",
"find_connection_file",
"(",
"filename",
",",
"profile",
"=",
"None",
")",
":",
"from",
"IPython",
".",
"core",
".",
"application",
"import",
"BaseIPythonApplication",
"as",
"IPApp",
"try",
":",
"# quick check for absolute path, before going through logic",
"retu... | 34.558824 | 22.191176 |
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token in ['[CLS]', '[SEP]']:
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = '[MASK]'
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0,
len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels) | [
"def",
"create_masked_lm_predictions",
"(",
"tokens",
",",
"masked_lm_prob",
",",
"max_predictions_per_seq",
",",
"vocab_words",
",",
"rng",
")",
":",
"cand_indexes",
"=",
"[",
"]",
"for",
"(",
"i",
",",
"token",
")",
"in",
"enumerate",
"(",
"tokens",
")",
"... | 31.5 | 19.269231 |
def load(self, schema_file: Union[str, TextIO], schema_location: Optional[str]=None) -> ShExJ.Schema:
""" Load a ShEx Schema from schema_location
:param schema_file: name or file-like object to deserialize
:param schema_location: URL or file name of schema. Used to create the base_location
:return: ShEx Schema represented by schema_location
"""
if isinstance(schema_file, str):
schema_file = self.location_rewrite(schema_file)
self.schema_text = load_shex_file(schema_file)
else:
self.schema_text = schema_file.read()
if self.base_location:
self.root_location = self.base_location
elif schema_location:
self.root_location = os.path.dirname(schema_location) + '/'
else:
self.root_location = None
return self.loads(self.schema_text) | [
"def",
"load",
"(",
"self",
",",
"schema_file",
":",
"Union",
"[",
"str",
",",
"TextIO",
"]",
",",
"schema_location",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"ShExJ",
".",
"Schema",
":",
"if",
"isinstance",
"(",
"schema_file",
",",
... | 43.85 | 20.55 |
def flag_is_related(self, flag):
'''
Checks for relationship between a flag and this block.
Returns:
True if the flag is related to this block.
'''
same_worksheet = flag.worksheet == self.worksheet
if isinstance(flag.location, (tuple, list)):
return (flag.location[0] >= self.start[0] and flag.location[0] < self.end[0] and
flag.location[1] >= self.start[1] and flag.location[1] < self.end[1] and
same_worksheet)
else:
return same_worksheet | [
"def",
"flag_is_related",
"(",
"self",
",",
"flag",
")",
":",
"same_worksheet",
"=",
"flag",
".",
"worksheet",
"==",
"self",
".",
"worksheet",
"if",
"isinstance",
"(",
"flag",
".",
"location",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"(... | 40 | 24.142857 |
def mixin(self):
"""
Add your own custom functions to the Underscore object, ensuring that
they're correctly added to the OOP wrapper as well.
"""
methods = self.obj
for i, k in enumerate(methods):
setattr(underscore, k, methods[k])
self.makeStatic()
return self._wrap(self.obj) | [
"def",
"mixin",
"(",
"self",
")",
":",
"methods",
"=",
"self",
".",
"obj",
"for",
"i",
",",
"k",
"in",
"enumerate",
"(",
"methods",
")",
":",
"setattr",
"(",
"underscore",
",",
"k",
",",
"methods",
"[",
"k",
"]",
")",
"self",
".",
"makeStatic",
"... | 31.363636 | 14.636364 |
def _check_rot_sym(self, axis):
"""
Determines the rotational symmetry about supplied axis. Used only for
symmetric top molecules which has possible rotational symmetry
operations > 2.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
max_sym = len(min_set)
for i in range(max_sym, 0, -1):
if max_sym % i != 0:
continue
op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)
rotvalid = self.is_valid_op(op)
if rotvalid:
self.symmops.append(op)
self.rot_sym.append((axis, i))
return i
return 1 | [
"def",
"_check_rot_sym",
"(",
"self",
",",
"axis",
")",
":",
"min_set",
"=",
"self",
".",
"_get_smallest_set_not_on_axis",
"(",
"axis",
")",
"max_sym",
"=",
"len",
"(",
"min_set",
")",
"for",
"i",
"in",
"range",
"(",
"max_sym",
",",
"0",
",",
"-",
"1",... | 37.166667 | 13.5 |
def create(self, name, *args, **kwargs):
"""
Need to wrap the default call to handle exceptions.
"""
try:
return super(ImageMemberManager, self).create(name, *args, **kwargs)
except Exception as e:
if e.http_status == 403:
raise exc.UnsharableImage("You cannot share a public image.")
else:
raise | [
"def",
"create",
"(",
"self",
",",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"super",
"(",
"ImageMemberManager",
",",
"self",
")",
".",
"create",
"(",
"name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"... | 35.818182 | 16.363636 |
def shards(self, add_shard=False):
"""Get a list of shards belonging to this instance.
:param bool add_shard: A boolean indicating whether to add a new shard to the specified
instance.
"""
url = self._service_url + 'shards/'
if add_shard:
response = requests.post(url, **self._instances._default_request_kwargs)
else:
response = requests.get(url, **self._instances._default_request_kwargs)
return response.json() | [
"def",
"shards",
"(",
"self",
",",
"add_shard",
"=",
"False",
")",
":",
"url",
"=",
"self",
".",
"_service_url",
"+",
"'shards/'",
"if",
"add_shard",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"*",
"*",
"self",
".",
"_instances",
... | 38 | 23.538462 |
def close(self):
"""Close the socket"""
if self.is_open():
fd = self._fd
self._fd = -1
if self.uses_nanoconfig:
wrapper.nc_close(fd)
else:
_nn_check_positive_rtn(wrapper.nn_close(fd)) | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_open",
"(",
")",
":",
"fd",
"=",
"self",
".",
"_fd",
"self",
".",
"_fd",
"=",
"-",
"1",
"if",
"self",
".",
"uses_nanoconfig",
":",
"wrapper",
".",
"nc_close",
"(",
"fd",
")",
"else",
... | 30.111111 | 13.222222 |
def copy(self, **replacements):
"""Returns a clone of this M2Coordinate with the given replacements kwargs overlaid."""
cls = type(self)
kwargs = {'org': self.org, 'name': self.name, 'ext': self.ext, 'classifier': self.classifier, 'rev': self.rev}
for key, val in replacements.items():
kwargs[key] = val
return cls(**kwargs) | [
"def",
"copy",
"(",
"self",
",",
"*",
"*",
"replacements",
")",
":",
"cls",
"=",
"type",
"(",
"self",
")",
"kwargs",
"=",
"{",
"'org'",
":",
"self",
".",
"org",
",",
"'name'",
":",
"self",
".",
"name",
",",
"'ext'",
":",
"self",
".",
"ext",
","... | 49.142857 | 19.571429 |
def create_token(cls, obj_id, data, expires_at=None):
"""Create the secret link token."""
if expires_at:
s = TimedSecretLinkSerializer(expires_at=expires_at)
else:
s = SecretLinkSerializer()
return s.create_token(obj_id, data) | [
"def",
"create_token",
"(",
"cls",
",",
"obj_id",
",",
"data",
",",
"expires_at",
"=",
"None",
")",
":",
"if",
"expires_at",
":",
"s",
"=",
"TimedSecretLinkSerializer",
"(",
"expires_at",
"=",
"expires_at",
")",
"else",
":",
"s",
"=",
"SecretLinkSerializer",... | 34.5 | 15.875 |
def rel_links(cls, page):
"""return rel= links that should be scraped, skipping obviously data links."""
for match in cls.REL_RE.finditer(page):
href, rel = match.group(0), match.group(1)
if rel not in cls.REL_TYPES:
continue
href_match = cls.HREF_RE.search(href)
if href_match:
href = cls.href_match_to_url(href_match)
parsed_href = urlparse(href)
if any(parsed_href.path.endswith(ext) for ext in cls.REL_SKIP_EXTENSIONS):
continue
yield href | [
"def",
"rel_links",
"(",
"cls",
",",
"page",
")",
":",
"for",
"match",
"in",
"cls",
".",
"REL_RE",
".",
"finditer",
"(",
"page",
")",
":",
"href",
",",
"rel",
"=",
"match",
".",
"group",
"(",
"0",
")",
",",
"match",
".",
"group",
"(",
"1",
")",... | 39.461538 | 13.615385 |
def camelcase_search_options(self, options):
"""change all underscored variants back to what the API is expecting"""
new_options = {}
for key in options:
value = options[key]
new_key = SEARCH_OPTIONS_DICT.get(key, key)
if new_key == 'sort':
value = SORT_OPTIONS_DICT.get(value, value)
elif new_key == 'timePivot':
value = TIME_PIVOT_OPTIONS_DICT.get(value, value)
elif new_key in BOOLEAN_SEARCH_OPTIONS:
value = str(value).lower()
new_options[new_key] = value
return new_options | [
"def",
"camelcase_search_options",
"(",
"self",
",",
"options",
")",
":",
"new_options",
"=",
"{",
"}",
"for",
"key",
"in",
"options",
":",
"value",
"=",
"options",
"[",
"key",
"]",
"new_key",
"=",
"SEARCH_OPTIONS_DICT",
".",
"get",
"(",
"key",
",",
"key... | 44.071429 | 9.571429 |
def box(text, width=100, height=3, corner="+", horizontal="-", vertical="|"):
"""Return a ascii box, with your text center-aligned.
Usage Example::
>>> StringTemplate.box("Hello world!", 20, 5)
+------------------+
| |
| Hello world! |
| |
+------------------+
"""
if width <= len(text) - 4:
print("width is not large enough! apply auto-adjust...")
width = len(text) + 4
if height <= 2:
print("height is too small! apply auto-adjust...")
height = 3
if (height % 2) == 0:
print("height has to be odd! apply auto-adjust...")
height += 1
head = tail = corner + horizontal * (width - 2) + corner
pad = "%s%s%s" % (vertical, " " * (width - 2), vertical)
pad_number = (height - 3) // 2
pattern = "{: ^%s}" % (width - 2, )
body = vertical + pattern.format(text) + vertical
return "\n".join([head,] + [pad,] * pad_number + [body,] + [pad,] * pad_number + [tail,]) | [
"def",
"box",
"(",
"text",
",",
"width",
"=",
"100",
",",
"height",
"=",
"3",
",",
"corner",
"=",
"\"+\"",
",",
"horizontal",
"=",
"\"-\"",
",",
"vertical",
"=",
"\"|\"",
")",
":",
"if",
"width",
"<=",
"len",
"(",
"text",
")",
"-",
"4",
":",
"p... | 38.3 | 18.233333 |
def get_display(unicode_or_str, encoding='utf-8', upper_is_rtl=False,
base_dir=None, debug=False):
"""Accepts unicode or string. In case it's a string, `encoding`
is needed as it works on unicode ones (default:"utf-8").
Set `upper_is_rtl` to True to treat upper case chars as strong 'R'
for debugging (default: False).
Set `base_dir` to 'L' or 'R' to override the calculated base_level.
Set `debug` to True to display (using sys.stderr) the steps taken with the
algorithm.
Returns the display layout, either as unicode or `encoding` encoded
string.
"""
storage = get_empty_storage()
# utf-8 ? we need unicode
if isinstance(unicode_or_str, six.text_type):
text = unicode_or_str
decoded = False
else:
text = unicode_or_str.decode(encoding)
decoded = True
if base_dir is None:
base_level = get_base_level(text, upper_is_rtl)
else:
base_level = PARAGRAPH_LEVELS[base_dir]
storage['base_level'] = base_level
storage['base_dir'] = ('L', 'R')[base_level]
get_embedding_levels(text, storage, upper_is_rtl, debug)
explicit_embed_and_overrides(storage, debug)
resolve_weak_types(storage, debug)
resolve_neutral_types(storage, debug)
resolve_implicit_levels(storage, debug)
reorder_resolved_levels(storage, debug)
apply_mirroring(storage, debug)
chars = storage['chars']
display = u''.join([_ch['ch'] for _ch in chars])
if decoded:
return display.encode(encoding)
else:
return display | [
"def",
"get_display",
"(",
"unicode_or_str",
",",
"encoding",
"=",
"'utf-8'",
",",
"upper_is_rtl",
"=",
"False",
",",
"base_dir",
"=",
"None",
",",
"debug",
"=",
"False",
")",
":",
"storage",
"=",
"get_empty_storage",
"(",
")",
"# utf-8 ? we need unicode",
"if... | 30.62 | 20.26 |
def _update_subplot(self, subplot, spec):
"""
Updates existing subplots when the subplot has been assigned
to plot an element that is not an exact match to the object
it was initially assigned.
"""
# See if the precise spec has already been assigned a cyclic
# index otherwise generate a new one
if spec in self.cyclic_index_lookup:
cyclic_index = self.cyclic_index_lookup[spec]
else:
group_key = spec[:self.style_grouping]
self.group_counter[group_key] += 1
cyclic_index = self.group_counter[group_key]
self.cyclic_index_lookup[spec] = cyclic_index
subplot.cyclic_index = cyclic_index
if subplot.overlay_dims:
odim_key = util.wrap_tuple(spec[-1])
new_dims = zip(subplot.overlay_dims, odim_key)
subplot.overlay_dims = util.OrderedDict(new_dims) | [
"def",
"_update_subplot",
"(",
"self",
",",
"subplot",
",",
"spec",
")",
":",
"# See if the precise spec has already been assigned a cyclic",
"# index otherwise generate a new one",
"if",
"spec",
"in",
"self",
".",
"cyclic_index_lookup",
":",
"cyclic_index",
"=",
"self",
... | 41.318182 | 14.954545 |
def get_choice(cls, value):
"""
Return the underlying :class:`ChoiceItem` for a given value.
"""
attribute_for_value = cls.attributes[value]
return cls._fields[attribute_for_value] | [
"def",
"get_choice",
"(",
"cls",
",",
"value",
")",
":",
"attribute_for_value",
"=",
"cls",
".",
"attributes",
"[",
"value",
"]",
"return",
"cls",
".",
"_fields",
"[",
"attribute_for_value",
"]"
] | 35.833333 | 9.833333 |
def _format_fields(self, fields, title_width=12):
"""Formats a list of fields for display.
Parameters
----------
fields : list
A list of 2-tuples: (field_title, field_content)
title_width : int
How many characters to pad titles to. Default 12.
"""
out = []
header = self.__head
for title, content in fields:
if len(content.splitlines()) > 1:
title = header(title + ":") + "\n"
else:
title = header((title+":").ljust(title_width))
out.append(title + content)
return "\n".join(out) | [
"def",
"_format_fields",
"(",
"self",
",",
"fields",
",",
"title_width",
"=",
"12",
")",
":",
"out",
"=",
"[",
"]",
"header",
"=",
"self",
".",
"__head",
"for",
"title",
",",
"content",
"in",
"fields",
":",
"if",
"len",
"(",
"content",
".",
"splitlin... | 33.157895 | 14.473684 |
def validate_key(self, activation_key):
"""
Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or ``None`` if not.
"""
try:
username = signing.loads(
activation_key,
salt=self.key_salt,
max_age=conf.get('ACCOUNT_ACTIVATION_DAYS') * 86400
)
return username
# SignatureExpired is a subclass of BadSignature, so this will
# catch either one.
except signing.BadSignature:
return None | [
"def",
"validate_key",
"(",
"self",
",",
"activation_key",
")",
":",
"try",
":",
"username",
"=",
"signing",
".",
"loads",
"(",
"activation_key",
",",
"salt",
"=",
"self",
".",
"key_salt",
",",
"max_age",
"=",
"conf",
".",
"get",
"(",
"'ACCOUNT_ACTIVATION_... | 33.388889 | 15.166667 |
def run(self, *args):
"""Remove unique identities or identities from the registry.
By default, it removes the unique identity identified by <identifier>.
To remove an identity, set <identity> parameter.
"""
params = self.parser.parse_args(args)
identifier = params.identifier
identity = params.identity
code = self.remove(identifier, identity)
return code | [
"def",
"run",
"(",
"self",
",",
"*",
"args",
")",
":",
"params",
"=",
"self",
".",
"parser",
".",
"parse_args",
"(",
"args",
")",
"identifier",
"=",
"params",
".",
"identifier",
"identity",
"=",
"params",
".",
"identity",
"code",
"=",
"self",
".",
"r... | 29.857143 | 19.642857 |
def custom_prefix_lax(instance):
"""Ensure custom content follows lenient naming style conventions
for forward-compatibility.
"""
for error in chain(custom_object_prefix_lax(instance),
custom_property_prefix_lax(instance),
custom_observable_object_prefix_lax(instance),
custom_object_extension_prefix_lax(instance),
custom_observable_properties_prefix_lax(instance)):
yield error | [
"def",
"custom_prefix_lax",
"(",
"instance",
")",
":",
"for",
"error",
"in",
"chain",
"(",
"custom_object_prefix_lax",
"(",
"instance",
")",
",",
"custom_property_prefix_lax",
"(",
"instance",
")",
",",
"custom_observable_object_prefix_lax",
"(",
"instance",
")",
",... | 48.6 | 16.8 |
def reboot(vm_):
'''
Reboot a domain via ACPI request
CLI Example:
.. code-block:: bash
salt '*' virt.reboot <vm name>
'''
with _get_xapi_session() as xapi:
vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
if vm_uuid is False:
return False
try:
xapi.VM.clean_reboot(vm_uuid)
return True
except Exception:
return False | [
"def",
"reboot",
"(",
"vm_",
")",
":",
"with",
"_get_xapi_session",
"(",
")",
"as",
"xapi",
":",
"vm_uuid",
"=",
"_get_label_uuid",
"(",
"xapi",
",",
"'VM'",
",",
"vm_",
")",
"if",
"vm_uuid",
"is",
"False",
":",
"return",
"False",
"try",
":",
"xapi",
... | 21.473684 | 19.684211 |
def get_barcode_umis(read, cell_barcode=False):
''' extract the umi +/- cell barcode from the read name where the barcodes
were extracted using umis'''
umi, cell = None, None
try:
read_name_elements = read.qname.split(":")
for element in read_name_elements:
if element.startswith("UMI_"):
umi = element[4:].encode('utf-8')
elif element.startswith("CELL_") and cell_barcode:
cell = element[5:].encode('utf-8')
if umi is None:
raise ValueError()
return umi, cell
except:
raise ValueError("Could not extract UMI +/- cell barcode from the "
"read tag") | [
"def",
"get_barcode_umis",
"(",
"read",
",",
"cell_barcode",
"=",
"False",
")",
":",
"umi",
",",
"cell",
"=",
"None",
",",
"None",
"try",
":",
"read_name_elements",
"=",
"read",
".",
"qname",
".",
"split",
"(",
"\":\"",
")",
"for",
"element",
"in",
"re... | 32.619048 | 20.238095 |
def request_cert(domain, master, ticket, port):
'''
Request CA cert from master icinga2 node.
Returns::
icinga2 pki request --host master.domain.tld --port 5665 --ticket TICKET_ID --key /etc/icinga2/pki/domain.tld.key --cert /etc/icinga2/pki/domain.tld.crt --trustedcert \
/etc/icinga2/pki/trusted-master.crt --ca /etc/icinga2/pki/ca.crt
CLI Example:
.. code-block:: bash
salt '*' icinga2.request_cert domain.tld master.domain.tld TICKET_ID
'''
result = __salt__['cmd.run_all'](["icinga2", "pki", "request", "--host", master, "--port", port, "--ticket", ticket, "--key", "{0}{1}.key".format(get_certs_path(), domain), "--cert",
"{0}{1}.crt".format(get_certs_path(), domain), "--trustedcert", "{0}trusted-master.crt".format(get_certs_path()), "--ca", "{0}ca.crt".format(get_certs_path())], python_shell=False)
return result | [
"def",
"request_cert",
"(",
"domain",
",",
"master",
",",
"ticket",
",",
"port",
")",
":",
"result",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"[",
"\"icinga2\"",
",",
"\"pki\"",
",",
"\"request\"",
",",
"\"--host\"",
",",
"master",
",",
"\"--port\"... | 50.666667 | 50.222222 |
def _state_stopped(self):
"""
The service is not running.
This is the initial state, and the state after L{stopService} was
called. To get out of this state, call L{startService}. If there is a
current connection, we disconnect.
"""
if self._reconnectDelayedCall:
self._reconnectDelayedCall.cancel()
self._reconnectDelayedCall = None
self.loseConnection() | [
"def",
"_state_stopped",
"(",
"self",
")",
":",
"if",
"self",
".",
"_reconnectDelayedCall",
":",
"self",
".",
"_reconnectDelayedCall",
".",
"cancel",
"(",
")",
"self",
".",
"_reconnectDelayedCall",
"=",
"None",
"self",
".",
"loseConnection",
"(",
")"
] | 36.083333 | 13.083333 |
def cmd(send, msg, args):
    """Gets a definition from urban dictionary.
    Syntax: {command} <[#<num>] <term>|--blacklist (word)|--unblacklist (word)>
    """
    # Bitly key is used to shorten the full-definition URL.
    key = args['config']['api']['bitlykey']
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--blacklist')
    parser.add_argument('--unblacklist')
    try:
        opts, remainder = parser.parse_known_args(msg)
        msg = ' '.join(remainder)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    # Guard-clause style: handle the admin-only maintenance options first.
    if opts.blacklist:
        if args['is_admin'](args['nick']):
            send(blacklist_word(args['db'], opts.blacklist))
        else:
            send("Blacklisting is admin-only")
        return
    if opts.unblacklist:
        if args['is_admin'](args['nick']):
            send(unblacklist_word(args['db'], opts.unblacklist))
        else:
            send("Unblacklisting is admin-only")
        return
    # Normal lookup path.
    defn, url = get_urban(msg, args['db'], key)
    send(defn)
    if url:
        send("See full definition at %s" % url)
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"key",
"=",
"args",
"[",
"'config'",
"]",
"[",
"'api'",
"]",
"[",
"'bitlykey'",
"]",
"parser",
"=",
"arguments",
".",
"ArgParser",
"(",
"args",
"[",
"'config'",
"]",
")",
"parser",
".",
... | 31.96875 | 16.90625 |
def _snapshot_to_data(snapshot):
'''
Returns snapshot data from a D-Bus response.
A snapshot D-Bus response is a dbus.Struct containing the
information related to a snapshot:
[id, type, pre_snapshot, timestamp, user, description,
cleanup_algorithm, userdata]
id: dbus.UInt32
type: dbus.UInt16
pre_snapshot: dbus.UInt32
timestamp: dbus.Int64
user: dbus.UInt32
description: dbus.String
cleaup_algorithm: dbus.String
userdata: dbus.Dictionary
'''
data = {}
data['id'] = snapshot[0]
data['type'] = ['single', 'pre', 'post'][snapshot[1]]
if data['type'] == 'post':
data['pre'] = snapshot[2]
if snapshot[3] != -1:
data['timestamp'] = snapshot[3]
else:
data['timestamp'] = int(time.time())
data['user'] = getpwuid(snapshot[4])[0]
data['description'] = snapshot[5]
data['cleanup'] = snapshot[6]
data['userdata'] = {}
for key, value in snapshot[7].items():
data['userdata'][key] = value
return data | [
"def",
"_snapshot_to_data",
"(",
"snapshot",
")",
":",
"data",
"=",
"{",
"}",
"data",
"[",
"'id'",
"]",
"=",
"snapshot",
"[",
"0",
"]",
"data",
"[",
"'type'",
"]",
"=",
"[",
"'single'",
",",
"'pre'",
",",
"'post'",
"]",
"[",
"snapshot",
"[",
"1",
... | 24.975 | 18.675 |
def do_POST(self, ):
    """Handle POST requests

    When the user is redirected, the served page posts the OAuth URL
    fragment back as a query string; this handler stores the rebuilt
    redirection url (with the fragment restored) via
    :data:`LoginServer.tokenurl`.

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('POST')
    self._set_headers()
    # URL fragments never reach the server, so the page we serve re-sends
    # them as a POST query string.  Undo that here: turn the '?' back into
    # a '#' so set_token sees the original fragment form.
    # OAuth formally requires https (twitch tolerates http), so present the
    # https variant of the redirect URI.
    secure_uri = constants.REDIRECT_URI.replace('http://', 'https://')
    fragment_path = self.path.replace('?', '#')
    self.server.set_token(secure_uri + fragment_path)
"def",
"do_POST",
"(",
"self",
",",
")",
":",
"log",
".",
"debug",
"(",
"'POST'",
")",
"self",
".",
"_set_headers",
"(",
")",
"# convert the parameters back to the original fragment",
"# because we need to send the original uri to set_token",
"# url fragments will not show up... | 42.818182 | 21.363636 |
def loaders(*specifiers):
    """
    Generates loaders in the specified order.

    Arguments can be `.Locality` instances, producing the loader(s) available
    for that locality, `str` instances (used as file path templates) or
    `callable`s. These can be mixed:

    .. code-block:: python

        load_order = loaders(Locality.user,
                             '/etc/defaults/hard-coded.yaml',
                             '/path/to/{name}.{extension}',
                             my_loader)
        config = load_name('my-application', load_order=load_order)

    :param specifiers: localities, path templates or callables
    :return: a `generator` of configuration loaders in the specified order
    """
    for spec in specifiers:
        if not isinstance(spec, Locality):
            # path template or callable: pass through unchanged
            yield spec
        else:
            # a locality can map to several loaders; expand it in order
            yield from _LOADERS[spec]
"def",
"loaders",
"(",
"*",
"specifiers",
")",
":",
"for",
"specifier",
"in",
"specifiers",
":",
"if",
"isinstance",
"(",
"specifier",
",",
"Locality",
")",
":",
"# localities can carry multiple loaders, flatten this",
"yield",
"from",
"_LOADERS",
"[",
"specifier",
... | 37.806452 | 20.387097 |
def send_message(self, to_number, message, from_number=None):
    """ Send a message to the specified number and return a response dictionary.

    The numbers must be specified in international format starting with a '+'.

    Returns a dictionary that contains a 'MessageId' key with the sent
    message id value, or contains 'errorCode' and 'message' on error.

    Possible error codes:
        40001 - Parameter validation
        40002 - Missing parameter
        40003 - Invalid request
        40100 - Illegal authorization header
        40200 - There is not enough funds to send the message
        40300 - Forbidden request
        40301 - Invalid authorization scheme for calling the method
        50000 - Internal error
    """
    payload = {'Message': message}
    if from_number is not None:
        # 'From' is optional; only sent when the caller supplies it.
        payload['From'] = from_number
    return self._request(self.SEND_SMS_URL + to_number, payload)
"def",
"send_message",
"(",
"self",
",",
"to_number",
",",
"message",
",",
"from_number",
"=",
"None",
")",
":",
"values",
"=",
"{",
"'Message'",
":",
"message",
"}",
"if",
"from_number",
"is",
"not",
"None",
":",
"values",
"[",
"'From'",
"]",
"=",
"fr... | 44.130435 | 18.782609 |
def Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr, Gr, turbulent=None):
    r'''Calculates Nusselt number for natural convection around a vertical
    isothermal cylinder, per the data of Weise and Saunders as correlated
    by McAdams.

    .. math::
        Nu_H = 0.59 Ra_H^{0.25},\; 10^{4} < Ra < 10^{9}

        Nu_H = 0.13 Ra_H^{1/3.},\; 10^{9} < Ra < 10^{12}

    Parameters
    ----------
    Pr : float
        Prandtl number [-]
    Gr : float
        Grashof number [-]
    turbulent : bool or None, optional
        Force the turbulent correlation if True, the laminar one if False;
        leave as None for automatic selection at Ra = 1E9

    Returns
    -------
    Nu : float
        Nusselt number, [-]

    Notes
    -----
    Transition between ranges is not smooth. If outside of range, no
    warning is given.

    Examples
    --------
    >>> Nu_vertical_cylinder_McAdams_Weiss_Saunders(.7, 2E10)
    313.31849434277973

    References
    ----------
    .. [1] Weise, Rudolf. "Warmeubergang durch freie Konvektion an
       quadratischen Platten." Forschung auf dem Gebiet des Ingenieurwesens
       A 6, no. 6 (November 1935): 281-92. doi:10.1007/BF02592565.
    .. [2] Saunders, O. A. "The Effect of Pressure Upon Natural Convection in
       Air." Proceedings of the Royal Society of London A 157, no. 891
       (November 2, 1936): 278-91. doi:10.1098/rspa.1936.0194.
    .. [3] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
       Krieger Pub Co, 1985.
    '''
    Ra = Gr * Pr
    # Auto-select the regime at Ra = 1E9 unless the caller forces one.
    if turbulent is None:
        turbulent = Ra > 1E9
    return 0.13 * Ra**(1 / 3.) if turbulent else 0.59 * Ra**0.25
"def",
"Nu_vertical_cylinder_McAdams_Weiss_Saunders",
"(",
"Pr",
",",
"Gr",
",",
"turbulent",
"=",
"None",
")",
":",
"Ra",
"=",
"Pr",
"*",
"Gr",
"if",
"turbulent",
"or",
"(",
"Ra",
">",
"1E9",
"and",
"turbulent",
"is",
"None",
")",
":",
"return",
"0.13",... | 38.704918 | 26.04918 |
def start_mon_service(distro, cluster, hostname):
    """
    Start the mon service, dispatching on the host's init system
    (sysvinit, upstart or systemd).

    :param distro: remote distro object carrying the connection and init type
    :param cluster: cluster name (used for config path / unit naming)
    :param hostname: mon id (mon.<hostname> / ceph-mon@<hostname>)
    """
    def _run(args):
        # Every service-management command shares the same connection and
        # short timeout; centralize the call.
        remoto.process.run(distro.conn, args, timeout=7)

    if distro.init == 'sysvinit':
        service = distro.conn.remote_module.which_service()
        _run([
            service, 'ceph',
            '-c', '/etc/ceph/{cluster}.conf'.format(cluster=cluster),
            'start',
            'mon.{hostname}'.format(hostname=hostname),
        ])
        system.enable_service(distro.conn)
    elif distro.init == 'upstart':
        _run([
            'initctl', 'emit', 'ceph-mon',
            'cluster={cluster}'.format(cluster=cluster),
            'id={hostname}'.format(hostname=hostname),
        ])
    elif distro.init == 'systemd':
        unit = 'ceph-mon@{hostname}'.format(hostname=hostname)
        # enable ceph target for this host (in case it isn't already enabled)
        _run(['systemctl', 'enable', 'ceph.target'])
        # enable and start this mon instance
        _run(['systemctl', 'enable', unit])
        _run(['systemctl', 'start', unit])
"def",
"start_mon_service",
"(",
"distro",
",",
"cluster",
",",
"hostname",
")",
":",
"if",
"distro",
".",
"init",
"==",
"'sysvinit'",
":",
"service",
"=",
"distro",
".",
"conn",
".",
"remote_module",
".",
"which_service",
"(",
")",
"remoto",
".",
"process... | 26.75 | 18.75 |
def from_array(array):
    """
    Deserialize a new Game from a given dictionary.

    :return: new Game instance, or None for an empty/missing input.
    :rtype: Game
    """
    if not array:  # None or empty dict
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    text = array.get('text')
    entities = array.get('text_entities')
    animation = array.get('animation')
    data = {
        'title': u(array.get('title')),
        'description': u(array.get('description')),
        'photo': PhotoSize.from_array_list(array.get('photo'), list_level=1),
        # Optional fields stay None when absent.
        'text': u(text) if text is not None else None,
        'text_entities': MessageEntity.from_array_list(entities, list_level=1) if entities is not None else None,
        'animation': Animation.from_array(animation) if animation is not None else None,
        '_raw': array,
    }
    return Game(**data)
"def",
"from_array",
"(",
"array",
")",
":",
"if",
"array",
"is",
"None",
"or",
"not",
"array",
":",
"return",
"None",
"# end if",
"assert_type_or_raise",
"(",
"array",
",",
"dict",
",",
"parameter_name",
"=",
"\"array\"",
")",
"data",
"=",
"{",
"}",
"da... | 43.52381 | 26.666667 |
def get_indices(integers):
    """
    :param integers: a sequence of integers (with repetitions)
    :returns: a dict integer -> [(start, stop), ...]

    >>> get_indices([0, 0, 3, 3, 3, 2, 2, 0])
    {0: [(0, 2), (7, 8)], 3: [(2, 5)], 2: [(5, 7)]}
    """
    indices = AccumDict(accum=[])  # value -> list of (start, stop) runs
    start = 0
    for value, run in itertools.groupby(integers):
        stop = start + sum(1 for _ in run)
        indices[value].append((start, stop))
        start = stop
    return indices
"def",
"get_indices",
"(",
"integers",
")",
":",
"indices",
"=",
"AccumDict",
"(",
"accum",
"=",
"[",
"]",
")",
"# idx -> [(start, stop), ...]",
"start",
"=",
"0",
"for",
"i",
",",
"vals",
"in",
"itertools",
".",
"groupby",
"(",
"integers",
")",
":",
"n"... | 32.6 | 14.466667 |
def density(a_M, *args, **kwargs):
    """
    Determine the "density" of a passed matrix.

    ARGS
        a_M         matrix to analyze
        *args[0]    optional mask matrix; if passed, calculate density of
                    a_M using non-zero elements of args[0] as a mask.

    Two densities are returned:

        o f_actualDensity -- density using the matrix values as "mass"
        o f_binaryDensity -- density irrespective of actual matrix values

    For a matrix of all ones the two densities are equal.

    NOTE: when a mask is supplied, a_M is scaled by it *in place* and the
    caller sees the masked matrix afterwards.
    """
    num_rows, num_cols = a_M.shape
    mask = ones((num_rows, num_cols))
    if args:
        mask = args[0]
        a_M *= mask  # in-place masking, visible to the caller
    # Binary mass counts nonzero cells regardless of magnitude.
    binary_mass = float(size(nonzero(a_M)[0]))
    actual_mass = a_M.sum()
    area = float(size(nonzero(mask)[0]))
    return actual_mass / area, binary_mass / area
"def",
"density",
"(",
"a_M",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"rows",
",",
"cols",
"=",
"a_M",
".",
"shape",
"a_Mmask",
"=",
"ones",
"(",
"(",
"rows",
",",
"cols",
")",
")",
"if",
"len",
"(",
"args",
")",
":",
"a_Mmask",
... | 32.210526 | 21 |
def check(self, state, when):
    """
    Checks state `state` to see if the breakpoint should fire.

    :param state: The state.
    :param when: Whether the check is happening before or after the event.
    :return: A boolean representing whether the checkpoint should fire.
    """
    # Fire only when enabled and the timing matches (BP_BOTH matches either).
    ok = self.enabled and (when == self.when or self.when == BP_BOTH)
    if not ok:
        return ok
    l.debug("... after enabled and when: %s", ok)
    # Each kwarg (excluding the '*_unique' modifiers) names an attribute of
    # state.inspect that must be compatible with the given value.
    for a in [ _ for _ in self.kwargs if not _.endswith("_unique") ]:
        current_expr = getattr(state.inspect, a)
        needed = self.kwargs.get(a, None)
        l.debug("... checking condition %s", a)
        if current_expr is None and needed is None:
            # Both unset: trivially satisfied.
            l.debug("...... both None, True")
            c_ok = True
        elif current_expr is not None and needed is not None:
            # The needed value must be a possible solution of the expression.
            if state.solver.solution(current_expr, needed):
                l.debug("...... is_solution!")
                c_ok = True
            else:
                l.debug("...... not solution...")
                c_ok = False
            # Unless '<attr>_unique' was explicitly set False, the
            # expression must also be single-valued.
            if c_ok and self.kwargs.get(a+'_unique', True):
                l.debug("...... checking uniqueness")
                if not state.solver.unique(current_expr):
                    l.debug("...... not unique")
                    c_ok = False
        else:
            # Exactly one side is None: cannot match.
            l.debug("...... one None, False")
            c_ok = False
        ok = ok and c_ok
        # Short-circuit on the first failed condition.
        if not ok:
            return ok
        l.debug("... after condition %s: %s", a, ok)
    # Finally apply the user-supplied condition callback, if any.
    ok = ok and (self.condition is None or self.condition(state))
    l.debug("... after condition func: %s", ok)
    return ok
"def",
"check",
"(",
"self",
",",
"state",
",",
"when",
")",
":",
"ok",
"=",
"self",
".",
"enabled",
"and",
"(",
"when",
"==",
"self",
".",
"when",
"or",
"self",
".",
"when",
"==",
"BP_BOTH",
")",
"if",
"not",
"ok",
":",
"return",
"ok",
"l",
".... | 37.808511 | 20.06383 |
def _load_hangul_syllable_types():
    """
    Parse "HangulSyllableType.txt" from the Unicode Character Database (UCD)
    and populate the module-level lookup table mapping each Hangul syllable
    codepoint to its type: "L", "V", "T", "LV" or "LVT".

    For more info on the UCD, see: https://www.unicode.org/ucd/
    """
    here = os.path.abspath(os.path.dirname(__file__))
    path = os.path.join(here, "HangulSyllableType.txt")
    with codecs.open(path, mode="r", encoding="utf-8") as fp:
        for raw in fp:
            line = raw.strip()
            # Skip blank lines and comment lines (comments start with '#').
            if not line or raw.startswith("#"):
                continue
            fields = line.split(";")
            # The second field is "<type> # <comment>"; keep only the type.
            syllable_type, _ = map(six.text_type.strip, fields[1].split("#"))
            codepoints = fields[0].strip()
            if ".." in codepoints:
                # A "XXXX..YYYY" range of codepoints, inclusive at both ends.
                start, end = (int(cp, 16) for cp in codepoints.split(".."))
                for cp in range(start, end + 1):
                    _hangul_syllable_types[cp] = syllable_type
            else:
                # A single codepoint.
                _hangul_syllable_types[int(codepoints, 16)] = syllable_type
"def",
"_load_hangul_syllable_types",
"(",
")",
":",
"filename",
"=",
"\"HangulSyllableType.txt\"",
"current_dir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"with",
"codecs",
".",
"open",
"(",
... | 61.35 | 29.35 |
def rtsp_url(self, channelno=None, typeno=None):
    """
    Return RTSP streaming url.

    Params:
        channelno: integer, the video channel index which starts from 1,
                   default 1 if not specified.
        typeno: the stream type, default 0 if not specified. It can be:
                0-Main Stream
                1-Extra Stream 1 (Sub Stream)
                2-Extra Stream 2 (Sub Stream)
    """
    channelno = 1 if channelno is None else channelno
    typeno = 0 if typeno is None else typeno
    cmd = 'cam/realmonitor?channel={0}&subtype={1}'.format(channelno, typeno)
    # Pull the RTSP port out of the camera config; omit it when absent.
    port = ''
    for token in self.rtsp_config.split():
        if token.startswith('table.RTSP.Port='):
            port = ':' + token.split('=')[1]
            break
    return 'rtsp://{}:{}@{}{}/{}'.format(
        self._user, self._password, self._host, port, cmd)
"def",
"rtsp_url",
"(",
"self",
",",
"channelno",
"=",
"None",
",",
"typeno",
"=",
"None",
")",
":",
"if",
"channelno",
"is",
"None",
":",
"channelno",
"=",
"1",
"if",
"typeno",
"is",
"None",
":",
"typeno",
"=",
"0",
"cmd",
"=",
"'cam/realmonitor?chann... | 31.935484 | 20.129032 |
def merge_asof(left, right, on=None,
               left_on=None, right_on=None,
               left_index=False, right_index=False,
               by=None, left_by=None, right_by=None,
               suffixes=('_x', '_y'),
               tolerance=None,
               allow_exact_matches=True,
               direction='backward'):
    """Perform an asof merge. This is similar to a left-join except that we
    match on nearest key rather than equal keys.

    Both DataFrames must be sorted by the key.

    For each row in the left DataFrame:

      - A "backward" search selects the last row in the right DataFrame whose
        'on' key is less than or equal to the left's key.
      - A "forward" search selects the first row in the right DataFrame whose
        'on' key is greater than or equal to the left's key.
      - A "nearest" search selects the row in the right DataFrame whose 'on'
        key is closest in absolute distance to the left's key.

    The default is "backward" and is compatible in versions below 0.20.0.
    The direction parameter was added in version 0.20.0 and introduces
    "forward" and "nearest".

    Optionally match on equivalent keys with 'by' before searching with 'on'.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    left_index : boolean
        Use the index of the left DataFrame as the join key.

        .. versionadded:: 0.19.2

    right_index : boolean
        Use the index of the right DataFrame as the join key.

        .. versionadded:: 0.19.2

    by : column name or list of column names
        Match on these columns before performing merge operation.
    left_by : column name
        Field names to match on in the left DataFrame.

        .. versionadded:: 0.19.2

    right_by : column name
        Field names to match on in the right DataFrame.

        .. versionadded:: 0.19.2

    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    tolerance : integer or Timedelta, optional, default None
        Select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : boolean, default True

        - If True, allow matching with the same 'on' value
          (i.e. less-than-or-equal-to / greater-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than / strictly greater-than)

    direction : 'backward' (default), 'forward', or 'nearest'
        Whether to search for prior, subsequent, or closest matches.

        .. versionadded:: 0.20.0

    Returns
    -------
    merged : DataFrame

    See Also
    --------
    merge
    merge_ordered

    Examples
    --------
    >>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']})
    >>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
    ...                       'right_val': [1, 2, 3, 6, 7]})
    >>> pd.merge_asof(left, right, on='a')
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0

    >>> pd.merge_asof(left, right, on='a', direction='forward')
        a left_val  right_val
    0   1        a        1.0
    1   5        b        6.0
    2  10        c        NaN

    >>> pd.merge_asof(left, right, on='a', direction='nearest')
        a left_val  right_val
    0   1        a          1
    1   5        b          6
    2  10        c          7

    Indexed DataFrames work via ``left_index``/``right_index``, and
    time-series data is typically merged with ``on='time'`` plus
    ``by='ticker'`` and an optional ``tolerance=pd.Timedelta(...)`` window
    (optionally with ``allow_exact_matches=False`` to exclude same-instant
    matches).
    """
    # All of the matching logic lives in the internal _AsOfMerge operation;
    # this wrapper only pins how='asof' and materializes the result.
    op = _AsOfMerge(left, right,
                    on=on, left_on=left_on, right_on=right_on,
                    left_index=left_index, right_index=right_index,
                    by=by, left_by=left_by, right_by=right_by,
                    suffixes=suffixes,
                    how='asof', tolerance=tolerance,
                    allow_exact_matches=allow_exact_matches,
                    direction=direction)
    return op.get_result()
"def",
"merge_asof",
"(",
"left",
",",
"right",
",",
"on",
"=",
"None",
",",
"left_on",
"=",
"None",
",",
"right_on",
"=",
"None",
",",
"left_index",
"=",
"False",
",",
"right_index",
"=",
"False",
",",
"by",
"=",
"None",
",",
"left_by",
"=",
"None",... | 35.060086 | 21.927039 |
def compose_title(projects, data):
    """ Compose the projects JSON file only with the projects name

    :param projects: projects.json
    :param data: eclipse JSON with the origin format
    :return: projects.json with titles
    """
    # Keep only each project's title, nested under 'meta'.
    for name, info in data.items():
        projects[name] = {'meta': {'title': info['title']}}
    return projects
"def",
"compose_title",
"(",
"projects",
",",
"data",
")",
":",
"for",
"project",
"in",
"data",
":",
"projects",
"[",
"project",
"]",
"=",
"{",
"'meta'",
":",
"{",
"'title'",
":",
"data",
"[",
"project",
"]",
"[",
"'title'",
"]",
"}",
"}",
"return",
... | 28.071429 | 14.142857 |
def attack_single_step(self, x, eta, g_feat):
    """
    TensorFlow implementation of the Fast Feature Gradient. This is a
    single step attack similar to Fast Gradient Method that attacks an
    internal representation.

    :param x: the input placeholder
    :param eta: A tensor the same shape as x that holds the perturbation.
    :param g_feat: model's internal tensor for guide
    :return: a tensor holding the updated perturbation eta (clipped to the
        attack's norm ball), not the adversarial example itself
    """
    adv_x = x + eta
    # Internal representation of the candidate adversarial example.
    a_feat = self.model.fprop(adv_x)[self.layer]
    # feat.shape = (batch, c) or (batch, w, h, c)
    axis = list(range(1, len(a_feat.shape)))
    # Compute loss
    # This is a targeted attack, hence the negative sign
    loss = -reduce_sum(tf.square(a_feat - g_feat), axis)
    # Define gradient of loss wrt input
    grad, = tf.gradients(loss, adv_x)
    # Multiply by constant epsilon
    scaled_signed_grad = self.eps_iter * tf.sign(grad)
    # Add perturbation to original example to obtain adversarial example
    adv_x = adv_x + scaled_signed_grad
    # If clipping is needed,
    # reset all values outside of [clip_min, clip_max]
    if (self.clip_min is not None) and (self.clip_max is not None):
        adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
    # Stop gradients so the outer optimization does not backprop through
    # this attack step.
    adv_x = tf.stop_gradient(adv_x)
    # Recover the perturbation and project it back onto the eps-ball.
    eta = adv_x - x
    eta = clip_eta(eta, self.ord, self.eps)
    return eta
"def",
"attack_single_step",
"(",
"self",
",",
"x",
",",
"eta",
",",
"g_feat",
")",
":",
"adv_x",
"=",
"x",
"+",
"eta",
"a_feat",
"=",
"self",
".",
"model",
".",
"fprop",
"(",
"adv_x",
")",
"[",
"self",
".",
"layer",
"]",
"# feat.shape = (batch, c) or ... | 31.571429 | 20.571429 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.