text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def delete_collection(self, collection_name, database_name=None):
"""
Deletes an existing collection in the CosmosDB database.
"""
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
self.get_conn().DeleteContainer(
get_collection_link(self.__get_database_name(database_name), collection_name)) | [
"def",
"delete_collection",
"(",
"self",
",",
"collection_name",
",",
"database_name",
"=",
"None",
")",
":",
"if",
"collection_name",
"is",
"None",
":",
"raise",
"AirflowBadRequest",
"(",
"\"Collection name cannot be None.\"",
")",
"self",
".",
"get_conn",
"(",
"... | 42.888889 | 19.333333 |
def getLatency(self, instId: int) -> float:
"""
Return a dict with client identifier as a key and calculated latency as a value
"""
if len(self.clientAvgReqLatencies) == 0:
return 0.0
return self.clientAvgReqLatencies[instId].get_avg_latency() | [
"def",
"getLatency",
"(",
"self",
",",
"instId",
":",
"int",
")",
"->",
"float",
":",
"if",
"len",
"(",
"self",
".",
"clientAvgReqLatencies",
")",
"==",
"0",
":",
"return",
"0.0",
"return",
"self",
".",
"clientAvgReqLatencies",
"[",
"instId",
"]",
".",
... | 41.285714 | 14.714286 |
def parse_reports(self):
""" Find RSeQC junction_saturation frequency reports and parse their data """
# Set up vars
self.junction_saturation_all = dict()
self.junction_saturation_known = dict()
self.junction_saturation_novel = dict()
# Go through files and parse data
for f in self.find_log_files('rseqc/junction_saturation'):
parsed = dict()
for l in f['f'].splitlines():
r = re.search(r"^([xyzw])=c\(([\d,]+)\)$", l)
if r:
parsed[r.group(1)] = [float(i) for i in r.group(2).split(',')]
if len(parsed) == 4:
if parsed['z'][-1] == 0:
log.warn("Junction saturation data all zeroes, skipping: '{}'".format(f['s_name']))
else:
if f['s_name'] in self.junction_saturation_all:
log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
self.add_data_source(f, section='junction_saturation')
self.junction_saturation_all[f['s_name']] = OrderedDict()
self.junction_saturation_known[f['s_name']] = OrderedDict()
self.junction_saturation_novel[f['s_name']] = OrderedDict()
for k, v in enumerate(parsed['x']):
self.junction_saturation_all[f['s_name']][v] = parsed['z'][k]
self.junction_saturation_known[f['s_name']][v] = parsed['y'][k]
self.junction_saturation_novel[f['s_name']][v] = parsed['w'][k]
# Filter to strip out ignored sample names
self.junction_saturation_all = self.ignore_samples(self.junction_saturation_all)
self.junction_saturation_known = self.ignore_samples(self.junction_saturation_known)
self.junction_saturation_novel = self.ignore_samples(self.junction_saturation_novel)
if len(self.junction_saturation_all) > 0:
# Add line graph to section
pconfig = {
'id': 'rseqc_junction_saturation_plot',
'title': 'RSeQC: Junction Saturation',
'ylab': 'Number of Junctions',
'ymin': 0,
'xlab': "Percent of reads",
'xmin': 0,
'xmax': 100,
'tt_label': "<strong>{point.x}% of reads</strong>: {point.y:.2f}",
'data_labels': [
{'name': 'Known Junctions'},
{'name': 'Novel Junctions'},
{'name': 'All Junctions'}
],
'cursor': 'pointer',
'click_func': plot_single()
}
self.add_section (
name = 'Junction Saturation',
anchor = 'rseqc-junction_saturation',
description = '''<a href="http://rseqc.sourceforge.net/#junction-saturation-py" target="_blank">Junction Saturation</a>
counts the number of known splicing junctions that are observed
in each dataset. If sequencing depth is sufficient, all (annotated) splice junctions should
be rediscovered, resulting in a curve that reaches a plateau. Missing low abundance splice
junctions can affect downstream analysis.</p>
<div class="alert alert-info" id="rseqc-junction_sat_single_hint">
<span class="glyphicon glyphicon-hand-up"></span>
Click a line to see the data side by side (as in the original RSeQC plot).
</div><p>''',
plot = linegraph.plot([
self.junction_saturation_known,
self.junction_saturation_novel,
self.junction_saturation_all
], pconfig)
)
# Return number of samples found
return len(self.junction_saturation_all) | [
"def",
"parse_reports",
"(",
"self",
")",
":",
"# Set up vars",
"self",
".",
"junction_saturation_all",
"=",
"dict",
"(",
")",
"self",
".",
"junction_saturation_known",
"=",
"dict",
"(",
")",
"self",
".",
"junction_saturation_novel",
"=",
"dict",
"(",
")",
"# ... | 47.973684 | 23.197368 |
def dotplot(args):
"""
%prog dotplot map.csv ref.fasta
Make dotplot between chromosomes and linkage maps.
The input map is csv formatted, for example:
ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
scaffold_2707,11508,1,0
scaffold_2707,11525,1,1.2
"""
from jcvi.assembly.allmaps import CSVMapLine
from jcvi.formats.sizes import Sizes
from jcvi.utils.natsort import natsorted
from jcvi.graphics.base import shorten
from jcvi.graphics.dotplot import plt, savefig, markup, normalize_axes, \
downsample, plot_breaks_and_labels, thousands
p = OptionParser(dotplot.__doc__)
p.set_outfile(outfile=None)
opts, args, iopts = p.set_image_options(args, figsize="8x8",
style="dark", dpi=90, cmap="copper")
if len(args) != 2:
sys.exit(not p.print_help())
csvfile, fastafile = args
sizes = natsorted(Sizes(fastafile).mapping.items())
seen = set()
raw_data = []
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1]) # the whole canvas
ax = fig.add_axes([.1, .1, .8, .8]) # the dot plot
fp = must_open(csvfile)
for row in fp:
m = CSVMapLine(row)
seen.add(m.seqid)
raw_data.append(m)
# X-axis is the genome assembly
ctgs, ctg_sizes = zip(*sizes)
xsize = sum(ctg_sizes)
qb = list(np.cumsum(ctg_sizes))
qbreaks = list(zip(ctgs, [0] + qb, qb))
qstarts = dict(zip(ctgs, [0] + qb))
# Y-axis is the map
key = lambda x: x.lg
raw_data.sort(key=key)
ssizes = {}
for lg, d in groupby(raw_data, key=key):
ssizes[lg] = max([x.cm for x in d])
ssizes = natsorted(ssizes.items())
lgs, lg_sizes = zip(*ssizes)
ysize = sum(lg_sizes)
sb = list(np.cumsum(lg_sizes))
sbreaks = list(zip([("LG" + x) for x in lgs], [0] + sb, sb))
sstarts = dict(zip(lgs, [0] + sb))
# Re-code all the scatter dots
data = [(qstarts[x.seqid] + x.pos, sstarts[x.lg] + x.cm, 'g') \
for x in raw_data if (x.seqid in qstarts)]
npairs = downsample(data)
x, y, c = zip(*data)
ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)
# Flip X-Y label
gy, gx = op.basename(csvfile).split(".")[:2]
gx, gy = shorten(gx, maxchar=30), shorten(gy, maxchar=30)
xlim, ylim = plot_breaks_and_labels(fig, root, ax, gx, gy,
xsize, ysize, qbreaks, sbreaks)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
title = "Alignment: {} vs {}".format(gx, gy)
title += " ({} markers)".format(thousands(npairs))
root.set_title(markup(title), x=.5, y=.96, color="k")
logging.debug(title)
normalize_axes(root)
image_name = opts.outfile or \
(csvfile.rsplit(".", 1)[0] + "." + iopts.format)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
fig.clear() | [
"def",
"dotplot",
"(",
"args",
")",
":",
"from",
"jcvi",
".",
"assembly",
".",
"allmaps",
"import",
"CSVMapLine",
"from",
"jcvi",
".",
"formats",
".",
"sizes",
"import",
"Sizes",
"from",
"jcvi",
".",
"utils",
".",
"natsort",
"import",
"natsorted",
"from",
... | 32.37931 | 17.758621 |
def dhcp_options_exists(dhcp_options_id=None, name=None, dhcp_options_name=None,
tags=None, region=None, key=None, keyid=None, profile=None):
'''
Check if a dhcp option exists.
Returns True if the dhcp option exists; Returns False otherwise.
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.dhcp_options_exists dhcp_options_id='dhcp-a0bl34pp'
'''
if name:
log.warning('boto_vpc.dhcp_options_exists: name parameter is deprecated '
'use dhcp_options_name instead.')
dhcp_options_name = name
return resource_exists('dhcp_options', name=dhcp_options_name,
resource_id=dhcp_options_id, tags=tags,
region=region, key=key, keyid=keyid,
profile=profile) | [
"def",
"dhcp_options_exists",
"(",
"dhcp_options_id",
"=",
"None",
",",
"name",
"=",
"None",
",",
"dhcp_options_name",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
... | 34.083333 | 29.75 |
def create_model(model_folder, model_type, topology, override):
"""
Create a model if it doesn't exist already.
Parameters
----------
model_folder :
The path to the folder where the model is described with an `info.yml`
model_type :
MLP
topology :
Something like 160:500:369 - that means the first layer has 160
neurons, the second layer has 500 neurons and the last layer has 369
neurons.
override : boolean
If a model exists, override it.
"""
latest_model = utils.get_latest_in_folder(model_folder, ".json")
if (latest_model == "") or override:
logging.info("Create a base model...")
model_src = os.path.join(model_folder, "model-0.json")
command = "%s make %s %s > %s" % (utils.get_nntoolkit(),
model_type,
topology,
model_src)
logging.info(command)
os.system(command)
else:
logging.info("Model file already existed.") | [
"def",
"create_model",
"(",
"model_folder",
",",
"model_type",
",",
"topology",
",",
"override",
")",
":",
"latest_model",
"=",
"utils",
".",
"get_latest_in_folder",
"(",
"model_folder",
",",
"\".json\"",
")",
"if",
"(",
"latest_model",
"==",
"\"\"",
")",
"or"... | 36.689655 | 19.103448 |
def headloss_manifold(FlowRate, Diam, Length, KMinor, Nu, PipeRough, NumOutlets):
"""Return the total head loss through the manifold."""
#Checking input validity - inputs not checked here are checked by
#functions this function calls.
ut.check_range([NumOutlets, ">0, int", 'Number of outlets'])
return (headloss(FlowRate, Diam, Length, Nu, PipeRough, KMinor).magnitude
* ((1/3 )
+ (1 / (2*NumOutlets))
+ (1 / (6*NumOutlets**2))
)
) | [
"def",
"headloss_manifold",
"(",
"FlowRate",
",",
"Diam",
",",
"Length",
",",
"KMinor",
",",
"Nu",
",",
"PipeRough",
",",
"NumOutlets",
")",
":",
"#Checking input validity - inputs not checked here are checked by",
"#functions this function calls.",
"ut",
".",
"check_rang... | 46.454545 | 19 |
def locale(self):
'''
Do a lookup for the locale code that is set for this layout.
NOTE: USB HID specifies only 35 different locales. If your layout does not fit, it should be set to Undefined/0
@return: Tuple (<USB HID locale code>, <name>)
'''
name = self.json_data['hid_locale']
# Set to Undefined/0 if not set
if name is None:
name = "Undefined"
return (int(self.json_data['from_hid_locale'][name]), name) | [
"def",
"locale",
"(",
"self",
")",
":",
"name",
"=",
"self",
".",
"json_data",
"[",
"'hid_locale'",
"]",
"# Set to Undefined/0 if not set",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"\"Undefined\"",
"return",
"(",
"int",
"(",
"self",
".",
"json_data",
... | 32.2 | 27.933333 |
def delete_priority_rule(db, rule_id: int) -> None:
"""Delete a file priority rule."""
with db:
cur = db.cursor()
cur.execute('DELETE FROM file_priority WHERE id=?', (rule_id,)) | [
"def",
"delete_priority_rule",
"(",
"db",
",",
"rule_id",
":",
"int",
")",
"->",
"None",
":",
"with",
"db",
":",
"cur",
"=",
"db",
".",
"cursor",
"(",
")",
"cur",
".",
"execute",
"(",
"'DELETE FROM file_priority WHERE id=?'",
",",
"(",
"rule_id",
",",
")... | 39.4 | 17 |
def rename(self, new_name, session=None, **kwargs):
"""Rename this collection.
If operating in auth mode, client must be authorized as an
admin to perform this operation. Raises :class:`TypeError` if
`new_name` is not an instance of :class:`basestring`
(:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidName`
if `new_name` is not a valid collection name.
:Parameters:
- `new_name`: new name for this collection
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
- `**kwargs` (optional): additional arguments to the rename command
may be passed as keyword arguments to this helper method
(i.e. ``dropTarget=True``)
.. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
this collection is automatically applied to this operation when using
MongoDB >= 3.4.
.. versionchanged:: 3.6
Added ``session`` parameter.
.. versionchanged:: 3.4
Apply this collection's write concern automatically to this operation
when connected to MongoDB >= 3.4.
"""
if not isinstance(new_name, string_type):
raise TypeError("new_name must be an "
"instance of %s" % (string_type.__name__,))
if not new_name or ".." in new_name:
raise InvalidName("collection names cannot be empty")
if new_name[0] == "." or new_name[-1] == ".":
raise InvalidName("collecion names must not start or end with '.'")
if "$" in new_name and not new_name.startswith("oplog.$main"):
raise InvalidName("collection names must not contain '$'")
new_name = "%s.%s" % (self.__database.name, new_name)
cmd = SON([("renameCollection", self.__full_name), ("to", new_name)])
cmd.update(kwargs)
write_concern = self._write_concern_for_cmd(cmd, session)
with self._socket_for_writes(session) as sock_info:
with self.__database.client._tmp_session(session) as s:
return sock_info.command(
'admin', cmd,
write_concern=write_concern,
parse_write_concern_error=True,
session=s, client=self.__database.client) | [
"def",
"rename",
"(",
"self",
",",
"new_name",
",",
"session",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"new_name",
",",
"string_type",
")",
":",
"raise",
"TypeError",
"(",
"\"new_name must be an \"",
"\"instance of %s\... | 44.615385 | 22.25 |
def _handle_result(self, test, status, exception=None, message=None):
"""Create a :class:`~.TestResult` and add it to this
:class:`~ResultCollector`.
Parameters
----------
test : unittest.TestCase
The test that this result will represent.
status : haas.result.TestCompletionStatus
The status of the test.
exception : tuple
``exc_info`` tuple ``(type, value, traceback)``.
message : str
Optional message associated with the result (e.g. skip
reason).
"""
if self.buffer:
stderr = self._stderr_buffer.getvalue()
stdout = self._stdout_buffer.getvalue()
else:
stderr = stdout = None
started_time = self._test_timing.get(self._testcase_to_key(test))
if started_time is None and isinstance(test, ErrorHolder):
started_time = datetime.utcnow()
elif started_time is None:
raise RuntimeError(
'Missing test start! Please report this error as a bug in '
'haas.')
completion_time = datetime.utcnow()
duration = TestDuration(started_time, completion_time)
result = TestResult.from_test_case(
test,
status,
duration=duration,
exception=exception,
message=message,
stdout=stdout,
stderr=stderr,
)
self.add_result(result)
return result | [
"def",
"_handle_result",
"(",
"self",
",",
"test",
",",
"status",
",",
"exception",
"=",
"None",
",",
"message",
"=",
"None",
")",
":",
"if",
"self",
".",
"buffer",
":",
"stderr",
"=",
"self",
".",
"_stderr_buffer",
".",
"getvalue",
"(",
")",
"stdout",... | 33.568182 | 17.363636 |
def parse(self, filenames):
"""
Read and parse a filename or a list of filenames.
Files that cannot be opened are ignored. A single filename may also be given.
Return: list of successfully read files.
"""
filenames = list_strings(filenames)
read_ok = []
for fname in filenames:
try:
fh = open(fname)
except IOError:
logger.warning("Cannot open file %s" % fname)
continue
try:
self._read(fh, fname)
read_ok.append(fname)
except self.Error as e:
logger.warning("exception while parsing file %s:\n%s" % (fname, str(e)))
continue
finally:
fh.close()
# Add read_ok to the list of files that have been parsed.
self._filenames.extend(read_ok)
return read_ok | [
"def",
"parse",
"(",
"self",
",",
"filenames",
")",
":",
"filenames",
"=",
"list_strings",
"(",
"filenames",
")",
"read_ok",
"=",
"[",
"]",
"for",
"fname",
"in",
"filenames",
":",
"try",
":",
"fh",
"=",
"open",
"(",
"fname",
")",
"except",
"IOError",
... | 29.096774 | 19.806452 |
def _is_domain_match(domain: str, hostname: str) -> bool:
"""Implements domain matching adhering to RFC 6265."""
if hostname == domain:
return True
if not hostname.endswith(domain):
return False
non_matching = hostname[:-len(domain)]
if not non_matching.endswith("."):
return False
return not is_ip_address(hostname) | [
"def",
"_is_domain_match",
"(",
"domain",
":",
"str",
",",
"hostname",
":",
"str",
")",
"->",
"bool",
":",
"if",
"hostname",
"==",
"domain",
":",
"return",
"True",
"if",
"not",
"hostname",
".",
"endswith",
"(",
"domain",
")",
":",
"return",
"False",
"n... | 27.928571 | 17.642857 |
def delete_member(self, user):
"""Returns a response after attempting to remove
a member from the list.
"""
if not self.email_enabled:
raise EmailNotEnabledError("See settings.EMAIL_ENABLED")
return requests.delete(
f"{self.api_url}/{self.address}/members/{user.email}",
auth=("api", self.api_key),
) | [
"def",
"delete_member",
"(",
"self",
",",
"user",
")",
":",
"if",
"not",
"self",
".",
"email_enabled",
":",
"raise",
"EmailNotEnabledError",
"(",
"\"See settings.EMAIL_ENABLED\"",
")",
"return",
"requests",
".",
"delete",
"(",
"f\"{self.api_url}/{self.address}/members... | 37.5 | 12 |
def p_exprlt(p):
""" expr : expr LT expr
"""
a = int(p[1]) if p[1].isdigit() else 0
b = int(p[3]) if p[3].isdigit() else 0
p[0] = '1' if a < b else '0' | [
"def",
"p_exprlt",
"(",
"p",
")",
":",
"a",
"=",
"int",
"(",
"p",
"[",
"1",
"]",
")",
"if",
"p",
"[",
"1",
"]",
".",
"isdigit",
"(",
")",
"else",
"0",
"b",
"=",
"int",
"(",
"p",
"[",
"3",
"]",
")",
"if",
"p",
"[",
"3",
"]",
".",
"isdi... | 23.714286 | 10.857143 |
def getPayloadStruct(self, attributes, objType):
""" Function getPayloadStruct
Get the payload structure to do a creation or a modification
@param attribute: The data
@param objType: SubItem type (e.g: hostgroup for hostgroup_class)
@return RETURN: the payload
"""
payload = {self.payloadObj: attributes,
objType + "_class":
{self.payloadObj: attributes}}
return payload | [
"def",
"getPayloadStruct",
"(",
"self",
",",
"attributes",
",",
"objType",
")",
":",
"payload",
"=",
"{",
"self",
".",
"payloadObj",
":",
"attributes",
",",
"objType",
"+",
"\"_class\"",
":",
"{",
"self",
".",
"payloadObj",
":",
"attributes",
"}",
"}",
"... | 38.833333 | 13.333333 |
def getActiveJobsForClientInfo(self, clientInfo, fields=[]):
""" Fetch jobIDs for jobs in the table with optional fields given a
specific clientInfo """
# Form the sequence of field name strings that will go into the
# request
dbFields = [self._jobs.pubToDBNameDict[x] for x in fields]
dbFieldsStr = ','.join(['job_id'] + dbFields)
with ConnectionFactory.get() as conn:
query = 'SELECT %s FROM %s ' \
'WHERE client_info = %%s ' \
' AND status != %%s' % (dbFieldsStr, self.jobsTableName)
conn.cursor.execute(query, [clientInfo, self.STATUS_COMPLETED])
rows = conn.cursor.fetchall()
return rows | [
"def",
"getActiveJobsForClientInfo",
"(",
"self",
",",
"clientInfo",
",",
"fields",
"=",
"[",
"]",
")",
":",
"# Form the sequence of field name strings that will go into the",
"# request",
"dbFields",
"=",
"[",
"self",
".",
"_jobs",
".",
"pubToDBNameDict",
"[",
"x",
... | 38.705882 | 18.823529 |
def range(self, name):
"""Returns a list of :class:`Cell` objects from a specified range.
:param name: A string with range value in A1 notation, e.g. 'A1:A5'.
:type name: str
Alternatively, you may specify numeric boundaries. All values
index from 1 (one):
:param first_row: Row number
:type first_row: int
:param first_col: Row number
:type first_col: int
:param last_row: Row number
:type last_row: int
:param last_col: Row number
:type last_col: int
Example::
>>> # Using A1 notation
>>> worksheet.range('A1:B7')
[<Cell R1C1 "42">, ...]
>>> # Same with numeric boundaries
>>> worksheet.range(1, 1, 7, 2)
[<Cell R1C1 "42">, ...]
"""
range_label = '%s!%s' % (self.title, name)
data = self.spreadsheet.values_get(range_label)
start, end = name.split(':')
(row_offset, column_offset) = a1_to_rowcol(start)
(last_row, last_column) = a1_to_rowcol(end)
values = data.get('values', [])
rect_values = fill_gaps(
values,
rows=last_row - row_offset + 1,
cols=last_column - column_offset + 1
)
return [
Cell(row=i + row_offset, col=j + column_offset, value=value)
for i, row in enumerate(rect_values)
for j, value in enumerate(row)
] | [
"def",
"range",
"(",
"self",
",",
"name",
")",
":",
"range_label",
"=",
"'%s!%s'",
"%",
"(",
"self",
".",
"title",
",",
"name",
")",
"data",
"=",
"self",
".",
"spreadsheet",
".",
"values_get",
"(",
"range_label",
")",
"start",
",",
"end",
"=",
"name"... | 28.058824 | 19.098039 |
def user_line(self, frame, breakpoint_hits=None):
"""This function is called when we stop or break at this line."""
if not breakpoint_hits:
self.interaction(frame, None)
else:
commands_result = self.bp_commands(frame, breakpoint_hits)
if not commands_result:
self.interaction(frame, None)
else:
doprompt, silent = commands_result
if not silent:
self.print_stack_entry(self.stack[self.curindex])
if doprompt:
self._cmdloop()
self.forget() | [
"def",
"user_line",
"(",
"self",
",",
"frame",
",",
"breakpoint_hits",
"=",
"None",
")",
":",
"if",
"not",
"breakpoint_hits",
":",
"self",
".",
"interaction",
"(",
"frame",
",",
"None",
")",
"else",
":",
"commands_result",
"=",
"self",
".",
"bp_commands",
... | 41 | 12.4 |
def belanno(keyword: str, file: TextIO):
"""Write as a BEL annotation."""
directory = get_data_dir(keyword)
obo_url = f'http://purl.obolibrary.org/obo/{keyword}.obo'
obo_path = os.path.join(directory, f'{keyword}.obo')
obo_cache_path = os.path.join(directory, f'{keyword}.obo.pickle')
obo_getter = make_obo_getter(obo_url, obo_path, preparsed_path=obo_cache_path)
graph = obo_getter()
convert_obo_graph_to_belanno(
graph,
file=file,
) | [
"def",
"belanno",
"(",
"keyword",
":",
"str",
",",
"file",
":",
"TextIO",
")",
":",
"directory",
"=",
"get_data_dir",
"(",
"keyword",
")",
"obo_url",
"=",
"f'http://purl.obolibrary.org/obo/{keyword}.obo'",
"obo_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
... | 36.538462 | 19.769231 |
def attach_volume(self, volume, device="/dev/sdp"):
"""
Attach an EBS volume to this server
:param volume: EBS Volume to attach
:type volume: boto.ec2.volume.Volume
:param device: Device to attach to (default to /dev/sdp)
:type device: string
"""
if hasattr(volume, "id"):
volume_id = volume.id
else:
volume_id = volume
return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device) | [
"def",
"attach_volume",
"(",
"self",
",",
"volume",
",",
"device",
"=",
"\"/dev/sdp\"",
")",
":",
"if",
"hasattr",
"(",
"volume",
",",
"\"id\"",
")",
":",
"volume_id",
"=",
"volume",
".",
"id",
"else",
":",
"volume_id",
"=",
"volume",
"return",
"self",
... | 33.8 | 16.733333 |
def create_license_helper(self, lic):
"""
Handle single(no conjunction/disjunction) licenses.
Return the created node.
"""
if isinstance(lic, document.ExtractedLicense):
return self.create_extracted_license(lic)
if lic.identifier.rstrip('+') in config.LICENSE_MAP:
return URIRef(lic.url)
else:
matches = [l for l in self.document.extracted_licenses if l.identifier == lic.identifier]
if len(matches) != 0:
return self.create_extracted_license(matches[0])
else:
raise InvalidDocumentError('Missing extracted license: {0}'.format(lic.identifier)) | [
"def",
"create_license_helper",
"(",
"self",
",",
"lic",
")",
":",
"if",
"isinstance",
"(",
"lic",
",",
"document",
".",
"ExtractedLicense",
")",
":",
"return",
"self",
".",
"create_extracted_license",
"(",
"lic",
")",
"if",
"lic",
".",
"identifier",
".",
... | 45.2 | 18.933333 |
def _add_catch(self, catch_block):
"""Add a catch block (exception variable declaration and block)
to this try block structure.
"""
assert isinstance(catch_block, self.CodeCatchBlock)
self.catches.append(catch_block) | [
"def",
"_add_catch",
"(",
"self",
",",
"catch_block",
")",
":",
"assert",
"isinstance",
"(",
"catch_block",
",",
"self",
".",
"CodeCatchBlock",
")",
"self",
".",
"catches",
".",
"append",
"(",
"catch_block",
")"
] | 42.5 | 4.166667 |
def exception_handler(exc, context):
"""
Returns the response that should be used for any given exception.
By default we handle the REST framework `APIException`, and also
Django's built-in `Http404` and `PermissionDenied` exceptions.
Any unhandled exceptions may return `None`, which will cause a 500 error
to be raised.
"""
if isinstance(exc, exceptions.APIException):
headers = {}
if getattr(exc, 'auth_header', None):
headers['WWW-Authenticate'] = exc.auth_header
if getattr(exc, 'wait', None):
headers['Retry-After'] = '%d' % exc.wait
if isinstance(exc.detail, (list, dict)):
data = exc.detail
else:
data = {'message': exc.detail}
set_rollback()
return Response(data, status=exc.status_code, headers=headers)
elif isinstance(exc, Http404):
msg = _('Not found.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_404_NOT_FOUND)
elif isinstance(exc, PermissionDenied):
msg = _('Permission denied.')
data = {'message': six.text_type(msg)}
set_rollback()
return Response(data, status=status.HTTP_403_FORBIDDEN)
# Note: Unhandled exceptions will raise a 500 error.
return None | [
"def",
"exception_handler",
"(",
"exc",
",",
"context",
")",
":",
"if",
"isinstance",
"(",
"exc",
",",
"exceptions",
".",
"APIException",
")",
":",
"headers",
"=",
"{",
"}",
"if",
"getattr",
"(",
"exc",
",",
"'auth_header'",
",",
"None",
")",
":",
"hea... | 31.853659 | 20.097561 |
def get_impala_queries(self, start_time, end_time, filter_str="", limit=100,
offset=0):
"""
Returns a list of queries that satisfy the filter
@type start_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param start_time: Queries must have ended after this time
@type end_time: datetime.datetime. Note that the datetime must either be
time zone aware or specified in the server time zone. See
the python datetime documentation for more details about
python's time zone handling.
@param end_time: Queries must have started before this time
@param filter_str: A filter to apply to the queries. For example:
'user = root and queryDuration > 5s'
@param limit: The maximum number of results to return
@param offset: The offset into the return list
@since: API v4
"""
params = {
'from': start_time.isoformat(),
'to': end_time.isoformat(),
'filter': filter_str,
'limit': limit,
'offset': offset,
}
return self._get("impalaQueries", ApiImpalaQueryResponse,
params=params, api_version=4) | [
"def",
"get_impala_queries",
"(",
"self",
",",
"start_time",
",",
"end_time",
",",
"filter_str",
"=",
"\"\"",
",",
"limit",
"=",
"100",
",",
"offset",
"=",
"0",
")",
":",
"params",
"=",
"{",
"'from'",
":",
"start_time",
".",
"isoformat",
"(",
")",
",",... | 45.433333 | 21.166667 |
def export_sbml(model, y0=None, volume=1.0, is_valid=True):
"""
Export a model as a SBMLDocument.
Parameters
----------
model : NetworkModel
y0 : dict
Initial condition.
volume : Real or Real3, optional
A size of the simulation volume. 1 as a default.
is_valid : bool, optional
Check if the generated model is valid. True as a default.
"""
y0 = y0 or {}
import libsbml
document = libsbml.SBMLDocument(3, 1)
# ns = libsbml.XMLNamespaces()
# ns.add("http://www.ecell.org/ns/ecell4", "ecell4") #XXX: DUMMY URI
# document.setNamespaces(ns)
m = document.createModel()
comp1 = m.createCompartment()
comp1.setId('world')
comp1.setConstant(True)
if unit.HAS_PINT:
if isinstance(volume, unit._Quantity):
if unit.STRICT:
if isinstance(volume.magnitude, ecell4_base.core.Real3) and not unit.check_dimensionality(volume, '[length]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[length]'".format(
volume.dimensionality, volume.u))
elif not unit.check_dimensionality(volume, '[volume]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[volume]'".format(
volume.dimensionality, volume.u))
volume = volume.to_base_units().magnitude
y0 = y0.copy()
for key, value in y0.items():
if isinstance(value, unit._Quantity):
if not unit.STRICT:
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[substance]'):
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[concentration]'):
volume = w.volume() if not isinstance(w, ecell4_base.spatiocyte.SpatiocyteWorld) else w.actual_volume()
y0[key] = value.to_base_units().magnitude * volume
else:
raise ValueError(
"Cannot convert a quantity for [{}] from '{}' ({}) to '[substance]'".format(
key, value.dimensionality, value.u))
if isinstance(volume, ecell4_base.core.Real3):
comp1.setSize(volume[0] * volume[1] * volume[2])
else:
comp1.setSize(volume)
comp1.setSpatialDimensions(3)
species_list = []
for rr in model.reaction_rules():
for sp in itertools.chain(rr.reactants(), rr.products()):
species_list.append(sp)
species_list = list(set(species_list))
species_list.sort()
sid_map = {}
for cnt, sp in enumerate(species_list):
sid_map[sp.serial()] = "s{:d}".format(cnt)
for sp in species_list:
sid = sid_map[sp.serial()]
s1 = m.createSpecies()
s1.setId(sid)
s1.setName(sp.serial())
s1.setCompartment('world')
s1.setConstant(False)
if sp.serial() in y0.keys():
s1.setInitialAmount(y0[sp.serial()])
else:
s1.setInitialAmount(0)
s1.setBoundaryCondition(False)
s1.setHasOnlySubstanceUnits(False)
# s1.appendAnnotation('<annotation><ecell4:extension><ecell4:species serial="{:s}"/></ecell4:extension></annotation>'.format(sp.serial()))
for cnt, rr in enumerate(model.reaction_rules()):
desc = rr.get_descriptor()
r1 = m.createReaction()
r1.setId("r{:d}".format(cnt))
r1.setReversible(True)
r1.setFast(False)
kinetic_law = r1.createKineticLaw()
species_coef_map = {}
if desc is None:
for sp in rr.reactants():
if sp not in species_coef_map.keys():
species_coef_map[sp] = 1
else:
species_coef_map[sp] += 1
else:
for sp, coef in zip(rr.reactants(), desc.reactant_coefficients()):
if sp not in species_coef_map.keys():
species_coef_map[sp] = coef
else:
species_coef_map[sp] += coef
if desc is None or isinstance(desc, ecell4_base.core.ReactionRuleDescriptorMassAction):
p1 = m.createParameter()
p1.setId("k{:d}".format(cnt))
# p1 = kinetic_law.createLocalParameter()
# p1.setId("k")
p1.setConstant(True)
p1.setValue(rr.k() if desc is None else desc.k())
# math_exp = "k"
math_exp = "k{:d}".format(cnt)
for sp, coef in species_coef_map.items():
sid = sid_map[sp.serial()]
if coef == 1.0:
math_exp += "*{:s}".format(sid)
else:
math_exp += "*pow({:s},{:g})".format(sid, coef)
elif isinstance(desc, ecell4_base.core.ReactionRuleDescriptorPyfunc):
math_exp = desc.as_string()
if math_exp in ('', '<lambda>'):
warnings.warn(
"The given ReactionRuleDescriptorPyfunc [{:s}] might be invalid.".format(
rr.as_string()))
math_exp = replace_parseobj(math_exp, sid_map)
else:
raise RuntimeError('Unknown derived type of ReactionRuleDescriptor was given [{}].'.format(type(desc)))
for sp, coef in species_coef_map.items():
sid = sid_map[sp.serial()]
s1 = r1.createReactant()
s1.setSpecies(sid)
s1.setConstant(False)
s1.setStoichiometry(coef)
if desc is None:
for sp in rr.products():
if sp not in species_coef_map.keys():
species_coef_map[sp] = 1
else:
species_coef_map[sp] += 1
else:
species_coef_map = {}
for sp, coef in zip(rr.products(), desc.product_coefficients()):
if sp not in species_coef_map.keys():
species_coef_map[sp] = coef
else:
species_coef_map[sp] += coef
for sp, coef in species_coef_map.items():
sid = sid_map[sp.serial()]
s1 = r1.createProduct()
s1.setSpecies(sid)
s1.setConstant(False)
s1.setStoichiometry(coef)
math_ast = libsbml.parseL3Formula(math_exp)
kinetic_law.setMath(math_ast)
if is_valid:
document.validateSBML()
num_errors = (document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)
+ document.getNumErrors(libsbml.LIBSBML_SEV_FATAL))
if num_errors > 0:
messages = "The generated document is not valid."
messages += " {} errors were found:\n".format(num_errors)
for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_ERROR)):
err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_ERROR)
messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage())
for i in range(document.getNumErrors(libsbml.LIBSBML_SEV_FATAL)):
err = document.getErrorWithSeverity(i, libsbml.LIBSBML_SEV_FATAL)
messages += "{}: {}\n".format(err.getSeverityAsString(), err.getShortMessage())
raise RuntimeError(messages)
return document | [
"def",
"export_sbml",
"(",
"model",
",",
"y0",
"=",
"None",
",",
"volume",
"=",
"1.0",
",",
"is_valid",
"=",
"True",
")",
":",
"y0",
"=",
"y0",
"or",
"{",
"}",
"import",
"libsbml",
"document",
"=",
"libsbml",
".",
"SBMLDocument",
"(",
"3",
",",
"1"... | 38.18617 | 20.654255 |
def url_for(self, *subgroups, **groups):
"""Build URL."""
parsed = re.sre_parse.parse(self._pattern.pattern)
subgroups = {n:str(v) for n, v in enumerate(subgroups, 1)}
groups_ = dict(parsed.pattern.groupdict)
subgroups.update({
groups_[k0]: str(v0)
for k0, v0 in groups.items()
if k0 in groups_
})
path = ''.join(str(val) for val in Traverser(parsed, subgroups))
return URL.build(path=path, encoded=True) | [
"def",
"url_for",
"(",
"self",
",",
"*",
"subgroups",
",",
"*",
"*",
"groups",
")",
":",
"parsed",
"=",
"re",
".",
"sre_parse",
".",
"parse",
"(",
"self",
".",
"_pattern",
".",
"pattern",
")",
"subgroups",
"=",
"{",
"n",
":",
"str",
"(",
"v",
")"... | 41.083333 | 13.083333 |
def get_args(self, state, all_params, remainder, argspec, im_self):
'''
Determines the arguments for a controller based upon parameters
passed the argument specification for the controller.
'''
args = []
varargs = []
kwargs = dict()
valid_args = argspec.args[:]
if ismethod(state.controller) or im_self:
valid_args.pop(0) # pop off `self`
pecan_state = state.request.pecan
remainder = [x for x in remainder if x]
if im_self is not None:
args.append(im_self)
# grab the routing args from nested REST controllers
if 'routing_args' in pecan_state:
remainder = pecan_state['routing_args'] + list(remainder)
del pecan_state['routing_args']
# handle positional arguments
if valid_args and remainder:
args.extend(remainder[:len(valid_args)])
remainder = remainder[len(valid_args):]
valid_args = valid_args[len(args):]
# handle wildcard arguments
if [i for i in remainder if i]:
if not argspec[1]:
abort(404)
varargs.extend(remainder)
# get the default positional arguments
if argspec[3]:
defaults = dict(izip(argspec[0][-len(argspec[3]):], argspec[3]))
else:
defaults = dict()
# handle positional GET/POST params
for name in valid_args:
if name in all_params:
args.append(all_params.pop(name))
elif name in defaults:
args.append(defaults[name])
else:
break
# handle wildcard GET/POST params
if argspec[2]:
for name, value in six.iteritems(all_params):
if name not in argspec[0]:
kwargs[name] = value
return args, varargs, kwargs | [
"def",
"get_args",
"(",
"self",
",",
"state",
",",
"all_params",
",",
"remainder",
",",
"argspec",
",",
"im_self",
")",
":",
"args",
"=",
"[",
"]",
"varargs",
"=",
"[",
"]",
"kwargs",
"=",
"dict",
"(",
")",
"valid_args",
"=",
"argspec",
".",
"args",
... | 32.684211 | 16.54386 |
def add_postfix(file_path, postfix):
# type: (AnyStr, AnyStr) -> AnyStr
"""Add postfix for a full file path.
Examples:
>>> FileClass.add_postfix('/home/zhulj/dem.tif', 'filled')
'/home/zhulj/dem_filled.tif'
>>> FileClass.add_postfix('dem.tif', 'filled')
'dem_filled.tif'
>>> FileClass.add_postfix('dem', 'filled')
'dem_filled'
"""
cur_sep = ''
for sep in ['\\', '/', os.sep]:
if sep in file_path:
cur_sep = sep
break
corename = FileClass.get_core_name_without_suffix(file_path)
tmpspliter = os.path.basename(file_path).split('.')
suffix = ''
if len(tmpspliter) > 1:
suffix = tmpspliter[-1]
newname = os.path.dirname(file_path) + cur_sep + corename + '_' + postfix
if suffix != '':
newname += '.' + suffix
return str(newname) | [
"def",
"add_postfix",
"(",
"file_path",
",",
"postfix",
")",
":",
"# type: (AnyStr, AnyStr) -> AnyStr",
"cur_sep",
"=",
"''",
"for",
"sep",
"in",
"[",
"'\\\\'",
",",
"'/'",
",",
"os",
".",
"sep",
"]",
":",
"if",
"sep",
"in",
"file_path",
":",
"cur_sep",
... | 36.307692 | 14.423077 |
def get_proficiency_objective_bank_assignment_session(self, proxy):
"""Gets the ``OsidSession`` associated with assigning proficiencies to objective banks.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: a ``ProficiencyObjectiveBankAssignmentSession``
:rtype: ``osid.learning.ProficiencyObjectiveBankAssignmentSession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_proficiency_objective_bank_assignment()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_proficiency_objective_bank_assignment()`` is ``true``.*
"""
if not self.supports_proficiency_objective_bank_assignment():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ProficiencyObjectiveBankAssignmentSession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session | [
"def",
"get_proficiency_objective_bank_assignment_session",
"(",
"self",
",",
"proxy",
")",
":",
"if",
"not",
"self",
".",
"supports_proficiency_objective_bank_assignment",
"(",
")",
":",
"raise",
"Unimplemented",
"(",
")",
"try",
":",
"from",
".",
"import",
"sessio... | 46.115385 | 24.423077 |
def apply_nsigma_separation(fitind,fluxes,separation,niter=10):
"""
Remove sources which are within nsigma*fwhm/2 pixels of each other, leaving
only a single valid source in that region.
This algorithm only works for sources which end up sequentially next to each other
based on Y position and removes enough duplicates to make the final source list more
managable. It sorts the positions by Y value in order to group those at the
same positions as much as possible.
"""
for n in range(niter):
if len(fitind) < 1:
break
fitarr = np.array(fitind,np.float32)
fluxarr = np.array(fluxes,np.float32)
inpind = np.argsort(fitarr[:,1])
npind = fitarr[inpind]
fluxind = fluxarr[inpind]
fitind = npind.tolist()
fluxes = fluxind.tolist()
dx = npind[1:,0] - npind[:-1,0]
dy = npind[1:,1] - npind[:-1,1]
dr = np.sqrt(np.power(dx,2)+np.power(dy,2))
nsame = np.where(dr <= separation)[0]
if nsame.shape[0] > 0:
for ind in nsame[-1::-1]:
#continue # <- turn off filtering by source separation
del fitind[ind]
del fluxes[ind]
else:
break
return fitind,fluxes | [
"def",
"apply_nsigma_separation",
"(",
"fitind",
",",
"fluxes",
",",
"separation",
",",
"niter",
"=",
"10",
")",
":",
"for",
"n",
"in",
"range",
"(",
"niter",
")",
":",
"if",
"len",
"(",
"fitind",
")",
"<",
"1",
":",
"break",
"fitarr",
"=",
"np",
"... | 39 | 15 |
def InferUserAndSubjectFromUrn(self):
"""Infers user name and subject urn from self.urn."""
_, client_id, user, _ = self.urn.Split(4)
return (user, rdf_client.ClientURN(client_id)) | [
"def",
"InferUserAndSubjectFromUrn",
"(",
"self",
")",
":",
"_",
",",
"client_id",
",",
"user",
",",
"_",
"=",
"self",
".",
"urn",
".",
"Split",
"(",
"4",
")",
"return",
"(",
"user",
",",
"rdf_client",
".",
"ClientURN",
"(",
"client_id",
")",
")"
] | 47.25 | 4.5 |
def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
rangeFlag = retFlag = 0
startStr = endStr = ''
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
s = s.replace(' ', ' ')
for cre, rflag in [(self.ptc.CRE_TIMERNG1, 1),
(self.ptc.CRE_TIMERNG2, 2),
(self.ptc.CRE_TIMERNG4, 7),
(self.ptc.CRE_TIMERNG3, 3),
(self.ptc.CRE_DATERNG1, 4),
(self.ptc.CRE_DATERNG2, 5),
(self.ptc.CRE_DATERNG3, 6)]:
m = cre.search(s)
if m is not None:
rangeFlag = rflag
break
debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
sourceTime, ctx = self.parse(s, sourceTime,
VERSION_CONTEXT_STYLE)
if not ctx.hasDateOrTime:
sourceTime = None
else:
parseStr = s
if rangeFlag in (1, 2):
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag in (3, 7):
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startStr = parseStr[:m.start()] + self.ptc.meridian[0]
else:
startStr = parseStr[:m.start()] + self.ptc.meridian[1]
else:
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 2
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
endStr = parseStr[m.start() + 1:]
retFlag = 1
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endStr = parseStr[m.start() + 1:]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endStr)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startStr = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startStr)
startYear = date.group('year')
if startYear is None:
startStr = startStr + ', ' + endYear
else:
startStr = parseStr[:m.start()]
retFlag = 1
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startStr = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startStr)
mth = mth.group('mthname')
# appending the month name to the end date
endStr = mth + parseStr[(m.start() + 1):]
retFlag = 1
else:
# if range is not found
startDT = endDT = time.localtime()
if retFlag:
startDT, sctx = self.parse(startStr, sourceTime,
VERSION_CONTEXT_STYLE)
endDT, ectx = self.parse(endStr, sourceTime,
VERSION_CONTEXT_STYLE)
if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
retFlag = 0
return startDT, endDT, retFlag | [
"def",
"evalRanges",
"(",
"self",
",",
"datetimeString",
",",
"sourceTime",
"=",
"None",
")",
":",
"rangeFlag",
"=",
"retFlag",
"=",
"0",
"startStr",
"=",
"endStr",
"=",
"''",
"s",
"=",
"datetimeString",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
... | 34.212121 | 18.530303 |
def p_iteration_statement_6(self, p):
"""
iteration_statement \
: FOR LPAREN VAR identifier initializer_noin IN expr RPAREN statement
"""
p[0] = ast.ForIn(item=ast.VarDecl(identifier=p[4], initializer=p[5]),
iterable=p[7], statement=p[9]) | [
"def",
"p_iteration_statement_6",
"(",
"self",
",",
"p",
")",
":",
"p",
"[",
"0",
"]",
"=",
"ast",
".",
"ForIn",
"(",
"item",
"=",
"ast",
".",
"VarDecl",
"(",
"identifier",
"=",
"p",
"[",
"4",
"]",
",",
"initializer",
"=",
"p",
"[",
"5",
"]",
"... | 42.714286 | 15 |
def destroy_local_fw_db(self):
"""Delete the FW dict and its attributes. """
del self.fw_dict
del self.in_dcnm_net_dict
del self.in_dcnm_subnet_dict
del self.out_dcnm_net_dict
del self.out_dcnm_subnet_dict | [
"def",
"destroy_local_fw_db",
"(",
"self",
")",
":",
"del",
"self",
".",
"fw_dict",
"del",
"self",
".",
"in_dcnm_net_dict",
"del",
"self",
".",
"in_dcnm_subnet_dict",
"del",
"self",
".",
"out_dcnm_net_dict",
"del",
"self",
".",
"out_dcnm_subnet_dict"
] | 35.285714 | 6.571429 |
def add_select(self, *column):
"""
Add a new select column to query
:param column: The column to add
:type column: str
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
if not column:
column = []
self.columns += list(column)
return self | [
"def",
"add_select",
"(",
"self",
",",
"*",
"column",
")",
":",
"if",
"not",
"column",
":",
"column",
"=",
"[",
"]",
"self",
".",
"columns",
"+=",
"list",
"(",
"column",
")",
"return",
"self"
] | 20.9375 | 16.6875 |
def delete(context, id, etag):
"""delete(context, id, etag)
Delete a Feeder.
>>> dcictl feeder-delete [OPTIONS]
:param string id: ID of the feeder to delete [required]
:param string etag: Entity tag of the feeder resource [required]
"""
result = feeder.delete(context, id=id, etag=etag)
if result.status_code == 204:
utils.print_json({'id': id, 'message': 'Feeder deleted.'})
else:
utils.format_output(result, context.format) | [
"def",
"delete",
"(",
"context",
",",
"id",
",",
"etag",
")",
":",
"result",
"=",
"feeder",
".",
"delete",
"(",
"context",
",",
"id",
"=",
"id",
",",
"etag",
"=",
"etag",
")",
"if",
"result",
".",
"status_code",
"==",
"204",
":",
"utils",
".",
"p... | 29.125 | 20.4375 |
def append_from_list(self, content, fill_title=False):
"""
Appends rows created from the data contained in the provided
list of tuples of strings. The first tuple of the list can be
set as table title.
Args:
content (list): list of tuples of strings. Each tuple is a row.
fill_title (bool): if true, the first tuple in the list will
be set as title.
"""
row_index = 0
for row in content:
tr = TableRow()
column_index = 0
for item in row:
if row_index == 0 and fill_title:
ti = TableTitle(item)
else:
ti = TableItem(item)
tr.append(ti, str(column_index))
column_index = column_index + 1
self.append(tr, str(row_index))
row_index = row_index + 1 | [
"def",
"append_from_list",
"(",
"self",
",",
"content",
",",
"fill_title",
"=",
"False",
")",
":",
"row_index",
"=",
"0",
"for",
"row",
"in",
"content",
":",
"tr",
"=",
"TableRow",
"(",
")",
"column_index",
"=",
"0",
"for",
"item",
"in",
"row",
":",
... | 37.041667 | 14.375 |
def mro(*bases):
"""Calculate the Method Resolution Order of bases using the C3 algorithm.
Suppose you intended creating a class K with the given base classes. This
function returns the MRO which K would have, *excluding* K itself (since
it doesn't yet exist), as if you had actually created the class.
Another way of looking at this, if you pass a single class K, this will
return the linearization of K (the MRO of K, *including* itself).
Found at:
http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
"""
seqs = [list(C.__mro__) for C in bases] + [list(bases)]
res = []
while True:
non_empty = list(filter(None, seqs))
if not non_empty:
# Nothing left to process, we're done.
return tuple(res)
for seq in non_empty: # Find merge candidates among seq heads.
candidate = seq[0]
not_head = [s for s in non_empty if candidate in s[1:]]
if not_head:
# Reject the candidate.
candidate = None
else:
break
if not candidate:
raise TypeError("inconsistent hierarchy, no C3 MRO is possible")
res.append(candidate)
for seq in non_empty:
# Remove candidate.
if seq[0] == candidate:
del seq[0] | [
"def",
"mro",
"(",
"*",
"bases",
")",
":",
"seqs",
"=",
"[",
"list",
"(",
"C",
".",
"__mro__",
")",
"for",
"C",
"in",
"bases",
"]",
"+",
"[",
"list",
"(",
"bases",
")",
"]",
"res",
"=",
"[",
"]",
"while",
"True",
":",
"non_empty",
"=",
"list"... | 38.285714 | 20.571429 |
def _check_index(self, index):
"""Verify that the given index is consistent with the degree of the node.
"""
if self.degree is None:
raise UnknownDegreeError(
'Cannot access child DataNode on a parent with degree of None. '\
'Set the degree on the parent first.')
if index < 0 or index >= self.degree:
raise IndexOutOfRangeError(
'Out of range index %s. DataNode parent has degree %s, so index '\
'should be in the range 0 to %s' % (
index, self.degree, self.degree-1)) | [
"def",
"_check_index",
"(",
"self",
",",
"index",
")",
":",
"if",
"self",
".",
"degree",
"is",
"None",
":",
"raise",
"UnknownDegreeError",
"(",
"'Cannot access child DataNode on a parent with degree of None. '",
"'Set the degree on the parent first.'",
")",
"if",
"index",... | 49.833333 | 12.666667 |
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment. \
undefined('there is no parent block called %r.' %
self.name, name='super')
return BlockReference(self.name, self._context, self._stack,
self._depth + 1) | [
"def",
"super",
"(",
"self",
")",
":",
"if",
"self",
".",
"_depth",
"+",
"1",
">=",
"len",
"(",
"self",
".",
"_stack",
")",
":",
"return",
"self",
".",
"_context",
".",
"environment",
".",
"undefined",
"(",
"'there is no parent block called %r.'",
"%",
"... | 46.125 | 13.375 |
def get_request_headers(self):
"""
Determine the headers to send along with the request. These are
pretty much the same for every request, with Route53.
"""
date_header = time.asctime(time.gmtime())
# We sign the time string above with the user's AWS secret access key
# in order to authenticate our request.
signing_key = self._hmac_sign_string(date_header)
# Amazon's super fun auth token.
auth_header = "AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=HmacSHA256,Signature=%s" % (
self.connection._aws_access_key_id,
signing_key,
)
return {
'X-Amzn-Authorization': auth_header,
'x-amz-date': date_header,
'Host': 'route53.amazonaws.com',
} | [
"def",
"get_request_headers",
"(",
"self",
")",
":",
"date_header",
"=",
"time",
".",
"asctime",
"(",
"time",
".",
"gmtime",
"(",
")",
")",
"# We sign the time string above with the user's AWS secret access key",
"# in order to authenticate our request.",
"signing_key",
"="... | 35.409091 | 19.318182 |
def filter_recordings(recordings):
"""Remove all recordings which have points without time.
Parameters
----------
recordings : list of dicts
Each dictionary has the keys 'data' and 'segmentation'
Returns
-------
list of dicts :
Only recordings where all points have time values.
"""
new_recordings = []
for recording in recordings:
recording['data'] = json.loads(recording['data'])
tmp = json.loads(recording['segmentation'])
recording['segmentation'] = normalize_segmentation(tmp)
had_none = False
for stroke in recording['data']:
for point in stroke:
if point['time'] is None:
logging.debug("Had None-time: %i", recording['id'])
had_none = True
break
if had_none:
break
if not had_none:
new_recordings.append(recording)
recordings = new_recordings
logging.info("Done filtering")
return recordings | [
"def",
"filter_recordings",
"(",
"recordings",
")",
":",
"new_recordings",
"=",
"[",
"]",
"for",
"recording",
"in",
"recordings",
":",
"recording",
"[",
"'data'",
"]",
"=",
"json",
".",
"loads",
"(",
"recording",
"[",
"'data'",
"]",
")",
"tmp",
"=",
"jso... | 31.65625 | 15.875 |
def get(self, key, default=None):
"""
:return: the value behind :paramref:`key` in the specification.
If no value was found, :paramref:`default` is returned.
:param key: a :ref:`specification key <prototype-key>`
"""
for base in self.__specification:
if key in base:
return base[key]
return default | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"for",
"base",
"in",
"self",
".",
"__specification",
":",
"if",
"key",
"in",
"base",
":",
"return",
"base",
"[",
"key",
"]",
"return",
"default"
] | 37.5 | 12.5 |
def ancestor(self):
"""This browse node's immediate ancestor in the browse node tree.
:return:
The ancestor as an :class:`~.AmazonBrowseNode`, or None.
"""
ancestors = getattr(self.element, 'Ancestors', None)
if hasattr(ancestors, 'BrowseNode'):
return AmazonBrowseNode(ancestors['BrowseNode'])
return None | [
"def",
"ancestor",
"(",
"self",
")",
":",
"ancestors",
"=",
"getattr",
"(",
"self",
".",
"element",
",",
"'Ancestors'",
",",
"None",
")",
"if",
"hasattr",
"(",
"ancestors",
",",
"'BrowseNode'",
")",
":",
"return",
"AmazonBrowseNode",
"(",
"ancestors",
"[",... | 37 | 17.8 |
def _set_global_metric_type(self, v, load=False):
"""
Setter method for global_metric_type, mapped from YANG variable /routing_system/ipv6/router/ospf/global_metric_type (ospf:metric-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_global_metric_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_global_metric_type() directly.
YANG Description: OSPFv3 metric type for redistributed routes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'type1': {'value': 1}, u'type2': {'value': 2}},), is_leaf=True, yang_name="global-metric-type", rest_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPFv3 metric type for redistributed routes', u'alt-name': u'metric-type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='ospf:metric-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """global_metric_type must be of a type compatible with ospf:metric-type""",
'defined-type': "ospf:metric-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'type1': {'value': 1}, u'type2': {'value': 2}},), is_leaf=True, yang_name="global-metric-type", rest_name="metric-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPFv3 metric type for redistributed routes', u'alt-name': u'metric-type'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='ospf:metric-type', is_config=True)""",
})
self.__global_metric_type = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_global_metric_type",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
","... | 91.583333 | 45 |
def tag(self, tokens):
"""Return a list of (token, tag) tuples for a given list of tokens."""
tags = []
for token in tokens:
normalized = self.lexicon[token].normalized
for regex, tag in self.regexes:
if regex.match(normalized):
tags.append((token, tag))
break
else:
tags.append((token, None))
return tags | [
"def",
"tag",
"(",
"self",
",",
"tokens",
")",
":",
"tags",
"=",
"[",
"]",
"for",
"token",
"in",
"tokens",
":",
"normalized",
"=",
"self",
".",
"lexicon",
"[",
"token",
"]",
".",
"normalized",
"for",
"regex",
",",
"tag",
"in",
"self",
".",
"regexes... | 36.166667 | 11.666667 |
def get_time_slide_id(xmldoc, time_slide, create_new = None, superset_ok = False, nonunique_ok = False):
"""
Return the time_slide_id corresponding to the offset vector
described by time_slide, a dictionary of instrument/offset pairs.
Example:
>>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0})
'time_slide:time_slide_id:10'
This function is a wrapper around the .get_time_slide_id() method
of the pycbc_glue.ligolw.lsctables.TimeSlideTable class. See the
documentation for that class for the meaning of the create_new,
superset_ok and nonunique_ok keyword arguments.
This function requires the document to contain exactly one
time_slide table. If the document does not contain exactly one
time_slide table then ValueError is raised, unless the optional
create_new argument is not None. In that case a new table is
created. This effect of the create_new argument is in addition to
the affects described by the TimeSlideTable class.
"""
try:
tisitable = lsctables.TimeSlideTable.get_table(xmldoc)
except ValueError:
# table not found
if create_new is None:
raise
tisitable = lsctables.New(lsctables.TimeSlideTable)
xmldoc.childNodes[0].appendChild(tisitable)
# make sure the next_id attribute is correct
tisitable.sync_next_id()
# get the id
return tisitable.get_time_slide_id(time_slide, create_new = create_new, superset_ok = superset_ok, nonunique_ok = nonunique_ok) | [
"def",
"get_time_slide_id",
"(",
"xmldoc",
",",
"time_slide",
",",
"create_new",
"=",
"None",
",",
"superset_ok",
"=",
"False",
",",
"nonunique_ok",
"=",
"False",
")",
":",
"try",
":",
"tisitable",
"=",
"lsctables",
".",
"TimeSlideTable",
".",
"get_table",
"... | 40.529412 | 24.294118 |
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model):
""" The how well do the features plus a constant base rate sum up to the model output.
"""
X_train, X_test = to_array(X_train, X_test)
# how many features to mask
assert X_train.shape[1] == X_test.shape[1]
# keep nkeep top features and re-train the model for each test explanation
yp_test = trained_model.predict(X_test)
return metric(yp_test, strip_list(attr_test).sum(1)) | [
"def",
"local_accuracy",
"(",
"X_train",
",",
"y_train",
",",
"X_test",
",",
"y_test",
",",
"attr_test",
",",
"model_generator",
",",
"metric",
",",
"trained_model",
")",
":",
"X_train",
",",
"X_test",
"=",
"to_array",
"(",
"X_train",
",",
"X_test",
")",
"... | 38.615385 | 23.307692 |
def find_covalent_bonds(ampal, max_range=2.2, threshold=1.1, tag=True):
"""Finds all covalent bonds in the AMPAL object.
Parameters
----------
ampal : AMPAL Object
Any AMPAL object with a `get_atoms` method.
max_range : float, optional
Used to define the sector size, so interactions at longer ranges
will not be found.
threshold : float, optional
Allows deviation from ideal covalent bond distance to be included.
For example, a value of 1.1 would allow interactions up to 10% further
from the ideal distance to be included.
tag : bool, optional
If `True`, will add the covalent bond to the tags dictionary of
each `Atom` involved in the interaction under the `covalent_bonds`
key.
"""
sectors=gen_sectors(ampal.get_atoms(), max_range * 1.1)
bonds=[]
for sector in sectors.values():
atoms=itertools.combinations(sector, 2)
bonds.extend(covalent_bonds(atoms, threshold=threshold))
bond_set=list(set(bonds))
if tag:
for bond in bond_set:
a, b=bond.a, bond.b
if 'covalent_bonds' not in a.tags:
a.tags['covalent_bonds']=[b]
else:
a.tags['covalent_bonds'].append(b)
if 'covalent_bonds' not in b.tags:
b.tags['covalent_bonds']=[a]
else:
b.tags['covalent_bonds'].append(a)
return bond_set | [
"def",
"find_covalent_bonds",
"(",
"ampal",
",",
"max_range",
"=",
"2.2",
",",
"threshold",
"=",
"1.1",
",",
"tag",
"=",
"True",
")",
":",
"sectors",
"=",
"gen_sectors",
"(",
"ampal",
".",
"get_atoms",
"(",
")",
",",
"max_range",
"*",
"1.1",
")",
"bond... | 38.459459 | 17.621622 |
def _caps_add_machine(machines, node):
'''
Parse the <machine> element of the host capabilities and add it
to the machines list.
'''
maxcpus = node.get('maxCpus')
canonical = node.get('canonical')
name = node.text
alternate_name = ""
if canonical:
alternate_name = name
name = canonical
machine = machines.get(name)
if not machine:
machine = {'alternate_names': []}
if maxcpus:
machine['maxcpus'] = int(maxcpus)
machines[name] = machine
if alternate_name:
machine['alternate_names'].append(alternate_name) | [
"def",
"_caps_add_machine",
"(",
"machines",
",",
"node",
")",
":",
"maxcpus",
"=",
"node",
".",
"get",
"(",
"'maxCpus'",
")",
"canonical",
"=",
"node",
".",
"get",
"(",
"'canonical'",
")",
"name",
"=",
"node",
".",
"text",
"alternate_name",
"=",
"\"\"",... | 27 | 17.545455 |
def _parse_tree(self, node):
""" Parse a <checksum> object """
if 'filename' in node.attrib:
self.filename = node.attrib['filename']
if 'type' in node.attrib:
self.kind = node.attrib['type']
if 'target' in node.attrib:
self.target = node.attrib['target']
self.value = node.text | [
"def",
"_parse_tree",
"(",
"self",
",",
"node",
")",
":",
"if",
"'filename'",
"in",
"node",
".",
"attrib",
":",
"self",
".",
"filename",
"=",
"node",
".",
"attrib",
"[",
"'filename'",
"]",
"if",
"'type'",
"in",
"node",
".",
"attrib",
":",
"self",
"."... | 38.333333 | 6.444444 |
def update(self, payload):
"""Updates the queried record with `payload` and returns the updated record after validating the response
:param payload: Payload to update the record with
:raise:
:NoResults: if query returned no results
:MultipleResults: if query returned more than one result (currently not supported)
:return:
- The updated record
"""
try:
result = self.get_one()
if 'sys_id' not in result:
raise NoResults()
except MultipleResults:
raise MultipleResults("Update of multiple records is not supported")
except NoResults as e:
e.args = ('Cannot update a non-existing record',)
raise
if not isinstance(payload, dict):
raise InvalidUsage("Update payload must be of type dict")
response = self.session.put(self._get_table_url(sys_id=result['sys_id']), data=json.dumps(payload))
return self._get_content(response) | [
"def",
"update",
"(",
"self",
",",
"payload",
")",
":",
"try",
":",
"result",
"=",
"self",
".",
"get_one",
"(",
")",
"if",
"'sys_id'",
"not",
"in",
"result",
":",
"raise",
"NoResults",
"(",
")",
"except",
"MultipleResults",
":",
"raise",
"MultipleResults... | 40.48 | 20.72 |
def decode(self, covertext):
"""Given an input string ``unrank(X[:n]) || X[n:]`` returns ``X``.
"""
if not isinstance(covertext, str):
raise InvalidInputException('Input must be of type string.')
insufficient = (len(covertext) < self._fixed_slice)
if insufficient:
raise DecodeFailureError(
"Covertext is shorter than self._fixed_slice, can't decode.")
maximumBytesToRank = int(math.floor(self.getCapacity() / 8.0))
rank_payload = self._dfa.rank(covertext[:self._fixed_slice])
X = fte.bit_ops.long_to_bytes(rank_payload)
X = string.rjust(X, maximumBytesToRank, '\x00')
msg_len_header = self._encrypter.decryptOneBlock(
X[:DfaEncoderObject._COVERTEXT_HEADER_LEN_CIPHERTTEXT])
msg_len_header = msg_len_header[8:16]
msg_len = fte.bit_ops.bytes_to_long(
msg_len_header[:DfaEncoderObject._COVERTEXT_HEADER_LEN_PLAINTEXT])
retval = X[16:16 + msg_len]
retval += covertext[self._fixed_slice:]
ctxt_len = self._encrypter.getCiphertextLen(retval)
remaining_buffer = retval[ctxt_len:]
retval = retval[:ctxt_len]
retval = self._encrypter.decrypt(retval)
return retval, remaining_buffer | [
"def",
"decode",
"(",
"self",
",",
"covertext",
")",
":",
"if",
"not",
"isinstance",
"(",
"covertext",
",",
"str",
")",
":",
"raise",
"InvalidInputException",
"(",
"'Input must be of type string.'",
")",
"insufficient",
"=",
"(",
"len",
"(",
"covertext",
")",
... | 39.53125 | 19.5625 |
def symlink_to(self, target, target_is_directory=False):
"""
Make this path a symlink pointing to the given path.
Note the order of arguments (self, target) is the reverse of
os.symlink's.
"""
if self._closed:
self._raise_closed()
self._accessor.symlink(target, self, target_is_directory) | [
"def",
"symlink_to",
"(",
"self",
",",
"target",
",",
"target_is_directory",
"=",
"False",
")",
":",
"if",
"self",
".",
"_closed",
":",
"self",
".",
"_raise_closed",
"(",
")",
"self",
".",
"_accessor",
".",
"symlink",
"(",
"target",
",",
"self",
",",
"... | 38.666667 | 14.666667 |
def create(model_config, epochs, optimizer, model, source, storage, scheduler=None, callbacks=None, max_grad_norm=None):
""" Vel factory function """
return SimpleTrainCommand(
epochs=epochs,
model_config=model_config,
model_factory=model,
optimizer_factory=optimizer,
scheduler_factory=scheduler,
source=source,
storage=storage,
callbacks=callbacks,
max_grad_norm=max_grad_norm
) | [
"def",
"create",
"(",
"model_config",
",",
"epochs",
",",
"optimizer",
",",
"model",
",",
"source",
",",
"storage",
",",
"scheduler",
"=",
"None",
",",
"callbacks",
"=",
"None",
",",
"max_grad_norm",
"=",
"None",
")",
":",
"return",
"SimpleTrainCommand",
"... | 34.769231 | 16.923077 |
def download_data(dataset_name=None, prompt=prompt_stdin):
"""Check with the user that the are happy with terms and conditions for the data set, then download it."""
dr = data_resources[dataset_name]
if not authorize_download(dataset_name, prompt=prompt):
raise Exception("Permission to download data set denied.")
if 'suffices' in dr:
for url, files, suffices in zip(dr['urls'], dr['files'], dr['suffices']):
for file, suffix in zip(files, suffices):
download_url(url=os.path.join(url,file),
dir_name = data_path,
store_directory=dataset_name,
suffix=suffix)
elif 'dirs' in dr:
for url, dirs, files in zip(dr['urls'], dr['dirs'], dr['files']):
for file, dir in zip(files, dirs):
print(file, dir)
download_url(
url=os.path.join(url,dir,file),
dir_name = data_path,
store_directory=os.path.join(dataset_name,dir)
)
else:
for url, files in zip(dr['urls'], dr['files']):
for file in files:
download_url(
url=os.path.join(url,file),
dir_name = data_path,
store_directory=dataset_name
)
return True | [
"def",
"download_data",
"(",
"dataset_name",
"=",
"None",
",",
"prompt",
"=",
"prompt_stdin",
")",
":",
"dr",
"=",
"data_resources",
"[",
"dataset_name",
"]",
"if",
"not",
"authorize_download",
"(",
"dataset_name",
",",
"prompt",
"=",
"prompt",
")",
":",
"ra... | 43.21875 | 15.96875 |
def _query_string_params(flask_request):
"""
Constructs an APIGW equivalent query string dictionary
Parameters
----------
flask_request request
Request from Flask
Returns dict (str: str)
-------
Empty dict if no query params where in the request otherwise returns a dictionary of key to value
"""
query_string_dict = {}
# Flask returns an ImmutableMultiDict so convert to a dictionary that becomes
# a dict(str: list) then iterate over
for query_string_key, query_string_list in flask_request.args.lists():
query_string_value_length = len(query_string_list)
# if the list is empty, default to empty string
if not query_string_value_length:
query_string_dict[query_string_key] = ""
else:
# APIGW doesn't handle duplicate query string keys, picking the last one in the list
query_string_dict[query_string_key] = query_string_list[-1]
return query_string_dict | [
"def",
"_query_string_params",
"(",
"flask_request",
")",
":",
"query_string_dict",
"=",
"{",
"}",
"# Flask returns an ImmutableMultiDict so convert to a dictionary that becomes",
"# a dict(str: list) then iterate over",
"for",
"query_string_key",
",",
"query_string_list",
"in",
"f... | 36.482759 | 24.689655 |
def _getConfigData(self, all_dependencies, component, builddir, build_info_header_path):
    ''' Generate the preprocessor/CMake view of the merged target config.

        Writes yotta_config.h and yotta_config.json into ``builddir`` and
        returns a 3-tuple:
        (path_to_config_header, cmake_set_definitions, path_to_config_json).
        (The previous one-line doc claimed a 2-tuple; the code returns
        three values.)
    '''
    # ordered_json, , read/write ordered json, internal
    from yotta.lib import ordered_json
    add_defs_header = ''
    set_definitions = ''
    # !!! backwards-compatible "TARGET_LIKE" definitions for the top-level
    # of the config. NB: THESE WILL GO AWAY
    definitions = []
    definitions.append(('TARGET', sanitizePreprocessorSymbol(self.target.getName())))
    definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(self.target.getName()),None))
    # make the path to the build-info header available both to CMake and
    # in the preprocessor:
    full_build_info_header_path = replaceBackslashes(os.path.abspath(build_info_header_path))
    logger.debug('build info header include path: "%s"', full_build_info_header_path)
    definitions.append(('YOTTA_BUILD_INFO_HEADER', '"'+full_build_info_header_path+'"'))
    for target in self.target.getSimilarTo_Deprecated():
        if '*' not in target:
            definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(target),None))
    merged_config = self.target.getMergedConfig()
    logger.debug('target configuration data: %s', merged_config)
    definitions += self._definitionsForConfig(merged_config, ['YOTTA', 'CFG'])
    add_defs_header += '// yotta config data (including backwards-compatible definitions)\n'
    # Valueless entries become bare ``#define K`` / ``set(K TRUE)``.
    for k, v in definitions:
        if v is not None:
            add_defs_header += '#define %s %s\n' % (k, v)
            set_definitions += 'set(%s %s)\n' % (k, v)
        else:
            add_defs_header += '#define %s\n' % k
            set_definitions += 'set(%s TRUE)\n' % k
    add_defs_header += '\n// version definitions\n'
    # Expose the version of every dependency (and this component) to C.
    for dep in list(all_dependencies.values()) + [component]:
        add_defs_header += "#define YOTTA_%s_VERSION_STRING \"%s\"\n" % (sanitizePreprocessorSymbol(dep.getName()), str(dep.getVersion()))
        add_defs_header += "#define YOTTA_%s_VERSION_MAJOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().major())
        add_defs_header += "#define YOTTA_%s_VERSION_MINOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().minor())
        add_defs_header += "#define YOTTA_%s_VERSION_PATCH %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().patch())
    # add the component's definitions
    defines = component.getDefines()
    if defines:
        add_defs_header += "\n// direct definitions (defines.json)\n"
        for name, value in defines.items():
            add_defs_header += "#define %s %s\n" % (name, value)
        add_defs_header += '\n'
    # use -include <definitions header> instead of lots of separate
    # defines... this is compiler specific, but currently testing it
    # out for gcc-compatible compilers only:
    config_include_file = os.path.join(builddir, 'yotta_config.h')
    config_json_file = os.path.join(builddir, 'yotta_config.json')
    set_definitions += 'set(YOTTA_CONFIG_MERGED_JSON_FILE \"%s\")\n' % replaceBackslashes(os.path.abspath(config_json_file))
    self._writeFile(
        config_include_file,
        '#ifndef __YOTTA_CONFIG_H__\n'+
        '#define __YOTTA_CONFIG_H__\n'+
        add_defs_header+
        '#endif // ndef __YOTTA_CONFIG_H__\n'
    )
    self._writeFile(
        config_json_file,
        ordered_json.dumps(merged_config)
    )
    return (config_include_file, set_definitions, config_json_file)
"def",
"_getConfigData",
"(",
"self",
",",
"all_dependencies",
",",
"component",
",",
"builddir",
",",
"build_info_header_path",
")",
":",
"# ordered_json, , read/write ordered json, internal",
"from",
"yotta",
".",
"lib",
"import",
"ordered_json",
"add_defs_header",
"=",... | 52.197183 | 30.507042 |
def write_metadata(self, symbol, metadata, prune_previous_version=True, **kwargs):
    """
    Write 'metadata' under the specified 'symbol' name to this library.
    The data will remain unchanged. A new version will be created.
    If the symbol is missing, it causes a write with empty data (None, pickled, can't append)
    and the supplied metadata.
    Returns a VersionedItem object only with a metadata element.
    Fast operation: Zero data/segment read/write operations.
    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    metadata : `dict` or `None`
        dictionary of metadata to persist along with the symbol
    prune_previous_version : `bool`
        Removes previous (non-snapshotted) versions from the database.
        Default: True
    kwargs :
        passed through to the write handler (only used if symbol does not already exist or is deleted)
    Returns
    -------
    `VersionedItem`
        VersionedItem named tuple containing the metadata of the written symbol's version document in the store.
    """
    # Make a normal write with empty data and supplied metadata if symbol does not exist
    try:
        previous_version = self._read_metadata(symbol)
    except NoDataFoundException:
        return self.write(symbol, data=None, metadata=metadata,
                          prune_previous_version=prune_previous_version, **kwargs)
    # Reaching here means that and/or metadata exist and we are set to update the metadata
    # Atomically reserve the next version number for this symbol (the
    # upsert creates the counter document on first use).
    new_version_num = self._version_nums.find_one_and_update({'symbol': symbol},
                                                             {'$inc': {'version': 1}},
                                                             upsert=True, new=True)['version']
    # Populate the new version entry, preserving existing data, and updating with the supplied metadata
    version = {k: previous_version[k] for k in previous_version.keys() if k != 'parent'} # don't copy snapshots
    version['_id'] = bson.ObjectId()
    version['version'] = new_version_num
    version['metadata'] = metadata
    # Reuse the previous base_version_id (or the previous _id) — the
    # underlying data itself is unchanged by a metadata-only write.
    version['base_version_id'] = previous_version.get('base_version_id', previous_version['_id'])
    return self._add_new_version_using_reference(symbol, version, previous_version, prune_previous_version)
"def",
"write_metadata",
"(",
"self",
",",
"symbol",
",",
"metadata",
",",
"prune_previous_version",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# Make a normal write with empty data and supplied metadata if symbol does not exist",
"try",
":",
"previous_version",
"="... | 52.434783 | 31.869565 |
def parse_tables(self):
    """
    Locate every matching table in the DOM and lazily parse each one.

    Returns
    -------
    generator of parsed (header, body, footer) tuples, one per table.
    """
    doc = self._build_doc()
    matched = self._parse_tables(doc, self.match, self.attrs)
    return (self._parse_thead_tbody_tfoot(table) for table in matched)
"def",
"parse_tables",
"(",
"self",
")",
":",
"tables",
"=",
"self",
".",
"_parse_tables",
"(",
"self",
".",
"_build_doc",
"(",
")",
",",
"self",
".",
"match",
",",
"self",
".",
"attrs",
")",
"return",
"(",
"self",
".",
"_parse_thead_tbody_tfoot",
"(",
... | 34 | 21.2 |
def has_main_target (self, name):
    """Tells if a main target with the specified name exists.

    NOTE(review): ``basestring`` implies this module targets Python 2 (or
    relies on a compatibility alias) — confirm before porting.
    """
    assert isinstance(name, basestring)
    # Main targets are discovered lazily; populate the cache on first query.
    if not self.built_main_targets_:
        self.build_main_targets()
    return name in self.main_target_
"def",
"has_main_target",
"(",
"self",
",",
"name",
")",
":",
"assert",
"isinstance",
"(",
"name",
",",
"basestring",
")",
"if",
"not",
"self",
".",
"built_main_targets_",
":",
"self",
".",
"build_main_targets",
"(",
")",
"return",
"name",
"in",
"self",
".... | 37.285714 | 7.571429 |
def get_mcu_definition(self, project_file):
    """ Parse an IAR ``.ewp`` project file and extract its MCU definition.

    :param project_file: path (resolved against the cwd) of the .ewp file.
    :return: MCU_TEMPLATE dict with ``tool_specific['iar']`` populated;
        returned unpopulated when the file is not a valid .ewp project.
    """
    # TODO: check the extension here if it's valid IAR project or we
    # should at least check if syntax is correct check something IAR defines and return error if not
    project_file = join(getcwd(), project_file)
    # NOTE(review): ``file(...)`` is the Python 2 builtin — confirm this
    # module is Python-2 only before reuse.
    ewp_dic = xmltodict.parse(file(project_file), dict_constructor=dict)
    mcu = MCU_TEMPLATE
    try:
        ewp_dic['project']['configuration']
    except KeyError:
        # validity check for iar project
        logging.debug("The project_file %s seems to be not valid .ewp file.")
        return mcu
    # Fill in only must-have values, fpu will be added if defined for mcu
    mcu['tool_specific'] = {
        'iar' : {
            # MCU selection
            'OGChipSelectEditMenu' : {
                'state' : [],
            },
            # we use mcu
            'OGCoreOrChip' : {
                'state' : [1],
            },
        }
    }
    # we take 0 configuration or just configuration, as multiple configuration possible
    # debug, release, for mcu - does not matter, try and adjust
    try:
        index_general = self._get_option(ewp_dic['project']['configuration'][0]['settings'], 'General')
        configuration = ewp_dic['project']['configuration'][0]
    except KeyError:
        index_general = self._get_option(ewp_dic['project']['configuration']['settings'], 'General')
        configuration = ewp_dic['project']['configuration']
    index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'OGChipSelectEditMenu')
    OGChipSelectEditMenu = configuration['settings'][index_general]['data']['option'][index_option]
    mcu['tool_specific']['iar']['OGChipSelectEditMenu']['state'].append(OGChipSelectEditMenu['state'].replace('\t', ' ', 1))
    # we keep this as the internal version. FPU - version 1, FPU2 version 2.
    # TODO:We shall look at IAR versioning to get this right
    fileVersion = 1
    try:
        if self._get_option(configuration['settings'][index_general]['data']['option'], 'FPU2'):
            fileVersion = 2
    except TypeError:
        pass
    index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GBECoreSlave')
    GBECoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
    mcu['tool_specific']['iar']['GBECoreSlave'] = { 'state': [int(GBECoreSlave['state'])] }
    # Newer (v2) project files use the *2 option names and add CoreVariant.
    if fileVersion == 2:
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave2')
        GFPUCoreSlave2 = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['GFPUCoreSlave2'] = { 'state': [int(GFPUCoreSlave2['state'])] }
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'CoreVariant')
        CoreVariant = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['CoreVariant'] = { 'state': [int(CoreVariant['state'])] }
    else:
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'GFPUCoreSlave')
        GFPUCoreSlave = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['GFPUCoreSlave'] = { 'state': [int(GFPUCoreSlave['state'])] }
        index_option = self._get_option(configuration['settings'][index_general]['data']['option'], 'Variant')
        Variant = configuration['settings'][index_general]['data']['option'][index_option]
        mcu['tool_specific']['iar']['Variant'] = { 'state': [int(Variant['state'])] }
    return mcu
"def",
"get_mcu_definition",
"(",
"self",
",",
"project_file",
")",
":",
"# TODO: check the extension here if it's valid IAR project or we",
"# should at least check if syntax is correct check something IAR defines and return error if not",
"project_file",
"=",
"join",
"(",
"getcwd",
"... | 55.828571 | 36.257143 |
def set_channel_created(self, channel_link, channel_id):
    """ set_channel_created: record progress once the channel exists on Kolibri Studio
        Args:
            channel_link (str): link to uploaded channel
            channel_id (str): id of channel that has been uploaded
        Returns: None
    """
    self.channel_link, self.channel_id = channel_link, channel_id
    # Publishing is an optional extra step; otherwise creation is the end.
    next_status = Status.PUBLISH_CHANNEL if config.PUBLISH else Status.DONE
    self.__record_progress(next_status)
"def",
"set_channel_created",
"(",
"self",
",",
"channel_link",
",",
"channel_id",
")",
":",
"self",
".",
"channel_link",
"=",
"channel_link",
"self",
".",
"channel_id",
"=",
"channel_id",
"self",
".",
"__record_progress",
"(",
"Status",
".",
"PUBLISH_CHANNEL",
... | 49.4 | 15.7 |
def _prt_line_detail(self, prt, values, lnum=""):
"""Print header and field values in a readable format."""
#### data = zip(self.req_str, self.ntgafobj._fields, values)
data = zip(self.req_str, self.flds, values)
txt = ["{:2}) {:3} {:20} {}".format(i, req, hdr, val) for i, (req, hdr, val) in enumerate(data)]
prt.write("{LNUM}\n{TXT}\n".format(LNUM=lnum, TXT="\n".join(txt))) | [
"def",
"_prt_line_detail",
"(",
"self",
",",
"prt",
",",
"values",
",",
"lnum",
"=",
"\"\"",
")",
":",
"#### data = zip(self.req_str, self.ntgafobj._fields, values)",
"data",
"=",
"zip",
"(",
"self",
".",
"req_str",
",",
"self",
".",
"flds",
",",
"values",
")"... | 68.5 | 24.333333 |
def resizeEvent(self, evt=None):
    """Qt resize handler: recompute layout metrics from the widget's new size.

    NOTE(review): ``step`` and ``foot`` are computed but never used in the
    visible body — presumably consumed by code trimmed from this copy, or
    dead; confirm before relying on this handler.
    """
    w = self.width()
    h = self.height()
    '''
    if h<=360:
        h=360
        self.resize(w,h)
    if w<=640:
        w = 640
        self.resize(w, h)
    '''
    step = (w * 94 / 100) / 5
    foot = h * 3 / 48
"def",
"resizeEvent",
"(",
"self",
",",
"evt",
"=",
"None",
")",
":",
"w",
"=",
"self",
".",
"width",
"(",
")",
"h",
"=",
"self",
".",
"height",
"(",
")",
"step",
"=",
"(",
"w",
"*",
"94",
"/",
"100",
")",
"/",
"5",
"foot",
"=",
"h",
"*",
... | 19.333333 | 20.666667 |
def propose_value(self, value, assume_leader=False):
    """
    Propose a value to the network via this node's Paxos instance.

    Raises ValueError for a None value; returns the message that was
    announced (a proposal, or a prepare when no proposal was produced).
    """
    if value is None:
        raise ValueError("Not allowed to propose value None")
    instance = self.paxos_instance
    instance.leader = assume_leader
    proposal = instance.propose_value(value)
    # Fall back to the prepare phase when no proposal message came back.
    msg = instance.prepare() if proposal is None else proposal
    self.setattrs_from_paxos(instance)
    self.announce(msg)
    return msg
"def",
"propose_value",
"(",
"self",
",",
"value",
",",
"assume_leader",
"=",
"False",
")",
":",
"if",
"value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Not allowed to propose value None\"",
")",
"paxos",
"=",
"self",
".",
"paxos_instance",
"paxos",
"... | 32.428571 | 8.714286 |
def highPassFilter(self, threshold):
    """Zero out the low-frequency block at the centre of ``self.fshift``.

    A rectangle centred on the DC component of the (shifted) Fourier
    transform is set to zero; ``threshold`` ranges over 0...1 and scales
    the rectangle's half-sides. A falsy threshold is a no-op.
    """
    if not threshold:
        return
    n_rows, n_cols = self.img.shape
    half_a = int(n_cols * threshold)
    half_b = int(n_rows * threshold)
    # Centre of the shifted spectrum (DC component).
    mid_row, mid_col = n_rows // 2, n_cols // 2
    # Zero the central rectangle (the low spatial frequencies).
    self.fshift[mid_row - half_a:mid_row + half_a,
                mid_col - half_b:mid_col + half_b] = 0
"def",
"highPassFilter",
"(",
"self",
",",
"threshold",
")",
":",
"if",
"not",
"threshold",
":",
"return",
"rows",
",",
"cols",
"=",
"self",
".",
"img",
".",
"shape",
"tx",
"=",
"int",
"(",
"cols",
"*",
"threshold",
")",
"ty",
"=",
"int",
"(",
"row... | 36.333333 | 15.666667 |
def _convert_seconds(self, packed_seconds):
"""Unpack the internal representation."""
seconds = struct.unpack("!H", packed_seconds[:2])[0]
seconds += struct.unpack("!I", packed_seconds[2:])[0]
return seconds | [
"def",
"_convert_seconds",
"(",
"self",
",",
"packed_seconds",
")",
":",
"seconds",
"=",
"struct",
".",
"unpack",
"(",
"\"!H\"",
",",
"packed_seconds",
"[",
":",
"2",
"]",
")",
"[",
"0",
"]",
"seconds",
"+=",
"struct",
".",
"unpack",
"(",
"\"!I\"",
","... | 47 | 12.4 |
def scale(arr, mn=0, mx=1):
    """
    Apply min-max scaling (normalize) to ``arr``, then rescale the result
    to the interval [mn, mx].

    Parameters
    ----------
    arr : array supporting .min()/.max() and arithmetic (e.g. numpy array)
        Input values; the input array itself is not modified.
    mn, mx : number
        Lower/upper bound of the output range (default 0..1).

    Returns
    -------
    A new array whose minimum is ``mn`` and maximum is ``mx``.

    Notes
    -----
    Fixes a defect in the previous version: it skipped the rescaling step
    whenever the *input* extrema happened to equal (mn, mx), returning
    [0, 1]-normalized data instead of the requested range.
    """
    amn = arr.min()
    amx = arr.max()
    # normalize to [0, 1] (creates a new array; the input is untouched)
    arr = (arr - amn) / (amx - amn)
    # rescale to the requested range; a no-op when (mn, mx) == (0, 1)
    if (mn, mx) != (0, 1):
        arr = arr * (mx - mn) + mn
    return arr
"def",
"scale",
"(",
"arr",
",",
"mn",
"=",
"0",
",",
"mx",
"=",
"1",
")",
":",
"amn",
"=",
"arr",
".",
"min",
"(",
")",
"amx",
"=",
"arr",
".",
"max",
"(",
")",
"# normalize:",
"arr",
"=",
"(",
"arr",
"-",
"amn",
")",
"/",
"(",
"amx",
"-... | 20.5 | 14.785714 |
def _list_iter(host=None, path=None):
    '''
    Return a generator iterating over hosts
    path
        path to the container parent
            default: /var/lib/lxc (system default)
    .. versionadded:: 2015.8.0
    '''
    # No explicit host means target every minion.
    tgt = host or '*'
    client = salt.client.get_local_client(__opts__['conf_file'])
    for container_info in client.cmd_iter(
            tgt, 'lxc.list', kwarg={'path': path}
    ):
        # Skip empty or malformed minion returns; only well-formed
        # ``{minion_id: {'ret': {...}}}`` results are yielded.
        if not container_info:
            continue
        if not isinstance(container_info, dict):
            continue
        chunk = {}
        id_ = next(six.iterkeys(container_info))
        if host and host != id_:
            continue
        if not isinstance(container_info[id_], dict):
            continue
        if 'ret' not in container_info[id_]:
            continue
        if not isinstance(container_info[id_]['ret'], dict):
            continue
        chunk[id_] = container_info[id_]['ret']
        yield chunk
"def",
"_list_iter",
"(",
"host",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"tgt",
"=",
"host",
"or",
"'*'",
"client",
"=",
"salt",
".",
"client",
".",
"get_local_client",
"(",
"__opts__",
"[",
"'conf_file'",
"]",
")",
"for",
"container_info",
"... | 29.516129 | 16.935484 |
def write_configs(self, project_root):
    """Write every pipeline configuration file into ``project_root``.

    Emits the resources/containers/params/manifest configs, a default
    user.config (only when one is not already present), the Helper.groovy
    library under ``lib/`` and an HTML rendering of the pipeline DAG next
    to the nextflow file.
    """
    # Write resources config
    with open(join(project_root, "resources.config"), "w") as fh:
        fh.write(self.resources)
    # Write containers config
    with open(join(project_root, "containers.config"), "w") as fh:
        fh.write(self.containers)
    # Write params config
    with open(join(project_root, "params.config"), "w") as fh:
        fh.write(self.params)
    # Write manifest config
    with open(join(project_root, "manifest.config"), "w") as fh:
        fh.write(self.manifest)
    # Write user config if not present in the project directory
    if not exists(join(project_root, "user.config")):
        with open(join(project_root, "user.config"), "w") as fh:
            fh.write(self.user_config)
    lib_dir = join(project_root, "lib")
    if not exists(lib_dir):
        os.makedirs(lib_dir)
    with open(join(lib_dir, "Helper.groovy"), "w") as fh:
        fh.write(self.help)
    # Generate the pipeline DAG
    pipeline_to_json = self.render_pipeline()
    with open(splitext(self.nf_file)[0] + ".html", "w") as fh:
        fh.write(pipeline_to_json)
"def",
"write_configs",
"(",
"self",
",",
"project_root",
")",
":",
"# Write resources config",
"with",
"open",
"(",
"join",
"(",
"project_root",
",",
"\"resources.config\"",
")",
",",
"\"w\"",
")",
"as",
"fh",
":",
"fh",
".",
"write",
"(",
"self",
".",
"r... | 36.277778 | 17.611111 |
def do_dock6_flexible(self, ligand_path, force_rerun=False):
    """Dock a ligand to the protein using DOCK6 flexible-ligand docking.

    Writes a DOCK6 input file, runs ``dock6`` inside ``self.dock_dir`` and,
    on success, records the output/conformer/scored result paths on
    ``self``; on failure (empty ranked file) an error is logged instead.

    Args:
        ligand_path (str): Path to ligand (mol2 format) to dock to protein
        force_rerun (bool): If method should be rerun even if output file exists
    """
    log.debug('{}: running DOCK6...'.format(self.id))
    ligand_name = os.path.basename(ligand_path).split('.')[0]
    in_name = op.join(self.dock_dir, "{}_{}_flexdock.in".format(self.id, ligand_name))
    out_name = op.join(self.dock_dir, "{}_{}_flexdock.out".format(self.id, ligand_name))
    conformers_out = op.join(self.dock_dir, '{}_{}_flexdock_conformers.mol2'.format(self.id, ligand_name))
    scored_out = op.join(self.dock_dir, '{}_{}_flexdock_scored.mol2'.format(self.id, ligand_name))
    ranked_out = op.join(self.dock_dir, '{}_{}_flexdock_ranked.mol2'.format(self.id, ligand_name))
    # Only rerun when forced or when the ranked output is missing/empty.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=ranked_out):
        with open(in_name, "w") as f:
            # DOCK6 input template; the unindented lines are intentional —
            # they are part of the triple-quoted string, not the code.
            dock_text = """ligand_atom_file {}
limit_max_ligands no
skip_molecule no
read_mol_solvation no
calculate_rmsd no
use_database_filter no
orient_ligand yes
automated_matching yes
receptor_site_file {}
max_orientations 500
critical_points no
chemical_matching no
use_ligand_spheres no
use_internal_energy yes
internal_energy_rep_exp 12
flexible_ligand yes
user_specified_anchor no
limit_max_anchors no
min_anchor_size 5
pruning_use_clustering yes
pruning_max_orients 100
pruning_clustering_cutoff 100
pruning_conformer_score_cutoff 100
use_clash_overlap no
write_growth_tree no
bump_filter yes
bump_grid_prefix {}
score_molecules yes
contact_score_primary no
contact_score_secondary no
grid_score_primary yes
grid_score_secondary no
grid_score_rep_rad_scale 1
grid_score_vdw_scale 1
grid_score_es_scale 1
grid_score_grid_prefix {}
multigrid_score_secondary no
dock3.5_score_secondary no
continuous_score_secondary no
descriptor_score_secondary no
gbsa_zou_score_secondary no
gbsa_hawkins_score_secondary no
SASA_descriptor_score_secondary no
amber_score_secondary no
minimize_ligand yes
minimize_anchor yes
minimize_flexible_growth yes
use_advanced_simplex_parameters no
simplex_max_cycles 1
simplex_score_converge 0.1
simplex_cycle_converge 1.0
simplex_trans_step 1.0
simplex_rot_step 0.1
simplex_tors_step 10.0
simplex_anchor_max_iterations 500
simplex_grow_max_iterations 500
simplex_grow_tors_premin_iterations 0
simplex_random_seed 0
simplex_restraint_min yes
simplex_coefficient_restraint 10.0
atom_model all
vdw_defn_file {}
flex_defn_file {}
flex_drive_file {}
ligand_outfile_prefix {}_{}_flexdock
write_orientations no
num_scored_conformers 20
write_conformations yes
cluster_conformations yes
rank_ligands yes
""".format(ligand_path, op.basename(self.sphsel_path), op.splitext(op.basename(self.grid_path))[0],
           op.splitext(op.basename(self.grid_path))[0], self.amb_file, self.flex1_file, self.flex2_file,
           self.id, ligand_name)
            f.write(dock_text)
        # DOCK6 resolves relative file names against the cwd.
        os.chdir(self.dock_dir)
        cmd = "dock6 -i {} -o {} -v".format(in_name, out_name)
        os.system(cmd)
        if ssbio.utils.is_non_zero_file(ranked_out):
            self.dock_flexible_outfile = out_name
            self.dock_flexible_conformers_result = conformers_out
            self.dock_flexible_scored_result = scored_out
            log.debug('{}: successful docking!'.format(self.dock_flexible_outfile))
        else:
            log.error('{}+{}: empty DOCK6 ranked file, execution error (or ligand failed to dock)'.format(self.id,
                                                                                                          op.basename(ligand_path)))
"def",
"do_dock6_flexible",
"(",
"self",
",",
"ligand_path",
",",
"force_rerun",
"=",
"False",
")",
":",
"log",
".",
"debug",
"(",
"'{}: running DOCK6...'",
".",
"format",
"(",
"self",
".",
"id",
")",
")",
"ligand_name",
"=",
"os",
".",
"path",
".",
"bas... | 59.990741 | 26.527778 |
def three_hours_forecast_at_id(self, id):
    """
    Queries the OWM Weather API for three hours weather forecast for the
    specified city ID (eg: 5128581). A *Forecaster* object is returned,
    containing a *Forecast* instance covering a global streak of
    five days: this instance encapsulates *Weather* objects, with a time
    interval of three hours one from each other
    :param id: the location's city ID
    :type id: int
    :returns: a *Forecaster* instance or ``None`` if forecast data is not
        available for the specified location
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached
    """
    # OWM city IDs are strictly positive integers.
    assert type(id) is int, "'id' must be an int"
    if id < 0:
        raise ValueError("'id' value must be greater than 0")
    params = {'id': id, 'lang': self._language}
    uri = http_client.HttpClient.to_url(THREE_HOURS_FORECAST_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    forecast = self._parsers['forecast'].parse_JSON(json_data)
    if forecast is not None:
        # Tag the parsed forecast with its 3-hour sampling interval.
        forecast.set_interval("3h")
        return forecaster.Forecaster(forecast)
    else:
        return None
"def",
"three_hours_forecast_at_id",
"(",
"self",
",",
"id",
")",
":",
"assert",
"type",
"(",
"id",
")",
"is",
"int",
",",
"\"'id' must be an int\"",
"if",
"id",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"'id' value must be greater than 0\"",
")",
"params",
... | 48.645161 | 20.516129 |
def full_dict(ldict, keys):
    """Index a list of dicts by a composite key for comparison.

    Parameters
    ----------
    ldict : list of dict
        Rows to index.
    keys : str or list of str
        Key name(s) whose stringified values, joined with '-', uniquely
        identify a row. A missing key contributes an empty string.

    Returns
    -------
    dict
        Mapping of composite key to the corresponding row; later rows
        overwrite earlier ones sharing the same composite key.
    """
    # Accept a single key name as a convenience (isinstance also covers
    # str subclasses, unlike the previous ``type(keys) == str`` check).
    if isinstance(keys, str):
        keys = [keys]
    cmp_dict = {}
    for line in ldict:
        index = '-'.join(str(line.get(key, '')) for key in keys)
        cmp_dict[index] = line
    return cmp_dict
"def",
"full_dict",
"(",
"ldict",
",",
"keys",
")",
":",
"if",
"type",
"(",
"keys",
")",
"==",
"str",
":",
"keys",
"=",
"[",
"keys",
"]",
"else",
":",
"keys",
"=",
"keys",
"cmp_dict",
"=",
"{",
"}",
"for",
"line",
"in",
"ldict",
":",
"index",
"... | 23.35 | 16.35 |
def attempt_connection(self):
    """
    Try connecting to the (host, port) tuples specified at construction time.

    Cycles through the configured hosts until a socket is established,
    ``self.running`` goes false, or the maximum number of reconnect
    attempts is exhausted (-1 means retry forever), sleeping with
    exponentially-growing, jittered backoff between rounds.

    :raises exception.ConnectFailedException: when no host could be reached.
    """
    self.connection_error = False
    sleep_exp = 1
    connect_count = 0
    while self.running and self.socket is None and (
            connect_count < self.__reconnect_attempts_max or
            self.__reconnect_attempts_max == -1 ):
        for host_and_port in self.__host_and_ports:
            try:
                log.info("Attempting connection to host %s, port %s", host_and_port[0], host_and_port[1])
                self.socket = get_socket(host_and_port[0], host_and_port[1], self.__timeout)
                self.__enable_keepalive()
                need_ssl = self.__need_ssl(host_and_port)
                if need_ssl: # wrap socket
                    ssl_params = self.get_ssl(host_and_port)
                    # Require certificate verification only when a CA
                    # bundle was configured.
                    if ssl_params['ca_certs']:
                        cert_validation = ssl.CERT_REQUIRED
                    else:
                        cert_validation = ssl.CERT_NONE
                    try:
                        tls_context = ssl.create_default_context(cafile=ssl_params['ca_certs'])
                    except AttributeError:
                        tls_context = None
                    if tls_context:
                        # Wrap the socket for TLS
                        certfile = ssl_params['cert_file']
                        keyfile = ssl_params['key_file']
                        password = ssl_params.get('password')
                        if certfile and not keyfile:
                            keyfile = certfile
                        if certfile:
                            tls_context.load_cert_chain(certfile, keyfile, password)
                        if cert_validation is None or cert_validation == ssl.CERT_NONE:
                            tls_context.check_hostname = False
                        tls_context.verify_mode = cert_validation
                        self.socket = tls_context.wrap_socket(self.socket, server_hostname=host_and_port[0])
                    else:
                        # Old-style wrap_socket where we don't have a modern SSLContext (so no SNI)
                        self.socket = ssl.wrap_socket(
                            self.socket,
                            keyfile=ssl_params['key_file'],
                            certfile=ssl_params['cert_file'],
                            cert_reqs=cert_validation,
                            ca_certs=ssl_params['ca_certs'],
                            ssl_version=ssl_params['ssl_version'])
                self.socket.settimeout(self.__timeout)
                if self.blocking is not None:
                    self.socket.setblocking(self.blocking)
                #
                # Validate server cert
                #
                if need_ssl and ssl_params['cert_validator']:
                    cert = self.socket.getpeercert()
                    (ok, errmsg) = ssl_params['cert_validator'](cert, host_and_port[0])
                    if not ok:
                        raise SSLError("Server certificate validation failed: %s", errmsg)
                self.current_host_and_port = host_and_port
                log.info("Established connection to host %s, port %s", host_and_port[0], host_and_port[1])
                break
            except socket.error:
                self.socket = None
                connect_count += 1
                log.warning("Could not connect to host %s, port %s", host_and_port[0], host_and_port[1], exc_info=1)
        if self.socket is None:
            # Exponential backoff with jitter before the next round of hosts.
            sleep_duration = (min(self.__reconnect_sleep_max,
                                  ((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))
                                   * math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))
                              * (1.0 + random.random() * self.__reconnect_sleep_jitter))
            sleep_end = monotonic() + sleep_duration
            log.debug("Sleeping for %.1f seconds before attempting reconnect", sleep_duration)
            # Sleep in short slices so a shutdown (running=False) is noticed.
            while self.running and monotonic() < sleep_end:
                time.sleep(0.2)
            if sleep_duration < self.__reconnect_sleep_max:
                sleep_exp += 1
    if not self.socket:
        raise exception.ConnectFailedException()
"def",
"attempt_connection",
"(",
"self",
")",
":",
"self",
".",
"connection_error",
"=",
"False",
"sleep_exp",
"=",
"1",
"connect_count",
"=",
"0",
"while",
"self",
".",
"running",
"and",
"self",
".",
"socket",
"is",
"None",
"and",
"(",
"connect_count",
"... | 51.818182 | 25.181818 |
def parse_qc(self, qc_file):
    """
    Parse phantompeakqualtools (spp) QC table and return quality metrics.

    :param str qc_file: Path to phantompeakqualtools output file, which
        contains sample quality measurements.
    :returns pandas.Series: Series with "NSC", "RSC" and "qualityTag"
        entries, or an empty Series when the file is missing, empty or too
        short (best-effort parsing).
    """
    import pandas as pd
    series = pd.Series()
    # Best-effort: a missing/empty/short file yields an empty Series rather
    # than raising. Catch only the errors parsing can actually hit, instead
    # of the previous bare ``except`` that silently hid real bugs.
    try:
        with open(qc_file) as handle:
            fields = handle.readlines()[0].strip().split("\t")
        series["NSC"] = fields[-3]
        series["RSC"] = fields[-2]
        series["qualityTag"] = fields[-1]
    except (IOError, OSError, IndexError):
        pass
    return series
"def",
"parse_qc",
"(",
"self",
",",
"qc_file",
")",
":",
"import",
"pandas",
"as",
"pd",
"series",
"=",
"pd",
".",
"Series",
"(",
")",
"try",
":",
"with",
"open",
"(",
"qc_file",
")",
"as",
"handle",
":",
"line",
"=",
"handle",
".",
"readlines",
"... | 34.333333 | 17.666667 |
def winsorize(row, min_percentile, max_percentile):
    """
    Clip the extreme values of ``row`` to percentile cutoffs, in the spirit
    of scipy.stats.mstats.winsorize. Returns a new array; NaNs are left in
    place and excluded from the percentile counts.
    """
    clipped = row.copy()
    num_nans = isnan(row).sum()
    num_values = clipped.size - num_nans
    # argsort() places NaNs at the tail, so the first ``num_values``
    # positions of ``order`` index the real (non-NaN) entries.
    order = clipped.argsort()
    # Flatten everything below the lower percentile to the cutoff value.
    if min_percentile > 0:
        lo = int(min_percentile * num_values)
        clipped[order[:lo]] = clipped[order[lo]]
    # Flatten everything above the upper percentile to the cutoff value,
    # taking care not to touch the trailing NaN region.
    if max_percentile < 1:
        hi = int(ceil(num_values * max_percentile))
        # A max_percentile close to 1 can round up to the full length, in
        # which case there is nothing to clip.
        if hi < num_values:
            nan_tail = (-num_nans) if num_nans else None
            clipped[order[hi:nan_tail]] = clipped[order[hi - 1]]
    return clipped
"def",
"winsorize",
"(",
"row",
",",
"min_percentile",
",",
"max_percentile",
")",
":",
"a",
"=",
"row",
".",
"copy",
"(",
")",
"nan_count",
"=",
"isnan",
"(",
"row",
")",
".",
"sum",
"(",
")",
"nonnan_count",
"=",
"a",
".",
"size",
"-",
"nan_count",... | 35.678571 | 20.964286 |
def run(**kwargs):
    """
    This function was necessary to separate from main() to accommodate for
    server startup path on system 3.0, which is server.main. In the case where
    the api is on system 3.0, server.main will redirect to this function with
    an additional argument of 'patch_old_init'. kwargs are hence used to allow
    the use of different length args

    Expected kwargs: 'hostname', 'port' and 'path' for the HTTP server.
    """
    log_init()
    loop = asyncio.get_event_loop()
    log.info("API server version: {}".format(__version__))
    # Hardware setup is skipped entirely when running the virtual smoothie.
    if not os.environ.get("ENABLE_VIRTUAL_SMOOTHIE"):
        initialize_robot(loop)
        if ff.use_protocol_api_v2():
            loop.run_until_complete(hardware.cache_instruments())
        if not ff.disable_home_on_boot():
            log.info("Homing Z axes")
            if ff.use_protocol_api_v2():
                loop.run_until_complete(hardware.home_z())
            else:
                hardware.home_z()
    # Best-effort: module detection degrades gracefully without udev rules.
    try:
        udev.setup_rules_file()
    except Exception:
        log.exception(
            "Could not setup udev rules, modules may not be detected")
    # Explicitly unlock resin updates in case a prior server left them locked
    resin.unlock_updates()
    server.run(kwargs.get('hostname'), kwargs.get('port'), kwargs.get('path'),
               loop)
"def",
"run",
"(",
"*",
"*",
"kwargs",
")",
":",
"log_init",
"(",
")",
"loop",
"=",
"asyncio",
".",
"get_event_loop",
"(",
")",
"log",
".",
"info",
"(",
"\"API server version: {}\"",
".",
"format",
"(",
"__version__",
")",
")",
"if",
"not",
"os",
".",... | 40.83871 | 18.451613 |
def paginate_data(searched_data, request_data):
    """
    Paginates the searched_data as per the request_data
    Source: Himanshu Shankar (https://github.com/iamhssingh)
    Parameters
    ----------
    searched_data: Serializer.data
        It is the data received from queryset. It uses
        show_serializer
    request_data: Serializer.data
        It is the request data. It uses serializer_class.
        Must provide 'paginator' (page size; <= 0 disables pagination)
        and 'page' (requested page number).
    Returns
    -------
    data: dict
        Keys: objects, previous, next, current, total_pages, total_objects.
        'previous'/'next' are -1 when there is no adjacent page.
    """
    from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
    # A paginator value of 0 (or less) disables pagination entirely.
    if int(request_data.data['paginator']) > 0:
        paginator = Paginator(searched_data.data,
                              request_data.data['paginator'])
        try:
            curr = paginator.page(request_data.data['page'])
        except PageNotAnInteger:
            # Non-numeric page requests fall back to the first page.
            curr = paginator.page(1)
        except EmptyPage:
            # Out-of-range page requests fall back to the last page.
            curr = paginator.page(paginator.num_pages)
        data = {'total_pages': paginator.num_pages, 'current': curr.number,
                'total_objects': len(searched_data.data)}
        if curr.has_next():
            data['next'] = curr.next_page_number()
        else:
            data['next'] = -1
        if curr.number > 1:
            data['previous'] = curr.previous_page_number()
        else:
            data['previous'] = -1
        data['objects'] = curr.object_list
    else:
        data = {'objects': searched_data.data, 'previous': -1, 'next': -1,
                'total_pages': 1, 'current': 1,
                'total_objects': len(searched_data.data)}
    return data
"def",
"paginate_data",
"(",
"searched_data",
",",
"request_data",
")",
":",
"from",
"django",
".",
"core",
".",
"paginator",
"import",
"Paginator",
",",
"EmptyPage",
",",
"PageNotAnInteger",
"if",
"int",
"(",
"request_data",
".",
"data",
"[",
"'paginator'",
"... | 34.555556 | 19.266667 |
def color(self, key):
    """
    Returns the color value for the given key for this console.
    :param key | <unicode>
    :return <QtGui.QColor>
    """
    lookup = key
    # Integer keys are logging levels; translate to the level's name first.
    if type(lookup) is int:
        lookup = self.LoggingMap.get(lookup, ('NotSet', ''))[0]
    return self._colorSet.color(nativestring(lookup).capitalize())
"def",
"color",
"(",
"self",
",",
"key",
")",
":",
"if",
"type",
"(",
"key",
")",
"==",
"int",
":",
"key",
"=",
"self",
".",
"LoggingMap",
".",
"get",
"(",
"key",
",",
"(",
"'NotSet'",
",",
"''",
")",
")",
"[",
"0",
"]",
"name",
"=",
"natives... | 31.75 | 13.083333 |
def corr(dataset, column, method="pearson"):
    """
    Compute the correlation matrix with specified method using dataset.
    :param dataset:
      A Dataset or a DataFrame.
    :param column:
      The name of the column of vectors for which the correlation coefficient needs
      to be computed. This must be a column of the dataset, and it must contain
      Vector objects.
    :param method:
      String specifying the method to use for computing correlation.
      Supported: `pearson` (default), `spearman`.
    :return:
      A DataFrame that contains the correlation matrix of the column of vectors. This
      DataFrame contains a single row and a single column of name
      '$METHODNAME($COLUMN)'.
    >>> from pyspark.ml.linalg import Vectors
    >>> from pyspark.ml.stat import Correlation
    >>> dataset = [[Vectors.dense([1, 0, 0, -2])],
    ...            [Vectors.dense([4, 5, 0, 3])],
    ...            [Vectors.dense([6, 7, 0, 8])],
    ...            [Vectors.dense([9, 0, 0, 1])]]
    >>> dataset = spark.createDataFrame(dataset, ['features'])
    >>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
    >>> print(str(pearsonCorr).replace('nan', 'NaN'))
    DenseMatrix([[ 1.        ,  0.0556...,         NaN,  0.4004...],
                 [ 0.0556...,  1.        ,         NaN,  0.9135...],
                 [        NaN,         NaN,  1.        ,         NaN],
                 [ 0.4004...,  0.9135...,         NaN,  1.        ]])
    >>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
    >>> print(str(spearmanCorr).replace('nan', 'NaN'))
    DenseMatrix([[ 1.        ,  0.1054...,         NaN,  0.4       ],
                 [ 0.1054...,  1.        ,         NaN,  0.9486... ],
                 [        NaN,         NaN,  1.        ,         NaN],
                 [ 0.4       ,  0.9486... ,         NaN,  1.        ]])
    """
    ctx = SparkContext._active_spark_context
    # Delegate to the JVM-side Correlation implementation.
    java_corr = _jvm().org.apache.spark.ml.stat.Correlation
    java_dataset, java_column, java_method = (
        _py2java(ctx, value) for value in (dataset, column, method))
    return _java2py(ctx, java_corr.corr(java_dataset, java_column, java_method))
"def",
"corr",
"(",
"dataset",
",",
"column",
",",
"method",
"=",
"\"pearson\"",
")",
":",
"sc",
"=",
"SparkContext",
".",
"_active_spark_context",
"javaCorrObj",
"=",
"_jvm",
"(",
")",
".",
"org",
".",
"apache",
".",
"spark",
".",
"ml",
".",
"stat",
"... | 54.142857 | 24.285714 |
def location(self, filetype, base_dir=None, **kwargs):
    """Return the location of the relative sas path of a given type of file.
    Parameters
    ----------
    filetype : str
        File type parameter.
    base_dir : str, optional
        Base directory to strip from the full path.
    Returns
    -------
    location : str
        The relative sas path to the file, or None when the full path does
        not live under the base directory.
    """
    # Allow callers to supply a precomputed full path; otherwise build one.
    full = kwargs.get('full') or self.full(filetype, **kwargs)
    self.set_base_dir(base_dir=base_dir)
    if not (full and full.startswith(self.base_dir)):
        return None
    relative = full[len(self.base_dir):]
    # Collapse accidental double slashes left over after stripping the base.
    if '//' in relative:
        relative = relative.replace('//', '/')
    return relative
"def",
"location",
"(",
"self",
",",
"filetype",
",",
"base_dir",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"full",
"=",
"kwargs",
".",
"get",
"(",
"'full'",
",",
"None",
")",
"if",
"not",
"full",
":",
"full",
"=",
"self",
".",
"full",
"(",... | 27.64 | 20.8 |
def get_postadres_by_huisnummer(self, huisnummer):
    '''
    Get the `postadres` for a :class:`Huisnummer`.
    :param huisnummer: The :class:`Huisnummer` for which the \
        `postadres` is wanted. OR A huisnummer id.
    :rtype: A :class:`str`.
    '''
    # Accept either a Huisnummer object (use its id) or a raw id.
    hid = getattr(huisnummer, 'id', huisnummer)

    def creator():
        res = crab_gateway_request(
            self.client, 'GetPostadresByHuisnummerId', hid
        )
        if res == None:
            raise GatewayResourceNotFoundException()
        return res.Postadres

    cache = self.caches['short']
    if not cache.is_configured:
        return creator()
    return cache.get_or_create(
        'GetPostadresByHuisnummerId#%s' % (hid), creator
    )
"def",
"get_postadres_by_huisnummer",
"(",
"self",
",",
"huisnummer",
")",
":",
"try",
":",
"id",
"=",
"huisnummer",
".",
"id",
"except",
"AttributeError",
":",
"id",
"=",
"huisnummer",
"def",
"creator",
"(",
")",
":",
"res",
"=",
"crab_gateway_request",
"("... | 34.84 | 17.64 |
def copy(self):
    """Create a shallow copy of self.
    This runs in O(len(self.num_unique_elements()))
    """
    # Build an empty instance of the same flavor, then clone the backing state.
    duplicate = self._from_iterable(None)
    duplicate._dict = self._dict.copy()
    duplicate._size = self._size
    return duplicate
"def",
"copy",
"(",
"self",
")",
":",
"out",
"=",
"self",
".",
"_from_iterable",
"(",
"None",
")",
"out",
".",
"_dict",
"=",
"self",
".",
"_dict",
".",
"copy",
"(",
")",
"out",
".",
"_size",
"=",
"self",
".",
"_size",
"return",
"out"
] | 22.666667 | 14.888889 |
def match(A, S, trueS):
    """Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)"""
    k = S.shape[0]
    cov = np.cov(trueS, S)
    # Rows 0..k-1 of cov correspond to trueS, rows k..2k-1 to S; the
    # cross-block normalized by the outer product of variances gives the
    # correlation of each S row against each trueS row.
    variances = np.diag(cov)
    corr = cov[k:, :k] / np.sqrt(np.outer(variances[k:], variances[:k]))
    # Hungarian assignment on -corr maximizes the total correlation.
    rows, cols = linear_sum_assignment(-corr)
    resS = np.zeros_like(S)
    resAT = np.zeros_like(A.T)
    resS[cols] = S[rows]
    resAT[cols] = A.T[rows]
    return resAT.T, resS
"def",
"match",
"(",
"A",
",",
"S",
",",
"trueS",
")",
":",
"cov",
"=",
"np",
".",
"cov",
"(",
"trueS",
",",
"S",
")",
"k",
"=",
"S",
".",
"shape",
"[",
"0",
"]",
"corr",
"=",
"np",
".",
"zeros",
"(",
"[",
"k",
",",
"k",
"]",
")",
"for"... | 40.266667 | 14.866667 |
def fixcode(**kwargs):
    """
    auto pep8 format all python file in ``source code`` and ``tests`` dir.

    :param kwargs: keyword arguments forwarded to ``Path.autopep8``.
    """
    # repository directory
    repo_dir = Path(__file__).parent.absolute()

    def _autopep8_dir(path, label):
        # Run autopep8 over every python file under *path*, if it exists.
        if path.exists():
            print("%s locate at: '%s'." % (label, path))
            print("Auto pep8 all python file ...")
            path.autopep8(**kwargs)
        else:
            print("%s directory not found!" % label)

    # Source code and unittest directories share the exact same handling,
    # so the previously duplicated blocks are folded into one helper.
    _autopep8_dir(Path(repo_dir, package.__name__), "Source code")
    _autopep8_dir(Path(repo_dir, "tests"), "Unittest code")
    print("Complete!")
"def",
"fixcode",
"(",
"*",
"*",
"kwargs",
")",
":",
"# repository direcotry",
"repo_dir",
"=",
"Path",
"(",
"__file__",
")",
".",
"parent",
".",
"absolute",
"(",
")",
"# source code directory",
"source_dir",
"=",
"Path",
"(",
"repo_dir",
",",
"package",
"."... | 30.148148 | 16.592593 |
def StoreCSRFCookie(user, response):
    """Decorator for WSGI handler that inserts CSRF cookie into response."""
    token = GenerateCSRFToken(user, None)
    max_age = CSRF_TOKEN_DURATION.seconds
    response.set_cookie("csrftoken", token, max_age=max_age)
"def",
"StoreCSRFCookie",
"(",
"user",
",",
"response",
")",
":",
"csrf_token",
"=",
"GenerateCSRFToken",
"(",
"user",
",",
"None",
")",
"response",
".",
"set_cookie",
"(",
"\"csrftoken\"",
",",
"csrf_token",
",",
"max_age",
"=",
"CSRF_TOKEN_DURATION",
".",
"s... | 40.5 | 15.5 |
def docs_init_to_class(self):
    """If found a __init__ method's docstring and the class
    without any docstring, so set the class docstring with __init__one,
    and let __init__ without docstring.
    :returns: True if done
    :rtype: boolean
    """
    result = False
    if not self.parsed:
        self._parse()
    # einit collects docstring entries for __init__ methods, eclass the
    # entries for their enclosing classes.
    einit = []
    eclass = []
    for e in self.docs_list:
        # An __init__ is only recorded while exactly one class is pending;
        # NOTE(review): `not eclass` means only the first class found is
        # ever collected — confirm that is intended.
        if len(eclass) == len(einit) + 1 and e['docs'].element['name'] == '__init__':
            einit.append(e)
        elif not eclass and e['docs'].element['type'] == 'class':
            eclass.append(e)
    for c, i in zip(eclass, einit):
        # Prefer the class docstring's location; fall back to __init__'s.
        start, _ = c['location']
        if start < 0:
            start, _ = i['location']
        if start > 0:
            result = True
            # Swap the indentation of the two docstrings so each fits its
            # new position, regenerate their text, then exchange the
            # docstring objects themselves.
            cspaces = c['docs'].get_spaces()
            ispaces = i['docs'].get_spaces()
            c['docs'].set_spaces(ispaces)
            i['docs'].set_spaces(cspaces)
            c['docs'].generate_docs()
            i['docs'].generate_docs()
            c['docs'], i['docs'] = i['docs'], c['docs']
    return result
"def",
"docs_init_to_class",
"(",
"self",
")",
":",
"result",
"=",
"False",
"if",
"not",
"self",
".",
"parsed",
":",
"self",
".",
"_parse",
"(",
")",
"einit",
"=",
"[",
"]",
"eclass",
"=",
"[",
"]",
"for",
"e",
"in",
"self",
".",
"docs_list",
":",
... | 36.848485 | 14.545455 |
def _check_error(response):
"""Raises an exception if the Spark Cloud returned an error."""
if (not response.ok) or (response.status_code != 200):
raise Exception(
response.json()['error'] + ': ' +
response.json()['error_description']
) | [
"def",
"_check_error",
"(",
"response",
")",
":",
"if",
"(",
"not",
"response",
".",
"ok",
")",
"or",
"(",
"response",
".",
"status_code",
"!=",
"200",
")",
":",
"raise",
"Exception",
"(",
"response",
".",
"json",
"(",
")",
"[",
"'error'",
"]",
"+",
... | 43.142857 | 13.571429 |
def get_subjects_with_equal_or_higher_perm(self, perm_str):
    """
    Args:
      perm_str : str
        Permission, ``read``, ``write`` or ``changePermission``.
    Returns:
      set of str : Subj that have perm equal or higher than ``perm_str``.
      Since the lowest permission a subject can have is ``read``, passing ``read``
      will return all subjects.
    """
    self._assert_valid_permission(perm_str)
    # Union the subject sets of every permission at or above perm_str.
    subjects = set()
    for perm in self._equal_or_higher_perm(perm_str):
        subjects.update(self._perm_dict.get(perm, set()))
    return subjects
"def",
"get_subjects_with_equal_or_higher_perm",
"(",
"self",
",",
"perm_str",
")",
":",
"self",
".",
"_assert_valid_permission",
"(",
"perm_str",
")",
"return",
"{",
"s",
"for",
"p",
"in",
"self",
".",
"_equal_or_higher_perm",
"(",
"perm_str",
")",
"for",
"s",
... | 31.111111 | 22.333333 |
def pupv_to_vRvz(pu, pv, u, v, delta=1., oblate=False):
    """
    NAME:
       pupv_to_vRvz
    PURPOSE:
       calculate cylindrical vR and vz from momenta in prolate or oblate confocal u and v coordinates for a given focal length delta
    INPUT:
       pu - u momentum
       pv - v momentum
       u - u coordinate
       v - v coordinate
       delta= focus
       oblate= (False) if True, compute oblate confocal coordinates instead of prolate
    OUTPUT:
       (vR,vz)
    HISTORY:
       2017-12-04 - Written - Bovy (UofT)
    """
    # Hoist the trigonometric/hyperbolic factors shared by both branches.
    sinhu, coshu = sc.sinh(u), sc.cosh(u)
    sinv, cosv = sc.sin(v), sc.cos(v)
    if oblate:
        denom = delta*(sinhu**2.+cosv**2.)
        vR = (pu*sinhu*sinv+pv*coshu*cosv)/denom
        vz = (pu*coshu*cosv-pv*sinhu*sinv)/denom
    else:
        denom = delta*(sinhu**2.+sinv**2.)
        vR = (pu*coshu*sinv+pv*sinhu*cosv)/denom
        vz = (pu*sinhu*cosv-pv*coshu*sinv)/denom
    return (vR, vz)
"def",
"pupv_to_vRvz",
"(",
"pu",
",",
"pv",
",",
"u",
",",
"v",
",",
"delta",
"=",
"1.",
",",
"oblate",
"=",
"False",
")",
":",
"if",
"oblate",
":",
"denom",
"=",
"delta",
"*",
"(",
"sc",
".",
"sinh",
"(",
"u",
")",
"**",
"2.",
"+",
"sc",
... | 21.627907 | 29.813953 |
def start(cls, ev):
    """
    Read all data from Views and send them to the backend.

    Validates the form, checks the URL scheme, collects all view
    properties and POSTs them as JSON to the ``to_output`` API endpoint.
    """
    # Stop the browser from submitting the form / bubbling the event.
    ev.preventDefault()
    ev.stopPropagation()
    ViewController.log_view.add("Beginning MARCGenerator request..")
    if not ViewController.validate():
        # Highlighted required fields are missing; tell the user (Czech UI).
        ViewController.urlbox_error.show(
            "Vyplňte prosím zvýrazněné hodnoty."
        )
        ViewController.log_view.add("Aborting. Not all required data set.")
        return
    # make sure, that `url` was filled
    url = ViewController.url.strip()
    if not (url.startswith("http://") or url.startswith("https://")):
        ViewController.urlbox_error.show(
            "`%s` není validní URL! Zkuste přidat http:// | https://" % url
        )
        return
    properties = ViewController.get_all_properties()
    properties["rules"] = RulesViewController.get_dict()
    # keep the url_id from hidden input
    if "url_id" in document and document["url_id"].value:
        properties["url_id"] = document["url_id"].value
    ViewController.log_view.add(
        "Sending following data: %s" % repr(properties)
    )
    # Fire the asynchronous request; cls.on_complete handles the response.
    make_request(
        url=join(settings.API_PATH, "to_output"),
        data={"data": json.dumps(properties)},
        on_complete=cls.on_complete
    )
"def",
"start",
"(",
"cls",
",",
"ev",
")",
":",
"ev",
".",
"preventDefault",
"(",
")",
"ev",
".",
"stopPropagation",
"(",
")",
"ViewController",
".",
"log_view",
".",
"add",
"(",
"\"Beginning MARCGenerator request..\"",
")",
"if",
"not",
"ViewController",
"... | 33.575 | 20.525 |
def recv_exactly(self, n, timeout='default'):
    """
    Receive exactly n bytes
    Aliases: read_exactly, readexactly, recvexactly
    """
    def _have_enough(buf):
        # Report n (bytes to consume) once buf holds at least n bytes, else 0.
        return n if len(buf) >= n else 0

    self._print_recv_header(
        '======== Receiving until exactly {0}B{timeout_text} ========', timeout, n)
    return self._recv_predicate(_have_enough, timeout)
"def",
"recv_exactly",
"(",
"self",
",",
"n",
",",
"timeout",
"=",
"'default'",
")",
":",
"self",
".",
"_print_recv_header",
"(",
"'======== Receiving until exactly {0}B{timeout_text} ========'",
",",
"timeout",
",",
"n",
")",
"return",
"self",
".",
"_recv_predicate... | 31.909091 | 22.090909 |
def can_attack_air(self) -> bool:
    """ Does not include upgrades """
    if not self._weapons:
        return False
    air_capable = {TargetType.Air.value, TargetType.Any.value}
    return any(weapon.type in air_capable for weapon in self._weapons)
"def",
"can_attack_air",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"self",
".",
"_weapons",
":",
"weapon",
"=",
"next",
"(",
"(",
"weapon",
"for",
"weapon",
"in",
"self",
".",
"_weapons",
"if",
"weapon",
".",
"type",
"in",
"{",
"TargetType",
".",
"A... | 36.888889 | 20.111111 |
def source(self, source):
    """When the source gets updated, update the pane object"""
    # Delegate to the base-class property setter first.
    BaseView.source.fset(self, source)
    if not self.main_pane:
        return
    self.main_pane.object = self.contents
    self.label_pane.object = self.label
"def",
"source",
"(",
"self",
",",
"source",
")",
":",
"BaseView",
".",
"source",
".",
"fset",
"(",
"self",
",",
"source",
")",
"if",
"self",
".",
"main_pane",
":",
"self",
".",
"main_pane",
".",
"object",
"=",
"self",
".",
"contents",
"self",
".",
... | 42.5 | 7.833333 |
def is_archive(filename):
    '''returns boolean of whether this filename looks like an archive'''
    return any(filename.endswith(entry['suffix'])
               for entry in archive_formats.values())
"def",
"is_archive",
"(",
"filename",
")",
":",
"for",
"archive",
"in",
"archive_formats",
":",
"if",
"filename",
".",
"endswith",
"(",
"archive_formats",
"[",
"archive",
"]",
"[",
"'suffix'",
"]",
")",
":",
"return",
"True",
"return",
"False"
] | 39.333333 | 19.666667 |
def run_functor(functor, *args, **kwargs):
    """
    Given a functor, run it and return its result. We can use this with
    multiprocessing.map and map it over a list of job functors to do them.
    Handles getting more than multiprocessing's pitiful exception output
    This function was derived from:
    http://stackoverflow.com/a/16618842/19741
    This code was adopted from the American Gut project:
    https://github.com/biocore/American-Gut/blob/master/americangut/parallel.py
    """
    try:
        return functor(*args, **kwargs)
    except Exception:
        # Re-raise with the fully formatted traceback so worker processes
        # surface useful error text instead of a bare exception object.
        formatted = traceback.format_exception(*sys.exc_info())
        raise Exception("".join(formatted))
"def",
"run_functor",
"(",
"functor",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"# This is where you do your actual work",
"return",
"functor",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"Exception",
":",
"# Put all exce... | 39.210526 | 21.315789 |
def _resolve_metric(self, original_metric_name, metrics_to_collect, prefix=""):
    """
    Return the submit method and the metric name to use.
    The metric name is defined as follow:
    * If available, the normalized metric name alias
    * (Or) the normalized original metric name
    """
    # Look the spec up once; a tuple carries (submit_method, alias),
    # a bare value is just the submit method.
    spec = metrics_to_collect[original_metric_name]
    if isinstance(spec, tuple):
        submit_method, metric_name = spec[0], spec[1]
    else:
        submit_method, metric_name = spec, original_metric_name
    return submit_method, self._normalize(metric_name, submit_method, prefix)
"def",
"_resolve_metric",
"(",
"self",
",",
"original_metric_name",
",",
"metrics_to_collect",
",",
"prefix",
"=",
"\"\"",
")",
":",
"submit_method",
"=",
"(",
"metrics_to_collect",
"[",
"original_metric_name",
"]",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"metri... | 38.619048 | 22.047619 |
def download(self, size=SIZE_LARGE, thumbnail=False, wait=60, asynchronous=False):
    """ Downloads this image to cache.
    Calling the download() method instantiates an asynchronous URLAccumulator.
    Once it is done downloading, this image will have its path property
    set to an image file in the cache.

    :param size: requested image size; passed through disambiguate_size().
    :param thumbnail: deprecated; True forces SIZE_THUMBNAIL.
    :param wait: seconds the accumulator may spend downloading.
    :param asynchronous: when False, block and return the cached file path.
    """
    if thumbnail == True: size = SIZE_THUMBNAIL # backwards compatibility
    self._size = disambiguate_size(size)
    if self._size == SIZE_THUMBNAIL:
        # Thumbnails are served from /med/ rather than /preview/ —
        # presumably a morguefile URL convention; TODO confirm.
        url = self.url.replace("/preview/", "/med/")
    else:
        url = self.url
    cache = "morguefile"
    extension = os.path.splitext(url)[1]
    # Re-initialising the URLAccumulator base starts the actual fetch
    # (asynchronously unless asynchronous=False).
    URLAccumulator.__init__(self, url, wait, asynchronous, cache, extension, 2)
    if not asynchronous:
        return self.path
"def",
"download",
"(",
"self",
",",
"size",
"=",
"SIZE_LARGE",
",",
"thumbnail",
"=",
"False",
",",
"wait",
"=",
"60",
",",
"asynchronous",
"=",
"False",
")",
":",
"if",
"thumbnail",
"==",
"True",
":",
"size",
"=",
"SIZE_THUMBNAIL",
"# backwards compatibi... | 36.913043 | 21.478261 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.