text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def on_click(self, event):
""" Override this method to do more interesting things with the event. """
DesktopNotification(
title=event.title,
body="{} until {}!".format(event.time_remaining, event.title),
icon='dialog-information',
urgency=1,
timeout=0,
).display() | [
"def",
"on_click",
"(",
"self",
",",
"event",
")",
":",
"DesktopNotification",
"(",
"title",
"=",
"event",
".",
"title",
",",
"body",
"=",
"\"{} until {}!\"",
".",
"format",
"(",
"event",
".",
"time_remaining",
",",
"event",
".",
"title",
")",
",",
"icon... | 37.888889 | 14.333333 |
def qgis_composer_html_renderer(impact_report, component):
"""HTML to PDF renderer using QGIS Composer.
Render using qgis composer for a given impact_report data and component
context for html input.
:param impact_report: ImpactReport contains data about the report that is
going to be generated.
:type impact_report: safe.report.impact_report.ImpactReport
:param component: Contains the component metadata and context for
rendering the output.
:type component:
safe.report.report_metadata.QgisComposerComponentsMetadata
:return: Whatever type of output the component should be.
.. versionadded:: 4.0
"""
context = component.context
# QGIS3: not used
# qgis_composition_context = impact_report.qgis_composition_context
# create new layout with A4 portrait page
layout = QgsPrintLayout(QgsProject.instance())
page = QgsLayoutItemPage(layout)
page.setPageSize('A4', orientation=QgsLayoutItemPage.Portrait)
layout.pageCollection().addPage(page)
if not context.html_frame_elements:
# if no html frame elements at all, do not generate empty report.
component.output = ''
return component.output
# Add HTML Frame
for html_el in context.html_frame_elements:
mode = html_el.get('mode')
html_element = QgsLayoutItemHtml(layout)
margin_left = html_el.get('margin_left', 10)
margin_top = html_el.get('margin_top', 10)
width = html_el.get('width', component.page_width - 2 * margin_left)
height = html_el.get('height', component.page_height - 2 * margin_top)
html_frame = QgsLayoutFrame(layout, html_element)
html_frame.attemptSetSceneRect(
QRectF(margin_left, margin_top, width, height))
html_element.addFrame(html_frame)
if html_element:
if mode == 'text':
text = html_el.get('text')
text = text if text else ''
html_element.setContentMode(QgsLayoutItemHtml.ManualHtml)
html_element.setResizeMode(
QgsLayoutItemHtml.RepeatUntilFinished)
html_element.setHtml(text)
html_element.loadHtml()
elif mode == 'url':
url = html_el.get('url')
html_element.setContentMode(QgsLayoutItemHtml.Url)
html_element.setResizeMode(
QgsLayoutItemHtml.RepeatUntilFinished)
qurl = QUrl.fromLocalFile(url)
html_element.setUrl(qurl)
# Attempt on removing blank page. Notes: We assume that the blank page
# will always appears in the last x page(s), not in the middle.
pc = layout.pageCollection()
index = pc.pageCount()
while pc.pageIsEmpty(index):
pc.deletePage(index)
index -= 1
# process to output
# in case output folder not specified
if impact_report.output_folder is None:
impact_report.output_folder = mkdtemp(dir=temp_dir())
component_output_path = impact_report.component_absolute_output_path(
component.key)
component_output = None
output_format = component.output_format
doc_format = QgisComposerComponentsMetadata.OutputFormat.DOC_OUTPUT
template_format = QgisComposerComponentsMetadata.OutputFormat.QPT
if isinstance(output_format, list):
component_output = []
for i in range(len(output_format)):
each_format = output_format[i]
each_path = component_output_path[i]
if each_format in doc_format:
result_path = create_qgis_pdf_output(
impact_report,
each_path,
layout,
each_format,
component)
component_output.append(result_path)
elif each_format == template_format:
result_path = create_qgis_template_output(
each_path, layout)
component_output.append(result_path)
elif isinstance(output_format, dict):
component_output = {}
for key, each_format in list(output_format.items()):
each_path = component_output_path[key]
if each_format in doc_format:
result_path = create_qgis_pdf_output(
impact_report,
each_path,
layout,
each_format,
component)
component_output[key] = result_path
elif each_format == template_format:
result_path = create_qgis_template_output(
each_path, layout)
component_output[key] = result_path
elif (output_format in
QgisComposerComponentsMetadata.OutputFormat.SUPPORTED_OUTPUT):
component_output = None
if output_format in doc_format:
result_path = create_qgis_pdf_output(
impact_report,
component_output_path,
layout,
output_format,
component)
component_output = result_path
elif output_format == template_format:
result_path = create_qgis_template_output(
component_output_path, layout)
component_output = result_path
component.output = component_output
return component.output | [
"def",
"qgis_composer_html_renderer",
"(",
"impact_report",
",",
"component",
")",
":",
"context",
"=",
"component",
".",
"context",
"# QGIS3: not used",
"# qgis_composition_context = impact_report.qgis_composition_context",
"# create new layout with A4 portrait page",
"layout",
"=... | 36.986014 | 16.587413 |
def _init_weights(self,
X):
"""Set the weights and normalize data before starting training."""
X = np.asarray(X, dtype=np.float64)
if self.scaler is not None:
X = self.scaler.fit_transform(X)
if self.initializer is not None:
self.weights = self.initializer(X, self.num_neurons)
for v in self.params.values():
v['value'] = v['orig']
return X | [
"def",
"_init_weights",
"(",
"self",
",",
"X",
")",
":",
"X",
"=",
"np",
".",
"asarray",
"(",
"X",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"if",
"self",
".",
"scaler",
"is",
"not",
"None",
":",
"X",
"=",
"self",
".",
"scaler",
".",
"fit_t... | 29.066667 | 17.333333 |
def unsubscribe(self, transform="", downlink=False):
"""Unsubscribes from a previously subscribed stream. Note that the same values of transform
and downlink must be passed in order to do the correct unsubscribe::
s.subscribe(callback,transform="if last")
s.unsubscribe(transform="if last")
"""
streampath = self.path
if downlink:
streampath += "/downlink"
return self.db.unsubscribe(streampath, transform) | [
"def",
"unsubscribe",
"(",
"self",
",",
"transform",
"=",
"\"\"",
",",
"downlink",
"=",
"False",
")",
":",
"streampath",
"=",
"self",
".",
"path",
"if",
"downlink",
":",
"streampath",
"+=",
"\"/downlink\"",
"return",
"self",
".",
"db",
".",
"unsubscribe",
... | 40.083333 | 16.416667 |
def check_with_pyflakes(source_code, filename=None):
"""Check source code with pyflakes
Returns an empty list if pyflakes is not installed"""
try:
if filename is None:
filename = '<string>'
try:
source_code += '\n'
except TypeError:
# Python 3
source_code += to_binary_string('\n')
import _ast
from pyflakes.checker import Checker
# First, compile into an AST and handle syntax errors.
try:
tree = compile(source_code, filename, "exec", _ast.PyCF_ONLY_AST)
except SyntaxError as value:
# If there's an encoding problem with the file, the text is None.
if value.text is None:
results = []
else:
results = [(value.args[0], value.lineno)]
except (ValueError, TypeError):
# Example of ValueError: file contains invalid \x escape character
# (see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674797)
# Example of TypeError: file contains null character
# (see https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=674796)
results = []
else:
# Okay, it's syntactically valid. Now check it.
w = Checker(tree, filename)
w.messages.sort(key=lambda x: x.lineno)
results = []
coding = encoding.get_coding(source_code)
lines = source_code.splitlines()
for warning in w.messages:
if 'analysis:ignore' not in \
to_text_string(lines[warning.lineno-1], coding):
results.append((warning.message % warning.message_args,
warning.lineno))
except Exception:
# Never return None to avoid lock in spyder/widgets/editor.py
# See Issue 1547
results = []
if DEBUG_EDITOR:
traceback.print_exc() # Print exception in internal console
return results | [
"def",
"check_with_pyflakes",
"(",
"source_code",
",",
"filename",
"=",
"None",
")",
":",
"try",
":",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"'<string>'",
"try",
":",
"source_code",
"+=",
"'\\n'",
"except",
"TypeError",
":",
"# Python 3\r",
"s... | 42.5625 | 17.833333 |
def delete_order_by_id(cls, order_id, **kwargs):
"""Delete Order
Delete an instance of Order by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_order_by_id(order_id, async=True)
>>> result = thread.get()
:param async bool
:param str order_id: ID of order to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_order_by_id_with_http_info(order_id, **kwargs)
else:
(data) = cls._delete_order_by_id_with_http_info(order_id, **kwargs)
return data | [
"def",
"delete_order_by_id",
"(",
"cls",
",",
"order_id",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_delete_order_by_id_with_ht... | 39.809524 | 18.571429 |
def deepcopy(self):
"""
Create a deep copy of the PolygonsOnImage object.
Returns
-------
imgaug.PolygonsOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for PolygonsOnImage,
# so use manual copy here too
polys = [poly.deepcopy() for poly in self.polygons]
return PolygonsOnImage(polys, tuple(self.shape)) | [
"def",
"deepcopy",
"(",
"self",
")",
":",
"# Manual copy is far faster than deepcopy for PolygonsOnImage,",
"# so use manual copy here too",
"polys",
"=",
"[",
"poly",
".",
"deepcopy",
"(",
")",
"for",
"poly",
"in",
"self",
".",
"polygons",
"]",
"return",
"PolygonsOnI... | 28.714286 | 18.857143 |
def path(self, filename):
'''
This returns the absolute path of a file uploaded to this set. It
doesn't actually check whether said file exists.
:param filename: The filename to return the path for.
:param folder: The subfolder within the upload set previously used
to save to.
:raises OperationNotSupported: when the backenddoesn't support direct file access
'''
if not self.backend.root:
raise OperationNotSupported(
'Direct file access is not supported by ' +
self.backend.__class__.__name__
)
return os.path.join(self.backend.root, filename) | [
"def",
"path",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"self",
".",
"backend",
".",
"root",
":",
"raise",
"OperationNotSupported",
"(",
"'Direct file access is not supported by '",
"+",
"self",
".",
"backend",
".",
"__class__",
".",
"__name__",
")... | 40.117647 | 22.823529 |
async def create_websocket_server(sock, filter=None): # pylint: disable=W0622
"""
A more low-level form of open_websocket_server.
You are responsible for closing this websocket.
"""
ws = Websocket()
await ws.start_server(sock, filter=filter)
return ws | [
"async",
"def",
"create_websocket_server",
"(",
"sock",
",",
"filter",
"=",
"None",
")",
":",
"# pylint: disable=W0622",
"ws",
"=",
"Websocket",
"(",
")",
"await",
"ws",
".",
"start_server",
"(",
"sock",
",",
"filter",
"=",
"filter",
")",
"return",
"ws"
] | 34.125 | 14.125 |
def from_hising(cls, h, J, offset=None):
"""Construct a binary polynomial from a higher-order Ising problem.
Args:
h (dict):
The linear biases.
J (dict):
The higher-order biases.
offset (optional, default=0.0):
Constant offset applied to the model.
Returns:
:obj:`.BinaryPolynomial`
Examples:
>>> poly = dimod.BinaryPolynomial.from_hising({'a': 2}, {'ab': -1}, 0)
"""
poly = {(k,): v for k, v in h.items()}
poly.update(J)
if offset is not None:
poly[frozenset([])] = offset
return cls(poly, Vartype.SPIN) | [
"def",
"from_hising",
"(",
"cls",
",",
"h",
",",
"J",
",",
"offset",
"=",
"None",
")",
":",
"poly",
"=",
"{",
"(",
"k",
",",
")",
":",
"v",
"for",
"k",
",",
"v",
"in",
"h",
".",
"items",
"(",
")",
"}",
"poly",
".",
"update",
"(",
"J",
")"... | 27.12 | 18.24 |
def update(self, date, data=None, inow=None):
"""
Update strategy. Updates prices, values, weight, etc.
"""
# resolve stale state
self.root.stale = False
# update helpers on date change
# also set newpt flag
newpt = False
if self.now == 0:
newpt = True
elif date != self.now:
self._net_flows = 0
self._last_price = self._price
self._last_value = self._value
self._last_fee = 0.0
newpt = True
# update now
self.now = date
if inow is None:
if self.now == 0:
inow = 0
else:
inow = self.data.index.get_loc(date)
# update children if any and calculate value
val = self._capital # default if no children
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
c.update(date, data, inow)
val += c.value
if self.root == self:
if (val < 0) and not self.bankrupt:
# Declare a bankruptcy
self.bankrupt = True
self.flatten()
# update data if this value is different or
# if now has changed - avoid all this if not since it
# won't change
if newpt or self._value != val:
self._value = val
self._values.values[inow] = val
bottom = self._last_value + self._net_flows
if bottom != 0:
ret = self._value / (self._last_value + self._net_flows) - 1
else:
if self._value == 0:
ret = 0
else:
raise ZeroDivisionError(
'Could not update %s. Last value '
'was %s and net flows were %s. Current'
'value is %s. Therefore, '
'we are dividing by zero to obtain the return '
'for the period.' % (self.name,
self._last_value,
self._net_flows,
self._value))
self._price = self._last_price * (1 + ret)
self._prices.values[inow] = self._price
# update children weights
if self.children is not None:
for c in self._childrenv:
# avoid useless update call
if c._issec and not c._needupdate:
continue
if val != 0:
c._weight = c.value / val
else:
c._weight = 0.0
# if we have strategy children, we will need to update them in universe
if self._has_strat_children:
for c in self._strat_children:
# TODO: optimize ".loc" here as well
self._universe.loc[date, c] = self.children[c].price
# Cash should track the unallocated capital at the end of the day, so
# we should update it every time we call "update".
# Same for fees
self._cash.values[inow] = self._capital
self._fees.values[inow] = self._last_fee
# update paper trade if necessary
if newpt and self._paper_trade:
self._paper.update(date)
self._paper.run()
self._paper.update(date)
# update price
self._price = self._paper.price
self._prices.values[inow] = self._price | [
"def",
"update",
"(",
"self",
",",
"date",
",",
"data",
"=",
"None",
",",
"inow",
"=",
"None",
")",
":",
"# resolve stale state",
"self",
".",
"root",
".",
"stale",
"=",
"False",
"# update helpers on date change",
"# also set newpt flag",
"newpt",
"=",
"False"... | 34.825243 | 15.213592 |
def pageassert(func):
'''
Decorator that assert page number
'''
@wraps(func)
def wrapper(*args, **kwargs):
if args[0] < 1 or args[0] > 40:
raise ValueError('Page Number not found')
return func(*args, **kwargs)
return wrapper | [
"def",
"pageassert",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"args",
"[",
"0",
"]",
"<",
"1",
"or",
"args",
"[",
"0",
"]",
">",
"40",
":",
"raise",
... | 26.7 | 15.9 |
def get_upregulated_genes_network(self) -> Graph:
"""Get the graph of up-regulated genes.
:return Graph: Graph of up-regulated genes.
"""
logger.info("In get_upregulated_genes_network()")
deg_graph = self.graph.copy() # deep copy graph
not_diff_expr = self.graph.vs(up_regulated_eq=False)
# delete genes which are not differentially expressed or have no connections to others
deg_graph.delete_vertices(not_diff_expr.indices)
deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))
return deg_graph | [
"def",
"get_upregulated_genes_network",
"(",
"self",
")",
"->",
"Graph",
":",
"logger",
".",
"info",
"(",
"\"In get_upregulated_genes_network()\"",
")",
"deg_graph",
"=",
"self",
".",
"graph",
".",
"copy",
"(",
")",
"# deep copy graph",
"not_diff_expr",
"=",
"self... | 38.2 | 23.133333 |
def __retrieve(self, key):
''' Retrieve file location from cache DB
'''
with self.get_conn() as conn:
try:
c = conn.cursor()
if key is None:
c.execute("SELECT value FROM cache_entries WHERE key IS NULL")
else:
c.execute("SELECT value FROM cache_entries WHERE key = ?", (key,))
result = c.fetchone()
if result is None or len(result) != 1:
getLogger().info("There's no entry with key={key}".format(key=key))
return None
else:
return result[0]
except:
getLogger().exception("Cannot retrieve")
return None | [
"def",
"__retrieve",
"(",
"self",
",",
"key",
")",
":",
"with",
"self",
".",
"get_conn",
"(",
")",
"as",
"conn",
":",
"try",
":",
"c",
"=",
"conn",
".",
"cursor",
"(",
")",
"if",
"key",
"is",
"None",
":",
"c",
".",
"execute",
"(",
"\"SELECT value... | 39.947368 | 18.263158 |
def perform_action(
self, action, machines, params, progress_title, success_title):
"""Perform the action on the set of machines."""
if len(machines) == 0:
return 0
with utils.Spinner() as context:
return self._async_perform_action(
context, action, list(machines), params,
progress_title, success_title) | [
"def",
"perform_action",
"(",
"self",
",",
"action",
",",
"machines",
",",
"params",
",",
"progress_title",
",",
"success_title",
")",
":",
"if",
"len",
"(",
"machines",
")",
"==",
"0",
":",
"return",
"0",
"with",
"utils",
".",
"Spinner",
"(",
")",
"as... | 43.111111 | 12.666667 |
def transform(self, X, lenscale=None):
"""
Apply the random basis to X.
Parameters
----------
X: ndarray
(N, d) array of observations where N is the number of samples, and
d is the dimensionality of X.
lenscale: scalar or ndarray, optional
scalar or array of shape (d,) length scales (one for each dimension
of X). If not input, this uses the value of the initial length
scale.
Returns
-------
ndarray:
of shape (N, 2*nbases) where nbases is number of random bases to
use, given in the constructor.
"""
N, D = X.shape
lenscale = self._check_dim(D, lenscale)[:, np.newaxis]
WX = np.dot(X, self.W / lenscale)
return np.hstack((np.cos(WX), np.sin(WX))) / np.sqrt(self.n) | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"lenscale",
"=",
"None",
")",
":",
"N",
",",
"D",
"=",
"X",
".",
"shape",
"lenscale",
"=",
"self",
".",
"_check_dim",
"(",
"D",
",",
"lenscale",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"WX"... | 32.384615 | 21.230769 |
def imgAverage(images, copy=True):
'''
returns an image average
works on many, also unloaded images
minimises RAM usage
'''
i0 = images[0]
out = imread(i0, dtype='float')
if copy and id(i0) == id(out):
out = out.copy()
for i in images[1:]:
out += imread(i, dtype='float')
out /= len(images)
return out | [
"def",
"imgAverage",
"(",
"images",
",",
"copy",
"=",
"True",
")",
":",
"i0",
"=",
"images",
"[",
"0",
"]",
"out",
"=",
"imread",
"(",
"i0",
",",
"dtype",
"=",
"'float'",
")",
"if",
"copy",
"and",
"id",
"(",
"i0",
")",
"==",
"id",
"(",
"out",
... | 22.6875 | 17.3125 |
def get_data_by_slug_or_404(model, slug, kind='', **kwargs):
"""Wrap get_data_by_slug, abort 404 if missing data."""
data = get_data_by_slug(model, slug, kind, **kwargs)
if not data:
abort(404)
return data | [
"def",
"get_data_by_slug_or_404",
"(",
"model",
",",
"slug",
",",
"kind",
"=",
"''",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"get_data_by_slug",
"(",
"model",
",",
"slug",
",",
"kind",
",",
"*",
"*",
"kwargs",
")",
"if",
"not",
"data",
":",
... | 28 | 23.375 |
def plot_figure(array, as_subplot, units, kpc_per_arcsec, figsize, aspect, cmap, norm, norm_min, norm_max,
linthresh, linscale, xticks_manual, yticks_manual):
"""Open a matplotlib figure and plot the array of data on it.
Parameters
-----------
array : data.array.scaled_array.ScaledArray
The 2D array of data which is plotted.
as_subplot : bool
Whether the array is plotted as part of a subplot, in which case the grid figure is not opened / closed.
units : str
The units of the y / x axis of the plots, in arc-seconds ('arcsec') or kiloparsecs ('kpc').
kpc_per_arcsec : float or None
The conversion factor between arc-seconds and kiloparsecs, required to plot the units in kpc.
figsize : (int, int)
The size of the figure in (rows, columns).
aspect : str
The aspect ratio of the array, specifically whether it is forced to be square ('equal') or adapts its size to \
the figure size ('auto').
cmap : str
The colormap the array is plotted using, which may be chosen from the standard matplotlib colormaps.
norm : str
The normalization of the colormap used to plot the image, specifically whether it is linear ('linear'), log \
('log') or a symmetric log normalization ('symmetric_log').
norm_min : float or None
The minimum array value the colormap map spans (all values below this value are plotted the same color).
norm_max : float or None
The maximum array value the colormap map spans (all values above this value are plotted the same color).
linthresh : float
For the 'symmetric_log' colormap normalization ,this specifies the range of values within which the colormap \
is linear.
linscale : float
For the 'symmetric_log' colormap normalization, this allowws the linear range set by linthresh to be stretched \
relative to the logarithmic range.
xticks_manual : [] or None
If input, the xticks do not use the array's default xticks but instead overwrite them as these values.
yticks_manual : [] or None
If input, the yticks do not use the array's default yticks but instead overwrite them as these values.
"""
fig = plotter_util.setup_figure(figsize=figsize, as_subplot=as_subplot)
norm_min, norm_max = get_normalization_min_max(array=array, norm_min=norm_min, norm_max=norm_max)
norm_scale = get_normalization_scale(norm=norm, norm_min=norm_min, norm_max=norm_max,
linthresh=linthresh, linscale=linscale)
extent = get_extent(array=array, units=units, kpc_per_arcsec=kpc_per_arcsec,
xticks_manual=xticks_manual, yticks_manual=yticks_manual)
plt.imshow(array, aspect=aspect, cmap=cmap, norm=norm_scale, extent=extent)
return fig | [
"def",
"plot_figure",
"(",
"array",
",",
"as_subplot",
",",
"units",
",",
"kpc_per_arcsec",
",",
"figsize",
",",
"aspect",
",",
"cmap",
",",
"norm",
",",
"norm_min",
",",
"norm_max",
",",
"linthresh",
",",
"linscale",
",",
"xticks_manual",
",",
"yticks_manua... | 55.117647 | 36.019608 |
def import_cluster_template(self, api_cluster_template, add_repositories=False):
"""
Create a cluster according to the provided template
@param api_cluster_template: cluster template to import
@param add_repositories: if true the parcels repositories in the cluster template will be added.
@return: Command handing cluster import
@since: API v12
"""
return self._post("importClusterTemplate", ApiCommand, False, api_cluster_template, params=dict(addRepositories=add_repositories), api_version=12) | [
"def",
"import_cluster_template",
"(",
"self",
",",
"api_cluster_template",
",",
"add_repositories",
"=",
"False",
")",
":",
"return",
"self",
".",
"_post",
"(",
"\"importClusterTemplate\"",
",",
"ApiCommand",
",",
"False",
",",
"api_cluster_template",
",",
"params"... | 52 | 30.8 |
def _FormatDateTime(self, event):
"""Formats the date to a datetime object without timezone information.
Note: timezone information must be removed due to lack of support
by xlsxwriter and Excel.
Args:
event (EventObject): event.
Returns:
datetime.datetime|str: date and time value or a string containing
"ERROR" on OverflowError.
"""
try:
datetime_object = datetime.datetime(
1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
datetime_object += datetime.timedelta(microseconds=event.timestamp)
datetime_object.astimezone(self._output_mediator.timezone)
return datetime_object.replace(tzinfo=None)
except (OverflowError, ValueError) as exception:
self._ReportEventError(event, (
'unable to copy timestamp: {0!s} to a human readable date and time '
'with error: {1!s}. Defaulting to: "ERROR"').format(
event.timestamp, exception))
return 'ERROR' | [
"def",
"_FormatDateTime",
"(",
"self",
",",
"event",
")",
":",
"try",
":",
"datetime_object",
"=",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"tzinfo",
"=",
"pytz",
".",
"UTC",
")",
... | 35.111111 | 20.62963 |
def extract(self, name):
"""Get the contents of an entry.
NAME is an entry name.
Return the tuple (ispkg, contents).
For non-Python resoures, ispkg is meaningless (and 0).
Used by the import mechanism."""
if type(name) == type(''):
ndx = self.toc.find(name)
if ndx == -1:
return None
else:
ndx = name
(dpos, dlen, ulen, flag, typcd, nm) = self.toc.get(ndx)
self.lib.seek(self.pkgstart+dpos)
rslt = self.lib.read(dlen)
if flag == 2:
global AES
import AES
key = rslt[:32]
# Note: keep this in sync with bootloader's code
rslt = AES.new(key, AES.MODE_CFB, "\0"*AES.block_size).decrypt(rslt[32:])
if flag == 1 or flag == 2:
rslt = zlib.decompress(rslt)
if typcd == 'M':
return (1, rslt)
return (0, rslt) | [
"def",
"extract",
"(",
"self",
",",
"name",
")",
":",
"if",
"type",
"(",
"name",
")",
"==",
"type",
"(",
"''",
")",
":",
"ndx",
"=",
"self",
".",
"toc",
".",
"find",
"(",
"name",
")",
"if",
"ndx",
"==",
"-",
"1",
":",
"return",
"None",
"else"... | 34.592593 | 14.407407 |
def list_plugins(self):
"""Returns a sorted list of all plugins that are available in this
plugin source. This can be useful to automatically discover plugins
that are available and is usually used together with
:meth:`load_plugin`.
"""
rv = []
for _, modname, ispkg in pkgutil.iter_modules(self.mod.__path__):
rv.append(modname)
return sorted(rv) | [
"def",
"list_plugins",
"(",
"self",
")",
":",
"rv",
"=",
"[",
"]",
"for",
"_",
",",
"modname",
",",
"ispkg",
"in",
"pkgutil",
".",
"iter_modules",
"(",
"self",
".",
"mod",
".",
"__path__",
")",
":",
"rv",
".",
"append",
"(",
"modname",
")",
"return... | 41.5 | 16.8 |
def phylotree(self):
"""
Get the c++ PhyloTree object corresponding to this tree.
:return: PhyloTree instance
"""
if not self._phylotree or self._dirty:
try:
if ISPY3:
self._phylotree = PhyloTree(self.newick.encode(), self.rooted)
else:
self._phylotree = PhyloTree(self.newick, self.rooted)
except ValueError:
logger.error('Couldn\'t convert to C++ PhyloTree -- are there bootstrap values?')
self._dirty = False
return self._phylotree | [
"def",
"phylotree",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_phylotree",
"or",
"self",
".",
"_dirty",
":",
"try",
":",
"if",
"ISPY3",
":",
"self",
".",
"_phylotree",
"=",
"PhyloTree",
"(",
"self",
".",
"newick",
".",
"encode",
"(",
")",
"... | 39.466667 | 18.266667 |
def _create_non_null_wrapper(name, t):
'creates type wrapper for non-null of given type'
def __new__(cls, json_data, selection_list=None):
if json_data is None:
raise ValueError(name + ' received null value')
return t(json_data, selection_list)
def __to_graphql_input__(value, indent=0, indent_string=' '):
return t.__to_graphql_input__(value, indent, indent_string)
return type(name, (t,), {
'__new__': __new__,
'_%s__auto_register' % name: False,
'__to_graphql_input__': __to_graphql_input__,
}) | [
"def",
"_create_non_null_wrapper",
"(",
"name",
",",
"t",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"json_data",
",",
"selection_list",
"=",
"None",
")",
":",
"if",
"json_data",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"name",
"+",
"' received nul... | 37.733333 | 17.866667 |
def add_interface(self, interface):
"""Manually add or overwrite an interface definition from an Interface object.
:param interface: an Interface() object
"""
if not isinstance(interface, Interface):
raise TypeError
self._interfaces[interface.name] = interface | [
"def",
"add_interface",
"(",
"self",
",",
"interface",
")",
":",
"if",
"not",
"isinstance",
"(",
"interface",
",",
"Interface",
")",
":",
"raise",
"TypeError",
"self",
".",
"_interfaces",
"[",
"interface",
".",
"name",
"]",
"=",
"interface"
] | 30.6 | 16.5 |
def request_port_forward(self, address, port, handler=None):
"""
Ask the server to forward TCP connections from a listening port on
the server, across this SSH session.
If a handler is given, that handler is called from a different thread
whenever a forwarded connection arrives. The handler parameters are::
handler(channel, (origin_addr, origin_port), (server_addr, server_port))
where C{server_addr} and C{server_port} are the address and port that
the server was listening on.
If no handler is set, the default behavior is to send new incoming
forwarded connections into the accept queue, to be picked up via
L{accept}.
@param address: the address to bind when forwarding
@type address: str
@param port: the port to forward, or 0 to ask the server to allocate
any port
@type port: int
@param handler: optional handler for incoming forwarded connections
@type handler: function(Channel, (str, int), (str, int))
@return: the port # allocated by the server
@rtype: int
@raise SSHException: if the server refused the TCP forward request
"""
if not self.active:
raise SSHException('SSH session not active')
address = str(address)
port = int(port)
response = self.global_request('tcpip-forward', (address, port), wait=True)
if response is None:
raise SSHException('TCP forwarding request denied')
if port == 0:
port = response.get_int()
if handler is None:
def default_handler(channel, (src_addr, src_port), (dest_addr, dest_port)):
self._queue_incoming_channel(channel)
handler = default_handler
self._tcp_handler = handler
return port | [
"def",
"request_port_forward",
"(",
"self",
",",
"address",
",",
"port",
",",
"handler",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"active",
":",
"raise",
"SSHException",
"(",
"'SSH session not active'",
")",
"address",
"=",
"str",
"(",
"address",
"... | 41.590909 | 23.5 |
def atlasdb_get_random_peer( con=None, path=None ):
"""
Select a peer from the db at random
Return None if the table is empty
"""
ret = {}
with AtlasDBOpen(con=con, path=path) as dbcon:
num_peers = atlasdb_num_peers( con=con, path=path )
if num_peers is None or num_peers == 0:
# no peers
ret['peer_hostport'] = None
else:
r = random.randint(1, num_peers)
sql = "SELECT * FROM peers WHERE peer_index = ?;"
args = (r,)
cur = dbcon.cursor()
res = atlasdb_query_execute( cur, sql, args )
ret = {'peer_hostport': None}
for row in res:
ret.update( row )
break
return ret['peer_hostport'] | [
"def",
"atlasdb_get_random_peer",
"(",
"con",
"=",
"None",
",",
"path",
"=",
"None",
")",
":",
"ret",
"=",
"{",
"}",
"with",
"AtlasDBOpen",
"(",
"con",
"=",
"con",
",",
"path",
"=",
"path",
")",
"as",
"dbcon",
":",
"num_peers",
"=",
"atlasdb_num_peers"... | 25.1 | 18.7 |
def get_state_paths(cls, impl, working_dir):
"""
Get the set of state paths that point to the current chain and state info.
Returns a list of paths.
"""
return [config.get_db_filename(impl, working_dir), config.get_snapshots_filename(impl, working_dir)] | [
"def",
"get_state_paths",
"(",
"cls",
",",
"impl",
",",
"working_dir",
")",
":",
"return",
"[",
"config",
".",
"get_db_filename",
"(",
"impl",
",",
"working_dir",
")",
",",
"config",
".",
"get_snapshots_filename",
"(",
"impl",
",",
"working_dir",
")",
"]"
] | 48 | 20.333333 |
def remover(self, id_tipo_acesso):
"""Removes access type by its identifier.
:param id_tipo_acesso: Access type identifier.
:return: None
:raise TipoAcessoError: Access type associated with equipment, cannot be removed.
:raise InvalidParameterError: Protocol value is invalid or none.
:raise TipoAcessoNaoExisteError: Access type doesn't exist.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_tipo_acesso):
raise InvalidParameterError(
u'Access type id is invalid or was not informed.')
url = 'tipoacesso/' + str(id_tipo_acesso) + '/'
code, xml = self.submit(None, 'DELETE', url)
return self.response(code, xml) | [
"def",
"remover",
"(",
"self",
",",
"id_tipo_acesso",
")",
":",
"if",
"not",
"is_valid_int_param",
"(",
"id_tipo_acesso",
")",
":",
"raise",
"InvalidParameterError",
"(",
"u'Access type id is invalid or was not informed.'",
")",
"url",
"=",
"'tipoacesso/'",
"+",
"str"... | 38.272727 | 23.363636 |
def http_exception(channel, title):
    """
    Creates an embed UI containing the 'too long' error message
    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        title (str): The title of the embed
    Returns:
        ui (ui_embed.UI): The embed UI object
    """
    # Build the embed describing the oversized help content.
    description = "{} is too helpful! Try trimming some of the help messages.".format(title)
    embed_ui = ui_embed.UI(
        channel,
        "Too much help",
        description,
        modulename=modulename
    )
    return embed_ui
"def",
"http_exception",
"(",
"channel",
",",
"title",
")",
":",
"# Create embed UI object",
"gui",
"=",
"ui_embed",
".",
"UI",
"(",
"channel",
",",
"\"Too much help\"",
",",
"\"{} is too helpful! Try trimming some of the help messages.\"",
".",
"format",
"(",
"title",
... | 24.619048 | 22.619048 |
def set_key(key: str, value: str) -> dict:
    """Set or update a key in the conf.
    For now only strings are supported.
    We use to update the version number.
    """
    config_path = _conf.path
    if not config_path:
        # No config file discovered: nothing to write.
        return {}
    if "toml" in config_path:
        # TOML config: round-trip through tomlkit's parser to preserve
        # the file's original formatting.
        with open(config_path, "r") as config_file:
            document = parse(config_file.read())
        document["tool"]["commitizen"][key] = value
        with open(config_path, "w") as config_file:
            config_file.write(document.as_string())
    else:
        # INI-style config handled by configparser.
        ini = configparser.ConfigParser()
        ini.read(config_path)
        ini["commitizen"][key] = value
        with open(config_path, "w") as config_file:
            ini.write(config_file)
    return _conf.config
"def",
"set_key",
"(",
"key",
":",
"str",
",",
"value",
":",
"str",
")",
"->",
"dict",
":",
"if",
"not",
"_conf",
".",
"path",
":",
"return",
"{",
"}",
"if",
"\"toml\"",
"in",
"_conf",
".",
"path",
":",
"with",
"open",
"(",
"_conf",
".",
"path",
... | 28.391304 | 11.521739 |
def long_press(self, on_element):
"""
Long press on an element.
:Args:
- on_element: The element to long press.
"""
self._actions.append(lambda: self._driver.execute(
Command.LONG_PRESS, {'element': on_element.id}))
return self | [
"def",
"long_press",
"(",
"self",
",",
"on_element",
")",
":",
"self",
".",
"_actions",
".",
"append",
"(",
"lambda",
":",
"self",
".",
"_driver",
".",
"execute",
"(",
"Command",
".",
"LONG_PRESS",
",",
"{",
"'element'",
":",
"on_element",
".",
"id",
"... | 28.8 | 14.8 |
def _to_epoch(self, ts):
"""
Adds a year to the syslog timestamp because syslog doesn't use years
:param ts: The timestamp to add a year to
:return: Date/time string that includes a year
"""
year = self.year
tmpts = "%s %s" % (ts, str(self.year))
new_time = int(calendar.timegm(time.strptime(tmpts, "%b %d %H:%M:%S %Y")))
# If adding the year puts it in the future, this log must be from last year
if new_time > int(time.time()):
year -= 1
tmpts = "%s %s" % (ts, str(year))
new_time = int(calendar.timegm(time.strptime(tmpts, "%b %d %H:%M:%S %Y")))
return new_time | [
"def",
"_to_epoch",
"(",
"self",
",",
"ts",
")",
":",
"year",
"=",
"self",
".",
"year",
"tmpts",
"=",
"\"%s %s\"",
"%",
"(",
"ts",
",",
"str",
"(",
"self",
".",
"year",
")",
")",
"new_time",
"=",
"int",
"(",
"calendar",
".",
"timegm",
"(",
"time"... | 35.473684 | 22.631579 |
def trunc_list(s: List) -> List:
    """Truncate lists to maximum length."""
    if len(s) <= max_list_size:
        return s
    # Keep the first half and (half - 1) trailing items, with an ellipsis
    # marker in between, so the result is exactly max_list_size long.
    head = max_list_size // 2
    tail = head - 1
    return s[:head] + [ELLIPSIS] + s[-tail:]
"def",
"trunc_list",
"(",
"s",
":",
"List",
")",
"->",
"List",
":",
"if",
"len",
"(",
"s",
")",
">",
"max_list_size",
":",
"i",
"=",
"max_list_size",
"//",
"2",
"j",
"=",
"i",
"-",
"1",
"s",
"=",
"s",
"[",
":",
"i",
"]",
"+",
"[",
"ELLIPSIS",... | 29 | 11.428571 |
    def xml_import(self,
                   filepath="",
                   xml_content=None,
                   markings=None,
                   identifier_ns_uri=None,
                   **kwargs):
        """
        Import a STIX or CybOX xml from file <filepath> or a string passed as ``xml_content``
        You can furhter provide:
        - a list of InfoObjects as markings with which all generated Information Objects
          will be associated (e.g., in order to provide provenance function)
        - a default identifier namespace URI.
        The kwargs are not read -- they are present to allow the use of the
        DingoImportCommand class for easy definition of commandline import commands
        (the class passes all command line arguments to the xml_import function, so
        without the **kwargs parameter, an error would occur.

        :param filepath: Path of the XML file to import (ignored if
            ``xml_content`` is given).
        :param xml_content: XML document passed directly as a string.
        :param markings: List of InfoObjects to attach to every generated object.
        :param identifier_ns_uri: Default namespace URI used to generate an
            identifier when the top-level element has none.
        :param kwargs: Only ``default_timestamp`` is honored; everything else
            is ignored (see above).
        """
        # Clear internal state such that same object can be reused for
        # multiple imports.
        if 'default_timestamp' in kwargs and kwargs['default_timestamp']:
            # NOTE(review): `basestring` implies this module targets Python 2.
            if isinstance(kwargs['default_timestamp'],basestring):
                naive = parse_datetime(kwargs['default_timestamp'])
            else:
                naive = kwargs['default_timestamp']
            # Normalize to a timezone-aware timestamp (UTC for naive inputs).
            if not timezone.is_aware(naive):
                aware = timezone.make_aware(naive,timezone.utc)
            else:
                aware = naive
            self.default_timestamp = aware
        self.namespace_dict = {None: DINGOS_NAMESPACE_URI}
        # NOTE(review): the first assignment is immediately overwritten below;
        # kept as-is to preserve the original code exactly.
        self.default_identifier_ns_uri = None
        self.default_identifier_ns_uri = identifier_ns_uri
        if not markings:
            markings = []
        # Use the generic XML import customized for STIX/CybOX import
        # to turn XML into DingoObjDicts
        import_result = MantisImporter.xml_import(xml_fname=filepath,
                                                  xml_content=xml_content,
                                                  ns_mapping=self.namespace_dict,
                                                  embedded_predicate=self.stix_embedding_pred,
                                                  id_and_revision_extractor=self.id_and_revision_extractor)
        # The MANTIS/DINGOS xml importer returns then the following structure::
        #
        #
        #    {'id_and_rev_info': Id and revision info of top-level element of the form
        #                        {'id': ... , 'timestamp': ...}
        #     'elt_name': Element name of top-level element
        #     'dict_repr': Dictionary representation of XML, minus the embedded objects -- for
        #                  those, an 'idref' reference has been generated
        #     'embedded_objects': List of embedded objects, as dictionary
        #                        {"id_and_revision_info": id and revision info of extracted object,
        #                         "elt_name": Element name,
        #                         "dict_repr" : dictionary representation of XML of embedded object
        #                        }
        #     'unprocessed' : List of unprocessed embedded objects (as libxml2 Node object)
        #                     (e.g. for handover to other importer
        #     'file_content': Content of imported file (or, if content was passed instead of a file name,
        #                     the original content)}
        # Extract information from import result
        top_level_id_and_rev_info = import_result['id_and_rev_info']
        top_level_elt_name = import_result['elt_name']
        top_level_elt_dict = import_result['dict_repr']
        file_content = import_result['file_content']
        embedded_objects = import_result['embedded_objects']
        unprocessed_list = import_result['unprocessed']
        if not 'id' in top_level_id_and_rev_info or not top_level_id_and_rev_info['id']:
            if self.default_identifier_ns_uri:
                # Top-level element had no identifier. If a default namespace has been provided,
                # then an identifier is generated
                top_level_id_and_rev_info['id_ns'] = self.default_identifier_ns_uri
                top_level_id_and_rev_info['id_uid'] = hashlib.md5(file_content).hexdigest()
                logger.info("Top level element had no identifier: "
                            "identifier %s has been generated " % top_level_id_and_rev_info['id_uid'])
            else:
                logger.warning("Top level element had no identifier. "
                               "No identifier was generated, because no default namespace had been provided "
                               "(you can provide a namespace with the '-n' commandline parameter")
        # We now have the top-level object, the list of embedded objects,
        # and possibly a list of hitherto unprocessed XML nodes.
        # As we shall see below, we have configured the DINGOS
        # XML importer such that it extracts marking structures
        # as separate objects and tags them with a label
        # 'import_first'. So, what we need to do is to go through
        # the embedded objects and collect all markings (along with
        # information, in which STIX_Package each marking was defined).
        # This allows us to first create all marking objects and
        # then mark the subsequently created objects with the
        # appropriate markings.
        # Note, that currently,
        # - we only extract and treat markings defined in the header of STIX_Package
        # - we do not care about the XPATH-expression and treat every marking as if
        #   it was defined for the whole STIX-Package
        marking_dict = {}
        # We use queues rather than lists, since we have no need
        # to access elements somewhere in the list, but rather
        # always pop from the end
        import_first_queue = deque()
        pending_queue = deque()
        # The top-level object is certainly no marking, so we
        # put it on the pending queue
        pending_queue.append((top_level_id_and_rev_info, top_level_elt_name, top_level_elt_dict))
        while embedded_objects:
            # We go through the embedded objects and look for the 'import_first' label
            embedded_object = embedded_objects.pop()
            id_and_rev_info = embedded_object['id_and_rev_info']
            elt_name = embedded_object['elt_name']
            elt_dict = embedded_object['dict_repr']
            if 'import_first' in id_and_rev_info:
                import_first_queue.append((id_and_rev_info, elt_name, elt_dict))
            else:
                pending_queue.append((id_and_rev_info, elt_name, elt_dict))
        while import_first_queue:
            # We go through the import_first queue, import all markings,
            # and collect information about the STIX_Package in which the marking
            # was defined in the marking_dict (some organizations use a format
            # in which several STIX Packages are bundled into a single XML file).
            (id_and_rev_info, elt_name, elt_dict) = import_first_queue.pop()
            (info_obj, existed) = self.iobject_import(id_and_rev_info,
                                                      elt_name,
                                                      elt_dict)
            # id_and_rev_info can carries additional information other than the identifier
            # and timestamp that has been extracted: the MANTIS XML importer carries
            # along information inherited from ancestor objects: this allows us
            # to propagate information down to children, grandchildren etc. As we
            # see below, we use this mechanism to propagate information about
            # the STIX_Package in which an object was defined.
            if id_and_rev_info['inherited']['embedding_STIX_Package'] in marking_dict:
                marking_dict[id_and_rev_info['inherited']['embedding_STIX_Package']].append(info_obj)
            else:
                marking_dict[id_and_rev_info['inherited']['embedding_STIX_Package']] = [info_obj]
        while pending_queue:
            # Now we start the import of the remaining embedded objects (plus the top-level object)
            (id_and_rev_info, elt_name, elt_dict) = pending_queue.pop()
            if 'embedding_STIX_Package' in id_and_rev_info:
                embedding_STIX_Package = id_and_rev_info['embedding_STIX_Package']
            else:
                embedding_STIX_Package = id_and_rev_info.get('inherited',{}).get('embedding_STIX_Package')
            logger.debug("%s embedded in %s" % (id_and_rev_info['id'], embedding_STIX_Package))
            if elt_name in ['Kill_Chain','Kill_Chain_Phase']:
                # We chose not to attach markings to Kill_Chain information
                object_markings = []
            else:
                # We mark the object with markings passed to the xml_import command
                # and markings defined in the STIX_Package in which the object was defined.
                object_markings = markings + marking_dict.get(embedding_STIX_Package,[])
            self.iobject_import(id_and_rev_info,
                                elt_name,
                                elt_dict,
                                markings=object_markings)
        # As we shall see below, we have configured the xml_importer such that
        # it recognizes OpenIOC structures embedded as test mechanism and
        # leaves them unprocessed. These unprocessed elements we now hand
        # over to the MANTIS OpenIOC importer.
        for unprocessed_elt in unprocessed_list:
            (id_and_rev_info,typeinfo,xml_node) = unprocessed_elt
            processor_class = self.processors.get(id_and_rev_info['defer_processing']['processor'],None)
            if 'embedding_STIX_Package' in id_and_rev_info:
                embedding_STIX_Package = id_and_rev_info['embedding_STIX_Package']
            else:
                embedding_STIX_Package = id_and_rev_info.get('inherited',{}).get('embedding_STIX_Package')
            logger.debug("%s embedded in %s" % (id_and_rev_info['id'], embedding_STIX_Package))
            object_markings = markings + marking_dict.get(embedding_STIX_Package,[])
            if processor_class:
                processor = processor_class(namespace_dict=self.namespace_dict)
                processor.xml_import(self,
                                     xml_content=xml_node,
                                     markings=object_markings,
                                     identifier_ns_uri=self.namespace_dict[id_and_rev_info['id'].split(':')[0]],
                                     initialize_importer=False
                                     )
            else:
                logger.error("Did not find a processor for %s" % id_and_rev_info['defer_processing']['processor'])
"def",
"xml_import",
"(",
"self",
",",
"filepath",
"=",
"\"\"",
",",
"xml_content",
"=",
"None",
",",
"markings",
"=",
"None",
",",
"identifier_ns_uri",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Clear internal state such that same object can be reused for... | 45.679487 | 31.470085 |
    def _analyze_all_function_features(self, all_funcs_completed=False):
        """
        Iteratively analyze all changed functions, update their returning attribute, until a fix-point is reached (i.e.
        no new returning/not-returning functions are found).

        :param bool all_funcs_completed: Passed through to each feature-analysis
            iteration; indicates whether every function has been fully recovered.
        :return: None
        """
        while True:
            # One pass of feature analysis; it reports which functions were
            # newly classified as returning / non-returning.
            new_changes = self._iteratively_analyze_function_features(all_funcs_completed=all_funcs_completed)
            new_returning_functions = new_changes['functions_return']
            new_not_returning_functions = new_changes['functions_do_not_return']
            # Fix-point reached: no classification changed in this pass.
            if not new_returning_functions and not new_not_returning_functions:
                break
            for returning_function in new_returning_functions:
                self._pending_jobs.add_returning_function(returning_function.addr)
                if returning_function.addr in self._function_returns:
                    for fr in self._function_returns[returning_function.addr]:
                        # Confirm them all
                        if not self.kb.functions.contains_addr(fr.caller_func_addr):
                            # FIXME: A potential bug might arise here. After post processing (phase 2), if the function
                            #  specified by fr.caller_func_addr has been merged to another function during phase 2, we
                            #  will simply skip this FunctionReturn here. It might lead to unconfirmed fake_ret edges
                            #  in the newly merged function. Fix this bug in the future when it becomes an issue.
                            continue
                        if self.kb.functions.get_by_addr(fr.caller_func_addr).returning is not True:
                            self._updated_nonreturning_functions.add(fr.caller_func_addr)
                        # Build a code snippet for the return target, from the
                        # CFG node if we have one, otherwise from the raw address.
                        return_to_node = self._nodes.get(fr.return_to, None)
                        if return_to_node is None:
                            return_to_snippet = self._to_snippet(addr=fr.return_to, base_state=self._base_state)
                        else:
                            return_to_snippet = self._to_snippet(cfg_node=self._nodes[fr.return_to])
                        self.kb.functions._add_return_from_call(fr.caller_func_addr, fr.callee_func_addr,
                                                                return_to_snippet)
                    del self._function_returns[returning_function.addr]
            for nonreturning_function in new_not_returning_functions:
                self._pending_jobs.add_nonreturning_function(nonreturning_function.addr)
                if nonreturning_function.addr in self._function_returns:
                    for fr in self._function_returns[nonreturning_function.addr]:
                        # Remove all those FakeRet edges
                        if self.kb.functions.contains_addr(fr.caller_func_addr) and \
                                self.kb.functions.get_by_addr(fr.caller_func_addr).returning is not True:
                            self._updated_nonreturning_functions.add(fr.caller_func_addr)
                    del self._function_returns[nonreturning_function.addr]
"def",
"_analyze_all_function_features",
"(",
"self",
",",
"all_funcs_completed",
"=",
"False",
")",
":",
"while",
"True",
":",
"new_changes",
"=",
"self",
".",
"_iteratively_analyze_function_features",
"(",
"all_funcs_completed",
"=",
"all_funcs_completed",
")",
"new_r... | 59.961538 | 39.307692 |
    def decode_sequence(self,
                        source_encoded: mx.sym.Symbol,
                        source_encoded_lengths: mx.sym.Symbol,
                        source_encoded_max_length: int,
                        target_embed: mx.sym.Symbol,
                        target_embed_lengths: mx.sym.Symbol,
                        target_embed_max_length: int) -> mx.sym.Symbol:
        """
        Decodes a sequence of embedded target words and returns sequence of last decoder
        representations for each time step.

        :param source_encoded: Encoded source: (batch_size, source_encoded_max_length, encoder_depth).
        :param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
        :param source_encoded_max_length: Size of encoder time dimension.
        :param target_embed: Embedded target sequence. Shape: (batch_size, target_embed_max_length, target_num_embed).
        :param target_embed_lengths: Lengths of embedded target sequences. Shape: (batch_size,).
        :param target_embed_max_length: Dimension of the embedded target sequence.
        :return: Decoder data. Shape: (batch_size, target_embed_max_length, decoder_depth).
        """
        # Abstract placeholder: concrete decoder subclasses provide the
        # actual decoding logic; this base implementation does nothing.
        pass
"def",
"decode_sequence",
"(",
"self",
",",
"source_encoded",
":",
"mx",
".",
"sym",
".",
"Symbol",
",",
"source_encoded_lengths",
":",
"mx",
".",
"sym",
".",
"Symbol",
",",
"source_encoded_max_length",
":",
"int",
",",
"target_embed",
":",
"mx",
".",
"sym",... | 60.15 | 31.35 |
def parse_args():
    """Parser/validator for the cmd line args.

    Returns:
        tuple: (in_features_path, groups_path, weight_method, num_bins,
            edge_range, trim_outliers, trim_percentile,
            return_networkx_graph, out_weights_path)

    Raises:
        ValueError: If the command line could not be parsed.
        IOError: If either input path does not exist.
    """
    parser = get_parser()
    if len(sys.argv) < 2:
        parser.print_help()
        warnings.warn('Too few arguments!', UserWarning)
        parser.exit(1)
    # parsing
    try:
        params = parser.parse_args()
    except Exception as exc:
        print(exc)
        # Bug fix: chain the original exception so the root cause is not
        # lost when the generic ValueError propagates.
        raise ValueError('Unable to parse command-line arguments.') from exc
    in_features_path = os.path.abspath(params.in_features_path)
    if not os.path.exists(in_features_path):
        raise IOError("Given features file doesn't exist.")
    groups_path = os.path.abspath(params.groups_path)
    if not os.path.exists(groups_path):
        raise IOError("Given groups file doesn't exist.")
    return in_features_path, groups_path, params.weight_method, params.num_bins, params.edge_range, \
        params.trim_outliers, params.trim_percentile, params.return_networkx_graph, params.out_weights_path
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"get_parser",
"(",
")",
"if",
"len",
"(",
"sys",
".",
"argv",
")",
"<",
"2",
":",
"parser",
".",
"print_help",
"(",
")",
"warnings",
".",
"warn",
"(",
"'Too few arguments!'",
",",
"UserWarning",
")",
... | 33.962963 | 24.888889 |
def get_meta_content(self, metaName):
"""\
Extract a given meta content form document
"""
meta = self.parser.css_select(self.article.doc, metaName)
content = None
if meta is not None and len(meta) > 0:
content = self.parser.getAttribute(meta[0], 'content')
if content:
return content.strip()
return '' | [
"def",
"get_meta_content",
"(",
"self",
",",
"metaName",
")",
":",
"meta",
"=",
"self",
".",
"parser",
".",
"css_select",
"(",
"self",
".",
"article",
".",
"doc",
",",
"metaName",
")",
"content",
"=",
"None",
"if",
"meta",
"is",
"not",
"None",
"and",
... | 27.071429 | 18.428571 |
def _generate_validation_scripts(self):
"""
Include the scripts used by solutions.
"""
id_script_list_validation_fields = (
AccessibleFormImplementation.ID_SCRIPT_LIST_VALIDATION_FIELDS
)
local = self.parser.find('head,body').first_result()
if local is not None:
if (
self.parser.find(
'#'
+ AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
).first_result() is None
):
common_functions_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'common.js'
),
'r'
)
common_functions_content = common_functions_file.read()
common_functions_file.close()
common_functions_script = self.parser.create_element('script')
common_functions_script.set_attribute(
'id',
AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
)
common_functions_script.set_attribute(
'type',
'text/javascript'
)
common_functions_script.append_text(common_functions_content)
local.prepend_element(common_functions_script)
self.script_list_fields_with_validation = self.parser.find(
'#'
+ id_script_list_validation_fields
).first_result()
if self.script_list_fields_with_validation is None:
script_list_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'scriptlist_validation_fields.js'
),
'r'
)
script_list_content = script_list_file.read()
script_list_file.close()
self.script_list_fields_with_validation = (
self.parser.create_element('script')
)
self.script_list_fields_with_validation.set_attribute(
'id',
id_script_list_validation_fields
)
self.script_list_fields_with_validation.set_attribute(
'type',
'text/javascript'
)
self.script_list_fields_with_validation.append_text(
script_list_content
)
local.append_element(self.script_list_fields_with_validation)
if (
self.parser.find(
'#'
+ AccessibleFormImplementation.ID_SCRIPT_EXECUTE_VALIDATION
).first_result() is None
):
script_function_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'validation.js'
),
'r'
)
script_function_content = script_function_file.read()
script_function_file.close()
script_function = self.parser.create_element('script')
script_function.set_attribute(
'id',
AccessibleFormImplementation.ID_SCRIPT_EXECUTE_VALIDATION
)
script_function.set_attribute('type', 'text/javascript')
script_function.append_text(script_function_content)
self.parser.find('body').first_result().append_element(
script_function
)
self.scripts_added = True | [
"def",
"_generate_validation_scripts",
"(",
"self",
")",
":",
"id_script_list_validation_fields",
"=",
"(",
"AccessibleFormImplementation",
".",
"ID_SCRIPT_LIST_VALIDATION_FIELDS",
")",
"local",
"=",
"self",
".",
"parser",
".",
"find",
"(",
"'head,body'",
")",
".",
"f... | 39.317308 | 18.490385 |
def rescan_file(self, filename, sha256hash, apikey):
"""
just send the hash, check the date
"""
url = self.base_url + "file/rescan"
params = {
'apikey': apikey,
'resource': sha256hash
}
rate_limit_clear = self.rate_limit()
if rate_limit_clear:
response = requests.post(url, params=params)
if response.status_code == self.HTTP_OK:
self.logger.info("sent: %s, HTTP: %d, content: %s", os.path.basename(filename), response.status_code, response.text)
elif response.status_code == self.HTTP_RATE_EXCEEDED:
time.sleep(20)
else:
self.logger.error("sent: %s, HTTP: %d", os.path.basename(filename), response.status_code)
return response | [
"def",
"rescan_file",
"(",
"self",
",",
"filename",
",",
"sha256hash",
",",
"apikey",
")",
":",
"url",
"=",
"self",
".",
"base_url",
"+",
"\"file/rescan\"",
"params",
"=",
"{",
"'apikey'",
":",
"apikey",
",",
"'resource'",
":",
"sha256hash",
"}",
"rate_lim... | 40.45 | 19.75 |
def _add_converted_units(self, dataframe, parameter, key='VALUE'):
"""Add an additional DATA_VALUE column with converted VALUEs"""
convert_unit = self.parameters.get_converter(parameter)
try:
log.debug("Adding unit converted DATA_VALUE to the data")
dataframe[key] = dataframe['DATA_VALUE'].apply(convert_unit)
except KeyError:
log.warning("Missing 'VALUE': no unit conversion.")
else:
dataframe.unit = self.parameters.unit(parameter) | [
"def",
"_add_converted_units",
"(",
"self",
",",
"dataframe",
",",
"parameter",
",",
"key",
"=",
"'VALUE'",
")",
":",
"convert_unit",
"=",
"self",
".",
"parameters",
".",
"get_converter",
"(",
"parameter",
")",
"try",
":",
"log",
".",
"debug",
"(",
"\"Addi... | 51.3 | 22.4 |
def _ratelimited_get(self, *args, **kwargs):
"""Perform get request, handling rate limiting."""
with self._ratelimiter:
resp = self.session.get(*args, **kwargs)
# It's possible that Space-Track will return HTTP status 500 with a
# query rate limit violation. This can happen if a script is cancelled
# before it has finished sleeping to satisfy the rate limit and it is
# started again.
#
# Let's catch this specific instance and retry once if it happens.
if resp.status_code == 500:
# Let's only retry if the error page tells us it's a rate limit
# violation.
if 'violated your query rate limit' in resp.text:
# Mimic the RateLimiter callback behaviour.
until = time.time() + self._ratelimiter.period
t = threading.Thread(target=self._ratelimit_callback, args=(until,))
t.daemon = True
t.start()
time.sleep(self._ratelimiter.period)
# Now retry
with self._ratelimiter:
resp = self.session.get(*args, **kwargs)
return resp | [
"def",
"_ratelimited_get",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"_ratelimiter",
":",
"resp",
"=",
"self",
".",
"session",
".",
"get",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# It's possible t... | 43.518519 | 21.814815 |
def serializer_by_type_id(self, type_id):
"""
Find and return the serializer for the type-id
:param type_id: type-id the serializer
:return: the serializer
"""
if type_id <= 0:
indx = index_for_default_type(type_id)
serializer = self._constant_type_ids.get(indx, None)
if serializer is not None:
return serializer
return self._id_dic.get(type_id, None) | [
"def",
"serializer_by_type_id",
"(",
"self",
",",
"type_id",
")",
":",
"if",
"type_id",
"<=",
"0",
":",
"indx",
"=",
"index_for_default_type",
"(",
"type_id",
")",
"serializer",
"=",
"self",
".",
"_constant_type_ids",
".",
"get",
"(",
"indx",
",",
"None",
... | 37.416667 | 7.916667 |
def crypto_sign_keypair(seed=None):
    """Return (verifying, secret) key from a given seed, or os.urandom(32)"""
    if seed is None:
        # No caller-provided entropy: draw 32 random bytes ourselves.
        seed = os.urandom(PUBLICKEYBYTES)
    else:
        # Caller-supplied seeds are discouraged; warn and validate length.
        warnings.warn("ed25519ll should choose random seed.",
                      RuntimeWarning)
        if len(seed) != 32:
            raise ValueError("seed must be 32 random bytes or None.")
    secret = seed
    verifying = djbec.publickey(secret)
    # The "secret key" is conventionally seed || public key.
    return Keypair(verifying, secret + verifying)
"def",
"crypto_sign_keypair",
"(",
"seed",
"=",
"None",
")",
":",
"if",
"seed",
"is",
"None",
":",
"seed",
"=",
"os",
".",
"urandom",
"(",
"PUBLICKEYBYTES",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"ed25519ll should choose random seed.\"",
",",
"Run... | 39 | 12.583333 |
def main():
    """Ideally we shouldn't lose the first second of events"""
    # Open the terminal input stream first so bytes arriving during the
    # sleep below are buffered instead of dropped.
    with Input() as input_generator:

        def extra_bytes_callback(string):
            # Push unconsumed bytes back so they surface as events later.
            print('got extra bytes', repr(string))
            print('type:', type(string))
            input_generator.unget_bytes(string)

        time.sleep(1)
        with CursorAwareWindow(extra_bytes_callback=extra_bytes_callback) as window:
            window.get_cursor_position()
            for event in input_generator:
                print(repr(event))
"def",
"main",
"(",
")",
":",
"with",
"Input",
"(",
")",
"as",
"input_generator",
":",
"def",
"extra_bytes_callback",
"(",
"string",
")",
":",
"print",
"(",
"'got extra bytes'",
",",
"repr",
"(",
"string",
")",
")",
"print",
"(",
"'type:'",
",",
"type",
... | 41.583333 | 10.583333 |
def parameterSpace( self ):
"""Return the parameter space of the experiment as a list of dicts,
with each dict mapping each parameter name to a value.
:returns: the parameter space as a list of dicts"""
ps = self.parameters()
if len(ps) == 0:
return []
else:
return self._crossProduct(ps) | [
"def",
"parameterSpace",
"(",
"self",
")",
":",
"ps",
"=",
"self",
".",
"parameters",
"(",
")",
"if",
"len",
"(",
"ps",
")",
"==",
"0",
":",
"return",
"[",
"]",
"else",
":",
"return",
"self",
".",
"_crossProduct",
"(",
"ps",
")"
] | 35.2 | 14.8 |
def add(self, name, definition):
""" Register a definition to the registry. Existing definitions are
replaced silently.
:param name: The name which can be used as reference in a validation
schema.
:type name: :class:`str`
:param definition: The definition.
:type definition: any :term:`mapping` """
self._storage[name] = self._expand_definition(definition) | [
"def",
"add",
"(",
"self",
",",
"name",
",",
"definition",
")",
":",
"self",
".",
"_storage",
"[",
"name",
"]",
"=",
"self",
".",
"_expand_definition",
"(",
"definition",
")"
] | 42.5 | 14.5 |
def get_supported_metrics_topic(self, name, topic_name):
'''
Retrieves the list of supported metrics for this namespace and topic
name:
Name of the service bus namespace.
topic_name:
Name of the service bus queue in this namespace.
'''
response = self._perform_get(
self._get_get_supported_metrics_topic_path(name, topic_name),
None)
return _MinidomXmlToObject.convert_response_to_feeds(
response,
partial(
_ServiceBusManagementXmlSerializer.xml_to_metrics,
object_type=MetricProperties
)
) | [
"def",
"get_supported_metrics_topic",
"(",
"self",
",",
"name",
",",
"topic_name",
")",
":",
"response",
"=",
"self",
".",
"_perform_get",
"(",
"self",
".",
"_get_get_supported_metrics_topic_path",
"(",
"name",
",",
"topic_name",
")",
",",
"None",
")",
"return",... | 32.65 | 23.55 |
async def restore_storage_configuration(self):
"""
Restore machine's storage configuration to its initial state.
"""
self._data = await self._handler.restore_storage_configuration(
system_id=self.system_id) | [
"async",
"def",
"restore_storage_configuration",
"(",
"self",
")",
":",
"self",
".",
"_data",
"=",
"await",
"self",
".",
"_handler",
".",
"restore_storage_configuration",
"(",
"system_id",
"=",
"self",
".",
"system_id",
")"
] | 40.833333 | 11.5 |
def call_parallel(self, cdata, low):
'''
Call the state defined in the given cdata in parallel
'''
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
name = (cdata.get('args') or [None])[0] or cdata['kwargs'].get('name')
if not name:
name = low.get('name', low.get('__id__'))
proc = salt.utils.process.MultiprocessingProcess(
target=self._call_parallel_target,
args=(name, cdata, low))
proc.start()
ret = {'name': name,
'result': None,
'changes': {},
'comment': 'Started in a separate process',
'proc': proc}
return ret | [
"def",
"call_parallel",
"(",
"self",
",",
"cdata",
",",
"low",
")",
":",
"# There are a number of possibilities to not have the cdata",
"# populated with what we might have expected, so just be smart",
"# enough to not raise another KeyError as the name is easily",
"# guessable and fallbac... | 40.565217 | 19.26087 |
def _prompt_placement(D, tt):
    """
    Since automatic placement didn't work, find somewhere to place the model data manually with the help of the user.

    :param dict D: Metadata
    :param str tt: Table type
    :return str _model_name: Chosen model name for placement; None when the
        choice was outside the displayed range, "" when the input was not a
        number.
    """
    _model_name = ""
    # There wasn't a table name match, so we need prompts to fix it
    _placement_options = _get_available_placements(D, tt)
    print("Please choose where you'd like to place this model:")
    for _idx, _opt in enumerate(_placement_options):
        print("({}) {}".format(_idx, _opt))
    _choice = input("> ")
    try:
        # Bug fix: only accept indices actually displayed. Previously
        # `int(_choice) <= len(...)` allowed an IndexError for the value
        # equal to len(), and negative numbers silently chose from the end.
        if _choice and 0 <= int(_choice) < len(_placement_options):
            # Get the option the user chose
            _model_name = _placement_options[int(_choice)]
        else:
            # The user chose an option out of the placement list range
            print("Invalid choice input")
            return
    except Exception:
        # Choice was not a number or empty
        print("Invalid choice")
    return _model_name
"def",
"_prompt_placement",
"(",
"D",
",",
"tt",
")",
":",
"_model_name",
"=",
"\"\"",
"# There wasn't a table name match, so we need prompts to fix it",
"_placement_options",
"=",
"_get_available_placements",
"(",
"D",
",",
"tt",
")",
"print",
"(",
"\"Please choose where... | 37.142857 | 18.928571 |
def execute(helper, config, args):
    """
    Deletes an environment

    :param helper: Elastic Beanstalk helper exposing environment operations
    :param config: parsed configuration mapping
    :param args: parsed command-line arguments (``environment``, ``dont_wait``)
    :return: 0 on completion
    """
    # Validate that the requested environment exists in the config; the
    # parsed result itself is not needed here (the original bound it to an
    # unused variable), but the call is kept for its validation side effect.
    parse_env_config(config, args.environment)
    environments_to_wait_for_term = []
    for env in helper.get_environments():
        if env['EnvironmentName'] != args.environment:
            continue
        if env['Status'] != 'Ready':
            # Only Ready environments can be deleted.
            out("Unable to delete " + env['EnvironmentName']
                + " because it's not in status Ready ("
                + env['Status'] + ")")
        else:
            out("Deleting environment: "+env['EnvironmentName'])
            helper.delete_environment(env['EnvironmentName'])
            environments_to_wait_for_term.append(env['EnvironmentName'])
    if not args.dont_wait:
        # Block until AWS reports the environments as terminated.
        helper.wait_for_environments(environments_to_wait_for_term,
                                     status='Terminated',
                                     include_deleted=True)
    out("Environment deleted")
    return 0
"def",
"execute",
"(",
"helper",
",",
"config",
",",
"args",
")",
":",
"env_config",
"=",
"parse_env_config",
"(",
"config",
",",
"args",
".",
"environment",
")",
"environments_to_wait_for_term",
"=",
"[",
"]",
"environments",
"=",
"helper",
".",
"get_environm... | 36.222222 | 18.592593 |
def get(self, model_module, model_module_version, model_name, view_module, view_module_version, view_name):
"""Get a value"""
module_versions = self._registry[model_module]
# The python semver module doesn't work well, for example, it can't do match('3', '*')
# so we just take the first model module version.
#model_names = next(v for k, v in module_versions.items()
# if semver.match(model_module_version, k))
model_names = list(module_versions.values())[0]
view_modules = model_names[model_name]
view_versions = view_modules[view_module]
# The python semver module doesn't work well, so we just take the first view module version
#view_names = next(v for k, v in view_versions.items()
# if semver.match(view_module_version, k))
view_names = list(view_versions.values())[0]
widget_class = view_names[view_name]
return widget_class | [
"def",
"get",
"(",
"self",
",",
"model_module",
",",
"model_module_version",
",",
"model_name",
",",
"view_module",
",",
"view_module_version",
",",
"view_name",
")",
":",
"module_versions",
"=",
"self",
".",
"_registry",
"[",
"model_module",
"]",
"# The python se... | 60.75 | 23.3125 |
def _extract_tag_from_data(self, data, tag_name=b'packet'):
"""Gets data containing a (part of) tshark xml.
If the given tag is found in it, returns the tag data and the remaining data.
Otherwise returns None and the same data.
:param data: string of a partial tshark xml.
:return: a tuple of (tag, data). tag will be None if none is found.
"""
opening_tag = b'<' + tag_name + b'>'
closing_tag = opening_tag.replace(b'<', b'</')
tag_end = data.find(closing_tag)
if tag_end != -1:
tag_end += len(closing_tag)
tag_start = data.find(opening_tag)
return data[tag_start:tag_end], data[tag_end:]
return None, data | [
"def",
"_extract_tag_from_data",
"(",
"self",
",",
"data",
",",
"tag_name",
"=",
"b'packet'",
")",
":",
"opening_tag",
"=",
"b'<'",
"+",
"tag_name",
"+",
"b'>'",
"closing_tag",
"=",
"opening_tag",
".",
"replace",
"(",
"b'<'",
",",
"b'</'",
")",
"tag_end",
... | 42.176471 | 16.058824 |
def default_value(self, type_name):
    '''
    Obtain the default value for some *type name*.

    BOOLEAN/INTEGER/REAL/STRING map to their natural zero values.
    UNIQUE_ID draws the next id from the metamodel's generator when a
    metamodel is attached, otherwise yields None.  Any other name
    raises MetaException.
    '''
    simple_defaults = {
        'BOOLEAN': False,
        'INTEGER': 0,
        'REAL': 0.0,
        'STRING': '',
    }
    uname = type_name.upper()
    if uname in simple_defaults:
        return simple_defaults[uname]
    if uname == 'UNIQUE_ID':
        if not self.metamodel:
            return None
        return next(self.metamodel.id_generator)
    raise MetaException("Unknown type named '%s'" % type_name)
"def",
"default_value",
"(",
"self",
",",
"type_name",
")",
":",
"uname",
"=",
"type_name",
".",
"upper",
"(",
")",
"if",
"uname",
"==",
"'BOOLEAN'",
":",
"return",
"False",
"elif",
"uname",
"==",
"'INTEGER'",
":",
"return",
"0",
"elif",
"uname",
"==",
... | 27.083333 | 17.916667 |
def dumps(data, escape=False, **kwargs):
    """Serialize *data* to JSON, handling objects json itself cannot.

    A wrapper around `json.dumps` that funnels unknown objects through
    the registered custom serializers (via ``_converter``), so any kind
    of object can be converted to types the json library handles.

    :param data: the object to serialize.
    :param escape: when True, HTML-escape the resulting string
        (``<``, ``>`` and ``&`` only; quotes are already escaped by the
        json library itself).
    :param kwargs: forwarded to ``json.dumps``; ``sort_keys`` defaults
        to True for stable output.
    :return: the JSON string, HTML-escaped if requested.
    """
    kwargs.setdefault('sort_keys', True)
    converted = json.dumps(data, default=_converter, **kwargs)
    if escape:
        # We're escaping the whole dumped string here because there's no
        # (easy) way to hook into the native json library and change how
        # it processes strings, None and other "literal" values.  Quotes
        # are deliberately left alone: json escapes them already.
        #
        # Bug fix: cgi.escape was deprecated and then removed from the
        # standard library (the cgi module itself is gone in 3.13);
        # html.escape(..., quote=False) is its exact replacement.
        import html
        return html.escape(converted, quote=False)
    return converted
"def",
"dumps",
"(",
"data",
",",
"escape",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'sort_keys'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'sort_keys'",
"]",
"=",
"True",
"converted",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
... | 43.363636 | 22.045455 |
def getResults(self, parFound = None):
    '''
    Build the list of i3visio entity dictionaries for the found values.

    :param parFound: iterable of matched values to convert; ``None`` or
        an empty iterable yields an empty list.
    :return: The output format will be like:
        [{"type" : "i3visio.email", "value": "foo@bar.com", "attributes": [] }, {"type" : "i3visio.email", "value": "bar@foo.com", "attributes": [] }]
    '''
    results = []
    # Bug fix: the previous guard `len(parFound) > 0` raised a TypeError
    # whenever the default parFound=None was used; a truthiness test
    # covers both None and empty iterables.
    if parFound:
        for found in parFound:
            results.append({
                "type": self.getEntityType(found),
                "value": self.getValue(found),
                "attributes": self.getAttributes(found),
            })
    return results
"def",
"getResults",
"(",
"self",
",",
"parFound",
"=",
"None",
")",
":",
"# Defining a dictionary",
"results",
"=",
"[",
"]",
"# Defining a dictionary inside with a couple of fields: reg_exp for the regular expression and found_exp for the expressions found.",
"#results[self.name] =... | 46.681818 | 26.136364 |
def example_2_load_data(self):
    """
    Load the example data: weight variables and the input placeholder.
    """
    # Weight vectors: w1 is the first layer of the network, w2 the second.
    self.w1 = Variable(random_normal([2, 3], stddev=1, seed=1))
    self.w2 = Variable(random_normal([3, 1], stddev=1, seed=1))
    # Feature vector; unlike the weights above, a placeholder does not add
    # a value node to the computation graph until it is fed.
    #self.x = placeholder(float32, shape=(1, 2), name='input')
    self.x = placeholder(float32, shape=(3, 2), name='input')
"def",
"example_2_load_data",
"(",
"self",
")",
":",
"# 权重向量, w1代表神经网络的第一层,w2代表神经网络的第二层",
"self",
".",
"w1",
"=",
"Variable",
"(",
"random_normal",
"(",
"[",
"2",
",",
"3",
"]",
",",
"stddev",
"=",
"1",
",",
"seed",
"=",
"1",
")",
")",
"self",
".",
"w2... | 40.3 | 15.1 |
def list_domains():
    '''
    Return a list of virtual machine names on the minion
    CLI Example:
    .. code-block:: bash
        salt '*' virt.list_domains
    '''
    vm_info = __salt__['vmadm.list'](keyed=True)
    lines = ["UUID TYPE RAM STATE ALIAS"]
    for uuid in vm_info:
        entry = vm_info[uuid]
        # Fixed-width columns matching the header layout.
        lines.append(
            uuid.ljust(38)
            + entry['type'].ljust(6)
            + entry['ram'].ljust(9)
            + entry['state'].ljust(18)
            + entry['alias']
        )
    return lines
"def",
"list_domains",
"(",
")",
":",
"data",
"=",
"__salt__",
"[",
"'vmadm.list'",
"]",
"(",
"keyed",
"=",
"True",
")",
"vms",
"=",
"[",
"\"UUID TYPE RAM STATE ALIAS\"",
"]",
"for",
"vm",
"in",
"data",
":",
"vms"... | 29.380952 | 21.761905 |
def parse_input_file(text, variables=None):
    """Parse a Drake-like workflow file into Task objects.

    :param text: raw workflow text; include directives are expanded
        first via ``find_includes``.
    :param variables: optional extra variable-definition lines appended
        to the preamble before the environment is built.
    :return: list of ``Task`` objects, one per task section.
    """
    text = find_includes(text)
    lines = text.splitlines()
    tasks, linenumbers = find_tasks(lines)
    # Everything before the first task line is the preamble.
    preamble = list(lines[:linenumbers[0]])
    logging.debug("Preamble:\n{}".format("\n".join(preamble)))
    if variables is not None:
        # Bug fix: the old `preamble += "\n" + "\n".join(variables)`
        # extended the *list* with the individual characters of that
        # string.  Append the extra definition lines as whole lines.
        preamble.extend(variables)
    environment = create_environment(preamble)
    # Consecutive task line numbers delimit each task's code body.
    code_sections = list(zip(linenumbers, linenumbers[1:]))
    for (start, end), task in zip(code_sections, tasks):
        task["code"] = lines[start:end]
        task["environment"] = environment
    return [Task(**task) for task in tasks]
"def",
"parse_input_file",
"(",
"text",
",",
"variables",
"=",
"None",
")",
":",
"text",
"=",
"find_includes",
"(",
"text",
")",
"lines",
"=",
"text",
".",
"splitlines",
"(",
")",
"tasks",
",",
"linenumbers",
"=",
"find_tasks",
"(",
"lines",
")",
"preamb... | 40.4 | 9.65 |
def movingSum(requestContext, seriesList, windowSize):
    """
    Graphs the moving sum of a metric (or metrics) over a fixed number of
    past points, or a time interval.
    Takes one metric or a wildcard seriesList followed by a number N of
    datapoints or a quoted string with a length of time like '1hour' or '5min'
    (See ``from / until`` in the render\_api_ for examples of time formats).
    Graphs the sum of the preceeding datapoints for each point on the graph.
    Example::
        &target=movingSum(Server.instance01.requests,10)
        &target=movingSum(Server.instance*.errors,'5min')
    """
    if not seriesList:
        return []
    windowInterval = None
    if isinstance(windowSize, six.string_types):
        # Quoted time interval like '5min' -- convert to whole seconds.
        delta = parseTimeOffset(windowSize)
        windowInterval = abs(delta.seconds + (delta.days * 86400))
    if windowInterval:
        previewSeconds = windowInterval
    else:
        # Numeric window: size the preview by N points at the coarsest step.
        previewSeconds = max([s.step for s in seriesList]) * int(windowSize)
    # ignore original data and pull new, including our preview
    # data from earlier is needed to calculate the early results
    newContext = requestContext.copy()
    newContext['startTime'] = (requestContext['startTime'] -
                               timedelta(seconds=previewSeconds))
    previewList = evaluateTokens(newContext, requestContext['args'][0])
    result = []
    for series in previewList:
        # Window length in datapoints for this particular series.
        if windowInterval:
            windowPoints = windowInterval // series.step
        else:
            windowPoints = int(windowSize)
        # Rendered name mirrors the user's argument form (quoted vs numeric).
        if isinstance(windowSize, six.string_types):
            newName = 'movingSum(%s,"%s")' % (series.name, windowSize)
        else:
            newName = "movingSum(%s,%s)" % (series.name, windowSize)
        # The output series starts after the preview window it consumed.
        newSeries = TimeSeries(newName, series.start + previewSeconds,
                               series.end, series.step, [])
        newSeries.pathExpression = newName
        # Running sum over a sliding window: seed with the first window,
        # then slide one point at a time -- subtract the value leaving
        # the window, add the value entering it (None values are skipped;
        # `window_sum or 0` revives a None sum once real data arrives).
        window_sum = safeSum(series[:windowPoints])
        newSeries.append(window_sum)
        for n, last in enumerate(series[windowPoints:-1]):
            if series[n] is not None:
                window_sum -= series[n]
            if last is not None:
                window_sum = (window_sum or 0) + last
            newSeries.append(window_sum)
        result.append(newSeries)
    return result
"def",
"movingSum",
"(",
"requestContext",
",",
"seriesList",
",",
"windowSize",
")",
":",
"if",
"not",
"seriesList",
":",
"return",
"[",
"]",
"windowInterval",
"=",
"None",
"if",
"isinstance",
"(",
"windowSize",
",",
"six",
".",
"string_types",
")",
":",
... | 36.222222 | 21.68254 |
def get_token(self, token):
    '''
    Request a token from the master.

    Builds a ``get_token`` command payload around the supplied token
    and returns whatever the master sends back.
    '''
    payload = {'token': token, 'cmd': 'get_token'}
    return self._send_token_request(payload)
"def",
"get_token",
"(",
"self",
",",
"token",
")",
":",
"load",
"=",
"{",
"}",
"load",
"[",
"'token'",
"]",
"=",
"token",
"load",
"[",
"'cmd'",
"]",
"=",
"'get_token'",
"tdata",
"=",
"self",
".",
"_send_token_request",
"(",
"load",
")",
"return",
"t... | 25.888889 | 15.444444 |
def view(self, sort=None, purge=False, done=None, undone=None, **kwargs):
    """Handles the 'v' command by building and displaying a View.

    :sort: Sort pattern.
    :purge: Whether to purge items marked as 'done'.
    :done: Done pattern.
    :undone: Not done pattern.
    :kwargs: Extra arguments forwarded to the View object.
    """
    modified_model = self.model.modify(
        sort=self._getPattern(sort),
        purge=purge,
        done=self._getDone(done, undone),
    )
    View(modified_model, **kwargs)
"def",
"view",
"(",
"self",
",",
"sort",
"=",
"None",
",",
"purge",
"=",
"False",
",",
"done",
"=",
"None",
",",
"undone",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"View",
"(",
"self",
".",
"model",
".",
"modify",
"(",
"sort",
"=",
"self... | 32.6 | 15.533333 |
def fit(self, X, y=None, groups=None):
    """Run fit with all sets of parameters.
    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape = [n_samples] or [n_samples, n_output], optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    groups : array-like, with shape (n_samples,), optional
        Group labels for the samples used while splitting the dataset
        into train/test set.
    """
    # Expand the parameter grid and delegate to the shared search loop.
    candidate_params = ParameterGrid(self.param_grid)
    return self._fit(X, y, groups, candidate_params)
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
",",
"groups",
"=",
"None",
")",
":",
"return",
"self",
".",
"_fit",
"(",
"X",
",",
"y",
",",
"groups",
",",
"ParameterGrid",
"(",
"self",
".",
"param_grid",
")",
")"
] | 40.263158 | 21.105263 |
def commit(self, offsets=None):
    """Commit offsets to Kafka, blocking until success or unrecoverable error.

    Offsets are committed only to Kafka (kafka-topic storage, not
    zookeeper); they are used on the first fetch after every rebalance
    and on startup.  To avoid re-processing the last message read after
    a consumer restart, commit last_offset + 1 for each partition.

    Arguments:
        offsets (dict, optional): {TopicPartition: OffsetAndMetadata}
            mapping to commit with the configured group_id.  Defaults to
            the currently consumed offsets for all subscribed partitions.
    """
    assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
    assert self.config['group_id'] is not None, 'Requires group_id'
    # Fall back to everything consumed so far when no explicit map given.
    offsets = self._subscription.all_consumed_offsets() if offsets is None else offsets
    self._coordinator.commit_offsets_sync(offsets)
"def",
"commit",
"(",
"self",
",",
"offsets",
"=",
"None",
")",
":",
"assert",
"self",
".",
"config",
"[",
"'api_version'",
"]",
">=",
"(",
"0",
",",
"8",
",",
"1",
")",
",",
"'Requires >= Kafka 0.8.1'",
"assert",
"self",
".",
"config",
"[",
"'group_id... | 52.8 | 29.16 |
def correlate(self, signal):
    """
    Correlate records against one or many one-dimensional arrays.
    Parameters
    ----------
    signal : array-like
        One or more signals to correlate against.
    """
    sig = asarray(signal)
    record_len = self.shape[-1]
    if sig.ndim == 1:
        # Single signal: one correlation coefficient per record.
        if size(sig) != record_len:
            raise ValueError("Length of signal '%g' does not match record length '%g'"
                             % (size(sig), record_len))
        return self.map(lambda x: corrcoef(x, sig)[0, 1], index=[1])
    if sig.ndim == 2:
        # Multiple signals: one coefficient per (record, signal) pair.
        if sig.shape[1] != record_len:
            raise ValueError("Length of signal '%g' does not match record length '%g'"
                             % (sig.shape[1], record_len))
        newindex = arange(0, sig.shape[0])
        return self.map(lambda x: array([corrcoef(x, y)[0, 1] for y in sig]), index=newindex)
    raise Exception('Signal to correlate with must have 1 or 2 dimensions')
"def",
"correlate",
"(",
"self",
",",
"signal",
")",
":",
"s",
"=",
"asarray",
"(",
"signal",
")",
"if",
"s",
".",
"ndim",
"==",
"1",
":",
"if",
"size",
"(",
"s",
")",
"!=",
"self",
".",
"shape",
"[",
"-",
"1",
"]",
":",
"raise",
"ValueError",
... | 37.222222 | 24.62963 |
def flatten(cls, stats):
    """Makes a flat statistics from the given statistics.

    Merges every node in the (possibly nested) stats tree into one
    bucket per (name, filename, lineno, module), summing hit and time
    counters, and returns a single root stats with those buckets as
    children.
    """
    merged = {}
    for node in spread_stats(stats):
        key = (node.name, node.filename, node.lineno, node.module)
        if key not in merged:
            merged[key] = cls(*key)
        bucket = merged[key]
        bucket.own_hits += node.own_hits
        bucket.deep_hits += node.deep_hits
        bucket.own_time += node.own_time
        bucket.deep_time += node.deep_time
    children = list(itervalues(merged))
    return cls(stats.name, stats.filename, stats.lineno, stats.module,
               stats.own_hits, stats.deep_hits, stats.own_time,
               stats.deep_time, children)
"def",
"flatten",
"(",
"cls",
",",
"stats",
")",
":",
"flat_children",
"=",
"{",
"}",
"for",
"_stats",
"in",
"spread_stats",
"(",
"stats",
")",
":",
"key",
"=",
"(",
"_stats",
".",
"name",
",",
"_stats",
".",
"filename",
",",
"_stats",
".",
"lineno",... | 48.470588 | 14.823529 |
def query(database, query, **client_args):
    '''
    Execute a query.
    database
        Name of the database to query on.
    query
        InfluxQL query string.
    '''
    client = _client(**client_args)
    _result = client.query(query, database=database)
    # Bug fix: collections.Sequence was deprecated since 3.3 and removed
    # in Python 3.10; the ABC lives in collections.abc.
    if isinstance(_result, collections.abc.Sequence):
        return [_pull_query_results(_query_result) for _query_result in _result if _query_result]
    return [_pull_query_results(_result) if _result else {}]
"def",
"query",
"(",
"database",
",",
"query",
",",
"*",
"*",
"client_args",
")",
":",
"client",
"=",
"_client",
"(",
"*",
"*",
"client_args",
")",
"_result",
"=",
"client",
".",
"query",
"(",
"query",
",",
"database",
"=",
"database",
")",
"if",
"is... | 28.8125 | 23.8125 |
def _assemble_translocation(stmt):
    """Assemble Translocation statements into text.

    Optional from/to locations are appended only when present on the
    statement.
    """
    parts = [_assemble_agent_str(stmt.agent), ' translocates']
    if stmt.from_location is not None:
        parts.append(' from the ' + stmt.from_location)
    if stmt.to_location is not None:
        parts.append(' to the ' + stmt.to_location)
    return _make_sentence(''.join(parts))
"def",
"_assemble_translocation",
"(",
"stmt",
")",
":",
"agent_str",
"=",
"_assemble_agent_str",
"(",
"stmt",
".",
"agent",
")",
"stmt_str",
"=",
"agent_str",
"+",
"' translocates'",
"if",
"stmt",
".",
"from_location",
"is",
"not",
"None",
":",
"stmt_str",
"+... | 43.111111 | 5.333333 |
def transitively_reduce(self):
    """
    Performs a transitive reduction on the graph.

    Removes every edge (u, w) for which another child of u already
    reaches w (excluding children that can reach u itself, i.e. cycles
    back to the source).
    """
    redundant = set()
    for source, children in self._edges.items():
        sibling_pairs = [(a, b) for a in children for b in children if a != b]
        for via, target in sibling_pairs:
            if self.has_path(via, target) \
                    and not self.has_path(via, source):
                redundant.add((source, target))
    for source, target in redundant:
        self.remove_edge(source, target)
"def",
"transitively_reduce",
"(",
"self",
")",
":",
"removals",
"=",
"set",
"(",
")",
"for",
"from_node",
",",
"neighbors",
"in",
"self",
".",
"_edges",
".",
"items",
"(",
")",
":",
"childpairs",
"=",
"[",
"(",
"c1",
",",
"c2",
")",
"for",
"c1",
"... | 33.647059 | 17.176471 |
def ppdict(dict_to_print, br='\n', html=False, key_align='l', sort_keys=True,
           key_preffix='', key_suffix='', value_prefix='', value_suffix='', left_margin=3, indent=2):
    """Indent representation of a dict.

    Renders the dict one entry per line between braces, with string keys
    and values single-quoted, keys padded to a common width, and the
    whole block shifted right by ``left_margin``.  With ``html=True`` the
    result is wrapped in ``<code>`` and spaces become non-breaking.
    """
    if not dict_to_print:
        return '{}'
    if sort_keys:
        ordered = OrderedDict()
        for key in sorted(dict_to_print.keys()):
            ordered[key] = dict_to_print[key]
        dict_to_print = ordered
    # String keys/values are rendered single-quoted; everything else as-is.
    quoted_keys = ["'%s'" % k if type(k) == str else k for k in dict_to_print.keys()]
    quoted_vals = ["'%s'" % v if type(v) == str else v for v in dict_to_print.values()]
    max_key_len = max(len(str(k)) for k in quoted_keys)
    body = []
    for key, val in zip(quoted_keys, quoted_vals):
        padded = (str(key).rjust(max_key_len) if key_align == 'r'
                  else str(key).ljust(max_key_len))
        body.append(' ' * indent + '{}{}{}:{}{}{},'.format(
            key_preffix, padded, key_suffix, value_prefix, val, value_suffix))
    body[-1] = body[-1][:-1]  # drop the trailing comma on the last entry
    out_lines = ['{'] + body + ['}']
    if left_margin:
        out_lines = [' ' * left_margin + line for line in out_lines]
    if html:
        # Non-breaking space (U+00A0), as in the original, keeps the
        # alignment when rendered as HTML.
        return '<code>{}</code>'.format(br.join(out_lines).replace(' ', '\xa0'))
    return br.join(out_lines)
"def",
"ppdict",
"(",
"dict_to_print",
",",
"br",
"=",
"'\\n'",
",",
"html",
"=",
"False",
",",
"key_align",
"=",
"'l'",
",",
"sort_keys",
"=",
"True",
",",
"key_preffix",
"=",
"''",
",",
"key_suffix",
"=",
"''",
",",
"value_prefix",
"=",
"''",
",",
... | 37.351351 | 24.891892 |
def deletesystemhook(self, hook_id):
    """
    Delete a system hook.

    :param hook_id: hook id
    :return: True when the server answers 200, False otherwise
    """
    response = requests.delete(
        '{0}/{1}'.format(self.hook_url, hook_id), data={"id": hook_id},
        headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    return response.status_code == 200
"def",
"deletesystemhook",
"(",
"self",
",",
"hook_id",
")",
":",
"data",
"=",
"{",
"\"id\"",
":",
"hook_id",
"}",
"request",
"=",
"requests",
".",
"delete",
"(",
"'{0}/{1}'",
".",
"format",
"(",
"self",
".",
"hook_url",
",",
"hook_id",
")",
",",
"data... | 27.705882 | 18.176471 |
def _remove_processed_data(
        self):
    """*Remove fully-processed ATLAS data from the local archive.*

    Finds the MJDs whose locally-held exposures have all been
    dophot-matched, deletes the corresponding night folders for both
    cameras (01a/02a), then flags those days/exposures as no longer held
    locally in the database.
    """
    self.log.info('starting the ``_remove_processed_data`` method')
    archivePath = self.settings["atlas archive path"]
    from fundamentals.mysql import readquery
    # MJDs where every local exposure already has a dophot match.
    sqlQuery = u"""
        select mjd from (SELECT DISTINCT
        FLOOR(mjd) as mjd
    FROM
        atlas_exposures
    WHERE
        local_data = 1 AND dophot_match > 0) as a
    where mjd NOT IN (SELECT
        *
    FROM
        (SELECT DISTINCT
            FLOOR(mjd)
        FROM
            atlas_exposures
        WHERE
            local_data = 1 AND dophot_match = 0) AS a);
""" % locals()
    mjds = readquery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.atlasMoversDBConn
    )
    if not len(mjds):
        return None
    oldMjds = [str(int(o["mjd"])) for o in mjds]
    for m in oldMjds:
        for i in ["01a", "02a"]:
            datapath = archivePath + "/%(i)s/%(m)s" % locals()
            try:
                shutil.rmtree(datapath)
            except OSError:
                # Narrowed from a bare `except:`; rmtree raises OSError
                # when the path is missing or cannot be removed.
                self.log.warning(
                    "The path %(datapath)s does not exist - no need to delete" % locals())
    mjdString = (',').join(oldMjds)
    sqlQuery = """
        update day_tracker set local_data = 0 where floor(mjd) in (%(mjdString)s);
        update atlas_exposures set local_data = 0 where floor(mjd) in (%(mjdString)s) and dophot_match != 0;""" % locals(
    )
    # Bug fix: this was a Python 2 `print sqlQuery` statement (a syntax
    # error on Python 3) leaking SQL to stdout; use the logger instead.
    self.log.debug(sqlQuery)
    writequery(
        log=self.log,
        sqlQuery=sqlQuery,
        dbConn=self.atlasMoversDBConn
    )
    self.log.info('completed the ``_remove_processed_data`` method')
    return None
"def",
"_remove_processed_data",
"(",
"self",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"'starting the ``_remove_processed_data`` method'",
")",
"archivePath",
"=",
"self",
".",
"settings",
"[",
"\"atlas archive path\"",
"]",
"from",
"fundamentals",
".",
"mysq... | 28.84127 | 18.904762 |
def get_allowed(allow, disallow):
    """ Normalize the given string attributes as a list of all allowed vClasses."""
    if allow is None and disallow is None:
        # Neither restriction given: everything is permitted.
        return SUMO_VEHICLE_CLASSES
    if disallow is None:
        return allow.split()
    banned = disallow.split()
    return tuple(c for c in SUMO_VEHICLE_CLASSES if c not in banned)
"def",
"get_allowed",
"(",
"allow",
",",
"disallow",
")",
":",
"if",
"allow",
"is",
"None",
"and",
"disallow",
"is",
"None",
":",
"return",
"SUMO_VEHICLE_CLASSES",
"elif",
"disallow",
"is",
"None",
":",
"return",
"allow",
".",
"split",
"(",
")",
"else",
... | 40.777778 | 12.444444 |
def dictionize(fields: Sequence, records: Sequence) -> Generator:
    """Lazily create dictionaries mapping field names to record values.

    Each record is zipped positionally against *fields* (truncating to
    the shorter of the two) and yielded as a dict.
    """
    return (dict(zip(fields, values)) for values in records)
"def",
"dictionize",
"(",
"fields",
":",
"Sequence",
",",
"records",
":",
"Sequence",
")",
"->",
"Generator",
":",
"return",
"(",
"dict",
"(",
"zip",
"(",
"fields",
",",
"rec",
")",
")",
"for",
"rec",
"in",
"records",
")"
] | 44.75 | 19.75 |
async def from_href(self):
    """Get the full object from spotify with a `href` attribute.

    Uses the object's own HTTP client when present; otherwise falls back
    to the name-mangled ``__client`` attribute of the concrete class.
    """
    if not hasattr(self, 'href'):
        raise TypeError('Spotify object has no `href` attribute, therefore cannot be retrived')
    if hasattr(self, 'http'):
        return await self.http.request(('GET', self.href))
    cls = type(self)
    try:
        # Reach the name-mangled private client of the concrete class.
        client = getattr(self, '_{0}__client'.format(cls.__name__))
    except AttributeError:
        raise TypeError('Spotify object has no way to access a HTTPClient.')
    data = await client.http.request(('GET', self.href))
    return cls(client, data)
"async",
"def",
"from_href",
"(",
"self",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'href'",
")",
":",
"raise",
"TypeError",
"(",
"'Spotify object has no `href` attribute, therefore cannot be retrived'",
")",
"elif",
"hasattr",
"(",
"self",
",",
"'http'... | 32.952381 | 24.238095 |
def nalu(x, depth, epsilon=1e-30, name=None, reuse=None):
  """NALU as in https://arxiv.org/abs/1808.00508."""
  with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse):
    x_shape = shape_list(x)
    flat = tf.reshape(x, [-1, x_shape[-1]])
    # Learned gate deciding between the additive and multiplicative paths.
    # Variable name "w" is kept for checkpoint compatibility.
    gate_w = tf.get_variable("w", [x_shape[-1], depth])
    gate = tf.nn.sigmoid(tf.matmul(flat, gate_w))
    gate = tf.reshape(gate, x_shape[:-1] + [depth])
    # Additive path: plain NAC on the raw inputs.
    additive = nac(x, depth, name="nac_lin")
    # Multiplicative path: NAC in log space, exponentiated on the way out.
    log_x = tf.log(tf.abs(x) + epsilon)
    multiplicative = nac(log_x, depth, name="nac_log")
    return gate * additive + (1 - gate) * tf.exp(multiplicative)
"def",
"nalu",
"(",
"x",
",",
"depth",
",",
"epsilon",
"=",
"1e-30",
",",
"name",
"=",
"None",
",",
"reuse",
"=",
"None",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"name",
",",
"default_name",
"=",
"\"nalu\"",
",",
"values",
"=",
"[",
"x"... | 46.083333 | 8.25 |
def set_address(self, address):
    """
    Set the address of the remote host that is contacted, without
    changing hostname, username, password, protocol, and TCP port
    number.
    This is the actual address that is used to open the connection.
    :type address: string
    :param address: A hostname or IP name.
    """
    # IP addresses are normalised; hostnames are stored verbatim.
    self.address = clean_ip(address) if is_ip(address) else address
"def",
"set_address",
"(",
"self",
",",
"address",
")",
":",
"if",
"is_ip",
"(",
"address",
")",
":",
"self",
".",
"address",
"=",
"clean_ip",
"(",
"address",
")",
"else",
":",
"self",
".",
"address",
"=",
"address"
] | 33.5 | 16.357143 |
def frequency2fractional(frequency, mean_frequency=-1):
    """ Convert frequency in Hz to fractional frequency.
    Parameters
    ----------
    frequency: np.array
        Data array of frequency in Hz
    mean_frequency: float
        (optional) The nominal mean frequency, in Hz;
        if omitted, defaults to np.mean(frequency)
    Returns
    -------
    y:
        Data array of fractional frequency
    """
    # -1 is the sentinel for "derive the mean from the data".
    if mean_frequency == -1:
        mean_frequency = np.mean(frequency)
    return [(f - mean_frequency) / mean_frequency for f in frequency]
"def",
"frequency2fractional",
"(",
"frequency",
",",
"mean_frequency",
"=",
"-",
"1",
")",
":",
"if",
"mean_frequency",
"==",
"-",
"1",
":",
"mu",
"=",
"np",
".",
"mean",
"(",
"frequency",
")",
"else",
":",
"mu",
"=",
"mean_frequency",
"y",
"=",
"[",
... | 25.545455 | 18.545455 |
def get_monomers(self, ligands=True, pseudo_group=False):
    """Retrieves all the `Monomers` from the `Assembly` object.
    Parameters
    ----------
    ligands : bool, optional
        If `True`, will include ligand `Monomers`.
    pseudo_group : bool, optional
        If `True`, will include pseudo atoms.
    """
    # Every switch that is off becomes a mol_type to exclude.
    switches = dict(ligands=ligands, pseudo_group=pseudo_group)
    excluded = [name for name, enabled in switches.items() if not enabled]
    selected_groups = [g for g in self.filter_mol_types(excluded)]
    per_group = (group.get_monomers(ligands=ligands) for group in selected_groups)
    return itertools.chain(*per_group)
"def",
"get_monomers",
"(",
"self",
",",
"ligands",
"=",
"True",
",",
"pseudo_group",
"=",
"False",
")",
":",
"base_filters",
"=",
"dict",
"(",
"ligands",
"=",
"ligands",
",",
"pseudo_group",
"=",
"pseudo_group",
")",
"restricted_mol_types",
"=",
"[",
"x",
... | 43.3125 | 18.0625 |
def valid_body_waiting(self):
    """
    Check if a valid body is waiting in buffer.

    A message is valid when (1) the buffer already holds the full packet
    (the low nibble of byte 3 gives the variable data length on top of
    the fixed minimum), (2) the packet ends with the protocol END byte,
    and (3) the checksum byte matches the checksum of the preceding bytes.

    NOTE(review): when the buffer is still too short, the chained
    ``result and ...`` checks below short-circuit, yet both
    ``if not result`` warnings still fire -- so a short buffer also logs
    the end-byte and checksum warnings.  Presumably benign; confirm.
    """
    # Example frame: 0f f8 be 04 00 08 00 00 2f 04
    packet_size = velbus.MINIMUM_MESSAGE_SIZE + \
        (self.buffer[3] & 0x0F)
    if len(self.buffer) < packet_size:
        self.logger.debug("Buffer does not yet contain full message")
        result = False
    else:
        result = True
    # Short-circuits when result is already False, so the index below is
    # never read on a short buffer.
    result = result and self.buffer[packet_size - 1] == velbus.END_BYTE
    if not result:
        self.logger.warning("End byte not recognized")
    # Checksum covers everything up to (not including) the checksum byte.
    result = result and velbus.checksum(
        self.buffer[0:packet_size - 2])[0] == self.buffer[packet_size - 2]
    if not result:
        self.logger.warning("Checksum not recognized")
    self.logger.debug("Valid Body Waiting: %s (%s)", result, str(self.buffer))
    return result
"def",
"valid_body_waiting",
"(",
"self",
")",
":",
"# 0f f8 be 04 00 08 00 00 2f 04",
"packet_size",
"=",
"velbus",
".",
"MINIMUM_MESSAGE_SIZE",
"+",
"(",
"self",
".",
"buffer",
"[",
"3",
"]",
"&",
"0x0F",
")",
"if",
"len",
"(",
"self",
".",
"buffer",
")",
... | 42.619048 | 16.809524 |
def setup(product_name):
    """Initialise logging from configuration and install the excepthook.

    Uses a log-config file when CONF.log_config is set; otherwise builds
    handlers from the individual config options.
    """
    config_file = CONF.log_config
    if config_file:
        _load_log_config(config_file)
    else:
        _setup_logging_from_conf()
    sys.excepthook = _create_logging_excepthook(product_name)
"def",
"setup",
"(",
"product_name",
")",
":",
"if",
"CONF",
".",
"log_config",
":",
"_load_log_config",
"(",
"CONF",
".",
"log_config",
")",
"else",
":",
"_setup_logging_from_conf",
"(",
")",
"sys",
".",
"excepthook",
"=",
"_create_logging_excepthook",
"(",
"... | 30.857143 | 13.142857 |
def SystemShare():
    """
    Register AntShare.
    Returns:
        RegisterTransaction:
    """
    # Total supply: the full generation schedule over all decrement intervals.
    total = sum(Blockchain.GENERATION_AMOUNT) * Blockchain.DECREMENT_INTERVAL
    amount = Fixed8.FromDecimal(total)
    owner = ECDSA.secp256r1().Curve.Infinity
    admin = Crypto.ToScriptHash(PUSHT)
    asset_name = "[{\"lang\":\"zh-CN\",\"name\":\"小蚁股\"},{\"lang\":\"en\",\"name\":\"AntShare\"}]"
    return RegisterTransaction([], [], AssetType.GoverningToken,
                               asset_name, amount, 0, owner, admin)
"def",
"SystemShare",
"(",
")",
":",
"amount",
"=",
"Fixed8",
".",
"FromDecimal",
"(",
"sum",
"(",
"Blockchain",
".",
"GENERATION_AMOUNT",
")",
"*",
"Blockchain",
".",
"DECREMENT_INTERVAL",
")",
"owner",
"=",
"ECDSA",
".",
"secp256r1",
"(",
")",
".",
"Curv... | 42.307692 | 23.384615 |
def set_progress(self, progress):
    """Update the progress for this application.

    For applications processing a fixed set of work it may be useful for
    diagnostics to report progression as work completes.  Progress must
    be a float between 0 and 1; applications that don't know their
    progress (e.g. interactive ones) keep the default of 0.1.

    Parameters
    ----------
    progress : float
        The application progress, must be a value between 0 and 1.
    """
    # `not (0 <= p <= 1.0)` (rather than two comparisons) also rejects NaN.
    if not (0 <= progress <= 1.0):
        raise ValueError("progress must be between 0 and 1, got %.3f"
                         % progress)
    request = proto.SetProgressRequest(progress=progress)
    self._call('SetProgress', request)
"def",
"set_progress",
"(",
"self",
",",
"progress",
")",
":",
"if",
"not",
"(",
"0",
"<=",
"progress",
"<=",
"1.0",
")",
":",
"raise",
"ValueError",
"(",
"\"progress must be between 0 and 1, got %.3f\"",
"%",
"progress",
")",
"self",
".",
"_call",
"(",
"'Se... | 43.45 | 23.2 |
def paintEvent(self, event):
    """ Fills the panel background. """
    super(EncodingPanel, self).paintEvent(event)
    if not self.isVisible():
        return
    # Paint the whole dirty region with the panel's colour.
    p = QtGui.QPainter(self)
    self._background_brush = QtGui.QBrush(self._color)
    p.fillRect(event.rect(), self._background_brush)
"def",
"paintEvent",
"(",
"self",
",",
"event",
")",
":",
"super",
"(",
"EncodingPanel",
",",
"self",
")",
".",
"paintEvent",
"(",
"event",
")",
"if",
"self",
".",
"isVisible",
"(",
")",
":",
"# fill background",
"painter",
"=",
"QtGui",
".",
"QPainter",... | 43.75 | 12.125 |
def write(self, x):
    """Write a string into the output stream, flushing any queued
    newlines, indentation and debug-info bookkeeping first."""
    pending = self._new_lines
    if pending:
        if not self._first_write:
            # Emit the newlines queued by earlier calls and keep the
            # generated-code line counter in sync with them.
            self.stream.write('\n' * pending)
            self.code_lineno += pending
            if self._write_debug_info is not None:
                self.debug_info.append((self._write_debug_info,
                                        self.code_lineno))
                self._write_debug_info = None
        self._first_write = False
        self.stream.write(' ' * self._indentation)
        self._new_lines = 0
    self.stream.write(x)
"def",
"write",
"(",
"self",
",",
"x",
")",
":",
"if",
"self",
".",
"_new_lines",
":",
"if",
"not",
"self",
".",
"_first_write",
":",
"self",
".",
"stream",
".",
"write",
"(",
"'\\n'",
"*",
"self",
".",
"_new_lines",
")",
"self",
".",
"code_lineno",
... | 44.857143 | 12.714286 |
def is_valid(cls, oid):
    """Checks if a `oid` string is valid or not.
    :Parameters:
      - `oid`: the object id to validate
    .. versionadded:: 2.3
    """
    if not oid:
        return False
    try:
        ObjectId(oid)
    except (InvalidId, TypeError):
        return False
    return True
"def",
"is_valid",
"(",
"cls",
",",
"oid",
")",
":",
"if",
"not",
"oid",
":",
"return",
"False",
"try",
":",
"ObjectId",
"(",
"oid",
")",
"return",
"True",
"except",
"(",
"InvalidId",
",",
"TypeError",
")",
":",
"return",
"False"
] | 21.5 | 17.9375 |
def execute_command(args, shell=False, cwd=None, env=None, stdin=None, stdout=None, stderr=None, cmd_encoding='utf-8'):
    """
    Execute an external command and wait for it to finish.
    :param args: command line arguments : [unicode]
    :param shell: True when using shell : boolean
    :param cwd: working directory : string
    :param env: environment variables : dict
    :param stdin: standard input
    :param stdout: standard output
    :param stderr: standard error
    :param cmd_encoding: command line encoding: string
    :return: return code
    """
    # Encode argv and environment consistently before handing to subprocess.
    prepared_args = __convert_args(args, shell, cmd_encoding)
    prepared_env = __convert_env(env, cmd_encoding)
    return subprocess.call(
        args=prepared_args, shell=shell, cwd=cwd, env=prepared_env,
        stdin=stdin, stdout=stdout, stderr=stderr)
"def",
"execute_command",
"(",
"args",
",",
"shell",
"=",
"False",
",",
"cwd",
"=",
"None",
",",
"env",
"=",
"None",
",",
"stdin",
"=",
"None",
",",
"stdout",
"=",
"None",
",",
"stderr",
"=",
"None",
",",
"cmd_encoding",
"=",
"'utf-8'",
")",
":",
"... | 44.75 | 16.625 |
def setLegendData(self, *args, **kwargs):
    """ Set or generate the legend data for this canteen.

    Delegates to :py:func:`.buildLegend`, forcing this instance's
    legend key function. """
    legend = buildLegend(*args, key=self.legendKeyFunc, **kwargs)
    self.legendData = legend
"def",
"setLegendData",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"legendData",
"=",
"buildLegend",
"(",
"*",
"args",
",",
"key",
"=",
"self",
".",
"legendKeyFunc",
",",
"*",
"*",
"kwargs",
")"
] | 60.5 | 9.75 |
def _getSegmentActiveSynapses(self, c, i, s, activeState, newSynapses=False):
"""
Return a segmentUpdate data structure containing a list of proposed
changes to segment s. Let activeSynapses be the list of active synapses
where the originating cells have their activeState output = 1 at time step
t. (This list is empty if s is None since the segment doesn't exist.)
newSynapses is an optional argument that defaults to false. If newSynapses
is true, then newSynapseCount - len(activeSynapses) synapses are added to
activeSynapses. These synapses are randomly chosen from the set of cells
that have learnState = 1 at timeStep.
:param c TODO: document
:param i TODO: document
:param s TODO: document
:param activeState TODO: document
:param newSynapses TODO: document
"""
activeSynapses = []
if s is not None: # s can be None, if adding a new segment
# Here we add *integers* to activeSynapses
activeSynapses = [idx for idx, syn in enumerate(s.syns) \
if activeState[syn[0], syn[1]]]
if newSynapses: # add a few more synapses
nSynapsesToAdd = self.newSynapseCount - len(activeSynapses)
# Here we add *pairs* (colIdx, cellIdx) to activeSynapses
activeSynapses += self._chooseCellsToLearnFrom(c, i, s, nSynapsesToAdd,
activeState)
# It's still possible that activeSynapses is empty, and this will
# be handled in addToSegmentUpdates
# NOTE: activeSynapses contains a mixture of integers and pairs of integers
# - integers are indices of synapses already existing on the segment,
# that we will need to update.
# - pairs represent source (colIdx, cellIdx) of new synapses to create on
# the segment
update = BacktrackingTM._SegmentUpdate(c, i, s, activeSynapses)
return update | [
"def",
"_getSegmentActiveSynapses",
"(",
"self",
",",
"c",
",",
"i",
",",
"s",
",",
"activeState",
",",
"newSynapses",
"=",
"False",
")",
":",
"activeSynapses",
"=",
"[",
"]",
"if",
"s",
"is",
"not",
"None",
":",
"# s can be None, if adding a new segment",
"... | 43.186047 | 24.767442 |
def chart(
symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
start=datetime.datetime(2008, 1, 1),
end=datetime.datetime(2009, 12, 31), # data stops at 2013/1/1
normalize=True,
):
"""Display a graph of the price history for the list of ticker symbols provided
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
"""
start = util.normalize_date(start or datetime.date(2008, 1, 1))
end = util.normalize_date(end or datetime.date(2009, 12, 31))
symbols = [s.upper() for s in symbols]
timeofday = datetime.timedelta(hours=16)
timestamps = du.getNYSEdays(start, end, timeofday)
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = da.get_data(timestamps, symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
na_price = d_data['close'].values
if normalize:
na_price /= na_price[0, :]
plt.clf()
plt.plot(timestamps, na_price)
plt.legend(symbols)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
plt.savefig('chart.pdf', format='pdf')
plt.grid(True)
plt.show()
return na_price | [
"def",
"chart",
"(",
"symbols",
"=",
"(",
"\"AAPL\"",
",",
"\"GLD\"",
",",
"\"GOOG\"",
",",
"\"$SPX\"",
",",
"\"XOM\"",
",",
"\"msft\"",
")",
",",
"start",
"=",
"datetime",
".",
"datetime",
"(",
"2008",
",",
"1",
",",
"1",
")",
",",
"end",
"=",
"da... | 35.605263 | 21.026316 |
def initializenb():
""" Find input files and log initialization info """
logger.info('Working directory: {0}'.format(os.getcwd()))
logger.info('Run on {0}'.format(asctime()))
try:
fileroot = os.environ['fileroot']
logger.info('Setting fileroot to {0} from environment variable.\n'.format(fileroot))
candsfile = 'cands_{0}_merge.pkl'.format(fileroot)
noisefile = 'noise_{0}_merge.pkl'.format(fileroot)
except KeyError:
sdmdir = os.getcwd()
logger.info('Setting sdmdir to current directory {0}\n'.format(os.path.abspath(sdmdir)))
candsfiles = glob.glob('cands_*_merge.pkl')
noisefiles = glob.glob('noise_*_merge.pkl')
if len(candsfiles) == 1 and len(noisefiles) == 1:
logger.info('Found one cands/merge file set')
else:
logger.warn('Found multiple cands/noise file sets. Taking first.')
candsfile = candsfiles[0]
noisefile = noisefiles[0]
fileroot = candsfile.rstrip('_merge.pkl').lstrip('cands_')
logger.info('Set: \n\t candsfile {} \n\t noisefile {} \n\t fileroot {} '.format(candsfile, noisefile, fileroot))
return (candsfile, noisefile, fileroot) | [
"def",
"initializenb",
"(",
")",
":",
"logger",
".",
"info",
"(",
"'Working directory: {0}'",
".",
"format",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
")",
"logger",
".",
"info",
"(",
"'Run on {0}'",
".",
"format",
"(",
"asctime",
"(",
")",
")",
")",
... | 47.28 | 23.12 |
def frame_generator(frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n | [
"def",
"frame_generator",
"(",
"frame_duration_ms",
",",
"audio",
",",
"sample_rate",
")",
":",
"n",
"=",
"int",
"(",
"sample_rate",
"*",
"(",
"frame_duration_ms",
"/",
"1000.0",
")",
"*",
"2",
")",
"offset",
"=",
"0",
"timestamp",
"=",
"0.0",
"duration",
... | 33.5 | 18.0625 |
def create_sobol_samples(order, dim, seed=1):
"""
Args:
order (int):
Number of unique samples to generate.
dim (int):
Number of spacial dimensions. Must satisfy ``0 < dim < 41``.
seed (int):
Starting seed. Non-positive values are treated as 1. If omitted,
consecutive samples are used.
Returns:
(numpy.ndarray):
Quasi-random vector with ``shape == (dim, order)``.
"""
assert 0 < dim < DIM_MAX, "dim in [1, 40]"
# global RANDOM_SEED # pylint: disable=global-statement
# if seed is None:
# seed = RANDOM_SEED
# RANDOM_SEED += order
set_state(seed_value=seed)
seed = RANDOM_SEED
set_state(step=order)
# Initialize row 1 of V.
samples = SOURCE_SAMPLES.copy()
maxcol = int(math.log(2**LOG_MAX-1, 2))+1
samples[0, 0:maxcol] = 1
# Initialize the remaining rows of V.
for idx in range(1, dim):
# The bits of the integer POLY(I) gives the form of polynomial:
degree = int(math.log(POLY[idx], 2))
#Expand this bit pattern to separate components:
includ = numpy.array([val == "1" for val in bin(POLY[idx])[-degree:]])
#Calculate the remaining elements of row I as explained
#in Bratley and Fox, section 2.
for idy in range(degree+1, maxcol+1):
newv = samples[idx, idy-degree-1].item()
base = 1
for idz in range(1, degree+1):
base *= 2
if includ[idz-1]:
newv = newv ^ base * samples[idx, idy-idz-1].item()
samples[idx, idy-1] = newv
samples = samples[:dim]
# Multiply columns of V by appropriate power of 2.
samples *= 2**(numpy.arange(maxcol, 0, -1, dtype=int))
#RECIPD is 1/(common denominator of the elements in V).
recipd = 0.5**(maxcol+1)
lastq = numpy.zeros(dim, dtype=int)
seed = int(seed) if seed > 1 else 1
for seed_ in range(seed):
lowbit = len(bin(seed_)[2:].split("0")[-1])
lastq[:] = lastq ^ samples[:, lowbit]
#Calculate the new components of QUASI.
quasi = numpy.empty((dim, order))
for idx in range(order):
lowbit = len(bin(seed+idx)[2:].split("0")[-1])
quasi[:, idx] = lastq * recipd
lastq[:] = lastq ^ samples[:, lowbit]
return quasi | [
"def",
"create_sobol_samples",
"(",
"order",
",",
"dim",
",",
"seed",
"=",
"1",
")",
":",
"assert",
"0",
"<",
"dim",
"<",
"DIM_MAX",
",",
"\"dim in [1, 40]\"",
"# global RANDOM_SEED # pylint: disable=global-statement",
"# if seed is None:",
"# seed = RANDOM_SEED",
... | 31.424658 | 18.328767 |
def find_font(face, bold, italic):
"""Find font"""
bold = FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR
italic = FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN
face = face.encode('utf8')
fontconfig.FcInit()
pattern = fontconfig.FcPatternCreate()
fontconfig.FcPatternAddInteger(pattern, FC_WEIGHT, bold)
fontconfig.FcPatternAddInteger(pattern, FC_SLANT, italic)
fontconfig.FcPatternAddString(pattern, FC_FAMILY, face)
fontconfig.FcConfigSubstitute(0, pattern, FcMatchPattern)
fontconfig.FcDefaultSubstitute(pattern)
result = FcType()
match = fontconfig.FcFontMatch(0, pattern, byref(result))
fontconfig.FcPatternDestroy(pattern)
if not match:
raise RuntimeError('Could not match font "%s"' % face)
value = FcValue()
fontconfig.FcPatternGet(match, FC_FAMILY, 0, byref(value))
if(value.u.s != face):
warnings.warn('Could not find face match "%s", falling back to "%s"'
% (face, value.u.s))
result = fontconfig.FcPatternGet(match, FC_FILE, 0, byref(value))
if result != 0:
raise RuntimeError('No filename or FT face for "%s"' % face)
fname = value.u.s
return fname.decode('utf-8') | [
"def",
"find_font",
"(",
"face",
",",
"bold",
",",
"italic",
")",
":",
"bold",
"=",
"FC_WEIGHT_BOLD",
"if",
"bold",
"else",
"FC_WEIGHT_REGULAR",
"italic",
"=",
"FC_SLANT_ITALIC",
"if",
"italic",
"else",
"FC_SLANT_ROMAN",
"face",
"=",
"face",
".",
"encode",
"... | 43.814815 | 16.148148 |
def getRecommendedRenderTargetSize(self):
"""Suggested size for the intermediate render target that the distortion pulls from."""
fn = self.function_table.getRecommendedRenderTargetSize
pnWidth = c_uint32()
pnHeight = c_uint32()
fn(byref(pnWidth), byref(pnHeight))
return pnWidth.value, pnHeight.value | [
"def",
"getRecommendedRenderTargetSize",
"(",
"self",
")",
":",
"fn",
"=",
"self",
".",
"function_table",
".",
"getRecommendedRenderTargetSize",
"pnWidth",
"=",
"c_uint32",
"(",
")",
"pnHeight",
"=",
"c_uint32",
"(",
")",
"fn",
"(",
"byref",
"(",
"pnWidth",
")... | 42.875 | 11.75 |
def vsearch_chimera_filter_de_novo(
fasta_filepath,
working_dir,
output_chimeras=True,
output_nonchimeras=True,
output_alns=False,
output_tabular=False,
log_name="vsearch_uchime_de_novo_chimera_filtering.log",
HALT_EXEC=False):
""" Detect chimeras present in the fasta-formatted filename,
without external references (i.e. de novo). Automatically
sort the sequences in filename by decreasing abundance
beforehand. Output chimeras and non-chimeras to FASTA files
and/or 3-way global alignments and/or tabular output.
Parameters
----------
fasta_filepath : string
input fasta file (dereplicated fasta with pattern
[>;]size=integer[;] in the fasta header)
working_dir : string
directory path for all output files
output_chimeras : boolean, optional
output chimeric sequences to file, in fasta format
output_nonchimeras : boolean, optional
output nonchimeric sequences to file, in fasta format
output_alns : boolean, optional
output 3-way global alignments (parentA, parentB, chimera)
in human readable format to file
output_tabular : boolean, optional
output results using the uchime tab-separated format of
18 fields (see Vsearch user manual)
HALT_EXEC : boolean, optional
used for debugging app controller
Return
------
output_chimera_filepath : string
filepath to chimeric fasta sequences
output_non_chimera_filepath : string
filepath to nonchimeric fasta sequences
output_alns_filepath : string
filepath to chimeric sequences alignment
file
output_tabular_filepath : string
filepath to chimeric sequences tabular
output file
log_filepath : string
filepath to log file
"""
app = Vsearch(WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
if not (output_chimeras or
output_nonchimeras or
output_alns or
output_tabular):
raise ValueError("At least one output format (output_chimeras,"
"output_nonchimeras, output_alns, output_tabular)"
"must be selected")
output_chimera_filepath = None
output_non_chimera_filepath = None
output_alns_filepath = None
output_tabular_filepath = None
# set output filepaths
if output_chimeras:
output_chimera_filepath = join(working_dir, 'uchime_chimeras.fasta')
app.Parameters['--chimeras'].on(output_chimera_filepath)
if output_nonchimeras:
output_non_chimera_filepath = join(working_dir,
'uchime_non_chimeras.fasta')
app.Parameters['--nonchimeras'].on(output_non_chimera_filepath)
if output_alns:
output_alns_filepath = join(working_dir, 'uchime_alignments.txt')
app.Parameters['--uchimealns'].on(output_alns_filepath)
if output_tabular:
output_tabular_filepath = join(working_dir, 'uchime_tabular.txt')
app.Parameters['--uchimeout'].on(output_tabular_filepath)
log_filepath = join(working_dir, log_name)
app.Parameters['--uchime_denovo'].on(fasta_filepath)
app.Parameters['--log'].on(log_filepath)
app_result = app()
return output_chimera_filepath, output_non_chimera_filepath,\
output_alns_filepath, output_tabular_filepath, log_filepath | [
"def",
"vsearch_chimera_filter_de_novo",
"(",
"fasta_filepath",
",",
"working_dir",
",",
"output_chimeras",
"=",
"True",
",",
"output_nonchimeras",
"=",
"True",
",",
"output_alns",
"=",
"False",
",",
"output_tabular",
"=",
"False",
",",
"log_name",
"=",
"\"vsearch_u... | 37.857143 | 18.263736 |
def insert(self, index, item):
"""Insert an item at the specified index.
Args:
index (int): Position to insert the item.
item: Item to be inserted. It must have the type specified in the
constructor.
Raises:
:exc:`~.exceptions.WrongListItemType`: If the item has a different
type than the one specified in the constructor.
"""
if issubclass(item.__class__, self._pyof_class):
list.insert(self, index, item)
else:
raise exceptions.WrongListItemType(item.__class__.__name__,
self._pyof_class.__name__) | [
"def",
"insert",
"(",
"self",
",",
"index",
",",
"item",
")",
":",
"if",
"issubclass",
"(",
"item",
".",
"__class__",
",",
"self",
".",
"_pyof_class",
")",
":",
"list",
".",
"insert",
"(",
"self",
",",
"index",
",",
"item",
")",
"else",
":",
"raise... | 37.333333 | 23 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.