text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def copy_resource_dir(src, dest):
"""
To copy package data directory to destination
"""
package_name = "mocha"
dest = (dest + "/" + os.path.basename(src)).rstrip("/")
if pkg_resources.resource_isdir(package_name, src):
if not os.path.isdir(dest):
os.makedirs(dest)
for res in pkg_resources.resource_listdir(__name__, src):
copy_resource_dir(src + "/" + res, dest)
else:
if not os.path.isfile(dest) and os.path.splitext(src)[1] not in [".pyc"]:
copy_resource_file(src, dest) | [
"def",
"copy_resource_dir",
"(",
"src",
",",
"dest",
")",
":",
"package_name",
"=",
"\"mocha\"",
"dest",
"=",
"(",
"dest",
"+",
"\"/\"",
"+",
"os",
".",
"path",
".",
"basename",
"(",
"src",
")",
")",
".",
"rstrip",
"(",
"\"/\"",
")",
"if",
"pkg_resources",
".",
"resource_isdir",
"(",
"package_name",
",",
"src",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dest",
")",
":",
"os",
".",
"makedirs",
"(",
"dest",
")",
"for",
"res",
"in",
"pkg_resources",
".",
"resource_listdir",
"(",
"__name__",
",",
"src",
")",
":",
"copy_resource_dir",
"(",
"src",
"+",
"\"/\"",
"+",
"res",
",",
"dest",
")",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"dest",
")",
"and",
"os",
".",
"path",
".",
"splitext",
"(",
"src",
")",
"[",
"1",
"]",
"not",
"in",
"[",
"\".pyc\"",
"]",
":",
"copy_resource_file",
"(",
"src",
",",
"dest",
")"
] | 39.142857 | 13.571429 |
def format_name(self):
"""Formats the media file based on enhanced metadata.
The actual name of the file and even the name of the directory
structure where the file is to be stored.
"""
self.formatted_filename = formatter.format_filename(
self.series_name, self.season_number,
self.episode_numbers, self.episode_names,
self.extension)
self.formatted_dirname = self.location
if cfg.CONF.move_files_enabled:
self.formatted_dirname = formatter.format_location(
self.series_name, self.season_number)
self.out_location = os.path.join(self.formatted_dirname,
self.formatted_filename) | [
"def",
"format_name",
"(",
"self",
")",
":",
"self",
".",
"formatted_filename",
"=",
"formatter",
".",
"format_filename",
"(",
"self",
".",
"series_name",
",",
"self",
".",
"season_number",
",",
"self",
".",
"episode_numbers",
",",
"self",
".",
"episode_names",
",",
"self",
".",
"extension",
")",
"self",
".",
"formatted_dirname",
"=",
"self",
".",
"location",
"if",
"cfg",
".",
"CONF",
".",
"move_files_enabled",
":",
"self",
".",
"formatted_dirname",
"=",
"formatter",
".",
"format_location",
"(",
"self",
".",
"series_name",
",",
"self",
".",
"season_number",
")",
"self",
".",
"out_location",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"formatted_dirname",
",",
"self",
".",
"formatted_filename",
")"
] | 40.666667 | 18 |
def _proxy_kwargs(browser_name, proxy, browser_kwargs={}): # pylint: disable=dangerous-default-value
"""
Determines the kwargs needed to set up a proxy based on the
browser type.
Returns: a dictionary of arguments needed to pass when
instantiating the WebDriver instance.
"""
proxy_dict = {
"httpProxy": proxy.proxy,
"proxyType": 'manual',
}
if browser_name == 'firefox' and 'desired_capabilities' not in browser_kwargs:
# This one works for firefox locally
wd_proxy = webdriver.common.proxy.Proxy(proxy_dict)
browser_kwargs['proxy'] = wd_proxy
else:
# This one works with chrome, both locally and remote
# This one works with firefox remote, but not locally
if 'desired_capabilities' not in browser_kwargs:
browser_kwargs['desired_capabilities'] = {}
browser_kwargs['desired_capabilities']['proxy'] = proxy_dict
return browser_kwargs | [
"def",
"_proxy_kwargs",
"(",
"browser_name",
",",
"proxy",
",",
"browser_kwargs",
"=",
"{",
"}",
")",
":",
"# pylint: disable=dangerous-default-value",
"proxy_dict",
"=",
"{",
"\"httpProxy\"",
":",
"proxy",
".",
"proxy",
",",
"\"proxyType\"",
":",
"'manual'",
",",
"}",
"if",
"browser_name",
"==",
"'firefox'",
"and",
"'desired_capabilities'",
"not",
"in",
"browser_kwargs",
":",
"# This one works for firefox locally",
"wd_proxy",
"=",
"webdriver",
".",
"common",
".",
"proxy",
".",
"Proxy",
"(",
"proxy_dict",
")",
"browser_kwargs",
"[",
"'proxy'",
"]",
"=",
"wd_proxy",
"else",
":",
"# This one works with chrome, both locally and remote",
"# This one works with firefox remote, but not locally",
"if",
"'desired_capabilities'",
"not",
"in",
"browser_kwargs",
":",
"browser_kwargs",
"[",
"'desired_capabilities'",
"]",
"=",
"{",
"}",
"browser_kwargs",
"[",
"'desired_capabilities'",
"]",
"[",
"'proxy'",
"]",
"=",
"proxy_dict",
"return",
"browser_kwargs"
] | 35.037037 | 22.888889 |
def add_event(request):
""" Public form to add an event. """
form = AddEventForm(request.POST or None)
if form.is_valid():
instance = form.save(commit=False)
instance.sites = settings.SITE_ID
instance.submitted_by = request.user
instance.approved = True
instance.slug = slugify(instance.name)
instance.save()
messages.success(request, 'Your event has been added.')
return HttpResponseRedirect(reverse('events_index'))
return render(request, 'happenings/event_form.html', {
'form': form,
'form_title': 'Add an event'
}) | [
"def",
"add_event",
"(",
"request",
")",
":",
"form",
"=",
"AddEventForm",
"(",
"request",
".",
"POST",
"or",
"None",
")",
"if",
"form",
".",
"is_valid",
"(",
")",
":",
"instance",
"=",
"form",
".",
"save",
"(",
"commit",
"=",
"False",
")",
"instance",
".",
"sites",
"=",
"settings",
".",
"SITE_ID",
"instance",
".",
"submitted_by",
"=",
"request",
".",
"user",
"instance",
".",
"approved",
"=",
"True",
"instance",
".",
"slug",
"=",
"slugify",
"(",
"instance",
".",
"name",
")",
"instance",
".",
"save",
"(",
")",
"messages",
".",
"success",
"(",
"request",
",",
"'Your event has been added.'",
")",
"return",
"HttpResponseRedirect",
"(",
"reverse",
"(",
"'events_index'",
")",
")",
"return",
"render",
"(",
"request",
",",
"'happenings/event_form.html'",
",",
"{",
"'form'",
":",
"form",
",",
"'form_title'",
":",
"'Add an event'",
"}",
")"
] | 37.6875 | 12.1875 |
def delete(self):
"""Delete a file.
The base directory is also removed, as it is assumed that only one file
exists in the directory.
"""
fs, path = self._get_fs(create_dir=False)
if fs.exists(path):
fs.remove(path)
if self.clean_dir and fs.exists('.'):
fs.removedir('.')
return True | [
"def",
"delete",
"(",
"self",
")",
":",
"fs",
",",
"path",
"=",
"self",
".",
"_get_fs",
"(",
"create_dir",
"=",
"False",
")",
"if",
"fs",
".",
"exists",
"(",
"path",
")",
":",
"fs",
".",
"remove",
"(",
"path",
")",
"if",
"self",
".",
"clean_dir",
"and",
"fs",
".",
"exists",
"(",
"'.'",
")",
":",
"fs",
".",
"removedir",
"(",
"'.'",
")",
"return",
"True"
] | 30 | 15.166667 |
def merge_list(self, new_list):
"""Add new CM servers to the list
:param new_list: a list of ``(ip, port)`` tuples
:type new_list: :class:`list`
"""
total = len(self.list)
for ip, port in new_list:
if (ip, port) not in self.list:
self.mark_good((ip, port))
if len(self.list) > total:
self._LOG.debug("Added %d new CM addresses." % (len(self.list) - total)) | [
"def",
"merge_list",
"(",
"self",
",",
"new_list",
")",
":",
"total",
"=",
"len",
"(",
"self",
".",
"list",
")",
"for",
"ip",
",",
"port",
"in",
"new_list",
":",
"if",
"(",
"ip",
",",
"port",
")",
"not",
"in",
"self",
".",
"list",
":",
"self",
".",
"mark_good",
"(",
"(",
"ip",
",",
"port",
")",
")",
"if",
"len",
"(",
"self",
".",
"list",
")",
">",
"total",
":",
"self",
".",
"_LOG",
".",
"debug",
"(",
"\"Added %d new CM addresses.\"",
"%",
"(",
"len",
"(",
"self",
".",
"list",
")",
"-",
"total",
")",
")"
] | 31.571429 | 15.714286 |
def next_frame_ae():
"""Conv autoencoder."""
hparams = next_frame_basic_deterministic()
hparams.bottom["inputs"] = modalities.video_bitwise_bottom
hparams.top["inputs"] = modalities.video_top
hparams.hidden_size = 256
hparams.batch_size = 8
hparams.num_hidden_layers = 4
hparams.num_compress_steps = 4
hparams.dropout = 0.4
return hparams | [
"def",
"next_frame_ae",
"(",
")",
":",
"hparams",
"=",
"next_frame_basic_deterministic",
"(",
")",
"hparams",
".",
"bottom",
"[",
"\"inputs\"",
"]",
"=",
"modalities",
".",
"video_bitwise_bottom",
"hparams",
".",
"top",
"[",
"\"inputs\"",
"]",
"=",
"modalities",
".",
"video_top",
"hparams",
".",
"hidden_size",
"=",
"256",
"hparams",
".",
"batch_size",
"=",
"8",
"hparams",
".",
"num_hidden_layers",
"=",
"4",
"hparams",
".",
"num_compress_steps",
"=",
"4",
"hparams",
".",
"dropout",
"=",
"0.4",
"return",
"hparams"
] | 31.636364 | 12.454545 |
def ISSO_eq_at_pole(r, chi):
"""
Polynomial that enables the calculation of the Kerr polar
(inclination = +/- pi/2) innermost stable spherical orbit
(ISSO) radius via its roots. Physical solutions are
between 6 and 1+sqrt[3]+sqrt[3+2sqrt[3]].
Parameters
-----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
Returns
----------
float
r**3*(r**2*(r-6)+chi**2*(3*r+4))+chi**4*(3*r*(r-2)+chi**2)
"""
return r**3*(r**2*(r-6)+chi**2*(3*r+4))+chi**4*(3*r*(r-2)+chi**2) | [
"def",
"ISSO_eq_at_pole",
"(",
"r",
",",
"chi",
")",
":",
"return",
"r",
"**",
"3",
"*",
"(",
"r",
"**",
"2",
"*",
"(",
"r",
"-",
"6",
")",
"+",
"chi",
"**",
"2",
"*",
"(",
"3",
"*",
"r",
"+",
"4",
")",
")",
"+",
"chi",
"**",
"4",
"*",
"(",
"3",
"*",
"r",
"*",
"(",
"r",
"-",
"2",
")",
"+",
"chi",
"**",
"2",
")"
] | 28.9 | 20.5 |
def get_form(self, form_class=None):
"""Get form for model"""
form = super().get_form(form_class)
if not getattr(form, 'helper', None):
form.helper = FormHelper()
form.helper.form_tag = False
else:
form.helper.form_tag = False
return form | [
"def",
"get_form",
"(",
"self",
",",
"form_class",
"=",
"None",
")",
":",
"form",
"=",
"super",
"(",
")",
".",
"get_form",
"(",
"form_class",
")",
"if",
"not",
"getattr",
"(",
"form",
",",
"'helper'",
",",
"None",
")",
":",
"form",
".",
"helper",
"=",
"FormHelper",
"(",
")",
"form",
".",
"helper",
".",
"form_tag",
"=",
"False",
"else",
":",
"form",
".",
"helper",
".",
"form_tag",
"=",
"False",
"return",
"form"
] | 30.6 | 10.2 |
def setup_menu(self, minmax):
"""Setup context menu"""
if self.minmax_action is not None:
self.minmax_action.setChecked(minmax)
return
resize_action = create_action(self, _("Resize rows to contents"),
triggered=self.resizeRowsToContents)
resize_columns_action = create_action(
self,
_("Resize columns to contents"),
triggered=self.resize_column_contents)
self.paste_action = create_action(self, _("Paste"),
icon=ima.icon('editpaste'),
triggered=self.paste)
self.copy_action = create_action(self, _("Copy"),
icon=ima.icon('editcopy'),
triggered=self.copy)
self.edit_action = create_action(self, _("Edit"),
icon=ima.icon('edit'),
triggered=self.edit_item)
self.plot_action = create_action(self, _("Plot"),
icon=ima.icon('plot'),
triggered=lambda: self.plot_item('plot'))
self.plot_action.setVisible(False)
self.hist_action = create_action(self, _("Histogram"),
icon=ima.icon('hist'),
triggered=lambda: self.plot_item('hist'))
self.hist_action.setVisible(False)
self.imshow_action = create_action(self, _("Show image"),
icon=ima.icon('imshow'),
triggered=self.imshow_item)
self.imshow_action.setVisible(False)
self.save_array_action = create_action(self, _("Save array"),
icon=ima.icon('filesave'),
triggered=self.save_array)
self.save_array_action.setVisible(False)
self.insert_action = create_action(self, _("Insert"),
icon=ima.icon('insert'),
triggered=self.insert_item)
self.remove_action = create_action(self, _("Remove"),
icon=ima.icon('editdelete'),
triggered=self.remove_item)
self.minmax_action = create_action(self, _("Show arrays min/max"),
toggled=self.toggle_minmax)
self.minmax_action.setChecked(minmax)
self.toggle_minmax(minmax)
self.rename_action = create_action(self, _("Rename"),
icon=ima.icon('rename'),
triggered=self.rename_item)
self.duplicate_action = create_action(self, _("Duplicate"),
icon=ima.icon('edit_add'),
triggered=self.duplicate_item)
menu = QMenu(self)
menu_actions = [self.edit_action, self.plot_action, self.hist_action,
self.imshow_action, self.save_array_action,
self.insert_action, self.remove_action,
self.copy_action, self.paste_action,
None, self.rename_action, self.duplicate_action,
None, resize_action, resize_columns_action]
if ndarray is not FakeObject:
menu_actions.append(self.minmax_action)
add_actions(menu, menu_actions)
self.empty_ws_menu = QMenu(self)
add_actions(self.empty_ws_menu,
[self.insert_action, self.paste_action,
None, resize_action, resize_columns_action])
return menu | [
"def",
"setup_menu",
"(",
"self",
",",
"minmax",
")",
":",
"if",
"self",
".",
"minmax_action",
"is",
"not",
"None",
":",
"self",
".",
"minmax_action",
".",
"setChecked",
"(",
"minmax",
")",
"return",
"resize_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Resize rows to contents\"",
")",
",",
"triggered",
"=",
"self",
".",
"resizeRowsToContents",
")",
"resize_columns_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Resize columns to contents\"",
")",
",",
"triggered",
"=",
"self",
".",
"resize_column_contents",
")",
"self",
".",
"paste_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Paste\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'editpaste'",
")",
",",
"triggered",
"=",
"self",
".",
"paste",
")",
"self",
".",
"copy_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Copy\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'editcopy'",
")",
",",
"triggered",
"=",
"self",
".",
"copy",
")",
"self",
".",
"edit_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Edit\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'edit'",
")",
",",
"triggered",
"=",
"self",
".",
"edit_item",
")",
"self",
".",
"plot_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Plot\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'plot'",
")",
",",
"triggered",
"=",
"lambda",
":",
"self",
".",
"plot_item",
"(",
"'plot'",
")",
")",
"self",
".",
"plot_action",
".",
"setVisible",
"(",
"False",
")",
"self",
".",
"hist_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Histogram\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'hist'",
")",
",",
"triggered",
"=",
"lambda",
":",
"self",
".",
"plot_item",
"(",
"'hist'",
")",
")",
"self",
".",
"hist_action",
".",
"setVisible",
"(",
"False",
")",
"self",
".",
"imshow_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Show image\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'imshow'",
")",
",",
"triggered",
"=",
"self",
".",
"imshow_item",
")",
"self",
".",
"imshow_action",
".",
"setVisible",
"(",
"False",
")",
"self",
".",
"save_array_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Save array\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'filesave'",
")",
",",
"triggered",
"=",
"self",
".",
"save_array",
")",
"self",
".",
"save_array_action",
".",
"setVisible",
"(",
"False",
")",
"self",
".",
"insert_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Insert\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'insert'",
")",
",",
"triggered",
"=",
"self",
".",
"insert_item",
")",
"self",
".",
"remove_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Remove\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'editdelete'",
")",
",",
"triggered",
"=",
"self",
".",
"remove_item",
")",
"self",
".",
"minmax_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Show arrays min/max\"",
")",
",",
"toggled",
"=",
"self",
".",
"toggle_minmax",
")",
"self",
".",
"minmax_action",
".",
"setChecked",
"(",
"minmax",
")",
"self",
".",
"toggle_minmax",
"(",
"minmax",
")",
"self",
".",
"rename_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Rename\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'rename'",
")",
",",
"triggered",
"=",
"self",
".",
"rename_item",
")",
"self",
".",
"duplicate_action",
"=",
"create_action",
"(",
"self",
",",
"_",
"(",
"\"Duplicate\"",
")",
",",
"icon",
"=",
"ima",
".",
"icon",
"(",
"'edit_add'",
")",
",",
"triggered",
"=",
"self",
".",
"duplicate_item",
")",
"menu",
"=",
"QMenu",
"(",
"self",
")",
"menu_actions",
"=",
"[",
"self",
".",
"edit_action",
",",
"self",
".",
"plot_action",
",",
"self",
".",
"hist_action",
",",
"self",
".",
"imshow_action",
",",
"self",
".",
"save_array_action",
",",
"self",
".",
"insert_action",
",",
"self",
".",
"remove_action",
",",
"self",
".",
"copy_action",
",",
"self",
".",
"paste_action",
",",
"None",
",",
"self",
".",
"rename_action",
",",
"self",
".",
"duplicate_action",
",",
"None",
",",
"resize_action",
",",
"resize_columns_action",
"]",
"if",
"ndarray",
"is",
"not",
"FakeObject",
":",
"menu_actions",
".",
"append",
"(",
"self",
".",
"minmax_action",
")",
"add_actions",
"(",
"menu",
",",
"menu_actions",
")",
"self",
".",
"empty_ws_menu",
"=",
"QMenu",
"(",
"self",
")",
"add_actions",
"(",
"self",
".",
"empty_ws_menu",
",",
"[",
"self",
".",
"insert_action",
",",
"self",
".",
"paste_action",
",",
"None",
",",
"resize_action",
",",
"resize_columns_action",
"]",
")",
"return",
"menu"
] | 57.5 | 21.279412 |
def asizeof(*objs, **opts):
'''Return the combined size in bytes of all objects passed as positional argments.
The available options and defaults are the following.
*align=8* -- size alignment
*all=False* -- all current objects
*clip=80* -- clip ``repr()`` strings
*code=False* -- incl. (byte)code size
*derive=False* -- derive from super type
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0.0* -- print statistics
Set *align* to a power of 2 to align sizes. Any value less
than 2 avoids size alignment.
All current module, global and stack objects are sized if
*all* is True and if no positional arguments are supplied.
A positive *clip* value truncates all repr() strings to at
most *clip* characters.
The (byte)code size of callable objects like functions,
methods, classes, etc. is included only if *code* is True.
If *derive* is True, new types are handled like an existing
(super) type provided there is one and only of those.
By default certain base types like object, super, etc. are
ignored. Set *ignored* to False to include those.
If *infer* is True, new types are inferred from attributes
(only implemented for dict types on callable attributes
as get, has_key, items, keys and values).
Set *limit* to a positive value to accumulate the sizes of
the referents of each object, recursively up to the limit.
Using *limit=0* returns the sum of the flat[4] sizes of
the given objects. High *limit* values may cause runtime
errors and miss objects for sizing.
A positive value for *stats* prints up to 8 statistics, (1)
a summary of the number of objects sized and seen, (2) a
simple profile of the sized objects by type and (3+) up to
6 tables showing the static, dynamic, derived, ignored,
inferred and dict types used, found resp. installed. The
fractional part of the *stats* value (x100) is the cutoff
percentage for simple profiles.
[4] See the documentation of this module for the definition of flat size.
'''
t, p = _objs_opts(objs, **opts)
if t:
_asizer.reset(**p)
s = _asizer.asizeof(*t)
_asizer.print_stats(objs=t, opts=opts) # show opts as _kwdstr
_asizer._clear()
else:
s = 0
return s | [
"def",
"asizeof",
"(",
"*",
"objs",
",",
"*",
"*",
"opts",
")",
":",
"t",
",",
"p",
"=",
"_objs_opts",
"(",
"objs",
",",
"*",
"*",
"opts",
")",
"if",
"t",
":",
"_asizer",
".",
"reset",
"(",
"*",
"*",
"p",
")",
"s",
"=",
"_asizer",
".",
"asizeof",
"(",
"*",
"t",
")",
"_asizer",
".",
"print_stats",
"(",
"objs",
"=",
"t",
",",
"opts",
"=",
"opts",
")",
"# show opts as _kwdstr",
"_asizer",
".",
"_clear",
"(",
")",
"else",
":",
"s",
"=",
"0",
"return",
"s"
] | 34.542857 | 24.057143 |
def underflow(self, axis=0):
"""
Return the underflow for the given axis.
Depending on the dimension of the histogram, may return an array.
"""
if axis not in range(3):
raise ValueError("axis must be 0, 1, or 2")
if self.DIM == 1:
return self.GetBinContent(0)
elif self.DIM == 2:
def idx(i):
arg = [i]
arg.insert(axis, 0)
return arg
return [
self.GetBinContent(*idx(i))
for i in self.bins_range(axis=(axis + 1) % 2, overflow=True)]
elif self.DIM == 3:
axes = [0, 1, 2]
axes.remove(axis)
axis2, axis3 = axes
def idx(i, j):
arg = [i, j]
arg.insert(axis, 0)
return arg
return [[
self.GetBinContent(*idx(i, j))
for i in self.bins_range(axis=axis2, overflow=True)]
for j in self.bins_range(axis=axis3, overflow=True)] | [
"def",
"underflow",
"(",
"self",
",",
"axis",
"=",
"0",
")",
":",
"if",
"axis",
"not",
"in",
"range",
"(",
"3",
")",
":",
"raise",
"ValueError",
"(",
"\"axis must be 0, 1, or 2\"",
")",
"if",
"self",
".",
"DIM",
"==",
"1",
":",
"return",
"self",
".",
"GetBinContent",
"(",
"0",
")",
"elif",
"self",
".",
"DIM",
"==",
"2",
":",
"def",
"idx",
"(",
"i",
")",
":",
"arg",
"=",
"[",
"i",
"]",
"arg",
".",
"insert",
"(",
"axis",
",",
"0",
")",
"return",
"arg",
"return",
"[",
"self",
".",
"GetBinContent",
"(",
"*",
"idx",
"(",
"i",
")",
")",
"for",
"i",
"in",
"self",
".",
"bins_range",
"(",
"axis",
"=",
"(",
"axis",
"+",
"1",
")",
"%",
"2",
",",
"overflow",
"=",
"True",
")",
"]",
"elif",
"self",
".",
"DIM",
"==",
"3",
":",
"axes",
"=",
"[",
"0",
",",
"1",
",",
"2",
"]",
"axes",
".",
"remove",
"(",
"axis",
")",
"axis2",
",",
"axis3",
"=",
"axes",
"def",
"idx",
"(",
"i",
",",
"j",
")",
":",
"arg",
"=",
"[",
"i",
",",
"j",
"]",
"arg",
".",
"insert",
"(",
"axis",
",",
"0",
")",
"return",
"arg",
"return",
"[",
"[",
"self",
".",
"GetBinContent",
"(",
"*",
"idx",
"(",
"i",
",",
"j",
")",
")",
"for",
"i",
"in",
"self",
".",
"bins_range",
"(",
"axis",
"=",
"axis2",
",",
"overflow",
"=",
"True",
")",
"]",
"for",
"j",
"in",
"self",
".",
"bins_range",
"(",
"axis",
"=",
"axis3",
",",
"overflow",
"=",
"True",
")",
"]"
] | 34.4 | 14.2 |
def get_workspace_config(namespace, workspace, cnamespace, config):
"""Get method configuration in workspace.
Args:
namespace (str): project to which workspace belongs
workspace (str): Workspace name
cnamespace (str): Config namespace
config (str): Config name
Swagger:
https://api.firecloud.org/#!/Method_Configurations/getWorkspaceMethodConfig
"""
uri = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(namespace,
workspace, cnamespace, config)
return __get(uri) | [
"def",
"get_workspace_config",
"(",
"namespace",
",",
"workspace",
",",
"cnamespace",
",",
"config",
")",
":",
"uri",
"=",
"\"workspaces/{0}/{1}/method_configs/{2}/{3}\"",
".",
"format",
"(",
"namespace",
",",
"workspace",
",",
"cnamespace",
",",
"config",
")",
"return",
"__get",
"(",
"uri",
")"
] | 35.125 | 22.625 |
def post_url(self, url, token='', json=None, data=None, headers=None):
"""
Returns a post resquest object taking in a url, user token, and
possible json information.
Arguments:
url (str): The url to make post to
token (str): The authentication token
json (dict): json info to send
Returns:
obj: Post request object
"""
if (token == ''):
token = self._user_token
if headers:
headers.update({'Authorization': 'Token {}'.format(token)})
else:
headers = {'Authorization': 'Token {}'.format(token)}
if json:
return requests.post(url,
headers=headers,
json=json,
verify=False)
if data:
return requests.post(url,
headers=headers,
data=data,
verify=False)
return requests.post(url,
headers=headers,
verify=False) | [
"def",
"post_url",
"(",
"self",
",",
"url",
",",
"token",
"=",
"''",
",",
"json",
"=",
"None",
",",
"data",
"=",
"None",
",",
"headers",
"=",
"None",
")",
":",
"if",
"(",
"token",
"==",
"''",
")",
":",
"token",
"=",
"self",
".",
"_user_token",
"if",
"headers",
":",
"headers",
".",
"update",
"(",
"{",
"'Authorization'",
":",
"'Token {}'",
".",
"format",
"(",
"token",
")",
"}",
")",
"else",
":",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Token {}'",
".",
"format",
"(",
"token",
")",
"}",
"if",
"json",
":",
"return",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"json",
"=",
"json",
",",
"verify",
"=",
"False",
")",
"if",
"data",
":",
"return",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"data",
"=",
"data",
",",
"verify",
"=",
"False",
")",
"return",
"requests",
".",
"post",
"(",
"url",
",",
"headers",
"=",
"headers",
",",
"verify",
"=",
"False",
")"
] | 32.428571 | 16.028571 |
def list(self, date_created_before=values.unset, date_created=values.unset,
date_created_after=values.unset, date_updated_before=values.unset,
date_updated=values.unset, date_updated_after=values.unset,
friendly_name=values.unset, status=values.unset, limit=None,
page_size=None):
"""
Lists ConferenceInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param date date_created_before: The `YYYY-MM-DD` value of the resources to read
:param date date_created: The `YYYY-MM-DD` value of the resources to read
:param date date_created_after: The `YYYY-MM-DD` value of the resources to read
:param date date_updated_before: The `YYYY-MM-DD` value of the resources to read
:param date date_updated: The `YYYY-MM-DD` value of the resources to read
:param date date_updated_after: The `YYYY-MM-DD` value of the resources to read
:param unicode friendly_name: The string that identifies the Conference resources to read
:param ConferenceInstance.Status status: The status of the resources to read
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.conference.ConferenceInstance]
"""
return list(self.stream(
date_created_before=date_created_before,
date_created=date_created,
date_created_after=date_created_after,
date_updated_before=date_updated_before,
date_updated=date_updated,
date_updated_after=date_updated_after,
friendly_name=friendly_name,
status=status,
limit=limit,
page_size=page_size,
)) | [
"def",
"list",
"(",
"self",
",",
"date_created_before",
"=",
"values",
".",
"unset",
",",
"date_created",
"=",
"values",
".",
"unset",
",",
"date_created_after",
"=",
"values",
".",
"unset",
",",
"date_updated_before",
"=",
"values",
".",
"unset",
",",
"date_updated",
"=",
"values",
".",
"unset",
",",
"date_updated_after",
"=",
"values",
".",
"unset",
",",
"friendly_name",
"=",
"values",
".",
"unset",
",",
"status",
"=",
"values",
".",
"unset",
",",
"limit",
"=",
"None",
",",
"page_size",
"=",
"None",
")",
":",
"return",
"list",
"(",
"self",
".",
"stream",
"(",
"date_created_before",
"=",
"date_created_before",
",",
"date_created",
"=",
"date_created",
",",
"date_created_after",
"=",
"date_created_after",
",",
"date_updated_before",
"=",
"date_updated_before",
",",
"date_updated",
"=",
"date_updated",
",",
"date_updated_after",
"=",
"date_updated_after",
",",
"friendly_name",
"=",
"friendly_name",
",",
"status",
"=",
"status",
",",
"limit",
"=",
"limit",
",",
"page_size",
"=",
"page_size",
",",
")",
")"
] | 58.475 | 28.875 |
def get_repository_method_acl(namespace, method, snapshot_id):
"""Get permissions for a method.
The method should exist in the methods repository.
Args:
namespace (str): Methods namespace
method (str): method name
version (int): snapshot_id of the method
Swagger:
https://api.firecloud.org/#!/Method_Repository/getMethodACL
"""
uri = "methods/{0}/{1}/{2}/permissions".format(namespace,method,snapshot_id)
return __get(uri) | [
"def",
"get_repository_method_acl",
"(",
"namespace",
",",
"method",
",",
"snapshot_id",
")",
":",
"uri",
"=",
"\"methods/{0}/{1}/{2}/permissions\"",
".",
"format",
"(",
"namespace",
",",
"method",
",",
"snapshot_id",
")",
"return",
"__get",
"(",
"uri",
")"
] | 31.4 | 21.2 |
def add_environment_vars(config: MutableMapping[str, Any]):
"""Override config with environment variables
Environment variables have to be prefixed with BELBIO_
which will be stripped before splitting on '__' and lower-casing
the environment variable name that is left into keys for the
config dictionary.
Example:
BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio
1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL
2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url
3. bel_api__servers__api_url ==> [bel_api, servers, api_url]
4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio
"""
# TODO need to redo config - can't add value to dictionary without recursively building up the dict
# check into config libraries again
for e in os.environ:
if re.match("BELBIO_", e):
val = os.environ.get(e)
if val:
e.replace("BELBIO_", "")
env_keys = e.lower().split("__")
if len(env_keys) > 1:
joined = '"]["'.join(env_keys)
eval_config = f'config["{joined}"] = val'
try:
eval(eval_config)
except Exception as exc:
log.warn("Cannot process {e} into config")
else:
config[env_keys[0]] = val | [
"def",
"add_environment_vars",
"(",
"config",
":",
"MutableMapping",
"[",
"str",
",",
"Any",
"]",
")",
":",
"# TODO need to redo config - can't add value to dictionary without recursively building up the dict",
"# check into config libraries again",
"for",
"e",
"in",
"os",
".",
"environ",
":",
"if",
"re",
".",
"match",
"(",
"\"BELBIO_\"",
",",
"e",
")",
":",
"val",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"e",
")",
"if",
"val",
":",
"e",
".",
"replace",
"(",
"\"BELBIO_\"",
",",
"\"\"",
")",
"env_keys",
"=",
"e",
".",
"lower",
"(",
")",
".",
"split",
"(",
"\"__\"",
")",
"if",
"len",
"(",
"env_keys",
")",
">",
"1",
":",
"joined",
"=",
"'\"][\"'",
".",
"join",
"(",
"env_keys",
")",
"eval_config",
"=",
"f'config[\"{joined}\"] = val'",
"try",
":",
"eval",
"(",
"eval_config",
")",
"except",
"Exception",
"as",
"exc",
":",
"log",
".",
"warn",
"(",
"\"Cannot process {e} into config\"",
")",
"else",
":",
"config",
"[",
"env_keys",
"[",
"0",
"]",
"]",
"=",
"val"
] | 41.342857 | 21 |
def _get_qtyprc(self, n_points=6):
""" Returns a list of tuples of the form (qty, prc) created from the
cost function. If the cost function is polynomial it will be converted
to piece-wise linear using poly_to_pwl(n_points).
"""
if self.pcost_model == POLYNOMIAL:
# Convert polynomial cost function to piece-wise linear.
self.poly_to_pwl(n_points)
n_segments = len(self.p_cost) - 1
qtyprc = []
for i in range(n_segments):
x1, y1 = self.p_cost[i]
x2, y2 = self.p_cost[(i + 1)]
quantity = x2 - x1
price = (y2 - y1) / quantity
qtyprc.append((quantity, price))
return qtyprc | [
"def",
"_get_qtyprc",
"(",
"self",
",",
"n_points",
"=",
"6",
")",
":",
"if",
"self",
".",
"pcost_model",
"==",
"POLYNOMIAL",
":",
"# Convert polynomial cost function to piece-wise linear.",
"self",
".",
"poly_to_pwl",
"(",
"n_points",
")",
"n_segments",
"=",
"len",
"(",
"self",
".",
"p_cost",
")",
"-",
"1",
"qtyprc",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"n_segments",
")",
":",
"x1",
",",
"y1",
"=",
"self",
".",
"p_cost",
"[",
"i",
"]",
"x2",
",",
"y2",
"=",
"self",
".",
"p_cost",
"[",
"(",
"i",
"+",
"1",
")",
"]",
"quantity",
"=",
"x2",
"-",
"x1",
"price",
"=",
"(",
"y2",
"-",
"y1",
")",
"/",
"quantity",
"qtyprc",
".",
"append",
"(",
"(",
"quantity",
",",
"price",
")",
")",
"return",
"qtyprc"
] | 30.913043 | 17.391304 |
async def has_started(self):
        """
        Report whether the handler has completed all start-up processes
        (connection, session, link and authentication) and can process
        messages.
        **This function is now deprecated and will be removed in v2.0+.**
        :rtype: bool
        """
        # pylint: disable=protected-access
        timed_out, auth_pending = False, False
        if self._handler._connection.cbs:
            timed_out, auth_pending = await self._handler._auth.handle_token_async()
        if timed_out:
            raise EventHubError("Authorization timeout.")
        if auth_pending:
            # Authentication is still running; not ready yet.
            return False
        # Ready only once the underlying client reports readiness.
        return bool(await self._handler._client_ready_async())
"async",
"def",
"has_started",
"(",
"self",
")",
":",
"# pylint: disable=protected-access",
"timeout",
"=",
"False",
"auth_in_progress",
"=",
"False",
"if",
"self",
".",
"_handler",
".",
"_connection",
".",
"cbs",
":",
"timeout",
",",
"auth_in_progress",
"=",
"await",
"self",
".",
"_handler",
".",
"_auth",
".",
"handle_token_async",
"(",
")",
"if",
"timeout",
":",
"raise",
"EventHubError",
"(",
"\"Authorization timeout.\"",
")",
"if",
"auth_in_progress",
":",
"return",
"False",
"if",
"not",
"await",
"self",
".",
"_handler",
".",
"_client_ready_async",
"(",
")",
":",
"return",
"False",
"return",
"True"
] | 37.238095 | 17.428571 |
def contents_size(self):
        '''
        Returns the number of different categories to be shown in the
        contents side-bar in the HTML documentation.
        '''
        # Each of these counts by mere *presence* of the attribute,
        # even if its value is empty/falsy (matching the original
        # hasattr-based checks).
        categories = (
            'variables', 'types', 'modules', 'submodules', 'subroutines',
            'modprocedures', 'functions', 'interfaces', 'absinterfaces',
            'programs', 'boundprocs', 'finalprocs', 'enums', 'procedure',
            'constructor', 'modfunctions', 'modsubroutines', 'modprocs',
        )
        count = sum(1 for attr in categories if hasattr(self, attr))
        # 'src' is special: it only counts when present *and* truthy.
        if getattr(self, 'src', None):
            count += 1
        return count
"def",
"contents_size",
"(",
"self",
")",
":",
"count",
"=",
"0",
"if",
"hasattr",
"(",
"self",
",",
"'variables'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'types'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'modules'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'submodules'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'subroutines'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'modprocedures'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'functions'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'interfaces'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'absinterfaces'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'programs'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'boundprocs'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'finalprocs'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'enums'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'procedure'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'constructor'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'modfunctions'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'modsubroutines'",
")",
":",
"count",
"+=",
"1",
"if",
"hasattr",
"(",
"self",
",",
"'modprocs'",
")",
":",
"count",
"+=",
"1",
"if",
"getattr",
"(",
"self",
",",
"'src'",
",",
"None",
")",
":",
"count",
"+=",
"1",
"return",
"count"
] | 43.346154 | 12.346154 |
def map(*args):
    """ this map works just like the builtin.map, except, this one you can also:
        - give it multiple functions to map over an iterable
        - give it a single function with multiple arguments to run a window
          based map operation over an iterable
    """
    # BUG FIX: this function shadows the builtin map, and the old code
    # reached the real one via __builtins__.  __builtins__ is only a
    # module in __main__; in any *imported* module it is a plain dict,
    # so __builtins__.map raised AttributeError there.  The builtins
    # module is the documented, reliable way to reach the builtin.
    try:
        import builtins
    except ImportError:  # Python 2
        import __builtin__ as builtins
    functions_to_apply = [i for i in args if callable(i)]
    iterables_to_run = [i for i in args if not callable(i)]
    assert len(functions_to_apply) > 0, 'at least one function needs to be given to map'
    assert len(iterables_to_run) > 0, 'no iterables were given to map'
    # check for native map usage: one function taking a single argument
    if len(functions_to_apply) == 1 and len(iterables_to_run) >= 1 and function_arg_count(*functions_to_apply) == 1:
        if hasattr(iter([]), '__next__'):  # Python 3: map is already lazy
            return builtins.map(functions_to_apply[0], *iterables_to_run)
        else:  # Python 2: wrap the eagerly-built list in an iterator
            return iter(builtins.map(functions_to_apply[0], *iterables_to_run))
    # a single multi-argument function: run it over a sliding window
    elif len(functions_to_apply) == 1:
        fn = functions_to_apply[0]
        # if there is a single iterable, chop it up
        if len(iterables_to_run) == 1:
            return (fn(*i) for i in window(iterables_to_run[0], function_arg_count(fn)))
    # logic for more than 1 function
    elif len(functions_to_apply) > 1 and len(iterables_to_run) == 1:
        return multi_ops(*(iterables_to_run + functions_to_apply))
    else:
        raise ValueError('invalid usage of map()')
"def",
"map",
"(",
"*",
"args",
")",
":",
"functions_to_apply",
"=",
"[",
"i",
"for",
"i",
"in",
"args",
"if",
"callable",
"(",
"i",
")",
"]",
"iterables_to_run",
"=",
"[",
"i",
"for",
"i",
"in",
"args",
"if",
"not",
"callable",
"(",
"i",
")",
"]",
"#print('functions_to_apply:',functions_to_apply)",
"#print('iterables_to_run:',iterables_to_run)",
"assert",
"len",
"(",
"functions_to_apply",
")",
">",
"0",
",",
"'at least one function needs to be given to map'",
"assert",
"len",
"(",
"iterables_to_run",
")",
">",
"0",
",",
"'no iterables were given to map'",
"# check for native map usage",
"if",
"len",
"(",
"functions_to_apply",
")",
"==",
"1",
"and",
"len",
"(",
"iterables_to_run",
")",
">=",
"1",
"and",
"function_arg_count",
"(",
"*",
"functions_to_apply",
")",
"==",
"1",
":",
"if",
"hasattr",
"(",
"iter",
"(",
"[",
"]",
")",
",",
"'__next__'",
")",
":",
"# if python 3",
"return",
"__builtins__",
".",
"map",
"(",
"functions_to_apply",
"[",
"0",
"]",
",",
"*",
"iterables_to_run",
")",
"else",
":",
"return",
"iter",
"(",
"__builtins__",
".",
"map",
"(",
"functions_to_apply",
"[",
"0",
"]",
",",
"*",
"iterables_to_run",
")",
")",
"# ---------------------------- new logic below ----------------------------",
"# logic for a single function",
"elif",
"len",
"(",
"functions_to_apply",
")",
"==",
"1",
":",
"fn",
"=",
"functions_to_apply",
"[",
"0",
"]",
"# if there is a single iterable, chop it up",
"if",
"len",
"(",
"iterables_to_run",
")",
"==",
"1",
":",
"return",
"(",
"fn",
"(",
"*",
"i",
")",
"for",
"i",
"in",
"window",
"(",
"iterables_to_run",
"[",
"0",
"]",
",",
"function_arg_count",
"(",
"functions_to_apply",
"[",
"0",
"]",
")",
")",
")",
"# logic for more than 1 function",
"elif",
"len",
"(",
"functions_to_apply",
")",
">",
"1",
"and",
"len",
"(",
"iterables_to_run",
")",
"==",
"1",
":",
"return",
"multi_ops",
"(",
"*",
"(",
"iterables_to_run",
"+",
"functions_to_apply",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'invalid usage of map()'",
")"
] | 49.575758 | 23.424242 |
def predict_noiseless(self, Xnew, full_cov=False, kern=None):
        """
        Predict the underlying (noise-free) function f at the new point(s) Xnew.
        :param Xnew: The points at which to make a prediction
        :type Xnew: np.ndarray (Nnew x self.input_dim)
        :param full_cov: whether to return the full covariance matrix, or just the diagonal
        :type full_cov: bool
        :param kern: The kernel to use for prediction (defaults to the model kern).
        :returns: (mean, var):
            mean: posterior mean, a Numpy array, Nnew x self.input_dim
            var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise
            If full_cov and self.input_dim > 1, the return shape of var is Nnew x Nnew x self.input_dim.
            If self.input_dim == 1, the return shape is Nnew x Nnew.
            This is to allow for different normalizations of the output dimensions.
        """
        # Latent-function prediction, then undo any output normalization.
        mean, variance = self._raw_predict(Xnew, full_cov=full_cov, kern=kern)
        if self.normalizer is not None:
            mean = self.normalizer.inverse_mean(mean)
            variance = self.normalizer.inverse_variance(variance)
        return mean, variance
"def",
"predict_noiseless",
"(",
"self",
",",
"Xnew",
",",
"full_cov",
"=",
"False",
",",
"kern",
"=",
"None",
")",
":",
"# Predict the latent function values",
"mu",
",",
"var",
"=",
"self",
".",
"_raw_predict",
"(",
"Xnew",
",",
"full_cov",
"=",
"full_cov",
",",
"kern",
"=",
"kern",
")",
"# Un-apply normalization",
"if",
"self",
".",
"normalizer",
"is",
"not",
"None",
":",
"mu",
",",
"var",
"=",
"self",
".",
"normalizer",
".",
"inverse_mean",
"(",
"mu",
")",
",",
"self",
".",
"normalizer",
".",
"inverse_variance",
"(",
"var",
")",
"return",
"mu",
",",
"var"
] | 47 | 28.384615 |
def resolve_realms(self, attributes):
        """
        Resolve realm settings into [realm_cls, account_store_cls, verifiers]
        triples.
        The format of realm settings is:
        {'name_of_realm':
            {'cls': 'location to realm class',
             'account_store': 'location to realm account_store class'}}
        - 'name of realm' is a label used for internal tracking
        - 'cls' and 'account_store' are static key names and are not to be changed
        - the location of classes should follow dotted notation: pkg.module.class
        """
        resolved = []
        for realm_name, realm_settings in attributes['realms'].items():
            verifiers = {}
            authc = realm_settings.get('authc_verifiers')
            if authc:
                # Normalize to a list so a lone verifier and a list of
                # verifiers are handled the same way.
                if not isinstance(authc, list):
                    authc = [authc]
                verifiers['authc_verifiers'] = tuple(
                    maybe_resolve(verifier)(self.settings) for verifier in authc)
            authz = realm_settings.get('authz_verifier')
            if authz:
                authz_cls = maybe_resolve(authz)
                if authz_cls:
                    verifiers['permission_verifier'] = maybe_resolve(authz_cls)()
            resolved.append([maybe_resolve(realm_name),
                             maybe_resolve(realm_settings['account_store']),
                             verifiers])
        return resolved
"def",
"resolve_realms",
"(",
"self",
",",
"attributes",
")",
":",
"realms",
"=",
"[",
"]",
"for",
"realm",
",",
"realm_attributes",
"in",
"attributes",
"[",
"'realms'",
"]",
".",
"items",
"(",
")",
":",
"realm_cls",
"=",
"maybe_resolve",
"(",
"realm",
")",
"account_store_cls",
"=",
"maybe_resolve",
"(",
"realm_attributes",
"[",
"'account_store'",
"]",
")",
"verifiers",
"=",
"{",
"}",
"authc_verifiers",
"=",
"realm_attributes",
".",
"get",
"(",
"'authc_verifiers'",
")",
"if",
"authc_verifiers",
":",
"if",
"isinstance",
"(",
"authc_verifiers",
",",
"list",
")",
":",
"authc_verifiers_cls",
"=",
"tuple",
"(",
"maybe_resolve",
"(",
"verifier",
")",
"(",
"self",
".",
"settings",
")",
"for",
"verifier",
"in",
"authc_verifiers",
")",
"else",
":",
"authc_verifiers_cls",
"=",
"tuple",
"(",
"[",
"maybe_resolve",
"(",
"authc_verifiers",
")",
"(",
"self",
".",
"settings",
")",
"]",
")",
"verifiers",
"[",
"'authc_verifiers'",
"]",
"=",
"authc_verifiers_cls",
"authz_verifier",
"=",
"realm_attributes",
".",
"get",
"(",
"'authz_verifier'",
")",
"if",
"authz_verifier",
":",
"permission_verifier_cls",
"=",
"maybe_resolve",
"(",
"authz_verifier",
")",
"if",
"permission_verifier_cls",
":",
"verifiers",
"[",
"'permission_verifier'",
"]",
"=",
"maybe_resolve",
"(",
"permission_verifier_cls",
")",
"(",
")",
"realms",
".",
"append",
"(",
"[",
"realm_cls",
",",
"account_store_cls",
",",
"verifiers",
"]",
")",
"return",
"realms"
] | 43.918919 | 26.351351 |
def _padding_model_number(number, max_num):
'''
This method returns a zero-front padded string
It makes out of str(45) -> '0045' if 999 < max_num < 10000. This is
meant to work for reasonable integers (maybe less than 10^6).
Parameters
----------
number : integer
number that the string should represent.
max_num : integer
max number of cycle list, implies how many 0s have be padded
'''
cnum = str(number)
clen = len(cnum)
cmax = int(log10(max_num)) + 1
return (cmax - clen)*'0' + cnum | [
"def",
"_padding_model_number",
"(",
"number",
",",
"max_num",
")",
":",
"cnum",
"=",
"str",
"(",
"number",
")",
"clen",
"=",
"len",
"(",
"cnum",
")",
"cmax",
"=",
"int",
"(",
"log10",
"(",
"max_num",
")",
")",
"+",
"1",
"return",
"(",
"cmax",
"-",
"clen",
")",
"*",
"'0'",
"+",
"cnum"
] | 24.5 | 25.045455 |
def create_function_f_c(self):
        """Assemble the condition function ``f_c`` as a ``ca.Function``.

        Inputs are (t, x, y, m, p, ng, nu); the single output is
        labelled 'c'.
        """
        inputs = [self.t, self.x, self.y, self.m, self.p, self.ng, self.nu]
        input_names = ['t', 'x', 'y', 'm', 'p', 'ng', 'nu']
        return ca.Function('f_c', inputs, [self.f_c], input_names, ['c'],
                           self.func_opt)
"def",
"create_function_f_c",
"(",
"self",
")",
":",
"return",
"ca",
".",
"Function",
"(",
"'f_c'",
",",
"[",
"self",
".",
"t",
",",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"self",
".",
"m",
",",
"self",
".",
"p",
",",
"self",
".",
"ng",
",",
"self",
".",
"nu",
"]",
",",
"[",
"self",
".",
"f_c",
"]",
",",
"[",
"'t'",
",",
"'x'",
",",
"'y'",
",",
"'m'",
",",
"'p'",
",",
"'ng'",
",",
"'nu'",
"]",
",",
"[",
"'c'",
"]",
",",
"self",
".",
"func_opt",
")"
] | 39 | 17.857143 |
def multinomial_resample(weights):
    """ This is the naive form of roulette sampling where we compute the
    cumulative sum of the weights and then use binary search to select the
    resampled point based on a uniformly distributed random number. Run time
    is O(n log n). You do not want to use this algorithm in practice; for some
    reason it is popular in blogs and online courses so I included it for
    reference.
    Parameters
    ----------
    weights : list-like of float
        list of weights as floats
    Returns
    -------
    indexes : ndarray of ints
        array of indexes into the weights defining the resample. i.e. the
        index of the zeroth resample is indexes[0], etc.
    """
    cdf = np.cumsum(weights)
    # Guard against round-off: the final bin must end exactly at 1.
    cdf[-1] = 1.
    draws = random(len(weights))
    return np.searchsorted(cdf, draws)
"def",
"multinomial_resample",
"(",
"weights",
")",
":",
"cumulative_sum",
"=",
"np",
".",
"cumsum",
"(",
"weights",
")",
"cumulative_sum",
"[",
"-",
"1",
"]",
"=",
"1.",
"# avoid round-off errors: ensures sum is exactly one",
"return",
"np",
".",
"searchsorted",
"(",
"cumulative_sum",
",",
"random",
"(",
"len",
"(",
"weights",
")",
")",
")"
] | 36.791667 | 24.416667 |
def db_exists(cls, impl, working_dir):
        """
        Report whether the chainstate db is already present on disk.

        :param impl: state-engine implementation (forwarded to config)
        :param working_dir: directory holding the chainstate files
        :return: True if the snapshots file exists, False otherwise
        """
        return os.path.exists(config.get_snapshots_filename(impl, working_dir))
"def",
"db_exists",
"(",
"cls",
",",
"impl",
",",
"working_dir",
")",
":",
"path",
"=",
"config",
".",
"get_snapshots_filename",
"(",
"impl",
",",
"working_dir",
")",
"return",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")"
] | 32.5 | 5.5 |
def y0(self):
        """Y-axis coordinate of the first data point
        :type: `~astropy.units.Quantity` scalar
        """
        # Lazily create a zero default the first time this is read.
        if not hasattr(self, '_y0'):
            self._y0 = Quantity(0, self.yunit)
        return self._y0
"def",
"y0",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_y0",
"except",
"AttributeError",
":",
"self",
".",
"_y0",
"=",
"Quantity",
"(",
"0",
",",
"self",
".",
"yunit",
")",
"return",
"self",
".",
"_y0"
] | 26.5 | 14.4 |
def uservoice(parser, token):
    """
    UserVoice tracking template tag.
    Renders Javascript code to track page visits. You must supply
    your UserVoice Widget Key in the ``USERVOICE_WIDGET_KEY``
    setting or the ``uservoice_widget_key`` template context variable.
    """
    tag_parts = token.split_contents()
    # The tag is argument-free: anything beyond the tag name is an error.
    if len(tag_parts) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % tag_parts[0])
    return UserVoiceNode()
"def",
"uservoice",
"(",
"parser",
",",
"token",
")",
":",
"bits",
"=",
"token",
".",
"split_contents",
"(",
")",
"if",
"len",
"(",
"bits",
")",
">",
"1",
":",
"raise",
"TemplateSyntaxError",
"(",
"\"'%s' takes no arguments\"",
"%",
"bits",
"[",
"0",
"]",
")",
"return",
"UserVoiceNode",
"(",
")"
] | 35.5 | 16.833333 |
def _spill(self):
        """
        dump already partitioned data into disks.

        Writes one file per partition under a fresh spill directory and
        updates the module-level MemoryBytesSpilled / DiskBytesSpilled
        counters.  Two cases:

        * ``self.pdata`` is empty: ``self.data`` has not been partitioned
          yet, so its items are streamed into per-partition files in a
          single pass, after which ``self.pdata`` is initialised with one
          empty dict per partition.
        * ``self.pdata`` is populated: each per-partition dict is dumped
          to its partition file and then cleared.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        # Each spill round gets its own directory, keyed by spill count.
        path = self._get_spill_dir(self.spills)
        if not os.path.exists(path):
            os.makedirs(path)
        used_memory = get_used_memory()
        if not self.pdata:
            # The data has not been partitioned, it will iterator the
            # data once, write them into different files, has no
            # additional memory. It only called when the memory goes
            # above limit at the first time.
            # open all the files for writing
            streams = [open(os.path.join(path, str(i)), 'wb')
                       for i in range(self.partitions)]
            # If the number of keys is small, then the overhead of sort is small
            # sort them before dumping into disks
            self._sorted = len(self.data) < self.SORT_KEY_LIMIT
            if self._sorted:
                self.serializer = self.flattened_serializer()
                for k in sorted(self.data.keys()):
                    h = self._partition(k)
                    self.serializer.dump_stream([(k, self.data[k])], streams[h])
            else:
                for k, v in self.data.items():
                    h = self._partition(k)
                    self.serializer.dump_stream([(k, v)], streams[h])
            for s in streams:
                # tell() is the number of bytes written to this partition file.
                DiskBytesSpilled += s.tell()
                s.close()
            self.data.clear()
            # self.pdata is cached in `mergeValues` and `mergeCombiners`
            self.pdata.extend([{} for i in range(self.partitions)])
        else:
            for i in range(self.partitions):
                p = os.path.join(path, str(i))
                with open(p, "wb") as f:
                    # dump items in batch
                    if self._sorted:
                        # sort by key only (stable)
                        sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0))
                        self.serializer.dump_stream(sorted_items, f)
                    else:
                        self.serializer.dump_stream(self.pdata[i].items(), f)
                self.pdata[i].clear()
                DiskBytesSpilled += os.path.getsize(p)
        self.spills += 1
        gc.collect()  # release the memory as much as possible
        # << 20 converts MiB to bytes (get_used_memory appears to report
        # MiB — confirm against its definition).
        MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
"def",
"_spill",
"(",
"self",
")",
":",
"global",
"MemoryBytesSpilled",
",",
"DiskBytesSpilled",
"path",
"=",
"self",
".",
"_get_spill_dir",
"(",
"self",
".",
"spills",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"os",
".",
"makedirs",
"(",
"path",
")",
"used_memory",
"=",
"get_used_memory",
"(",
")",
"if",
"not",
"self",
".",
"pdata",
":",
"# The data has not been partitioned, it will iterator the",
"# data once, write them into different files, has no",
"# additional memory. It only called when the memory goes",
"# above limit at the first time.",
"# open all the files for writing",
"streams",
"=",
"[",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"i",
")",
")",
",",
"'wb'",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
"]",
"# If the number of keys is small, then the overhead of sort is small",
"# sort them before dumping into disks",
"self",
".",
"_sorted",
"=",
"len",
"(",
"self",
".",
"data",
")",
"<",
"self",
".",
"SORT_KEY_LIMIT",
"if",
"self",
".",
"_sorted",
":",
"self",
".",
"serializer",
"=",
"self",
".",
"flattened_serializer",
"(",
")",
"for",
"k",
"in",
"sorted",
"(",
"self",
".",
"data",
".",
"keys",
"(",
")",
")",
":",
"h",
"=",
"self",
".",
"_partition",
"(",
"k",
")",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"[",
"(",
"k",
",",
"self",
".",
"data",
"[",
"k",
"]",
")",
"]",
",",
"streams",
"[",
"h",
"]",
")",
"else",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"data",
".",
"items",
"(",
")",
":",
"h",
"=",
"self",
".",
"_partition",
"(",
"k",
")",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"[",
"(",
"k",
",",
"v",
")",
"]",
",",
"streams",
"[",
"h",
"]",
")",
"for",
"s",
"in",
"streams",
":",
"DiskBytesSpilled",
"+=",
"s",
".",
"tell",
"(",
")",
"s",
".",
"close",
"(",
")",
"self",
".",
"data",
".",
"clear",
"(",
")",
"# self.pdata is cached in `mergeValues` and `mergeCombiners`",
"self",
".",
"pdata",
".",
"extend",
"(",
"[",
"{",
"}",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
"]",
")",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"partitions",
")",
":",
"p",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"str",
"(",
"i",
")",
")",
"with",
"open",
"(",
"p",
",",
"\"wb\"",
")",
"as",
"f",
":",
"# dump items in batch",
"if",
"self",
".",
"_sorted",
":",
"# sort by key only (stable)",
"sorted_items",
"=",
"sorted",
"(",
"self",
".",
"pdata",
"[",
"i",
"]",
".",
"items",
"(",
")",
",",
"key",
"=",
"operator",
".",
"itemgetter",
"(",
"0",
")",
")",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"sorted_items",
",",
"f",
")",
"else",
":",
"self",
".",
"serializer",
".",
"dump_stream",
"(",
"self",
".",
"pdata",
"[",
"i",
"]",
".",
"items",
"(",
")",
",",
"f",
")",
"self",
".",
"pdata",
"[",
"i",
"]",
".",
"clear",
"(",
")",
"DiskBytesSpilled",
"+=",
"os",
".",
"path",
".",
"getsize",
"(",
"p",
")",
"self",
".",
"spills",
"+=",
"1",
"gc",
".",
"collect",
"(",
")",
"# release the memory as much as possible",
"MemoryBytesSpilled",
"+=",
"max",
"(",
"used_memory",
"-",
"get_used_memory",
"(",
")",
",",
"0",
")",
"<<",
"20"
] | 41.431034 | 18.637931 |
def rsdl_sn(self, U):
        """Compute dual residual normalisation term.
        Overriding this method is required if methods :meth:`cnst_A`,
        :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
        overridden.
        """
        # rho-scaled norm of A^T U.
        transposed = self.cnst_AT(U)
        return self.rho * np.linalg.norm(transposed)
"def",
"rsdl_sn",
"(",
"self",
",",
"U",
")",
":",
"return",
"self",
".",
"rho",
"*",
"np",
".",
"linalg",
".",
"norm",
"(",
"self",
".",
"cnst_AT",
"(",
"U",
")",
")"
] | 32.888889 | 21.444444 |
def _process_wildtypes(self, limit=None):
        """
        This table provides the genotype IDs, name,
        and abbreviation of the wildtype genotypes.
        These are the typical genomic backgrounds...there's about 20 of them.
        http://zfin.org/downloads/wildtypes_fish.txt
        Triples created:
        <genotype id> a GENO:wildtype
        <genotype id> rdfs:label genotype_abbreviation
        <genotype id> dc:description genotype_name
        :param limit: optional row cap; ignored in test mode
        :return: None
        """
        if self.test_mode:
            graph = self.testgraph
        else:
            graph = self.graph
        # model = Model(graph) # unused
        LOG.info("Processing wildtype genotypes")
        line_counter = 0
        geno = Genotype(graph)
        raw = '/'.join((self.rawdir, self.files['wild']['file']))
        with open(raw, 'r', encoding="iso-8859-1") as csvfile:
            filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
            for row in filereader:
                line_counter += 1
                (fish_num, fish_name, fish_abbreviation, genotype_num
                 # , empty
                 ) = row
                # ZDB-FISH-150901-10750	INDO	INDO	ZDB-GENO-980210-32
                fish_id = 'ZFIN:'+fish_num
                genotype_id = 'ZFIN:' + genotype_num.strip()
                background_type = self.globaltt['genomic_background']
                # Add genotype to graph with label and description,
                # as a genomic_background genotype
                unspecified_background = 'ZDB-GENO-030619-2'
                # NOTE(review): genotype_num is passed as the regex *pattern*
                # here (re.match(pattern, string)); a plain equality check
                # looks intended — confirm, since regex metacharacters or a
                # prefix match would behave differently.
                if re.match(genotype_num.strip(), unspecified_background):
                    background_type = self.globaltt['unspecified_genomic_background']
                geno.addGenomicBackground(
                    genotype_id, fish_abbreviation, background_type, fish_name)
                graph.addTriple(fish_id, self.globaltt['has_genotype'], genotype_id)
                # Build the hash for the wild type genotypes.
                self.id_label_map[genotype_id] = fish_abbreviation
                # store these in a special hash to look up later
                self.wildtype_genotypes += [genotype_id]
                if not self.test_mode and limit is not None and line_counter > limit:
                    break
        LOG.info("Done with wildtype genotypes")
        return
"def",
"_process_wildtypes",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"if",
"self",
".",
"test_mode",
":",
"graph",
"=",
"self",
".",
"testgraph",
"else",
":",
"graph",
"=",
"self",
".",
"graph",
"# model = Model(graph) # unused",
"LOG",
".",
"info",
"(",
"\"Processing wildtype genotypes\"",
")",
"line_counter",
"=",
"0",
"geno",
"=",
"Genotype",
"(",
"graph",
")",
"raw",
"=",
"'/'",
".",
"join",
"(",
"(",
"self",
".",
"rawdir",
",",
"self",
".",
"files",
"[",
"'wild'",
"]",
"[",
"'file'",
"]",
")",
")",
"with",
"open",
"(",
"raw",
",",
"'r'",
",",
"encoding",
"=",
"\"iso-8859-1\"",
")",
"as",
"csvfile",
":",
"filereader",
"=",
"csv",
".",
"reader",
"(",
"csvfile",
",",
"delimiter",
"=",
"'\\t'",
",",
"quotechar",
"=",
"'\\\"'",
")",
"for",
"row",
"in",
"filereader",
":",
"line_counter",
"+=",
"1",
"(",
"fish_num",
",",
"fish_name",
",",
"fish_abbreviation",
",",
"genotype_num",
"# , empty",
")",
"=",
"row",
"# ZDB-FISH-150901-10750\tINDO\tINDO\tZDB-GENO-980210-32",
"fish_id",
"=",
"'ZFIN:'",
"+",
"fish_num",
"genotype_id",
"=",
"'ZFIN:'",
"+",
"genotype_num",
".",
"strip",
"(",
")",
"background_type",
"=",
"self",
".",
"globaltt",
"[",
"'genomic_background'",
"]",
"# Add genotype to graph with label and description,",
"# as a genomic_background genotype",
"unspecified_background",
"=",
"'ZDB-GENO-030619-2'",
"if",
"re",
".",
"match",
"(",
"genotype_num",
".",
"strip",
"(",
")",
",",
"unspecified_background",
")",
":",
"background_type",
"=",
"self",
".",
"globaltt",
"[",
"'unspecified_genomic_background'",
"]",
"geno",
".",
"addGenomicBackground",
"(",
"genotype_id",
",",
"fish_abbreviation",
",",
"background_type",
",",
"fish_name",
")",
"graph",
".",
"addTriple",
"(",
"fish_id",
",",
"self",
".",
"globaltt",
"[",
"'has_genotype'",
"]",
",",
"genotype_id",
")",
"# Build the hash for the wild type genotypes.",
"self",
".",
"id_label_map",
"[",
"genotype_id",
"]",
"=",
"fish_abbreviation",
"# store these in a special hash to look up later",
"self",
".",
"wildtype_genotypes",
"+=",
"[",
"genotype_id",
"]",
"if",
"not",
"self",
".",
"test_mode",
"and",
"limit",
"is",
"not",
"None",
"and",
"line_counter",
">",
"limit",
":",
"break",
"LOG",
".",
"info",
"(",
"\"Done with wildtype genotypes\"",
")",
"return"
] | 40.155172 | 21.5 |
def _compute_mixing_probabilities(self):
"""
Compute the mixing probability for each filter.
"""
self.cbar = dot(self.mu, self.M)
for i in range(self.N):
for j in range(self.N):
self.omega[i, j] = (self.M[i, j]*self.mu[i]) / self.cbar[j] | [
"def",
"_compute_mixing_probabilities",
"(",
"self",
")",
":",
"self",
".",
"cbar",
"=",
"dot",
"(",
"self",
".",
"mu",
",",
"self",
".",
"M",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"N",
")",
":",
"for",
"j",
"in",
"range",
"(",
"self",
".",
"N",
")",
":",
"self",
".",
"omega",
"[",
"i",
",",
"j",
"]",
"=",
"(",
"self",
".",
"M",
"[",
"i",
",",
"j",
"]",
"*",
"self",
".",
"mu",
"[",
"i",
"]",
")",
"/",
"self",
".",
"cbar",
"[",
"j",
"]"
] | 33.111111 | 11.555556 |
def multi_mask_sequences(records, slices):
    """
    Replace characters sliced by slices with gap characters.
    """
    for record in records:
        positions = list(range(len(record)))
        # Start from every position and discard those covered by any slice.
        kept = frozenset(positions)
        for sl in slices:
            kept -= frozenset(positions[sl])
        masked = ''.join(ch if idx in kept else '-'
                         for idx, ch in enumerate(str(record.seq)))
        record.seq = Seq(masked)
        yield record
"def",
"multi_mask_sequences",
"(",
"records",
",",
"slices",
")",
":",
"for",
"record",
"in",
"records",
":",
"record_indices",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"record",
")",
")",
")",
"keep_indices",
"=",
"reduce",
"(",
"lambda",
"i",
",",
"s",
":",
"i",
"-",
"frozenset",
"(",
"record_indices",
"[",
"s",
"]",
")",
",",
"slices",
",",
"frozenset",
"(",
"record_indices",
")",
")",
"seq",
"=",
"''",
".",
"join",
"(",
"b",
"if",
"i",
"in",
"keep_indices",
"else",
"'-'",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"str",
"(",
"record",
".",
"seq",
")",
")",
")",
"record",
".",
"seq",
"=",
"Seq",
"(",
"seq",
")",
"yield",
"record"
] | 41.166667 | 14.166667 |
def get_access_information(self, code,  # pylint: disable=W0221
                               update_session=True):
        """Return the access information for an OAuth2 authorization grant.
        :param code: the code received in the request from the OAuth2 server
        :param update_session: when True, store the retrieved token(s) on
            the current session.
        :returns: A dictionary with the key/value pairs for access_token,
            refresh_token and scope. The refresh_token value will be None
            when the OAuth2 grant is not refreshable.
        """
        info = super(AuthenticatedReddit, self).get_access_information(code)
        if update_session:
            self.set_access_credentials(**info)
        return info
"def",
"get_access_information",
"(",
"self",
",",
"code",
",",
"# pylint: disable=W0221",
"update_session",
"=",
"True",
")",
":",
"retval",
"=",
"super",
"(",
"AuthenticatedReddit",
",",
"self",
")",
".",
"get_access_information",
"(",
"code",
")",
"if",
"update_session",
":",
"self",
".",
"set_access_credentials",
"(",
"*",
"*",
"retval",
")",
"return",
"retval"
] | 46.6875 | 22.8125 |
def _launch_process_group(self, process_commands, streams_path):
        """
        Launches processes defined by process_commands, but only
        executes max_concurrency processes at a time; if a process
        completes and there are still outstanding processes to be
        executed, the next processes are run until max_concurrency is
        reached again.

        :param process_commands: iterable of (cmd, tid) pairs, where cmd
            is an argv list for subprocess.Popen and tid an integer task id
        :param streams_path: directory receiving per-task stdout/stderr
            files named <batch>_<time>_tid_<tid>.{o,e}.<tid>
        """
        # Maps live Popen objects -> {'tid', 'stdout', 'stderr'}; the
        # nested helper below closes over and mutates this dict.
        processes = {}
        def check_complete_processes(wait=False):
            """
            Returns True if a process completed, False otherwise.
            Optionally allows waiting for better performance (avoids
            sleep-poll cycle if possible).
            """
            result = False
            # list creates copy of keys, as dict is modified in loop
            for proc in list(processes):
                if wait: proc.wait()
                if proc.poll() is not None:
                    # process is done, free up slot
                    self.debug("Process %d exited with code %d."
                               % (processes[proc]['tid'], proc.poll()))
                    processes[proc]['stdout'].close()
                    processes[proc]['stderr'].close()
                    del processes[proc]
                    result = True
            return result
        for cmd, tid in process_commands:
            self.debug("Starting process %d..." % tid)
            job_timestamp = time.strftime('%H%M%S')
            basename = "%s_%s_tid_%d" % (self.batch_name, job_timestamp, tid)
            stdout_handle = open(os.path.join(streams_path, "%s.o.%d"
                                              % (basename, tid)), "wb")
            stderr_handle = open(os.path.join(streams_path, "%s.e.%d"
                                              % (basename, tid)), "wb")
            proc = subprocess.Popen(cmd, stdout=stdout_handle, stderr=stderr_handle)
            processes[proc] = { 'tid' : tid,
                                'stdout' : stdout_handle,
                                'stderr' : stderr_handle }
            if self.max_concurrency:
                # max_concurrency reached, wait until more slots available
                # (blocking wait is only safe when a single process remains,
                # hence the len(processes)==1 argument)
                while len(processes) >= self.max_concurrency:
                    if not check_complete_processes(len(processes)==1):
                        time.sleep(0.1)
        # Wait for all processes to complete
        while len(processes) > 0:
            if not check_complete_processes(True):
                time.sleep(0.1)
"def",
"_launch_process_group",
"(",
"self",
",",
"process_commands",
",",
"streams_path",
")",
":",
"processes",
"=",
"{",
"}",
"def",
"check_complete_processes",
"(",
"wait",
"=",
"False",
")",
":",
"\"\"\"\n Returns True if a process completed, False otherwise.\n Optionally allows waiting for better performance (avoids\n sleep-poll cycle if possible).\n \"\"\"",
"result",
"=",
"False",
"# list creates copy of keys, as dict is modified in loop",
"for",
"proc",
"in",
"list",
"(",
"processes",
")",
":",
"if",
"wait",
":",
"proc",
".",
"wait",
"(",
")",
"if",
"proc",
".",
"poll",
"(",
")",
"is",
"not",
"None",
":",
"# process is done, free up slot",
"self",
".",
"debug",
"(",
"\"Process %d exited with code %d.\"",
"%",
"(",
"processes",
"[",
"proc",
"]",
"[",
"'tid'",
"]",
",",
"proc",
".",
"poll",
"(",
")",
")",
")",
"processes",
"[",
"proc",
"]",
"[",
"'stdout'",
"]",
".",
"close",
"(",
")",
"processes",
"[",
"proc",
"]",
"[",
"'stderr'",
"]",
".",
"close",
"(",
")",
"del",
"processes",
"[",
"proc",
"]",
"result",
"=",
"True",
"return",
"result",
"for",
"cmd",
",",
"tid",
"in",
"process_commands",
":",
"self",
".",
"debug",
"(",
"\"Starting process %d...\"",
"%",
"tid",
")",
"job_timestamp",
"=",
"time",
".",
"strftime",
"(",
"'%H%M%S'",
")",
"basename",
"=",
"\"%s_%s_tid_%d\"",
"%",
"(",
"self",
".",
"batch_name",
",",
"job_timestamp",
",",
"tid",
")",
"stdout_handle",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"streams_path",
",",
"\"%s.o.%d\"",
"%",
"(",
"basename",
",",
"tid",
")",
")",
",",
"\"wb\"",
")",
"stderr_handle",
"=",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"streams_path",
",",
"\"%s.e.%d\"",
"%",
"(",
"basename",
",",
"tid",
")",
")",
",",
"\"wb\"",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"stdout_handle",
",",
"stderr",
"=",
"stderr_handle",
")",
"processes",
"[",
"proc",
"]",
"=",
"{",
"'tid'",
":",
"tid",
",",
"'stdout'",
":",
"stdout_handle",
",",
"'stderr'",
":",
"stderr_handle",
"}",
"if",
"self",
".",
"max_concurrency",
":",
"# max_concurrency reached, wait until more slots available",
"while",
"len",
"(",
"processes",
")",
">=",
"self",
".",
"max_concurrency",
":",
"if",
"not",
"check_complete_processes",
"(",
"len",
"(",
"processes",
")",
"==",
"1",
")",
":",
"time",
".",
"sleep",
"(",
"0.1",
")",
"# Wait for all processes to complete",
"while",
"len",
"(",
"processes",
")",
">",
"0",
":",
"if",
"not",
"check_complete_processes",
"(",
"True",
")",
":",
"time",
".",
"sleep",
"(",
"0.1",
")"
] | 46.826923 | 17.288462 |
def _call(self, resource, params):
"""Call to get a resource.
:param method: resource to get
:param params: dict with the HTTP parameters needed to get
the given resource
"""
url = self.URL % {'base': self.base_url, 'resource': resource}
if self.api_token:
params[self.PKEY] = self.api_token
logger.debug("Redmine client requests: %s params: %s",
resource, str(params))
r = self.fetch(url, payload=params, verify=False)
return r.text | [
"def",
"_call",
"(",
"self",
",",
"resource",
",",
"params",
")",
":",
"url",
"=",
"self",
".",
"URL",
"%",
"{",
"'base'",
":",
"self",
".",
"base_url",
",",
"'resource'",
":",
"resource",
"}",
"if",
"self",
".",
"api_token",
":",
"params",
"[",
"self",
".",
"PKEY",
"]",
"=",
"self",
".",
"api_token",
"logger",
".",
"debug",
"(",
"\"Redmine client requests: %s params: %s\"",
",",
"resource",
",",
"str",
"(",
"params",
")",
")",
"r",
"=",
"self",
".",
"fetch",
"(",
"url",
",",
"payload",
"=",
"params",
",",
"verify",
"=",
"False",
")",
"return",
"r",
".",
"text"
] | 29.888889 | 19.722222 |
def unarchive(self, experiments=None, archive=None, complete=False,
project_data=False, replace_project_config=False, root=None,
projectname=None, fmt=None, force=False, **kwargs):
"""
Extract archived experiments
Parameters
----------
experiments: list of str
The experiments to extract. If None the current experiment is used
archive: str
The path to an archive to extract the experiments from. If None,
we assume that the path to the archive has been stored in the
configuration when using the :meth:`archive` command
complete: bool
If True, archives are extracted completely, not only the experiment
(implies ``project_data = True``)
project_data: bool
If True, the data for the project is extracted as well
replace_project_config: bool
If True and the project does already exist in the configuration, it
is updated with what is stored in the archive
root: str
An alternative root directory to use. Otherwise the experiment will
be exctracted to
1. the root directory specified in the configuration files
(if the project exists in it) and `replace_project_config` is
False
2. the root directory as stored in the archive
projectname: str
The projectname to use. If None, use the one specified in the
archive
fmt: { 'gztar' | 'bztar' | 'tar' | 'zip' }
The format of the archive. If None, it is inferred
force: bool
If True, force to overwrite the configuration of all experiments
from what is stored in `archive`. Otherwise, the configuration of
the experiments in `archive` are only used if missing in the
current configuration
"""
def extract_file(path):
if atype == 'zip':
return file_obj.open(path)
else:
return file_obj.extractfile(path)
self.app_main(**kwargs)
logger = self.logger
project_config = None
all_exps = self.config.experiments
all_projects = self.config.projects
# ---- set archive
# if archive is None, check for the archives listed in `experiments`
# and raise an error if one has not been archived yet or if they belong
# to different files
if archive is None:
# ---- set experiments
# if experiments is None, use the current experiment. If complete
# is True, this will be replaced below
if experiments is None:
experiments = [self.experiment]
archives = list(filter(utils.isstring,
map(self.is_archived, experiments)))
if len(archives) > 1:
raise ValueError(
'The given experiments belong to multiple archives %s!' % (
', '.join(archives)))
archive = next(iter(archives))
elif not complete and experiments is None:
experiments = [self.experiment]
logger.info('Unarchiving from %s', archive)
# --- infer compression type
ext_map, fmt_map = self._archive_extensions()
if fmt is None:
try:
fmt = next(fmt for ext, fmt in ext_map.items()
if archive.endswith(ext))
except StopIteration:
raise IOError(
"Could not infer archive format of {}! Please specify it "
"manually using the `fmt` parameter!".format(archive))
# if no root directory is specified but a projectname, we take the root
# directory from the configuration if, and only if, the configuration
# should not be replaced
if (root is None and projectname is not None and
not replace_project_config):
all_projects.get(projectname, {}).get('root')
# ---- open the archive
modes = {'bztar': 'r:bz2', 'gztar': 'r:gz', 'tar': 'r', 'zip': 'r'}
atype = 'zip' if fmt == 'zip' else 'tar'
if atype == 'tar':
from tarfile import open as open_file
else:
from zipfile import ZipFile as open_file
file_obj = open_file(archive, modes[fmt])
# ---- if root is None, get it from the archive
if root is None:
fp = extract_file(osp.join('.project', '.project.yml'))
try:
project_config = ordered_yaml_load(fp)
# use the projectname in archive only, if nothing is specified
# here
projectname = projectname or project_config['name']
except:
raise
finally:
fp.close()
# if the projectname is existent in our configuration and already
# specified, use this one
if (projectname in self.config.projects and
not replace_project_config):
root = self.config.projects[projectname].get('root')
else:
root = project_config.get('root')
# if we still don't have it, because it was not specified in the
# archive or the configuration, raise an error
if root is None:
raise ValueError("Could not find a root directory path for the "
"project. Please specify manually!")
logger.info('Root directory for the project: %s', root)
t = str(dt.datetime.now()) # time at the beginning of extraction
config_files = []
def fname_filter(m):
fname = get_fname(m)
if (dir_contains('.project', fname) and
not osp.basename(fname).startswith('.')):
config_files.append(fname)
return (
complete or fname in config_files or
(project_data and not dir_contains('experiments', fname)) or
any(dir_contains(d, fname) for d in dirs))
if not complete:
dirs = [osp.join('experiments', exp) for exp in experiments]
dirs.append(osp.join('.project', '.project.yml'))
dir_contains = partial(utils.dir_contains, exists=False)
if atype == 'zip':
def get_fname(m):
return m
members = list(filter(fname_filter, file_obj.namelist()))
else:
def get_fname(m):
return m.name
members = list(filter(fname_filter, file_obj.getmembers()))
logger.debug('Extracting %s files from archive to %s',
len(members), root)
file_obj.extractall(root, members=members)
# if the project_config yet not has been red, read it now
if not project_config:
with open(osp.join(root, '.project', '.project.yml')) as fp:
project_config = ordered_yaml_load(fp)
if projectname:
project_config['name'] = projectname
else:
projectname = project_config['name']
if projectname not in all_projects or replace_project_config:
all_projects[projectname] = project_config
else:
all_projects[projectname]['root'] = root
# get all experiment names in the archive
arc_exps = [osp.splitext(osp.basename(f))[0] for f in config_files]
if complete:
experiments = arc_exps
else:
for exp in filter(lambda exp: exp not in arc_exps, experiments[:]):
logger.warn('Experiment %s was not found in archive!', exp)
experiments.remove(exp)
for exp in experiments:
if force or exp not in self.config.experiments or self.is_archived(
exp):
with open(osp.join(root, '.project', exp + '.yml')) as fexp:
exp_config = ordered_yaml_load(fexp)
logger.debug('Update configuration for %s', exp)
all_exps[exp] = self.fix_paths(exp_config)
else:
exp_config = all_exps[exp]
exp_config['project'] = projectname
exp_config['timestamps']['unarchive'] = t
logger.debug('Done.') | [
"def",
"unarchive",
"(",
"self",
",",
"experiments",
"=",
"None",
",",
"archive",
"=",
"None",
",",
"complete",
"=",
"False",
",",
"project_data",
"=",
"False",
",",
"replace_project_config",
"=",
"False",
",",
"root",
"=",
"None",
",",
"projectname",
"=",
"None",
",",
"fmt",
"=",
"None",
",",
"force",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"extract_file",
"(",
"path",
")",
":",
"if",
"atype",
"==",
"'zip'",
":",
"return",
"file_obj",
".",
"open",
"(",
"path",
")",
"else",
":",
"return",
"file_obj",
".",
"extractfile",
"(",
"path",
")",
"self",
".",
"app_main",
"(",
"*",
"*",
"kwargs",
")",
"logger",
"=",
"self",
".",
"logger",
"project_config",
"=",
"None",
"all_exps",
"=",
"self",
".",
"config",
".",
"experiments",
"all_projects",
"=",
"self",
".",
"config",
".",
"projects",
"# ---- set archive",
"# if archive is None, check for the archives listed in `experiments`",
"# and raise an error if one has not been archived yet or if they belong",
"# to different files",
"if",
"archive",
"is",
"None",
":",
"# ---- set experiments",
"# if experiments is None, use the current experiment. If complete",
"# is True, this will be replaced below",
"if",
"experiments",
"is",
"None",
":",
"experiments",
"=",
"[",
"self",
".",
"experiment",
"]",
"archives",
"=",
"list",
"(",
"filter",
"(",
"utils",
".",
"isstring",
",",
"map",
"(",
"self",
".",
"is_archived",
",",
"experiments",
")",
")",
")",
"if",
"len",
"(",
"archives",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'The given experiments belong to multiple archives %s!'",
"%",
"(",
"', '",
".",
"join",
"(",
"archives",
")",
")",
")",
"archive",
"=",
"next",
"(",
"iter",
"(",
"archives",
")",
")",
"elif",
"not",
"complete",
"and",
"experiments",
"is",
"None",
":",
"experiments",
"=",
"[",
"self",
".",
"experiment",
"]",
"logger",
".",
"info",
"(",
"'Unarchiving from %s'",
",",
"archive",
")",
"# --- infer compression type",
"ext_map",
",",
"fmt_map",
"=",
"self",
".",
"_archive_extensions",
"(",
")",
"if",
"fmt",
"is",
"None",
":",
"try",
":",
"fmt",
"=",
"next",
"(",
"fmt",
"for",
"ext",
",",
"fmt",
"in",
"ext_map",
".",
"items",
"(",
")",
"if",
"archive",
".",
"endswith",
"(",
"ext",
")",
")",
"except",
"StopIteration",
":",
"raise",
"IOError",
"(",
"\"Could not infer archive format of {}! Please specify it \"",
"\"manually using the `fmt` parameter!\"",
".",
"format",
"(",
"archive",
")",
")",
"# if no root directory is specified but a projectname, we take the root",
"# directory from the configuration if, and only if, the configuration",
"# should not be replaced",
"if",
"(",
"root",
"is",
"None",
"and",
"projectname",
"is",
"not",
"None",
"and",
"not",
"replace_project_config",
")",
":",
"all_projects",
".",
"get",
"(",
"projectname",
",",
"{",
"}",
")",
".",
"get",
"(",
"'root'",
")",
"# ---- open the archive",
"modes",
"=",
"{",
"'bztar'",
":",
"'r:bz2'",
",",
"'gztar'",
":",
"'r:gz'",
",",
"'tar'",
":",
"'r'",
",",
"'zip'",
":",
"'r'",
"}",
"atype",
"=",
"'zip'",
"if",
"fmt",
"==",
"'zip'",
"else",
"'tar'",
"if",
"atype",
"==",
"'tar'",
":",
"from",
"tarfile",
"import",
"open",
"as",
"open_file",
"else",
":",
"from",
"zipfile",
"import",
"ZipFile",
"as",
"open_file",
"file_obj",
"=",
"open_file",
"(",
"archive",
",",
"modes",
"[",
"fmt",
"]",
")",
"# ---- if root is None, get it from the archive",
"if",
"root",
"is",
"None",
":",
"fp",
"=",
"extract_file",
"(",
"osp",
".",
"join",
"(",
"'.project'",
",",
"'.project.yml'",
")",
")",
"try",
":",
"project_config",
"=",
"ordered_yaml_load",
"(",
"fp",
")",
"# use the projectname in archive only, if nothing is specified",
"# here",
"projectname",
"=",
"projectname",
"or",
"project_config",
"[",
"'name'",
"]",
"except",
":",
"raise",
"finally",
":",
"fp",
".",
"close",
"(",
")",
"# if the projectname is existent in our configuration and already",
"# specified, use this one",
"if",
"(",
"projectname",
"in",
"self",
".",
"config",
".",
"projects",
"and",
"not",
"replace_project_config",
")",
":",
"root",
"=",
"self",
".",
"config",
".",
"projects",
"[",
"projectname",
"]",
".",
"get",
"(",
"'root'",
")",
"else",
":",
"root",
"=",
"project_config",
".",
"get",
"(",
"'root'",
")",
"# if we still don't have it, because it was not specified in the",
"# archive or the configuration, raise an error",
"if",
"root",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Could not find a root directory path for the \"",
"\"project. Please specify manually!\"",
")",
"logger",
".",
"info",
"(",
"'Root directory for the project: %s'",
",",
"root",
")",
"t",
"=",
"str",
"(",
"dt",
".",
"datetime",
".",
"now",
"(",
")",
")",
"# time at the beginning of extraction",
"config_files",
"=",
"[",
"]",
"def",
"fname_filter",
"(",
"m",
")",
":",
"fname",
"=",
"get_fname",
"(",
"m",
")",
"if",
"(",
"dir_contains",
"(",
"'.project'",
",",
"fname",
")",
"and",
"not",
"osp",
".",
"basename",
"(",
"fname",
")",
".",
"startswith",
"(",
"'.'",
")",
")",
":",
"config_files",
".",
"append",
"(",
"fname",
")",
"return",
"(",
"complete",
"or",
"fname",
"in",
"config_files",
"or",
"(",
"project_data",
"and",
"not",
"dir_contains",
"(",
"'experiments'",
",",
"fname",
")",
")",
"or",
"any",
"(",
"dir_contains",
"(",
"d",
",",
"fname",
")",
"for",
"d",
"in",
"dirs",
")",
")",
"if",
"not",
"complete",
":",
"dirs",
"=",
"[",
"osp",
".",
"join",
"(",
"'experiments'",
",",
"exp",
")",
"for",
"exp",
"in",
"experiments",
"]",
"dirs",
".",
"append",
"(",
"osp",
".",
"join",
"(",
"'.project'",
",",
"'.project.yml'",
")",
")",
"dir_contains",
"=",
"partial",
"(",
"utils",
".",
"dir_contains",
",",
"exists",
"=",
"False",
")",
"if",
"atype",
"==",
"'zip'",
":",
"def",
"get_fname",
"(",
"m",
")",
":",
"return",
"m",
"members",
"=",
"list",
"(",
"filter",
"(",
"fname_filter",
",",
"file_obj",
".",
"namelist",
"(",
")",
")",
")",
"else",
":",
"def",
"get_fname",
"(",
"m",
")",
":",
"return",
"m",
".",
"name",
"members",
"=",
"list",
"(",
"filter",
"(",
"fname_filter",
",",
"file_obj",
".",
"getmembers",
"(",
")",
")",
")",
"logger",
".",
"debug",
"(",
"'Extracting %s files from archive to %s'",
",",
"len",
"(",
"members",
")",
",",
"root",
")",
"file_obj",
".",
"extractall",
"(",
"root",
",",
"members",
"=",
"members",
")",
"# if the project_config yet not has been red, read it now",
"if",
"not",
"project_config",
":",
"with",
"open",
"(",
"osp",
".",
"join",
"(",
"root",
",",
"'.project'",
",",
"'.project.yml'",
")",
")",
"as",
"fp",
":",
"project_config",
"=",
"ordered_yaml_load",
"(",
"fp",
")",
"if",
"projectname",
":",
"project_config",
"[",
"'name'",
"]",
"=",
"projectname",
"else",
":",
"projectname",
"=",
"project_config",
"[",
"'name'",
"]",
"if",
"projectname",
"not",
"in",
"all_projects",
"or",
"replace_project_config",
":",
"all_projects",
"[",
"projectname",
"]",
"=",
"project_config",
"else",
":",
"all_projects",
"[",
"projectname",
"]",
"[",
"'root'",
"]",
"=",
"root",
"# get all experiment names in the archive",
"arc_exps",
"=",
"[",
"osp",
".",
"splitext",
"(",
"osp",
".",
"basename",
"(",
"f",
")",
")",
"[",
"0",
"]",
"for",
"f",
"in",
"config_files",
"]",
"if",
"complete",
":",
"experiments",
"=",
"arc_exps",
"else",
":",
"for",
"exp",
"in",
"filter",
"(",
"lambda",
"exp",
":",
"exp",
"not",
"in",
"arc_exps",
",",
"experiments",
"[",
":",
"]",
")",
":",
"logger",
".",
"warn",
"(",
"'Experiment %s was not found in archive!'",
",",
"exp",
")",
"experiments",
".",
"remove",
"(",
"exp",
")",
"for",
"exp",
"in",
"experiments",
":",
"if",
"force",
"or",
"exp",
"not",
"in",
"self",
".",
"config",
".",
"experiments",
"or",
"self",
".",
"is_archived",
"(",
"exp",
")",
":",
"with",
"open",
"(",
"osp",
".",
"join",
"(",
"root",
",",
"'.project'",
",",
"exp",
"+",
"'.yml'",
")",
")",
"as",
"fexp",
":",
"exp_config",
"=",
"ordered_yaml_load",
"(",
"fexp",
")",
"logger",
".",
"debug",
"(",
"'Update configuration for %s'",
",",
"exp",
")",
"all_exps",
"[",
"exp",
"]",
"=",
"self",
".",
"fix_paths",
"(",
"exp_config",
")",
"else",
":",
"exp_config",
"=",
"all_exps",
"[",
"exp",
"]",
"exp_config",
"[",
"'project'",
"]",
"=",
"projectname",
"exp_config",
"[",
"'timestamps'",
"]",
"[",
"'unarchive'",
"]",
"=",
"t",
"logger",
".",
"debug",
"(",
"'Done.'",
")"
] | 42.307692 | 20.758974 |
def tailf(
filepath,
lastn=0,
timeout=60,
stopon=None,
encoding="utf8",
delay=0.1
):
"""provide a `tail -f` like function
:param filepath: file to tail -f, absolute path or relative path
:param lastn: lastn line will also be yield
:param timeout: (optional)
stop tail -f when time's up [timeout <= 10min, default = 1min]
:param stopon: (optional) stops when the stopon(output) returns True
:param encoding: (optional) default encoding utf8
:param delay: (optional) sleep if no data is available, default is 0.1s
Usage::
>>> for line in tailf('/tmp/foo'):
... print(line)
...
"bar"
"barz"
"""
if not os.path.isfile(filepath):
raise ShCmdError("[{0}] not exists".format(filepath))
if consts.TIMEOUT_MAX > timeout:
timeout = consts.TIMEOUT_DEFAULT
delay = delay if consts.DELAY_MAX > delay > 0 else consts.DELAY_DEFAULT
if isinstance(stopon, types.FunctionType) is False:
stopon = always_false
logger.info("tail -f {0} begin".format(filepath))
with open(filepath, "rt", encoding=encoding) as file_obj:
lastn_filter = deque(maxlen=lastn)
logger.debug("tail last {0} lines".format(lastn))
for line in file_obj:
lastn_filter.append(line.rstrip())
for line in lastn_filter:
yield line
start = time.time()
while timeout < 0 or (time.time() - start) < timeout:
line = file_obj.readline()
where = file_obj.tell()
if line:
logger.debug("found line: [{0}]".format(line))
yield line
if stopon(line):
break
else:
file_obj.seek(0, os.SEEK_END)
if file_obj.tell() < where:
logger.info("file [{0}] rewinded!".format(filepath))
file_obj.seek(0)
else:
logger.debug("no data, waiting for [{0}]s".format(delay))
time.sleep(delay)
logger.info("tail -f {0} end".format(filepath)) | [
"def",
"tailf",
"(",
"filepath",
",",
"lastn",
"=",
"0",
",",
"timeout",
"=",
"60",
",",
"stopon",
"=",
"None",
",",
"encoding",
"=",
"\"utf8\"",
",",
"delay",
"=",
"0.1",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
":",
"raise",
"ShCmdError",
"(",
"\"[{0}] not exists\"",
".",
"format",
"(",
"filepath",
")",
")",
"if",
"consts",
".",
"TIMEOUT_MAX",
">",
"timeout",
":",
"timeout",
"=",
"consts",
".",
"TIMEOUT_DEFAULT",
"delay",
"=",
"delay",
"if",
"consts",
".",
"DELAY_MAX",
">",
"delay",
">",
"0",
"else",
"consts",
".",
"DELAY_DEFAULT",
"if",
"isinstance",
"(",
"stopon",
",",
"types",
".",
"FunctionType",
")",
"is",
"False",
":",
"stopon",
"=",
"always_false",
"logger",
".",
"info",
"(",
"\"tail -f {0} begin\"",
".",
"format",
"(",
"filepath",
")",
")",
"with",
"open",
"(",
"filepath",
",",
"\"rt\"",
",",
"encoding",
"=",
"encoding",
")",
"as",
"file_obj",
":",
"lastn_filter",
"=",
"deque",
"(",
"maxlen",
"=",
"lastn",
")",
"logger",
".",
"debug",
"(",
"\"tail last {0} lines\"",
".",
"format",
"(",
"lastn",
")",
")",
"for",
"line",
"in",
"file_obj",
":",
"lastn_filter",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"for",
"line",
"in",
"lastn_filter",
":",
"yield",
"line",
"start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"timeout",
"<",
"0",
"or",
"(",
"time",
".",
"time",
"(",
")",
"-",
"start",
")",
"<",
"timeout",
":",
"line",
"=",
"file_obj",
".",
"readline",
"(",
")",
"where",
"=",
"file_obj",
".",
"tell",
"(",
")",
"if",
"line",
":",
"logger",
".",
"debug",
"(",
"\"found line: [{0}]\"",
".",
"format",
"(",
"line",
")",
")",
"yield",
"line",
"if",
"stopon",
"(",
"line",
")",
":",
"break",
"else",
":",
"file_obj",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_END",
")",
"if",
"file_obj",
".",
"tell",
"(",
")",
"<",
"where",
":",
"logger",
".",
"info",
"(",
"\"file [{0}] rewinded!\"",
".",
"format",
"(",
"filepath",
")",
")",
"file_obj",
".",
"seek",
"(",
"0",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"no data, waiting for [{0}]s\"",
".",
"format",
"(",
"delay",
")",
")",
"time",
".",
"sleep",
"(",
"delay",
")",
"logger",
".",
"info",
"(",
"\"tail -f {0} end\"",
".",
"format",
"(",
"filepath",
")",
")"
] | 32.015385 | 20.030769 |
def matmul(a, b, output_shape=None, reduced_dims=None, name=None):
"""Alias for einsum([a, b])."""
return einsum(
[a, b], output_shape=output_shape, reduced_dims=reduced_dims, name=name) | [
"def",
"matmul",
"(",
"a",
",",
"b",
",",
"output_shape",
"=",
"None",
",",
"reduced_dims",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"return",
"einsum",
"(",
"[",
"a",
",",
"b",
"]",
",",
"output_shape",
"=",
"output_shape",
",",
"reduced_dims",
"=",
"reduced_dims",
",",
"name",
"=",
"name",
")"
] | 48.25 | 22 |
def get_asn_whois(self, retry_count=3):
"""
The function for retrieving ASN information for an IP address from
Cymru via port 43/tcp (WHOIS).
Args:
retry_count (:obj:`int`): The number of times to retry in case
socket errors, timeouts, connection resets, etc. are
encountered. Defaults to 3.
Returns:
str: The raw ASN data.
Raises:
ASNLookupError: The ASN lookup failed.
"""
try:
# Create the connection for the Cymru whois query.
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.settimeout(self.timeout)
log.debug('ASN query for {0}'.format(self.address_str))
conn.connect((CYMRU_WHOIS, 43))
# Query the Cymru whois server, and store the results.
conn.send((
' -r -a -c -p -f {0}{1}'.format(
self.address_str, '\r\n')
).encode())
data = ''
while True:
d = conn.recv(4096).decode()
data += d
if not d:
break
conn.close()
return str(data)
except (socket.timeout, socket.error) as e: # pragma: no cover
log.debug('ASN query socket error: {0}'.format(e))
if retry_count > 0:
log.debug('ASN query retrying (count: {0})'.format(
str(retry_count)))
return self.get_asn_whois(retry_count - 1)
else:
raise ASNLookupError(
'ASN lookup failed for {0}.'.format(self.address_str)
)
except: # pragma: no cover
raise ASNLookupError(
'ASN lookup failed for {0}.'.format(self.address_str)
) | [
"def",
"get_asn_whois",
"(",
"self",
",",
"retry_count",
"=",
"3",
")",
":",
"try",
":",
"# Create the connection for the Cymru whois query.",
"conn",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"conn",
".",
"settimeout",
"(",
"self",
".",
"timeout",
")",
"log",
".",
"debug",
"(",
"'ASN query for {0}'",
".",
"format",
"(",
"self",
".",
"address_str",
")",
")",
"conn",
".",
"connect",
"(",
"(",
"CYMRU_WHOIS",
",",
"43",
")",
")",
"# Query the Cymru whois server, and store the results.",
"conn",
".",
"send",
"(",
"(",
"' -r -a -c -p -f {0}{1}'",
".",
"format",
"(",
"self",
".",
"address_str",
",",
"'\\r\\n'",
")",
")",
".",
"encode",
"(",
")",
")",
"data",
"=",
"''",
"while",
"True",
":",
"d",
"=",
"conn",
".",
"recv",
"(",
"4096",
")",
".",
"decode",
"(",
")",
"data",
"+=",
"d",
"if",
"not",
"d",
":",
"break",
"conn",
".",
"close",
"(",
")",
"return",
"str",
"(",
"data",
")",
"except",
"(",
"socket",
".",
"timeout",
",",
"socket",
".",
"error",
")",
"as",
"e",
":",
"# pragma: no cover",
"log",
".",
"debug",
"(",
"'ASN query socket error: {0}'",
".",
"format",
"(",
"e",
")",
")",
"if",
"retry_count",
">",
"0",
":",
"log",
".",
"debug",
"(",
"'ASN query retrying (count: {0})'",
".",
"format",
"(",
"str",
"(",
"retry_count",
")",
")",
")",
"return",
"self",
".",
"get_asn_whois",
"(",
"retry_count",
"-",
"1",
")",
"else",
":",
"raise",
"ASNLookupError",
"(",
"'ASN lookup failed for {0}.'",
".",
"format",
"(",
"self",
".",
"address_str",
")",
")",
"except",
":",
"# pragma: no cover",
"raise",
"ASNLookupError",
"(",
"'ASN lookup failed for {0}.'",
".",
"format",
"(",
"self",
".",
"address_str",
")",
")"
] | 28 | 23.2 |
def value_to_db(self, value):
""" Returns field's single value prepared for saving into a database. """
assert isinstance(value, datetime.datetime)
try:
value = value - datetime.datetime(1970, 1, 1)
except OverflowError:
raise tldap.exceptions.ValidationError("is too big a date")
value = value.seconds + value.days * 24 * 3600
value = str(value).encode("utf_8")
return value | [
"def",
"value_to_db",
"(",
"self",
",",
"value",
")",
":",
"assert",
"isinstance",
"(",
"value",
",",
"datetime",
".",
"datetime",
")",
"try",
":",
"value",
"=",
"value",
"-",
"datetime",
".",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
"except",
"OverflowError",
":",
"raise",
"tldap",
".",
"exceptions",
".",
"ValidationError",
"(",
"\"is too big a date\"",
")",
"value",
"=",
"value",
".",
"seconds",
"+",
"value",
".",
"days",
"*",
"24",
"*",
"3600",
"value",
"=",
"str",
"(",
"value",
")",
".",
"encode",
"(",
"\"utf_8\"",
")",
"return",
"value"
] | 34.307692 | 20.384615 |
def render(self, url, template=None, expiration=0):
"""
Render feed template
"""
template = template or self.default_template
return render_to_string(template, self.get_context(url, expiration)) | [
"def",
"render",
"(",
"self",
",",
"url",
",",
"template",
"=",
"None",
",",
"expiration",
"=",
"0",
")",
":",
"template",
"=",
"template",
"or",
"self",
".",
"default_template",
"return",
"render_to_string",
"(",
"template",
",",
"self",
".",
"get_context",
"(",
"url",
",",
"expiration",
")",
")"
] | 33.857143 | 14.714286 |
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill.remove()
skills = [s for s in self.skills_data['skills']
if s['name'] != skill.name]
self.skills_data['skills'] = skills
return | [
"def",
"remove",
"(",
"self",
",",
"param",
",",
"author",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"param",
",",
"SkillEntry",
")",
":",
"skill",
"=",
"param",
"else",
":",
"skill",
"=",
"self",
".",
"find_skill",
"(",
"param",
",",
"author",
")",
"skill",
".",
"remove",
"(",
")",
"skills",
"=",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"skills_data",
"[",
"'skills'",
"]",
"if",
"s",
"[",
"'name'",
"]",
"!=",
"skill",
".",
"name",
"]",
"self",
".",
"skills_data",
"[",
"'skills'",
"]",
"=",
"skills",
"return"
] | 34.545455 | 11.181818 |
def custom_indicator_class_factory(indicator_type, base_class, class_dict, value_fields):
"""Internal method for dynamically building Custom Indicator Class."""
value_count = len(value_fields)
def init_1(self, tcex, value1, xid, **kwargs): # pylint: disable=W0641
"""Init method for Custom Indicator Types with one value"""
summary = self.build_summary(value1) # build the indicator summary
base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
for k, v in class_dict.items():
setattr(self, k, v)
def init_2(self, tcex, value1, value2, xid, **kwargs): # pylint: disable=W0641
"""Init method for Custom Indicator Types with two values."""
summary = self.build_summary(value1, value2) # build the indicator summary
base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
for k, v in class_dict.items():
setattr(self, k, v)
def init_3(self, tcex, value1, value2, value3, xid, **kwargs): # pylint: disable=W0641
"""Init method for Custom Indicator Types with three values."""
summary = self.build_summary(value1, value2, value3) # build the indicator summary
base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
for k, v in class_dict.items():
setattr(self, k, v)
class_name = indicator_type.replace(' ', '')
init_method = locals()['init_{}'.format(value_count)]
newclass = type(str(class_name), (base_class,), {'__init__': init_method})
return newclass | [
"def",
"custom_indicator_class_factory",
"(",
"indicator_type",
",",
"base_class",
",",
"class_dict",
",",
"value_fields",
")",
":",
"value_count",
"=",
"len",
"(",
"value_fields",
")",
"def",
"init_1",
"(",
"self",
",",
"tcex",
",",
"value1",
",",
"xid",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0641",
"\"\"\"Init method for Custom Indicator Types with one value\"\"\"",
"summary",
"=",
"self",
".",
"build_summary",
"(",
"value1",
")",
"# build the indicator summary",
"base_class",
".",
"__init__",
"(",
"self",
",",
"tcex",
",",
"indicator_type",
",",
"summary",
",",
"xid",
",",
"*",
"*",
"kwargs",
")",
"for",
"k",
",",
"v",
"in",
"class_dict",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")",
"def",
"init_2",
"(",
"self",
",",
"tcex",
",",
"value1",
",",
"value2",
",",
"xid",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0641",
"\"\"\"Init method for Custom Indicator Types with two values.\"\"\"",
"summary",
"=",
"self",
".",
"build_summary",
"(",
"value1",
",",
"value2",
")",
"# build the indicator summary",
"base_class",
".",
"__init__",
"(",
"self",
",",
"tcex",
",",
"indicator_type",
",",
"summary",
",",
"xid",
",",
"*",
"*",
"kwargs",
")",
"for",
"k",
",",
"v",
"in",
"class_dict",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")",
"def",
"init_3",
"(",
"self",
",",
"tcex",
",",
"value1",
",",
"value2",
",",
"value3",
",",
"xid",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=W0641",
"\"\"\"Init method for Custom Indicator Types with three values.\"\"\"",
"summary",
"=",
"self",
".",
"build_summary",
"(",
"value1",
",",
"value2",
",",
"value3",
")",
"# build the indicator summary",
"base_class",
".",
"__init__",
"(",
"self",
",",
"tcex",
",",
"indicator_type",
",",
"summary",
",",
"xid",
",",
"*",
"*",
"kwargs",
")",
"for",
"k",
",",
"v",
"in",
"class_dict",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"self",
",",
"k",
",",
"v",
")",
"class_name",
"=",
"indicator_type",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"init_method",
"=",
"locals",
"(",
")",
"[",
"'init_{}'",
".",
"format",
"(",
"value_count",
")",
"]",
"newclass",
"=",
"type",
"(",
"str",
"(",
"class_name",
")",
",",
"(",
"base_class",
",",
")",
",",
"{",
"'__init__'",
":",
"init_method",
"}",
")",
"return",
"newclass"
] | 53.517241 | 24.241379 |
def delete(self, path=None, url_kwargs=None, **kwargs):
"""
Sends a PUT request.
:param path:
The HTTP path (either absolute or relative).
:param url_kwargs:
Parameters to override in the generated URL. See `~hyperlink.URL`.
:param **kwargs:
Optional arguments that ``request`` takes.
:return: response object
"""
return self._session.delete(self._url(path, url_kwargs), **kwargs) | [
"def",
"delete",
"(",
"self",
",",
"path",
"=",
"None",
",",
"url_kwargs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_session",
".",
"delete",
"(",
"self",
".",
"_url",
"(",
"path",
",",
"url_kwargs",
")",
",",
"*",
"*",
"kwargs",
")"
] | 36.076923 | 17.461538 |
def ts_describe(self, transport, table):
"""
ts_describe(table)
Retrieve a time series table description from the Riak cluster.
.. note:: This request is automatically retried :attr:`retries`
times if it fails due to network error.
:param table: The timeseries table.
:type table: string or :class:`Table <riak.table.Table>`
:rtype: :class:`TsObject <riak.ts_object.TsObject>`
"""
t = table
if isinstance(t, six.string_types):
t = Table(self, table)
return transport.ts_describe(t) | [
"def",
"ts_describe",
"(",
"self",
",",
"transport",
",",
"table",
")",
":",
"t",
"=",
"table",
"if",
"isinstance",
"(",
"t",
",",
"six",
".",
"string_types",
")",
":",
"t",
"=",
"Table",
"(",
"self",
",",
"table",
")",
"return",
"transport",
".",
"ts_describe",
"(",
"t",
")"
] | 34.058824 | 16.764706 |
def get_server_premaster_secret(self, password_verifier, server_private, client_public, common_secret):
"""S = (A * v^u) ^ b % N
:param int password_verifier:
:param int server_private:
:param int client_public:
:param int common_secret:
:rtype: int
"""
return pow((client_public * pow(password_verifier, common_secret, self._prime)), server_private, self._prime) | [
"def",
"get_server_premaster_secret",
"(",
"self",
",",
"password_verifier",
",",
"server_private",
",",
"client_public",
",",
"common_secret",
")",
":",
"return",
"pow",
"(",
"(",
"client_public",
"*",
"pow",
"(",
"password_verifier",
",",
"common_secret",
",",
"self",
".",
"_prime",
")",
")",
",",
"server_private",
",",
"self",
".",
"_prime",
")"
] | 41.9 | 22.4 |
def update_token(self):
""" Get token from key and secret """
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic ' + base64.b64encode(
(self._key + ':' + self._secret).encode()).decode()
}
data = {'grant_type': 'client_credentials'}
response = requests.post(TOKEN_URL, data=data, headers=headers)
obj = json.loads(response.content.decode('UTF-8'))
self._token = obj['access_token']
self._token_expire_date = (
datetime.now() +
timedelta(minutes=self._expiery)) | [
"def",
"update_token",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"'Content-Type'",
":",
"'application/x-www-form-urlencoded'",
",",
"'Authorization'",
":",
"'Basic '",
"+",
"base64",
".",
"b64encode",
"(",
"(",
"self",
".",
"_key",
"+",
"':'",
"+",
"self",
".",
"_secret",
")",
".",
"encode",
"(",
")",
")",
".",
"decode",
"(",
")",
"}",
"data",
"=",
"{",
"'grant_type'",
":",
"'client_credentials'",
"}",
"response",
"=",
"requests",
".",
"post",
"(",
"TOKEN_URL",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")",
"obj",
"=",
"json",
".",
"loads",
"(",
"response",
".",
"content",
".",
"decode",
"(",
"'UTF-8'",
")",
")",
"self",
".",
"_token",
"=",
"obj",
"[",
"'access_token'",
"]",
"self",
".",
"_token_expire_date",
"=",
"(",
"datetime",
".",
"now",
"(",
")",
"+",
"timedelta",
"(",
"minutes",
"=",
"self",
".",
"_expiery",
")",
")"
] | 41.133333 | 17.066667 |
def get_transaction(self, tx_hash, id=None, endpoint=None):
"""
Look up a transaction by hash.
Args:
tx_hash: (str) hash in the form '58c634f81fbd4ae2733d7e3930a9849021840fc19dc6af064d6f2812a333f91d'
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json: the transaction as a json object
"""
return self._call_endpoint(GET_RAW_TRANSACTION, params=[tx_hash, 1], id=id, endpoint=endpoint) | [
"def",
"get_transaction",
"(",
"self",
",",
"tx_hash",
",",
"id",
"=",
"None",
",",
"endpoint",
"=",
"None",
")",
":",
"return",
"self",
".",
"_call_endpoint",
"(",
"GET_RAW_TRANSACTION",
",",
"params",
"=",
"[",
"tx_hash",
",",
"1",
"]",
",",
"id",
"=",
"id",
",",
"endpoint",
"=",
"endpoint",
")"
] | 45.416667 | 25.75 |
def normalizedFluctuationCorrelationFunction(A_n, B_n=None, N_max=None, norm=True):
"""Compute the normalized fluctuation (cross) correlation function of (two) stationary timeseries.
C(t) = (<A(t) B(t)> - <A><B>) / (<AB> - <A><B>)
This may be useful in diagnosing odd time-correlations in timeseries data.
Parameters
----------
A_n : np.ndarray
A_n[n] is nth value of timeseries A. Length is deduced from vector.
B_n : np.ndarray
B_n[n] is nth value of timeseries B. Length is deduced from vector.
N_max : int, default=None
if specified, will only compute correlation function out to time lag of N_max
norm: bool, optional, default=True
if False will return the unnormalized correlation function D(t) = <A(t) B(t)>
Returns
-------
C_n : np.ndarray
C_n[n] is the normalized fluctuation auto- or cross-correlation function for timeseries A(t) and B(t).
Notes
-----
The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
This procedure may be slow.
The statistical error in C_n[n] will grow with increasing n. No effort is made here to estimate the uncertainty.
References
----------
[1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
Examples
--------
Estimate normalized fluctuation correlation function.
>>> from pymbar import testsystems
>>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0)
>>> C_t = normalizedFluctuationCorrelationFunction(A_t, N_max=25)
"""
# If B_n is not specified, set it to be identical to A_n.
if B_n is None:
B_n = A_n
# Create np copies of input arguments.
A_n = np.array(A_n)
B_n = np.array(B_n)
# Get the length of the timeseries.
N = A_n.size
# Set maximum time to compute correlation functon for.
if (not N_max) or (N_max > N - 1):
N_max = N - 1
# Be sure A_n and B_n have the same dimensions.
if(A_n.shape != B_n.shape):
raise ParameterError('A_n and B_n must have same dimensions.')
# Initialize statistical inefficiency estimate with uncorrelated value.
g = 1.0
# Compute means and variance.
mu_A = A_n.mean()
mu_B = B_n.mean()
# Make temporary copies at high precision with means subtracted off.
dA_n = A_n.astype(np.float64) - mu_A
dB_n = B_n.astype(np.float64) - mu_B
# sigma2_AB = sum((A_n-mu_A) * (B_n-mu_B)) / (float(N)-1.0) # unbiased estimator
sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
if(sigma2_AB == 0):
raise ParameterError('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency')
# allocate storage for normalized fluctuation correlation function
C_n = np.zeros([N_max + 1], np.float64)
# Compute normalized correlation function.
t = 0
for t in range(0, N_max + 1):
# compute normalized fluctuation correlation function at time t
C_n[t] = np.sum(dA_n[0:(N - t)] * dB_n[t:N] + dB_n[0:(N - t)] * dA_n[t:N]) / (2.0 * float(N - t) * sigma2_AB)
# Return the computed correlation function
if norm:
return C_n
else:
return C_n*sigma2_AB + mu_A*mu_B | [
"def",
"normalizedFluctuationCorrelationFunction",
"(",
"A_n",
",",
"B_n",
"=",
"None",
",",
"N_max",
"=",
"None",
",",
"norm",
"=",
"True",
")",
":",
"# If B_n is not specified, set it to be identical to A_n.",
"if",
"B_n",
"is",
"None",
":",
"B_n",
"=",
"A_n",
"# Create np copies of input arguments.",
"A_n",
"=",
"np",
".",
"array",
"(",
"A_n",
")",
"B_n",
"=",
"np",
".",
"array",
"(",
"B_n",
")",
"# Get the length of the timeseries.",
"N",
"=",
"A_n",
".",
"size",
"# Set maximum time to compute correlation functon for.",
"if",
"(",
"not",
"N_max",
")",
"or",
"(",
"N_max",
">",
"N",
"-",
"1",
")",
":",
"N_max",
"=",
"N",
"-",
"1",
"# Be sure A_n and B_n have the same dimensions.",
"if",
"(",
"A_n",
".",
"shape",
"!=",
"B_n",
".",
"shape",
")",
":",
"raise",
"ParameterError",
"(",
"'A_n and B_n must have same dimensions.'",
")",
"# Initialize statistical inefficiency estimate with uncorrelated value.",
"g",
"=",
"1.0",
"# Compute means and variance.",
"mu_A",
"=",
"A_n",
".",
"mean",
"(",
")",
"mu_B",
"=",
"B_n",
".",
"mean",
"(",
")",
"# Make temporary copies at high precision with means subtracted off.",
"dA_n",
"=",
"A_n",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"-",
"mu_A",
"dB_n",
"=",
"B_n",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"-",
"mu_B",
"# sigma2_AB = sum((A_n-mu_A) * (B_n-mu_B)) / (float(N)-1.0) # unbiased estimator",
"sigma2_AB",
"=",
"(",
"dA_n",
"*",
"dB_n",
")",
".",
"mean",
"(",
")",
"# standard estimator to ensure C(0) = 1",
"if",
"(",
"sigma2_AB",
"==",
"0",
")",
":",
"raise",
"ParameterError",
"(",
"'Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency'",
")",
"# allocate storage for normalized fluctuation correlation function",
"C_n",
"=",
"np",
".",
"zeros",
"(",
"[",
"N_max",
"+",
"1",
"]",
",",
"np",
".",
"float64",
")",
"# Compute normalized correlation function.",
"t",
"=",
"0",
"for",
"t",
"in",
"range",
"(",
"0",
",",
"N_max",
"+",
"1",
")",
":",
"# compute normalized fluctuation correlation function at time t",
"C_n",
"[",
"t",
"]",
"=",
"np",
".",
"sum",
"(",
"dA_n",
"[",
"0",
":",
"(",
"N",
"-",
"t",
")",
"]",
"*",
"dB_n",
"[",
"t",
":",
"N",
"]",
"+",
"dB_n",
"[",
"0",
":",
"(",
"N",
"-",
"t",
")",
"]",
"*",
"dA_n",
"[",
"t",
":",
"N",
"]",
")",
"/",
"(",
"2.0",
"*",
"float",
"(",
"N",
"-",
"t",
")",
"*",
"sigma2_AB",
")",
"# Return the computed correlation function",
"if",
"norm",
":",
"return",
"C_n",
"else",
":",
"return",
"C_n",
"*",
"sigma2_AB",
"+",
"mu_A",
"*",
"mu_B"
] | 35.115789 | 28.515789 |
def lockfile(lockfile_name, lock_wait_timeout=-1):
"""
Only runs the method if the lockfile is not acquired.
You should create a setting ``LOCKFILE_PATH`` which points to
``/home/username/tmp/``.
In your management command, use it like so::
LOCKFILE = os.path.join(
settings.LOCKFILE_FOLDER, 'command_name')
class Command(NoArgsCommand):
@lockfile(LOCKFILE)
def handle_noargs(self, **options):
# your command here
:lockfile_name: A unique name for a lockfile that belongs to the wrapped
method.
:lock_wait_timeout: Seconds to wait if lockfile is acquired. If ``-1`` we
will not wait and just quit.
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
lock = FileLock(lockfile_name)
try:
lock.acquire(lock_wait_timeout)
except AlreadyLocked:
return
except LockTimeout:
return
try:
result = func(*args, **kwargs)
finally:
lock.release()
return result
return wrapper
return decorator | [
"def",
"lockfile",
"(",
"lockfile_name",
",",
"lock_wait_timeout",
"=",
"-",
"1",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"lock",
"=",
"FileLock",
"(",
"lockfile_name",
")",
"try",
":",
"lock",
".",
"acquire",
"(",
"lock_wait_timeout",
")",
"except",
"AlreadyLocked",
":",
"return",
"except",
"LockTimeout",
":",
"return",
"try",
":",
"result",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"finally",
":",
"lock",
".",
"release",
"(",
")",
"return",
"result",
"return",
"wrapper",
"return",
"decorator"
] | 28.536585 | 18.04878 |
def unite_dataset(dataset, basecolumn, fn=None):
"""
Unite dataset via fn
Parameters
----------
dataset : list
A list of data
basecolumn : int
A number of column which will be respected in uniting dataset
fn : function
A function which recieve :attr:`data` and return classification string.
It if is None, a function which return the first item of the
:attr:`data` will be used (See ``with_filename`` parameter of
:func:`maidenhair.load` function).
Returns
-------
list
A united dataset
"""
# create default unite_fn
if fn is None:
fn = default_unite_function
# classify dataset via unite_fn
united_dataset = OrderedDict()
for data in dataset:
unite_name = fn(data)
if unite_name not in united_dataset:
united_dataset[unite_name] = []
united_dataset[unite_name].append(data[1:])
# unite dataset via maidenhair.loaders.base.unite_dataset
for name, dataset in united_dataset.items():
united_dataset[name] = _unite_dataset(dataset, basecolumn)[0]
# create new dataset (respect the order of the dataset)
dataset = []
for name, _dataset in united_dataset.items():
dataset.append([name] + _dataset)
return dataset | [
"def",
"unite_dataset",
"(",
"dataset",
",",
"basecolumn",
",",
"fn",
"=",
"None",
")",
":",
"# create default unite_fn",
"if",
"fn",
"is",
"None",
":",
"fn",
"=",
"default_unite_function",
"# classify dataset via unite_fn",
"united_dataset",
"=",
"OrderedDict",
"(",
")",
"for",
"data",
"in",
"dataset",
":",
"unite_name",
"=",
"fn",
"(",
"data",
")",
"if",
"unite_name",
"not",
"in",
"united_dataset",
":",
"united_dataset",
"[",
"unite_name",
"]",
"=",
"[",
"]",
"united_dataset",
"[",
"unite_name",
"]",
".",
"append",
"(",
"data",
"[",
"1",
":",
"]",
")",
"# unite dataset via maidenhair.loaders.base.unite_dataset",
"for",
"name",
",",
"dataset",
"in",
"united_dataset",
".",
"items",
"(",
")",
":",
"united_dataset",
"[",
"name",
"]",
"=",
"_unite_dataset",
"(",
"dataset",
",",
"basecolumn",
")",
"[",
"0",
"]",
"# create new dataset (respect the order of the dataset)",
"dataset",
"=",
"[",
"]",
"for",
"name",
",",
"_dataset",
"in",
"united_dataset",
".",
"items",
"(",
")",
":",
"dataset",
".",
"append",
"(",
"[",
"name",
"]",
"+",
"_dataset",
")",
"return",
"dataset"
] | 32.692308 | 17.923077 |
def cache2errors(function, cache, disp=0, ftol=0.05):
"""
This function will attempt to identify 1 sigma errors, assuming your
function is a chi^2. For this, the 1-sigma is bracketed.
If you were smart enough to build a cache list of [x,y] into your function,
you can pass it here. The values bracketing 1 sigma will be used as
starting values.
If no such values exist, e.g. because all values were very close to the
optimum (good starting values), the bracket is expanded.
"""
vals = numpy.array(sorted(cache, key=lambda x: x[0]))
if disp > 0: print ' --- cache2errors --- ', vals
vi = vals[:,1].min()
def renormedfunc(x):
y = function(x)
cache.append([x, y])
if disp > 1: print ' renormed:', x, y, y - (vi + 1)
return y - (vi + 1)
vals[:,1] -= vi + 1
lowmask = vals[:,1] < 0
highmask = vals[:,1] > 0
indices = numpy.arange(len(vals))
b, vb = vals[indices[lowmask][ 0],:]
c, vc = vals[indices[lowmask][-1],:]
if any(vals[:,0][highmask] < b):
if disp > 0: print 'already have bracket'
a, va = vals[indices[highmask][vals[:,0][highmask] < b][-1],:]
else:
a = b
va = vb
while b > -50:
a = b - max(vals[-1,0] - vals[0,0], 1)
va = renormedfunc(a)
if disp > 0: print 'going further left: %.1f [%.1f] --> %.1f [%.1f]' % (b, vb, a, va)
if va > 0:
if disp > 0: print 'found outer part'
break
else:
# need to go further
b = a
vb = va
if disp > 0: print 'left bracket', a, b, va, vb
if va > 0 and vb < 0:
leftroot = scipy.optimize.brentq(renormedfunc, a, b, rtol=ftol)
else:
if disp > 0: print 'WARNING: border problem found.'
leftroot = a
if disp > 0: print 'left root', leftroot
if any(vals[:,0][highmask] > c):
if disp > 0: print 'already have bracket'
d, vd = vals[indices[highmask][vals[:,0][highmask] > c][ 0],:]
else:
d = c
vd = vc
while c < 50:
d = c + max(vals[-1,0] - vals[0,0], 1)
vd = renormedfunc(d)
if disp > 0: print 'going further right: %.1f [%.1f] --> %.1f [%.1f]' % (c, vc, d, vd)
if vd > 0:
if disp > 0: print 'found outer part'
break
else:
# need to go further
c = d
vc = vd
if disp > 0: print 'right bracket', c, d, vc, vd
if vd > 0 and vc < 0:
rightroot = scipy.optimize.brentq(renormedfunc, c, d, rtol=ftol)
else:
if disp > 0: print 'WARNING: border problem found.'
rightroot = d
if disp > 0: print 'right root', rightroot
assert leftroot < rightroot
if disp > 2:
fullvals = numpy.array(sorted(cache, key=lambda x: x[0]))
fullvals[:,1] -= vi + 1
plt.figure()
plt.plot(fullvals[:,0], fullvals[:,1], 's')
plt.plot(vals[:,0], vals[:,1], 'o')
plt.xlim(a, d)
plt.ylim(min(va, vb, vc, vd), max(va, vb, vc, vd))
ymin, ymax = plt.ylim()
plt.vlines([leftroot, rightroot], ymin, ymax, linestyles='dotted')
plt.savefig('cache_brent.pdf')
return leftroot, rightroot | [
"def",
"cache2errors",
"(",
"function",
",",
"cache",
",",
"disp",
"=",
"0",
",",
"ftol",
"=",
"0.05",
")",
":",
"vals",
"=",
"numpy",
".",
"array",
"(",
"sorted",
"(",
"cache",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
")",
"if",
"disp",
">",
"0",
":",
"print",
"' --- cache2errors --- '",
",",
"vals",
"vi",
"=",
"vals",
"[",
":",
",",
"1",
"]",
".",
"min",
"(",
")",
"def",
"renormedfunc",
"(",
"x",
")",
":",
"y",
"=",
"function",
"(",
"x",
")",
"cache",
".",
"append",
"(",
"[",
"x",
",",
"y",
"]",
")",
"if",
"disp",
">",
"1",
":",
"print",
"' renormed:'",
",",
"x",
",",
"y",
",",
"y",
"-",
"(",
"vi",
"+",
"1",
")",
"return",
"y",
"-",
"(",
"vi",
"+",
"1",
")",
"vals",
"[",
":",
",",
"1",
"]",
"-=",
"vi",
"+",
"1",
"lowmask",
"=",
"vals",
"[",
":",
",",
"1",
"]",
"<",
"0",
"highmask",
"=",
"vals",
"[",
":",
",",
"1",
"]",
">",
"0",
"indices",
"=",
"numpy",
".",
"arange",
"(",
"len",
"(",
"vals",
")",
")",
"b",
",",
"vb",
"=",
"vals",
"[",
"indices",
"[",
"lowmask",
"]",
"[",
"0",
"]",
",",
":",
"]",
"c",
",",
"vc",
"=",
"vals",
"[",
"indices",
"[",
"lowmask",
"]",
"[",
"-",
"1",
"]",
",",
":",
"]",
"if",
"any",
"(",
"vals",
"[",
":",
",",
"0",
"]",
"[",
"highmask",
"]",
"<",
"b",
")",
":",
"if",
"disp",
">",
"0",
":",
"print",
"'already have bracket'",
"a",
",",
"va",
"=",
"vals",
"[",
"indices",
"[",
"highmask",
"]",
"[",
"vals",
"[",
":",
",",
"0",
"]",
"[",
"highmask",
"]",
"<",
"b",
"]",
"[",
"-",
"1",
"]",
",",
":",
"]",
"else",
":",
"a",
"=",
"b",
"va",
"=",
"vb",
"while",
"b",
">",
"-",
"50",
":",
"a",
"=",
"b",
"-",
"max",
"(",
"vals",
"[",
"-",
"1",
",",
"0",
"]",
"-",
"vals",
"[",
"0",
",",
"0",
"]",
",",
"1",
")",
"va",
"=",
"renormedfunc",
"(",
"a",
")",
"if",
"disp",
">",
"0",
":",
"print",
"'going further left: %.1f [%.1f] --> %.1f [%.1f]'",
"%",
"(",
"b",
",",
"vb",
",",
"a",
",",
"va",
")",
"if",
"va",
">",
"0",
":",
"if",
"disp",
">",
"0",
":",
"print",
"'found outer part'",
"break",
"else",
":",
"# need to go further",
"b",
"=",
"a",
"vb",
"=",
"va",
"if",
"disp",
">",
"0",
":",
"print",
"'left bracket'",
",",
"a",
",",
"b",
",",
"va",
",",
"vb",
"if",
"va",
">",
"0",
"and",
"vb",
"<",
"0",
":",
"leftroot",
"=",
"scipy",
".",
"optimize",
".",
"brentq",
"(",
"renormedfunc",
",",
"a",
",",
"b",
",",
"rtol",
"=",
"ftol",
")",
"else",
":",
"if",
"disp",
">",
"0",
":",
"print",
"'WARNING: border problem found.'",
"leftroot",
"=",
"a",
"if",
"disp",
">",
"0",
":",
"print",
"'left root'",
",",
"leftroot",
"if",
"any",
"(",
"vals",
"[",
":",
",",
"0",
"]",
"[",
"highmask",
"]",
">",
"c",
")",
":",
"if",
"disp",
">",
"0",
":",
"print",
"'already have bracket'",
"d",
",",
"vd",
"=",
"vals",
"[",
"indices",
"[",
"highmask",
"]",
"[",
"vals",
"[",
":",
",",
"0",
"]",
"[",
"highmask",
"]",
">",
"c",
"]",
"[",
"0",
"]",
",",
":",
"]",
"else",
":",
"d",
"=",
"c",
"vd",
"=",
"vc",
"while",
"c",
"<",
"50",
":",
"d",
"=",
"c",
"+",
"max",
"(",
"vals",
"[",
"-",
"1",
",",
"0",
"]",
"-",
"vals",
"[",
"0",
",",
"0",
"]",
",",
"1",
")",
"vd",
"=",
"renormedfunc",
"(",
"d",
")",
"if",
"disp",
">",
"0",
":",
"print",
"'going further right: %.1f [%.1f] --> %.1f [%.1f]'",
"%",
"(",
"c",
",",
"vc",
",",
"d",
",",
"vd",
")",
"if",
"vd",
">",
"0",
":",
"if",
"disp",
">",
"0",
":",
"print",
"'found outer part'",
"break",
"else",
":",
"# need to go further",
"c",
"=",
"d",
"vc",
"=",
"vd",
"if",
"disp",
">",
"0",
":",
"print",
"'right bracket'",
",",
"c",
",",
"d",
",",
"vc",
",",
"vd",
"if",
"vd",
">",
"0",
"and",
"vc",
"<",
"0",
":",
"rightroot",
"=",
"scipy",
".",
"optimize",
".",
"brentq",
"(",
"renormedfunc",
",",
"c",
",",
"d",
",",
"rtol",
"=",
"ftol",
")",
"else",
":",
"if",
"disp",
">",
"0",
":",
"print",
"'WARNING: border problem found.'",
"rightroot",
"=",
"d",
"if",
"disp",
">",
"0",
":",
"print",
"'right root'",
",",
"rightroot",
"assert",
"leftroot",
"<",
"rightroot",
"if",
"disp",
">",
"2",
":",
"fullvals",
"=",
"numpy",
".",
"array",
"(",
"sorted",
"(",
"cache",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
")",
"fullvals",
"[",
":",
",",
"1",
"]",
"-=",
"vi",
"+",
"1",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"plot",
"(",
"fullvals",
"[",
":",
",",
"0",
"]",
",",
"fullvals",
"[",
":",
",",
"1",
"]",
",",
"'s'",
")",
"plt",
".",
"plot",
"(",
"vals",
"[",
":",
",",
"0",
"]",
",",
"vals",
"[",
":",
",",
"1",
"]",
",",
"'o'",
")",
"plt",
".",
"xlim",
"(",
"a",
",",
"d",
")",
"plt",
".",
"ylim",
"(",
"min",
"(",
"va",
",",
"vb",
",",
"vc",
",",
"vd",
")",
",",
"max",
"(",
"va",
",",
"vb",
",",
"vc",
",",
"vd",
")",
")",
"ymin",
",",
"ymax",
"=",
"plt",
".",
"ylim",
"(",
")",
"plt",
".",
"vlines",
"(",
"[",
"leftroot",
",",
"rightroot",
"]",
",",
"ymin",
",",
"ymax",
",",
"linestyles",
"=",
"'dotted'",
")",
"plt",
".",
"savefig",
"(",
"'cache_brent.pdf'",
")",
"return",
"leftroot",
",",
"rightroot"
] | 29.98913 | 20.815217 |
def draw_heatmap_array(self, image_shape, alpha_lines=1.0, alpha_points=1.0,
size_lines=1, size_points=0, antialiased=True,
raise_if_out_of_image=False):
"""
Draw the line segments and points of the line string as a heatmap array.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the line mask.
alpha_lines : float, optional
Opacity of the line string. Higher values denote a more visible
line string.
alpha_points : float, optional
Opacity of the line string points. Higher values denote a more
visible points.
size_lines : int, optional
Thickness of the line segments.
size_points : int, optional
Size of the points in pixels.
antialiased : bool, optional
Whether to draw the line with anti-aliasing activated.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Float array of shape `image_shape` (no channel axis) with drawn
line segments and points. All values are in the
interval ``[0.0, 1.0]``.
"""
heatmap_lines = self.draw_lines_heatmap_array(
image_shape,
alpha=alpha_lines,
size=size_lines,
antialiased=antialiased,
raise_if_out_of_image=raise_if_out_of_image)
if size_points <= 0:
return heatmap_lines
heatmap_points = self.draw_points_heatmap_array(
image_shape,
alpha=alpha_points,
size=size_points,
raise_if_out_of_image=raise_if_out_of_image)
heatmap = np.dstack([heatmap_lines, heatmap_points])
return np.max(heatmap, axis=2) | [
"def",
"draw_heatmap_array",
"(",
"self",
",",
"image_shape",
",",
"alpha_lines",
"=",
"1.0",
",",
"alpha_points",
"=",
"1.0",
",",
"size_lines",
"=",
"1",
",",
"size_points",
"=",
"0",
",",
"antialiased",
"=",
"True",
",",
"raise_if_out_of_image",
"=",
"False",
")",
":",
"heatmap_lines",
"=",
"self",
".",
"draw_lines_heatmap_array",
"(",
"image_shape",
",",
"alpha",
"=",
"alpha_lines",
",",
"size",
"=",
"size_lines",
",",
"antialiased",
"=",
"antialiased",
",",
"raise_if_out_of_image",
"=",
"raise_if_out_of_image",
")",
"if",
"size_points",
"<=",
"0",
":",
"return",
"heatmap_lines",
"heatmap_points",
"=",
"self",
".",
"draw_points_heatmap_array",
"(",
"image_shape",
",",
"alpha",
"=",
"alpha_points",
",",
"size",
"=",
"size_points",
",",
"raise_if_out_of_image",
"=",
"raise_if_out_of_image",
")",
"heatmap",
"=",
"np",
".",
"dstack",
"(",
"[",
"heatmap_lines",
",",
"heatmap_points",
"]",
")",
"return",
"np",
".",
"max",
"(",
"heatmap",
",",
"axis",
"=",
"2",
")"
] | 34.810345 | 20.568966 |
def update():
# type: () -> None
""" Update the feature with updates committed to develop.
This will merge current develop into the current branch.
"""
branch = git.current_branch(refresh=True)
develop = conf.get('git.devel_branch', 'develop')
common.assert_branch_type('feature')
common.git_checkout(develop)
common.git_pull(develop)
common.git_checkout(branch.name)
common.git_merge(branch.name, develop) | [
"def",
"update",
"(",
")",
":",
"# type: () -> None",
"branch",
"=",
"git",
".",
"current_branch",
"(",
"refresh",
"=",
"True",
")",
"develop",
"=",
"conf",
".",
"get",
"(",
"'git.devel_branch'",
",",
"'develop'",
")",
"common",
".",
"assert_branch_type",
"(",
"'feature'",
")",
"common",
".",
"git_checkout",
"(",
"develop",
")",
"common",
".",
"git_pull",
"(",
"develop",
")",
"common",
".",
"git_checkout",
"(",
"branch",
".",
"name",
")",
"common",
".",
"git_merge",
"(",
"branch",
".",
"name",
",",
"develop",
")"
] | 31.357143 | 13.5 |
def traverse_json_obj(obj, path=None, callback=None):
"""
Recursively loop through object and perform the function defined
in callback for every element. Only JSON data types are supported.
:param obj: object to traverse
:param path: current path
:param callback: callback executed on every element
:return: potentially altered object
"""
if path is None:
path = []
if isinstance(obj, dict):
value = {k: traverse_json_obj(v, path + [k], callback)
for k, v in obj.items()}
elif isinstance(obj, list):
value = [traverse_json_obj(elem, path + [[]], callback)
for elem in obj]
else:
value = obj
if callback is None:
return value
return callback(value) | [
"def",
"traverse_json_obj",
"(",
"obj",
",",
"path",
"=",
"None",
",",
"callback",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"obj",
",",
"dict",
")",
":",
"value",
"=",
"{",
"k",
":",
"traverse_json_obj",
"(",
"v",
",",
"path",
"+",
"[",
"k",
"]",
",",
"callback",
")",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"items",
"(",
")",
"}",
"elif",
"isinstance",
"(",
"obj",
",",
"list",
")",
":",
"value",
"=",
"[",
"traverse_json_obj",
"(",
"elem",
",",
"path",
"+",
"[",
"[",
"]",
"]",
",",
"callback",
")",
"for",
"elem",
"in",
"obj",
"]",
"else",
":",
"value",
"=",
"obj",
"if",
"callback",
"is",
"None",
":",
"return",
"value",
"return",
"callback",
"(",
"value",
")"
] | 31.5 | 16.75 |
def element(self, inp=None):
"""Return an element from ``inp`` or from scratch."""
if inp is not None:
s = str(inp)[:self.length]
s += ' ' * (self.length - len(s))
return s
else:
return ' ' * self.length | [
"def",
"element",
"(",
"self",
",",
"inp",
"=",
"None",
")",
":",
"if",
"inp",
"is",
"not",
"None",
":",
"s",
"=",
"str",
"(",
"inp",
")",
"[",
":",
"self",
".",
"length",
"]",
"s",
"+=",
"' '",
"*",
"(",
"self",
".",
"length",
"-",
"len",
"(",
"s",
")",
")",
"return",
"s",
"else",
":",
"return",
"' '",
"*",
"self",
".",
"length"
] | 33.5 | 10.375 |
def put_message(self, queue_name, content, visibility_timeout=None,
time_to_live=None, timeout=None):
'''
Adds a new message to the back of the message queue.
The visibility timeout specifies the time that the message will be
invisible. After the timeout expires, the message will become visible.
If a visibility timeout is not specified, the default value of 0 is used.
The message time-to-live specifies how long a message will remain in the
queue. The message will be deleted from the queue when the time-to-live
period expires.
If the key-encryption-key field is set on the local service object, this method will
encrypt the content before uploading.
:param str queue_name:
The name of the queue to put the message into.
:param obj content:
Message content. Allowed type is determined by the encode_function
set on the service. Default is str. The encoded message can be up to
64KB in size.
:param int visibility_timeout:
If not specified, the default value is 0. Specifies the
new visibility timeout value, in seconds, relative to server time.
The value must be larger than or equal to 0, and cannot be
larger than 7 days. The visibility timeout of a message cannot be
set to a value later than the expiry time. visibility_timeout
should be set to a value smaller than the time-to-live value.
:param int time_to_live:
Specifies the time-to-live interval for the message, in
seconds. The time-to-live may be any positive number or -1 for infinity. If this
parameter is omitted, the default time-to-live is 7 days.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A :class:`~azure.storage.queue.models.QueueMessage` object.
This object is also populated with the content although it is not
returned from the service.
:rtype: :class:`~azure.storage.queue.models.QueueMessage`
'''
_validate_encryption_required(self.require_encryption, self.key_encryption_key)
_validate_not_none('queue_name', queue_name)
_validate_not_none('content', content)
request = HTTPRequest()
request.method = 'POST'
request.host_locations = self._get_host_locations()
request.path = _get_path(queue_name, True)
request.query = {
'visibilitytimeout': _to_str(visibility_timeout),
'messagettl': _to_str(time_to_live),
'timeout': _int_to_str(timeout)
}
request.body = _get_request_body(_convert_queue_message_xml(content, self.encode_function,
self.key_encryption_key))
message_list = self._perform_request(request, _convert_xml_to_queue_messages,
[self.decode_function, False,
None, None, content])
return message_list[0] | [
"def",
"put_message",
"(",
"self",
",",
"queue_name",
",",
"content",
",",
"visibility_timeout",
"=",
"None",
",",
"time_to_live",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_encryption_required",
"(",
"self",
".",
"require_encryption",
",",
"self",
".",
"key_encryption_key",
")",
"_validate_not_none",
"(",
"'queue_name'",
",",
"queue_name",
")",
"_validate_not_none",
"(",
"'content'",
",",
"content",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'POST'",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
")",
"request",
".",
"path",
"=",
"_get_path",
"(",
"queue_name",
",",
"True",
")",
"request",
".",
"query",
"=",
"{",
"'visibilitytimeout'",
":",
"_to_str",
"(",
"visibility_timeout",
")",
",",
"'messagettl'",
":",
"_to_str",
"(",
"time_to_live",
")",
",",
"'timeout'",
":",
"_int_to_str",
"(",
"timeout",
")",
"}",
"request",
".",
"body",
"=",
"_get_request_body",
"(",
"_convert_queue_message_xml",
"(",
"content",
",",
"self",
".",
"encode_function",
",",
"self",
".",
"key_encryption_key",
")",
")",
"message_list",
"=",
"self",
".",
"_perform_request",
"(",
"request",
",",
"_convert_xml_to_queue_messages",
",",
"[",
"self",
".",
"decode_function",
",",
"False",
",",
"None",
",",
"None",
",",
"content",
"]",
")",
"return",
"message_list",
"[",
"0",
"]"
] | 49.698413 | 27.349206 |
def wrap_args_with_ssh_agent(self, args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
"""
Given an existing command line and parameterization this will return the same command line wrapped with the
necessary calls to ``ssh-agent``
"""
if ssh_key_path:
ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
if silence_ssh_add:
ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
cmd = ' && '.join([ssh_add_command,
args2cmdline('rm', '-f', ssh_key_path),
args2cmdline(*args)])
args = ['ssh-agent']
if ssh_auth_sock:
args.extend(['-a', ssh_auth_sock])
args.extend(['sh', '-c', cmd])
return args | [
"def",
"wrap_args_with_ssh_agent",
"(",
"self",
",",
"args",
",",
"ssh_key_path",
",",
"ssh_auth_sock",
"=",
"None",
",",
"silence_ssh_add",
"=",
"False",
")",
":",
"if",
"ssh_key_path",
":",
"ssh_add_command",
"=",
"args2cmdline",
"(",
"'ssh-add'",
",",
"ssh_key_path",
")",
"if",
"silence_ssh_add",
":",
"ssh_add_command",
"=",
"' '",
".",
"join",
"(",
"[",
"ssh_add_command",
",",
"'2>/dev/null'",
"]",
")",
"cmd",
"=",
"' && '",
".",
"join",
"(",
"[",
"ssh_add_command",
",",
"args2cmdline",
"(",
"'rm'",
",",
"'-f'",
",",
"ssh_key_path",
")",
",",
"args2cmdline",
"(",
"*",
"args",
")",
"]",
")",
"args",
"=",
"[",
"'ssh-agent'",
"]",
"if",
"ssh_auth_sock",
":",
"args",
".",
"extend",
"(",
"[",
"'-a'",
",",
"ssh_auth_sock",
"]",
")",
"args",
".",
"extend",
"(",
"[",
"'sh'",
",",
"'-c'",
",",
"cmd",
"]",
")",
"return",
"args"
] | 47.882353 | 18.941176 |
def filter_roidb(self):
"""Remove images without usable rois"""
num_roidb = len(self._roidb)
self._roidb = [roi_rec for roi_rec in self._roidb if len(roi_rec['gt_classes'])]
num_after = len(self._roidb)
logger.info('filter roidb: {} -> {}'.format(num_roidb, num_after)) | [
"def",
"filter_roidb",
"(",
"self",
")",
":",
"num_roidb",
"=",
"len",
"(",
"self",
".",
"_roidb",
")",
"self",
".",
"_roidb",
"=",
"[",
"roi_rec",
"for",
"roi_rec",
"in",
"self",
".",
"_roidb",
"if",
"len",
"(",
"roi_rec",
"[",
"'gt_classes'",
"]",
")",
"]",
"num_after",
"=",
"len",
"(",
"self",
".",
"_roidb",
")",
"logger",
".",
"info",
"(",
"'filter roidb: {} -> {}'",
".",
"format",
"(",
"num_roidb",
",",
"num_after",
")",
")"
] | 50.666667 | 17.833333 |
def CargarFormatoPDF(self, archivo="liquidacion_form_c1116b_wslpg.csv"):
"Cargo el formato de campos a generar desde una planilla CSV"
# si no encuentro archivo, lo busco en el directorio predeterminado:
if not os.path.exists(archivo):
archivo = os.path.join(self.InstallDir, "plantillas", os.path.basename(archivo))
if DEBUG: print "abriendo archivo ", archivo
# inicializo la lista de los elementos:
self.elements = []
for lno, linea in enumerate(open(archivo.encode('latin1')).readlines()):
if DEBUG: print "procesando linea ", lno, linea
args = []
for i,v in enumerate(linea.split(";")):
if not v.startswith("'"):
v = v.replace(",",".")
else:
v = v#.decode('latin1')
if v.strip()=='':
v = None
else:
v = eval(v.strip())
args.append(v)
# corrijo path relativo para las imágenes:
if args[1] == 'I':
if not os.path.exists(args[14]):
args[14] = os.path.join(self.InstallDir, "plantillas", os.path.basename(args[14]))
if DEBUG: print "NUEVO PATH:", args[14]
self.AgregarCampoPDF(*args)
self.AgregarCampoPDF("anulado", 'T', 150, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
if HOMO:
self.AgregarCampoPDF("homo", 'T', 100, 250, 0, 0,
size=70, rotate=45, foreground=0x808080,
priority=-1)
# cargo los elementos en la plantilla
self.template.load_elements(self.elements)
return True | [
"def",
"CargarFormatoPDF",
"(",
"self",
",",
"archivo",
"=",
"\"liquidacion_form_c1116b_wslpg.csv\"",
")",
":",
"# si no encuentro archivo, lo busco en el directorio predeterminado:",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"archivo",
")",
":",
"archivo",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"InstallDir",
",",
"\"plantillas\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"archivo",
")",
")",
"if",
"DEBUG",
":",
"print",
"\"abriendo archivo \"",
",",
"archivo",
"# inicializo la lista de los elementos:",
"self",
".",
"elements",
"=",
"[",
"]",
"for",
"lno",
",",
"linea",
"in",
"enumerate",
"(",
"open",
"(",
"archivo",
".",
"encode",
"(",
"'latin1'",
")",
")",
".",
"readlines",
"(",
")",
")",
":",
"if",
"DEBUG",
":",
"print",
"\"procesando linea \"",
",",
"lno",
",",
"linea",
"args",
"=",
"[",
"]",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"linea",
".",
"split",
"(",
"\";\"",
")",
")",
":",
"if",
"not",
"v",
".",
"startswith",
"(",
"\"'\"",
")",
":",
"v",
"=",
"v",
".",
"replace",
"(",
"\",\"",
",",
"\".\"",
")",
"else",
":",
"v",
"=",
"v",
"#.decode('latin1')",
"if",
"v",
".",
"strip",
"(",
")",
"==",
"''",
":",
"v",
"=",
"None",
"else",
":",
"v",
"=",
"eval",
"(",
"v",
".",
"strip",
"(",
")",
")",
"args",
".",
"append",
"(",
"v",
")",
"# corrijo path relativo para las imágenes:",
"if",
"args",
"[",
"1",
"]",
"==",
"'I'",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"args",
"[",
"14",
"]",
")",
":",
"args",
"[",
"14",
"]",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"InstallDir",
",",
"\"plantillas\"",
",",
"os",
".",
"path",
".",
"basename",
"(",
"args",
"[",
"14",
"]",
")",
")",
"if",
"DEBUG",
":",
"print",
"\"NUEVO PATH:\"",
",",
"args",
"[",
"14",
"]",
"self",
".",
"AgregarCampoPDF",
"(",
"*",
"args",
")",
"self",
".",
"AgregarCampoPDF",
"(",
"\"anulado\"",
",",
"'T'",
",",
"150",
",",
"250",
",",
"0",
",",
"0",
",",
"size",
"=",
"70",
",",
"rotate",
"=",
"45",
",",
"foreground",
"=",
"0x808080",
",",
"priority",
"=",
"-",
"1",
")",
"if",
"HOMO",
":",
"self",
".",
"AgregarCampoPDF",
"(",
"\"homo\"",
",",
"'T'",
",",
"100",
",",
"250",
",",
"0",
",",
"0",
",",
"size",
"=",
"70",
",",
"rotate",
"=",
"45",
",",
"foreground",
"=",
"0x808080",
",",
"priority",
"=",
"-",
"1",
")",
"# cargo los elementos en la plantilla",
"self",
".",
"template",
".",
"load_elements",
"(",
"self",
".",
"elements",
")",
"return",
"True"
] | 38.76087 | 21.195652 |
def calculate_error_by_annotation(graph: BELGraph, annotation: str) -> Mapping[str, List[str]]:
"""Group error names by a given annotation."""
results = defaultdict(list)
for _, exc, ctx in graph.warnings:
if not ctx or not edge_has_annotation(ctx, annotation):
continue
values = ctx[ANNOTATIONS][annotation]
if isinstance(values, str):
results[values].append(exc.__class__.__name__)
elif isinstance(values, Iterable):
for value in values:
results[value].append(exc.__class__.__name__)
return dict(results) | [
"def",
"calculate_error_by_annotation",
"(",
"graph",
":",
"BELGraph",
",",
"annotation",
":",
"str",
")",
"->",
"Mapping",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
":",
"results",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"_",
",",
"exc",
",",
"ctx",
"in",
"graph",
".",
"warnings",
":",
"if",
"not",
"ctx",
"or",
"not",
"edge_has_annotation",
"(",
"ctx",
",",
"annotation",
")",
":",
"continue",
"values",
"=",
"ctx",
"[",
"ANNOTATIONS",
"]",
"[",
"annotation",
"]",
"if",
"isinstance",
"(",
"values",
",",
"str",
")",
":",
"results",
"[",
"values",
"]",
".",
"append",
"(",
"exc",
".",
"__class__",
".",
"__name__",
")",
"elif",
"isinstance",
"(",
"values",
",",
"Iterable",
")",
":",
"for",
"value",
"in",
"values",
":",
"results",
"[",
"value",
"]",
".",
"append",
"(",
"exc",
".",
"__class__",
".",
"__name__",
")",
"return",
"dict",
"(",
"results",
")"
] | 33 | 21.333333 |
def configure(self):
"""Configure the device.
Send the device configuration saved inside the MCP342x object to the target device."""
logger.debug('Configuring ' + hex(self.get_address())
+ ' ch: ' + str(self.get_channel())
+ ' res: ' + str(self.get_resolution())
+ ' gain: ' + str(self.get_gain()))
self.bus.write_byte(self.address, self.config) | [
"def",
"configure",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Configuring '",
"+",
"hex",
"(",
"self",
".",
"get_address",
"(",
")",
")",
"+",
"' ch: '",
"+",
"str",
"(",
"self",
".",
"get_channel",
"(",
")",
")",
"+",
"' res: '",
"+",
"str",
"(",
"self",
".",
"get_resolution",
"(",
")",
")",
"+",
"' gain: '",
"+",
"str",
"(",
"self",
".",
"get_gain",
"(",
")",
")",
")",
"self",
".",
"bus",
".",
"write_byte",
"(",
"self",
".",
"address",
",",
"self",
".",
"config",
")"
] | 48.111111 | 16.333333 |
def setup_local_logging(config=None, parallel=None):
"""Setup logging for a local context, directing messages to appropriate base loggers.
Handles local, multiprocessing and distributed setup, connecting
to handlers created by the base logger.
"""
if config is None: config = {}
if parallel is None: parallel = {}
parallel_type = parallel.get("type", "local")
cores = parallel.get("cores", 1)
wrapper = parallel.get("wrapper", None)
if parallel_type == "ipython":
from bcbio.log import logbook_zmqpush
handler = logbook_zmqpush.ZeroMQPushHandler(parallel["log_queue"])
elif cores > 1:
handler = logbook.queues.MultiProcessingHandler(mpq)
else:
handler = _create_log_handler(config, direct_hostname=wrapper is not None, write_toterm=wrapper is None)
handler.push_thread()
return handler | [
"def",
"setup_local_logging",
"(",
"config",
"=",
"None",
",",
"parallel",
"=",
"None",
")",
":",
"if",
"config",
"is",
"None",
":",
"config",
"=",
"{",
"}",
"if",
"parallel",
"is",
"None",
":",
"parallel",
"=",
"{",
"}",
"parallel_type",
"=",
"parallel",
".",
"get",
"(",
"\"type\"",
",",
"\"local\"",
")",
"cores",
"=",
"parallel",
".",
"get",
"(",
"\"cores\"",
",",
"1",
")",
"wrapper",
"=",
"parallel",
".",
"get",
"(",
"\"wrapper\"",
",",
"None",
")",
"if",
"parallel_type",
"==",
"\"ipython\"",
":",
"from",
"bcbio",
".",
"log",
"import",
"logbook_zmqpush",
"handler",
"=",
"logbook_zmqpush",
".",
"ZeroMQPushHandler",
"(",
"parallel",
"[",
"\"log_queue\"",
"]",
")",
"elif",
"cores",
">",
"1",
":",
"handler",
"=",
"logbook",
".",
"queues",
".",
"MultiProcessingHandler",
"(",
"mpq",
")",
"else",
":",
"handler",
"=",
"_create_log_handler",
"(",
"config",
",",
"direct_hostname",
"=",
"wrapper",
"is",
"not",
"None",
",",
"write_toterm",
"=",
"wrapper",
"is",
"None",
")",
"handler",
".",
"push_thread",
"(",
")",
"return",
"handler"
] | 42.75 | 16.65 |
def _get_query_parts(self, query_str, search_options=None):
""" Split a query string into its parts
"""
if search_options is None:
search_options = {}
if query_str is None:
raise NipapValueError("'query_string' must not be None")
# find query parts
query_str_parts = []
try:
for part in shlex.split(query_str.encode('utf-8')):
query_str_parts.append({ 'string': part.decode('utf-8') })
except ValueError as exc:
if unicode(exc) == 'No closing quotation':
raise NipapValueError(unicode(exc))
raise exc
# Handle empty search.
# We need something to iterate over, but shlex.split() returns
# zero-element list for an empty string, so we have to append one
# manually
if len(query_str_parts) == 0:
query_str_parts.append({ 'string': '' })
return query_str_parts | [
"def",
"_get_query_parts",
"(",
"self",
",",
"query_str",
",",
"search_options",
"=",
"None",
")",
":",
"if",
"search_options",
"is",
"None",
":",
"search_options",
"=",
"{",
"}",
"if",
"query_str",
"is",
"None",
":",
"raise",
"NipapValueError",
"(",
"\"'query_string' must not be None\"",
")",
"# find query parts",
"query_str_parts",
"=",
"[",
"]",
"try",
":",
"for",
"part",
"in",
"shlex",
".",
"split",
"(",
"query_str",
".",
"encode",
"(",
"'utf-8'",
")",
")",
":",
"query_str_parts",
".",
"append",
"(",
"{",
"'string'",
":",
"part",
".",
"decode",
"(",
"'utf-8'",
")",
"}",
")",
"except",
"ValueError",
"as",
"exc",
":",
"if",
"unicode",
"(",
"exc",
")",
"==",
"'No closing quotation'",
":",
"raise",
"NipapValueError",
"(",
"unicode",
"(",
"exc",
")",
")",
"raise",
"exc",
"# Handle empty search.",
"# We need something to iterate over, but shlex.split() returns",
"# zero-element list for an empty string, so we have to append one",
"# manually",
"if",
"len",
"(",
"query_str_parts",
")",
"==",
"0",
":",
"query_str_parts",
".",
"append",
"(",
"{",
"'string'",
":",
"''",
"}",
")",
"return",
"query_str_parts"
] | 33.964286 | 19.821429 |
def doc_unwrap(raw_doc):
"""
Applies two transformations to raw_doc:
1. N consecutive newlines are converted into N-1 newlines.
2. A lone newline is converted to a space, which basically unwraps text.
Returns a new string, or None if the input was None.
"""
if raw_doc is None:
return None
docstring = ''
consecutive_newlines = 0
# Remove all leading and trailing whitespace in the documentation block
for c in raw_doc.strip():
if c == '\n':
consecutive_newlines += 1
if consecutive_newlines > 1:
docstring += c
else:
if consecutive_newlines == 1:
docstring += ' '
consecutive_newlines = 0
docstring += c
return docstring | [
"def",
"doc_unwrap",
"(",
"raw_doc",
")",
":",
"if",
"raw_doc",
"is",
"None",
":",
"return",
"None",
"docstring",
"=",
"''",
"consecutive_newlines",
"=",
"0",
"# Remove all leading and trailing whitespace in the documentation block",
"for",
"c",
"in",
"raw_doc",
".",
"strip",
"(",
")",
":",
"if",
"c",
"==",
"'\\n'",
":",
"consecutive_newlines",
"+=",
"1",
"if",
"consecutive_newlines",
">",
"1",
":",
"docstring",
"+=",
"c",
"else",
":",
"if",
"consecutive_newlines",
"==",
"1",
":",
"docstring",
"+=",
"' '",
"consecutive_newlines",
"=",
"0",
"docstring",
"+=",
"c",
"return",
"docstring"
] | 31.791667 | 14.875 |
def display_dataset(self):
"""Update the widget with information about the dataset."""
header = self.dataset.header
self.parent.setWindowTitle(basename(self.filename))
short_filename = short_strings(basename(self.filename))
self.idx_filename.setText(short_filename)
self.idx_s_freq.setText(str(header['s_freq']))
self.idx_n_chan.setText(str(len(header['chan_name'])))
start_time = header['start_time'].strftime('%b-%d %H:%M:%S')
self.idx_start_time.setText(start_time)
end_time = (header['start_time'] +
timedelta(seconds=header['n_samples'] / header['s_freq']))
self.idx_end_time.setText(end_time.strftime('%b-%d %H:%M:%S')) | [
"def",
"display_dataset",
"(",
"self",
")",
":",
"header",
"=",
"self",
".",
"dataset",
".",
"header",
"self",
".",
"parent",
".",
"setWindowTitle",
"(",
"basename",
"(",
"self",
".",
"filename",
")",
")",
"short_filename",
"=",
"short_strings",
"(",
"basename",
"(",
"self",
".",
"filename",
")",
")",
"self",
".",
"idx_filename",
".",
"setText",
"(",
"short_filename",
")",
"self",
".",
"idx_s_freq",
".",
"setText",
"(",
"str",
"(",
"header",
"[",
"'s_freq'",
"]",
")",
")",
"self",
".",
"idx_n_chan",
".",
"setText",
"(",
"str",
"(",
"len",
"(",
"header",
"[",
"'chan_name'",
"]",
")",
")",
")",
"start_time",
"=",
"header",
"[",
"'start_time'",
"]",
".",
"strftime",
"(",
"'%b-%d %H:%M:%S'",
")",
"self",
".",
"idx_start_time",
".",
"setText",
"(",
"start_time",
")",
"end_time",
"=",
"(",
"header",
"[",
"'start_time'",
"]",
"+",
"timedelta",
"(",
"seconds",
"=",
"header",
"[",
"'n_samples'",
"]",
"/",
"header",
"[",
"'s_freq'",
"]",
")",
")",
"self",
".",
"idx_end_time",
".",
"setText",
"(",
"end_time",
".",
"strftime",
"(",
"'%b-%d %H:%M:%S'",
")",
")"
] | 51.5 | 17.857143 |
def get_logs(self, container_id):
""" Return the full stdout/stderr of a container"""
stdout = self._docker.containers.get(container_id).logs(stdout=True, stderr=False).decode('utf8')
stderr = self._docker.containers.get(container_id).logs(stdout=False, stderr=True).decode('utf8')
return stdout, stderr | [
"def",
"get_logs",
"(",
"self",
",",
"container_id",
")",
":",
"stdout",
"=",
"self",
".",
"_docker",
".",
"containers",
".",
"get",
"(",
"container_id",
")",
".",
"logs",
"(",
"stdout",
"=",
"True",
",",
"stderr",
"=",
"False",
")",
".",
"decode",
"(",
"'utf8'",
")",
"stderr",
"=",
"self",
".",
"_docker",
".",
"containers",
".",
"get",
"(",
"container_id",
")",
".",
"logs",
"(",
"stdout",
"=",
"False",
",",
"stderr",
"=",
"True",
")",
".",
"decode",
"(",
"'utf8'",
")",
"return",
"stdout",
",",
"stderr"
] | 66.2 | 29.6 |
def create_sheet(self, title):
""" Create a sheet with the given title. This does not check if
another sheet by the same name already exists. """
ws = self.conn.sheets_service.AddWorksheet(title, 10, 10, self.id)
self._wsf = None
return Sheet(self, ws) | [
"def",
"create_sheet",
"(",
"self",
",",
"title",
")",
":",
"ws",
"=",
"self",
".",
"conn",
".",
"sheets_service",
".",
"AddWorksheet",
"(",
"title",
",",
"10",
",",
"10",
",",
"self",
".",
"id",
")",
"self",
".",
"_wsf",
"=",
"None",
"return",
"Sheet",
"(",
"self",
",",
"ws",
")"
] | 47.833333 | 11.666667 |
def url(self):
"""
Return the URL of the server.
:returns: URL of the server
:rtype: string
"""
if len(self.drivers) > 0:
return self.drivers[0].url
else:
return self._url | [
"def",
"url",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"drivers",
")",
">",
"0",
":",
"return",
"self",
".",
"drivers",
"[",
"0",
"]",
".",
"url",
"else",
":",
"return",
"self",
".",
"_url"
] | 22 | 12.727273 |
def calculate_slope_aspect(elevation, xres, yres, z=1.0, scale=1.0):
"""
Calculate slope and aspect map.
Return a pair of arrays 2 pixels smaller than the input elevation array.
Slope is returned in radians, from 0 for sheer face to pi/2 for
flat ground. Aspect is returned in radians, counterclockwise from -pi
at north around to pi.
Logic here is borrowed from hillshade.cpp:
http://www.perrygeo.net/wordpress/?p=7
Parameters
----------
elevation : array
input elevation data
xres : float
column width
yres : float
row height
z : float
vertical exaggeration factor
scale : float
scale factor of pixel size units versus height units (insert 112000
when having elevation values in meters in a geodetic projection)
Returns
-------
slope shade : array
"""
z = float(z)
scale = float(scale)
height, width = elevation.shape[0] - 2, elevation.shape[1] - 2
window = [
z * elevation[row:(row + height), col:(col + width)]
for (row, col) in product(range(3), range(3))
]
x = (
(window[0] + window[3] + window[3] + window[6])
- (window[2] + window[5] + window[5] + window[8])
) / (8.0 * xres * scale)
y = (
(window[6] + window[7] + window[7] + window[8])
- (window[0] + window[1] + window[1] + window[2])
) / (8.0 * yres * scale)
# in radians, from 0 to pi/2
slope = math.pi/2 - np.arctan(np.sqrt(x*x + y*y))
# in radians counterclockwise, from -pi at north back to pi
aspect = np.arctan2(x, y)
return slope, aspect | [
"def",
"calculate_slope_aspect",
"(",
"elevation",
",",
"xres",
",",
"yres",
",",
"z",
"=",
"1.0",
",",
"scale",
"=",
"1.0",
")",
":",
"z",
"=",
"float",
"(",
"z",
")",
"scale",
"=",
"float",
"(",
"scale",
")",
"height",
",",
"width",
"=",
"elevation",
".",
"shape",
"[",
"0",
"]",
"-",
"2",
",",
"elevation",
".",
"shape",
"[",
"1",
"]",
"-",
"2",
"window",
"=",
"[",
"z",
"*",
"elevation",
"[",
"row",
":",
"(",
"row",
"+",
"height",
")",
",",
"col",
":",
"(",
"col",
"+",
"width",
")",
"]",
"for",
"(",
"row",
",",
"col",
")",
"in",
"product",
"(",
"range",
"(",
"3",
")",
",",
"range",
"(",
"3",
")",
")",
"]",
"x",
"=",
"(",
"(",
"window",
"[",
"0",
"]",
"+",
"window",
"[",
"3",
"]",
"+",
"window",
"[",
"3",
"]",
"+",
"window",
"[",
"6",
"]",
")",
"-",
"(",
"window",
"[",
"2",
"]",
"+",
"window",
"[",
"5",
"]",
"+",
"window",
"[",
"5",
"]",
"+",
"window",
"[",
"8",
"]",
")",
")",
"/",
"(",
"8.0",
"*",
"xres",
"*",
"scale",
")",
"y",
"=",
"(",
"(",
"window",
"[",
"6",
"]",
"+",
"window",
"[",
"7",
"]",
"+",
"window",
"[",
"7",
"]",
"+",
"window",
"[",
"8",
"]",
")",
"-",
"(",
"window",
"[",
"0",
"]",
"+",
"window",
"[",
"1",
"]",
"+",
"window",
"[",
"1",
"]",
"+",
"window",
"[",
"2",
"]",
")",
")",
"/",
"(",
"8.0",
"*",
"yres",
"*",
"scale",
")",
"# in radians, from 0 to pi/2",
"slope",
"=",
"math",
".",
"pi",
"/",
"2",
"-",
"np",
".",
"arctan",
"(",
"np",
".",
"sqrt",
"(",
"x",
"*",
"x",
"+",
"y",
"*",
"y",
")",
")",
"# in radians counterclockwise, from -pi at north back to pi",
"aspect",
"=",
"np",
".",
"arctan2",
"(",
"x",
",",
"y",
")",
"return",
"slope",
",",
"aspect"
] | 31.333333 | 21.411765 |
def _append_path(path, name, tree, buffer_items):
"""
Append a 2D or 3D path to the scene structure and put the
data into buffer_items.
Parameters
-------------
path : trimesh.Path2D or trimesh.Path3D
Source geometry
name : str
Name of geometry
tree : dict
Will be updated with data from path
buffer_items
Will have buffer appended with path data
"""
# convert the path to the unnamed args for
# a pyglet vertex list
vxlist = rendering.path_to_vertexlist(path)
tree["meshes"].append({
"name": name,
"primitives": [{
"attributes": {"POSITION": len(tree["accessors"])},
"mode": 1, # mode 1 is GL_LINES
"material": len(tree["materials"])}]})
# if units are defined, store them as an extra:
# https://github.com/KhronosGroup/glTF/tree/master/extensions
if path.units is not None and 'meter' not in path.units:
tree["meshes"][-1]["extras"] = {"units": str(path.units)}
tree["accessors"].append(
{
"bufferView": len(buffer_items),
"componentType": 5126,
"count": vxlist[0],
"type": "VEC3",
"byteOffset": 0,
"max": path.vertices.max(axis=0).tolist(),
"min": path.vertices.min(axis=0).tolist(),
}
)
# TODO add color support to Path object
# this is just exporting everying as black
tree["materials"].append(_default_material)
# data is the second value of the fourth field
# which is a (data type, data) tuple
buffer_items.append(_byte_pad(
vxlist[4][1].astype(float32).tobytes())) | [
"def",
"_append_path",
"(",
"path",
",",
"name",
",",
"tree",
",",
"buffer_items",
")",
":",
"# convert the path to the unnamed args for",
"# a pyglet vertex list",
"vxlist",
"=",
"rendering",
".",
"path_to_vertexlist",
"(",
"path",
")",
"tree",
"[",
"\"meshes\"",
"]",
".",
"append",
"(",
"{",
"\"name\"",
":",
"name",
",",
"\"primitives\"",
":",
"[",
"{",
"\"attributes\"",
":",
"{",
"\"POSITION\"",
":",
"len",
"(",
"tree",
"[",
"\"accessors\"",
"]",
")",
"}",
",",
"\"mode\"",
":",
"1",
",",
"# mode 1 is GL_LINES",
"\"material\"",
":",
"len",
"(",
"tree",
"[",
"\"materials\"",
"]",
")",
"}",
"]",
"}",
")",
"# if units are defined, store them as an extra:",
"# https://github.com/KhronosGroup/glTF/tree/master/extensions",
"if",
"path",
".",
"units",
"is",
"not",
"None",
"and",
"'meter'",
"not",
"in",
"path",
".",
"units",
":",
"tree",
"[",
"\"meshes\"",
"]",
"[",
"-",
"1",
"]",
"[",
"\"extras\"",
"]",
"=",
"{",
"\"units\"",
":",
"str",
"(",
"path",
".",
"units",
")",
"}",
"tree",
"[",
"\"accessors\"",
"]",
".",
"append",
"(",
"{",
"\"bufferView\"",
":",
"len",
"(",
"buffer_items",
")",
",",
"\"componentType\"",
":",
"5126",
",",
"\"count\"",
":",
"vxlist",
"[",
"0",
"]",
",",
"\"type\"",
":",
"\"VEC3\"",
",",
"\"byteOffset\"",
":",
"0",
",",
"\"max\"",
":",
"path",
".",
"vertices",
".",
"max",
"(",
"axis",
"=",
"0",
")",
".",
"tolist",
"(",
")",
",",
"\"min\"",
":",
"path",
".",
"vertices",
".",
"min",
"(",
"axis",
"=",
"0",
")",
".",
"tolist",
"(",
")",
",",
"}",
")",
"# TODO add color support to Path object",
"# this is just exporting everying as black",
"tree",
"[",
"\"materials\"",
"]",
".",
"append",
"(",
"_default_material",
")",
"# data is the second value of the fourth field",
"# which is a (data type, data) tuple",
"buffer_items",
".",
"append",
"(",
"_byte_pad",
"(",
"vxlist",
"[",
"4",
"]",
"[",
"1",
"]",
".",
"astype",
"(",
"float32",
")",
".",
"tobytes",
"(",
")",
")",
")"
] | 30.584906 | 17.113208 |
def get_string(self):
"""A string representation of the junction
:return: string represnetation
:rtype: string
"""
return self.left.chr+':'+str(self.left.end)+'-'+self.right.chr+':'+str(self.right.start) | [
"def",
"get_string",
"(",
"self",
")",
":",
"return",
"self",
".",
"left",
".",
"chr",
"+",
"':'",
"+",
"str",
"(",
"self",
".",
"left",
".",
"end",
")",
"+",
"'-'",
"+",
"self",
".",
"right",
".",
"chr",
"+",
"':'",
"+",
"str",
"(",
"self",
".",
"right",
".",
"start",
")"
] | 31.142857 | 19.857143 |
def git_repo_to_sloc(url):
"""
Given a Git repository URL, returns number of lines of code based on cloc
Reference:
- cloc: https://github.com/AlDanial/cloc
- https://www.omg.org/spec/AFP/
- Another potential way to calculation effort
Sample cloc output:
{
"header": {
"cloc_url": "github.com/AlDanial/cloc",
"cloc_version": "1.74",
"elapsed_seconds": 0.195950984954834,
"n_files": 27,
"n_lines": 2435,
"files_per_second": 137.78956000769,
"lines_per_second": 12426.5769858787
},
"C++": {
"nFiles": 7,
"blank": 121,
"comment": 314,
"code": 371
},
"C/C++ Header": {
"nFiles": 8,
"blank": 107,
"comment": 604,
"code": 191
},
"CMake": {
"nFiles": 11,
"blank": 49,
"comment": 465,
"code": 165
},
"Markdown": {
"nFiles": 1,
"blank": 18,
"comment": 0,
"code": 30
},
"SUM": {
"blank": 295,
"comment": 1383,
"code": 757,
"nFiles": 27
}
}
"""
with tempfile.TemporaryDirectory() as tmp_dir:
logger.debug('Cloning: url=%s tmp_dir=%s', url, tmp_dir)
tmp_clone = os.path.join(tmp_dir, 'clone-dir')
cmd = ['git', 'clone', '--depth=1', url, tmp_clone]
execute(cmd)
cmd = ['cloc', '--json', tmp_clone]
out, _ = execute(cmd)
try:
json_start = out.find('{"header"')
json_blob = out[json_start:].replace('\\n', '').replace('\'', '')
cloc_json = json.loads(json_blob)
sloc = cloc_json['SUM']['code']
except json.decoder.JSONDecodeError:
logger.debug('Error Decoding: url=%s, out=%s', url, out)
sloc = 0
logger.debug('SLOC: url=%s, sloc=%d', url, sloc)
return sloc | [
"def",
"git_repo_to_sloc",
"(",
"url",
")",
":",
"with",
"tempfile",
".",
"TemporaryDirectory",
"(",
")",
"as",
"tmp_dir",
":",
"logger",
".",
"debug",
"(",
"'Cloning: url=%s tmp_dir=%s'",
",",
"url",
",",
"tmp_dir",
")",
"tmp_clone",
"=",
"os",
".",
"path",
".",
"join",
"(",
"tmp_dir",
",",
"'clone-dir'",
")",
"cmd",
"=",
"[",
"'git'",
",",
"'clone'",
",",
"'--depth=1'",
",",
"url",
",",
"tmp_clone",
"]",
"execute",
"(",
"cmd",
")",
"cmd",
"=",
"[",
"'cloc'",
",",
"'--json'",
",",
"tmp_clone",
"]",
"out",
",",
"_",
"=",
"execute",
"(",
"cmd",
")",
"try",
":",
"json_start",
"=",
"out",
".",
"find",
"(",
"'{\"header\"'",
")",
"json_blob",
"=",
"out",
"[",
"json_start",
":",
"]",
".",
"replace",
"(",
"'\\\\n'",
",",
"''",
")",
".",
"replace",
"(",
"'\\''",
",",
"''",
")",
"cloc_json",
"=",
"json",
".",
"loads",
"(",
"json_blob",
")",
"sloc",
"=",
"cloc_json",
"[",
"'SUM'",
"]",
"[",
"'code'",
"]",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"logger",
".",
"debug",
"(",
"'Error Decoding: url=%s, out=%s'",
",",
"url",
",",
"out",
")",
"sloc",
"=",
"0",
"logger",
".",
"debug",
"(",
"'SLOC: url=%s, sloc=%d'",
",",
"url",
",",
"sloc",
")",
"return",
"sloc"
] | 28.328947 | 17.934211 |
def _update_serial_ports(serial_old_new):
'''
Returns a list of vim.vm.device.VirtualDeviceSpec specifying to edit a
deployed serial port configuration to the new given config
serial_old_new
Dictionary with old and new keys which contains the current and the
next config for a serial port device
'''
serial_changes = []
if serial_old_new:
devs = [serial['old']['adapter'] for serial in serial_old_new]
log.trace('Updating serial ports %s', devs)
for item in serial_old_new:
current_serial = item['old']
next_serial = item['new']
difference = recursive_diff(current_serial, next_serial)
difference.ignore_unset_values = False
if difference.changed():
serial_changes.append(_apply_serial_port(next_serial,
current_serial['key'],
'edit'))
return serial_changes | [
"def",
"_update_serial_ports",
"(",
"serial_old_new",
")",
":",
"serial_changes",
"=",
"[",
"]",
"if",
"serial_old_new",
":",
"devs",
"=",
"[",
"serial",
"[",
"'old'",
"]",
"[",
"'adapter'",
"]",
"for",
"serial",
"in",
"serial_old_new",
"]",
"log",
".",
"trace",
"(",
"'Updating serial ports %s'",
",",
"devs",
")",
"for",
"item",
"in",
"serial_old_new",
":",
"current_serial",
"=",
"item",
"[",
"'old'",
"]",
"next_serial",
"=",
"item",
"[",
"'new'",
"]",
"difference",
"=",
"recursive_diff",
"(",
"current_serial",
",",
"next_serial",
")",
"difference",
".",
"ignore_unset_values",
"=",
"False",
"if",
"difference",
".",
"changed",
"(",
")",
":",
"serial_changes",
".",
"append",
"(",
"_apply_serial_port",
"(",
"next_serial",
",",
"current_serial",
"[",
"'key'",
"]",
",",
"'edit'",
")",
")",
"return",
"serial_changes"
] | 43.695652 | 19.869565 |
def _feature_country_mentions(self, doc):
"""
Given a document, count how many times different country names and adjectives are mentioned.
These are features used in the country picking phase.
Parameters
---------
doc: a spaCy nlp'ed piece of text
Returns
-------
countries: dict
the top two countries (ISO code) and their frequency of mentions.
"""
c_list = []
for i in doc.ents:
try:
country = self._both_codes[i.text]
c_list.append(country)
except KeyError:
pass
count = Counter(c_list).most_common()
try:
top, top_count = count[0]
except:
top = ""
top_count = 0
try:
two, two_count = count[1]
except:
two = ""
two_count = 0
countries = (top, top_count, two, two_count)
return countries | [
"def",
"_feature_country_mentions",
"(",
"self",
",",
"doc",
")",
":",
"c_list",
"=",
"[",
"]",
"for",
"i",
"in",
"doc",
".",
"ents",
":",
"try",
":",
"country",
"=",
"self",
".",
"_both_codes",
"[",
"i",
".",
"text",
"]",
"c_list",
".",
"append",
"(",
"country",
")",
"except",
"KeyError",
":",
"pass",
"count",
"=",
"Counter",
"(",
"c_list",
")",
".",
"most_common",
"(",
")",
"try",
":",
"top",
",",
"top_count",
"=",
"count",
"[",
"0",
"]",
"except",
":",
"top",
"=",
"\"\"",
"top_count",
"=",
"0",
"try",
":",
"two",
",",
"two_count",
"=",
"count",
"[",
"1",
"]",
"except",
":",
"two",
"=",
"\"\"",
"two_count",
"=",
"0",
"countries",
"=",
"(",
"top",
",",
"top_count",
",",
"two",
",",
"two_count",
")",
"return",
"countries"
] | 27.6 | 19.142857 |
def hash(self, value):
"""
function hash() implement to acquire hash value that use simply method that weighted sum.
Parameters:
-----------
value: string
the value is param of need acquire hash
Returns:
--------
result
hash code for value
"""
result = 0
for i in range(len(value)):
result += self.seed * result + ord(value[i])
return (self.capacity - 1) % result | [
"def",
"hash",
"(",
"self",
",",
"value",
")",
":",
"result",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"value",
")",
")",
":",
"result",
"+=",
"self",
".",
"seed",
"*",
"result",
"+",
"ord",
"(",
"value",
"[",
"i",
"]",
")",
"return",
"(",
"self",
".",
"capacity",
"-",
"1",
")",
"%",
"result"
] | 30.352941 | 17.411765 |
def retrieveVals(self):
"""Retrieve values for graphs."""
ntpinfo = NTPinfo()
stats = ntpinfo.getPeerStats()
if stats:
if self.hasGraph('ntp_peer_stratum'):
self.setGraphVal('ntp_peer_stratum', 'stratum',
stats.get('stratum'))
if self.hasGraph('ntp_peer_stats'):
self.setGraphVal('ntp_peer_stats', 'offset',
stats.get('offset'))
self.setGraphVal('ntp_peer_stats', 'delay',
stats.get('delay'))
self.setGraphVal('ntp_peer_stats', 'jitter',
stats.get('jitter')) | [
"def",
"retrieveVals",
"(",
"self",
")",
":",
"ntpinfo",
"=",
"NTPinfo",
"(",
")",
"stats",
"=",
"ntpinfo",
".",
"getPeerStats",
"(",
")",
"if",
"stats",
":",
"if",
"self",
".",
"hasGraph",
"(",
"'ntp_peer_stratum'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'ntp_peer_stratum'",
",",
"'stratum'",
",",
"stats",
".",
"get",
"(",
"'stratum'",
")",
")",
"if",
"self",
".",
"hasGraph",
"(",
"'ntp_peer_stats'",
")",
":",
"self",
".",
"setGraphVal",
"(",
"'ntp_peer_stats'",
",",
"'offset'",
",",
"stats",
".",
"get",
"(",
"'offset'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'ntp_peer_stats'",
",",
"'delay'",
",",
"stats",
".",
"get",
"(",
"'delay'",
")",
")",
"self",
".",
"setGraphVal",
"(",
"'ntp_peer_stats'",
",",
"'jitter'",
",",
"stats",
".",
"get",
"(",
"'jitter'",
")",
")"
] | 46.666667 | 13.933333 |
def awaitTermination(self, timeout=None):
"""
Wait for the execution to stop.
@param timeout: time to wait in seconds
"""
if timeout is None:
self._jssc.awaitTermination()
else:
self._jssc.awaitTerminationOrTimeout(int(timeout * 1000)) | [
"def",
"awaitTermination",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"timeout",
"is",
"None",
":",
"self",
".",
"_jssc",
".",
"awaitTermination",
"(",
")",
"else",
":",
"self",
".",
"_jssc",
".",
"awaitTerminationOrTimeout",
"(",
"int",
"(",
"timeout",
"*",
"1000",
")",
")"
] | 29.9 | 11.9 |
def n_choose_k(n, k):
""" get the number of quartets as n-choose-k. This is used
in equal splits to decide whether a split should be exhaustively sampled
or randomly sampled. Edges near tips can be exhaustive while highly nested
edges probably have too many quartets
"""
return int(reduce(MUL, (Fraction(n-i, i+1) for i in range(k)), 1)) | [
"def",
"n_choose_k",
"(",
"n",
",",
"k",
")",
":",
"return",
"int",
"(",
"reduce",
"(",
"MUL",
",",
"(",
"Fraction",
"(",
"n",
"-",
"i",
",",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"k",
")",
")",
",",
"1",
")",
")"
] | 50.714286 | 17.714286 |
def invalidate_cache(self, obj=None, queryset=None,
extra=None, force_all=False):
"""
Method that should be called by all tiggers to invalidate the
cache for an item(s).
Should be overriden by inheriting classes to customize behavior.
"""
if self.cache_manager:
if queryset != None:
force_all = True
self.cache_manager.invalidate_cache(self.model, instance=obj,
extra=extra,
force_all=force_all) | [
"def",
"invalidate_cache",
"(",
"self",
",",
"obj",
"=",
"None",
",",
"queryset",
"=",
"None",
",",
"extra",
"=",
"None",
",",
"force_all",
"=",
"False",
")",
":",
"if",
"self",
".",
"cache_manager",
":",
"if",
"queryset",
"!=",
"None",
":",
"force_all",
"=",
"True",
"self",
".",
"cache_manager",
".",
"invalidate_cache",
"(",
"self",
".",
"model",
",",
"instance",
"=",
"obj",
",",
"extra",
"=",
"extra",
",",
"force_all",
"=",
"force_all",
")"
] | 37.375 | 20.625 |
def set_vars(env):
    """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars

    MWCW_VERSIONS is set to a list of objects representing installed versions
    MWCW_VERSION is set to the version object that will be used for building.
    MWCW_VERSION can be set to a string during Environment
    construction to influence which version is chosen, otherwise
    the latest one from MWCW_VERSIONS is used.
    Returns true if at least one version is found, false otherwise
    """
    desired = env.get('MWCW_VERSION', '')
    # Already resolved to a version object: nothing more to do.
    if isinstance(desired, MWVersion):
        return 1
    # An explicit None disables the tool.
    if desired is None:
        return 0
    versions = find_versions()
    version = None
    if desired:
        # Keep the last installed version whose name matches the request,
        # mirroring a linear scan where later matches overwrite earlier ones.
        matching = [v for v in versions if str(v) == desired]
        if matching:
            version = matching[-1]
    elif versions:
        # No preference given: default to the newest install found.
        version = versions[-1]
    env['MWCW_VERSIONS'] = versions
    env['MWCW_VERSION'] = version
    if version is None:
        return 0
    env.PrependENVPath('PATH', version.clpath)
    env.PrependENVPath('PATH', version.dllpath)
    ENV = env['ENV']
    ENV['CWFolder'] = version.path
    ENV['LM_LICENSE_FILE'] = version.license

    def plus(path):
        # CodeWarrior expects each search path prefixed with '+'.
        return '+%s' % path

    ENV['MWCIncludes'] = os.pathsep.join(plus(p) for p in version.includes)
    ENV['MWLibraries'] = os.pathsep.join(plus(p) for p in version.libs)
    return 1
"def",
"set_vars",
"(",
"env",
")",
":",
"desired",
"=",
"env",
".",
"get",
"(",
"'MWCW_VERSION'",
",",
"''",
")",
"# return right away if the variables are already set",
"if",
"isinstance",
"(",
"desired",
",",
"MWVersion",
")",
":",
"return",
"1",
"elif",
"desired",
"is",
"None",
":",
"return",
"0",
"versions",
"=",
"find_versions",
"(",
")",
"version",
"=",
"None",
"if",
"desired",
":",
"for",
"v",
"in",
"versions",
":",
"if",
"str",
"(",
"v",
")",
"==",
"desired",
":",
"version",
"=",
"v",
"elif",
"versions",
":",
"version",
"=",
"versions",
"[",
"-",
"1",
"]",
"env",
"[",
"'MWCW_VERSIONS'",
"]",
"=",
"versions",
"env",
"[",
"'MWCW_VERSION'",
"]",
"=",
"version",
"if",
"version",
"is",
"None",
":",
"return",
"0",
"env",
".",
"PrependENVPath",
"(",
"'PATH'",
",",
"version",
".",
"clpath",
")",
"env",
".",
"PrependENVPath",
"(",
"'PATH'",
",",
"version",
".",
"dllpath",
")",
"ENV",
"=",
"env",
"[",
"'ENV'",
"]",
"ENV",
"[",
"'CWFolder'",
"]",
"=",
"version",
".",
"path",
"ENV",
"[",
"'LM_LICENSE_FILE'",
"]",
"=",
"version",
".",
"license",
"plus",
"=",
"lambda",
"x",
":",
"'+%s'",
"%",
"x",
"ENV",
"[",
"'MWCIncludes'",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"map",
"(",
"plus",
",",
"version",
".",
"includes",
")",
")",
"ENV",
"[",
"'MWLibraries'",
"]",
"=",
"os",
".",
"pathsep",
".",
"join",
"(",
"map",
"(",
"plus",
",",
"version",
".",
"libs",
")",
")",
"return",
"1"
] | 30.977778 | 21.511111 |
def log_attempt(self, key):
    """
    Record an attempt against ``key``: bump its attempt counter and,
    once the configured maximum is reached, add an expiring entry to
    the lock table.
    """
    with self.lock:
        self.attempts[key] = self.attempts.get(key, 0) + 1
        if self.attempts[key] >= self.max_attempts:
            log.info('Account %s locked due to too many login attempts' % key)
            # lock account until now + lock_duration
            expiry = datetime.datetime.utcnow() + \
                datetime.timedelta(seconds=self.lock_duration)
            self.locks[key] = expiry
"def",
"log_attempt",
"(",
"self",
",",
"key",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"key",
"not",
"in",
"self",
".",
"attempts",
":",
"self",
".",
"attempts",
"[",
"key",
"]",
"=",
"1",
"else",
":",
"self",
".",
"attempts",
"[",
"key",
"]",
"+=",
"1",
"if",
"self",
".",
"attempts",
"[",
"key",
"]",
">=",
"self",
".",
"max_attempts",
":",
"log",
".",
"info",
"(",
"'Account %s locked due to too many login attempts'",
"%",
"key",
")",
"# lock account",
"self",
".",
"locks",
"[",
"key",
"]",
"=",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"self",
".",
"lock_duration",
")"
] | 42.466667 | 22.333333 |
def run(cosmology, zi=0, Mi=1e12, z=False, com=True, mah=True,
        filename=None, verbose=None, retcosmo=None):
    """ Run commah code on halo of mass 'Mi' at redshift 'zi' with
    accretion and profile history at higher redshifts 'z'
    This is based on Correa et al. (2015a,b,c)
    Parameters
    ----------
    cosmology : str or dict
        Can be named cosmology, default WMAP7 (aka DRAGONS), or
        DRAGONS, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15
        or dictionary similar in format to:
        {'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
        'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
        'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
    zi : float / numpy array, optional
        Redshift at which halo has mass 'Mi'. If float then all
        halo masses 'Mi' are assumed to be at this redshift.
        If array but Mi is float, then this halo mass is used across
        all starting redshifts. If both Mi and zi are arrays then they
        have to be the same size for one - to - one correspondence between
        halo mass and the redshift at which it has that mass. Default is 0.
    Mi : float / numpy array, optional
        Halo mass 'Mi' at a redshift 'zi'. If float then all redshifts 'zi'
        are solved for this halo mass. If array but zi is float, then this
        redshift is applied to all halo masses. If both Mi and zi are
        arrays then they have to be the same size for one - to - one
        correspondence between halo mass and the redshift at which it
        has that mass. Default is 1e12 Msol.
    z : float / numpy array, optional
        Redshift to solve commah code at. Must have zi<z else these steps
        are skipped. Default is False, meaning commah is solved at z=zi
    com : bool, optional
        If true then solve for concentration-mass,
        default is True.
    mah : bool, optional
        If true then solve for accretion rate and halo mass history,
        default is True.
    filename : bool / str, optional
        If str is passed this is used as a filename for output of commah
    verbose : bool, optional
        If true then give comments, default is None.
    retcosmo : bool, optional
        Return cosmological parameters used as a dict if retcosmo = True,
        default is None.
    Returns
    -------
    dataset : structured dataset
        dataset contains structured columns of size
        (size(Mi) > size(z)) by size(z)
        If mah = True and com = False then columns are
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'dMdt' is accretion rate [Msol/yr]
        and 'Mz' is the halo mass at 'z' for a halo which was 'Mi' massive
        at starting redshift 'zi'
        If mah = False and com = True then columns are
        ('zi',float),('Mi',float),('z',float),('c',float),('sig',float),('nu',float),('zf',float)
        where 'zi' is the starting redshift, 'Mi' is halo mass at zi
        'z' is output redshift (NB z>zi), 'c' is NFW concentration of halo
        at the redshift 'z', 'sig' is the mass variance 'sigma',
        'nu' is the dimensionless fluctuation for halo mass 'Mi' at 'zi',
        'zf' is the formation redshift for a halo of mass 'Mi' at redshift 'zi'
        If mah = True and com = True then columns are:
        ('zi',float),('Mi',float),('z',float),('dMdt',float),('Mz',float),
        ('c',float),('sig',float),('nu',float),('zf',float)
    file : structured dataset with name 'filename' if passed
    Raises
    ------
    Output -1
        If com = False and mah = False as user has to select something.
    Output -1
        If 'zi' and 'Mi' are arrays of unequal size. Impossible to match
        corresponding masses and redshifts of output.
    Examples
    --------
    Examples should be written in doctest format, and should illustrate how
    to use the function.
    >>> import examples
    >>> examples.runcommands() # A series of ways to query structured dataset
    >>> examples.plotcommands() # Examples to plot data
    """
    # Check user choices...
    if not com and not mah:
        print("User has to choose com=True and / or mah=True ")
        return(-1)
    # Convert arrays / lists to np.array
    # and inflate redshift / mass axis
    # to match each other for later loop
    results = _checkinput(zi, Mi, z=z, verbose=verbose)
    # Return if results is -1
    if(results == -1):
        return(-1)
    # If not, unpack the returned iterable
    else:
        zi, Mi, z, lenz, lenm, lenzout = results
    # At this point we will have lenm objects to iterate over
    # Get the cosmological parameters for the given cosmology
    cosmo = getcosmo(cosmology)
    # Create output file if desired
    if filename:
        print("Output to file %r" % (filename))
        # BUG FIX: open in text mode. Everything written below is str
        # (headers and formatted rows), which raises TypeError when the
        # file is opened 'wb' on Python 3.
        fout = open(filename, 'w')
    # Create the structured dataset
    try:
        if mah and com:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, "
                      "zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " Accretion - Final Halo - concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " rate - mass - - "
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " (dM/dt) - (M200) - - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " [Msol/yr] - [Msol] - - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('dMdt', float),
                               ('Mz', float), ('c', float), ('sig', float),
                               ('nu', float), ('zf', float)])
        elif mah:
            if verbose:
                print("Output requested is zi, Mi, z, dMdt, Mz")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z -"
                           " Accretion - Final Halo "+'\n')
                fout.write("# - mass - -"
                           " rate - mass "+'\n')
                fout.write("# - (M200) - -"
                           " (dm/dt) - (M200) "+'\n')
                fout.write("# - [Msol] - -"
                           " [Msol/yr] - [Msol] "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float),
                               ('dMdt', float), ('Mz', float)])
        else:
            if verbose:
                print("Output requested is zi, Mi, z, c, sig, nu, zf")
            if filename:
                fout.write(_getcosmoheader(cosmo)+'\n')
                fout.write("# Initial z - Initial Halo - Output z - "
                           " concentration - "
                           " Mass - Peak - Formation z "+'\n')
                fout.write("# - mass - -"
                           " -"
                           " Variance - Height - "+'\n')
                fout.write("# - (M200) - - "
                           " - "
                           " (sigma) - (nu) - "+'\n')
                fout.write("# - [Msol] - - "
                           " - "
                           " - - "+'\n')
            dataset = np.zeros((lenm, lenzout), dtype=[('zi', float),
                               ('Mi', float), ('z', float), ('c', float),
                               ('sig', float), ('nu', float), ('zf', float)])
        # Now loop over the combination of initial redshift and halo masses
        for i_ind, (zval, Mval) in enumerate(_izip(zi, Mi)):
            if verbose:
                print("Output Halo of Mass Mi=%s at zi=%s" % (Mval, zval))
            # For a given halo mass Mi at redshift zi need to know
            # output redshifts 'z'
            # Check that all requested redshifts are greater than
            # input redshift, except if z is False, in which case
            # only solve z at zi, i.e. remove a loop
            if z is False:
                ztemp = np.array(zval, ndmin=1, dtype=float)
            else:
                ztemp = np.array(z[z >= zval], dtype=float)
            # Loop over the output redshifts
            if ztemp.size:
                # Return accretion rates and halo mass progenitors at
                # redshifts 'z' for object of mass Mi at zi
                dMdt, Mz = MAH(ztemp, zval, Mval, **cosmo)
                if mah and com:
                    # More expensive to return concentrations
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # Save all arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind],
                             c[j_ind], sig[j_ind], nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write(
                                "{}, {}, {}, {}, {}, {}, {}, {}, {} \n".format(
                                    zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                    Mz[j_ind], c[j_ind], sig[j_ind], nu[j_ind],
                                    zf[j_ind]))
                elif mah:
                    # Save only MAH arrays
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], dMdt[j_ind], Mz[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], dMdt[j_ind],
                                Mz[j_ind]))
                else:
                    # Output only COM arrays
                    c, sig, nu, zf = COM(ztemp, Mz, **cosmo)
                    # For any halo mass Mi at redshift zi
                    # solve for c, sig, nu and zf
                    for j_ind, j_val in enumerate(ztemp):
                        dataset[i_ind, j_ind] =\
                            (zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                             nu[j_ind], zf[j_ind])
                        if filename:
                            fout.write("{}, {}, {}, {}, {}, {}, {} \n".format(
                                zval, Mval, ztemp[j_ind], c[j_ind], sig[j_ind],
                                nu[j_ind], zf[j_ind]))
    # Make sure to close the file if it was opened
    finally:
        if filename:
            fout.close()
    if retcosmo:
        return(dataset, cosmo)
    else:
        return(dataset)
"def",
"run",
"(",
"cosmology",
",",
"zi",
"=",
"0",
",",
"Mi",
"=",
"1e12",
",",
"z",
"=",
"False",
",",
"com",
"=",
"True",
",",
"mah",
"=",
"True",
",",
"filename",
"=",
"None",
",",
"verbose",
"=",
"None",
",",
"retcosmo",
"=",
"None",
")",
":",
"# Check user choices...",
"if",
"not",
"com",
"and",
"not",
"mah",
":",
"print",
"(",
"\"User has to choose com=True and / or mah=True \"",
")",
"return",
"(",
"-",
"1",
")",
"# Convert arrays / lists to np.array",
"# and inflate redshift / mass axis",
"# to match each other for later loop",
"results",
"=",
"_checkinput",
"(",
"zi",
",",
"Mi",
",",
"z",
"=",
"z",
",",
"verbose",
"=",
"verbose",
")",
"# Return if results is -1",
"if",
"(",
"results",
"==",
"-",
"1",
")",
":",
"return",
"(",
"-",
"1",
")",
"# If not, unpack the returned iterable",
"else",
":",
"zi",
",",
"Mi",
",",
"z",
",",
"lenz",
",",
"lenm",
",",
"lenzout",
"=",
"results",
"# At this point we will have lenm objects to iterate over",
"# Get the cosmological parameters for the given cosmology",
"cosmo",
"=",
"getcosmo",
"(",
"cosmology",
")",
"# Create output file if desired",
"if",
"filename",
":",
"print",
"(",
"\"Output to file %r\"",
"%",
"(",
"filename",
")",
")",
"fout",
"=",
"open",
"(",
"filename",
",",
"'wb'",
")",
"# Create the structured dataset",
"try",
":",
"if",
"mah",
"and",
"com",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Output requested is zi, Mi, z, dMdt, Mz, c, sig, nu, \"",
"\"zf\"",
")",
"if",
"filename",
":",
"fout",
".",
"write",
"(",
"_getcosmoheader",
"(",
"cosmo",
")",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# Initial z - Initial Halo - Output z - \"",
"\" Accretion - Final Halo - concentration - \"",
"\" Mass - Peak - Formation z \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - mass - -\"",
"\" rate - mass - - \"",
"\" Variance - Height - \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - (M200) - - \"",
"\" (dM/dt) - (M200) - - \"",
"\" (sigma) - (nu) - \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - [Msol] - - \"",
"\" [Msol/yr] - [Msol] - - \"",
"\" - - \"",
"+",
"'\\n'",
")",
"dataset",
"=",
"np",
".",
"zeros",
"(",
"(",
"lenm",
",",
"lenzout",
")",
",",
"dtype",
"=",
"[",
"(",
"'zi'",
",",
"float",
")",
",",
"(",
"'Mi'",
",",
"float",
")",
",",
"(",
"'z'",
",",
"float",
")",
",",
"(",
"'dMdt'",
",",
"float",
")",
",",
"(",
"'Mz'",
",",
"float",
")",
",",
"(",
"'c'",
",",
"float",
")",
",",
"(",
"'sig'",
",",
"float",
")",
",",
"(",
"'nu'",
",",
"float",
")",
",",
"(",
"'zf'",
",",
"float",
")",
"]",
")",
"elif",
"mah",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Output requested is zi, Mi, z, dMdt, Mz\"",
")",
"if",
"filename",
":",
"fout",
".",
"write",
"(",
"_getcosmoheader",
"(",
"cosmo",
")",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# Initial z - Initial Halo - Output z -\"",
"\" Accretion - Final Halo \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - mass - -\"",
"\" rate - mass \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - (M200) - -\"",
"\" (dm/dt) - (M200) \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - [Msol] - -\"",
"\" [Msol/yr] - [Msol] \"",
"+",
"'\\n'",
")",
"dataset",
"=",
"np",
".",
"zeros",
"(",
"(",
"lenm",
",",
"lenzout",
")",
",",
"dtype",
"=",
"[",
"(",
"'zi'",
",",
"float",
")",
",",
"(",
"'Mi'",
",",
"float",
")",
",",
"(",
"'z'",
",",
"float",
")",
",",
"(",
"'dMdt'",
",",
"float",
")",
",",
"(",
"'Mz'",
",",
"float",
")",
"]",
")",
"else",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Output requested is zi, Mi, z, c, sig, nu, zf\"",
")",
"if",
"filename",
":",
"fout",
".",
"write",
"(",
"_getcosmoheader",
"(",
"cosmo",
")",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# Initial z - Initial Halo - Output z - \"",
"\" concentration - \"",
"\" Mass - Peak - Formation z \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - mass - -\"",
"\" -\"",
"\" Variance - Height - \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - (M200) - - \"",
"\" - \"",
"\" (sigma) - (nu) - \"",
"+",
"'\\n'",
")",
"fout",
".",
"write",
"(",
"\"# - [Msol] - - \"",
"\" - \"",
"\" - - \"",
"+",
"'\\n'",
")",
"dataset",
"=",
"np",
".",
"zeros",
"(",
"(",
"lenm",
",",
"lenzout",
")",
",",
"dtype",
"=",
"[",
"(",
"'zi'",
",",
"float",
")",
",",
"(",
"'Mi'",
",",
"float",
")",
",",
"(",
"'z'",
",",
"float",
")",
",",
"(",
"'c'",
",",
"float",
")",
",",
"(",
"'sig'",
",",
"float",
")",
",",
"(",
"'nu'",
",",
"float",
")",
",",
"(",
"'zf'",
",",
"float",
")",
"]",
")",
"# Now loop over the combination of initial redshift and halo mamss",
"for",
"i_ind",
",",
"(",
"zval",
",",
"Mval",
")",
"in",
"enumerate",
"(",
"_izip",
"(",
"zi",
",",
"Mi",
")",
")",
":",
"if",
"verbose",
":",
"print",
"(",
"\"Output Halo of Mass Mi=%s at zi=%s\"",
"%",
"(",
"Mval",
",",
"zval",
")",
")",
"# For a given halo mass Mi at redshift zi need to know",
"# output redshifts 'z'",
"# Check that all requested redshifts are greater than",
"# input redshift, except if z is False, in which case",
"# only solve z at zi, i.e. remove a loop",
"if",
"z",
"is",
"False",
":",
"ztemp",
"=",
"np",
".",
"array",
"(",
"zval",
",",
"ndmin",
"=",
"1",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"ztemp",
"=",
"np",
".",
"array",
"(",
"z",
"[",
"z",
">=",
"zval",
"]",
",",
"dtype",
"=",
"float",
")",
"# Loop over the output redshifts",
"if",
"ztemp",
".",
"size",
":",
"# Return accretion rates and halo mass progenitors at",
"# redshifts 'z' for object of mass Mi at zi",
"dMdt",
",",
"Mz",
"=",
"MAH",
"(",
"ztemp",
",",
"zval",
",",
"Mval",
",",
"*",
"*",
"cosmo",
")",
"if",
"mah",
"and",
"com",
":",
"# More expensive to return concentrations",
"c",
",",
"sig",
",",
"nu",
",",
"zf",
"=",
"COM",
"(",
"ztemp",
",",
"Mz",
",",
"*",
"*",
"cosmo",
")",
"# Save all arrays",
"for",
"j_ind",
",",
"j_val",
"in",
"enumerate",
"(",
"ztemp",
")",
":",
"dataset",
"[",
"i_ind",
",",
"j_ind",
"]",
"=",
"(",
"zval",
",",
"Mval",
",",
"ztemp",
"[",
"j_ind",
"]",
",",
"dMdt",
"[",
"j_ind",
"]",
",",
"Mz",
"[",
"j_ind",
"]",
",",
"c",
"[",
"j_ind",
"]",
",",
"sig",
"[",
"j_ind",
"]",
",",
"nu",
"[",
"j_ind",
"]",
",",
"zf",
"[",
"j_ind",
"]",
")",
"if",
"filename",
":",
"fout",
".",
"write",
"(",
"\"{}, {}, {}, {}, {}, {}, {}, {}, {} \\n\"",
".",
"format",
"(",
"zval",
",",
"Mval",
",",
"ztemp",
"[",
"j_ind",
"]",
",",
"dMdt",
"[",
"j_ind",
"]",
",",
"Mz",
"[",
"j_ind",
"]",
",",
"c",
"[",
"j_ind",
"]",
",",
"sig",
"[",
"j_ind",
"]",
",",
"nu",
"[",
"j_ind",
"]",
",",
"zf",
"[",
"j_ind",
"]",
")",
")",
"elif",
"mah",
":",
"# Save only MAH arrays",
"for",
"j_ind",
",",
"j_val",
"in",
"enumerate",
"(",
"ztemp",
")",
":",
"dataset",
"[",
"i_ind",
",",
"j_ind",
"]",
"=",
"(",
"zval",
",",
"Mval",
",",
"ztemp",
"[",
"j_ind",
"]",
",",
"dMdt",
"[",
"j_ind",
"]",
",",
"Mz",
"[",
"j_ind",
"]",
")",
"if",
"filename",
":",
"fout",
".",
"write",
"(",
"\"{}, {}, {}, {}, {} \\n\"",
".",
"format",
"(",
"zval",
",",
"Mval",
",",
"ztemp",
"[",
"j_ind",
"]",
",",
"dMdt",
"[",
"j_ind",
"]",
",",
"Mz",
"[",
"j_ind",
"]",
")",
")",
"else",
":",
"# Output only COM arrays",
"c",
",",
"sig",
",",
"nu",
",",
"zf",
"=",
"COM",
"(",
"ztemp",
",",
"Mz",
",",
"*",
"*",
"cosmo",
")",
"# For any halo mass Mi at redshift zi",
"# solve for c, sig, nu and zf",
"for",
"j_ind",
",",
"j_val",
"in",
"enumerate",
"(",
"ztemp",
")",
":",
"dataset",
"[",
"i_ind",
",",
"j_ind",
"]",
"=",
"(",
"zval",
",",
"Mval",
",",
"ztemp",
"[",
"j_ind",
"]",
",",
"c",
"[",
"j_ind",
"]",
",",
"sig",
"[",
"j_ind",
"]",
",",
"nu",
"[",
"j_ind",
"]",
",",
"zf",
"[",
"j_ind",
"]",
")",
"if",
"filename",
":",
"fout",
".",
"write",
"(",
"\"{}, {}, {}, {}, {}, {}, {} \\n\"",
".",
"format",
"(",
"zval",
",",
"Mval",
",",
"ztemp",
"[",
"j_ind",
"]",
",",
"c",
"[",
"j_ind",
"]",
",",
"sig",
"[",
"j_ind",
"]",
",",
"nu",
"[",
"j_ind",
"]",
",",
"zf",
"[",
"j_ind",
"]",
")",
")",
"# Make sure to close the file if it was opened",
"finally",
":",
"fout",
".",
"close",
"(",
")",
"if",
"filename",
"else",
"None",
"if",
"retcosmo",
":",
"return",
"(",
"dataset",
",",
"cosmo",
")",
"else",
":",
"return",
"(",
"dataset",
")"
] | 46.783673 | 22.893878 |
def synchronous(self):
    """
    True if transport is synchronous or the connection has been forced
    into synchronous mode, False otherwise.
    """
    if self._transport is not None:
        return self.transport.synchronous or self._synchronous

    # Connection is gone: raise with the broker-supplied close reason
    # when one was recorded, otherwise a generic message.
    info = self._close_info
    if info and len(info['reply_text']) > 0:
        raise ConnectionClosed("connection is closed: %s : %s" %
                               (info['reply_code'],
                                info['reply_text']))
    raise ConnectionClosed("connection is closed")
"def",
"synchronous",
"(",
"self",
")",
":",
"if",
"self",
".",
"_transport",
"is",
"None",
":",
"if",
"self",
".",
"_close_info",
"and",
"len",
"(",
"self",
".",
"_close_info",
"[",
"'reply_text'",
"]",
")",
">",
"0",
":",
"raise",
"ConnectionClosed",
"(",
"\"connection is closed: %s : %s\"",
"%",
"(",
"self",
".",
"_close_info",
"[",
"'reply_code'",
"]",
",",
"self",
".",
"_close_info",
"[",
"'reply_text'",
"]",
")",
")",
"raise",
"ConnectionClosed",
"(",
"\"connection is closed\"",
")",
"return",
"self",
".",
"transport",
".",
"synchronous",
"or",
"self",
".",
"_synchronous"
] | 50.916667 | 24.416667 |
def check_ratelimit_budget(self, seconds_waited):
    """ If we have a ratelimit_budget, ensure it is not exceeded. """
    # No budget configured means unlimited waiting is allowed.
    if self.ratelimit_budget is None:
        return
    remaining = self.ratelimit_budget - seconds_waited
    self.ratelimit_budget = remaining
    if remaining < 1:
        raise RatelimitBudgetExceeded("Rate limit budget exceeded!")
"def",
"check_ratelimit_budget",
"(",
"self",
",",
"seconds_waited",
")",
":",
"if",
"self",
".",
"ratelimit_budget",
"is",
"not",
"None",
":",
"self",
".",
"ratelimit_budget",
"-=",
"seconds_waited",
"if",
"self",
".",
"ratelimit_budget",
"<",
"1",
":",
"raise",
"RatelimitBudgetExceeded",
"(",
"\"Rate limit budget exceeded!\"",
")"
] | 55.833333 | 10.333333 |
def GET_AUTH(self, courseid): # pylint: disable=arguments-differ
        """ GET request

        Render the group/team page for ``courseid`` for the logged-in
        student. Query-string fields drive side effects before rendering:

        - ``register_group``: join the requested group (an index within
          the student's classroom in classroom mode, an aggregation
          ObjectId in team mode), leaving any previous group first.
        - ``unregister_group``: leave the current group/team.

        Staff get a 404 (they manage groups elsewhere); a closed course
        renders the "course unavailable" page.
        """
        course = self.course_factory.get_course(courseid)
        username = self.user_manager.session_username()
        error = False
        change = False
        msg = ""
        data = web.input()
        # Group self-service is a student-only page.
        if self.user_manager.has_staff_rights_on_course(course):
            raise web.notfound()
        elif not self.user_manager.course_is_open_to_user(course, lti=False):
            return self.template_helper.get_renderer().course_unavailable()
        elif "register_group" in data:
            change = True
            if course.can_students_choose_group() and course.use_classrooms():
                # Classroom mode: "register_group" is an index into the
                # student's classroom aggregation.
                # NOTE(review): assumes the aggregation document carries
                # "groups" entries with "size" and "students" keys —
                # confirm against the aggregation schema.
                aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username})
                if int(data["register_group"]) >= 0 and (len(aggregation["groups"]) > int(data["register_group"])):
                    group = aggregation["groups"][int(data["register_group"])]
                    # Only join when the target group still has room.
                    # NOTE(review): a full group silently does nothing
                    # (no error message is set) — confirm intended.
                    if group["size"] > len(group["students"]):
                        # Leave whatever group the student is in, then
                        # join the requested one, in a single document
                        # replace.
                        for index, group in enumerate(aggregation["groups"]):
                            if username in group["students"]:
                                aggregation["groups"][index]["students"].remove(username)
                        aggregation["groups"][int(data["register_group"])]["students"].append(username)
                        self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation)
                        self._logger.info("User %s registered to group %s/%s/%s", username, courseid, aggregation["description"], data["register_group"])
                else:
                    error = True
                    msg = _("Couldn't register to the specified group.")
            elif course.can_students_choose_group():
                # Team mode: "register_group" is the ObjectId of the
                # target aggregation.
                aggregation = self.database.aggregations.find_one(
                    {"courseid": course.get_id(), "students": username})
                if aggregation is not None:
                    # Fully leave the previous team (both the top-level
                    # student list and any group membership inside it).
                    aggregation["students"].remove(username)
                    for index, group in enumerate(aggregation["groups"]):
                        if username in group["students"]:
                            aggregation["groups"][index]["students"].remove(username)
                    self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation)
                # Add student in the classroom and unique group
                self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])},
                                                               {"$push": {"students": username}})
                new_aggregation = self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])},
                                                                                 {"$push": {"groups.0.students": username}})
                if new_aggregation is None:
                    error = True
                    msg = _("Couldn't register to the specified group.")
                else:
                    # NOTE(review): logs the *previous* aggregation's
                    # description and raises if the student had no prior
                    # aggregation (aggregation is None here) — verify.
                    self._logger.info("User %s registered to team %s/%s", username, courseid, aggregation["description"])
            else:
                error = True
                msg = _("You are not allowed to change group.")
        elif "unregister_group" in data:
            change = True
            if course.can_students_choose_group():
                # Only match when the student is actually inside a group.
                aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username, "groups.students": username})
                if aggregation is not None:
                    for index, group in enumerate(aggregation["groups"]):
                        if username in group["students"]:
                            aggregation["groups"][index]["students"].remove(username)
                    self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation)
                    self._logger.info("User %s unregistered from group/team %s/%s", username, courseid, aggregation["description"])
                else:
                    error = True
                    msg = _("You're not registered in a group.")
            else:
                error = True
                msg = _("You are not allowed to change group.")
        # Gather the data needed by the templates: the student's 5 most
        # recent submissions for this course, plus aggregation/user info.
        tasks = course.get_tasks()
        last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": courseid, "taskid": {"$in": list(tasks.keys())}})
        for submission in last_submissions:
            submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language())
        aggregation = self.user_manager.get_course_user_aggregation(course)
        aggregations = self.user_manager.get_course_aggregations(course)
        users = self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course))
        if course.use_classrooms():
            # Locate the student's own group (1-based index for display).
            mygroup = None
            for index, group in enumerate(aggregation["groups"]):
                if self.user_manager.session_username() in group["students"]:
                    mygroup = group
                    mygroup["index"] = index + 1
            return self.template_helper.get_renderer().classroom(course, last_submissions, aggregation, users,
                                                                 mygroup, msg, error, change)
        else:
            return self.template_helper.get_renderer().team(course, last_submissions, aggregations, users,
                                                            aggregation, msg, error)
"def",
"GET_AUTH",
"(",
"self",
",",
"courseid",
")",
":",
"# pylint: disable=arguments-differ",
"course",
"=",
"self",
".",
"course_factory",
".",
"get_course",
"(",
"courseid",
")",
"username",
"=",
"self",
".",
"user_manager",
".",
"session_username",
"(",
")",
"error",
"=",
"False",
"change",
"=",
"False",
"msg",
"=",
"\"\"",
"data",
"=",
"web",
".",
"input",
"(",
")",
"if",
"self",
".",
"user_manager",
".",
"has_staff_rights_on_course",
"(",
"course",
")",
":",
"raise",
"web",
".",
"notfound",
"(",
")",
"elif",
"not",
"self",
".",
"user_manager",
".",
"course_is_open_to_user",
"(",
"course",
",",
"lti",
"=",
"False",
")",
":",
"return",
"self",
".",
"template_helper",
".",
"get_renderer",
"(",
")",
".",
"course_unavailable",
"(",
")",
"elif",
"\"register_group\"",
"in",
"data",
":",
"change",
"=",
"True",
"if",
"course",
".",
"can_students_choose_group",
"(",
")",
"and",
"course",
".",
"use_classrooms",
"(",
")",
":",
"aggregation",
"=",
"self",
".",
"database",
".",
"aggregations",
".",
"find_one",
"(",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
",",
"\"students\"",
":",
"username",
"}",
")",
"if",
"int",
"(",
"data",
"[",
"\"register_group\"",
"]",
")",
">=",
"0",
"and",
"(",
"len",
"(",
"aggregation",
"[",
"\"groups\"",
"]",
")",
">",
"int",
"(",
"data",
"[",
"\"register_group\"",
"]",
")",
")",
":",
"group",
"=",
"aggregation",
"[",
"\"groups\"",
"]",
"[",
"int",
"(",
"data",
"[",
"\"register_group\"",
"]",
")",
"]",
"if",
"group",
"[",
"\"size\"",
"]",
">",
"len",
"(",
"group",
"[",
"\"students\"",
"]",
")",
":",
"for",
"index",
",",
"group",
"in",
"enumerate",
"(",
"aggregation",
"[",
"\"groups\"",
"]",
")",
":",
"if",
"username",
"in",
"group",
"[",
"\"students\"",
"]",
":",
"aggregation",
"[",
"\"groups\"",
"]",
"[",
"index",
"]",
"[",
"\"students\"",
"]",
".",
"remove",
"(",
"username",
")",
"aggregation",
"[",
"\"groups\"",
"]",
"[",
"int",
"(",
"data",
"[",
"\"register_group\"",
"]",
")",
"]",
"[",
"\"students\"",
"]",
".",
"append",
"(",
"username",
")",
"self",
".",
"database",
".",
"aggregations",
".",
"replace_one",
"(",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
",",
"\"students\"",
":",
"username",
"}",
",",
"aggregation",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"User %s registered to group %s/%s/%s\"",
",",
"username",
",",
"courseid",
",",
"aggregation",
"[",
"\"description\"",
"]",
",",
"data",
"[",
"\"register_group\"",
"]",
")",
"else",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Couldn't register to the specified group.\"",
")",
"elif",
"course",
".",
"can_students_choose_group",
"(",
")",
":",
"aggregation",
"=",
"self",
".",
"database",
".",
"aggregations",
".",
"find_one",
"(",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
",",
"\"students\"",
":",
"username",
"}",
")",
"if",
"aggregation",
"is",
"not",
"None",
":",
"aggregation",
"[",
"\"students\"",
"]",
".",
"remove",
"(",
"username",
")",
"for",
"index",
",",
"group",
"in",
"enumerate",
"(",
"aggregation",
"[",
"\"groups\"",
"]",
")",
":",
"if",
"username",
"in",
"group",
"[",
"\"students\"",
"]",
":",
"aggregation",
"[",
"\"groups\"",
"]",
"[",
"index",
"]",
"[",
"\"students\"",
"]",
".",
"remove",
"(",
"username",
")",
"self",
".",
"database",
".",
"aggregations",
".",
"replace_one",
"(",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
",",
"\"students\"",
":",
"username",
"}",
",",
"aggregation",
")",
"# Add student in the classroom and unique group",
"self",
".",
"database",
".",
"aggregations",
".",
"find_one_and_update",
"(",
"{",
"\"_id\"",
":",
"ObjectId",
"(",
"data",
"[",
"\"register_group\"",
"]",
")",
"}",
",",
"{",
"\"$push\"",
":",
"{",
"\"students\"",
":",
"username",
"}",
"}",
")",
"new_aggregation",
"=",
"self",
".",
"database",
".",
"aggregations",
".",
"find_one_and_update",
"(",
"{",
"\"_id\"",
":",
"ObjectId",
"(",
"data",
"[",
"\"register_group\"",
"]",
")",
"}",
",",
"{",
"\"$push\"",
":",
"{",
"\"groups.0.students\"",
":",
"username",
"}",
"}",
")",
"if",
"new_aggregation",
"is",
"None",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"Couldn't register to the specified group.\"",
")",
"else",
":",
"self",
".",
"_logger",
".",
"info",
"(",
"\"User %s registered to team %s/%s\"",
",",
"username",
",",
"courseid",
",",
"aggregation",
"[",
"\"description\"",
"]",
")",
"else",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"You are not allowed to change group.\"",
")",
"elif",
"\"unregister_group\"",
"in",
"data",
":",
"change",
"=",
"True",
"if",
"course",
".",
"can_students_choose_group",
"(",
")",
":",
"aggregation",
"=",
"self",
".",
"database",
".",
"aggregations",
".",
"find_one",
"(",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
",",
"\"students\"",
":",
"username",
",",
"\"groups.students\"",
":",
"username",
"}",
")",
"if",
"aggregation",
"is",
"not",
"None",
":",
"for",
"index",
",",
"group",
"in",
"enumerate",
"(",
"aggregation",
"[",
"\"groups\"",
"]",
")",
":",
"if",
"username",
"in",
"group",
"[",
"\"students\"",
"]",
":",
"aggregation",
"[",
"\"groups\"",
"]",
"[",
"index",
"]",
"[",
"\"students\"",
"]",
".",
"remove",
"(",
"username",
")",
"self",
".",
"database",
".",
"aggregations",
".",
"replace_one",
"(",
"{",
"\"courseid\"",
":",
"course",
".",
"get_id",
"(",
")",
",",
"\"students\"",
":",
"username",
"}",
",",
"aggregation",
")",
"self",
".",
"_logger",
".",
"info",
"(",
"\"User %s unregistered from group/team %s/%s\"",
",",
"username",
",",
"courseid",
",",
"aggregation",
"[",
"\"description\"",
"]",
")",
"else",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"You're not registered in a group.\"",
")",
"else",
":",
"error",
"=",
"True",
"msg",
"=",
"_",
"(",
"\"You are not allowed to change group.\"",
")",
"tasks",
"=",
"course",
".",
"get_tasks",
"(",
")",
"last_submissions",
"=",
"self",
".",
"submission_manager",
".",
"get_user_last_submissions",
"(",
"5",
",",
"{",
"\"courseid\"",
":",
"courseid",
",",
"\"taskid\"",
":",
"{",
"\"$in\"",
":",
"list",
"(",
"tasks",
".",
"keys",
"(",
")",
")",
"}",
"}",
")",
"for",
"submission",
"in",
"last_submissions",
":",
"submission",
"[",
"\"taskname\"",
"]",
"=",
"tasks",
"[",
"submission",
"[",
"'taskid'",
"]",
"]",
".",
"get_name",
"(",
"self",
".",
"user_manager",
".",
"session_language",
"(",
")",
")",
"aggregation",
"=",
"self",
".",
"user_manager",
".",
"get_course_user_aggregation",
"(",
"course",
")",
"aggregations",
"=",
"self",
".",
"user_manager",
".",
"get_course_aggregations",
"(",
"course",
")",
"users",
"=",
"self",
".",
"user_manager",
".",
"get_users_info",
"(",
"self",
".",
"user_manager",
".",
"get_course_registered_users",
"(",
"course",
")",
")",
"if",
"course",
".",
"use_classrooms",
"(",
")",
":",
"mygroup",
"=",
"None",
"for",
"index",
",",
"group",
"in",
"enumerate",
"(",
"aggregation",
"[",
"\"groups\"",
"]",
")",
":",
"if",
"self",
".",
"user_manager",
".",
"session_username",
"(",
")",
"in",
"group",
"[",
"\"students\"",
"]",
":",
"mygroup",
"=",
"group",
"mygroup",
"[",
"\"index\"",
"]",
"=",
"index",
"+",
"1",
"return",
"self",
".",
"template_helper",
".",
"get_renderer",
"(",
")",
".",
"classroom",
"(",
"course",
",",
"last_submissions",
",",
"aggregation",
",",
"users",
",",
"mygroup",
",",
"msg",
",",
"error",
",",
"change",
")",
"else",
":",
"return",
"self",
".",
"template_helper",
".",
"get_renderer",
"(",
")",
".",
"team",
"(",
"course",
",",
"last_submissions",
",",
"aggregations",
",",
"users",
",",
"aggregation",
",",
"msg",
",",
"error",
")"
] | 58.8 | 35 |
def modify(self, request, nodes, namespace, root_id, post_cut, breadcrumb):
    """
    Actual modifier function

    :param request: request
    :param nodes: complete list of nodes
    :param namespace: Menu namespace
    :param root_id: eventual root_id
    :param post_cut: flag for modifier stage
    :param breadcrumb: flag for modifier stage
    :return: nodeslist
    """
    blog_config = None
    page = getattr(request, 'current_page', None)
    if page and page.application_urls:
        apphook = apphook_pool.get_apphook(page.application_urls)
        if apphook and apphook.app_config:
            # Resolve the namespace from the current URL and cache the
            # matching apphook configuration on the modifier instance.
            namespace = resolve(request.path).namespace
            if not self._config.get(namespace, False):
                self._config[namespace] = apphook.get_config(namespace)
            blog_config = self._config[namespace]
    try:
        if blog_config and (
            not isinstance(blog_config, BlogConfig) or
            blog_config.menu_structure != MENU_TYPE_CATEGORIES
        ):
            # Not a blog config using a category-based menu: nothing to do.
            return nodes
    except AttributeError:  # pragma: no cover
        # in case `menu_structure` is not present in config
        return nodes
    if post_cut:
        return nodes
    post = getattr(request, get_setting('CURRENT_POST_IDENTIFIER'), None)
    category = None
    if post and post.__class__ == Post:
        category = post.categories.first()
    if not category:
        return nodes
    # Mark the node representing the current post's first category.
    wanted_id = '{0}-{1}'.format(category.__class__.__name__, category.pk)
    for node in nodes:
        if node.id == wanted_id:
            node.selected = True
    return nodes
"def",
"modify",
"(",
"self",
",",
"request",
",",
"nodes",
",",
"namespace",
",",
"root_id",
",",
"post_cut",
",",
"breadcrumb",
")",
":",
"app",
"=",
"None",
"config",
"=",
"None",
"if",
"getattr",
"(",
"request",
",",
"'current_page'",
",",
"None",
")",
"and",
"request",
".",
"current_page",
".",
"application_urls",
":",
"app",
"=",
"apphook_pool",
".",
"get_apphook",
"(",
"request",
".",
"current_page",
".",
"application_urls",
")",
"if",
"app",
"and",
"app",
".",
"app_config",
":",
"namespace",
"=",
"resolve",
"(",
"request",
".",
"path",
")",
".",
"namespace",
"if",
"not",
"self",
".",
"_config",
".",
"get",
"(",
"namespace",
",",
"False",
")",
":",
"self",
".",
"_config",
"[",
"namespace",
"]",
"=",
"app",
".",
"get_config",
"(",
"namespace",
")",
"config",
"=",
"self",
".",
"_config",
"[",
"namespace",
"]",
"try",
":",
"if",
"config",
"and",
"(",
"not",
"isinstance",
"(",
"config",
",",
"BlogConfig",
")",
"or",
"config",
".",
"menu_structure",
"!=",
"MENU_TYPE_CATEGORIES",
")",
":",
"return",
"nodes",
"except",
"AttributeError",
":",
"# pragma: no cover",
"# in case `menu_structure` is not present in config",
"return",
"nodes",
"if",
"post_cut",
":",
"return",
"nodes",
"current_post",
"=",
"getattr",
"(",
"request",
",",
"get_setting",
"(",
"'CURRENT_POST_IDENTIFIER'",
")",
",",
"None",
")",
"category",
"=",
"None",
"if",
"current_post",
"and",
"current_post",
".",
"__class__",
"==",
"Post",
":",
"category",
"=",
"current_post",
".",
"categories",
".",
"first",
"(",
")",
"if",
"not",
"category",
":",
"return",
"nodes",
"for",
"node",
"in",
"nodes",
":",
"if",
"'{0}-{1}'",
".",
"format",
"(",
"category",
".",
"__class__",
".",
"__name__",
",",
"category",
".",
"pk",
")",
"==",
"node",
".",
"id",
":",
"node",
".",
"selected",
"=",
"True",
"return",
"nodes"
] | 39.395349 | 18.232558 |
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.

    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
        All (non-empty) lists must define the same number of interfaces
        and endpoints, and endpoint descriptors must be given in the same
        order, bEndpointAddress-wise.
        os_list:
            OSDesc
    """
    # Count fields (one le32 per non-empty speed list) and the structure
    # fields holding the concatenated descriptors, accumulated per speed.
    count_field_list = []
    descr_field_list = []
    # Constructor keyword arguments for the dynamically-built class below.
    kw = {}
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            # Validate every entry before generating any type.
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            # Give each descriptor a positional field name (desc_0, desc_1, ...).
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            # The presence of a list implies its flag, even if the caller
            # did not set it.
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            # Packed little-endian struct whose fields are the ctypes
            # types of the given descriptor instances, in order.
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            # A flag without its descriptor list cannot be serialised.
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # Build the final header subclass; its name encodes which speed lists
    # are present so repr() output is self-describing.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
"def",
"getDescsV2",
"(",
"flags",
",",
"fs_list",
"=",
"(",
")",
",",
"hs_list",
"=",
"(",
")",
",",
"ss_list",
"=",
"(",
")",
",",
"os_list",
"=",
"(",
")",
")",
":",
"count_field_list",
"=",
"[",
"]",
"descr_field_list",
"=",
"[",
"]",
"kw",
"=",
"{",
"}",
"for",
"descriptor_list",
",",
"flag",
",",
"prefix",
",",
"allowed_descriptor_klass",
"in",
"(",
"(",
"fs_list",
",",
"HAS_FS_DESC",
",",
"'fs'",
",",
"USBDescriptorHeader",
")",
",",
"(",
"hs_list",
",",
"HAS_HS_DESC",
",",
"'hs'",
",",
"USBDescriptorHeader",
")",
",",
"(",
"ss_list",
",",
"HAS_SS_DESC",
",",
"'ss'",
",",
"USBDescriptorHeader",
")",
",",
"(",
"os_list",
",",
"HAS_MS_OS_DESC",
",",
"'os'",
",",
"OSDescHeader",
")",
",",
")",
":",
"if",
"descriptor_list",
":",
"for",
"index",
",",
"descriptor",
"in",
"enumerate",
"(",
"descriptor_list",
")",
":",
"if",
"not",
"isinstance",
"(",
"descriptor",
",",
"allowed_descriptor_klass",
")",
":",
"raise",
"TypeError",
"(",
"'Descriptor %r of unexpected type: %r'",
"%",
"(",
"index",
",",
"type",
"(",
"descriptor",
")",
",",
")",
",",
")",
"descriptor_map",
"=",
"[",
"(",
"'desc_%i'",
"%",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"enumerate",
"(",
"descriptor_list",
")",
"]",
"flags",
"|=",
"flag",
"count_name",
"=",
"prefix",
"+",
"'count'",
"descr_name",
"=",
"prefix",
"+",
"'descr'",
"count_field_list",
".",
"append",
"(",
"(",
"count_name",
",",
"le32",
")",
")",
"descr_type",
"=",
"type",
"(",
"'t_'",
"+",
"descr_name",
",",
"(",
"ctypes",
".",
"LittleEndianStructure",
",",
")",
",",
"{",
"'_pack_'",
":",
"1",
",",
"'_fields_'",
":",
"[",
"(",
"x",
",",
"type",
"(",
"y",
")",
")",
"for",
"x",
",",
"y",
"in",
"descriptor_map",
"]",
",",
"}",
")",
"descr_field_list",
".",
"append",
"(",
"(",
"descr_name",
",",
"descr_type",
")",
")",
"kw",
"[",
"count_name",
"]",
"=",
"len",
"(",
"descriptor_map",
")",
"kw",
"[",
"descr_name",
"]",
"=",
"descr_type",
"(",
"*",
"*",
"dict",
"(",
"descriptor_map",
")",
")",
"elif",
"flags",
"&",
"flag",
":",
"raise",
"ValueError",
"(",
"'Flag %r set but descriptor list empty, cannot generate type.'",
"%",
"(",
"FLAGS",
".",
"get",
"(",
"flag",
")",
",",
")",
")",
"klass",
"=",
"type",
"(",
"'DescsV2_0x%02x'",
"%",
"(",
"flags",
"&",
"(",
"HAS_FS_DESC",
"|",
"HAS_HS_DESC",
"|",
"HAS_SS_DESC",
"|",
"HAS_MS_OS_DESC",
")",
",",
"# XXX: include contained descriptors type information ? (and name ?)",
")",
",",
"(",
"DescsHeadV2",
",",
")",
",",
"{",
"'_fields_'",
":",
"count_field_list",
"+",
"descr_field_list",
",",
"}",
",",
")",
"return",
"klass",
"(",
"magic",
"=",
"DESCRIPTORS_MAGIC_V2",
",",
"length",
"=",
"ctypes",
".",
"sizeof",
"(",
"klass",
")",
",",
"flags",
"=",
"flags",
",",
"*",
"*",
"kw",
")"
] | 34.336957 | 16.771739 |
def request_token(self):
    """Fetch an OAuth request token and its secret from the server."""
    server_creds = self._server_cache[self.client.server]
    oauth_client = OAuth1(
        client_key=server_creds.key,
        client_secret=server_creds.secret,
        callback_uri=self.callback,
    )
    response = self._requester(
        requests.post,
        "oauth/request_token",
        auth=oauth_client,
    )
    # The token endpoint answers with a urlencoded body.
    parsed = parse.parse_qs(response.text)
    return {
        'token': parsed[self.PARAM_TOKEN][0],
        'token_secret': parsed[self.PARAM_TOKEN_SECRET][0],
    }
"def",
"request_token",
"(",
"self",
")",
":",
"client",
"=",
"OAuth1",
"(",
"client_key",
"=",
"self",
".",
"_server_cache",
"[",
"self",
".",
"client",
".",
"server",
"]",
".",
"key",
",",
"client_secret",
"=",
"self",
".",
"_server_cache",
"[",
"self",
".",
"client",
".",
"server",
"]",
".",
"secret",
",",
"callback_uri",
"=",
"self",
".",
"callback",
",",
")",
"request",
"=",
"{",
"\"auth\"",
":",
"client",
"}",
"response",
"=",
"self",
".",
"_requester",
"(",
"requests",
".",
"post",
",",
"\"oauth/request_token\"",
",",
"*",
"*",
"request",
")",
"data",
"=",
"parse",
".",
"parse_qs",
"(",
"response",
".",
"text",
")",
"data",
"=",
"{",
"'token'",
":",
"data",
"[",
"self",
".",
"PARAM_TOKEN",
"]",
"[",
"0",
"]",
",",
"'token_secret'",
":",
"data",
"[",
"self",
".",
"PARAM_TOKEN_SECRET",
"]",
"[",
"0",
"]",
"}",
"return",
"data"
] | 28.545455 | 19.545455 |
def fix_calldef_decls(decls, enums, cxx_std):
    """
    some times gccxml report typedefs defined in no namespace
    it happens for example in next situation
    template< typename X>
    void ddd(){ typedef typename X::Y YY;}
    if I will fail on this bug next time, the right way to fix it may be
    different
    """
    patch_default_args = default_argument_patcher_t(enums, cxx_std)
    # ``decls`` must be the flat list of every declaration to patch.
    for declaration in decls:
        patch_default_args(declaration)
        # Casting operators need an extra dedicated patching pass.
        if isinstance(declaration, declarations.casting_operator_t):
            _casting_oper_patcher_(declaration)
"def",
"fix_calldef_decls",
"(",
"decls",
",",
"enums",
",",
"cxx_std",
")",
":",
"default_arg_patcher",
"=",
"default_argument_patcher_t",
"(",
"enums",
",",
"cxx_std",
")",
"# decls should be flat list of all declarations, you want to apply patch on",
"for",
"decl",
"in",
"decls",
":",
"default_arg_patcher",
"(",
"decl",
")",
"if",
"isinstance",
"(",
"decl",
",",
"declarations",
".",
"casting_operator_t",
")",
":",
"_casting_oper_patcher_",
"(",
"decl",
")"
] | 41.266667 | 14.6 |
def disable(self):
    """
    Disable the crash reporter. No reports will be sent or saved.
    """
    if not CrashReporter.active:
        return
    CrashReporter.active = False
    # Put back the excepthook that was in place before enabling.
    sys.excepthook = self._excepthook
    self.stop_watcher()
    self.logger.info('CrashReporter: Disabled')
"def",
"disable",
"(",
"self",
")",
":",
"if",
"CrashReporter",
".",
"active",
":",
"CrashReporter",
".",
"active",
"=",
"False",
"# Restore the original excepthook",
"sys",
".",
"excepthook",
"=",
"self",
".",
"_excepthook",
"self",
".",
"stop_watcher",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'CrashReporter: Disabled'",
")"
] | 35.7 | 9.3 |
def decode(self, bytes, raw=False):
    """decode(bytearray, raw=False) -> value

    Decodes the given bytearray and returns the number of
    (fractional) seconds.

    If the optional parameter ``raw`` is ``True``, the byte (U8)
    itself will be returned.
    """
    # NOTE: ``bytes`` shadows the builtin, but renaming it would change
    # the keyword interface for existing callers.
    value = super(Time8Type, self).decode(bytes)
    if raw:
        return value
    # One raw unit is 1/256 of a second; scale to fractional seconds.
    return value / 256.0
"def",
"decode",
"(",
"self",
",",
"bytes",
",",
"raw",
"=",
"False",
")",
":",
"result",
"=",
"super",
"(",
"Time8Type",
",",
"self",
")",
".",
"decode",
"(",
"bytes",
")",
"if",
"not",
"raw",
":",
"result",
"/=",
"256.0",
"return",
"result"
] | 25.25 | 21.1875 |
def add_occurrences(self, start_time, end_time, **rrule_params):
    '''
    Add one or more occurences to the event using a comparable API to
    ``dateutil.rrule``.

    If ``rrule_params`` does not contain a ``freq``, one will be defaulted
    to ``rrule.DAILY``.

    Because ``rrule.rrule`` returns an iterator that can essentially be
    unbounded, we need to slightly alter the expected behavior here in order
    to enforce a finite number of occurrence creation.

    If both ``count`` and ``until`` entries are missing from ``rrule_params``,
    only a single ``Occurrence`` instance will be created using the exact
    ``start_time`` and ``end_time`` values.
    '''
    if not (rrule_params.get('count') or rrule_params.get('until')):
        # No bound on the recurrence: create one explicit occurrence only.
        self.occurrence_set.create(start_time=start_time, end_time=end_time)
        return
    rrule_params.setdefault('freq', rrule.DAILY)
    duration = end_time - start_time
    # Materialise every occurrence of the (now finite) rule, preserving
    # the original event duration, and insert them in a single query.
    occurrences = [
        Occurrence(start_time=moment, end_time=moment + duration, event=self)
        for moment in rrule.rrule(dtstart=start_time, **rrule_params)
    ]
    self.occurrence_set.bulk_create(occurrences)
"def",
"add_occurrences",
"(",
"self",
",",
"start_time",
",",
"end_time",
",",
"*",
"*",
"rrule_params",
")",
":",
"count",
"=",
"rrule_params",
".",
"get",
"(",
"'count'",
")",
"until",
"=",
"rrule_params",
".",
"get",
"(",
"'until'",
")",
"if",
"not",
"(",
"count",
"or",
"until",
")",
":",
"self",
".",
"occurrence_set",
".",
"create",
"(",
"start_time",
"=",
"start_time",
",",
"end_time",
"=",
"end_time",
")",
"else",
":",
"rrule_params",
".",
"setdefault",
"(",
"'freq'",
",",
"rrule",
".",
"DAILY",
")",
"delta",
"=",
"end_time",
"-",
"start_time",
"occurrences",
"=",
"[",
"]",
"for",
"ev",
"in",
"rrule",
".",
"rrule",
"(",
"dtstart",
"=",
"start_time",
",",
"*",
"*",
"rrule_params",
")",
":",
"occurrences",
".",
"append",
"(",
"Occurrence",
"(",
"start_time",
"=",
"ev",
",",
"end_time",
"=",
"ev",
"+",
"delta",
",",
"event",
"=",
"self",
")",
")",
"self",
".",
"occurrence_set",
".",
"bulk_create",
"(",
"occurrences",
")"
] | 46.740741 | 25.333333 |
def getBigIndexFromIndices(self, indices):
    """
    Get the big index from a given set of indices
    @param indices index along each axis (one entry per dimension)
    @return big index
    @note no checks are performed to ensure that the returned
          indices are valid
    """
    # Dot product of the index vector with the per-axis strides.
    # ``sum`` over a generator replaces the original
    # ``reduce(operator.add, [...], 0)``: same result, no temporary
    # list, and clearer intent.
    return sum(self.dimProd[i] * indices[i] for i in range(self.ndims))
"def",
"getBigIndexFromIndices",
"(",
"self",
",",
"indices",
")",
":",
"return",
"reduce",
"(",
"operator",
".",
"add",
",",
"[",
"self",
".",
"dimProd",
"[",
"i",
"]",
"*",
"indices",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"ndims",
")",
"]",
",",
"0",
")"
] | 38.5 | 13.9 |
def request_challenges(self, identifier):
    """
    Create a new authorization.

    :param ~acme.messages.Identifier identifier: The identifier to
        authorize.

    :return: The new authorization resource.
    :rtype: Deferred[`~acme.messages.AuthorizationResource`]
    """
    action = LOG_ACME_CREATE_AUTHORIZATION(identifier=identifier)
    with action.context():
        message = messages.NewAuthorization(identifier=identifier)
        # POST to the directory URL registered for this message type, then
        # validate and parse the reply inside the logging action's context.
        return (
            DeferredContext(
                self._client.post(self.directory[message], message))
            # A newly created authorization must come back as 201 Created.
            .addCallback(self._expect_response, http.CREATED)
            .addCallback(self._parse_authorization)
            # Ensure the returned authorization matches what we asked for.
            .addCallback(self._check_authorization, identifier)
            .addCallback(
                tap(lambda a: action.add_success_fields(authorization=a)))
            .addActionFinish())
"def",
"request_challenges",
"(",
"self",
",",
"identifier",
")",
":",
"action",
"=",
"LOG_ACME_CREATE_AUTHORIZATION",
"(",
"identifier",
"=",
"identifier",
")",
"with",
"action",
".",
"context",
"(",
")",
":",
"message",
"=",
"messages",
".",
"NewAuthorization",
"(",
"identifier",
"=",
"identifier",
")",
"return",
"(",
"DeferredContext",
"(",
"self",
".",
"_client",
".",
"post",
"(",
"self",
".",
"directory",
"[",
"message",
"]",
",",
"message",
")",
")",
".",
"addCallback",
"(",
"self",
".",
"_expect_response",
",",
"http",
".",
"CREATED",
")",
".",
"addCallback",
"(",
"self",
".",
"_parse_authorization",
")",
".",
"addCallback",
"(",
"self",
".",
"_check_authorization",
",",
"identifier",
")",
".",
"addCallback",
"(",
"tap",
"(",
"lambda",
"a",
":",
"action",
".",
"add_success_fields",
"(",
"authorization",
"=",
"a",
")",
")",
")",
".",
"addActionFinish",
"(",
")",
")"
] | 42 | 18.909091 |
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str],
Callable[[str, str, int], str]]:
"""Configure ``gettext`` for given package.
Args:
__pkg: Package to use as location for :program:`gettext` files
Returns:
:program:`gettext` functions for singular and plural translations
"""
package_locale = path.join(path.dirname(__pkg.__file__), 'locale')
gettext.install(__pkg.__name__, package_locale)
return gettext.gettext, gettext.ngettext | [
"def",
"setup",
"(",
"__pkg",
":",
"ModuleType",
")",
"->",
"Tuple",
"[",
"Callable",
"[",
"[",
"str",
"]",
",",
"str",
"]",
",",
"Callable",
"[",
"[",
"str",
",",
"str",
",",
"int",
"]",
",",
"str",
"]",
"]",
":",
"package_locale",
"=",
"path",
".",
"join",
"(",
"path",
".",
"dirname",
"(",
"__pkg",
".",
"__file__",
")",
",",
"'locale'",
")",
"gettext",
".",
"install",
"(",
"__pkg",
".",
"__name__",
",",
"package_locale",
")",
"return",
"gettext",
".",
"gettext",
",",
"gettext",
".",
"ngettext"
] | 36.571429 | 24 |
def _apply_dict(self, qe_dict):
    ''' Apply a query expression, updating the query object '''
    for raw_key, value in qe_dict.items():
        key = resolve_name(self.type, raw_key)
        if key not in self.__query:
            # First assignment to this field: store it as-is.
            self.__query[key] = value
        elif isinstance(self.__query[key], dict) and isinstance(value, dict):
            # Merge subsequent dict assignments into the existing one.
            self.__query[key].update(**value)
        else:
            raise BadQueryException('Multiple assignments to a field must all be dicts.')
"def",
"_apply_dict",
"(",
"self",
",",
"qe_dict",
")",
":",
"for",
"k",
",",
"v",
"in",
"qe_dict",
".",
"items",
"(",
")",
":",
"k",
"=",
"resolve_name",
"(",
"self",
".",
"type",
",",
"k",
")",
"if",
"not",
"k",
"in",
"self",
".",
"__query",
":",
"self",
".",
"__query",
"[",
"k",
"]",
"=",
"v",
"continue",
"if",
"not",
"isinstance",
"(",
"self",
".",
"__query",
"[",
"k",
"]",
",",
"dict",
")",
"or",
"not",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"raise",
"BadQueryException",
"(",
"'Multiple assignments to a field must all be dicts.'",
")",
"self",
".",
"__query",
"[",
"k",
"]",
".",
"update",
"(",
"*",
"*",
"v",
")"
] | 48.4 | 16 |
def get_scales(self, aesthetic):
    """
    Return the scale for the aesthetic or None if there
    isn't one.

    These are the scales specified by the user e.g
    `ggplot() + scale_x_continuous()`
    or those added by default during the plot building
    process
    """
    matches = self.find(aesthetic)
    # The first True entry marks the scale registered for this aesthetic.
    if True in matches:
        return self[matches.index(True)]
    return None
"def",
"get_scales",
"(",
"self",
",",
"aesthetic",
")",
":",
"bool_lst",
"=",
"self",
".",
"find",
"(",
"aesthetic",
")",
"try",
":",
"idx",
"=",
"bool_lst",
".",
"index",
"(",
"True",
")",
"return",
"self",
"[",
"idx",
"]",
"except",
"ValueError",
":",
"return",
"None"
] | 29.3125 | 14.0625 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.