text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def reset_scorer(self):
"""Reset the scorer used for couration."""
self.scorer = get_eidos_bayesian_scorer()
for corpus_id, corpus in self.corpora.items():
corpus.curations = {} | [
"def",
"reset_scorer",
"(",
"self",
")",
":",
"self",
".",
"scorer",
"=",
"get_eidos_bayesian_scorer",
"(",
")",
"for",
"corpus_id",
",",
"corpus",
"in",
"self",
".",
"corpora",
".",
"items",
"(",
")",
":",
"corpus",
".",
"curations",
"=",
"{",
"}"
] | 41.8 | 9.4 |
def plot_nodes(self, nodelist, theta, group):
"""
Plots nodes to screen.
"""
for i, node in enumerate(nodelist):
r = self.internal_radius + i * self.scale
x, y = get_cartesian(r, theta)
circle = plt.Circle(xy=(x, y), radius=self.dot_radius,
color=self.node_colormap[group], linewidth=0)
self.ax.add_patch(circle) | [
"def",
"plot_nodes",
"(",
"self",
",",
"nodelist",
",",
"theta",
",",
"group",
")",
":",
"for",
"i",
",",
"node",
"in",
"enumerate",
"(",
"nodelist",
")",
":",
"r",
"=",
"self",
".",
"internal_radius",
"+",
"i",
"*",
"self",
".",
"scale",
"x",
",",
"y",
"=",
"get_cartesian",
"(",
"r",
",",
"theta",
")",
"circle",
"=",
"plt",
".",
"Circle",
"(",
"xy",
"=",
"(",
"x",
",",
"y",
")",
",",
"radius",
"=",
"self",
".",
"dot_radius",
",",
"color",
"=",
"self",
".",
"node_colormap",
"[",
"group",
"]",
",",
"linewidth",
"=",
"0",
")",
"self",
".",
"ax",
".",
"add_patch",
"(",
"circle",
")"
] | 41.5 | 9.9 |
def get_time_remaining_estimate(self):
"""
Looks through all power sources and returns total time remaining estimate
or TIME_REMAINING_UNLIMITED if ac power supply is online.
"""
all_energy_now = []
all_power_now = []
try:
type = self.power_source_type()
if type == common.POWER_TYPE_AC:
if self.is_ac_online(supply_path):
return common.TIME_REMAINING_UNLIMITED
elif type == common.POWER_TYPE_BATTERY:
if self.is_battery_present() and self.is_battery_discharging():
energy_full, energy_now, power_now = self.get_battery_state()
all_energy_now.append(energy_now)
all_power_now.append(power_now)
else:
warnings.warn("UPS is not supported.")
except (RuntimeError, IOError) as e:
warnings.warn("Unable to read system power information!", category=RuntimeWarning)
if len(all_energy_now) > 0:
try:
return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
except ZeroDivisionError as e:
warnings.warn("Unable to calculate time remaining estimate: {0}".format(e), category=RuntimeWarning)
return common.TIME_REMAINING_UNKNOWN
else:
return common.TIME_REMAINING_UNKNOWN | [
"def",
"get_time_remaining_estimate",
"(",
"self",
")",
":",
"all_energy_now",
"=",
"[",
"]",
"all_power_now",
"=",
"[",
"]",
"try",
":",
"type",
"=",
"self",
".",
"power_source_type",
"(",
")",
"if",
"type",
"==",
"common",
".",
"POWER_TYPE_AC",
":",
"if",
"self",
".",
"is_ac_online",
"(",
"supply_path",
")",
":",
"return",
"common",
".",
"TIME_REMAINING_UNLIMITED",
"elif",
"type",
"==",
"common",
".",
"POWER_TYPE_BATTERY",
":",
"if",
"self",
".",
"is_battery_present",
"(",
")",
"and",
"self",
".",
"is_battery_discharging",
"(",
")",
":",
"energy_full",
",",
"energy_now",
",",
"power_now",
"=",
"self",
".",
"get_battery_state",
"(",
")",
"all_energy_now",
".",
"append",
"(",
"energy_now",
")",
"all_power_now",
".",
"append",
"(",
"power_now",
")",
"else",
":",
"warnings",
".",
"warn",
"(",
"\"UPS is not supported.\"",
")",
"except",
"(",
"RuntimeError",
",",
"IOError",
")",
"as",
"e",
":",
"warnings",
".",
"warn",
"(",
"\"Unable to read system power information!\"",
",",
"category",
"=",
"RuntimeWarning",
")",
"if",
"len",
"(",
"all_energy_now",
")",
">",
"0",
":",
"try",
":",
"return",
"sum",
"(",
"[",
"energy_now",
"/",
"power_now",
"*",
"60.0",
"for",
"energy_now",
",",
"power_now",
"in",
"zip",
"(",
"all_energy_now",
",",
"all_power_now",
")",
"]",
")",
"except",
"ZeroDivisionError",
"as",
"e",
":",
"warnings",
".",
"warn",
"(",
"\"Unable to calculate time remaining estimate: {0}\"",
".",
"format",
"(",
"e",
")",
",",
"category",
"=",
"RuntimeWarning",
")",
"return",
"common",
".",
"TIME_REMAINING_UNKNOWN",
"else",
":",
"return",
"common",
".",
"TIME_REMAINING_UNKNOWN"
] | 47.833333 | 21.5 |
def recordHostname(self, basedir):
"Record my hostname in twistd.hostname, for user convenience"
log.msg("recording hostname in twistd.hostname")
filename = os.path.join(basedir, "twistd.hostname")
try:
hostname = os.uname()[1] # only on unix
except AttributeError:
# this tends to fail on non-connected hosts, e.g., laptops
# on planes
hostname = socket.getfqdn()
try:
with open(filename, "w") as f:
f.write("{0}\n".format(hostname))
except Exception:
log.msg("failed - ignoring") | [
"def",
"recordHostname",
"(",
"self",
",",
"basedir",
")",
":",
"log",
".",
"msg",
"(",
"\"recording hostname in twistd.hostname\"",
")",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"basedir",
",",
"\"twistd.hostname\"",
")",
"try",
":",
"hostname",
"=",
"os",
".",
"uname",
"(",
")",
"[",
"1",
"]",
"# only on unix",
"except",
"AttributeError",
":",
"# this tends to fail on non-connected hosts, e.g., laptops",
"# on planes",
"hostname",
"=",
"socket",
".",
"getfqdn",
"(",
")",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"{0}\\n\"",
".",
"format",
"(",
"hostname",
")",
")",
"except",
"Exception",
":",
"log",
".",
"msg",
"(",
"\"failed - ignoring\"",
")"
] | 36 | 17.764706 |
def predict(self, X, batch_size=1, show_progressbar=False):
"""
Predict the BMU for each input data.
Parameters
----------
X : numpy array.
The input data.
batch_size : int, optional, default 100
The batch size to use in prediction. This may affect prediction
in stateful, i.e. sequential SOMs.
show_progressbar : bool
Whether to show a progressbar during prediction.
Returns
-------
predictions : numpy array
An array containing the BMU for each input data point.
"""
dist = self.transform(X, batch_size, show_progressbar)
res = dist.__getattribute__(self.argfunc)(1)
return res | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"batch_size",
"=",
"1",
",",
"show_progressbar",
"=",
"False",
")",
":",
"dist",
"=",
"self",
".",
"transform",
"(",
"X",
",",
"batch_size",
",",
"show_progressbar",
")",
"res",
"=",
"dist",
".",
"__getattribute__",
"(",
"self",
".",
"argfunc",
")",
"(",
"1",
")",
"return",
"res"
] | 30.5 | 19.666667 |
def parse_value_objectwithpath(self, tup_tree):
"""
::
<!ELEMENT VALUE.OBJECTWITHPATH ((CLASSPATH, CLASS) |
(INSTANCEPATH, INSTANCE))>
Returns:
tupletree with child item that is:
- for class-level use: a tuple(CIMClassName, CIMClass) where the
path of the CIMClass object is set (with namespace and host).
- for class-level use: a CIMInstance object with its path set
(with namespace and host).
"""
self.check_node(tup_tree, 'VALUE.OBJECTWITHPATH')
k = kids(tup_tree)
if len(k) != 2:
raise CIMXMLParseError(
_format("Element {0!A} has invalid number of child elements "
"{1!A} (expecting two child elements "
"((CLASSPATH, CLASS) | (INSTANCEPATH, INSTANCE)))",
name(tup_tree), k),
conn_id=self.conn_id)
if name(k[0]) == 'CLASSPATH':
# Note: Before pywbem 0.12, CIMClass did not have a path, therefore
# classpath and class were returned as a tuple. In pywbem 0.12,
# CIMClass got a path, but they are still returned as a tuple.
class_path = self.parse_classpath(k[0])
klass = self.parse_class(k[1])
klass.path = class_path
_object = (class_path, klass)
else: # INSTANCEPATH
# convert tuple to CIMInstance object with path set
inst_path = self.parse_instancepath(k[0])
_object = self.parse_instance(k[1])
_object.path = inst_path
return (name(tup_tree), attrs(tup_tree), _object) | [
"def",
"parse_value_objectwithpath",
"(",
"self",
",",
"tup_tree",
")",
":",
"self",
".",
"check_node",
"(",
"tup_tree",
",",
"'VALUE.OBJECTWITHPATH'",
")",
"k",
"=",
"kids",
"(",
"tup_tree",
")",
"if",
"len",
"(",
"k",
")",
"!=",
"2",
":",
"raise",
"CIMXMLParseError",
"(",
"_format",
"(",
"\"Element {0!A} has invalid number of child elements \"",
"\"{1!A} (expecting two child elements \"",
"\"((CLASSPATH, CLASS) | (INSTANCEPATH, INSTANCE)))\"",
",",
"name",
"(",
"tup_tree",
")",
",",
"k",
")",
",",
"conn_id",
"=",
"self",
".",
"conn_id",
")",
"if",
"name",
"(",
"k",
"[",
"0",
"]",
")",
"==",
"'CLASSPATH'",
":",
"# Note: Before pywbem 0.12, CIMClass did not have a path, therefore",
"# classpath and class were returned as a tuple. In pywbem 0.12,",
"# CIMClass got a path, but they are still returned as a tuple.",
"class_path",
"=",
"self",
".",
"parse_classpath",
"(",
"k",
"[",
"0",
"]",
")",
"klass",
"=",
"self",
".",
"parse_class",
"(",
"k",
"[",
"1",
"]",
")",
"klass",
".",
"path",
"=",
"class_path",
"_object",
"=",
"(",
"class_path",
",",
"klass",
")",
"else",
":",
"# INSTANCEPATH",
"# convert tuple to CIMInstance object with path set",
"inst_path",
"=",
"self",
".",
"parse_instancepath",
"(",
"k",
"[",
"0",
"]",
")",
"_object",
"=",
"self",
".",
"parse_instance",
"(",
"k",
"[",
"1",
"]",
")",
"_object",
".",
"path",
"=",
"inst_path",
"return",
"(",
"name",
"(",
"tup_tree",
")",
",",
"attrs",
"(",
"tup_tree",
")",
",",
"_object",
")"
] | 40.357143 | 20.5 |
def _write_branch_and_tag_to_meta_yaml(self):
"""
Write branch and tag to meta.yaml by editing in place
"""
## set the branch to pull source from
with open(self.meta_yaml.replace("meta", "template"), 'r') as infile:
dat = infile.read()
newdat = dat.format(**{'tag': self.tag, 'branch': self.branch})
with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat) | [
"def",
"_write_branch_and_tag_to_meta_yaml",
"(",
"self",
")",
":",
"## set the branch to pull source from",
"with",
"open",
"(",
"self",
".",
"meta_yaml",
".",
"replace",
"(",
"\"meta\"",
",",
"\"template\"",
")",
",",
"'r'",
")",
"as",
"infile",
":",
"dat",
"=",
"infile",
".",
"read",
"(",
")",
"newdat",
"=",
"dat",
".",
"format",
"(",
"*",
"*",
"{",
"'tag'",
":",
"self",
".",
"tag",
",",
"'branch'",
":",
"self",
".",
"branch",
"}",
")",
"with",
"open",
"(",
"self",
".",
"meta_yaml",
",",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"newdat",
")"
] | 39.909091 | 15.363636 |
def get_publisher(self, publisher_id):
"""GetPublisher.
Get a specific service hooks publisher.
:param str publisher_id: ID for a publisher.
:rtype: :class:`<Publisher> <azure.devops.v5_0.service_hooks.models.Publisher>`
"""
route_values = {}
if publisher_id is not None:
route_values['publisherId'] = self._serialize.url('publisher_id', publisher_id, 'str')
response = self._send(http_method='GET',
location_id='1e83a210-5b53-43bc-90f0-d476a4e5d731',
version='5.0',
route_values=route_values)
return self._deserialize('Publisher', response) | [
"def",
"get_publisher",
"(",
"self",
",",
"publisher_id",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"publisher_id",
"is",
"not",
"None",
":",
"route_values",
"[",
"'publisherId'",
"]",
"=",
"self",
".",
"_serialize",
".",
"url",
"(",
"'publisher_id'",
",",
"publisher_id",
",",
"'str'",
")",
"response",
"=",
"self",
".",
"_send",
"(",
"http_method",
"=",
"'GET'",
",",
"location_id",
"=",
"'1e83a210-5b53-43bc-90f0-d476a4e5d731'",
",",
"version",
"=",
"'5.0'",
",",
"route_values",
"=",
"route_values",
")",
"return",
"self",
".",
"_deserialize",
"(",
"'Publisher'",
",",
"response",
")"
] | 50.142857 | 16.357143 |
def bet_cancel(self, bet_to_cancel, account=None, **kwargs):
""" Cancel a bet
:param str bet_to_cancel: The identifier that identifies the bet to
cancel
:param str account: (optional) the account that owns the bet
(defaults to ``default_account``)
"""
if not account:
if "default_account" in self.config:
account = self.config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account)
bet = Bet(bet_to_cancel)
op = operations.Bet_cancel(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"bettor_id": account["id"],
"bet_to_cancel": bet["id"],
"prefix": self.prefix,
}
)
return self.finalizeOp(op, account["name"], "active", **kwargs) | [
"def",
"bet_cancel",
"(",
"self",
",",
"bet_to_cancel",
",",
"account",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"account",
":",
"if",
"\"default_account\"",
"in",
"self",
".",
"config",
":",
"account",
"=",
"self",
".",
"config",
"[",
"\"default_account\"",
"]",
"if",
"not",
"account",
":",
"raise",
"ValueError",
"(",
"\"You need to provide an account\"",
")",
"account",
"=",
"Account",
"(",
"account",
")",
"bet",
"=",
"Bet",
"(",
"bet_to_cancel",
")",
"op",
"=",
"operations",
".",
"Bet_cancel",
"(",
"*",
"*",
"{",
"\"fee\"",
":",
"{",
"\"amount\"",
":",
"0",
",",
"\"asset_id\"",
":",
"\"1.3.0\"",
"}",
",",
"\"bettor_id\"",
":",
"account",
"[",
"\"id\"",
"]",
",",
"\"bet_to_cancel\"",
":",
"bet",
"[",
"\"id\"",
"]",
",",
"\"prefix\"",
":",
"self",
".",
"prefix",
",",
"}",
")",
"return",
"self",
".",
"finalizeOp",
"(",
"op",
",",
"account",
"[",
"\"name\"",
"]",
",",
"\"active\"",
",",
"*",
"*",
"kwargs",
")"
] | 38.333333 | 16.541667 |
def list_menu(self, options, title="Choose a value", message="Choose a value", default=None, **kwargs):
"""
Show a single-selection list menu
Usage: C{dialog.list_menu(options, title="Choose a value", message="Choose a value", default=None, **kwargs)}
@param options: list of options (strings) for the dialog
@param title: window title for the dialog
@param message: message displayed above the list
@param default: default value to be selected
@return: a tuple containing the exit code and user choice
@rtype: C{DialogData(int, str)}
"""
choices = []
optionNum = 0
for option in options:
choices.append(str(optionNum))
choices.append(option)
if option == default:
choices.append("on")
else:
choices.append("off")
optionNum += 1
return_code, result = self._run_kdialog(title, ["--radiolist", message] + choices, kwargs)
choice = options[int(result)]
return DialogData(return_code, choice) | [
"def",
"list_menu",
"(",
"self",
",",
"options",
",",
"title",
"=",
"\"Choose a value\"",
",",
"message",
"=",
"\"Choose a value\"",
",",
"default",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"choices",
"=",
"[",
"]",
"optionNum",
"=",
"0",
"for",
"option",
"in",
"options",
":",
"choices",
".",
"append",
"(",
"str",
"(",
"optionNum",
")",
")",
"choices",
".",
"append",
"(",
"option",
")",
"if",
"option",
"==",
"default",
":",
"choices",
".",
"append",
"(",
"\"on\"",
")",
"else",
":",
"choices",
".",
"append",
"(",
"\"off\"",
")",
"optionNum",
"+=",
"1",
"return_code",
",",
"result",
"=",
"self",
".",
"_run_kdialog",
"(",
"title",
",",
"[",
"\"--radiolist\"",
",",
"message",
"]",
"+",
"choices",
",",
"kwargs",
")",
"choice",
"=",
"options",
"[",
"int",
"(",
"result",
")",
"]",
"return",
"DialogData",
"(",
"return_code",
",",
"choice",
")"
] | 38.931034 | 19.275862 |
def set_presence(self, state, status):
"""
Set the presence `state` and `status` on the client. This has the same
effects as writing `state` to :attr:`presence`, but the status of the
presence is also set at the same time.
`status` must be either a string or something which can be passed to
:class:`dict`. If it is a string, the string is wrapped in a ``{None:
status}`` dictionary. Otherwise, the dictionary is set as the
:attr:`~.Presence.status` attribute of the presence stanza. It
must map :class:`aioxmpp.structs.LanguageTag` instances to strings.
The `status` is the text shown alongside the `state` (indicating
availability such as *away*, *do not disturb* and *free to chat*).
"""
self._presence_server.set_presence(state, status=status) | [
"def",
"set_presence",
"(",
"self",
",",
"state",
",",
"status",
")",
":",
"self",
".",
"_presence_server",
".",
"set_presence",
"(",
"state",
",",
"status",
"=",
"status",
")"
] | 52.375 | 26.25 |
def print_log(text, *colors):
"""Print a log message to standard error."""
sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n") | [
"def",
"print_log",
"(",
"text",
",",
"*",
"colors",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"sprint",
"(",
"\"{}: {}\"",
".",
"format",
"(",
"script_name",
",",
"text",
")",
",",
"*",
"colors",
")",
"+",
"\"\\n\"",
")"
] | 52.333333 | 17 |
def clip_bounds(bounds=None, clip=None):
"""
Clips bounds by clip.
Parameters
----------
bounds : bounds to be clipped
clip : clip bounds
Returns
-------
Bounds(left, bottom, right, top)
"""
bounds = Bounds(*bounds)
clip = Bounds(*clip)
return Bounds(
max(bounds.left, clip.left),
max(bounds.bottom, clip.bottom),
min(bounds.right, clip.right),
min(bounds.top, clip.top)
) | [
"def",
"clip_bounds",
"(",
"bounds",
"=",
"None",
",",
"clip",
"=",
"None",
")",
":",
"bounds",
"=",
"Bounds",
"(",
"*",
"bounds",
")",
"clip",
"=",
"Bounds",
"(",
"*",
"clip",
")",
"return",
"Bounds",
"(",
"max",
"(",
"bounds",
".",
"left",
",",
"clip",
".",
"left",
")",
",",
"max",
"(",
"bounds",
".",
"bottom",
",",
"clip",
".",
"bottom",
")",
",",
"min",
"(",
"bounds",
".",
"right",
",",
"clip",
".",
"right",
")",
",",
"min",
"(",
"bounds",
".",
"top",
",",
"clip",
".",
"top",
")",
")"
] | 21.047619 | 15.809524 |
def _extract_rev(self, line1, line2):
"""
Extract the filename and revision hint from a line.
"""
try:
if line1.startswith('--- ') and line2.startswith('+++ '):
l1 = line1[4:].split(None, 1)
old_filename = l1[0].lstrip('a/') if len(l1) >= 1 else None
old_rev = l1[1] if len(l1) == 2 else 'old'
l2 = line2[4:].split(None, 1)
new_filename = l2[0].lstrip('b/') if len(l1) >= 1 else None
new_rev = l2[1] if len(l2) == 2 else 'new'
filename = old_filename if (old_filename !=
'dev/null') else new_filename
return filename, new_rev, old_rev
except (ValueError, IndexError):
pass
return None, None, None | [
"def",
"_extract_rev",
"(",
"self",
",",
"line1",
",",
"line2",
")",
":",
"try",
":",
"if",
"line1",
".",
"startswith",
"(",
"'--- '",
")",
"and",
"line2",
".",
"startswith",
"(",
"'+++ '",
")",
":",
"l1",
"=",
"line1",
"[",
"4",
":",
"]",
".",
"split",
"(",
"None",
",",
"1",
")",
"old_filename",
"=",
"l1",
"[",
"0",
"]",
".",
"lstrip",
"(",
"'a/'",
")",
"if",
"len",
"(",
"l1",
")",
">=",
"1",
"else",
"None",
"old_rev",
"=",
"l1",
"[",
"1",
"]",
"if",
"len",
"(",
"l1",
")",
"==",
"2",
"else",
"'old'",
"l2",
"=",
"line2",
"[",
"4",
":",
"]",
".",
"split",
"(",
"None",
",",
"1",
")",
"new_filename",
"=",
"l2",
"[",
"0",
"]",
".",
"lstrip",
"(",
"'b/'",
")",
"if",
"len",
"(",
"l1",
")",
">=",
"1",
"else",
"None",
"new_rev",
"=",
"l2",
"[",
"1",
"]",
"if",
"len",
"(",
"l2",
")",
"==",
"2",
"else",
"'new'",
"filename",
"=",
"old_filename",
"if",
"(",
"old_filename",
"!=",
"'dev/null'",
")",
"else",
"new_filename",
"return",
"filename",
",",
"new_rev",
",",
"old_rev",
"except",
"(",
"ValueError",
",",
"IndexError",
")",
":",
"pass",
"return",
"None",
",",
"None",
",",
"None"
] | 35.782609 | 21.26087 |
def filter_gradient_threshold_percentile(self, analyte, percentiles, level='population', win=15, filt=False,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
self.minimal_analytes.update([analyte])
# Calculate gradients of all samples
self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)
grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# calculate filter limits
lims = np.percentile(grad, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-grd-pcnt_below'.format(percentiles[0]),
below,
'Gradients below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-grd-pcnt_above'.format(percentiles[0]),
above,
'Gradients above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_inside',
inside,
'Gradients between ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_outside',
outside,
'Gradients outside ' + lpc + ' ' + analyte + 'percentiles',
params, setn=setn)
prog.update()
return | [
"def",
"filter_gradient_threshold_percentile",
"(",
"self",
",",
"analyte",
",",
"percentiles",
",",
"level",
"=",
"'population'",
",",
"win",
"=",
"15",
",",
"filt",
"=",
"False",
",",
"samples",
"=",
"None",
",",
"subset",
"=",
"None",
")",
":",
"params",
"=",
"locals",
"(",
")",
"del",
"(",
"params",
"[",
"'self'",
"]",
")",
"if",
"samples",
"is",
"not",
"None",
":",
"subset",
"=",
"self",
".",
"make_subset",
"(",
"samples",
")",
"samples",
"=",
"self",
".",
"_get_samples",
"(",
"subset",
")",
"self",
".",
"minimal_analytes",
".",
"update",
"(",
"[",
"analyte",
"]",
")",
"# Calculate gradients of all samples",
"self",
".",
"get_gradients",
"(",
"analytes",
"=",
"[",
"analyte",
"]",
",",
"win",
"=",
"win",
",",
"filt",
"=",
"filt",
",",
"subset",
"=",
"subset",
")",
"grad",
"=",
"self",
".",
"gradients",
"[",
"analyte",
"]",
"[",
"~",
"np",
".",
"isnan",
"(",
"self",
".",
"gradients",
"[",
"analyte",
"]",
")",
"]",
"if",
"isinstance",
"(",
"percentiles",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"percentiles",
"=",
"[",
"percentiles",
"]",
"if",
"level",
"==",
"'population'",
":",
"# calculate filter limits",
"lims",
"=",
"np",
".",
"percentile",
"(",
"grad",
",",
"percentiles",
")",
"# Calculate filter for individual samples",
"with",
"self",
".",
"pbar",
".",
"set",
"(",
"total",
"=",
"len",
"(",
"samples",
")",
",",
"desc",
"=",
"'Percentile Threshold Filter'",
")",
"as",
"prog",
":",
"for",
"s",
"in",
"samples",
":",
"d",
"=",
"self",
".",
"data",
"[",
"s",
"]",
"setn",
"=",
"d",
".",
"filt",
".",
"maxset",
"+",
"1",
"g",
"=",
"calc_grads",
"(",
"d",
".",
"Time",
",",
"d",
".",
"focus",
",",
"[",
"analyte",
"]",
",",
"win",
")",
"[",
"analyte",
"]",
"if",
"level",
"==",
"'individual'",
":",
"gt",
"=",
"nominal_values",
"(",
"g",
")",
"lims",
"=",
"np",
".",
"percentile",
"(",
"gt",
"[",
"~",
"np",
".",
"isnan",
"(",
"gt",
")",
"]",
",",
"percentiles",
")",
"if",
"len",
"(",
"lims",
")",
"==",
"1",
":",
"above",
"=",
"g",
">=",
"lims",
"[",
"0",
"]",
"below",
"=",
"g",
"<",
"lims",
"[",
"0",
"]",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_{:.1f}-grd-pcnt_below'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
")",
",",
"below",
",",
"'Gradients below {:.1f}th {:} percentile ({:.2e})'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
",",
"analyte",
",",
"lims",
"[",
"0",
"]",
")",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_{:.1f}-grd-pcnt_above'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
")",
",",
"above",
",",
"'Gradients above {:.1f}th {:} percentile ({:.2e})'",
".",
"format",
"(",
"percentiles",
"[",
"0",
"]",
",",
"analyte",
",",
"lims",
"[",
"0",
"]",
")",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"elif",
"len",
"(",
"lims",
")",
"==",
"2",
":",
"inside",
"=",
"(",
"g",
">=",
"min",
"(",
"lims",
")",
")",
"&",
"(",
"g",
"<=",
"max",
"(",
"lims",
")",
")",
"outside",
"=",
"(",
"g",
"<",
"min",
"(",
"lims",
")",
")",
"|",
"(",
"g",
">",
"max",
"(",
"lims",
")",
")",
"lpc",
"=",
"'-'",
".",
"join",
"(",
"[",
"'{:.1f}'",
".",
"format",
"(",
"p",
")",
"for",
"p",
"in",
"percentiles",
"]",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_'",
"+",
"lpc",
"+",
"'-grd-pcnt_inside'",
",",
"inside",
",",
"'Gradients between '",
"+",
"lpc",
"+",
"' '",
"+",
"analyte",
"+",
"'percentiles'",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"d",
".",
"filt",
".",
"add",
"(",
"analyte",
"+",
"'_'",
"+",
"lpc",
"+",
"'-grd-pcnt_outside'",
",",
"outside",
",",
"'Gradients outside '",
"+",
"lpc",
"+",
"' '",
"+",
"analyte",
"+",
"'percentiles'",
",",
"params",
",",
"setn",
"=",
"setn",
")",
"prog",
".",
"update",
"(",
")",
"return"
] | 40.077778 | 22.877778 |
def volume(self):
"""
The analytic volume of the cylinder primitive.
Returns
---------
volume : float
Volume of the cylinder
"""
volume = ((np.pi * self.primitive.radius ** 2) *
self.primitive.height)
return volume | [
"def",
"volume",
"(",
"self",
")",
":",
"volume",
"=",
"(",
"(",
"np",
".",
"pi",
"*",
"self",
".",
"primitive",
".",
"radius",
"**",
"2",
")",
"*",
"self",
".",
"primitive",
".",
"height",
")",
"return",
"volume"
] | 24.666667 | 15.5 |
def is_valid_pred_string(predstr):
"""
Return `True` if *predstr* is a valid predicate string.
Examples:
>>> is_valid_pred_string('"_dog_n_1_rel"')
True
>>> is_valid_pred_string('_dog_n_1')
True
>>> is_valid_pred_string('_dog_noun_1')
False
>>> is_valid_pred_string('dog_noun_1')
True
"""
predstr = predstr.strip('"').lstrip("'")
# this is a stricter regex than in Pred, but doesn't check POS
return re.match(
r'_([^ _\\]|\\.)+_[a-z](_([^ _\\]|\\.)+)?(_rel)?$'
r'|[^_]([^ \\]|\\.)+(_rel)?$',
predstr
) is not None | [
"def",
"is_valid_pred_string",
"(",
"predstr",
")",
":",
"predstr",
"=",
"predstr",
".",
"strip",
"(",
"'\"'",
")",
".",
"lstrip",
"(",
"\"'\"",
")",
"# this is a stricter regex than in Pred, but doesn't check POS",
"return",
"re",
".",
"match",
"(",
"r'_([^ _\\\\]|\\\\.)+_[a-z](_([^ _\\\\]|\\\\.)+)?(_rel)?$'",
"r'|[^_]([^ \\\\]|\\\\.)+(_rel)?$'",
",",
"predstr",
")",
"is",
"not",
"None"
] | 29.238095 | 16.571429 |
def validator(self, meth):
"""
Decorator that adds *meth* to the list of validators.
Returns *meth* unchanged.
.. versionadded:: 17.1.0
"""
if self._validator is None:
self._validator = meth
else:
self._validator = and_(self._validator, meth)
return meth | [
"def",
"validator",
"(",
"self",
",",
"meth",
")",
":",
"if",
"self",
".",
"_validator",
"is",
"None",
":",
"self",
".",
"_validator",
"=",
"meth",
"else",
":",
"self",
".",
"_validator",
"=",
"and_",
"(",
"self",
".",
"_validator",
",",
"meth",
")",
"return",
"meth"
] | 25.538462 | 15.846154 |
def check_version(version: str):
"""
Checks given version against code version and determines compatibility.
Throws if versions are incompatible.
:param version: Given version.
"""
code_version = parse_version(__version__)
given_version = parse_version(version)
check_condition(code_version[0] == given_version[0],
"Given release version (%s) does not match release code version (%s)" % (version, __version__))
check_condition(code_version[1] == given_version[1],
"Given major version (%s) does not match major code version (%s)" % (version, __version__)) | [
"def",
"check_version",
"(",
"version",
":",
"str",
")",
":",
"code_version",
"=",
"parse_version",
"(",
"__version__",
")",
"given_version",
"=",
"parse_version",
"(",
"version",
")",
"check_condition",
"(",
"code_version",
"[",
"0",
"]",
"==",
"given_version",
"[",
"0",
"]",
",",
"\"Given release version (%s) does not match release code version (%s)\"",
"%",
"(",
"version",
",",
"__version__",
")",
")",
"check_condition",
"(",
"code_version",
"[",
"1",
"]",
"==",
"given_version",
"[",
"1",
"]",
",",
"\"Given major version (%s) does not match major code version (%s)\"",
"%",
"(",
"version",
",",
"__version__",
")",
")"
] | 47.692308 | 21.076923 |
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1) | [
"def",
"backup",
"(",
"target_file",
",",
"config",
"=",
"None",
")",
":",
"storm_",
"=",
"get_storm_instance",
"(",
"config",
")",
"try",
":",
"storm_",
".",
"backup",
"(",
"target_file",
")",
"except",
"Exception",
"as",
"error",
":",
"print",
"(",
"get_formatted_message",
"(",
"str",
"(",
"error",
")",
",",
"'error'",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"sys",
".",
"exit",
"(",
"1",
")"
] | 31.1 | 12.3 |
def mkpart(device, part_type, fs_type=None, start=None, end=None):
'''
Make a part_type partition for filesystem fs_type, beginning at start and
ending at end (by default in megabytes). part_type should be one of
"primary", "logical", or "extended".
CLI Examples:
.. code-block:: bash
salt '*' partition.mkpart /dev/sda primary fs_type=fat32 start=0 end=639
salt '*' partition.mkpart /dev/sda primary start=0 end=639
'''
if part_type not in set(['primary', 'logical', 'extended']):
raise CommandExecutionError(
'Invalid part_type passed to partition.mkpart'
)
if not _is_fstype(fs_type):
raise CommandExecutionError(
'Invalid fs_type passed to partition.mkpart'
)
if start is not None and end is not None:
_validate_partition_boundary(start)
_validate_partition_boundary(end)
if start is None:
start = ''
if end is None:
end = ''
if fs_type:
cmd = ('parted', '-m', '-s', '--', device, 'mkpart', part_type, fs_type, start, end)
else:
cmd = ('parted', '-m', '-s', '--', device, 'mkpart', part_type, start, end)
out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
return out | [
"def",
"mkpart",
"(",
"device",
",",
"part_type",
",",
"fs_type",
"=",
"None",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"if",
"part_type",
"not",
"in",
"set",
"(",
"[",
"'primary'",
",",
"'logical'",
",",
"'extended'",
"]",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Invalid part_type passed to partition.mkpart'",
")",
"if",
"not",
"_is_fstype",
"(",
"fs_type",
")",
":",
"raise",
"CommandExecutionError",
"(",
"'Invalid fs_type passed to partition.mkpart'",
")",
"if",
"start",
"is",
"not",
"None",
"and",
"end",
"is",
"not",
"None",
":",
"_validate_partition_boundary",
"(",
"start",
")",
"_validate_partition_boundary",
"(",
"end",
")",
"if",
"start",
"is",
"None",
":",
"start",
"=",
"''",
"if",
"end",
"is",
"None",
":",
"end",
"=",
"''",
"if",
"fs_type",
":",
"cmd",
"=",
"(",
"'parted'",
",",
"'-m'",
",",
"'-s'",
",",
"'--'",
",",
"device",
",",
"'mkpart'",
",",
"part_type",
",",
"fs_type",
",",
"start",
",",
"end",
")",
"else",
":",
"cmd",
"=",
"(",
"'parted'",
",",
"'-m'",
",",
"'-s'",
",",
"'--'",
",",
"device",
",",
"'mkpart'",
",",
"part_type",
",",
"start",
",",
"end",
")",
"out",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")",
".",
"splitlines",
"(",
")",
"return",
"out"
] | 30.95 | 26.55 |
def generate_encoded_user_data(
env='dev',
region='us-east-1',
generated=None,
group_name='',
pipeline_type='',
canary=False,
):
r"""Generate base64 encoded User Data.
Args:
env (str): Deployment environment, e.g. dev, stage.
region (str): AWS Region, e.g. us-east-1.
generated (gogoutils.Generator): Generated naming formats.
group_name (str): Application group nane, e.g. core.
pipeline_type (str): Type of Foremast Pipeline to configure.
Returns:
str: base64 encoded User Data script.
#!/bin/bash
export CLOUD_ENVIRONMENT=dev
export CLOUD_ENVIRONMENT_C=dev
export CLOUD_ENVIRONMENT_P=dev
export CLOUD_ENVIRONMENT_S=dev
export CLOUD_APP=coreforrest
export CLOUD_APP_GROUP=forrest
export CLOUD_STACK=forrest
export EC2_REGION=us-east-1
export CLOUD_DOMAIN=dev.example.com
printenv | grep 'CLOUD\|EC2' | awk '$0="export "$0'>> /etc/gogo/cloud_env
"""
# We need to handle the case of prodp and prods for different URL generation
if env in ["prod", "prodp", "prods"]:
env_c, env_p, env_s = "prod", "prodp", "prods"
else:
env_c, env_p, env_s = env, env, env
user_data = get_template(
template_file='infrastructure/user_data.sh.j2',
env=env,
env_c=env_c,
env_p=env_p,
env_s=env_s,
region=region,
app_name=generated.app_name(),
group_name=group_name,
pipeline_type=pipeline_type,
canary=canary,
formats=generated,
)
return base64.b64encode(user_data.encode()).decode() | [
"def",
"generate_encoded_user_data",
"(",
"env",
"=",
"'dev'",
",",
"region",
"=",
"'us-east-1'",
",",
"generated",
"=",
"None",
",",
"group_name",
"=",
"''",
",",
"pipeline_type",
"=",
"''",
",",
"canary",
"=",
"False",
",",
")",
":",
"# We need to handle the case of prodp and prods for different URL generation",
"if",
"env",
"in",
"[",
"\"prod\"",
",",
"\"prodp\"",
",",
"\"prods\"",
"]",
":",
"env_c",
",",
"env_p",
",",
"env_s",
"=",
"\"prod\"",
",",
"\"prodp\"",
",",
"\"prods\"",
"else",
":",
"env_c",
",",
"env_p",
",",
"env_s",
"=",
"env",
",",
"env",
",",
"env",
"user_data",
"=",
"get_template",
"(",
"template_file",
"=",
"'infrastructure/user_data.sh.j2'",
",",
"env",
"=",
"env",
",",
"env_c",
"=",
"env_c",
",",
"env_p",
"=",
"env_p",
",",
"env_s",
"=",
"env_s",
",",
"region",
"=",
"region",
",",
"app_name",
"=",
"generated",
".",
"app_name",
"(",
")",
",",
"group_name",
"=",
"group_name",
",",
"pipeline_type",
"=",
"pipeline_type",
",",
"canary",
"=",
"canary",
",",
"formats",
"=",
"generated",
",",
")",
"return",
"base64",
".",
"b64encode",
"(",
"user_data",
".",
"encode",
"(",
")",
")",
".",
"decode",
"(",
")"
] | 31.849057 | 17.226415 |
def _sample_batch():
"""Determine if a batch should be processed and if not, pop off all of
the pending metrics for that batch.
:rtype: bool
"""
if _sample_probability == 1.0 or random.random() < _sample_probability:
return True
# Pop off all the metrics for the batch
for database in _measurements:
_measurements[database] = _measurements[database][_max_batch_size:]
return False | [
"def",
"_sample_batch",
"(",
")",
":",
"if",
"_sample_probability",
"==",
"1.0",
"or",
"random",
".",
"random",
"(",
")",
"<",
"_sample_probability",
":",
"return",
"True",
"# Pop off all the metrics for the batch",
"for",
"database",
"in",
"_measurements",
":",
"_measurements",
"[",
"database",
"]",
"=",
"_measurements",
"[",
"database",
"]",
"[",
"_max_batch_size",
":",
"]",
"return",
"False"
] | 29.857143 | 20.642857 |
def convert_table(self, table_name, cls_att=None):
'''
Returns the specified table as an orange example table.
:param table_name: table name to convert
:cls_att: class attribute name
:rtype: orange.ExampleTable
'''
import Orange
cols = self.db.cols[table_name]
attributes, metas, class_var = [], [], None
for col in cols:
att_type = self.orng_type(table_name, col)
if att_type == 'd':
att_vals = self.db.col_vals[table_name][col]
att_var = Orange.data.DiscreteVariable(str(col), values=[str(val) for val in att_vals])
elif att_type == 'c':
att_var = Orange.data.ContinuousVariable(str(col))
else:
att_var = Orange.data.StringVariable(str(col))
if col == cls_att:
if att_type == 'string':
raise Exception('Unsuitable data type for a target variable: %s' % att_type)
class_var = att_var
continue
elif att_type == 'string' or table_name in self.db.pkeys and col in self.db.pkeys[
table_name] or table_name in self.db.fkeys and col in self.db.fkeys[table_name]:
metas.append(att_var)
else:
attributes.append(att_var)
domain = Orange.data.Domain(attributes, class_vars=class_var, metas=metas)
# for meta in metas:
# domain.addmeta(Orange.newmetaid(), meta)
dataset = Orange.data.Table(domain)
dataset.name = table_name
for row in self.db.rows(table_name, cols):
example = Orange.data.Instance(domain)
for col, val in zip(cols, row):
example[str(col)] = str(val) if val != None else '?'
dataset.append(example)
return dataset | [
"def",
"convert_table",
"(",
"self",
",",
"table_name",
",",
"cls_att",
"=",
"None",
")",
":",
"import",
"Orange",
"cols",
"=",
"self",
".",
"db",
".",
"cols",
"[",
"table_name",
"]",
"attributes",
",",
"metas",
",",
"class_var",
"=",
"[",
"]",
",",
"[",
"]",
",",
"None",
"for",
"col",
"in",
"cols",
":",
"att_type",
"=",
"self",
".",
"orng_type",
"(",
"table_name",
",",
"col",
")",
"if",
"att_type",
"==",
"'d'",
":",
"att_vals",
"=",
"self",
".",
"db",
".",
"col_vals",
"[",
"table_name",
"]",
"[",
"col",
"]",
"att_var",
"=",
"Orange",
".",
"data",
".",
"DiscreteVariable",
"(",
"str",
"(",
"col",
")",
",",
"values",
"=",
"[",
"str",
"(",
"val",
")",
"for",
"val",
"in",
"att_vals",
"]",
")",
"elif",
"att_type",
"==",
"'c'",
":",
"att_var",
"=",
"Orange",
".",
"data",
".",
"ContinuousVariable",
"(",
"str",
"(",
"col",
")",
")",
"else",
":",
"att_var",
"=",
"Orange",
".",
"data",
".",
"StringVariable",
"(",
"str",
"(",
"col",
")",
")",
"if",
"col",
"==",
"cls_att",
":",
"if",
"att_type",
"==",
"'string'",
":",
"raise",
"Exception",
"(",
"'Unsuitable data type for a target variable: %s'",
"%",
"att_type",
")",
"class_var",
"=",
"att_var",
"continue",
"elif",
"att_type",
"==",
"'string'",
"or",
"table_name",
"in",
"self",
".",
"db",
".",
"pkeys",
"and",
"col",
"in",
"self",
".",
"db",
".",
"pkeys",
"[",
"table_name",
"]",
"or",
"table_name",
"in",
"self",
".",
"db",
".",
"fkeys",
"and",
"col",
"in",
"self",
".",
"db",
".",
"fkeys",
"[",
"table_name",
"]",
":",
"metas",
".",
"append",
"(",
"att_var",
")",
"else",
":",
"attributes",
".",
"append",
"(",
"att_var",
")",
"domain",
"=",
"Orange",
".",
"data",
".",
"Domain",
"(",
"attributes",
",",
"class_vars",
"=",
"class_var",
",",
"metas",
"=",
"metas",
")",
"# for meta in metas:",
"# domain.addmeta(Orange.newmetaid(), meta)",
"dataset",
"=",
"Orange",
".",
"data",
".",
"Table",
"(",
"domain",
")",
"dataset",
".",
"name",
"=",
"table_name",
"for",
"row",
"in",
"self",
".",
"db",
".",
"rows",
"(",
"table_name",
",",
"cols",
")",
":",
"example",
"=",
"Orange",
".",
"data",
".",
"Instance",
"(",
"domain",
")",
"for",
"col",
",",
"val",
"in",
"zip",
"(",
"cols",
",",
"row",
")",
":",
"example",
"[",
"str",
"(",
"col",
")",
"]",
"=",
"str",
"(",
"val",
")",
"if",
"val",
"!=",
"None",
"else",
"'?'",
"dataset",
".",
"append",
"(",
"example",
")",
"return",
"dataset"
] | 43.97619 | 18.880952 |
def signature_base_string(http_method, base_str_uri,
normalized_encoded_request_parameters):
"""**Construct the signature base string.**
Per `section 3.4.1.1`_ of the spec.
For example, the HTTP request::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
c2&a3=2+q
is represented by the following signature base string (line breaks
are for display purposes only)::
POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
%26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
9d7dh3k39sjv7
.. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
"""
# The signature base string is constructed by concatenating together,
# in order, the following HTTP request elements:
# 1. The HTTP request method in uppercase. For example: "HEAD",
# "GET", "POST", etc. If the request uses a custom HTTP method, it
# MUST be encoded (`Section 3.6`_).
#
# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
base_string = utils.escape(http_method.upper())
# 2. An "&" character (ASCII code 38).
base_string += '&'
# 3. The base string URI from `Section 3.4.1.2`_, after being encoded
# (`Section 3.6`_).
#
# .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
# .. _`Section 3.4.6`: https://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(base_str_uri)
# 4. An "&" character (ASCII code 38).
base_string += '&'
# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after
# being encoded (`Section 3.6`).
#
# .. _`Section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
# .. _`Section 3.4.6`: https://tools.ietf.org/html/rfc5849#section-3.4.6
base_string += utils.escape(normalized_encoded_request_parameters)
return base_string | [
"def",
"signature_base_string",
"(",
"http_method",
",",
"base_str_uri",
",",
"normalized_encoded_request_parameters",
")",
":",
"# The signature base string is constructed by concatenating together,",
"# in order, the following HTTP request elements:",
"# 1. The HTTP request method in uppercase. For example: \"HEAD\",",
"# \"GET\", \"POST\", etc. If the request uses a custom HTTP method, it",
"# MUST be encoded (`Section 3.6`_).",
"#",
"# .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6",
"base_string",
"=",
"utils",
".",
"escape",
"(",
"http_method",
".",
"upper",
"(",
")",
")",
"# 2. An \"&\" character (ASCII code 38).",
"base_string",
"+=",
"'&'",
"# 3. The base string URI from `Section 3.4.1.2`_, after being encoded",
"# (`Section 3.6`_).",
"#",
"# .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2",
"# .. _`Section 3.4.6`: https://tools.ietf.org/html/rfc5849#section-3.4.6",
"base_string",
"+=",
"utils",
".",
"escape",
"(",
"base_str_uri",
")",
"# 4. An \"&\" character (ASCII code 38).",
"base_string",
"+=",
"'&'",
"# 5. The request parameters as normalized in `Section 3.4.1.3.2`_, after",
"# being encoded (`Section 3.6`).",
"#",
"# .. _`Section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2",
"# .. _`Section 3.4.6`: https://tools.ietf.org/html/rfc5849#section-3.4.6",
"base_string",
"+=",
"utils",
".",
"escape",
"(",
"normalized_encoded_request_parameters",
")",
"return",
"base_string"
] | 39.365079 | 24.031746 |
def get_nearest_entry(self, entry, type_measurement):
"""!
@brief Find nearest entry of node for the specified entry.
@param[in] entry (cfentry): Entry that is used for calculation distance.
@param[in] type_measurement (measurement_type): Measurement type that is used for obtaining nearest entry to the specified.
@return (cfentry) Nearest entry of node for the specified entry.
"""
min_key = lambda cur_entity: cur_entity.get_distance(entry, type_measurement);
return min(self.__entries, key = min_key); | [
"def",
"get_nearest_entry",
"(",
"self",
",",
"entry",
",",
"type_measurement",
")",
":",
"min_key",
"=",
"lambda",
"cur_entity",
":",
"cur_entity",
".",
"get_distance",
"(",
"entry",
",",
"type_measurement",
")",
"return",
"min",
"(",
"self",
".",
"__entries",
",",
"key",
"=",
"min_key",
")"
] | 46.538462 | 29.846154 |
def wait(self, number, patience):
""" Waits and resets if necessary. """
# inspect indicator for our number
waiting = int(self.client.get(self.keys.indicator)) != number
# wait until someone announces our number
while waiting:
message = self.subscription.listen(patience)
if message is None:
# timeout beyond patience, bump and try again
self.message('{} bumps'.format(number))
self.bump()
continue
if message['type'] != 'message':
continue # a subscribe message
waiting = self.keys.number(message['data']) != number
# our turn now
self.message('{} started'.format(number)) | [
"def",
"wait",
"(",
"self",
",",
"number",
",",
"patience",
")",
":",
"# inspect indicator for our number",
"waiting",
"=",
"int",
"(",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"keys",
".",
"indicator",
")",
")",
"!=",
"number",
"# wait until someone announces our number",
"while",
"waiting",
":",
"message",
"=",
"self",
".",
"subscription",
".",
"listen",
"(",
"patience",
")",
"if",
"message",
"is",
"None",
":",
"# timeout beyond patience, bump and try again",
"self",
".",
"message",
"(",
"'{} bumps'",
".",
"format",
"(",
"number",
")",
")",
"self",
".",
"bump",
"(",
")",
"continue",
"if",
"message",
"[",
"'type'",
"]",
"!=",
"'message'",
":",
"continue",
"# a subscribe message",
"waiting",
"=",
"self",
".",
"keys",
".",
"number",
"(",
"message",
"[",
"'data'",
"]",
")",
"!=",
"number",
"# our turn now",
"self",
".",
"message",
"(",
"'{} started'",
".",
"format",
"(",
"number",
")",
")"
] | 37.1 | 16.9 |
def strokewidth(self, w=None):
'''Set the stroke width.
:param w: Stroke width.
:return: If no width was specified then current width is returned.
'''
if w is not None:
self._canvas.strokewidth = w
else:
return self._canvas.strokewidth | [
"def",
"strokewidth",
"(",
"self",
",",
"w",
"=",
"None",
")",
":",
"if",
"w",
"is",
"not",
"None",
":",
"self",
".",
"_canvas",
".",
"strokewidth",
"=",
"w",
"else",
":",
"return",
"self",
".",
"_canvas",
".",
"strokewidth"
] | 29.9 | 17.5 |
def extract_self_cert(signed_raw: str) -> Identity:
"""
Return self-certified Identity instance from the signed raw Revocation document
:param signed_raw: Signed raw document string
:return:
"""
lines = signed_raw.splitlines(True)
n = 0
version = int(Revocation.parse_field("Version", lines[n]))
n += 1
Revocation.parse_field("Type", lines[n])
n += 1
currency = Revocation.parse_field("Currency", lines[n])
n += 1
issuer = Revocation.parse_field("Issuer", lines[n])
n += 1
unique_id = Revocation.parse_field("IdtyUniqueID", lines[n])
n += 1
timestamp = Revocation.parse_field("IdtyTimestamp", lines[n])
n += 1
signature = Revocation.parse_field("IdtySignature", lines[n])
n += 1
return Identity(version, currency, issuer, unique_id, timestamp, signature) | [
"def",
"extract_self_cert",
"(",
"signed_raw",
":",
"str",
")",
"->",
"Identity",
":",
"lines",
"=",
"signed_raw",
".",
"splitlines",
"(",
"True",
")",
"n",
"=",
"0",
"version",
"=",
"int",
"(",
"Revocation",
".",
"parse_field",
"(",
"\"Version\"",
",",
"lines",
"[",
"n",
"]",
")",
")",
"n",
"+=",
"1",
"Revocation",
".",
"parse_field",
"(",
"\"Type\"",
",",
"lines",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"currency",
"=",
"Revocation",
".",
"parse_field",
"(",
"\"Currency\"",
",",
"lines",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"issuer",
"=",
"Revocation",
".",
"parse_field",
"(",
"\"Issuer\"",
",",
"lines",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"unique_id",
"=",
"Revocation",
".",
"parse_field",
"(",
"\"IdtyUniqueID\"",
",",
"lines",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"timestamp",
"=",
"Revocation",
".",
"parse_field",
"(",
"\"IdtyTimestamp\"",
",",
"lines",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"signature",
"=",
"Revocation",
".",
"parse_field",
"(",
"\"IdtySignature\"",
",",
"lines",
"[",
"n",
"]",
")",
"n",
"+=",
"1",
"return",
"Identity",
"(",
"version",
",",
"currency",
",",
"issuer",
",",
"unique_id",
",",
"timestamp",
",",
"signature",
")"
] | 28.375 | 27.25 |
def add_action_view(self, name, url, actions, **kwargs):
"""Creates an ActionsView instance and registers it.
"""
view = ActionsView(name, url=url, self_var=self, **kwargs)
if isinstance(actions, dict):
for group, actions in actions.iteritems():
view.actions.extend(load_actions(actions, group=group or None))
else:
view.actions.extend(load_actions(actions))
self.add_view(view)
return view | [
"def",
"add_action_view",
"(",
"self",
",",
"name",
",",
"url",
",",
"actions",
",",
"*",
"*",
"kwargs",
")",
":",
"view",
"=",
"ActionsView",
"(",
"name",
",",
"url",
"=",
"url",
",",
"self_var",
"=",
"self",
",",
"*",
"*",
"kwargs",
")",
"if",
"isinstance",
"(",
"actions",
",",
"dict",
")",
":",
"for",
"group",
",",
"actions",
"in",
"actions",
".",
"iteritems",
"(",
")",
":",
"view",
".",
"actions",
".",
"extend",
"(",
"load_actions",
"(",
"actions",
",",
"group",
"=",
"group",
"or",
"None",
")",
")",
"else",
":",
"view",
".",
"actions",
".",
"extend",
"(",
"load_actions",
"(",
"actions",
")",
")",
"self",
".",
"add_view",
"(",
"view",
")",
"return",
"view"
] | 43.272727 | 15.727273 |
def __monkey_patch_juman_lines(self, input_str):
# type: (text_type)->text_type
"""* What you can do
- It overwrites juman_line() method because this method causes TypeError in python3
"""
assert isinstance(self.juman, pyknp.Juman)
if not self.juman.socket and not self.juman.subprocess:
if self.juman.server is not None:
self.juman.socket = MonkeyPatchSocket(self.juman.server, self.juman.port, b"RUN -e2\n")
else:
command = "%s %s" % (self.juman.command, self.juman.option)
if self.juman.rcfile:
command += " -r %s" % self.juman.rcfile
self.juman.subprocess = pyknp.Subprocess(command)
if self.juman.socket:
return self.juman.socket.query(input_str, pattern=self.juman.pattern)
return self.juman.subprocess.query(input_str, pattern=self.juman.pattern) | [
"def",
"__monkey_patch_juman_lines",
"(",
"self",
",",
"input_str",
")",
":",
"# type: (text_type)->text_type",
"assert",
"isinstance",
"(",
"self",
".",
"juman",
",",
"pyknp",
".",
"Juman",
")",
"if",
"not",
"self",
".",
"juman",
".",
"socket",
"and",
"not",
"self",
".",
"juman",
".",
"subprocess",
":",
"if",
"self",
".",
"juman",
".",
"server",
"is",
"not",
"None",
":",
"self",
".",
"juman",
".",
"socket",
"=",
"MonkeyPatchSocket",
"(",
"self",
".",
"juman",
".",
"server",
",",
"self",
".",
"juman",
".",
"port",
",",
"b\"RUN -e2\\n\"",
")",
"else",
":",
"command",
"=",
"\"%s %s\"",
"%",
"(",
"self",
".",
"juman",
".",
"command",
",",
"self",
".",
"juman",
".",
"option",
")",
"if",
"self",
".",
"juman",
".",
"rcfile",
":",
"command",
"+=",
"\" -r %s\"",
"%",
"self",
".",
"juman",
".",
"rcfile",
"self",
".",
"juman",
".",
"subprocess",
"=",
"pyknp",
".",
"Subprocess",
"(",
"command",
")",
"if",
"self",
".",
"juman",
".",
"socket",
":",
"return",
"self",
".",
"juman",
".",
"socket",
".",
"query",
"(",
"input_str",
",",
"pattern",
"=",
"self",
".",
"juman",
".",
"pattern",
")",
"return",
"self",
".",
"juman",
".",
"subprocess",
".",
"query",
"(",
"input_str",
",",
"pattern",
"=",
"self",
".",
"juman",
".",
"pattern",
")"
] | 54.176471 | 21.176471 |
def remove(self, attendees):
""" Remove the provided attendees from the event
:param attendees: list of attendees to add
:type attendees: str or tuple(str, str) or Attendee or list[str] or
list[tuple(str,str)] or list[Attendee]
"""
if isinstance(attendees, (list, tuple)):
attendees = {
attendee.address if isinstance(attendee, Attendee) else attendee
for
attendee in attendees}
elif isinstance(attendees, str):
attendees = {attendees}
elif isinstance(attendees, Attendee):
attendees = {attendees.address}
else:
raise ValueError('Incorrect parameter type for attendees')
new_attendees = []
for attendee in self.__attendees:
if attendee.address not in attendees:
new_attendees.append(attendee)
self.__attendees = new_attendees
self._track_changes() | [
"def",
"remove",
"(",
"self",
",",
"attendees",
")",
":",
"if",
"isinstance",
"(",
"attendees",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"attendees",
"=",
"{",
"attendee",
".",
"address",
"if",
"isinstance",
"(",
"attendee",
",",
"Attendee",
")",
"else",
"attendee",
"for",
"attendee",
"in",
"attendees",
"}",
"elif",
"isinstance",
"(",
"attendees",
",",
"str",
")",
":",
"attendees",
"=",
"{",
"attendees",
"}",
"elif",
"isinstance",
"(",
"attendees",
",",
"Attendee",
")",
":",
"attendees",
"=",
"{",
"attendees",
".",
"address",
"}",
"else",
":",
"raise",
"ValueError",
"(",
"'Incorrect parameter type for attendees'",
")",
"new_attendees",
"=",
"[",
"]",
"for",
"attendee",
"in",
"self",
".",
"__attendees",
":",
"if",
"attendee",
".",
"address",
"not",
"in",
"attendees",
":",
"new_attendees",
".",
"append",
"(",
"attendee",
")",
"self",
".",
"__attendees",
"=",
"new_attendees",
"self",
".",
"_track_changes",
"(",
")"
] | 38.16 | 13.64 |
def _run_get_data_background(macs, queue, shared_data, bt_device):
"""
Background process function for RuuviTag Sensors
"""
run_flag = RunFlag()
def add_data(data):
if not shared_data['run_flag']:
run_flag.running = False
data[1]['time'] = datetime.utcnow().isoformat()
queue.put(data)
RuuviTagSensor.get_datas(add_data, macs, run_flag, bt_device) | [
"def",
"_run_get_data_background",
"(",
"macs",
",",
"queue",
",",
"shared_data",
",",
"bt_device",
")",
":",
"run_flag",
"=",
"RunFlag",
"(",
")",
"def",
"add_data",
"(",
"data",
")",
":",
"if",
"not",
"shared_data",
"[",
"'run_flag'",
"]",
":",
"run_flag",
".",
"running",
"=",
"False",
"data",
"[",
"1",
"]",
"[",
"'time'",
"]",
"=",
"datetime",
".",
"utcnow",
"(",
")",
".",
"isoformat",
"(",
")",
"queue",
".",
"put",
"(",
"data",
")",
"RuuviTagSensor",
".",
"get_datas",
"(",
"add_data",
",",
"macs",
",",
"run_flag",
",",
"bt_device",
")"
] | 26.466667 | 19.533333 |
def awd_lstm_lm_1150(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(),
root=os.path.join(get_home_dir(), 'models'), **kwargs):
r"""3-layer LSTM language model with weight-drop, variational dropout, and tied weights.
Embedding size is 400, and hidden layer size is 1150.
Parameters
----------
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
The pre-trained model achieves 73.32/69.74 ppl on Val and Test of wikitext-2 respectively.
vocab : gluonnlp.Vocab or None, default None
Vocab object to be used with the language model.
Required when dataset_name is not specified.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '$MXNET_HOME/models'
Location for keeping the model parameters.
MXNET_HOME defaults to '~/.mxnet'.
Returns
-------
gluon.Block, gluonnlp.Vocab
"""
predefined_args = {'embed_size': 400,
'hidden_size': 1150,
'mode': 'lstm',
'num_layers': 3,
'tie_weights': True,
'dropout': 0.4,
'weight_drop': 0.5,
'drop_h': 0.2,
'drop_i': 0.65,
'drop_e': 0.1}
mutable_args = frozenset(['dropout', 'weight_drop', 'drop_h', 'drop_i', 'drop_e'])
assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \
'Cannot override predefined model settings.'
predefined_args.update(kwargs)
return _get_rnn_model(AWDRNN, 'awd_lstm_lm_1150', dataset_name, vocab, pretrained,
ctx, root, **predefined_args) | [
"def",
"awd_lstm_lm_1150",
"(",
"dataset_name",
"=",
"None",
",",
"vocab",
"=",
"None",
",",
"pretrained",
"=",
"False",
",",
"ctx",
"=",
"cpu",
"(",
")",
",",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"get_home_dir",
"(",
")",
",",
"'models'",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"predefined_args",
"=",
"{",
"'embed_size'",
":",
"400",
",",
"'hidden_size'",
":",
"1150",
",",
"'mode'",
":",
"'lstm'",
",",
"'num_layers'",
":",
"3",
",",
"'tie_weights'",
":",
"True",
",",
"'dropout'",
":",
"0.4",
",",
"'weight_drop'",
":",
"0.5",
",",
"'drop_h'",
":",
"0.2",
",",
"'drop_i'",
":",
"0.65",
",",
"'drop_e'",
":",
"0.1",
"}",
"mutable_args",
"=",
"frozenset",
"(",
"[",
"'dropout'",
",",
"'weight_drop'",
",",
"'drop_h'",
",",
"'drop_i'",
",",
"'drop_e'",
"]",
")",
"assert",
"all",
"(",
"(",
"k",
"not",
"in",
"kwargs",
"or",
"k",
"in",
"mutable_args",
")",
"for",
"k",
"in",
"predefined_args",
")",
",",
"'Cannot override predefined model settings.'",
"predefined_args",
".",
"update",
"(",
"kwargs",
")",
"return",
"_get_rnn_model",
"(",
"AWDRNN",
",",
"'awd_lstm_lm_1150'",
",",
"dataset_name",
",",
"vocab",
",",
"pretrained",
",",
"ctx",
",",
"root",
",",
"*",
"*",
"predefined_args",
")"
] | 46.043478 | 18.456522 |
def benchmark(args):
"""
%prog benchmark at bedfile
Compare SynFind, MCScanx, iADHoRe and OrthoFinder against the truth.
"""
p = OptionParser(benchmark.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
pf, bedfile = args
truth = pf + ".truth"
synfind = pf + ".synfind"
mcscanx = pf + ".mcscanx"
iadhore = pf + ".iadhore"
orthofinder = pf + ".orthofinder"
pivots = set([x.accn for x in Bed(bedfile)])
fp = open(truth)
truth = set()
for row in fp:
a, b = row.strip().split("\t")[:2]
pivots.add(a)
truth.add(tuple(sorted((a, b))))
logging.debug("Truth: {0} pairs".format(len(truth)))
fp = open(synfind)
benchmarkfile = pf + ".benchmark"
fw = must_open(benchmarkfile, "w")
synfind = set()
for row in fp:
atoms = row.strip().split("\t")
query, hit, tag = atoms[:3]
if tag != "S":
continue
synfind.add(tuple(sorted((query, hit))))
calc_sensitivity_specificity(synfind, truth, "SynFind", fw)
fp = open(mcscanx)
mcscanx = set()
for row in fp:
if row[0] == '#':
continue
atoms = row.strip().split(":")[1].split()
query, hit = atoms[:2]
mcscanx.add(tuple(sorted((query, hit))))
calc_sensitivity_specificity(mcscanx, truth, "MCScanX", fw)
fp = open(iadhore)
iadhore = set()
next(fp)
for row in fp:
atoms = row.strip().split("\t")
query, hit = atoms[3:5]
iadhore.add(tuple(sorted((query, hit))))
calc_sensitivity_specificity(iadhore, truth, "iADHoRe", fw)
fp = open(orthofinder)
orthofinder = set()
next(fp)
for row in fp:
row = row.replace('"', "")
atoms = row.replace(",", " ").split()
genes = [x.strip() for x in atoms if not x.startswith("OG")]
genes = [gene_name(x) for x in genes]
pps = [x for x in genes if x in pivots]
for p in pps:
for g in genes:
if p == g:
continue
orthofinder.add(tuple(sorted((p, g))))
#write_pairs(orthofinder, "orthofinder.pairs")
calc_sensitivity_specificity(orthofinder, truth, "OrthoFinder", fw)
fw.close() | [
"def",
"benchmark",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"benchmark",
".",
"__doc__",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"2",
":",
"sys",
".",
"exit",
"(",
"not",
"p",
".",
"print_help",
"(",
")",
")",
"pf",
",",
"bedfile",
"=",
"args",
"truth",
"=",
"pf",
"+",
"\".truth\"",
"synfind",
"=",
"pf",
"+",
"\".synfind\"",
"mcscanx",
"=",
"pf",
"+",
"\".mcscanx\"",
"iadhore",
"=",
"pf",
"+",
"\".iadhore\"",
"orthofinder",
"=",
"pf",
"+",
"\".orthofinder\"",
"pivots",
"=",
"set",
"(",
"[",
"x",
".",
"accn",
"for",
"x",
"in",
"Bed",
"(",
"bedfile",
")",
"]",
")",
"fp",
"=",
"open",
"(",
"truth",
")",
"truth",
"=",
"set",
"(",
")",
"for",
"row",
"in",
"fp",
":",
"a",
",",
"b",
"=",
"row",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"[",
":",
"2",
"]",
"pivots",
".",
"add",
"(",
"a",
")",
"truth",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"a",
",",
"b",
")",
")",
")",
")",
"logging",
".",
"debug",
"(",
"\"Truth: {0} pairs\"",
".",
"format",
"(",
"len",
"(",
"truth",
")",
")",
")",
"fp",
"=",
"open",
"(",
"synfind",
")",
"benchmarkfile",
"=",
"pf",
"+",
"\".benchmark\"",
"fw",
"=",
"must_open",
"(",
"benchmarkfile",
",",
"\"w\"",
")",
"synfind",
"=",
"set",
"(",
")",
"for",
"row",
"in",
"fp",
":",
"atoms",
"=",
"row",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"query",
",",
"hit",
",",
"tag",
"=",
"atoms",
"[",
":",
"3",
"]",
"if",
"tag",
"!=",
"\"S\"",
":",
"continue",
"synfind",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"query",
",",
"hit",
")",
")",
")",
")",
"calc_sensitivity_specificity",
"(",
"synfind",
",",
"truth",
",",
"\"SynFind\"",
",",
"fw",
")",
"fp",
"=",
"open",
"(",
"mcscanx",
")",
"mcscanx",
"=",
"set",
"(",
")",
"for",
"row",
"in",
"fp",
":",
"if",
"row",
"[",
"0",
"]",
"==",
"'#'",
":",
"continue",
"atoms",
"=",
"row",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"query",
",",
"hit",
"=",
"atoms",
"[",
":",
"2",
"]",
"mcscanx",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"query",
",",
"hit",
")",
")",
")",
")",
"calc_sensitivity_specificity",
"(",
"mcscanx",
",",
"truth",
",",
"\"MCScanX\"",
",",
"fw",
")",
"fp",
"=",
"open",
"(",
"iadhore",
")",
"iadhore",
"=",
"set",
"(",
")",
"next",
"(",
"fp",
")",
"for",
"row",
"in",
"fp",
":",
"atoms",
"=",
"row",
".",
"strip",
"(",
")",
".",
"split",
"(",
"\"\\t\"",
")",
"query",
",",
"hit",
"=",
"atoms",
"[",
"3",
":",
"5",
"]",
"iadhore",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"query",
",",
"hit",
")",
")",
")",
")",
"calc_sensitivity_specificity",
"(",
"iadhore",
",",
"truth",
",",
"\"iADHoRe\"",
",",
"fw",
")",
"fp",
"=",
"open",
"(",
"orthofinder",
")",
"orthofinder",
"=",
"set",
"(",
")",
"next",
"(",
"fp",
")",
"for",
"row",
"in",
"fp",
":",
"row",
"=",
"row",
".",
"replace",
"(",
"'\"'",
",",
"\"\"",
")",
"atoms",
"=",
"row",
".",
"replace",
"(",
"\",\"",
",",
"\" \"",
")",
".",
"split",
"(",
")",
"genes",
"=",
"[",
"x",
".",
"strip",
"(",
")",
"for",
"x",
"in",
"atoms",
"if",
"not",
"x",
".",
"startswith",
"(",
"\"OG\"",
")",
"]",
"genes",
"=",
"[",
"gene_name",
"(",
"x",
")",
"for",
"x",
"in",
"genes",
"]",
"pps",
"=",
"[",
"x",
"for",
"x",
"in",
"genes",
"if",
"x",
"in",
"pivots",
"]",
"for",
"p",
"in",
"pps",
":",
"for",
"g",
"in",
"genes",
":",
"if",
"p",
"==",
"g",
":",
"continue",
"orthofinder",
".",
"add",
"(",
"tuple",
"(",
"sorted",
"(",
"(",
"p",
",",
"g",
")",
")",
")",
")",
"#write_pairs(orthofinder, \"orthofinder.pairs\")",
"calc_sensitivity_specificity",
"(",
"orthofinder",
",",
"truth",
",",
"\"OrthoFinder\"",
",",
"fw",
")",
"fw",
".",
"close",
"(",
")"
] | 29.171053 | 16.802632 |
def make_csv(self):
"""
Get the text representation of a report element as csv.
"""
import csv
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import StringIO
out = StringIO()
writer = csv.writer(out, delimiter='|', lineterminator='\n', quoting=csv.QUOTE_MINIMAL)
if self.function == 'total':
writer.writerows(self.results)
elif self.function == 'top':
rows = [['Value', self.headers.strip('"')]]
if self.results[0] is not None:
for res in self.results:
if res is not None:
rows.append(tuple([res[0], ','.join(res[1])]))
writer.writerows(rows)
elif self.function == 'table':
rows = [[header.strip('"') for header in re.split('\s*,\s*', self.headers)]]
for res in sorted(self.results, key=lambda x: x[0]):
row = list(res[:-1])
lastcol = get_fmt_results(res[-1], limit=10)
if lastcol[-1][0] == '[' and lastcol[-1][-1] == ']':
row.append(u'{0} {1}'.format(u', '.join(lastcol[:-1]), lastcol[-1]))
else:
row.append(u', '.join(lastcol))
rows.append(row)
writer.writerows(rows)
self.csv = out.getvalue() | [
"def",
"make_csv",
"(",
"self",
")",
":",
"import",
"csv",
"try",
":",
"from",
"StringIO",
"import",
"StringIO",
"# Python 2.7",
"except",
"ImportError",
":",
"from",
"io",
"import",
"StringIO",
"out",
"=",
"StringIO",
"(",
")",
"writer",
"=",
"csv",
".",
"writer",
"(",
"out",
",",
"delimiter",
"=",
"'|'",
",",
"lineterminator",
"=",
"'\\n'",
",",
"quoting",
"=",
"csv",
".",
"QUOTE_MINIMAL",
")",
"if",
"self",
".",
"function",
"==",
"'total'",
":",
"writer",
".",
"writerows",
"(",
"self",
".",
"results",
")",
"elif",
"self",
".",
"function",
"==",
"'top'",
":",
"rows",
"=",
"[",
"[",
"'Value'",
",",
"self",
".",
"headers",
".",
"strip",
"(",
"'\"'",
")",
"]",
"]",
"if",
"self",
".",
"results",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"for",
"res",
"in",
"self",
".",
"results",
":",
"if",
"res",
"is",
"not",
"None",
":",
"rows",
".",
"append",
"(",
"tuple",
"(",
"[",
"res",
"[",
"0",
"]",
",",
"','",
".",
"join",
"(",
"res",
"[",
"1",
"]",
")",
"]",
")",
")",
"writer",
".",
"writerows",
"(",
"rows",
")",
"elif",
"self",
".",
"function",
"==",
"'table'",
":",
"rows",
"=",
"[",
"[",
"header",
".",
"strip",
"(",
"'\"'",
")",
"for",
"header",
"in",
"re",
".",
"split",
"(",
"'\\s*,\\s*'",
",",
"self",
".",
"headers",
")",
"]",
"]",
"for",
"res",
"in",
"sorted",
"(",
"self",
".",
"results",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")",
":",
"row",
"=",
"list",
"(",
"res",
"[",
":",
"-",
"1",
"]",
")",
"lastcol",
"=",
"get_fmt_results",
"(",
"res",
"[",
"-",
"1",
"]",
",",
"limit",
"=",
"10",
")",
"if",
"lastcol",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"==",
"'['",
"and",
"lastcol",
"[",
"-",
"1",
"]",
"[",
"-",
"1",
"]",
"==",
"']'",
":",
"row",
".",
"append",
"(",
"u'{0} {1}'",
".",
"format",
"(",
"u', '",
".",
"join",
"(",
"lastcol",
"[",
":",
"-",
"1",
"]",
")",
",",
"lastcol",
"[",
"-",
"1",
"]",
")",
")",
"else",
":",
"row",
".",
"append",
"(",
"u', '",
".",
"join",
"(",
"lastcol",
")",
")",
"rows",
".",
"append",
"(",
"row",
")",
"writer",
".",
"writerows",
"(",
"rows",
")",
"self",
".",
"csv",
"=",
"out",
".",
"getvalue",
"(",
")"
] | 37.210526 | 19.052632 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self,
'destination_field') and self.destination_field is not None:
_dict['destination_field'] = self.destination_field
if hasattr(self, 'source_field') and self.source_field is not None:
_dict['source_field'] = self.source_field
if hasattr(self, 'overwrite') and self.overwrite is not None:
_dict['overwrite'] = self.overwrite
if hasattr(self,
'enrichment_name') and self.enrichment_name is not None:
_dict['enrichment'] = self.enrichment_name
if hasattr(self, 'ignore_downstream_errors'
) and self.ignore_downstream_errors is not None:
_dict['ignore_downstream_errors'] = self.ignore_downstream_errors
if hasattr(self, 'options') and self.options is not None:
_dict['options'] = self.options._to_dict()
return _dict | [
"def",
"_to_dict",
"(",
"self",
")",
":",
"_dict",
"=",
"{",
"}",
"if",
"hasattr",
"(",
"self",
",",
"'description'",
")",
"and",
"self",
".",
"description",
"is",
"not",
"None",
":",
"_dict",
"[",
"'description'",
"]",
"=",
"self",
".",
"description",
"if",
"hasattr",
"(",
"self",
",",
"'destination_field'",
")",
"and",
"self",
".",
"destination_field",
"is",
"not",
"None",
":",
"_dict",
"[",
"'destination_field'",
"]",
"=",
"self",
".",
"destination_field",
"if",
"hasattr",
"(",
"self",
",",
"'source_field'",
")",
"and",
"self",
".",
"source_field",
"is",
"not",
"None",
":",
"_dict",
"[",
"'source_field'",
"]",
"=",
"self",
".",
"source_field",
"if",
"hasattr",
"(",
"self",
",",
"'overwrite'",
")",
"and",
"self",
".",
"overwrite",
"is",
"not",
"None",
":",
"_dict",
"[",
"'overwrite'",
"]",
"=",
"self",
".",
"overwrite",
"if",
"hasattr",
"(",
"self",
",",
"'enrichment_name'",
")",
"and",
"self",
".",
"enrichment_name",
"is",
"not",
"None",
":",
"_dict",
"[",
"'enrichment'",
"]",
"=",
"self",
".",
"enrichment_name",
"if",
"hasattr",
"(",
"self",
",",
"'ignore_downstream_errors'",
")",
"and",
"self",
".",
"ignore_downstream_errors",
"is",
"not",
"None",
":",
"_dict",
"[",
"'ignore_downstream_errors'",
"]",
"=",
"self",
".",
"ignore_downstream_errors",
"if",
"hasattr",
"(",
"self",
",",
"'options'",
")",
"and",
"self",
".",
"options",
"is",
"not",
"None",
":",
"_dict",
"[",
"'options'",
"]",
"=",
"self",
".",
"options",
".",
"_to_dict",
"(",
")",
"return",
"_dict"
] | 53.333333 | 21.285714 |
def _process_response(self, response: Response):
        '''Handle the response and update the internal state.'''
        _logger.debug('Handling response')

        tracker = self._redirect_tracker
        tracker.load(response)

        if tracker.is_redirect():
            # Follow the redirect on the next loop iteration.
            self._process_redirect()
            self._loop_type = LoopType.redirect
        elif (response.status_code == http.client.UNAUTHORIZED
                and self._next_request.password):
            # Retry the request with authentication credentials.
            self._process_authentication(response)
        else:
            # Request finished normally; nothing further to fetch.
            self._next_request = None
            self._loop_type = LoopType.normal

        if self._cookie_jar:
            self._extract_cookies(response)

            if self._next_request:
                self._add_cookies(self._next_request)
"def",
"_process_response",
"(",
"self",
",",
"response",
":",
"Response",
")",
":",
"_logger",
".",
"debug",
"(",
"'Handling response'",
")",
"self",
".",
"_redirect_tracker",
".",
"load",
"(",
"response",
")",
"if",
"self",
".",
"_redirect_tracker",
".",
"is_redirect",
"(",
")",
":",
"self",
".",
"_process_redirect",
"(",
")",
"self",
".",
"_loop_type",
"=",
"LoopType",
".",
"redirect",
"elif",
"response",
".",
"status_code",
"==",
"http",
".",
"client",
".",
"UNAUTHORIZED",
"and",
"self",
".",
"_next_request",
".",
"password",
":",
"self",
".",
"_process_authentication",
"(",
"response",
")",
"else",
":",
"self",
".",
"_next_request",
"=",
"None",
"self",
".",
"_loop_type",
"=",
"LoopType",
".",
"normal",
"if",
"self",
".",
"_cookie_jar",
":",
"self",
".",
"_extract_cookies",
"(",
"response",
")",
"if",
"self",
".",
"_next_request",
":",
"self",
".",
"_add_cookies",
"(",
"self",
".",
"_next_request",
")"
] | 36.35 | 17.55 |
def clean_markdown(text):
    """
    Strip markdown formatting from ``text``.

    Renders the markdown to HTML and returns only the visible text
    content; non-string inputs are returned unchanged.
    """
    if not isinstance(text, str):
        return text
    html = markdown(text)
    fragments = BeautifulSoup(html, 'lxml').findAll(text=True)
    return ''.join(fragments)
"def",
"clean_markdown",
"(",
"text",
")",
":",
"result",
"=",
"text",
"if",
"isinstance",
"(",
"text",
",",
"str",
")",
":",
"result",
"=",
"''",
".",
"join",
"(",
"BeautifulSoup",
"(",
"markdown",
"(",
"text",
")",
",",
"'lxml'",
")",
".",
"findAll",
"(",
"text",
"=",
"True",
")",
")",
"return",
"result"
] | 21 | 18.272727 |
def random_draw(self, size=None):
        """Draw random samples of the hyperparameters.

        Each hyperparameter is drawn independently from a gamma
        distribution with shape ``self.a[i]`` and rate ``self.b[i]``.

        Parameters
        ----------
        size : None, int or array-like, optional
            The number/shape of samples to draw. If None, only one sample is
            returned. Default is None.

        Returns
        -------
        numpy.ndarray
            Array of draws with one row per (a, b) hyperparameter pair.
        """
        # Local import: 'scipy.asarray' (a re-exported NumPy alias) was
        # removed from modern SciPy, so use numpy.asarray directly.
        import numpy
        return numpy.asarray(
            [scipy.stats.gamma.rvs(a, loc=0, scale=1.0 / b, size=size)
             for a, b in zip(self.a, self.b)])
"def",
"random_draw",
"(",
"self",
",",
"size",
"=",
"None",
")",
":",
"return",
"scipy",
".",
"asarray",
"(",
"[",
"scipy",
".",
"stats",
".",
"gamma",
".",
"rvs",
"(",
"a",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"1.0",
"/",
"b",
",",
"size",
"=",
"size",
")",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"self",
".",
"a",
",",
"self",
".",
"b",
")",
"]",
")"
] | 42.5 | 21 |
def _load_calib(self):
        """Load and compute intrinsic and extrinsic calibration parameters."""
        # Gather everything in a plain dict first; it is frozen into a
        # namedtuple at the end so it cannot be modified later.
        data = {}

        # Load the raw calibration file for this sequence
        calib_filepath = os.path.join(self.sequence_path, 'calib.txt')
        filedata = utils.read_calib_file(calib_filepath)

        # 3x4 projection matrices of the four rectified cameras
        proj_mats = [np.reshape(filedata['P{}'.format(cam)], (3, 4))
                     for cam in range(4)]
        for cam, proj in enumerate(proj_mats):
            data['P_rect_{}0'.format(cam)] = proj

        # Rectified extrinsics from cam0 to camN: a pure translation along
        # the x axis, recovered from the projection matrices.
        rigid_transforms = [np.eye(4) for _ in range(4)]
        for cam in range(1, 4):
            rigid_transforms[cam][0, 3] = \
                proj_mats[cam][0, 3] / proj_mats[cam][0, 0]

        # Velodyne -> rectified camera coordinate transforms
        T_cam0_velo = np.vstack([np.reshape(filedata['Tr'], (3, 4)),
                                 [0, 0, 0, 1]])
        data['T_cam0_velo'] = T_cam0_velo
        for cam in range(1, 4):
            data['T_cam{}_velo'.format(cam)] = \
                rigid_transforms[cam].dot(T_cam0_velo)

        # Camera intrinsics: upper-left 3x3 block of each projection matrix
        for cam in range(4):
            data['K_cam{}'.format(cam)] = proj_mats[cam][0:3, 0:3]

        # Stereo baselines in meters: project the origin of each camera
        # frame into the velodyne frame and measure the distances.
        p_cam = np.array([0, 0, 0, 1])
        origins = [np.linalg.inv(data['T_cam{}_velo'.format(cam)]).dot(p_cam)
                   for cam in range(4)]
        data['b_gray'] = np.linalg.norm(origins[1] - origins[0])  # gray baseline
        data['b_rgb'] = np.linalg.norm(origins[3] - origins[2])  # rgb baseline

        self.calib = namedtuple('CalibData', data.keys())(*data.values())
"def",
"_load_calib",
"(",
"self",
")",
":",
"# We'll build the calibration parameters as a dictionary, then",
"# convert it to a namedtuple to prevent it from being modified later",
"data",
"=",
"{",
"}",
"# Load the calibration file",
"calib_filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"sequence_path",
",",
"'calib.txt'",
")",
"filedata",
"=",
"utils",
".",
"read_calib_file",
"(",
"calib_filepath",
")",
"# Create 3x4 projection matrices",
"P_rect_00",
"=",
"np",
".",
"reshape",
"(",
"filedata",
"[",
"'P0'",
"]",
",",
"(",
"3",
",",
"4",
")",
")",
"P_rect_10",
"=",
"np",
".",
"reshape",
"(",
"filedata",
"[",
"'P1'",
"]",
",",
"(",
"3",
",",
"4",
")",
")",
"P_rect_20",
"=",
"np",
".",
"reshape",
"(",
"filedata",
"[",
"'P2'",
"]",
",",
"(",
"3",
",",
"4",
")",
")",
"P_rect_30",
"=",
"np",
".",
"reshape",
"(",
"filedata",
"[",
"'P3'",
"]",
",",
"(",
"3",
",",
"4",
")",
")",
"data",
"[",
"'P_rect_00'",
"]",
"=",
"P_rect_00",
"data",
"[",
"'P_rect_10'",
"]",
"=",
"P_rect_10",
"data",
"[",
"'P_rect_20'",
"]",
"=",
"P_rect_20",
"data",
"[",
"'P_rect_30'",
"]",
"=",
"P_rect_30",
"# Compute the rectified extrinsics from cam0 to camN",
"T1",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"T1",
"[",
"0",
",",
"3",
"]",
"=",
"P_rect_10",
"[",
"0",
",",
"3",
"]",
"/",
"P_rect_10",
"[",
"0",
",",
"0",
"]",
"T2",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"T2",
"[",
"0",
",",
"3",
"]",
"=",
"P_rect_20",
"[",
"0",
",",
"3",
"]",
"/",
"P_rect_20",
"[",
"0",
",",
"0",
"]",
"T3",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"T3",
"[",
"0",
",",
"3",
"]",
"=",
"P_rect_30",
"[",
"0",
",",
"3",
"]",
"/",
"P_rect_30",
"[",
"0",
",",
"0",
"]",
"# Compute the velodyne to rectified camera coordinate transforms",
"data",
"[",
"'T_cam0_velo'",
"]",
"=",
"np",
".",
"reshape",
"(",
"filedata",
"[",
"'Tr'",
"]",
",",
"(",
"3",
",",
"4",
")",
")",
"data",
"[",
"'T_cam0_velo'",
"]",
"=",
"np",
".",
"vstack",
"(",
"[",
"data",
"[",
"'T_cam0_velo'",
"]",
",",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
"]",
"]",
")",
"data",
"[",
"'T_cam1_velo'",
"]",
"=",
"T1",
".",
"dot",
"(",
"data",
"[",
"'T_cam0_velo'",
"]",
")",
"data",
"[",
"'T_cam2_velo'",
"]",
"=",
"T2",
".",
"dot",
"(",
"data",
"[",
"'T_cam0_velo'",
"]",
")",
"data",
"[",
"'T_cam3_velo'",
"]",
"=",
"T3",
".",
"dot",
"(",
"data",
"[",
"'T_cam0_velo'",
"]",
")",
"# Compute the camera intrinsics",
"data",
"[",
"'K_cam0'",
"]",
"=",
"P_rect_00",
"[",
"0",
":",
"3",
",",
"0",
":",
"3",
"]",
"data",
"[",
"'K_cam1'",
"]",
"=",
"P_rect_10",
"[",
"0",
":",
"3",
",",
"0",
":",
"3",
"]",
"data",
"[",
"'K_cam2'",
"]",
"=",
"P_rect_20",
"[",
"0",
":",
"3",
",",
"0",
":",
"3",
"]",
"data",
"[",
"'K_cam3'",
"]",
"=",
"P_rect_30",
"[",
"0",
":",
"3",
",",
"0",
":",
"3",
"]",
"# Compute the stereo baselines in meters by projecting the origin of",
"# each camera frame into the velodyne frame and computing the distances",
"# between them",
"p_cam",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"0",
",",
"0",
",",
"1",
"]",
")",
"p_velo0",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"data",
"[",
"'T_cam0_velo'",
"]",
")",
".",
"dot",
"(",
"p_cam",
")",
"p_velo1",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"data",
"[",
"'T_cam1_velo'",
"]",
")",
".",
"dot",
"(",
"p_cam",
")",
"p_velo2",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"data",
"[",
"'T_cam2_velo'",
"]",
")",
".",
"dot",
"(",
"p_cam",
")",
"p_velo3",
"=",
"np",
".",
"linalg",
".",
"inv",
"(",
"data",
"[",
"'T_cam3_velo'",
"]",
")",
".",
"dot",
"(",
"p_cam",
")",
"data",
"[",
"'b_gray'",
"]",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"p_velo1",
"-",
"p_velo0",
")",
"# gray baseline",
"data",
"[",
"'b_rgb'",
"]",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"p_velo3",
"-",
"p_velo2",
")",
"# rgb baseline",
"self",
".",
"calib",
"=",
"namedtuple",
"(",
"'CalibData'",
",",
"data",
".",
"keys",
"(",
")",
")",
"(",
"*",
"data",
".",
"values",
"(",
")",
")"
] | 43.581818 | 20.818182 |
def _PrintStorageInformationAsJSON(self, storage_reader):
    """Writes a summary of sessions as machine-readable JSON.

    Args:
      storage_reader (StorageReader): storage reader.
    """
    serializer = json_serializer.JSONAttributeContainerSerializer
    write = self._output_writer.Write

    counters_json = json.dumps(self._CalculateStorageCounters(storage_reader))

    # Open the top-level object and emit the storage counters first.
    write('{')
    write('"storage_counters": {0:s}'.format(counters_json))
    write(',\n')
    write(' "sessions": {')

    # One entry per session, comma-separated.
    for index, session in enumerate(storage_reader.GetSessions()):
      serialized_session = serializer.WriteSerialized(session)
      if index != 0:
        write(',\n')
      write('"session_{0:s}": {1:s} '.format(
          session.identifier, serialized_session))

    # Close both the sessions object and the top-level object.
    write('}}')
"def",
"_PrintStorageInformationAsJSON",
"(",
"self",
",",
"storage_reader",
")",
":",
"serializer",
"=",
"json_serializer",
".",
"JSONAttributeContainerSerializer",
"storage_counters",
"=",
"self",
".",
"_CalculateStorageCounters",
"(",
"storage_reader",
")",
"storage_counters_json",
"=",
"json",
".",
"dumps",
"(",
"storage_counters",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"'{'",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"'\"storage_counters\": {0:s}'",
".",
"format",
"(",
"storage_counters_json",
")",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"',\\n'",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"' \"sessions\": {'",
")",
"for",
"index",
",",
"session",
"in",
"enumerate",
"(",
"storage_reader",
".",
"GetSessions",
"(",
")",
")",
":",
"json_string",
"=",
"serializer",
".",
"WriteSerialized",
"(",
"session",
")",
"if",
"index",
"!=",
"0",
":",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"',\\n'",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"'\"session_{0:s}\": {1:s} '",
".",
"format",
"(",
"session",
".",
"identifier",
",",
"json_string",
")",
")",
"self",
".",
"_output_writer",
".",
"Write",
"(",
"'}}'",
")"
] | 43.52381 | 15.047619 |
def remove_tag(self, name, user, message=None, date=None):
        """
        Removes tag with the given ``name``.

        :param name: name of the tag to be removed
        :param user: full username, i.e.: "Joe Doe <joe.doe@example.com>"
        :param message: message of the tag's removal commit (currently unused)
        :param date: date of tag's removal commit (currently unused)

        :raises TagDoesNotExistError: if tag with given name does not exists
        """
        if name not in self.tags:
            raise TagDoesNotExistError("Tag %s does not exist" % name)
        # Git refs always use posix-style separators, regardless of platform.
        tagpath = posixpath.join(self._repo.refs.path, 'refs', 'tags', name)
        try:
            os.remove(tagpath)
            # Refresh cached ref/tag state after removing the ref file.
            self._parsed_refs = self._get_parsed_refs()
            self.tags = self._get_tags()
        except OSError as e:  # 'as' form works on Python 2.6+ and Python 3
            raise RepositoryError(e.strerror)
"def",
"remove_tag",
"(",
"self",
",",
"name",
",",
"user",
",",
"message",
"=",
"None",
",",
"date",
"=",
"None",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"tags",
":",
"raise",
"TagDoesNotExistError",
"(",
"\"Tag %s does not exist\"",
"%",
"name",
")",
"tagpath",
"=",
"posixpath",
".",
"join",
"(",
"self",
".",
"_repo",
".",
"refs",
".",
"path",
",",
"'refs'",
",",
"'tags'",
",",
"name",
")",
"try",
":",
"os",
".",
"remove",
"(",
"tagpath",
")",
"self",
".",
"_parsed_refs",
"=",
"self",
".",
"_get_parsed_refs",
"(",
")",
"self",
".",
"tags",
"=",
"self",
".",
"_get_tags",
"(",
")",
"except",
"OSError",
",",
"e",
":",
"raise",
"RepositoryError",
"(",
"e",
".",
"strerror",
")"
] | 40.9 | 17.7 |
def _Close(self):
"""Closes the file system.
Raises:
IOError: if the close failed.
"""
self._vshadow_volume.close()
self._vshadow_volume = None
self._file_object.close()
self._file_object = None | [
"def",
"_Close",
"(",
"self",
")",
":",
"self",
".",
"_vshadow_volume",
".",
"close",
"(",
")",
"self",
".",
"_vshadow_volume",
"=",
"None",
"self",
".",
"_file_object",
".",
"close",
"(",
")",
"self",
".",
"_file_object",
"=",
"None"
] | 20 | 16.090909 |
def destroy_image(self, image_id_or_slug):
        """
        This method allows you to destroy an image. There is no way to restore
        a deleted image so be careful and ensure your data is properly backed up.

        Required parameters
            image_id:
                Numeric, this is the id of the image you would like to destroy
        """
        if not image_id_or_slug:
            raise DOPException('image_id_or_slug is required to destroy an image!')
        # The API reports the outcome under the 'status' key.
        response = self.request('/images/%s/destroy' % image_id_or_slug,
                                method='GET')
        return response.get('status')
"def",
"destroy_image",
"(",
"self",
",",
"image_id_or_slug",
")",
":",
"if",
"not",
"image_id_or_slug",
":",
"msg",
"=",
"'image_id_or_slug is required to destroy an image!'",
"raise",
"DOPException",
"(",
"msg",
")",
"json",
"=",
"self",
".",
"request",
"(",
"'/images/%s/destroy'",
"%",
"image_id_or_slug",
",",
"method",
"=",
"'GET'",
")",
"status",
"=",
"json",
".",
"get",
"(",
"'status'",
")",
"return",
"status"
] | 34.611111 | 23.277778 |
def adapt(obj, to_cls):
    """
    Will adapt `obj` to an instance of `to_cls`.

    First sees if `obj` has an `__adapt__` method and uses it to adapt. If that fails
    it checks if `to_cls` has an `__adapt__` classmethod and uses it to adapt. IF that
    fails, MRO is used. If that
    fails, a `TypeError` is raised.
    """
    if obj is None or isinstance(obj, to_cls):
        return obj

    errors = []

    def _attempt(adapter, *args):
        # Run one adapter; on failure record (adapter, exc_type, exc, tb)
        # and report failure so the next strategy can be tried.
        try:
            return True, adapter(*args)
        except (AdaptError, TypeError):
            ex_type, ex, tb = sys.exc_info()
            errors.append((adapter, ex_type, ex, tb))
            return False, None

    # Strategy 1: the object knows how to adapt itself.
    if getattr(obj, '__adapt__', None):
        ok, adapted = _attempt(obj.__adapt__, to_cls)
        if ok:
            return adapted

    # Strategy 2: the target class knows how to adapt the object.
    if getattr(to_cls, '__adapt__', None):
        ok, adapted = _attempt(to_cls.__adapt__, obj)
        if ok:
            return adapted

    # Strategy 3: first registered adapter found along the MRO path.
    for k in get_adapter_path(obj, to_cls):
        if k in __adapters__:
            ok, adapted = _attempt(__adapters__[k], obj, to_cls)
            if ok:
                return adapted
            break

    raise AdaptErrors('Could not adapt %r to %r' % (obj, to_cls), errors=errors)
"def",
"adapt",
"(",
"obj",
",",
"to_cls",
")",
":",
"if",
"obj",
"is",
"None",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"to_cls",
")",
":",
"return",
"obj",
"errors",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"obj",
",",
"'__adapt__'",
")",
"and",
"obj",
".",
"__adapt__",
":",
"try",
":",
"return",
"obj",
".",
"__adapt__",
"(",
"to_cls",
")",
"except",
"(",
"AdaptError",
",",
"TypeError",
")",
"as",
"e",
":",
"ex_type",
",",
"ex",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"errors",
".",
"append",
"(",
"(",
"obj",
".",
"__adapt__",
",",
"ex_type",
",",
"ex",
",",
"tb",
")",
")",
"if",
"hasattr",
"(",
"to_cls",
",",
"'__adapt__'",
")",
"and",
"to_cls",
".",
"__adapt__",
":",
"try",
":",
"return",
"to_cls",
".",
"__adapt__",
"(",
"obj",
")",
"except",
"(",
"AdaptError",
",",
"TypeError",
")",
"as",
"e",
":",
"ex_type",
",",
"ex",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"errors",
".",
"append",
"(",
"(",
"to_cls",
".",
"__adapt__",
",",
"ex_type",
",",
"ex",
",",
"tb",
")",
")",
"for",
"k",
"in",
"get_adapter_path",
"(",
"obj",
",",
"to_cls",
")",
":",
"if",
"k",
"in",
"__adapters__",
":",
"try",
":",
"return",
"__adapters__",
"[",
"k",
"]",
"(",
"obj",
",",
"to_cls",
")",
"except",
"(",
"AdaptError",
",",
"TypeError",
")",
"as",
"e",
":",
"ex_type",
",",
"ex",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"errors",
".",
"append",
"(",
"(",
"__adapters__",
"[",
"k",
"]",
",",
"ex_type",
",",
"ex",
",",
"tb",
")",
")",
"break",
"raise",
"AdaptErrors",
"(",
"'Could not adapt %r to %r'",
"%",
"(",
"obj",
",",
"to_cls",
")",
",",
"errors",
"=",
"errors",
")"
] | 33.375 | 18.925 |
def get_instance(self, payload):
        """
        Build an instance of CertificateInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateInstance
        :rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateInstance
        """
        fleet_sid = self._solution['fleet_sid']
        return CertificateInstance(self._version, payload, fleet_sid=fleet_sid)
"def",
"get_instance",
"(",
"self",
",",
"payload",
")",
":",
"return",
"CertificateInstance",
"(",
"self",
".",
"_version",
",",
"payload",
",",
"fleet_sid",
"=",
"self",
".",
"_solution",
"[",
"'fleet_sid'",
"]",
",",
")"
] | 44.1 | 27.5 |
def _dbc_decorate_namespace(bases: List[type], namespace: MutableMapping[str, Any]) -> None:
    """
    Collect invariants, preconditions and postconditions from the bases and decorate all the methods.

    Instance methods are simply replaced with the decorated function. Properties, class methods and
    static methods are overridden with new instances of ``property``, ``classmethod`` and
    ``staticmethod``, respectively.
    """
    _collapse_invariants(bases=bases, namespace=namespace)

    for key, value in namespace.items():
        if isinstance(value, property):
            _decorate_namespace_property(bases=bases, namespace=namespace, key=key)
        elif inspect.isfunction(value) or isinstance(value, (staticmethod, classmethod)):
            _decorate_namespace_function(bases=bases, namespace=namespace, key=key)
        # Anything else is neither a function nor a property; leave it untouched.
"def",
"_dbc_decorate_namespace",
"(",
"bases",
":",
"List",
"[",
"type",
"]",
",",
"namespace",
":",
"MutableMapping",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"None",
":",
"_collapse_invariants",
"(",
"bases",
"=",
"bases",
",",
"namespace",
"=",
"namespace",
")",
"for",
"key",
",",
"value",
"in",
"namespace",
".",
"items",
"(",
")",
":",
"if",
"inspect",
".",
"isfunction",
"(",
"value",
")",
"or",
"isinstance",
"(",
"value",
",",
"(",
"staticmethod",
",",
"classmethod",
")",
")",
":",
"_decorate_namespace_function",
"(",
"bases",
"=",
"bases",
",",
"namespace",
"=",
"namespace",
",",
"key",
"=",
"key",
")",
"elif",
"isinstance",
"(",
"value",
",",
"property",
")",
":",
"_decorate_namespace_property",
"(",
"bases",
"=",
"bases",
",",
"namespace",
"=",
"namespace",
",",
"key",
"=",
"key",
")",
"else",
":",
"# Ignore the value which is neither a function nor a property",
"pass"
] | 48.473684 | 34.157895 |
def trim(self, lower=None, upper=None):
        """Trim upper values in accordance with :math:`IC \\leq ICMAX`.

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(5)
        >>> icmax(2.0)
        >>> states.ic(-1.0, 0.0, 1.0, 2.0, 3.0)
        >>> states.ic
        ic(0.0, 0.0, 1.0, 2.0, 2.0)
        """
        if upper is None:
            # Default the upper bound to the ICMAX control parameter.
            upper = self.subseqs.seqs.model.parameters.control.icmax
        hland_sequences.State1DSequence.trim(self, lower, upper)
"def",
"trim",
"(",
"self",
",",
"lower",
"=",
"None",
",",
"upper",
"=",
"None",
")",
":",
"if",
"upper",
"is",
"None",
":",
"control",
"=",
"self",
".",
"subseqs",
".",
"seqs",
".",
"model",
".",
"parameters",
".",
"control",
"upper",
"=",
"control",
".",
"icmax",
"hland_sequences",
".",
"State1DSequence",
".",
"trim",
"(",
"self",
",",
"lower",
",",
"upper",
")"
] | 35.333333 | 12.666667 |
def current_function(frame):
    """
    Get reference to currently running function from inspect/trace stack frame.

    Parameters
    ----------
    frame : stack frame
        Stack frame obtained via trace or inspect

    Returns
    -------
    fnc : function reference
        Currently running function
    """
    if frame is None:
        return None
    code = frame.f_code
    # Attempting to extract the function reference for these calls appears
    # to be problematic
    if code.co_name in ('__del__', '_remove', '_removeHandlerRef'):
        return None
    try:
        # Solution follows suggestion at http://stackoverflow.com/a/37099372
        candidates = [
            referer for referer in gc.get_referrers(code)
            if getattr(referer, "__code__", None) is code
            and inspect.getclosurevars(referer).nonlocals.items()
            <= frame.f_locals.items()
        ]
    except ValueError:
        # inspect.getclosurevars can fail with ValueError: Cell is empty
        return None
    return candidates[0] if candidates else None
"def",
"current_function",
"(",
"frame",
")",
":",
"if",
"frame",
"is",
"None",
":",
"return",
"None",
"code",
"=",
"frame",
".",
"f_code",
"# Attempting to extract the function reference for these calls appears",
"# to be problematic",
"if",
"code",
".",
"co_name",
"==",
"'__del__'",
"or",
"code",
".",
"co_name",
"==",
"'_remove'",
"or",
"code",
".",
"co_name",
"==",
"'_removeHandlerRef'",
":",
"return",
"None",
"try",
":",
"# Solution follows suggestion at http://stackoverflow.com/a/37099372",
"lst",
"=",
"[",
"referer",
"for",
"referer",
"in",
"gc",
".",
"get_referrers",
"(",
"code",
")",
"if",
"getattr",
"(",
"referer",
",",
"\"__code__\"",
",",
"None",
")",
"is",
"code",
"and",
"inspect",
".",
"getclosurevars",
"(",
"referer",
")",
".",
"nonlocals",
".",
"items",
"(",
")",
"<=",
"frame",
".",
"f_locals",
".",
"items",
"(",
")",
"]",
"if",
"lst",
":",
"return",
"lst",
"[",
"0",
"]",
"else",
":",
"return",
"None",
"except",
"ValueError",
":",
"# inspect.getclosurevars can fail with ValueError: Cell is empty",
"return",
"None"
] | 28.710526 | 22.605263 |
def calculate_border_width(self):
        """
        Calculate the width of the menu border. This will be the width of the maximum allowable
        dimensions (usually the screen size), minus the left and right margins and the newline character.

        For example, given a maximum width of 80 characters, with left and right margins both
        set to 1, the border width would be 77 (80 - 1 - 1 - 1 = 77).

        Returns:
            int: the menu border width in columns.
        """
        horizontal_margin = self.margins.left + self.margins.right
        # Subtract one extra column for the newline character.
        return self.max_dimension.width - horizontal_margin - 1
"def",
"calculate_border_width",
"(",
"self",
")",
":",
"return",
"self",
".",
"max_dimension",
".",
"width",
"-",
"self",
".",
"margins",
".",
"left",
"-",
"self",
".",
"margins",
".",
"right",
"-",
"1"
] | 51.545455 | 29.727273 |
def write(self, file_name, delim=',', sep='\t'):
        """Write a directed hypergraph to a file, where nodes are
        represented as strings.
        Each column is separated by "sep", and the individual
        tail nodes and head nodes are delimited by "delim".
        The header line is currently ignored, but columns should be of
        the format:
        tailnode1[delim]..tailnodeM[sep]headnode1[delim]..headnodeN[sep]weight

        As a concrete example, an arbitrary line with delim=',' and
        sep='    ' (4 spaces) may look like:
        ::

            x1,x2    x3,x4,x5    12

        which defines a hyperedge of weight 12 from a tail set containing
        nodes "x1" and "x2" to a head set containing nodes "x3", "x4", and "x5"
        """
        # 'with' guarantees the handle is closed even if a getter raises
        # (the original left the file open on error).
        with open(file_name, 'w') as out_file:
            # write first header line
            out_file.write("tail" + sep + "head" + sep + "weight\n")
            for hyperedge_id in self.get_hyperedge_id_set():
                # str.join replaces the append-then-strip-last-delim dance
                tail = delim.join(self.get_hyperedge_tail(hyperedge_id))
                head = delim.join(self.get_hyperedge_head(hyperedge_id))
                weight = str(self.get_hyperedge_weight(hyperedge_id))
                out_file.write(tail + sep + head + sep + weight + "\n")
"def",
"write",
"(",
"self",
",",
"file_name",
",",
"delim",
"=",
"','",
",",
"sep",
"=",
"'\\t'",
")",
":",
"out_file",
"=",
"open",
"(",
"file_name",
",",
"'w'",
")",
"# write first header line",
"out_file",
".",
"write",
"(",
"\"tail\"",
"+",
"sep",
"+",
"\"head\"",
"+",
"sep",
"+",
"\"weight\\n\"",
")",
"for",
"hyperedge_id",
"in",
"self",
".",
"get_hyperedge_id_set",
"(",
")",
":",
"line",
"=",
"\"\"",
"# Write each tail node to the line, separated by delim",
"for",
"tail_node",
"in",
"self",
".",
"get_hyperedge_tail",
"(",
"hyperedge_id",
")",
":",
"line",
"+=",
"tail_node",
"+",
"delim",
"# Remove last (extra) delim",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"# Add sep between columns",
"line",
"+=",
"sep",
"# Write each head node to the line, separated by delim",
"for",
"head_node",
"in",
"self",
".",
"get_hyperedge_head",
"(",
"hyperedge_id",
")",
":",
"line",
"+=",
"head_node",
"+",
"delim",
"# Remove last (extra) delim",
"line",
"=",
"line",
"[",
":",
"-",
"1",
"]",
"# Write the weight to the line and end the line",
"line",
"+=",
"sep",
"+",
"str",
"(",
"self",
".",
"get_hyperedge_weight",
"(",
"hyperedge_id",
")",
")",
"+",
"\"\\n\"",
"out_file",
".",
"write",
"(",
"line",
")",
"out_file",
".",
"close",
"(",
")"
] | 36.106383 | 21.808511 |
def parse_samtools_rmdup(self):
        """ Find Samtools rmdup logs and parse their data.

        Populates ``self.samtools_rmdup`` mapping sample name ->
        duplicate-count stats, writes the parsed data file, adds a bar
        plot section and a General Stats column, and returns the number
        of samples parsed.
        """
        self.samtools_rmdup = dict()
        for f in self.find_log_files('samtools/rmdup', filehandles=True):
            # Example below:
            # [bam_rmdupse_core] 26602816 / 103563641 = 0.2569 in library ' '
            # Capture groups: (1) duplicates, (2) total, (3) fraction, (4) library name
            dups_regex = "\[bam_rmdups?e?_core\] (\d+) / (\d+) = (\d+\.\d+) in library '(.*)'"
            s_name = f['s_name']
            for l in f['f']:
                match = re.search(dups_regex, l)
                if match:
                    # Prefer the library name from the log line as the sample
                    # name; note it carries over to later lines of the same
                    # file whose library field is empty.
                    library_name = match.group(4).strip()
                    if library_name != '':
                        s_name = library_name
                    if s_name in self.samtools_rmdup:
                        log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
                    self.add_data_source(f, s_name)
                    self.samtools_rmdup[s_name] = dict()
                    self.samtools_rmdup[s_name]['n_dups'] = int(match.group(1))
                    self.samtools_rmdup[s_name]['n_tot'] = int(match.group(2))
                    self.samtools_rmdup[s_name]['n_unique'] = int(match.group(2)) - int(match.group(1))
                    self.samtools_rmdup[s_name]['pct_dups'] = float(match.group(3))*100
        # Filter to strip out ignored sample names
        self.samtools_rmdup = self.ignore_samples(self.samtools_rmdup)
        if len(self.samtools_rmdup) > 0:
            # Write parsed report data to a file
            self.write_data_file(self.samtools_rmdup, 'multiqc_samtools_rmdup')
            # Make a bar plot showing duplicates
            keys = OrderedDict()
            keys['n_unique'] = {'name': 'Non-duplicated reads'}
            keys['n_dups'] = {'name': 'Duplicated reads'}
            pconfig = {
                'id': 'samtools_rmdup_plot',
                'title': 'Samtools rmdup: Duplicate alignments',
                'ylab': 'Number of reads',
                'yDecimals': False
            }
            self.add_section (
                name = 'Duplicates removed',
                anchor = 'samtools-rmdup',
                plot = bargraph.plot(self.samtools_rmdup, keys, pconfig)
            )
            # Add a column to the General Stats table
            # General Stats Table
            stats_headers = OrderedDict()
            stats_headers['pct_dups'] = {
                'title': '% Dups',
                'description': 'Percent of duplicate alignments',
                'min': 0,
                'max': 100,
                'suffix': '%',
                'scale': 'OrRd'
            }
            self.general_stats_addcols(self.samtools_rmdup, stats_headers, 'Samtools rmdup')
        return len(self.samtools_rmdup)
"def",
"parse_samtools_rmdup",
"(",
"self",
")",
":",
"self",
".",
"samtools_rmdup",
"=",
"dict",
"(",
")",
"for",
"f",
"in",
"self",
".",
"find_log_files",
"(",
"'samtools/rmdup'",
",",
"filehandles",
"=",
"True",
")",
":",
"# Example below:",
"# [bam_rmdupse_core] 26602816 / 103563641 = 0.2569 in library ' '",
"dups_regex",
"=",
"\"\\[bam_rmdups?e?_core\\] (\\d+) / (\\d+) = (\\d+\\.\\d+) in library '(.*)'\"",
"s_name",
"=",
"f",
"[",
"'s_name'",
"]",
"for",
"l",
"in",
"f",
"[",
"'f'",
"]",
":",
"match",
"=",
"re",
".",
"search",
"(",
"dups_regex",
",",
"l",
")",
"if",
"match",
":",
"library_name",
"=",
"match",
".",
"group",
"(",
"4",
")",
".",
"strip",
"(",
")",
"if",
"library_name",
"!=",
"''",
":",
"s_name",
"=",
"library_name",
"if",
"s_name",
"in",
"self",
".",
"samtools_rmdup",
":",
"log",
".",
"debug",
"(",
"\"Duplicate sample name found in {}! Overwriting: {}\"",
".",
"format",
"(",
"f",
"[",
"'fn'",
"]",
",",
"s_name",
")",
")",
"self",
".",
"add_data_source",
"(",
"f",
",",
"s_name",
")",
"self",
".",
"samtools_rmdup",
"[",
"s_name",
"]",
"=",
"dict",
"(",
")",
"self",
".",
"samtools_rmdup",
"[",
"s_name",
"]",
"[",
"'n_dups'",
"]",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"self",
".",
"samtools_rmdup",
"[",
"s_name",
"]",
"[",
"'n_tot'",
"]",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"self",
".",
"samtools_rmdup",
"[",
"s_name",
"]",
"[",
"'n_unique'",
"]",
"=",
"int",
"(",
"match",
".",
"group",
"(",
"2",
")",
")",
"-",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
"self",
".",
"samtools_rmdup",
"[",
"s_name",
"]",
"[",
"'pct_dups'",
"]",
"=",
"float",
"(",
"match",
".",
"group",
"(",
"3",
")",
")",
"*",
"100",
"# Filter to strip out ignored sample names",
"self",
".",
"samtools_rmdup",
"=",
"self",
".",
"ignore_samples",
"(",
"self",
".",
"samtools_rmdup",
")",
"if",
"len",
"(",
"self",
".",
"samtools_rmdup",
")",
">",
"0",
":",
"# Write parsed report data to a file",
"self",
".",
"write_data_file",
"(",
"self",
".",
"samtools_rmdup",
",",
"'multiqc_samtools_rmdup'",
")",
"# Make a bar plot showing duplicates",
"keys",
"=",
"OrderedDict",
"(",
")",
"keys",
"[",
"'n_unique'",
"]",
"=",
"{",
"'name'",
":",
"'Non-duplicated reads'",
"}",
"keys",
"[",
"'n_dups'",
"]",
"=",
"{",
"'name'",
":",
"'Duplicated reads'",
"}",
"pconfig",
"=",
"{",
"'id'",
":",
"'samtools_rmdup_plot'",
",",
"'title'",
":",
"'Samtools rmdup: Duplicate alignments'",
",",
"'ylab'",
":",
"'Number of reads'",
",",
"'yDecimals'",
":",
"False",
"}",
"self",
".",
"add_section",
"(",
"name",
"=",
"'Duplicates removed'",
",",
"anchor",
"=",
"'samtools-rmdup'",
",",
"plot",
"=",
"bargraph",
".",
"plot",
"(",
"self",
".",
"samtools_rmdup",
",",
"keys",
",",
"pconfig",
")",
")",
"# Add a column to the General Stats table",
"# General Stats Table",
"stats_headers",
"=",
"OrderedDict",
"(",
")",
"stats_headers",
"[",
"'pct_dups'",
"]",
"=",
"{",
"'title'",
":",
"'% Dups'",
",",
"'description'",
":",
"'Percent of duplicate alignments'",
",",
"'min'",
":",
"0",
",",
"'max'",
":",
"100",
",",
"'suffix'",
":",
"'%'",
",",
"'scale'",
":",
"'OrRd'",
"}",
"self",
".",
"general_stats_addcols",
"(",
"self",
".",
"samtools_rmdup",
",",
"stats_headers",
",",
"'Samtools rmdup'",
")",
"return",
"len",
"(",
"self",
".",
"samtools_rmdup",
")"
] | 44.737705 | 20.229508 |
def facts(self, **kwargs):
"""Get all facts of this node. Additional arguments may also be
specified that will be passed to the query function.
"""
return self.__api.facts(query=EqualsOperator("certname", self.name),
**kwargs) | [
"def",
"facts",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"__api",
".",
"facts",
"(",
"query",
"=",
"EqualsOperator",
"(",
"\"certname\"",
",",
"self",
".",
"name",
")",
",",
"*",
"*",
"kwargs",
")"
] | 47.5 | 11.833333 |
def auth(self, request):
"""
let's auth the user to the Service
"""
client = self.get_evernote_client()
request_token = client.get_request_token(self.callback_url(request))
# Save the request token information for later
request.session['oauth_token'] = request_token['oauth_token']
request.session['oauth_token_secret'] = request_token['oauth_token_secret']
# Redirect the user to the Evernote authorization URL
# return the URL string which will be used by redirect()
# from the calling func
return client.get_authorize_url(request_token) | [
"def",
"auth",
"(",
"self",
",",
"request",
")",
":",
"client",
"=",
"self",
".",
"get_evernote_client",
"(",
")",
"request_token",
"=",
"client",
".",
"get_request_token",
"(",
"self",
".",
"callback_url",
"(",
"request",
")",
")",
"# Save the request token information for later",
"request",
".",
"session",
"[",
"'oauth_token'",
"]",
"=",
"request_token",
"[",
"'oauth_token'",
"]",
"request",
".",
"session",
"[",
"'oauth_token_secret'",
"]",
"=",
"request_token",
"[",
"'oauth_token_secret'",
"]",
"# Redirect the user to the Evernote authorization URL",
"# return the URL string which will be used by redirect()",
"# from the calling func",
"return",
"client",
".",
"get_authorize_url",
"(",
"request_token",
")"
] | 48.230769 | 16.538462 |
def move_state(self, direction, activate):
"""
Set the camera position move state
:param direction: What direction to update
:param activate: Start or stop moving in the direction
"""
if direction == RIGHT:
self._xdir = POSITIVE if activate else STILL
elif direction == LEFT:
self._xdir = NEGATIVE if activate else STILL
elif direction == FORWARD:
self._zdir = NEGATIVE if activate else STILL
elif direction == BACKWARD:
self._zdir = POSITIVE if activate else STILL
elif direction == UP:
self._ydir = POSITIVE if activate else STILL
elif direction == DOWN:
self._ydir = NEGATIVE if activate else STILL | [
"def",
"move_state",
"(",
"self",
",",
"direction",
",",
"activate",
")",
":",
"if",
"direction",
"==",
"RIGHT",
":",
"self",
".",
"_xdir",
"=",
"POSITIVE",
"if",
"activate",
"else",
"STILL",
"elif",
"direction",
"==",
"LEFT",
":",
"self",
".",
"_xdir",
"=",
"NEGATIVE",
"if",
"activate",
"else",
"STILL",
"elif",
"direction",
"==",
"FORWARD",
":",
"self",
".",
"_zdir",
"=",
"NEGATIVE",
"if",
"activate",
"else",
"STILL",
"elif",
"direction",
"==",
"BACKWARD",
":",
"self",
".",
"_zdir",
"=",
"POSITIVE",
"if",
"activate",
"else",
"STILL",
"elif",
"direction",
"==",
"UP",
":",
"self",
".",
"_ydir",
"=",
"POSITIVE",
"if",
"activate",
"else",
"STILL",
"elif",
"direction",
"==",
"DOWN",
":",
"self",
".",
"_ydir",
"=",
"NEGATIVE",
"if",
"activate",
"else",
"STILL"
] | 39.157895 | 11.684211 |
def teardown(self):
"""
Teardown the link.
Removes endpoints from properties I{links} collection.
@return: self
@rtype: L{Link}
"""
pA, pB = self.endpoints
if pA in pB.links:
pB.links.remove(pA)
if pB in pA.links:
pA.links.remove(pB)
return self | [
"def",
"teardown",
"(",
"self",
")",
":",
"pA",
",",
"pB",
"=",
"self",
".",
"endpoints",
"if",
"pA",
"in",
"pB",
".",
"links",
":",
"pB",
".",
"links",
".",
"remove",
"(",
"pA",
")",
"if",
"pB",
"in",
"pA",
".",
"links",
":",
"pA",
".",
"links",
".",
"remove",
"(",
"pB",
")",
"return",
"self"
] | 25.923077 | 13 |
def tuples_as_bytes(cmds):
"""Format list of tuples to CAM message with format /key:val.
Parameters
----------
cmds : list of tuples
List of commands as tuples.
Returns
-------
bytes
Sequence of /key:val.
Example
-------
::
>>> tuples_as_bytes([('cmd', 'val'), ('cmd2', 'val2')])
b'/cmd:val /cmd2:val2'
"""
cmds = OrderedDict(cmds) # override equal keys
tmp = []
for key, val in cmds.items():
key = str(key)
val = str(val)
tmp.append('/' + key + ':' + val)
return ' '.join(tmp).encode() | [
"def",
"tuples_as_bytes",
"(",
"cmds",
")",
":",
"cmds",
"=",
"OrderedDict",
"(",
"cmds",
")",
"# override equal keys",
"tmp",
"=",
"[",
"]",
"for",
"key",
",",
"val",
"in",
"cmds",
".",
"items",
"(",
")",
":",
"key",
"=",
"str",
"(",
"key",
")",
"val",
"=",
"str",
"(",
"val",
")",
"tmp",
".",
"append",
"(",
"'/'",
"+",
"key",
"+",
"':'",
"+",
"val",
")",
"return",
"' '",
".",
"join",
"(",
"tmp",
")",
".",
"encode",
"(",
")"
] | 20.75 | 21.464286 |
def exact_volume_sphere(rvec, pos, radius, zscale=1.0, volume_error=1e-5,
function=sphere_analytical_gaussian, max_radius_change=1e-2, args=()):
"""
Perform an iterative method to calculate the effective sphere that perfectly
(up to the volume_error) conserves volume. Return the resulting image
"""
vol_goal = 4./3*np.pi*radius**3 / zscale
rprime = radius
dr = inner(rvec, pos, rprime, zscale=zscale)
t = function(dr, rprime, *args)
for i in range(MAX_VOLUME_ITERATIONS):
vol_curr = np.abs(t.sum())
if np.abs(vol_goal - vol_curr)/vol_goal < volume_error:
break
rprime = rprime + 1.0*(vol_goal - vol_curr) / (4*np.pi*rprime**2)
if np.abs(rprime - radius)/radius > max_radius_change:
break
dr = inner(rvec, pos, rprime, zscale=zscale)
t = function(dr, rprime, *args)
return t | [
"def",
"exact_volume_sphere",
"(",
"rvec",
",",
"pos",
",",
"radius",
",",
"zscale",
"=",
"1.0",
",",
"volume_error",
"=",
"1e-5",
",",
"function",
"=",
"sphere_analytical_gaussian",
",",
"max_radius_change",
"=",
"1e-2",
",",
"args",
"=",
"(",
")",
")",
":",
"vol_goal",
"=",
"4.",
"/",
"3",
"*",
"np",
".",
"pi",
"*",
"radius",
"**",
"3",
"/",
"zscale",
"rprime",
"=",
"radius",
"dr",
"=",
"inner",
"(",
"rvec",
",",
"pos",
",",
"rprime",
",",
"zscale",
"=",
"zscale",
")",
"t",
"=",
"function",
"(",
"dr",
",",
"rprime",
",",
"*",
"args",
")",
"for",
"i",
"in",
"range",
"(",
"MAX_VOLUME_ITERATIONS",
")",
":",
"vol_curr",
"=",
"np",
".",
"abs",
"(",
"t",
".",
"sum",
"(",
")",
")",
"if",
"np",
".",
"abs",
"(",
"vol_goal",
"-",
"vol_curr",
")",
"/",
"vol_goal",
"<",
"volume_error",
":",
"break",
"rprime",
"=",
"rprime",
"+",
"1.0",
"*",
"(",
"vol_goal",
"-",
"vol_curr",
")",
"/",
"(",
"4",
"*",
"np",
".",
"pi",
"*",
"rprime",
"**",
"2",
")",
"if",
"np",
".",
"abs",
"(",
"rprime",
"-",
"radius",
")",
"/",
"radius",
">",
"max_radius_change",
":",
"break",
"dr",
"=",
"inner",
"(",
"rvec",
",",
"pos",
",",
"rprime",
",",
"zscale",
"=",
"zscale",
")",
"t",
"=",
"function",
"(",
"dr",
",",
"rprime",
",",
"*",
"args",
")",
"return",
"t"
] | 35.04 | 22.24 |
def get_all_keys(self, headers=None, callback=None, **params):
"""
A lower-level method for listing contents of a bucket.
This closely models the actual S3 API and requires you to manually
handle the paging of results. For a higher-level method
that handles the details of paging for you, you can use the list method.
:type max_keys: int
:param max_keys: The maximum number of keys to retrieve
:type prefix: string
:param prefix: The prefix of the keys you want to retrieve
:type marker: string
:param marker: The "marker" of where you are in the result set
:type delimiter: string
:param delimiter: If this optional, Unicode string parameter
is included with your request, then keys that
contain the same string between the prefix and
the first occurrence of the delimiter will be
rolled up into a single result element in the
CommonPrefixes collection. These rolled-up keys
are not returned elsewhere in the response.
:rtype: ResultSet
:return: The result from S3 listing the keys requested
"""
return self._get_all([('Contents', self.key_class),
('CommonPrefixes', boto.s3.prefix.Prefix)],
'', headers, callback=callback, **params) | [
"def",
"get_all_keys",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"_get_all",
"(",
"[",
"(",
"'Contents'",
",",
"self",
".",
"key_class",
")",
",",
"(",
"'CommonPrefixes'",
",",
"boto",
".",
"s3",
".",
"prefix",
".",
"Prefix",
")",
"]",
",",
"''",
",",
"headers",
",",
"callback",
"=",
"callback",
",",
"*",
"*",
"params",
")"
] | 46.96875 | 25.03125 |
def _hjoin_multiline(join_char, strings):
"""Horizontal join of multiline strings
"""
cstrings = [string.split("\n") for string in strings]
max_num_lines = max(len(item) for item in cstrings)
pp = []
for k in range(max_num_lines):
p = [cstring[k] for cstring in cstrings]
pp.append(join_char + join_char.join(p) + join_char)
return "\n".join([p.rstrip() for p in pp]) | [
"def",
"_hjoin_multiline",
"(",
"join_char",
",",
"strings",
")",
":",
"cstrings",
"=",
"[",
"string",
".",
"split",
"(",
"\"\\n\"",
")",
"for",
"string",
"in",
"strings",
"]",
"max_num_lines",
"=",
"max",
"(",
"len",
"(",
"item",
")",
"for",
"item",
"in",
"cstrings",
")",
"pp",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"max_num_lines",
")",
":",
"p",
"=",
"[",
"cstring",
"[",
"k",
"]",
"for",
"cstring",
"in",
"cstrings",
"]",
"pp",
".",
"append",
"(",
"join_char",
"+",
"join_char",
".",
"join",
"(",
"p",
")",
"+",
"join_char",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"[",
"p",
".",
"rstrip",
"(",
")",
"for",
"p",
"in",
"pp",
"]",
")"
] | 36.545455 | 12.909091 |
def get_caller_module():
"""
Returns the name of the caller's module as a string.
>>> get_caller_module()
'__main__'
"""
stack = inspect.stack()
assert len(stack) > 1
caller = stack[2][0]
return caller.f_globals['__name__'] | [
"def",
"get_caller_module",
"(",
")",
":",
"stack",
"=",
"inspect",
".",
"stack",
"(",
")",
"assert",
"len",
"(",
"stack",
")",
">",
"1",
"caller",
"=",
"stack",
"[",
"2",
"]",
"[",
"0",
"]",
"return",
"caller",
".",
"f_globals",
"[",
"'__name__'",
"]"
] | 22.727273 | 14.181818 |
def set_policy_priorities(self, priorities_json):
'''**Description**
Change the policy evaluation order
**Arguments**
- priorities_json: a description of the new policy order.
**Success Return Value**
A JSON object representing the updated list of policy ids.
**Example**
`examples/set_policy_order.py <https://github.com/draios/python-sdc-client/blob/master/examples/set_policy_order.py>`_
'''
try:
json.loads(priorities_json)
except Exception as e:
return [False, "priorities json is not valid json: {}".format(str(e))]
res = requests.put(self.url + '/api/policies/priorities', headers=self.hdrs, data=priorities_json, verify=self.ssl_verify)
return self._request_result(res) | [
"def",
"set_policy_priorities",
"(",
"self",
",",
"priorities_json",
")",
":",
"try",
":",
"json",
".",
"loads",
"(",
"priorities_json",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"[",
"False",
",",
"\"priorities json is not valid json: {}\"",
".",
"format",
"(",
"str",
"(",
"e",
")",
")",
"]",
"res",
"=",
"requests",
".",
"put",
"(",
"self",
".",
"url",
"+",
"'/api/policies/priorities'",
",",
"headers",
"=",
"self",
".",
"hdrs",
",",
"data",
"=",
"priorities_json",
",",
"verify",
"=",
"self",
".",
"ssl_verify",
")",
"return",
"self",
".",
"_request_result",
"(",
"res",
")"
] | 36.636364 | 30.272727 |
def create_annotation_node(self, annotation):
"""
Return an annotation node.
"""
annotation_node = URIRef(str(annotation.spdx_id))
type_triple = (annotation_node, RDF.type, self.spdx_namespace.Annotation)
self.graph.add(type_triple)
annotator_node = Literal(annotation.annotator.to_value())
self.graph.add((annotation_node, self.spdx_namespace.annotator, annotator_node))
annotation_date_node = Literal(annotation.annotation_date_iso_format)
annotation_triple = (annotation_node, self.spdx_namespace.annotationDate, annotation_date_node)
self.graph.add(annotation_triple)
if annotation.has_comment:
comment_node = Literal(annotation.comment)
comment_triple = (annotation_node, RDFS.comment, comment_node)
self.graph.add(comment_triple)
annotation_type_node = Literal(annotation.annotation_type)
annotation_type_triple = (annotation_node, self.spdx_namespace.annotationType, annotation_type_node)
self.graph.add(annotation_type_triple)
return annotation_node | [
"def",
"create_annotation_node",
"(",
"self",
",",
"annotation",
")",
":",
"annotation_node",
"=",
"URIRef",
"(",
"str",
"(",
"annotation",
".",
"spdx_id",
")",
")",
"type_triple",
"=",
"(",
"annotation_node",
",",
"RDF",
".",
"type",
",",
"self",
".",
"spdx_namespace",
".",
"Annotation",
")",
"self",
".",
"graph",
".",
"add",
"(",
"type_triple",
")",
"annotator_node",
"=",
"Literal",
"(",
"annotation",
".",
"annotator",
".",
"to_value",
"(",
")",
")",
"self",
".",
"graph",
".",
"add",
"(",
"(",
"annotation_node",
",",
"self",
".",
"spdx_namespace",
".",
"annotator",
",",
"annotator_node",
")",
")",
"annotation_date_node",
"=",
"Literal",
"(",
"annotation",
".",
"annotation_date_iso_format",
")",
"annotation_triple",
"=",
"(",
"annotation_node",
",",
"self",
".",
"spdx_namespace",
".",
"annotationDate",
",",
"annotation_date_node",
")",
"self",
".",
"graph",
".",
"add",
"(",
"annotation_triple",
")",
"if",
"annotation",
".",
"has_comment",
":",
"comment_node",
"=",
"Literal",
"(",
"annotation",
".",
"comment",
")",
"comment_triple",
"=",
"(",
"annotation_node",
",",
"RDFS",
".",
"comment",
",",
"comment_node",
")",
"self",
".",
"graph",
".",
"add",
"(",
"comment_triple",
")",
"annotation_type_node",
"=",
"Literal",
"(",
"annotation",
".",
"annotation_type",
")",
"annotation_type_triple",
"=",
"(",
"annotation_node",
",",
"self",
".",
"spdx_namespace",
".",
"annotationType",
",",
"annotation_type_node",
")",
"self",
".",
"graph",
".",
"add",
"(",
"annotation_type_triple",
")",
"return",
"annotation_node"
] | 50.090909 | 22.454545 |
def get(self, name):
"""Get the set of compatible packages given a resolvable name."""
resolvable, packages, parent, constraint_only = self._collapse().get(
self.normalize(name), _ResolvedPackages.empty())
return packages | [
"def",
"get",
"(",
"self",
",",
"name",
")",
":",
"resolvable",
",",
"packages",
",",
"parent",
",",
"constraint_only",
"=",
"self",
".",
"_collapse",
"(",
")",
".",
"get",
"(",
"self",
".",
"normalize",
"(",
"name",
")",
",",
"_ResolvedPackages",
".",
"empty",
"(",
")",
")",
"return",
"packages"
] | 47.4 | 18 |
def utime(self, path, times=None, ns=None, follow_symlinks=True):
"""Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If `None`, both times are set to the current time.
New in Python 3.3.
follow_symlinks: If `False` and entry_path points to a symlink,
the link itself is queried instead of the linked object.
New in Python 3.3.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified.
"""
self._handle_utime_arg_errors(ns, times)
try:
file_object = self.resolve(path, follow_symlinks, allow_fd=True)
except IOError as io_error:
if io_error.errno == errno.ENOENT:
self.raise_os_error(errno.ENOENT, path)
raise
if times is not None:
for file_time in times:
if not isinstance(file_time, (int, float)):
raise TypeError('atime and mtime must be numbers')
file_object.st_atime = times[0]
file_object.st_mtime = times[1]
elif ns is not None:
for file_time in ns:
if not isinstance(file_time, int):
raise TypeError('atime and mtime must be ints')
file_object.st_atime_ns = ns[0]
file_object.st_mtime_ns = ns[1]
else:
current_time = time.time()
file_object.st_atime = current_time
file_object.st_mtime = current_time | [
"def",
"utime",
"(",
"self",
",",
"path",
",",
"times",
"=",
"None",
",",
"ns",
"=",
"None",
",",
"follow_symlinks",
"=",
"True",
")",
":",
"self",
".",
"_handle_utime_arg_errors",
"(",
"ns",
",",
"times",
")",
"try",
":",
"file_object",
"=",
"self",
".",
"resolve",
"(",
"path",
",",
"follow_symlinks",
",",
"allow_fd",
"=",
"True",
")",
"except",
"IOError",
"as",
"io_error",
":",
"if",
"io_error",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"self",
".",
"raise_os_error",
"(",
"errno",
".",
"ENOENT",
",",
"path",
")",
"raise",
"if",
"times",
"is",
"not",
"None",
":",
"for",
"file_time",
"in",
"times",
":",
"if",
"not",
"isinstance",
"(",
"file_time",
",",
"(",
"int",
",",
"float",
")",
")",
":",
"raise",
"TypeError",
"(",
"'atime and mtime must be numbers'",
")",
"file_object",
".",
"st_atime",
"=",
"times",
"[",
"0",
"]",
"file_object",
".",
"st_mtime",
"=",
"times",
"[",
"1",
"]",
"elif",
"ns",
"is",
"not",
"None",
":",
"for",
"file_time",
"in",
"ns",
":",
"if",
"not",
"isinstance",
"(",
"file_time",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"'atime and mtime must be ints'",
")",
"file_object",
".",
"st_atime_ns",
"=",
"ns",
"[",
"0",
"]",
"file_object",
".",
"st_mtime_ns",
"=",
"ns",
"[",
"1",
"]",
"else",
":",
"current_time",
"=",
"time",
".",
"time",
"(",
")",
"file_object",
".",
"st_atime",
"=",
"current_time",
"file_object",
".",
"st_mtime",
"=",
"current_time"
] | 43.625 | 19.729167 |
def ReadStoredProcedure(self, sproc_link, options=None):
"""Reads a stored procedure.
:param str sproc_link:
The link to the stored procedure.
:param dict options:
The request options for the request.
:return:
The read Stored Procedure.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(sproc_link)
sproc_id = base.GetResourceIdOrFullNameFromLink(sproc_link)
return self.Read(path, 'sprocs', sproc_id, None, options) | [
"def",
"ReadStoredProcedure",
"(",
"self",
",",
"sproc_link",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"{",
"}",
"path",
"=",
"base",
".",
"GetPathFromLink",
"(",
"sproc_link",
")",
"sproc_id",
"=",
"base",
".",
"GetResourceIdOrFullNameFromLink",
"(",
"sproc_link",
")",
"return",
"self",
".",
"Read",
"(",
"path",
",",
"'sprocs'",
",",
"sproc_id",
",",
"None",
",",
"options",
")"
] | 28.45 | 18.7 |
def get_alias_dict(config_obj):
"""
Return a dictionary consisting of all aliases known to eg.
The format is {'alias': 'resolved_program'}.
If the aliases file does not exist, returns an empty dict.
"""
if not config_obj.examples_dir:
return {}
alias_file_path = _get_alias_file_path(config_obj)
if not os.path.isfile(alias_file_path):
return {}
alias_file_contents = _get_contents_of_file(alias_file_path)
result = json.loads(alias_file_contents)
return result | [
"def",
"get_alias_dict",
"(",
"config_obj",
")",
":",
"if",
"not",
"config_obj",
".",
"examples_dir",
":",
"return",
"{",
"}",
"alias_file_path",
"=",
"_get_alias_file_path",
"(",
"config_obj",
")",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"alias_file_path",
")",
":",
"return",
"{",
"}",
"alias_file_contents",
"=",
"_get_contents_of_file",
"(",
"alias_file_path",
")",
"result",
"=",
"json",
".",
"loads",
"(",
"alias_file_contents",
")",
"return",
"result"
] | 28.222222 | 18.888889 |
async def verify_chain_of_trust(chain):
"""Build and verify the chain of trust.
Args:
chain (ChainOfTrust): the chain we're operating on
Raises:
CoTError: on failure
"""
log_path = os.path.join(chain.context.config["task_log_dir"], "chain_of_trust.log")
scriptworker_log = logging.getLogger('scriptworker')
with contextual_log_handler(
chain.context, path=log_path, log_obj=scriptworker_log,
formatter=AuditLogFormatter(
fmt=chain.context.config['log_fmt'],
datefmt=chain.context.config['log_datefmt'],
)
):
try:
# build LinkOfTrust objects
await build_task_dependencies(chain, chain.task, chain.name, chain.task_id)
# download the signed chain of trust artifacts
await download_cot(chain)
# verify the signatures and populate the ``link.cot``s
verify_cot_signatures(chain)
# download all other artifacts needed to verify chain of trust
await download_cot_artifacts(chain)
# verify the task types, e.g. decision
task_count = await verify_task_types(chain)
check_num_tasks(chain, task_count)
# verify the worker_impls, e.g. docker-worker
await verify_worker_impls(chain)
await trace_back_to_tree(chain)
except (BaseDownloadError, KeyError, AttributeError) as exc:
log.critical("Chain of Trust verification error!", exc_info=True)
if isinstance(exc, CoTError):
raise
else:
raise CoTError(str(exc))
log.info("Good.") | [
"async",
"def",
"verify_chain_of_trust",
"(",
"chain",
")",
":",
"log_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"chain",
".",
"context",
".",
"config",
"[",
"\"task_log_dir\"",
"]",
",",
"\"chain_of_trust.log\"",
")",
"scriptworker_log",
"=",
"logging",
".",
"getLogger",
"(",
"'scriptworker'",
")",
"with",
"contextual_log_handler",
"(",
"chain",
".",
"context",
",",
"path",
"=",
"log_path",
",",
"log_obj",
"=",
"scriptworker_log",
",",
"formatter",
"=",
"AuditLogFormatter",
"(",
"fmt",
"=",
"chain",
".",
"context",
".",
"config",
"[",
"'log_fmt'",
"]",
",",
"datefmt",
"=",
"chain",
".",
"context",
".",
"config",
"[",
"'log_datefmt'",
"]",
",",
")",
")",
":",
"try",
":",
"# build LinkOfTrust objects",
"await",
"build_task_dependencies",
"(",
"chain",
",",
"chain",
".",
"task",
",",
"chain",
".",
"name",
",",
"chain",
".",
"task_id",
")",
"# download the signed chain of trust artifacts",
"await",
"download_cot",
"(",
"chain",
")",
"# verify the signatures and populate the ``link.cot``s",
"verify_cot_signatures",
"(",
"chain",
")",
"# download all other artifacts needed to verify chain of trust",
"await",
"download_cot_artifacts",
"(",
"chain",
")",
"# verify the task types, e.g. decision",
"task_count",
"=",
"await",
"verify_task_types",
"(",
"chain",
")",
"check_num_tasks",
"(",
"chain",
",",
"task_count",
")",
"# verify the worker_impls, e.g. docker-worker",
"await",
"verify_worker_impls",
"(",
"chain",
")",
"await",
"trace_back_to_tree",
"(",
"chain",
")",
"except",
"(",
"BaseDownloadError",
",",
"KeyError",
",",
"AttributeError",
")",
"as",
"exc",
":",
"log",
".",
"critical",
"(",
"\"Chain of Trust verification error!\"",
",",
"exc_info",
"=",
"True",
")",
"if",
"isinstance",
"(",
"exc",
",",
"CoTError",
")",
":",
"raise",
"else",
":",
"raise",
"CoTError",
"(",
"str",
"(",
"exc",
")",
")",
"log",
".",
"info",
"(",
"\"Good.\"",
")"
] | 39.804878 | 18.04878 |
def get(self, block=True, timeout=None):
'''Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the Empty exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the Empty exception ('timeout' is ignored
in that case).
'''
self._parent._check_closing()
with self._parent._sync_not_empty:
if not block:
if not self._parent._qsize():
raise SyncQueueEmpty
elif timeout is None:
while not self._parent._qsize():
self._parent._sync_not_empty.wait()
elif timeout < 0:
raise ValueError("'timeout' must be a non-negative number")
else:
time = self._parent._loop.time
endtime = time() + timeout
while not self._parent._qsize():
remaining = endtime - time()
if remaining <= 0.0:
raise SyncQueueEmpty
self._parent._sync_not_empty.wait(remaining)
item = self._parent._get()
self._parent._sync_not_full.notify()
self._parent._notify_async_not_full(threadsafe=True)
return item | [
"def",
"get",
"(",
"self",
",",
"block",
"=",
"True",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"_parent",
".",
"_check_closing",
"(",
")",
"with",
"self",
".",
"_parent",
".",
"_sync_not_empty",
":",
"if",
"not",
"block",
":",
"if",
"not",
"self",
".",
"_parent",
".",
"_qsize",
"(",
")",
":",
"raise",
"SyncQueueEmpty",
"elif",
"timeout",
"is",
"None",
":",
"while",
"not",
"self",
".",
"_parent",
".",
"_qsize",
"(",
")",
":",
"self",
".",
"_parent",
".",
"_sync_not_empty",
".",
"wait",
"(",
")",
"elif",
"timeout",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"'timeout' must be a non-negative number\"",
")",
"else",
":",
"time",
"=",
"self",
".",
"_parent",
".",
"_loop",
".",
"time",
"endtime",
"=",
"time",
"(",
")",
"+",
"timeout",
"while",
"not",
"self",
".",
"_parent",
".",
"_qsize",
"(",
")",
":",
"remaining",
"=",
"endtime",
"-",
"time",
"(",
")",
"if",
"remaining",
"<=",
"0.0",
":",
"raise",
"SyncQueueEmpty",
"self",
".",
"_parent",
".",
"_sync_not_empty",
".",
"wait",
"(",
"remaining",
")",
"item",
"=",
"self",
".",
"_parent",
".",
"_get",
"(",
")",
"self",
".",
"_parent",
".",
"_sync_not_full",
".",
"notify",
"(",
")",
"self",
".",
"_parent",
".",
"_notify_async_not_full",
"(",
"threadsafe",
"=",
"True",
")",
"return",
"item"
] | 45.909091 | 15.909091 |
def _setElegant(self, **infiles):
""" set input parameters for elegant tracking, available keys: 'ltefile', 'elefile'
"""
ltefile, elefile = infiles['ltefile'], infiles['elefile']
self.lattice_file = ltefile
self.elegant_file = elefile | [
"def",
"_setElegant",
"(",
"self",
",",
"*",
"*",
"infiles",
")",
":",
"ltefile",
",",
"elefile",
"=",
"infiles",
"[",
"'ltefile'",
"]",
",",
"infiles",
"[",
"'elefile'",
"]",
"self",
".",
"lattice_file",
"=",
"ltefile",
"self",
".",
"elegant_file",
"=",
"elefile"
] | 45 | 7 |
def get_ids(self, features, threshold=0.0, func=np.sum, get_weights=False):
""" Returns a list of all studies in the table that meet the desired
feature-based criteria.
Will most commonly be used to retrieve studies that use one or more
features with some minimum frequency; e.g.,:
get_ids(['fear', 'anxiety'], threshold=0.001)
Args:
features (lists): a list of feature names to search on.
threshold (float): optional float indicating threshold features
must pass to be included.
func (Callable): any numpy function to use for thresholding
(default: sum). The function will be applied to the list of
features and the result compared to the threshold. This can be
used to change the meaning of the query in powerful ways. E.g,:
max: any of the features have to pass threshold
(i.e., max > thresh)
min: all features must each individually pass threshold
(i.e., min > thresh)
sum: the summed weight of all features must pass threshold
(i.e., sum > thresh)
get_weights (bool): if True, returns a dict with ids => weights.
Returns:
When get_weights is false (default), returns a list of study
names. When true, returns a dict, with study names as keys
and feature weights as values.
"""
if isinstance(features, str):
features = [features]
features = self.search_features(features) # Expand wild cards
feature_weights = self.data.ix[:, features]
weights = feature_weights.apply(func, 1)
above_thresh = weights[weights >= threshold]
# ids_to_keep = self.ids[above_thresh]
return above_thresh if get_weights else list(above_thresh.index) | [
"def",
"get_ids",
"(",
"self",
",",
"features",
",",
"threshold",
"=",
"0.0",
",",
"func",
"=",
"np",
".",
"sum",
",",
"get_weights",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"features",
",",
"str",
")",
":",
"features",
"=",
"[",
"features",
"]",
"features",
"=",
"self",
".",
"search_features",
"(",
"features",
")",
"# Expand wild cards",
"feature_weights",
"=",
"self",
".",
"data",
".",
"ix",
"[",
":",
",",
"features",
"]",
"weights",
"=",
"feature_weights",
".",
"apply",
"(",
"func",
",",
"1",
")",
"above_thresh",
"=",
"weights",
"[",
"weights",
">=",
"threshold",
"]",
"# ids_to_keep = self.ids[above_thresh]",
"return",
"above_thresh",
"if",
"get_weights",
"else",
"list",
"(",
"above_thresh",
".",
"index",
")"
] | 50.552632 | 22.473684 |
def retag(self, tagging, tag=None):
"""
Copies the object, applying a new tagging to it
:param tagging:
A dict containing the keys "explicit" and "implicit". Legacy
API allows a unicode string of "implicit" or "explicit".
:param tag:
A integer tag number. Only used when tagging is a unicode string.
:return:
An Asn1Value object
"""
# This is required to preserve the old API
if not isinstance(tagging, dict):
tagging = {tagging: tag}
new_obj = self.__class__(explicit=tagging.get('explicit'), implicit=tagging.get('implicit'))
new_obj._copy(self, copy.deepcopy)
return new_obj | [
"def",
"retag",
"(",
"self",
",",
"tagging",
",",
"tag",
"=",
"None",
")",
":",
"# This is required to preserve the old API",
"if",
"not",
"isinstance",
"(",
"tagging",
",",
"dict",
")",
":",
"tagging",
"=",
"{",
"tagging",
":",
"tag",
"}",
"new_obj",
"=",
"self",
".",
"__class__",
"(",
"explicit",
"=",
"tagging",
".",
"get",
"(",
"'explicit'",
")",
",",
"implicit",
"=",
"tagging",
".",
"get",
"(",
"'implicit'",
")",
")",
"new_obj",
".",
"_copy",
"(",
"self",
",",
"copy",
".",
"deepcopy",
")",
"return",
"new_obj"
] | 33.761905 | 21.095238 |
def _unlinkUser(self):
"""Remove the UID of the current Contact in the User properties and
update all relevant own properties.
"""
KEY = "linked_contact_uid"
# Nothing to do if no user is linked
if not self.hasUser():
return False
user = self.getUser()
username = user.getId()
# Unset the UID from the User Property
user.setMemberProperties({KEY: ""})
logger.info("Unlinked Contact UID from User {}"
.format(user.getProperty(KEY, "")))
# Unset the Username
self.setUsername(None)
# Unset the Email
self.setEmailAddress(None)
# somehow the `getUsername` index gets out of sync
self.reindexObject()
# N.B. Local owner role and client group applies only to client
# contacts, but not lab contacts.
if IClient.providedBy(self.aq_parent):
# Revoke local Owner role
self._delLocalOwnerRole(username)
# Remove user from "Clients" group
self._delUserFromGroup(username, group="Clients")
return True | [
"def",
"_unlinkUser",
"(",
"self",
")",
":",
"KEY",
"=",
"\"linked_contact_uid\"",
"# Nothing to do if no user is linked",
"if",
"not",
"self",
".",
"hasUser",
"(",
")",
":",
"return",
"False",
"user",
"=",
"self",
".",
"getUser",
"(",
")",
"username",
"=",
"user",
".",
"getId",
"(",
")",
"# Unset the UID from the User Property",
"user",
".",
"setMemberProperties",
"(",
"{",
"KEY",
":",
"\"\"",
"}",
")",
"logger",
".",
"info",
"(",
"\"Unlinked Contact UID from User {}\"",
".",
"format",
"(",
"user",
".",
"getProperty",
"(",
"KEY",
",",
"\"\"",
")",
")",
")",
"# Unset the Username",
"self",
".",
"setUsername",
"(",
"None",
")",
"# Unset the Email",
"self",
".",
"setEmailAddress",
"(",
"None",
")",
"# somehow the `getUsername` index gets out of sync",
"self",
".",
"reindexObject",
"(",
")",
"# N.B. Local owner role and client group applies only to client",
"# contacts, but not lab contacts.",
"if",
"IClient",
".",
"providedBy",
"(",
"self",
".",
"aq_parent",
")",
":",
"# Revoke local Owner role",
"self",
".",
"_delLocalOwnerRole",
"(",
"username",
")",
"# Remove user from \"Clients\" group",
"self",
".",
"_delUserFromGroup",
"(",
"username",
",",
"group",
"=",
"\"Clients\"",
")",
"return",
"True"
] | 31 | 16.888889 |
def snapshot(self, lock=False):
"""Create a snapshot of latest objects in bucket.
:param lock: Create the new bucket in a locked state.
:returns: Newly created bucket containing copied ObjectVersion.
"""
with db.session.begin_nested():
bucket = Bucket(
default_location=self.default_location,
default_storage_class=self.default_storage_class,
quota_size=self.quota_size,
)
db.session.add(bucket)
for o in ObjectVersion.get_by_bucket(self):
o.copy(bucket=bucket)
bucket.locked = True if lock else self.locked
return bucket | [
"def",
"snapshot",
"(",
"self",
",",
"lock",
"=",
"False",
")",
":",
"with",
"db",
".",
"session",
".",
"begin_nested",
"(",
")",
":",
"bucket",
"=",
"Bucket",
"(",
"default_location",
"=",
"self",
".",
"default_location",
",",
"default_storage_class",
"=",
"self",
".",
"default_storage_class",
",",
"quota_size",
"=",
"self",
".",
"quota_size",
",",
")",
"db",
".",
"session",
".",
"add",
"(",
"bucket",
")",
"for",
"o",
"in",
"ObjectVersion",
".",
"get_by_bucket",
"(",
"self",
")",
":",
"o",
".",
"copy",
"(",
"bucket",
"=",
"bucket",
")",
"bucket",
".",
"locked",
"=",
"True",
"if",
"lock",
"else",
"self",
".",
"locked",
"return",
"bucket"
] | 33.3 | 18 |
def init_datastore(config):
"""
Take the config definition and initialize the datastore.
The config must contain either a 'datastore' parameter, which
will be simply returned, or
must contain a 'factory' which is a callable or entry
point definition. The callable should take the remainder of
the params in config as kwargs and return a DataStore instance.
"""
if 'datastore' in config:
# the datastore has already been initialized, just use it.
return config['datastore']
factory = config.pop('factory')
if isinstance(factory, str):
"""
factory should be a string defined in the pkg_resources.EntryPoint
format.
"""
factory = pkg_resources.EntryPoint.parse('x=' + factory).resolve()
return factory(**config) | [
"def",
"init_datastore",
"(",
"config",
")",
":",
"if",
"'datastore'",
"in",
"config",
":",
"# the datastore has already been initialized, just use it.",
"return",
"config",
"[",
"'datastore'",
"]",
"factory",
"=",
"config",
".",
"pop",
"(",
"'factory'",
")",
"if",
"isinstance",
"(",
"factory",
",",
"str",
")",
":",
"\"\"\"\n factory should be a string defined in the pkg_resources.EntryPoint\n format.\n \"\"\"",
"factory",
"=",
"pkg_resources",
".",
"EntryPoint",
".",
"parse",
"(",
"'x='",
"+",
"factory",
")",
".",
"resolve",
"(",
")",
"return",
"factory",
"(",
"*",
"*",
"config",
")"
] | 37.761905 | 15.952381 |
def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True):
"""The slicenet model, main step used for training."""
with tf.variable_scope("slicenet"):
# Project to hidden size if necessary
if inputs.get_shape().as_list()[-1] != hparams.hidden_size:
inputs = common_layers.conv_block(
inputs,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True)
# Flatten inputs and encode.
inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2)
inputs_mask = 1.0 - embedding_to_padding(inputs)
inputs = common_layers.add_timing_signal(inputs) # Add position info.
target_space_emb = embed_target_space(target_space, hparams.hidden_size)
extra_layers = int(hparams.num_hidden_layers * 1.5)
inputs_encoded = multi_conv_res(
inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask)
if not run_decoder:
return inputs_encoded
# Do the middle part.
decoder_start, similarity_loss = slicenet_middle(
inputs_encoded, targets, target_space_emb, inputs_mask, hparams)
# Decode.
decoder_final = multi_conv_res(
decoder_start,
"LEFT",
"decoder",
hparams.num_hidden_layers,
hparams,
mask=inputs_mask,
source=inputs_encoded)
return decoder_final, tf.reduce_mean(similarity_loss) | [
"def",
"slicenet_internal",
"(",
"inputs",
",",
"targets",
",",
"target_space",
",",
"hparams",
",",
"run_decoder",
"=",
"True",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"slicenet\"",
")",
":",
"# Project to hidden size if necessary",
"if",
"inputs",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"-",
"1",
"]",
"!=",
"hparams",
".",
"hidden_size",
":",
"inputs",
"=",
"common_layers",
".",
"conv_block",
"(",
"inputs",
",",
"hparams",
".",
"hidden_size",
",",
"[",
"(",
"(",
"1",
",",
"1",
")",
",",
"(",
"3",
",",
"3",
")",
")",
"]",
",",
"first_relu",
"=",
"False",
",",
"padding",
"=",
"\"SAME\"",
",",
"force2d",
"=",
"True",
")",
"# Flatten inputs and encode.",
"inputs",
"=",
"tf",
".",
"expand_dims",
"(",
"common_layers",
".",
"flatten4d3d",
"(",
"inputs",
")",
",",
"axis",
"=",
"2",
")",
"inputs_mask",
"=",
"1.0",
"-",
"embedding_to_padding",
"(",
"inputs",
")",
"inputs",
"=",
"common_layers",
".",
"add_timing_signal",
"(",
"inputs",
")",
"# Add position info.",
"target_space_emb",
"=",
"embed_target_space",
"(",
"target_space",
",",
"hparams",
".",
"hidden_size",
")",
"extra_layers",
"=",
"int",
"(",
"hparams",
".",
"num_hidden_layers",
"*",
"1.5",
")",
"inputs_encoded",
"=",
"multi_conv_res",
"(",
"inputs",
",",
"\"SAME\"",
",",
"\"encoder\"",
",",
"extra_layers",
",",
"hparams",
",",
"mask",
"=",
"inputs_mask",
")",
"if",
"not",
"run_decoder",
":",
"return",
"inputs_encoded",
"# Do the middle part.",
"decoder_start",
",",
"similarity_loss",
"=",
"slicenet_middle",
"(",
"inputs_encoded",
",",
"targets",
",",
"target_space_emb",
",",
"inputs_mask",
",",
"hparams",
")",
"# Decode.",
"decoder_final",
"=",
"multi_conv_res",
"(",
"decoder_start",
",",
"\"LEFT\"",
",",
"\"decoder\"",
",",
"hparams",
".",
"num_hidden_layers",
",",
"hparams",
",",
"mask",
"=",
"inputs_mask",
",",
"source",
"=",
"inputs_encoded",
")",
"return",
"decoder_final",
",",
"tf",
".",
"reduce_mean",
"(",
"similarity_loss",
")"
] | 39.828571 | 17.657143 |
def download(self, FILENAME=None):
"""Downloads the files related to the query."""
resultFilename = None
url = self.__url+'/services'
result = Util.retrieveJsonResponseFromServer(url)
dataItems = result['data']
for item in dataItems:
plugin = Plugin(item)
result = plugin.getParameterByValue("fr.cnes.sitools.resources.order.DirectOrderResource")
if result == None:
continue
else:
urlParameter = plugin.getParameterByType('PARAMETER_ATTACHMENT')
urlPlugin = urlParameter.getValue()
encodingParameter = plugin.getParameterByName('archiveType')
encodingPlugin = encodingParameter.getValue()
query = self.getQueries()
query.setBaseUrl(self.__url + urlPlugin)
url = query.getUrl()
(filename, header) = Util.urlretrieve('%s' % url, FILENAME)
if FILENAME == None:
os.rename(filename, filename + "." + encodingPlugin)
resultFilename = filename + "." + encodingPlugin
else:
os.rename(FILENAME, FILENAME + "." + encodingPlugin)
resultFilename = FILENAME + "." + encodingPlugin
break
return resultFilename | [
"def",
"download",
"(",
"self",
",",
"FILENAME",
"=",
"None",
")",
":",
"resultFilename",
"=",
"None",
"url",
"=",
"self",
".",
"__url",
"+",
"'/services'",
"result",
"=",
"Util",
".",
"retrieveJsonResponseFromServer",
"(",
"url",
")",
"dataItems",
"=",
"result",
"[",
"'data'",
"]",
"for",
"item",
"in",
"dataItems",
":",
"plugin",
"=",
"Plugin",
"(",
"item",
")",
"result",
"=",
"plugin",
".",
"getParameterByValue",
"(",
"\"fr.cnes.sitools.resources.order.DirectOrderResource\"",
")",
"if",
"result",
"==",
"None",
":",
"continue",
"else",
":",
"urlParameter",
"=",
"plugin",
".",
"getParameterByType",
"(",
"'PARAMETER_ATTACHMENT'",
")",
"urlPlugin",
"=",
"urlParameter",
".",
"getValue",
"(",
")",
"encodingParameter",
"=",
"plugin",
".",
"getParameterByName",
"(",
"'archiveType'",
")",
"encodingPlugin",
"=",
"encodingParameter",
".",
"getValue",
"(",
")",
"query",
"=",
"self",
".",
"getQueries",
"(",
")",
"query",
".",
"setBaseUrl",
"(",
"self",
".",
"__url",
"+",
"urlPlugin",
")",
"url",
"=",
"query",
".",
"getUrl",
"(",
")",
"(",
"filename",
",",
"header",
")",
"=",
"Util",
".",
"urlretrieve",
"(",
"'%s'",
"%",
"url",
",",
"FILENAME",
")",
"if",
"FILENAME",
"==",
"None",
":",
"os",
".",
"rename",
"(",
"filename",
",",
"filename",
"+",
"\".\"",
"+",
"encodingPlugin",
")",
"resultFilename",
"=",
"filename",
"+",
"\".\"",
"+",
"encodingPlugin",
"else",
":",
"os",
".",
"rename",
"(",
"FILENAME",
",",
"FILENAME",
"+",
"\".\"",
"+",
"encodingPlugin",
")",
"resultFilename",
"=",
"FILENAME",
"+",
"\".\"",
"+",
"encodingPlugin",
"break",
"return",
"resultFilename"
] | 49.857143 | 18.964286 |
def motif3struct_bin(A):
'''
Structural motifs are patterns of local connectivity. Motif frequency
is the frequency of occurrence of motifs around a node.
Parameters
----------
A : NxN np.ndarray
binary directed connection matrix
Returns
-------
F : 13xN np.ndarray
motif frequency matrix
f : 13x1 np.ndarray
motif frequency vector (averaged over all nodes)
'''
from scipy import io
import os
fname = os.path.join(os.path.dirname(__file__), motiflib)
mot = io.loadmat(fname)
m3n = mot['m3n']
id3 = mot['id3'].squeeze()
n = len(A) # number of vertices in A
f = np.zeros((13,)) # motif count for whole graph
F = np.zeros((13, n)) # motif frequency
A = binarize(A, copy=True) # ensure A is binary
As = np.logical_or(A, A.T) # symmetrized adjmat
for u in range(n - 2):
# v1: neighbors of u (>u)
V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1])
for v1 in np.where(V1)[0]:
# v2: neighbors of v1 (>u)
V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1])
V2[V1] = 0 # not already in V1
# and all neighbors of u (>v1)
V2 = np.logical_or(
np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2)
for v2 in np.where(V2)[0]:
a = np.array((A[v1, u], A[v2, u], A[u, v1],
A[v2, v1], A[u, v2], A[v1, v2]))
s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
ix = id3[np.squeeze(s == m3n)] - 1
F[ix, u] += 1
F[ix, v1] += 1
F[ix, v2] += 1
f[ix] += 1
return f, F | [
"def",
"motif3struct_bin",
"(",
"A",
")",
":",
"from",
"scipy",
"import",
"io",
"import",
"os",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"motiflib",
")",
"mot",
"=",
"io",
".",
"loadmat",
"(",
"fname",
")",
"m3n",
"=",
"mot",
"[",
"'m3n'",
"]",
"id3",
"=",
"mot",
"[",
"'id3'",
"]",
".",
"squeeze",
"(",
")",
"n",
"=",
"len",
"(",
"A",
")",
"# number of vertices in A",
"f",
"=",
"np",
".",
"zeros",
"(",
"(",
"13",
",",
")",
")",
"# motif count for whole graph",
"F",
"=",
"np",
".",
"zeros",
"(",
"(",
"13",
",",
"n",
")",
")",
"# motif frequency",
"A",
"=",
"binarize",
"(",
"A",
",",
"copy",
"=",
"True",
")",
"# ensure A is binary",
"As",
"=",
"np",
".",
"logical_or",
"(",
"A",
",",
"A",
".",
"T",
")",
"# symmetrized adjmat",
"for",
"u",
"in",
"range",
"(",
"n",
"-",
"2",
")",
":",
"# v1: neighbors of u (>u)",
"V1",
"=",
"np",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"(",
"u",
",",
")",
",",
"dtype",
"=",
"int",
")",
",",
"As",
"[",
"u",
",",
"u",
"+",
"1",
":",
"n",
"+",
"1",
"]",
")",
"for",
"v1",
"in",
"np",
".",
"where",
"(",
"V1",
")",
"[",
"0",
"]",
":",
"# v2: neighbors of v1 (>u)",
"V2",
"=",
"np",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"(",
"u",
",",
")",
",",
"dtype",
"=",
"int",
")",
",",
"As",
"[",
"v1",
",",
"u",
"+",
"1",
":",
"n",
"+",
"1",
"]",
")",
"V2",
"[",
"V1",
"]",
"=",
"0",
"# not already in V1",
"# and all neighbors of u (>v1)",
"V2",
"=",
"np",
".",
"logical_or",
"(",
"np",
".",
"append",
"(",
"np",
".",
"zeros",
"(",
"(",
"v1",
",",
")",
")",
",",
"As",
"[",
"u",
",",
"v1",
"+",
"1",
":",
"n",
"+",
"1",
"]",
")",
",",
"V2",
")",
"for",
"v2",
"in",
"np",
".",
"where",
"(",
"V2",
")",
"[",
"0",
"]",
":",
"a",
"=",
"np",
".",
"array",
"(",
"(",
"A",
"[",
"v1",
",",
"u",
"]",
",",
"A",
"[",
"v2",
",",
"u",
"]",
",",
"A",
"[",
"u",
",",
"v1",
"]",
",",
"A",
"[",
"v2",
",",
"v1",
"]",
",",
"A",
"[",
"u",
",",
"v2",
"]",
",",
"A",
"[",
"v1",
",",
"v2",
"]",
")",
")",
"s",
"=",
"np",
".",
"uint32",
"(",
"np",
".",
"sum",
"(",
"np",
".",
"power",
"(",
"10",
",",
"np",
".",
"arange",
"(",
"5",
",",
"-",
"1",
",",
"-",
"1",
")",
")",
"*",
"a",
")",
")",
"ix",
"=",
"id3",
"[",
"np",
".",
"squeeze",
"(",
"s",
"==",
"m3n",
")",
"]",
"-",
"1",
"F",
"[",
"ix",
",",
"u",
"]",
"+=",
"1",
"F",
"[",
"ix",
",",
"v1",
"]",
"+=",
"1",
"F",
"[",
"ix",
",",
"v2",
"]",
"+=",
"1",
"f",
"[",
"ix",
"]",
"+=",
"1",
"return",
"f",
",",
"F"
] | 32.826923 | 19.365385 |
def post(request):
""" Create a Gallery """
defaultname = 'New Gallery %i' % Gallery.objects.all().count()
data = request.POST or json.loads(request.body)['body']
title = data.get('title', defaultname)
description = data.get('description', '')
security = int(data.get('security', Gallery.PUBLIC))
g, created = Gallery.objects.get_or_create(title=title)
g.security = security
g.description = description
g.owner = request.user
g.save()
res = Result()
res.append(g.json())
res.message = 'Gallery created' if created else ''
return JsonResponse(res.asDict()) | [
"def",
"post",
"(",
"request",
")",
":",
"defaultname",
"=",
"'New Gallery %i'",
"%",
"Gallery",
".",
"objects",
".",
"all",
"(",
")",
".",
"count",
"(",
")",
"data",
"=",
"request",
".",
"POST",
"or",
"json",
".",
"loads",
"(",
"request",
".",
"body",
")",
"[",
"'body'",
"]",
"title",
"=",
"data",
".",
"get",
"(",
"'title'",
",",
"defaultname",
")",
"description",
"=",
"data",
".",
"get",
"(",
"'description'",
",",
"''",
")",
"security",
"=",
"int",
"(",
"data",
".",
"get",
"(",
"'security'",
",",
"Gallery",
".",
"PUBLIC",
")",
")",
"g",
",",
"created",
"=",
"Gallery",
".",
"objects",
".",
"get_or_create",
"(",
"title",
"=",
"title",
")",
"g",
".",
"security",
"=",
"security",
"g",
".",
"description",
"=",
"description",
"g",
".",
"owner",
"=",
"request",
".",
"user",
"g",
".",
"save",
"(",
")",
"res",
"=",
"Result",
"(",
")",
"res",
".",
"append",
"(",
"g",
".",
"json",
"(",
")",
")",
"res",
".",
"message",
"=",
"'Gallery created'",
"if",
"created",
"else",
"''",
"return",
"JsonResponse",
"(",
"res",
".",
"asDict",
"(",
")",
")"
] | 31.578947 | 18.421053 |
def find_command(cmd, path=None, pathext=None):
"""
Taken `from Django http://bit.ly/1njB3Y9>`_.
"""
if path is None:
path = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(path, string_types):
path = [path]
# check if there are path extensions for Windows executables
if pathext is None:
pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
pathext = pathext.split(os.pathsep)
# don't use extensions if the command ends with one of them
for ext in pathext:
if cmd.endswith(ext):
pathext = ['']
break
# check if we find the command on PATH
for p in path:
f = os.path.join(p, cmd)
if os.path.isfile(f):
return f
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
return fext
return None | [
"def",
"find_command",
"(",
"cmd",
",",
"path",
"=",
"None",
",",
"pathext",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATH'",
",",
"''",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"if",
"isinstance",
"(",
"path",
",",
"string_types",
")",
":",
"path",
"=",
"[",
"path",
"]",
"# check if there are path extensions for Windows executables",
"if",
"pathext",
"is",
"None",
":",
"pathext",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATHEXT'",
",",
"'.COM;.EXE;.BAT;.CMD'",
")",
"pathext",
"=",
"pathext",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"# don't use extensions if the command ends with one of them",
"for",
"ext",
"in",
"pathext",
":",
"if",
"cmd",
".",
"endswith",
"(",
"ext",
")",
":",
"pathext",
"=",
"[",
"''",
"]",
"break",
"# check if we find the command on PATH",
"for",
"p",
"in",
"path",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"cmd",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"return",
"f",
"for",
"ext",
"in",
"pathext",
":",
"fext",
"=",
"f",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fext",
")",
":",
"return",
"fext",
"return",
"None"
] | 29.1 | 16.166667 |
def get_bitcoind_info(self):
"""
Get bitcoind info. Try the cache, and on cache miss,
fetch from bitcoind and cache.
"""
cached_bitcoind_info = self.get_cached_bitcoind_info()
if cached_bitcoind_info:
return cached_bitcoind_info
bitcoind_opts = default_bitcoind_opts( virtualchain.get_config_filename(virtualchain_hooks, self.working_dir), prefix=True )
bitcoind = get_bitcoind( new_bitcoind_opts=bitcoind_opts, new=True )
if bitcoind is None:
return {'error': 'Internal server error: failed to connect to bitcoind'}
try:
info = bitcoind.getinfo()
assert 'error' not in info
assert 'blocks' in info
self.set_cached_bitcoind_info(info)
return info
except Exception as e:
raise | [
"def",
"get_bitcoind_info",
"(",
"self",
")",
":",
"cached_bitcoind_info",
"=",
"self",
".",
"get_cached_bitcoind_info",
"(",
")",
"if",
"cached_bitcoind_info",
":",
"return",
"cached_bitcoind_info",
"bitcoind_opts",
"=",
"default_bitcoind_opts",
"(",
"virtualchain",
".",
"get_config_filename",
"(",
"virtualchain_hooks",
",",
"self",
".",
"working_dir",
")",
",",
"prefix",
"=",
"True",
")",
"bitcoind",
"=",
"get_bitcoind",
"(",
"new_bitcoind_opts",
"=",
"bitcoind_opts",
",",
"new",
"=",
"True",
")",
"if",
"bitcoind",
"is",
"None",
":",
"return",
"{",
"'error'",
":",
"'Internal server error: failed to connect to bitcoind'",
"}",
"try",
":",
"info",
"=",
"bitcoind",
".",
"getinfo",
"(",
")",
"assert",
"'error'",
"not",
"in",
"info",
"assert",
"'blocks'",
"in",
"info",
"self",
".",
"set_cached_bitcoind_info",
"(",
"info",
")",
"return",
"info",
"except",
"Exception",
"as",
"e",
":",
"raise"
] | 34 | 21.52 |
def after_return(self, status, retval, task_id, args, kwargs, einfo):
"""
After a task has run (both succesfully or with a failure) clear the
lock if "unlock_before_run" is False.
"""
# Only clear the lock after the task's execution if the
# "unlock_before_run" option is False
if not self.unlock_before_run():
key = self.get_key(args, kwargs)
self.once_backend.clear_lock(key) | [
"def",
"after_return",
"(",
"self",
",",
"status",
",",
"retval",
",",
"task_id",
",",
"args",
",",
"kwargs",
",",
"einfo",
")",
":",
"# Only clear the lock after the task's execution if the",
"# \"unlock_before_run\" option is False",
"if",
"not",
"self",
".",
"unlock_before_run",
"(",
")",
":",
"key",
"=",
"self",
".",
"get_key",
"(",
"args",
",",
"kwargs",
")",
"self",
".",
"once_backend",
".",
"clear_lock",
"(",
"key",
")"
] | 44.8 | 10.6 |
def is_valid(self):
""" Checks recursively if the tree is valid
It is valid if each node splits correctly """
if not self:
return True
if self.left and self.data[self.axis] < self.left.data[self.axis]:
return False
if self.right and self.data[self.axis] > self.right.data[self.axis]:
return False
return all(c.is_valid() for c, _ in self.children) or self.is_leaf | [
"def",
"is_valid",
"(",
"self",
")",
":",
"if",
"not",
"self",
":",
"return",
"True",
"if",
"self",
".",
"left",
"and",
"self",
".",
"data",
"[",
"self",
".",
"axis",
"]",
"<",
"self",
".",
"left",
".",
"data",
"[",
"self",
".",
"axis",
"]",
":",
"return",
"False",
"if",
"self",
".",
"right",
"and",
"self",
".",
"data",
"[",
"self",
".",
"axis",
"]",
">",
"self",
".",
"right",
".",
"data",
"[",
"self",
".",
"axis",
"]",
":",
"return",
"False",
"return",
"all",
"(",
"c",
".",
"is_valid",
"(",
")",
"for",
"c",
",",
"_",
"in",
"self",
".",
"children",
")",
"or",
"self",
".",
"is_leaf"
] | 29.2 | 26.266667 |
def serialize_link(ctx, document, elem, root):
"""Serilaze link element.
This works only for external links at the moment.
"""
_a = etree.SubElement(root, 'a')
for el in elem.elements:
_ser = ctx.get_serializer(el)
if _ser:
_td = _ser(ctx, document, el, _a)
else:
if isinstance(el, doc.Text):
children = list(_a)
if len(children) == 0:
_text = _a.text or u''
_a.text = u'{}{}'.format(_text, el.value())
else:
_text = children[-1].tail or u''
children[-1].tail = u'{}{}'.format(_text, el.value())
if elem.rid in document.relationships[ctx.options['relationship']]:
_a.set('href', document.relationships[ctx.options['relationship']][elem.rid].get('target', ''))
fire_hooks(ctx, document, elem, _a, ctx.get_hook('a'))
return root | [
"def",
"serialize_link",
"(",
"ctx",
",",
"document",
",",
"elem",
",",
"root",
")",
":",
"_a",
"=",
"etree",
".",
"SubElement",
"(",
"root",
",",
"'a'",
")",
"for",
"el",
"in",
"elem",
".",
"elements",
":",
"_ser",
"=",
"ctx",
".",
"get_serializer",
"(",
"el",
")",
"if",
"_ser",
":",
"_td",
"=",
"_ser",
"(",
"ctx",
",",
"document",
",",
"el",
",",
"_a",
")",
"else",
":",
"if",
"isinstance",
"(",
"el",
",",
"doc",
".",
"Text",
")",
":",
"children",
"=",
"list",
"(",
"_a",
")",
"if",
"len",
"(",
"children",
")",
"==",
"0",
":",
"_text",
"=",
"_a",
".",
"text",
"or",
"u''",
"_a",
".",
"text",
"=",
"u'{}{}'",
".",
"format",
"(",
"_text",
",",
"el",
".",
"value",
"(",
")",
")",
"else",
":",
"_text",
"=",
"children",
"[",
"-",
"1",
"]",
".",
"tail",
"or",
"u''",
"children",
"[",
"-",
"1",
"]",
".",
"tail",
"=",
"u'{}{}'",
".",
"format",
"(",
"_text",
",",
"el",
".",
"value",
"(",
")",
")",
"if",
"elem",
".",
"rid",
"in",
"document",
".",
"relationships",
"[",
"ctx",
".",
"options",
"[",
"'relationship'",
"]",
"]",
":",
"_a",
".",
"set",
"(",
"'href'",
",",
"document",
".",
"relationships",
"[",
"ctx",
".",
"options",
"[",
"'relationship'",
"]",
"]",
"[",
"elem",
".",
"rid",
"]",
".",
"get",
"(",
"'target'",
",",
"''",
")",
")",
"fire_hooks",
"(",
"ctx",
",",
"document",
",",
"elem",
",",
"_a",
",",
"ctx",
".",
"get_hook",
"(",
"'a'",
")",
")",
"return",
"root"
] | 28.875 | 22.625 |
def process_pool(self, limited_run=False):
"""Return a pool for multiprocess operations, sized either to the number of CPUS, or a configured value"""
from multiprocessing import cpu_count
from ambry.bundle.concurrent import Pool, init_library
if self.processes:
cpus = self.processes
else:
cpus = cpu_count()
self.logger.info('Starting MP pool with {} processors'.format(cpus))
return Pool(self, processes=cpus, initializer=init_library,
maxtasksperchild=1,
initargs=[self.database.dsn, self._account_password, limited_run]) | [
"def",
"process_pool",
"(",
"self",
",",
"limited_run",
"=",
"False",
")",
":",
"from",
"multiprocessing",
"import",
"cpu_count",
"from",
"ambry",
".",
"bundle",
".",
"concurrent",
"import",
"Pool",
",",
"init_library",
"if",
"self",
".",
"processes",
":",
"cpus",
"=",
"self",
".",
"processes",
"else",
":",
"cpus",
"=",
"cpu_count",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Starting MP pool with {} processors'",
".",
"format",
"(",
"cpus",
")",
")",
"return",
"Pool",
"(",
"self",
",",
"processes",
"=",
"cpus",
",",
"initializer",
"=",
"init_library",
",",
"maxtasksperchild",
"=",
"1",
",",
"initargs",
"=",
"[",
"self",
".",
"database",
".",
"dsn",
",",
"self",
".",
"_account_password",
",",
"limited_run",
"]",
")"
] | 42.2 | 21.133333 |
def _ForemanOp(self):
"""Sends Foreman checks periodically."""
period = config.CONFIG["Client.foreman_check_frequency"]
self._threads["Worker"].SendReply(
rdf_protodict.DataBlob(),
session_id=rdfvalue.FlowSessionID(flow_name="Foreman"),
require_fastpoll=False)
time.sleep(period) | [
"def",
"_ForemanOp",
"(",
"self",
")",
":",
"period",
"=",
"config",
".",
"CONFIG",
"[",
"\"Client.foreman_check_frequency\"",
"]",
"self",
".",
"_threads",
"[",
"\"Worker\"",
"]",
".",
"SendReply",
"(",
"rdf_protodict",
".",
"DataBlob",
"(",
")",
",",
"session_id",
"=",
"rdfvalue",
".",
"FlowSessionID",
"(",
"flow_name",
"=",
"\"Foreman\"",
")",
",",
"require_fastpoll",
"=",
"False",
")",
"time",
".",
"sleep",
"(",
"period",
")"
] | 39 | 12.25 |
def _colorize(val, color):
"""Colorize a string using termcolor or colorama.
If any of them are available.
"""
if termcolor is not None:
val = termcolor.colored(val, color)
elif colorama is not None:
val = TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL
return val | [
"def",
"_colorize",
"(",
"val",
",",
"color",
")",
":",
"if",
"termcolor",
"is",
"not",
"None",
":",
"val",
"=",
"termcolor",
".",
"colored",
"(",
"val",
",",
"color",
")",
"elif",
"colorama",
"is",
"not",
"None",
":",
"val",
"=",
"TERMCOLOR2COLORAMA",
"[",
"color",
"]",
"+",
"val",
"+",
"colorama",
".",
"Style",
".",
"RESET_ALL",
"return",
"val"
] | 27.909091 | 16.636364 |
def validate(self, text, **kwargs):
"""Returns a boolean to indicate if this is a valid instance of
the type."""
cleaned = self.clean(text, **kwargs)
return cleaned is not None | [
"def",
"validate",
"(",
"self",
",",
"text",
",",
"*",
"*",
"kwargs",
")",
":",
"cleaned",
"=",
"self",
".",
"clean",
"(",
"text",
",",
"*",
"*",
"kwargs",
")",
"return",
"cleaned",
"is",
"not",
"None"
] | 40.8 | 3 |
def _proc_uri(self, request, result):
"""
Process the URI rules for the request. Both the desired API
version and desired content type can be determined from those
rules.
:param request: The Request object provided by WebOb.
:param result: The Result object to store the results in.
"""
if result:
# Result has already been fully determined
return
# First, determine the version based on the URI prefix
for prefix, version in self.uris:
if (request.path_info == prefix or
request.path_info.startswith(prefix + '/')):
result.set_version(version)
# Update the request particulars
request.script_name += prefix
request.path_info = request.path_info[len(prefix):]
if not request.path_info:
request.path_info = '/'
break
# Next, determine the content type based on the URI suffix
for format, ctype in self.formats.items():
if request.path_info.endswith(format):
result.set_ctype(ctype)
# Update the request particulars
request.path_info = request.path_info[:-len(format)]
break | [
"def",
"_proc_uri",
"(",
"self",
",",
"request",
",",
"result",
")",
":",
"if",
"result",
":",
"# Result has already been fully determined",
"return",
"# First, determine the version based on the URI prefix",
"for",
"prefix",
",",
"version",
"in",
"self",
".",
"uris",
":",
"if",
"(",
"request",
".",
"path_info",
"==",
"prefix",
"or",
"request",
".",
"path_info",
".",
"startswith",
"(",
"prefix",
"+",
"'/'",
")",
")",
":",
"result",
".",
"set_version",
"(",
"version",
")",
"# Update the request particulars",
"request",
".",
"script_name",
"+=",
"prefix",
"request",
".",
"path_info",
"=",
"request",
".",
"path_info",
"[",
"len",
"(",
"prefix",
")",
":",
"]",
"if",
"not",
"request",
".",
"path_info",
":",
"request",
".",
"path_info",
"=",
"'/'",
"break",
"# Next, determine the content type based on the URI suffix",
"for",
"format",
",",
"ctype",
"in",
"self",
".",
"formats",
".",
"items",
"(",
")",
":",
"if",
"request",
".",
"path_info",
".",
"endswith",
"(",
"format",
")",
":",
"result",
".",
"set_ctype",
"(",
"ctype",
")",
"# Update the request particulars",
"request",
".",
"path_info",
"=",
"request",
".",
"path_info",
"[",
":",
"-",
"len",
"(",
"format",
")",
"]",
"break"
] | 36.828571 | 18.6 |
def __capture_screenshot(webdriver, folder_location, file_name):
"Capture a screenshot"
# Check folder location exists.
if not os.path.exists(folder_location):
os.makedirs(folder_location)
file_location = os.path.join(folder_location, file_name)
if isinstance(webdriver, remote.webdriver.WebDriver):
# If this is a remote webdriver. We need to transmit the image data
# back across system boundries as a base 64 encoded string so it can
# be decoded back on the local system and written to disk.
base64_data = webdriver.get_screenshot_as_base64()
screenshot_data = base64.decodestring(base64_data)
screenshot_file = open(file_location, "wb")
screenshot_file.write(screenshot_data)
screenshot_file.close()
else:
webdriver.save_screenshot(file_location) | [
"def",
"__capture_screenshot",
"(",
"webdriver",
",",
"folder_location",
",",
"file_name",
")",
":",
"# Check folder location exists.",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"folder_location",
")",
":",
"os",
".",
"makedirs",
"(",
"folder_location",
")",
"file_location",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder_location",
",",
"file_name",
")",
"if",
"isinstance",
"(",
"webdriver",
",",
"remote",
".",
"webdriver",
".",
"WebDriver",
")",
":",
"# If this is a remote webdriver. We need to transmit the image data",
"# back across system boundries as a base 64 encoded string so it can",
"# be decoded back on the local system and written to disk.",
"base64_data",
"=",
"webdriver",
".",
"get_screenshot_as_base64",
"(",
")",
"screenshot_data",
"=",
"base64",
".",
"decodestring",
"(",
"base64_data",
")",
"screenshot_file",
"=",
"open",
"(",
"file_location",
",",
"\"wb\"",
")",
"screenshot_file",
".",
"write",
"(",
"screenshot_data",
")",
"screenshot_file",
".",
"close",
"(",
")",
"else",
":",
"webdriver",
".",
"save_screenshot",
"(",
"file_location",
")"
] | 47.578947 | 20.526316 |
def call(self, parameters):
""" Call an endpoint given the parameters
:param parameters: Dictionary of parameters
:type parameters: dict
:rtype: text
"""
# DEV !
parameters = {
key: str(parameters[key]) for key in parameters if parameters[key] is not None
}
if self.inventory is not None and "inv" not in parameters:
parameters["inv"] = self.inventory
request = requests.get(self.endpoint, params=parameters)
request.raise_for_status()
if request.encoding is None:
request.encoding = "utf-8"
return request.text | [
"def",
"call",
"(",
"self",
",",
"parameters",
")",
":",
"# DEV !",
"parameters",
"=",
"{",
"key",
":",
"str",
"(",
"parameters",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"parameters",
"if",
"parameters",
"[",
"key",
"]",
"is",
"not",
"None",
"}",
"if",
"self",
".",
"inventory",
"is",
"not",
"None",
"and",
"\"inv\"",
"not",
"in",
"parameters",
":",
"parameters",
"[",
"\"inv\"",
"]",
"=",
"self",
".",
"inventory",
"request",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"endpoint",
",",
"params",
"=",
"parameters",
")",
"request",
".",
"raise_for_status",
"(",
")",
"if",
"request",
".",
"encoding",
"is",
"None",
":",
"request",
".",
"encoding",
"=",
"\"utf-8\"",
"return",
"request",
".",
"text"
] | 33.421053 | 17.842105 |
def stream_keys(self, bucket, timeout=None):
"""
Streams keys from a bucket, returning an iterator that yields
lists of keys.
"""
msg_code = riak.pb.messages.MSG_CODE_LIST_KEYS_REQ
codec = self._get_codec(msg_code)
msg = codec.encode_stream_keys(bucket, timeout)
self._send_msg(msg.msg_code, msg.data)
return PbufKeyStream(self, codec) | [
"def",
"stream_keys",
"(",
"self",
",",
"bucket",
",",
"timeout",
"=",
"None",
")",
":",
"msg_code",
"=",
"riak",
".",
"pb",
".",
"messages",
".",
"MSG_CODE_LIST_KEYS_REQ",
"codec",
"=",
"self",
".",
"_get_codec",
"(",
"msg_code",
")",
"msg",
"=",
"codec",
".",
"encode_stream_keys",
"(",
"bucket",
",",
"timeout",
")",
"self",
".",
"_send_msg",
"(",
"msg",
".",
"msg_code",
",",
"msg",
".",
"data",
")",
"return",
"PbufKeyStream",
"(",
"self",
",",
"codec",
")"
] | 39.8 | 9.2 |
def space_wave(phase, amplitude=12, frequency=0.1):
"""
Function: space_wave
Summary: This function is used to generate a wave-like padding
spacement based on the variable lambda
Examples: >>> print('\n'.join(space_wave(x) for x in range(100))
β
βββ
ββββ
ββββββ
βββββββ
ββββββββ
βββββββββ
ββββββββββ
ββββββββββ
ββββββββββ
ββββββββββ
ββββββββββ
ββββββββββ
βββββββββ
ββββββββ
βββββββ
βββββ
ββββ
ββ
β
Attributes:
@param (phase): your positive variable, can be a int or float
@param (char) default='β': the char to construct the space_wave
@param (amplitude) default=10: a float/int number to describe
how long is the space_wave max
@param (frequency) default=0.1: the speed of change
Returns: a unique string of a sequence of 'char'
"""
wave = cycle(horizontal)
return ''.join((next(wave) for x in range
(int((amplitude + 1) * abs(sin(frequency * (phase))))))) | [
"def",
"space_wave",
"(",
"phase",
",",
"amplitude",
"=",
"12",
",",
"frequency",
"=",
"0.1",
")",
":",
"wave",
"=",
"cycle",
"(",
"horizontal",
")",
"return",
"''",
".",
"join",
"(",
"(",
"next",
"(",
"wave",
")",
"for",
"x",
"in",
"range",
"(",
"int",
"(",
"(",
"amplitude",
"+",
"1",
")",
"*",
"abs",
"(",
"sin",
"(",
"frequency",
"*",
"(",
"phase",
")",
")",
")",
")",
")",
")",
")"
] | 32.657895 | 19.605263 |
def create(cls, paas_info, vhost, alter_zone, background):
""" Create a new vhost. """
if not background and not cls.intty():
background = True
params = {'paas_id': paas_info['id'],
'vhost': vhost,
'zone_alter': alter_zone}
result = cls.call('paas.vhost.create', params, dry_run=True)
if background:
return result
cls.echo('Creating a new vhost.')
cls.display_progress(result)
cls.echo('Your vhost %s has been created.' % vhost)
return result | [
"def",
"create",
"(",
"cls",
",",
"paas_info",
",",
"vhost",
",",
"alter_zone",
",",
"background",
")",
":",
"if",
"not",
"background",
"and",
"not",
"cls",
".",
"intty",
"(",
")",
":",
"background",
"=",
"True",
"params",
"=",
"{",
"'paas_id'",
":",
"paas_info",
"[",
"'id'",
"]",
",",
"'vhost'",
":",
"vhost",
",",
"'zone_alter'",
":",
"alter_zone",
"}",
"result",
"=",
"cls",
".",
"call",
"(",
"'paas.vhost.create'",
",",
"params",
",",
"dry_run",
"=",
"True",
")",
"if",
"background",
":",
"return",
"result",
"cls",
".",
"echo",
"(",
"'Creating a new vhost.'",
")",
"cls",
".",
"display_progress",
"(",
"result",
")",
"cls",
".",
"echo",
"(",
"'Your vhost %s has been created.'",
"%",
"vhost",
")",
"return",
"result"
] | 31.166667 | 17.444444 |
def get_activities_by_query(self, activity_query):
"""Gets a list of ``Activities`` matching the given activity query.
arg: activity_query (osid.learning.ActivityQuery): the
activity query
return: (osid.learning.ActivityList) - the returned
``ActivityList``
raise: NullArgument - ``activity_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``activity_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in activity_query._query_terms:
if '$in' in activity_query._query_terms[term] and '$nin' in activity_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': activity_query._query_terms[term]['$in']}},
{term: {'$nin': activity_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: activity_query._query_terms[term]})
for term in activity_query._keyword_terms:
or_list.append({term: activity_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('learning',
collection='Activity',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.ActivityList(result, runtime=self._runtime, proxy=self._proxy) | [
"def",
"get_activities_by_query",
"(",
"self",
",",
"activity_query",
")",
":",
"# Implemented from template for",
"# osid.resource.ResourceQuerySession.get_resources_by_query",
"and_list",
"=",
"list",
"(",
")",
"or_list",
"=",
"list",
"(",
")",
"for",
"term",
"in",
"activity_query",
".",
"_query_terms",
":",
"if",
"'$in'",
"in",
"activity_query",
".",
"_query_terms",
"[",
"term",
"]",
"and",
"'$nin'",
"in",
"activity_query",
".",
"_query_terms",
"[",
"term",
"]",
":",
"and_list",
".",
"append",
"(",
"{",
"'$or'",
":",
"[",
"{",
"term",
":",
"{",
"'$in'",
":",
"activity_query",
".",
"_query_terms",
"[",
"term",
"]",
"[",
"'$in'",
"]",
"}",
"}",
",",
"{",
"term",
":",
"{",
"'$nin'",
":",
"activity_query",
".",
"_query_terms",
"[",
"term",
"]",
"[",
"'$nin'",
"]",
"}",
"}",
"]",
"}",
")",
"else",
":",
"and_list",
".",
"append",
"(",
"{",
"term",
":",
"activity_query",
".",
"_query_terms",
"[",
"term",
"]",
"}",
")",
"for",
"term",
"in",
"activity_query",
".",
"_keyword_terms",
":",
"or_list",
".",
"append",
"(",
"{",
"term",
":",
"activity_query",
".",
"_keyword_terms",
"[",
"term",
"]",
"}",
")",
"if",
"or_list",
":",
"and_list",
".",
"append",
"(",
"{",
"'$or'",
":",
"or_list",
"}",
")",
"view_filter",
"=",
"self",
".",
"_view_filter",
"(",
")",
"if",
"view_filter",
":",
"and_list",
".",
"append",
"(",
"view_filter",
")",
"if",
"and_list",
":",
"query_terms",
"=",
"{",
"'$and'",
":",
"and_list",
"}",
"collection",
"=",
"JSONClientValidated",
"(",
"'learning'",
",",
"collection",
"=",
"'Activity'",
",",
"runtime",
"=",
"self",
".",
"_runtime",
")",
"result",
"=",
"collection",
".",
"find",
"(",
"query_terms",
")",
".",
"sort",
"(",
"'_id'",
",",
"DESCENDING",
")",
"else",
":",
"result",
"=",
"[",
"]",
"return",
"objects",
".",
"ActivityList",
"(",
"result",
",",
"runtime",
"=",
"self",
".",
"_runtime",
",",
"proxy",
"=",
"self",
".",
"_proxy",
")"
] | 48.390244 | 21.073171 |
def _full_axis_reduce(self, axis, func, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
func: Function to reduce the Manager by. This function takes in a Manager.
axis: axis to apply the function to.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
Pandas series containing the reduced data.
"""
result = self.data.map_across_full_axis(axis, func)
if axis == 0:
columns = alternate_index if alternate_index is not None else self.columns
return self.__constructor__(result, index=["__reduced__"], columns=columns)
else:
index = alternate_index if alternate_index is not None else self.index
return self.__constructor__(result, index=index, columns=["__reduced__"]) | [
"def",
"_full_axis_reduce",
"(",
"self",
",",
"axis",
",",
"func",
",",
"alternate_index",
"=",
"None",
")",
":",
"result",
"=",
"self",
".",
"data",
".",
"map_across_full_axis",
"(",
"axis",
",",
"func",
")",
"if",
"axis",
"==",
"0",
":",
"columns",
"=",
"alternate_index",
"if",
"alternate_index",
"is",
"not",
"None",
"else",
"self",
".",
"columns",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"index",
"=",
"[",
"\"__reduced__\"",
"]",
",",
"columns",
"=",
"columns",
")",
"else",
":",
"index",
"=",
"alternate_index",
"if",
"alternate_index",
"is",
"not",
"None",
"else",
"self",
".",
"index",
"return",
"self",
".",
"__constructor__",
"(",
"result",
",",
"index",
"=",
"index",
",",
"columns",
"=",
"[",
"\"__reduced__\"",
"]",
")"
] | 50.526316 | 28.263158 |
def _exec_requested_job(self):
"""Execute the requested job after the timer has timeout."""
self._timer.stop()
self._job(*self._args, **self._kwargs) | [
"def",
"_exec_requested_job",
"(",
"self",
")",
":",
"self",
".",
"_timer",
".",
"stop",
"(",
")",
"self",
".",
"_job",
"(",
"*",
"self",
".",
"_args",
",",
"*",
"*",
"self",
".",
"_kwargs",
")"
] | 42.5 | 7.5 |
def _validate_query(query):
"""Validate and clean up a query to be sent to Search.
Cleans the query string, removes unneeded parameters, and validates for correctness.
Does not modify the original argument.
Raises an Exception on invalid input.
Arguments:
query (dict): The query to validate.
Returns:
dict: The validated query.
"""
query = deepcopy(query)
# q is always required
if query["q"] == BLANK_QUERY["q"]:
raise ValueError("No query specified.")
query["q"] = _clean_query_string(query["q"])
# limit should be set to appropriate default if not specified
if query["limit"] is None:
query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
# If specified, the limit should not be greater than the Search maximum
elif query["limit"] > SEARCH_LIMIT:
warnings.warn('Reduced result limit from {} to the Search maximum: {}'
.format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
query["limit"] = SEARCH_LIMIT
# Remove all blank/default values
for key, val in BLANK_QUERY.items():
# Default for get is NaN so comparison is always False
if query.get(key, float('nan')) == val:
query.pop(key)
# Remove unsupported fields
to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()]
[query.pop(field) for field in to_remove]
return query | [
"def",
"_validate_query",
"(",
"query",
")",
":",
"query",
"=",
"deepcopy",
"(",
"query",
")",
"# q is always required",
"if",
"query",
"[",
"\"q\"",
"]",
"==",
"BLANK_QUERY",
"[",
"\"q\"",
"]",
":",
"raise",
"ValueError",
"(",
"\"No query specified.\"",
")",
"query",
"[",
"\"q\"",
"]",
"=",
"_clean_query_string",
"(",
"query",
"[",
"\"q\"",
"]",
")",
"# limit should be set to appropriate default if not specified",
"if",
"query",
"[",
"\"limit\"",
"]",
"is",
"None",
":",
"query",
"[",
"\"limit\"",
"]",
"=",
"SEARCH_LIMIT",
"if",
"query",
"[",
"\"advanced\"",
"]",
"else",
"NONADVANCED_LIMIT",
"# If specified, the limit should not be greater than the Search maximum",
"elif",
"query",
"[",
"\"limit\"",
"]",
">",
"SEARCH_LIMIT",
":",
"warnings",
".",
"warn",
"(",
"'Reduced result limit from {} to the Search maximum: {}'",
".",
"format",
"(",
"query",
"[",
"\"limit\"",
"]",
",",
"SEARCH_LIMIT",
")",
",",
"RuntimeWarning",
")",
"query",
"[",
"\"limit\"",
"]",
"=",
"SEARCH_LIMIT",
"# Remove all blank/default values",
"for",
"key",
",",
"val",
"in",
"BLANK_QUERY",
".",
"items",
"(",
")",
":",
"# Default for get is NaN so comparison is always False",
"if",
"query",
".",
"get",
"(",
"key",
",",
"float",
"(",
"'nan'",
")",
")",
"==",
"val",
":",
"query",
".",
"pop",
"(",
"key",
")",
"# Remove unsupported fields",
"to_remove",
"=",
"[",
"field",
"for",
"field",
"in",
"query",
".",
"keys",
"(",
")",
"if",
"field",
"not",
"in",
"BLANK_QUERY",
".",
"keys",
"(",
")",
"]",
"[",
"query",
".",
"pop",
"(",
"field",
")",
"for",
"field",
"in",
"to_remove",
"]",
"return",
"query"
] | 36.461538 | 19.717949 |
def is_child(self, node):
"""Check if a node is a child of the current node
Parameters
----------
node : instance of Node
The potential child.
Returns
-------
child : bool
Whether or not the node is a child.
"""
if node in self.children:
return True
for c in self.children:
if c.is_child(node):
return True
return False | [
"def",
"is_child",
"(",
"self",
",",
"node",
")",
":",
"if",
"node",
"in",
"self",
".",
"children",
":",
"return",
"True",
"for",
"c",
"in",
"self",
".",
"children",
":",
"if",
"c",
".",
"is_child",
"(",
"node",
")",
":",
"return",
"True",
"return",
"False"
] | 23.947368 | 16.157895 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.