text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def on_success(self, inv_plugin, emit_set_slot):
"""
Called when the click was successful
and should be applied to the inventory.
Args:
inv_plugin (InventoryPlugin): inventory plugin instance
emit_set_slot (func): function to signal a slot change,
should be InventoryPlugin().emit_set_slot
"""
self.dirty = set()
self.apply(inv_plugin)
for changed_slot in self.dirty:
emit_set_slot(changed_slot) | [
"def",
"on_success",
"(",
"self",
",",
"inv_plugin",
",",
"emit_set_slot",
")",
":",
"self",
".",
"dirty",
"=",
"set",
"(",
")",
"self",
".",
"apply",
"(",
"inv_plugin",
")",
"for",
"changed_slot",
"in",
"self",
".",
"dirty",
":",
"emit_set_slot",
"(",
"changed_slot",
")"
] | 35.642857 | 13.071429 |
def delete_consumer_group(self, project, logstore, consumer_group):
""" Delete consumer group
:type project: string
:param project: project name
:type logstore: string
:param logstore: logstore name
:type consumer_group: string
:param consumer_group: consumer group name
:return: None
"""
headers = {"x-log-bodyrawsize": '0'}
params = {}
resource = "/logstores/" + logstore + "/consumergroups/" + consumer_group
(resp, header) = self._send("DELETE", project, None, resource, params, headers)
return DeleteConsumerGroupResponse(header, resp) | [
"def",
"delete_consumer_group",
"(",
"self",
",",
"project",
",",
"logstore",
",",
"consumer_group",
")",
":",
"headers",
"=",
"{",
"\"x-log-bodyrawsize\"",
":",
"'0'",
"}",
"params",
"=",
"{",
"}",
"resource",
"=",
"\"/logstores/\"",
"+",
"logstore",
"+",
"\"/consumergroups/\"",
"+",
"consumer_group",
"(",
"resp",
",",
"header",
")",
"=",
"self",
".",
"_send",
"(",
"\"DELETE\"",
",",
"project",
",",
"None",
",",
"resource",
",",
"params",
",",
"headers",
")",
"return",
"DeleteConsumerGroupResponse",
"(",
"header",
",",
"resp",
")"
] | 31.333333 | 21.333333 |
def add_errors(self, *errors: Union[BaseSchemaError, SchemaErrorCollection]) -> None:
""" Adds errors to the error store for the schema """
for error in errors:
self._error_cache.add(error) | [
"def",
"add_errors",
"(",
"self",
",",
"*",
"errors",
":",
"Union",
"[",
"BaseSchemaError",
",",
"SchemaErrorCollection",
"]",
")",
"->",
"None",
":",
"for",
"error",
"in",
"errors",
":",
"self",
".",
"_error_cache",
".",
"add",
"(",
"error",
")"
] | 53.5 | 14.25 |
def _do_run(self, mode='1'):
"""workhorse for the self.do_run_xx methods."""
for a in range(len(self.particle_groups)):
group = self.particle_groups[a]
lp = LMParticles(self.state, group, **self._kwargs)
if mode == 'internal':
lp.J, lp.JTJ, lp._dif_tile = self._load_j_diftile(a)
if mode == '1':
lp.do_run_1()
if mode == '2':
lp.do_run_2()
if mode == 'internal':
lp.do_internal_run()
self.stats.append(lp.get_termination_stats(get_cos=self.get_cos))
if self.save_J and (mode != 'internal'):
self._dump_j_diftile(a, lp.J, lp._dif_tile)
self._has_saved_J[a] = True | [
"def",
"_do_run",
"(",
"self",
",",
"mode",
"=",
"'1'",
")",
":",
"for",
"a",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"particle_groups",
")",
")",
":",
"group",
"=",
"self",
".",
"particle_groups",
"[",
"a",
"]",
"lp",
"=",
"LMParticles",
"(",
"self",
".",
"state",
",",
"group",
",",
"*",
"*",
"self",
".",
"_kwargs",
")",
"if",
"mode",
"==",
"'internal'",
":",
"lp",
".",
"J",
",",
"lp",
".",
"JTJ",
",",
"lp",
".",
"_dif_tile",
"=",
"self",
".",
"_load_j_diftile",
"(",
"a",
")",
"if",
"mode",
"==",
"'1'",
":",
"lp",
".",
"do_run_1",
"(",
")",
"if",
"mode",
"==",
"'2'",
":",
"lp",
".",
"do_run_2",
"(",
")",
"if",
"mode",
"==",
"'internal'",
":",
"lp",
".",
"do_internal_run",
"(",
")",
"self",
".",
"stats",
".",
"append",
"(",
"lp",
".",
"get_termination_stats",
"(",
"get_cos",
"=",
"self",
".",
"get_cos",
")",
")",
"if",
"self",
".",
"save_J",
"and",
"(",
"mode",
"!=",
"'internal'",
")",
":",
"self",
".",
"_dump_j_diftile",
"(",
"a",
",",
"lp",
".",
"J",
",",
"lp",
".",
"_dif_tile",
")",
"self",
".",
"_has_saved_J",
"[",
"a",
"]",
"=",
"True"
] | 39.684211 | 15.315789 |
def pcap_name(self, devname):
"""Return pcap device name for given Windows device name."""
try:
pcap_name = self.data[devname].pcap_name
except KeyError:
raise ValueError("Unknown network interface %r" % devname)
else:
return pcap_name | [
"def",
"pcap_name",
"(",
"self",
",",
"devname",
")",
":",
"try",
":",
"pcap_name",
"=",
"self",
".",
"data",
"[",
"devname",
"]",
".",
"pcap_name",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"\"Unknown network interface %r\"",
"%",
"devname",
")",
"else",
":",
"return",
"pcap_name"
] | 32.888889 | 19.555556 |
def close(self):
"""Commit changes and close the database."""
import sys, os
for store in self.stores:
if hasattr(store, 'save'):
store.save(reimport=False)
path, filename = os.path.split(store._filename)
modname = filename[:-3]
if modname in sys.modules:
del sys.modules[modname]
super().close() | [
"def",
"close",
"(",
"self",
")",
":",
"import",
"sys",
",",
"os",
"for",
"store",
"in",
"self",
".",
"stores",
":",
"if",
"hasattr",
"(",
"store",
",",
"'save'",
")",
":",
"store",
".",
"save",
"(",
"reimport",
"=",
"False",
")",
"path",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"store",
".",
"_filename",
")",
"modname",
"=",
"filename",
"[",
":",
"-",
"3",
"]",
"if",
"modname",
"in",
"sys",
".",
"modules",
":",
"del",
"sys",
".",
"modules",
"[",
"modname",
"]",
"super",
"(",
")",
".",
"close",
"(",
")"
] | 36.181818 | 8.727273 |
def _readFromUrl(cls, url, writable):
"""
Writes the contents of a file to a source (writes url to writable)
using a ~10Mb buffer.
:param str url: A path as a string of the file to be read from.
:param object writable: An open file object to write to.
"""
# we use a ~10Mb buffer to improve speed
with open(cls._extractPathFromUrl(url), 'rb') as readable:
shutil.copyfileobj(readable, writable, length=cls.BUFFER_SIZE) | [
"def",
"_readFromUrl",
"(",
"cls",
",",
"url",
",",
"writable",
")",
":",
"# we use a ~10Mb buffer to improve speed",
"with",
"open",
"(",
"cls",
".",
"_extractPathFromUrl",
"(",
"url",
")",
",",
"'rb'",
")",
"as",
"readable",
":",
"shutil",
".",
"copyfileobj",
"(",
"readable",
",",
"writable",
",",
"length",
"=",
"cls",
".",
"BUFFER_SIZE",
")"
] | 44.090909 | 19.181818 |
def from_global_driver(self):
"""Connect to the global driver."""
address, _ = _read_driver()
if address is None:
raise DriverNotRunningError("No driver currently running")
security = Security.from_default()
return Client(address=address, security=security) | [
"def",
"from_global_driver",
"(",
"self",
")",
":",
"address",
",",
"_",
"=",
"_read_driver",
"(",
")",
"if",
"address",
"is",
"None",
":",
"raise",
"DriverNotRunningError",
"(",
"\"No driver currently running\"",
")",
"security",
"=",
"Security",
".",
"from_default",
"(",
")",
"return",
"Client",
"(",
"address",
"=",
"address",
",",
"security",
"=",
"security",
")"
] | 33.666667 | 17.555556 |
def append_dict_values(list_of_dicts, keys=None):
"""
Return a dict of lists from a list of dicts with the same keys.
For each dict in list_of_dicts with look for the values of the
given keys and append it to the output dict.
Parameters
----------
list_of_dicts: list of dicts
keys: list of str
List of keys to create in the output dict
If None will use all keys in the first element of list_of_dicts
Returns
-------
DefaultOrderedDict of lists
"""
if keys is None:
keys = list(list_of_dicts[0].keys())
dict_of_lists = DefaultOrderedDict(list)
for d in list_of_dicts:
for k in keys:
dict_of_lists[k].append(d[k])
return dict_of_lists | [
"def",
"append_dict_values",
"(",
"list_of_dicts",
",",
"keys",
"=",
"None",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"list",
"(",
"list_of_dicts",
"[",
"0",
"]",
".",
"keys",
"(",
")",
")",
"dict_of_lists",
"=",
"DefaultOrderedDict",
"(",
"list",
")",
"for",
"d",
"in",
"list_of_dicts",
":",
"for",
"k",
"in",
"keys",
":",
"dict_of_lists",
"[",
"k",
"]",
".",
"append",
"(",
"d",
"[",
"k",
"]",
")",
"return",
"dict_of_lists"
] | 28.8 | 18.08 |
def _makedirs(self, path):
"""Make folders recursively for the given path and
check read and write permission on the path
Args:
path -- path to the leaf folder
"""
try:
oldmask = os.umask(0)
os.makedirs(path, self._conf['dmode'])
os.umask(oldmask)
except OSError as e:
if(e.errno == errno.EACCES):
raise Exception('not sufficent permissions to write on fsdb folder: "{0}"'.format(path))
elif(e.errno == errno.EEXIST):
fstat = os.stat(path)
if not stat.S_ISDIR(fstat.st_mode):
raise Exception('fsdb folder already exists but it is not a regular folder: "{0}"'.format(path))
elif not os.access(path, os.R_OK and os.W_OK):
raise Exception('not sufficent permissions to write on fsdb folder: "{0}"'.format(path))
else:
raise e | [
"def",
"_makedirs",
"(",
"self",
",",
"path",
")",
":",
"try",
":",
"oldmask",
"=",
"os",
".",
"umask",
"(",
"0",
")",
"os",
".",
"makedirs",
"(",
"path",
",",
"self",
".",
"_conf",
"[",
"'dmode'",
"]",
")",
"os",
".",
"umask",
"(",
"oldmask",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"(",
"e",
".",
"errno",
"==",
"errno",
".",
"EACCES",
")",
":",
"raise",
"Exception",
"(",
"'not sufficent permissions to write on fsdb folder: \"{0}\"'",
".",
"format",
"(",
"path",
")",
")",
"elif",
"(",
"e",
".",
"errno",
"==",
"errno",
".",
"EEXIST",
")",
":",
"fstat",
"=",
"os",
".",
"stat",
"(",
"path",
")",
"if",
"not",
"stat",
".",
"S_ISDIR",
"(",
"fstat",
".",
"st_mode",
")",
":",
"raise",
"Exception",
"(",
"'fsdb folder already exists but it is not a regular folder: \"{0}\"'",
".",
"format",
"(",
"path",
")",
")",
"elif",
"not",
"os",
".",
"access",
"(",
"path",
",",
"os",
".",
"R_OK",
"and",
"os",
".",
"W_OK",
")",
":",
"raise",
"Exception",
"(",
"'not sufficent permissions to write on fsdb folder: \"{0}\"'",
".",
"format",
"(",
"path",
")",
")",
"else",
":",
"raise",
"e"
] | 45.666667 | 19.52381 |
def make_datastore_api(client):
"""Create an instance of the GAPIC Datastore API.
:type client: :class:`~google.cloud.datastore.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`.datastore.v1.datastore_client.DatastoreClient`
:returns: A datastore API instance with the proper credentials.
"""
parse_result = six.moves.urllib_parse.urlparse(client._base_url)
host = parse_result.netloc
if parse_result.scheme == "https":
channel = make_secure_channel(client._credentials, DEFAULT_USER_AGENT, host)
else:
channel = insecure_channel(host)
return datastore_client.DatastoreClient(
channel=channel,
client_info=client_info.ClientInfo(
client_library_version=__version__, gapic_version=__version__
),
) | [
"def",
"make_datastore_api",
"(",
"client",
")",
":",
"parse_result",
"=",
"six",
".",
"moves",
".",
"urllib_parse",
".",
"urlparse",
"(",
"client",
".",
"_base_url",
")",
"host",
"=",
"parse_result",
".",
"netloc",
"if",
"parse_result",
".",
"scheme",
"==",
"\"https\"",
":",
"channel",
"=",
"make_secure_channel",
"(",
"client",
".",
"_credentials",
",",
"DEFAULT_USER_AGENT",
",",
"host",
")",
"else",
":",
"channel",
"=",
"insecure_channel",
"(",
"host",
")",
"return",
"datastore_client",
".",
"DatastoreClient",
"(",
"channel",
"=",
"channel",
",",
"client_info",
"=",
"client_info",
".",
"ClientInfo",
"(",
"client_library_version",
"=",
"__version__",
",",
"gapic_version",
"=",
"__version__",
")",
",",
")"
] | 37.272727 | 21.181818 |
def get_recent_repeated_responses(chatbot, conversation, sample=10, threshold=3, quantity=3):
"""
A filter that eliminates possibly repetitive responses to prevent
a chat bot from repeating statements that it has recently said.
"""
from collections import Counter
# Get the most recent statements from the conversation
conversation_statements = list(chatbot.storage.filter(
conversation=conversation,
order_by=['id']
))[sample * -1:]
text_of_recent_responses = [
statement.text for statement in conversation_statements
]
counter = Counter(text_of_recent_responses)
# Find the n most common responses from the conversation
most_common = counter.most_common(quantity)
return [
counted[0] for counted in most_common
if counted[1] >= threshold
] | [
"def",
"get_recent_repeated_responses",
"(",
"chatbot",
",",
"conversation",
",",
"sample",
"=",
"10",
",",
"threshold",
"=",
"3",
",",
"quantity",
"=",
"3",
")",
":",
"from",
"collections",
"import",
"Counter",
"# Get the most recent statements from the conversation",
"conversation_statements",
"=",
"list",
"(",
"chatbot",
".",
"storage",
".",
"filter",
"(",
"conversation",
"=",
"conversation",
",",
"order_by",
"=",
"[",
"'id'",
"]",
")",
")",
"[",
"sample",
"*",
"-",
"1",
":",
"]",
"text_of_recent_responses",
"=",
"[",
"statement",
".",
"text",
"for",
"statement",
"in",
"conversation_statements",
"]",
"counter",
"=",
"Counter",
"(",
"text_of_recent_responses",
")",
"# Find the n most common responses from the conversation",
"most_common",
"=",
"counter",
".",
"most_common",
"(",
"quantity",
")",
"return",
"[",
"counted",
"[",
"0",
"]",
"for",
"counted",
"in",
"most_common",
"if",
"counted",
"[",
"1",
"]",
">=",
"threshold",
"]"
] | 31.576923 | 21.807692 |
def process_reply(self, reply, status=None, description=None):
"""
Re-entry for processing a successful reply.
Depending on how the ``retxml`` option is set, may return the SOAP
reply XML or process it and return the Python object representing the
returned value.
@param reply: The SOAP reply envelope.
@type reply: I{bytes}
@param status: The HTTP status code.
@type status: int
@param description: Additional status description.
@type description: I{bytes}
@return: The invoked web service operation return value.
@rtype: I{builtin}|I{subclass of} L{Object}|I{bytes}|I{None}
"""
return self.__process_reply(reply, status, description) | [
"def",
"process_reply",
"(",
"self",
",",
"reply",
",",
"status",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"return",
"self",
".",
"__process_reply",
"(",
"reply",
",",
"status",
",",
"description",
")"
] | 39 | 19.736842 |
def get_weather(self, time, max_hour=6):
"""Get the current weather data from met.no."""
if self.data is None:
return {}
ordered_entries = []
for time_entry in self.data['product']['time']:
valid_from = parse_datetime(time_entry['@from'])
valid_to = parse_datetime(time_entry['@to'])
if time > valid_to:
# Has already passed. Never select this.
continue
average_dist = (abs((valid_to - time).total_seconds()) +
abs((valid_from - time).total_seconds()))
if average_dist > max_hour * 3600:
continue
ordered_entries.append((average_dist, time_entry))
if not ordered_entries:
return {}
ordered_entries.sort(key=lambda item: item[0])
res = dict()
res['datetime'] = time
res['temperature'] = get_data('temperature', ordered_entries)
res['condition'] = CONDITIONS.get(get_data('symbol', ordered_entries))
res['pressure'] = get_data('pressure', ordered_entries)
res['humidity'] = get_data('humidity', ordered_entries)
res['wind_speed'] = get_data('windSpeed', ordered_entries)
res['wind_bearing'] = get_data('windDirection', ordered_entries)
return res | [
"def",
"get_weather",
"(",
"self",
",",
"time",
",",
"max_hour",
"=",
"6",
")",
":",
"if",
"self",
".",
"data",
"is",
"None",
":",
"return",
"{",
"}",
"ordered_entries",
"=",
"[",
"]",
"for",
"time_entry",
"in",
"self",
".",
"data",
"[",
"'product'",
"]",
"[",
"'time'",
"]",
":",
"valid_from",
"=",
"parse_datetime",
"(",
"time_entry",
"[",
"'@from'",
"]",
")",
"valid_to",
"=",
"parse_datetime",
"(",
"time_entry",
"[",
"'@to'",
"]",
")",
"if",
"time",
">",
"valid_to",
":",
"# Has already passed. Never select this.",
"continue",
"average_dist",
"=",
"(",
"abs",
"(",
"(",
"valid_to",
"-",
"time",
")",
".",
"total_seconds",
"(",
")",
")",
"+",
"abs",
"(",
"(",
"valid_from",
"-",
"time",
")",
".",
"total_seconds",
"(",
")",
")",
")",
"if",
"average_dist",
">",
"max_hour",
"*",
"3600",
":",
"continue",
"ordered_entries",
".",
"append",
"(",
"(",
"average_dist",
",",
"time_entry",
")",
")",
"if",
"not",
"ordered_entries",
":",
"return",
"{",
"}",
"ordered_entries",
".",
"sort",
"(",
"key",
"=",
"lambda",
"item",
":",
"item",
"[",
"0",
"]",
")",
"res",
"=",
"dict",
"(",
")",
"res",
"[",
"'datetime'",
"]",
"=",
"time",
"res",
"[",
"'temperature'",
"]",
"=",
"get_data",
"(",
"'temperature'",
",",
"ordered_entries",
")",
"res",
"[",
"'condition'",
"]",
"=",
"CONDITIONS",
".",
"get",
"(",
"get_data",
"(",
"'symbol'",
",",
"ordered_entries",
")",
")",
"res",
"[",
"'pressure'",
"]",
"=",
"get_data",
"(",
"'pressure'",
",",
"ordered_entries",
")",
"res",
"[",
"'humidity'",
"]",
"=",
"get_data",
"(",
"'humidity'",
",",
"ordered_entries",
")",
"res",
"[",
"'wind_speed'",
"]",
"=",
"get_data",
"(",
"'windSpeed'",
",",
"ordered_entries",
")",
"res",
"[",
"'wind_bearing'",
"]",
"=",
"get_data",
"(",
"'windDirection'",
",",
"ordered_entries",
")",
"return",
"res"
] | 39.666667 | 21.212121 |
def paint( self, painter, option, index ):
"""
Overloads the paint method from Qt to perform some additional painting
on items.
:param painter | <QPainter>
option | <QStyleOption>
index | <QModelIndex>
"""
# draw the background
edit = self.parent()
item = edit.item(index.row())
if ( not isinstance(item, XMultiTagCreateItem) ):
if ( item.isSelected() ):
painter.setBrush(edit.highlightColor())
else:
painter.setBrush(edit.tagColor())
painter.drawRect(option.rect)
painter.setBrush(Qt.NoBrush)
painter.setPen(item.foreground().color())
super(XMultiTagDelegate, self).paint(painter, option, index)
# draw the border
item = self.parent().item(index.row())
if ( not isinstance(item, XMultiTagCreateItem) ):
painter.setPen(edit.borderColor())
painter.setBrush(Qt.NoBrush)
painter.drawRect(option.rect)
painter.drawText(option.rect.right() - 14,
option.rect.top() + 1,
16,
16,
Qt.AlignCenter,
'x') | [
"def",
"paint",
"(",
"self",
",",
"painter",
",",
"option",
",",
"index",
")",
":",
"# draw the background",
"edit",
"=",
"self",
".",
"parent",
"(",
")",
"item",
"=",
"edit",
".",
"item",
"(",
"index",
".",
"row",
"(",
")",
")",
"if",
"(",
"not",
"isinstance",
"(",
"item",
",",
"XMultiTagCreateItem",
")",
")",
":",
"if",
"(",
"item",
".",
"isSelected",
"(",
")",
")",
":",
"painter",
".",
"setBrush",
"(",
"edit",
".",
"highlightColor",
"(",
")",
")",
"else",
":",
"painter",
".",
"setBrush",
"(",
"edit",
".",
"tagColor",
"(",
")",
")",
"painter",
".",
"drawRect",
"(",
"option",
".",
"rect",
")",
"painter",
".",
"setBrush",
"(",
"Qt",
".",
"NoBrush",
")",
"painter",
".",
"setPen",
"(",
"item",
".",
"foreground",
"(",
")",
".",
"color",
"(",
")",
")",
"super",
"(",
"XMultiTagDelegate",
",",
"self",
")",
".",
"paint",
"(",
"painter",
",",
"option",
",",
"index",
")",
"# draw the border\r",
"item",
"=",
"self",
".",
"parent",
"(",
")",
".",
"item",
"(",
"index",
".",
"row",
"(",
")",
")",
"if",
"(",
"not",
"isinstance",
"(",
"item",
",",
"XMultiTagCreateItem",
")",
")",
":",
"painter",
".",
"setPen",
"(",
"edit",
".",
"borderColor",
"(",
")",
")",
"painter",
".",
"setBrush",
"(",
"Qt",
".",
"NoBrush",
")",
"painter",
".",
"drawRect",
"(",
"option",
".",
"rect",
")",
"painter",
".",
"drawText",
"(",
"option",
".",
"rect",
".",
"right",
"(",
")",
"-",
"14",
",",
"option",
".",
"rect",
".",
"top",
"(",
")",
"+",
"1",
",",
"16",
",",
"16",
",",
"Qt",
".",
"AlignCenter",
",",
"'x'",
")"
] | 36.621622 | 12.405405 |
def override_spec(cls, **kwargs):
"""OVerride 'spec' and '_default_spec' with given values"""
cls._default_spec.set(**kwargs)
cls.spec.set(**kwargs) | [
"def",
"override_spec",
"(",
"cls",
",",
"*",
"*",
"kwargs",
")",
":",
"cls",
".",
"_default_spec",
".",
"set",
"(",
"*",
"*",
"kwargs",
")",
"cls",
".",
"spec",
".",
"set",
"(",
"*",
"*",
"kwargs",
")"
] | 42.25 | 4.5 |
def isRevoked(self, crl_list):
"""
Given a list of trusted CRL (their signature has already been
verified with trusted anchors), this function returns True if
the certificate is marked as revoked by one of those CRL.
Note that if the Certificate was on hold in a previous CRL and
is now valid again in a new CRL and bot are in the list, it
will be considered revoked: this is because _all_ CRLs are
checked (not only the freshest) and revocation status is not
handled.
Also note that the check on the issuer is performed on the
Authority Key Identifier if available in _both_ the CRL and the
Cert. Otherwise, the issuers are simply compared.
"""
for c in crl_list:
if (self.authorityKeyID is not None and
c.authorityKeyID is not None and
self.authorityKeyID == c.authorityKeyID):
return self.serial in (x[0] for x in c.revoked_cert_serials)
elif self.issuer == c.issuer:
return self.serial in (x[0] for x in c.revoked_cert_serials)
return False | [
"def",
"isRevoked",
"(",
"self",
",",
"crl_list",
")",
":",
"for",
"c",
"in",
"crl_list",
":",
"if",
"(",
"self",
".",
"authorityKeyID",
"is",
"not",
"None",
"and",
"c",
".",
"authorityKeyID",
"is",
"not",
"None",
"and",
"self",
".",
"authorityKeyID",
"==",
"c",
".",
"authorityKeyID",
")",
":",
"return",
"self",
".",
"serial",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"c",
".",
"revoked_cert_serials",
")",
"elif",
"self",
".",
"issuer",
"==",
"c",
".",
"issuer",
":",
"return",
"self",
".",
"serial",
"in",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"c",
".",
"revoked_cert_serials",
")",
"return",
"False"
] | 47.291667 | 22.041667 |
def flush(self, log=False): # pylint:disable=too-many-branches, too-many-nested-blocks
"""Send inner stored metrics to the configured Graphite or InfluxDB
Returns False if the sending failed with a warning log if log parameter is set
:param log: to log information or not
:type log: bool
:return: bool
"""
if not self.my_metrics:
logger.debug("Flushing - no metrics to send")
return True
now = int(time.time())
if self.last_failure and self.last_failure + self.metrics_flush_pause > now:
if not self.log_metrics_flush_pause:
logger.warning("Flush paused on connection error (last failed: %d). "
"Inner stored metric: %d. Trying to send...",
self.last_failure, self.metrics_count)
self.log_metrics_flush_pause = True
if not self.test_connection():
return False
metrics_sent = False
metrics_saved = False
# Flushing to Graphite
if self.graphite_enabled:
try:
logger.debug("Flushing %d metrics to Graphite/carbon", self.metrics_count)
carbon_data = []
for metric in self.my_metrics:
# Get path
path = metric['tags']['path']
for name, value in metric['fields'].items():
carbon_data.append(
('.'.join([self.graphite_prefix, '.'.join([path, name])]),
(metric['time'], value)))
self.carbon.add_data_list(carbon_data)
if self.carbon.send_data():
metrics_sent = True
else:
if log:
logger.warning("Failed sending metrics to Graphite/carbon. "
"Inner stored metric: %d", self.metrics_count)
if self.log_metrics_flush_pause:
logger.warning("Metrics flush restored. "
"Remaining stored metric: %d", self.metrics_count)
self.last_failure = 0
self.log_metrics_flush_pause = False
except Exception as exp: # pylint: disable=broad-except
if not self.log_metrics_flush_pause:
logger.warning("Failed sending metrics to Graphite/carbon: %s:%d. "
"Inner stored metrics count: %d.",
self.graphite_host, self.graphite_port, self.metrics_count)
logger.warning("Exception: %s / %s", str(exp), traceback.print_exc())
else:
logger.warning("Flush paused on connection error (last failed: %d). "
"Inner stored metric: %d. Trying to send...",
self.last_failure, self.metrics_count)
self.last_failure = now
return False
# Flushing to InfluxDB
# pylint: disable=too-many-nested-blocks
if self.influxdb_enabled:
try:
logger.debug("Flushing %d metrics to InfluxDB", self.metrics_count)
for metric in self.my_metrics:
metric['time'] *= 1000000000
for name, value in metric['fields'].items():
if name.startswith('uom_'):
continue
# Force set float values
if not isinstance(value, float):
try:
value = float(value)
except Exception: # pylint: disable=broad-except
pass
metric['fields'][name] = value
if self.influxdb_tags is not None and isinstance(self.influxdb_tags, dict):
metric['tags'].update(self.influxdb_tags)
# Write data to InfluxDB
metrics_sent = self.influx.write_points(self.my_metrics)
if self.log_metrics_flush_pause:
logger.warning("Metrics flush restored. "
"Remaining stored metric: %d", self.metrics_count)
self.last_failure = 0
self.log_metrics_flush_pause = False
except Exception as exp: # pylint: disable=broad-except
logger.warning("*** Exception: %s", str(exp))
if not self.log_metrics_flush_pause:
logger.warning("Failed sending metrics to InfluxDB: %s:%d. "
"Inner stored metrics count: %d.",
self.influxdb_host, self.influxdb_port, self.metrics_count)
logger.warning("Exception: %s", str(exp))
else:
logger.warning("Flush paused on connection error (last failed: %d). "
"Inner stored metric: %d. Trying to send...",
self.last_failure, self.metrics_count)
self.last_failure = now
return False
if self.output_file:
try:
logger.debug("Storing %d metrics to %s", self.metrics_count, self.output_file)
with open(self.output_file, 'a') as fp:
for metric in self.my_metrics:
# Get path
path = metric['tags']['path']
for name, value in metric['fields'].items():
fp.write("%s;%s;%s\n" % (metric['time'], '.'.join((path, name)), value))
metrics_saved = True
except Exception as exp: # pylint: disable=broad-except
logger.warning("Failed writing to a file: %s. "
"Inner stored metrics count: %d\n Exception: %s",
self.output_file, self.metrics_count, str(exp))
return False
if ((self.graphite_host or self.influxdb_host) and metrics_sent) or \
(self.output_file and metrics_saved):
self.my_metrics = []
return True | [
"def",
"flush",
"(",
"self",
",",
"log",
"=",
"False",
")",
":",
"# pylint:disable=too-many-branches, too-many-nested-blocks",
"if",
"not",
"self",
".",
"my_metrics",
":",
"logger",
".",
"debug",
"(",
"\"Flushing - no metrics to send\"",
")",
"return",
"True",
"now",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"if",
"self",
".",
"last_failure",
"and",
"self",
".",
"last_failure",
"+",
"self",
".",
"metrics_flush_pause",
">",
"now",
":",
"if",
"not",
"self",
".",
"log_metrics_flush_pause",
":",
"logger",
".",
"warning",
"(",
"\"Flush paused on connection error (last failed: %d). \"",
"\"Inner stored metric: %d. Trying to send...\"",
",",
"self",
".",
"last_failure",
",",
"self",
".",
"metrics_count",
")",
"self",
".",
"log_metrics_flush_pause",
"=",
"True",
"if",
"not",
"self",
".",
"test_connection",
"(",
")",
":",
"return",
"False",
"metrics_sent",
"=",
"False",
"metrics_saved",
"=",
"False",
"# Flushing to Graphite",
"if",
"self",
".",
"graphite_enabled",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Flushing %d metrics to Graphite/carbon\"",
",",
"self",
".",
"metrics_count",
")",
"carbon_data",
"=",
"[",
"]",
"for",
"metric",
"in",
"self",
".",
"my_metrics",
":",
"# Get path",
"path",
"=",
"metric",
"[",
"'tags'",
"]",
"[",
"'path'",
"]",
"for",
"name",
",",
"value",
"in",
"metric",
"[",
"'fields'",
"]",
".",
"items",
"(",
")",
":",
"carbon_data",
".",
"append",
"(",
"(",
"'.'",
".",
"join",
"(",
"[",
"self",
".",
"graphite_prefix",
",",
"'.'",
".",
"join",
"(",
"[",
"path",
",",
"name",
"]",
")",
"]",
")",
",",
"(",
"metric",
"[",
"'time'",
"]",
",",
"value",
")",
")",
")",
"self",
".",
"carbon",
".",
"add_data_list",
"(",
"carbon_data",
")",
"if",
"self",
".",
"carbon",
".",
"send_data",
"(",
")",
":",
"metrics_sent",
"=",
"True",
"else",
":",
"if",
"log",
":",
"logger",
".",
"warning",
"(",
"\"Failed sending metrics to Graphite/carbon. \"",
"\"Inner stored metric: %d\"",
",",
"self",
".",
"metrics_count",
")",
"if",
"self",
".",
"log_metrics_flush_pause",
":",
"logger",
".",
"warning",
"(",
"\"Metrics flush restored. \"",
"\"Remaining stored metric: %d\"",
",",
"self",
".",
"metrics_count",
")",
"self",
".",
"last_failure",
"=",
"0",
"self",
".",
"log_metrics_flush_pause",
"=",
"False",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"if",
"not",
"self",
".",
"log_metrics_flush_pause",
":",
"logger",
".",
"warning",
"(",
"\"Failed sending metrics to Graphite/carbon: %s:%d. \"",
"\"Inner stored metrics count: %d.\"",
",",
"self",
".",
"graphite_host",
",",
"self",
".",
"graphite_port",
",",
"self",
".",
"metrics_count",
")",
"logger",
".",
"warning",
"(",
"\"Exception: %s / %s\"",
",",
"str",
"(",
"exp",
")",
",",
"traceback",
".",
"print_exc",
"(",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Flush paused on connection error (last failed: %d). \"",
"\"Inner stored metric: %d. Trying to send...\"",
",",
"self",
".",
"last_failure",
",",
"self",
".",
"metrics_count",
")",
"self",
".",
"last_failure",
"=",
"now",
"return",
"False",
"# Flushing to InfluxDB",
"# pylint: disable=too-many-nested-blocks",
"if",
"self",
".",
"influxdb_enabled",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Flushing %d metrics to InfluxDB\"",
",",
"self",
".",
"metrics_count",
")",
"for",
"metric",
"in",
"self",
".",
"my_metrics",
":",
"metric",
"[",
"'time'",
"]",
"*=",
"1000000000",
"for",
"name",
",",
"value",
"in",
"metric",
"[",
"'fields'",
"]",
".",
"items",
"(",
")",
":",
"if",
"name",
".",
"startswith",
"(",
"'uom_'",
")",
":",
"continue",
"# Force set float values",
"if",
"not",
"isinstance",
"(",
"value",
",",
"float",
")",
":",
"try",
":",
"value",
"=",
"float",
"(",
"value",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"pass",
"metric",
"[",
"'fields'",
"]",
"[",
"name",
"]",
"=",
"value",
"if",
"self",
".",
"influxdb_tags",
"is",
"not",
"None",
"and",
"isinstance",
"(",
"self",
".",
"influxdb_tags",
",",
"dict",
")",
":",
"metric",
"[",
"'tags'",
"]",
".",
"update",
"(",
"self",
".",
"influxdb_tags",
")",
"# Write data to InfluxDB",
"metrics_sent",
"=",
"self",
".",
"influx",
".",
"write_points",
"(",
"self",
".",
"my_metrics",
")",
"if",
"self",
".",
"log_metrics_flush_pause",
":",
"logger",
".",
"warning",
"(",
"\"Metrics flush restored. \"",
"\"Remaining stored metric: %d\"",
",",
"self",
".",
"metrics_count",
")",
"self",
".",
"last_failure",
"=",
"0",
"self",
".",
"log_metrics_flush_pause",
"=",
"False",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"warning",
"(",
"\"*** Exception: %s\"",
",",
"str",
"(",
"exp",
")",
")",
"if",
"not",
"self",
".",
"log_metrics_flush_pause",
":",
"logger",
".",
"warning",
"(",
"\"Failed sending metrics to InfluxDB: %s:%d. \"",
"\"Inner stored metrics count: %d.\"",
",",
"self",
".",
"influxdb_host",
",",
"self",
".",
"influxdb_port",
",",
"self",
".",
"metrics_count",
")",
"logger",
".",
"warning",
"(",
"\"Exception: %s\"",
",",
"str",
"(",
"exp",
")",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Flush paused on connection error (last failed: %d). \"",
"\"Inner stored metric: %d. Trying to send...\"",
",",
"self",
".",
"last_failure",
",",
"self",
".",
"metrics_count",
")",
"self",
".",
"last_failure",
"=",
"now",
"return",
"False",
"if",
"self",
".",
"output_file",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Storing %d metrics to %s\"",
",",
"self",
".",
"metrics_count",
",",
"self",
".",
"output_file",
")",
"with",
"open",
"(",
"self",
".",
"output_file",
",",
"'a'",
")",
"as",
"fp",
":",
"for",
"metric",
"in",
"self",
".",
"my_metrics",
":",
"# Get path",
"path",
"=",
"metric",
"[",
"'tags'",
"]",
"[",
"'path'",
"]",
"for",
"name",
",",
"value",
"in",
"metric",
"[",
"'fields'",
"]",
".",
"items",
"(",
")",
":",
"fp",
".",
"write",
"(",
"\"%s;%s;%s\\n\"",
"%",
"(",
"metric",
"[",
"'time'",
"]",
",",
"'.'",
".",
"join",
"(",
"(",
"path",
",",
"name",
")",
")",
",",
"value",
")",
")",
"metrics_saved",
"=",
"True",
"except",
"Exception",
"as",
"exp",
":",
"# pylint: disable=broad-except",
"logger",
".",
"warning",
"(",
"\"Failed writing to a file: %s. \"",
"\"Inner stored metrics count: %d\\n Exception: %s\"",
",",
"self",
".",
"output_file",
",",
"self",
".",
"metrics_count",
",",
"str",
"(",
"exp",
")",
")",
"return",
"False",
"if",
"(",
"(",
"self",
".",
"graphite_host",
"or",
"self",
".",
"influxdb_host",
")",
"and",
"metrics_sent",
")",
"or",
"(",
"self",
".",
"output_file",
"and",
"metrics_saved",
")",
":",
"self",
".",
"my_metrics",
"=",
"[",
"]",
"return",
"True"
] | 47 | 23.932331 |
def delete_pipeline(self, pipeline_key):
    """Delete the pipeline identified by *pipeline_key*.

    Returns:
        tuple: (status code, response dict) for the DELETE request;
        expect (200, {'success': 'true'}) on success, or
        (400, None) when no key was supplied.
    """
    # Guard clause: without a key there is nothing to delete.
    if not pipeline_key:
        return requests.codes.bad_request, None
    endpoint = '/'.join([self.api_uri, self.pipelines_suffix, pipeline_key])
    return self._req('delete', endpoint)
"def",
"delete_pipeline",
"(",
"self",
",",
"pipeline_key",
")",
":",
"if",
"pipeline_key",
":",
"uri",
"=",
"'/'",
".",
"join",
"(",
"[",
"self",
".",
"api_uri",
",",
"self",
".",
"pipelines_suffix",
",",
"pipeline_key",
"]",
")",
"return",
"self",
".",
"_req",
"(",
"'delete'",
",",
"uri",
")",
"else",
":",
"return",
"requests",
".",
"codes",
".",
"bad_request",
",",
"None"
] | 28.733333 | 19.8 |
def disable_avatar(self):
    """
    Temporarily disable the avatar.

    If :attr:`synchronize_vcard` is true, the vCard avatar is
    disabled (even if disabling the PEP avatar fails).

    This is done by setting the avatar metadata node empty and, if
    :attr:`synchronize_vcard` is true, downloading the vCard,
    removing the avatar data and re-uploading the vCard.

    This method does not error if neither protocol is active.

    :raises aioxmpp.errors.GatherError: if an exception is raised
        by the spawned tasks.
    """
    # Serialize against concurrent publish/disable operations.
    with (yield from self._publish_lock):
        todo = []
        # Optionally strip the avatar from the vCard as well.
        if self._synchronize_vcard:
            todo.append(self._disable_vcard_avatar())
        # Publish an empty metadata node only when PEP is usable.
        if (yield from self._pep.available()):
            todo.append(self._pep.publish(
                namespaces.xep0084_metadata,
                avatar_xso.Metadata()
            ))
        # Run all disable operations; failures are aggregated into a
        # single GatherError rather than aborting on the first one.
        yield from gather_reraise_multi(*todo, message="disable_avatar")
"def",
"disable_avatar",
"(",
"self",
")",
":",
"with",
"(",
"yield",
"from",
"self",
".",
"_publish_lock",
")",
":",
"todo",
"=",
"[",
"]",
"if",
"self",
".",
"_synchronize_vcard",
":",
"todo",
".",
"append",
"(",
"self",
".",
"_disable_vcard_avatar",
"(",
")",
")",
"if",
"(",
"yield",
"from",
"self",
".",
"_pep",
".",
"available",
"(",
")",
")",
":",
"todo",
".",
"append",
"(",
"self",
".",
"_pep",
".",
"publish",
"(",
"namespaces",
".",
"xep0084_metadata",
",",
"avatar_xso",
".",
"Metadata",
"(",
")",
")",
")",
"yield",
"from",
"gather_reraise_multi",
"(",
"*",
"todo",
",",
"message",
"=",
"\"disable_avatar\"",
")"
] | 34.862069 | 20.655172 |
def _sort_converters(cls, app_ready=False):
'''Sorts the converter functions'''
# app_ready is True when called from DMP's AppConfig.ready()
# we can't sort before then because models aren't ready
cls._sorting_enabled = cls._sorting_enabled or app_ready
if cls._sorting_enabled:
for converter in cls.converters:
converter.prepare_sort_key()
cls.converters.sort(key=attrgetter('sort_key')) | [
"def",
"_sort_converters",
"(",
"cls",
",",
"app_ready",
"=",
"False",
")",
":",
"# app_ready is True when called from DMP's AppConfig.ready()",
"# we can't sort before then because models aren't ready",
"cls",
".",
"_sorting_enabled",
"=",
"cls",
".",
"_sorting_enabled",
"or",
"app_ready",
"if",
"cls",
".",
"_sorting_enabled",
":",
"for",
"converter",
"in",
"cls",
".",
"converters",
":",
"converter",
".",
"prepare_sort_key",
"(",
")",
"cls",
".",
"converters",
".",
"sort",
"(",
"key",
"=",
"attrgetter",
"(",
"'sort_key'",
")",
")"
] | 51.111111 | 12.888889 |
def unitResponse(self, band):
    """Used internally for :ref:`pysynphot-formula-effstim` calculations.

    Integrates throughput * wavelength over the bandpass, scales the
    integral by 1/(h*c), and converts to a magnitude with the ST
    zero point.
    """
    # SUMFILT = Sum [ FILT(I) * WAVE(I) ** NPOW * DWAVE(I) ]
    wavelengths = band.wave
    integral = band.trapezoidIntegration(
        wavelengths, band.throughput * wavelengths)
    scaled_total = integral / (H * C)
    return 2.5 * math.log10(scaled_total) + STZERO
"def",
"unitResponse",
"(",
"self",
",",
"band",
")",
":",
"#sumfilt(wave,1,band)",
"# SUMFILT = Sum [ FILT(I) * WAVE(I) ** NPOW * DWAVE(I) ]",
"wave",
"=",
"band",
".",
"wave",
"total",
"=",
"band",
".",
"trapezoidIntegration",
"(",
"wave",
",",
"band",
".",
"throughput",
"*",
"wave",
")",
"modtot",
"=",
"total",
"/",
"(",
"H",
"*",
"C",
")",
"return",
"2.5",
"*",
"math",
".",
"log10",
"(",
"modtot",
")",
"+",
"STZERO"
] | 42.222222 | 12.333333 |
def addKwdArgsToSig(sigStr, kwArgsDict):
    """Alter the passed function signature string to add the given keywords.

    Parameters
    ----------
    sigStr : str
        A signature string such as ``"f(a, b)"``.
    kwArgsDict : dict
        Keyword name -> value pairs to append as ``name=value``.

    Returns
    -------
    str
        The signature with the keywords appended; ``sigStr`` unchanged
        when ``kwArgsDict`` is empty.
    """
    if not kwArgsDict:
        return sigStr
    # Open up the right-hand side for more args, then re-close with ')'.
    retval = sigStr.strip(' ,)')
    for name, value in kwArgsDict.items():
        # Only prepend a separator when there is already an argument.
        if retval[-1] != '(':
            retval += ", "
        retval += str(name) + "=" + str(value)
    return retval + ')'
"def",
"addKwdArgsToSig",
"(",
"sigStr",
",",
"kwArgsDict",
")",
":",
"retval",
"=",
"sigStr",
"if",
"len",
"(",
"kwArgsDict",
")",
">",
"0",
":",
"retval",
"=",
"retval",
".",
"strip",
"(",
"' ,)'",
")",
"# open up the r.h.s. for more args",
"for",
"k",
"in",
"kwArgsDict",
":",
"if",
"retval",
"[",
"-",
"1",
"]",
"!=",
"'('",
":",
"retval",
"+=",
"\", \"",
"retval",
"+=",
"str",
"(",
"k",
")",
"+",
"\"=\"",
"+",
"str",
"(",
"kwArgsDict",
"[",
"k",
"]",
")",
"retval",
"+=",
"')'",
"retval",
"=",
"retval",
"return",
"retval"
] | 38.181818 | 14.454545 |
async def _connect(self, host_loc):
    '''
    Work out where we should connect and open the appropriate kind of
    connection: plain TCP for http, TLS for https. Returns the
    connection object together with the resolved port.
    '''
    scheme, host, path, parameters, query, fragment = urlparse(host_loc)
    # Reject URLs that carry anything beyond scheme + host.
    if any((parameters, query, fragment)):
        raise ValueError('Supplied info beyond scheme, host.' +
                         ' Host should be top level only: ', path)
    host, port = get_netloc_port(scheme, host)
    address = (host, int(port))
    if scheme == 'http':
        connection = await self._open_connection_http(address)
    else:
        connection = await self._open_connection_https(address)
    return connection, port
"async",
"def",
"_connect",
"(",
"self",
",",
"host_loc",
")",
":",
"scheme",
",",
"host",
",",
"path",
",",
"parameters",
",",
"query",
",",
"fragment",
"=",
"urlparse",
"(",
"host_loc",
")",
"if",
"parameters",
"or",
"query",
"or",
"fragment",
":",
"raise",
"ValueError",
"(",
"'Supplied info beyond scheme, host.'",
"+",
"' Host should be top level only: '",
",",
"path",
")",
"host",
",",
"port",
"=",
"get_netloc_port",
"(",
"scheme",
",",
"host",
")",
"if",
"scheme",
"==",
"'http'",
":",
"return",
"await",
"self",
".",
"_open_connection_http",
"(",
"(",
"host",
",",
"int",
"(",
"port",
")",
")",
")",
",",
"port",
"else",
":",
"return",
"await",
"self",
".",
"_open_connection_https",
"(",
"(",
"host",
",",
"int",
"(",
"port",
")",
")",
")",
",",
"port"
] | 37.578947 | 19.263158 |
def check_version(expected_version):
    """Make sure the package version on disk matches what we expect.

    Reads the VERSION file under ``root_folder`` and compares it to
    ``expected_version``.

    Raises:
        EnvironmentError: when the recorded version differs.
    """
    version_path = os.path.join(root_folder, 'VERSION')
    with open(version_path, 'r') as fh:
        found = fh.read().strip()
    if found != expected_version:
        raise EnvironmentError("Version mismatch during release, expected={}, found={}"
                               .format(expected_version, found))
"def",
"check_version",
"(",
"expected_version",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"root_folder",
",",
"'VERSION'",
")",
",",
"'r'",
")",
"as",
"version_file",
":",
"version",
"=",
"version_file",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"if",
"version",
"!=",
"expected_version",
":",
"raise",
"EnvironmentError",
"(",
"\"Version mismatch during release, expected={}, found={}\"",
".",
"format",
"(",
"expected_version",
",",
"version",
")",
")"
] | 47.666667 | 22.222222 |
def get_groups_by_token(cls, username, token, request):
    """Get user's groups if user with :username: exists and their api key
    token equals :token:.

    Used by Token-based authentication as the `check` kwarg. Returns
    None (and forgets the session) when the lookup fails.
    """
    try:
        user = cls.get_item(username=username)
    except Exception as ex:
        log.error(str(ex))
        forget(request)
        return
    # Only hand out groups when the user exists and the token matches.
    if user and user.api_key.token == token:
        return ['g:%s' % group for group in user.groups]
"def",
"get_groups_by_token",
"(",
"cls",
",",
"username",
",",
"token",
",",
"request",
")",
":",
"try",
":",
"user",
"=",
"cls",
".",
"get_item",
"(",
"username",
"=",
"username",
")",
"except",
"Exception",
"as",
"ex",
":",
"log",
".",
"error",
"(",
"str",
"(",
"ex",
")",
")",
"forget",
"(",
"request",
")",
"return",
"else",
":",
"if",
"user",
"and",
"user",
".",
"api_key",
".",
"token",
"==",
"token",
":",
"return",
"[",
"'g:%s'",
"%",
"g",
"for",
"g",
"in",
"user",
".",
"groups",
"]"
] | 34.733333 | 15.6 |
def cluster_del_slots(self, slot, *slots):
    """Set hash slots as unbound in receiving node."""
    all_slots = (slot,) + slots
    # Every slot must be an integer before issuing the command.
    for candidate in all_slots:
        if not isinstance(candidate, int):
            raise TypeError("All parameters must be of type int")
    fut = self.execute(b'CLUSTER', b'DELSLOTS', *all_slots)
    return wait_ok(fut)
"def",
"cluster_del_slots",
"(",
"self",
",",
"slot",
",",
"*",
"slots",
")",
":",
"slots",
"=",
"(",
"slot",
",",
")",
"+",
"slots",
"if",
"not",
"all",
"(",
"isinstance",
"(",
"s",
",",
"int",
")",
"for",
"s",
"in",
"slots",
")",
":",
"raise",
"TypeError",
"(",
"\"All parameters must be of type int\"",
")",
"fut",
"=",
"self",
".",
"execute",
"(",
"b'CLUSTER'",
",",
"b'DELSLOTS'",
",",
"*",
"slots",
")",
"return",
"wait_ok",
"(",
"fut",
")"
] | 48 | 11.714286 |
def add_tag(self, name, value):
    """Append a new tag to this object's tag list.

    :param name: Name of the tag
    :type name: string
    :param value: Value of the tag
    :type value: string
    """
    new_tag = Tag(name, value)
    self.tags.append(new_tag)
"def",
"add_tag",
"(",
"self",
",",
"name",
",",
"value",
")",
":",
"self",
".",
"tags",
".",
"append",
"(",
"Tag",
"(",
"name",
",",
"value",
")",
")"
] | 26.444444 | 8 |
def _choi_to_superop(data, input_dim, output_dim):
    """Transform Choi to SuperOp representation."""
    # The reshuffle acts on the (in, out, in, out) tensor factorization.
    return _reshuffle(data, (input_dim, output_dim, input_dim, output_dim))
"def",
"_choi_to_superop",
"(",
"data",
",",
"input_dim",
",",
"output_dim",
")",
":",
"shape",
"=",
"(",
"input_dim",
",",
"output_dim",
",",
"input_dim",
",",
"output_dim",
")",
"return",
"_reshuffle",
"(",
"data",
",",
"shape",
")"
] | 48.25 | 8.5 |
def resolve(self, component_type, **kwargs):
    """
    Resolves an instance of the component type.

    :param component_type: The type of the component (e.g. a class).
    :param kwargs: Overriding arguments to use (by name) instead of resolving them.
    :return: An instance of the component.
    """
    # A fresh context per resolution; the lock serializes resolutions.
    with self._resolve_lock:
        return _ComponentContext(self).resolve(component_type, **kwargs)
"def",
"resolve",
"(",
"self",
",",
"component_type",
",",
"*",
"*",
"kwargs",
")",
":",
"with",
"self",
".",
"_resolve_lock",
":",
"context",
"=",
"_ComponentContext",
"(",
"self",
")",
"return",
"context",
".",
"resolve",
"(",
"component_type",
",",
"*",
"*",
"kwargs",
")"
] | 45.9 | 13.3 |
def pint_multiply(da, q, out_units=None):
    """Multiply xarray.DataArray by pint.Quantity.

    Parameters
    ----------
    da : xr.DataArray
        Input array.
    q : pint.Quantity
        Multiplicating factor.
    out_units : str
        Units the output array should be converted into.
    """
    # Combine the array's units with the factor in base units.
    factor = (1 * units2pint(da)) * q.to_base_units()
    if out_units:
        factor = factor.to(out_units)
    result = da * factor.magnitude
    result.attrs['units'] = pint2cfunits(factor.units)
    return result
"def",
"pint_multiply",
"(",
"da",
",",
"q",
",",
"out_units",
"=",
"None",
")",
":",
"a",
"=",
"1",
"*",
"units2pint",
"(",
"da",
")",
"f",
"=",
"a",
"*",
"q",
".",
"to_base_units",
"(",
")",
"if",
"out_units",
":",
"f",
"=",
"f",
".",
"to",
"(",
"out_units",
")",
"out",
"=",
"da",
"*",
"f",
".",
"magnitude",
"out",
".",
"attrs",
"[",
"'units'",
"]",
"=",
"pint2cfunits",
"(",
"f",
".",
"units",
")",
"return",
"out"
] | 24.842105 | 16.157895 |
def get_choice_status(self):
    """
    Returns a message field, which indicates whether choices are statically
    or dynamically defined, and a flag indicating whether a dynamic file
    selection loading error occurred.

    Throws an error if this is not a choice parameter.
    """
    param = self.dto[self.name]
    if 'choiceInfo' not in param:
        raise GPException('not a choice parameter')
    status = param['choiceInfo']['status']
    return status['message'], status['flag']
"def",
"get_choice_status",
"(",
"self",
")",
":",
"if",
"'choiceInfo'",
"not",
"in",
"self",
".",
"dto",
"[",
"self",
".",
"name",
"]",
":",
"raise",
"GPException",
"(",
"'not a choice parameter'",
")",
"status",
"=",
"self",
".",
"dto",
"[",
"self",
".",
"name",
"]",
"[",
"'choiceInfo'",
"]",
"[",
"'status'",
"]",
"return",
"status",
"[",
"'message'",
"]",
",",
"status",
"[",
"'flag'",
"]"
] | 39.384615 | 18 |
def _align(self, axes, key_shape=None):
    """
    Align local bolt array so that axes for iteration are in the keys.

    This operation is applied before most functional operators.
    It ensures that the specified axes are valid, and might transpose/reshape
    the underlying array so that the functional operators can be applied
    over the correct records.

    Parameters
    ----------
    axes: tuple[int]
        One or more axes that will be iterated over by a functional operator
    key_shape: list[int], optional
        Precomputed shape for the key axes; when omitted it is derived
        from ``self.shape`` and ``axes``.

    Returns
    -------
    BoltArrayLocal
    """
    # ensure that the key axes are valid for an ndarray of this shape
    inshape(self.shape, axes)
    # compute the set of dimensions/axes that will be used to reshape
    remaining = [dim for dim in range(len(self.shape)) if dim not in axes]
    # caller may pass a precomputed key_shape; otherwise derive it from axes
    key_shape = key_shape if key_shape else [self.shape[axis] for axis in axes]
    remaining_shape = [self.shape[axis] for axis in remaining]
    # collapse all key axes into one leading dimension of size prod(key_shape)
    linearized_shape = [prod(key_shape)] + remaining_shape
    # compute the transpose permutation
    transpose_order = axes + remaining
    # transpose the array so that the keys being mapped over come first, then linearize keys
    reshaped = self.transpose(*transpose_order).reshape(*linearized_shape)
    return reshaped
"def",
"_align",
"(",
"self",
",",
"axes",
",",
"key_shape",
"=",
"None",
")",
":",
"# ensure that the key axes are valid for an ndarray of this shape",
"inshape",
"(",
"self",
".",
"shape",
",",
"axes",
")",
"# compute the set of dimensions/axes that will be used to reshape",
"remaining",
"=",
"[",
"dim",
"for",
"dim",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"shape",
")",
")",
"if",
"dim",
"not",
"in",
"axes",
"]",
"key_shape",
"=",
"key_shape",
"if",
"key_shape",
"else",
"[",
"self",
".",
"shape",
"[",
"axis",
"]",
"for",
"axis",
"in",
"axes",
"]",
"remaining_shape",
"=",
"[",
"self",
".",
"shape",
"[",
"axis",
"]",
"for",
"axis",
"in",
"remaining",
"]",
"linearized_shape",
"=",
"[",
"prod",
"(",
"key_shape",
")",
"]",
"+",
"remaining_shape",
"# compute the transpose permutation",
"transpose_order",
"=",
"axes",
"+",
"remaining",
"# transpose the array so that the keys being mapped over come first, then linearize keys",
"reshaped",
"=",
"self",
".",
"transpose",
"(",
"*",
"transpose_order",
")",
".",
"reshape",
"(",
"*",
"linearized_shape",
")",
"return",
"reshaped"
] | 38.114286 | 27.2 |
def label(self, input_grid):
    """
    Labels input grid using enhanced watershed algorithm.

    Args:
        input_grid (numpy.ndarray): Grid to be labeled.
    Returns:
        Array of labeled pixels
    """
    maxima = self.find_local_maxima(input_grid)
    # Binarize: 1 where a local maximum was marked, 0 elsewhere.
    binary_mask = np.where(maxima >= 0, 1, 0)
    # splabel returns (labeled_array, num_features); keep only the array.
    return splabel(binary_mask)[0]
"def",
"label",
"(",
"self",
",",
"input_grid",
")",
":",
"marked",
"=",
"self",
".",
"find_local_maxima",
"(",
"input_grid",
")",
"marked",
"=",
"np",
".",
"where",
"(",
"marked",
">=",
"0",
",",
"1",
",",
"0",
")",
"# splabel returns two things in a tuple: an array and an integer",
"# assign the first thing (array) to markers",
"markers",
"=",
"splabel",
"(",
"marked",
")",
"[",
"0",
"]",
"return",
"markers"
] | 31.875 | 16.75 |
def decrypt(data, digest=True):
    """Decrypt provided data.

    The algorithm name is taken from the ``alg$payload`` format; data
    without an algorithm prefix is returned unchanged.
    """
    alg, _, payload = data.rpartition("$")
    if not alg:
        return payload
    if digest:
        payload = _from_hex_digest(payload)
    try:
        decryptor = implementations["decryption"][alg]
        return decryptor(payload, implementations["get_key"]())
    except KeyError:
        raise CryptError("Can not decrypt key for algorithm: %s" % alg)
"def",
"decrypt",
"(",
"data",
",",
"digest",
"=",
"True",
")",
":",
"alg",
",",
"_",
",",
"data",
"=",
"data",
".",
"rpartition",
"(",
"\"$\"",
")",
"if",
"not",
"alg",
":",
"return",
"data",
"data",
"=",
"_from_hex_digest",
"(",
"data",
")",
"if",
"digest",
"else",
"data",
"try",
":",
"return",
"implementations",
"[",
"\"decryption\"",
"]",
"[",
"alg",
"]",
"(",
"data",
",",
"implementations",
"[",
"\"get_key\"",
"]",
"(",
")",
")",
"except",
"KeyError",
":",
"raise",
"CryptError",
"(",
"\"Can not decrypt key for algorithm: %s\"",
"%",
"alg",
")"
] | 32.75 | 16.583333 |
def wwpn_alloc(self):
    """
    Allocates a WWPN unique to this partition, in the range of
    0xAFFEAFFE00008000 to 0xAFFEAFFE0000FFFF.

    Returns:
        string: The WWPN as 16 hexadecimal digits in upper case.

    Raises:
        ValueError: No more WWPNs available in that range.
    """
    suffix = self._wwpn_pool.alloc()
    # Fixed 12-digit prefix + 4-digit upper-case hex suffix from the pool.
    return "AFFEAFFE0000{:04X}".format(suffix)
"def",
"wwpn_alloc",
"(",
"self",
")",
":",
"wwpn_int",
"=",
"self",
".",
"_wwpn_pool",
".",
"alloc",
"(",
")",
"wwpn",
"=",
"\"AFFEAFFE0000\"",
"+",
"\"{:04X}\"",
".",
"format",
"(",
"wwpn_int",
")",
"return",
"wwpn"
] | 30.928571 | 19.214286 |
def get_label_map(opts):
    """Find volume labels from the filesystem and return them as a dict
    mapping resolved device path -> decoded label name."""
    label_map = {}
    try:
        for entry in os.scandir(diskdir):
            link_target = normpath(join(diskdir, os.readlink(entry.path)))
            # Label directory entries are escaped; decode them back.
            label_map[link_target] = entry.name.encode('utf8').decode('unicode_escape')
        if opts.debug:
            print('\n\nlabel_map:', label_map)
    except FileNotFoundError:
        # The by-label directory may simply not exist on this system.
        pass
    return label_map
"def",
"get_label_map",
"(",
"opts",
")",
":",
"results",
"=",
"{",
"}",
"try",
":",
"for",
"entry",
"in",
"os",
".",
"scandir",
"(",
"diskdir",
")",
":",
"target",
"=",
"normpath",
"(",
"join",
"(",
"diskdir",
",",
"os",
".",
"readlink",
"(",
"entry",
".",
"path",
")",
")",
")",
"decoded_name",
"=",
"entry",
".",
"name",
".",
"encode",
"(",
"'utf8'",
")",
".",
"decode",
"(",
"'unicode_escape'",
")",
"results",
"[",
"target",
"]",
"=",
"decoded_name",
"if",
"opts",
".",
"debug",
":",
"print",
"(",
"'\\n\\nlabel_map:'",
",",
"results",
")",
"except",
"FileNotFoundError",
":",
"pass",
"return",
"results"
] | 36.538462 | 19.769231 |
def update(self, hosted_number_order_sids=values.unset,
           address_sid=values.unset, email=values.unset, cc_emails=values.unset,
           status=values.unset, contact_title=values.unset,
           contact_phone_number=values.unset):
    """
    Update the AuthorizationDocumentInstance

    :param unicode hosted_number_order_sids: A list of HostedNumberOrder sids.
    :param unicode address_sid: Address sid.
    :param unicode email: Email.
    :param unicode cc_emails: A list of emails.
    :param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument.
    :param unicode contact_title: Title of signee of this Authorization Document.
    :param unicode contact_phone_number: Authorization Document's signee's phone number.

    :returns: Updated AuthorizationDocumentInstance
    :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentInstance
    """
    # Pure delegation: forward every field to the proxy context untouched.
    fields = dict(
        hosted_number_order_sids=hosted_number_order_sids,
        address_sid=address_sid,
        email=email,
        cc_emails=cc_emails,
        status=status,
        contact_title=contact_title,
        contact_phone_number=contact_phone_number,
    )
    return self._proxy.update(**fields)
"def",
"update",
"(",
"self",
",",
"hosted_number_order_sids",
"=",
"values",
".",
"unset",
",",
"address_sid",
"=",
"values",
".",
"unset",
",",
"email",
"=",
"values",
".",
"unset",
",",
"cc_emails",
"=",
"values",
".",
"unset",
",",
"status",
"=",
"values",
".",
"unset",
",",
"contact_title",
"=",
"values",
".",
"unset",
",",
"contact_phone_number",
"=",
"values",
".",
"unset",
")",
":",
"return",
"self",
".",
"_proxy",
".",
"update",
"(",
"hosted_number_order_sids",
"=",
"hosted_number_order_sids",
",",
"address_sid",
"=",
"address_sid",
",",
"email",
"=",
"email",
",",
"cc_emails",
"=",
"cc_emails",
",",
"status",
"=",
"status",
",",
"contact_title",
"=",
"contact_title",
",",
"contact_phone_number",
"=",
"contact_phone_number",
",",
")"
] | 47.851852 | 22.074074 |
def bp_rate_limit_heavy_bp_rate_limit_slot_bp_rate_limit_slot_num(self, **kwargs):
    """Auto Generated Code

    Builds the config XML tree
    config/bp-rate-limit/heavy/bp-rate-limit-slot/bp-rate-limit-slot-num
    and hands it to the callback (default: self._callback).
    """
    config = ET.Element("config")
    bp_rate_limit = ET.SubElement(
        config, "bp-rate-limit",
        xmlns="urn:brocade.com:mgmt:brocade-bprate-limit")
    heavy = ET.SubElement(bp_rate_limit, "heavy")
    slot = ET.SubElement(heavy, "bp-rate-limit-slot")
    slot_num = ET.SubElement(slot, "bp-rate-limit-slot-num")
    slot_num.text = kwargs.pop('bp_rate_limit_slot_num')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
"def",
"bp_rate_limit_heavy_bp_rate_limit_slot_bp_rate_limit_slot_num",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"bp_rate_limit",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"bp-rate-limit\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-bprate-limit\"",
")",
"heavy",
"=",
"ET",
".",
"SubElement",
"(",
"bp_rate_limit",
",",
"\"heavy\"",
")",
"bp_rate_limit_slot",
"=",
"ET",
".",
"SubElement",
"(",
"heavy",
",",
"\"bp-rate-limit-slot\"",
")",
"bp_rate_limit_slot_num",
"=",
"ET",
".",
"SubElement",
"(",
"bp_rate_limit_slot",
",",
"\"bp-rate-limit-slot-num\"",
")",
"bp_rate_limit_slot_num",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'bp_rate_limit_slot_num'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 54.25 | 26.166667 |
def gdaArray(arry, dtype, numGhosts=1):
    """
    ghosted distributed array constructor
    @param arry numpy-like array
    @param dtype data type of the resulting array
    @param numGhosts the number of ghosts (>= 0)
    """
    source = numpy.array(arry, dtype)
    ghosted = GhostedDistArray(source.shape, source.dtype)
    ghosted.setNumberOfGhosts(numGhosts)
    # Copy the source values into the ghosted container.
    ghosted[:] = source
    return ghosted
"def",
"gdaArray",
"(",
"arry",
",",
"dtype",
",",
"numGhosts",
"=",
"1",
")",
":",
"a",
"=",
"numpy",
".",
"array",
"(",
"arry",
",",
"dtype",
")",
"res",
"=",
"GhostedDistArray",
"(",
"a",
".",
"shape",
",",
"a",
".",
"dtype",
")",
"res",
".",
"setNumberOfGhosts",
"(",
"numGhosts",
")",
"res",
"[",
":",
"]",
"=",
"a",
"return",
"res"
] | 28.545455 | 7.818182 |
def _generatePayload(self, query):
    """Adds the following defaults to the payload:
    __rev, __user, __a, ttstamp, fb_dtsg, __req
    """
    payload = dict(self._payload_default)
    if query:
        payload.update(query)
    # __req is the request counter encoded in base 36, per request.
    payload["__req"] = str_base(self._req_counter, 36)
    payload["seq"] = self._seq
    self._req_counter += 1
    return payload
"def",
"_generatePayload",
"(",
"self",
",",
"query",
")",
":",
"payload",
"=",
"self",
".",
"_payload_default",
".",
"copy",
"(",
")",
"if",
"query",
":",
"payload",
".",
"update",
"(",
"query",
")",
"payload",
"[",
"\"__req\"",
"]",
"=",
"str_base",
"(",
"self",
".",
"_req_counter",
",",
"36",
")",
"payload",
"[",
"\"seq\"",
"]",
"=",
"self",
".",
"_seq",
"self",
".",
"_req_counter",
"+=",
"1",
"return",
"payload"
] | 35.636364 | 9.727273 |
def _clone(self):
"""Make a (shallow) copy of the set.
There is a 'clone protocol' that subclasses of this class
should use. To make a copy, first call your super's _clone()
method, and use the object returned as the new instance. Then
make shallow copies of the attributes defined in the subclass.
This protocol allows us to write the set algorithms that
return new instances (e.g. union) once, and keep using them in
subclasses.
"""
cls = self.__class__
obj = cls.__new__(cls)
obj.items = list(self.items)
return obj | [
"def",
"_clone",
"(",
"self",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"obj",
"=",
"cls",
".",
"__new__",
"(",
"cls",
")",
"obj",
".",
"items",
"=",
"list",
"(",
"self",
".",
"items",
")",
"return",
"obj"
] | 35.941176 | 22.352941 |
def list_vars(script_path, ignore=IGNORE_DEFAULT):
    """
    Given a shell script, returns a list of shell variable names.
    Note: this method executes the script, so beware if it contains side-effects.

    :param script_path: Path the a shell script
    :type script_path: str or unicode
    :param ignore: variable names to ignore. By default we ignore variables
                   that env injects into the script's environment.
                   See IGNORE_DEFAULT.
    :type ignore: iterable
    :return: Key value pairs representing the environment variables defined
             in the script.
    :rtype: list
    """
    if not path.isfile(script_path):
        raise _noscripterror(script_path)
    # Source the script in a clean environment, then have awk print the
    # name of every variable that is actually set afterwards.
    input = (""". "%s"; env | awk -F = '/[a-zA-Z_][a-zA-Z_0-9]*=/ """ % script_path +
             """{ if (!system("[ -n \\"${" $1 "}\\" ]")) print $1 }'""")
    cmd = "env -i bash".split()
    # universal_newlines=True makes communicate() accept and return str.
    # Without it, passing a str raises TypeError on Python 3, and bytes
    # output would never match the str entries in `ignore`.
    p = Popen(cmd, stdout=PIPE, stdin=PIPE, stderr=PIPE,
              universal_newlines=True)
    stdout_data, stderr_data = p.communicate(input=input)
    if stderr_data:
        raise ShellScriptException(script_path, stderr_data)
    lines = stdout_data.split()
    return [elt for elt in lines if elt not in ignore]
"def",
"list_vars",
"(",
"script_path",
",",
"ignore",
"=",
"IGNORE_DEFAULT",
")",
":",
"if",
"path",
".",
"isfile",
"(",
"script_path",
")",
":",
"input",
"=",
"(",
"\"\"\". \"%s\"; env | awk -F = '/[a-zA-Z_][a-zA-Z_0-9]*=/ \"\"\"",
"%",
"script_path",
"+",
"\"\"\"{ if (!system(\"[ -n \\\\\"${\" $1 \"}\\\\\" ]\")) print $1 }'\"\"\"",
")",
"cmd",
"=",
"\"env -i bash\"",
".",
"split",
"(",
")",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stdin",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"stdout_data",
",",
"stderr_data",
"=",
"p",
".",
"communicate",
"(",
"input",
"=",
"input",
")",
"if",
"stderr_data",
":",
"raise",
"ShellScriptException",
"(",
"script_path",
",",
"stderr_data",
")",
"else",
":",
"lines",
"=",
"stdout_data",
".",
"split",
"(",
")",
"return",
"[",
"elt",
"for",
"elt",
"in",
"lines",
"if",
"elt",
"not",
"in",
"ignore",
"]",
"else",
":",
"raise",
"_noscripterror",
"(",
"script_path",
")"
] | 43.678571 | 16.214286 |
def colgen(*lstcol, **kargs):
    """Returns a generator that mixes provided quantities forever

    trans: a function to convert the three arguments into a color.
    lambda x,y,z:(x,y,z) by default"""
    # A single quantity is duplicated so the index arithmetic below works.
    if len(lstcol) < 2:
        lstcol *= 2
    trans = kargs.get("trans", lambda x, y, z: (x, y, z))
    n = len(lstcol)
    while True:
        for i in range(n):
            for j in range(n):
                for k in range(n):
                    # Skip only the fully-diagonal combination.
                    if i == j == k:
                        continue
                    yield trans(lstcol[(i + j) % n],
                                lstcol[(j + k) % n],
                                lstcol[(k + i) % n])
"def",
"colgen",
"(",
"*",
"lstcol",
",",
"*",
"*",
"kargs",
")",
":",
"if",
"len",
"(",
"lstcol",
")",
"<",
"2",
":",
"lstcol",
"*=",
"2",
"trans",
"=",
"kargs",
".",
"get",
"(",
"\"trans\"",
",",
"lambda",
"x",
",",
"y",
",",
"z",
":",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
"while",
"1",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"lstcol",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"lstcol",
")",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"lstcol",
")",
")",
":",
"if",
"i",
"!=",
"j",
"or",
"j",
"!=",
"k",
"or",
"k",
"!=",
"i",
":",
"yield",
"trans",
"(",
"lstcol",
"[",
"(",
"i",
"+",
"j",
")",
"%",
"len",
"(",
"lstcol",
")",
"]",
",",
"lstcol",
"[",
"(",
"j",
"+",
"k",
")",
"%",
"len",
"(",
"lstcol",
")",
"]",
",",
"lstcol",
"[",
"(",
"k",
"+",
"i",
")",
"%",
"len",
"(",
"lstcol",
")",
"]",
")"
] | 48.75 | 15.25 |
def _parse(jsonOutput):
'''
Parses JSON response from Tika REST API server
:param jsonOutput: JSON output from Tika Server
:return: a dictionary having 'metadata' and 'content' values
'''
parsed={}
if not jsonOutput:
return parsed
parsed["status"] = jsonOutput[0]
if jsonOutput[1] == None or jsonOutput[1] == "":
return parsed
realJson = json.loads(jsonOutput[1])
content = ""
for js in realJson:
if "X-TIKA:content" in js:
content += js["X-TIKA:content"]
if content == "":
content = None
parsed["content"] = content
parsed["metadata"] = {}
for js in realJson:
for n in js:
if n != "X-TIKA:content":
if n in parsed["metadata"]:
if not isinstance(parsed["metadata"][n], list):
parsed["metadata"][n] = [parsed["metadata"][n]]
parsed["metadata"][n].append(js[n])
else:
parsed["metadata"][n] = js[n]
return parsed | [
"def",
"_parse",
"(",
"jsonOutput",
")",
":",
"parsed",
"=",
"{",
"}",
"if",
"not",
"jsonOutput",
":",
"return",
"parsed",
"parsed",
"[",
"\"status\"",
"]",
"=",
"jsonOutput",
"[",
"0",
"]",
"if",
"jsonOutput",
"[",
"1",
"]",
"==",
"None",
"or",
"jsonOutput",
"[",
"1",
"]",
"==",
"\"\"",
":",
"return",
"parsed",
"realJson",
"=",
"json",
".",
"loads",
"(",
"jsonOutput",
"[",
"1",
"]",
")",
"content",
"=",
"\"\"",
"for",
"js",
"in",
"realJson",
":",
"if",
"\"X-TIKA:content\"",
"in",
"js",
":",
"content",
"+=",
"js",
"[",
"\"X-TIKA:content\"",
"]",
"if",
"content",
"==",
"\"\"",
":",
"content",
"=",
"None",
"parsed",
"[",
"\"content\"",
"]",
"=",
"content",
"parsed",
"[",
"\"metadata\"",
"]",
"=",
"{",
"}",
"for",
"js",
"in",
"realJson",
":",
"for",
"n",
"in",
"js",
":",
"if",
"n",
"!=",
"\"X-TIKA:content\"",
":",
"if",
"n",
"in",
"parsed",
"[",
"\"metadata\"",
"]",
":",
"if",
"not",
"isinstance",
"(",
"parsed",
"[",
"\"metadata\"",
"]",
"[",
"n",
"]",
",",
"list",
")",
":",
"parsed",
"[",
"\"metadata\"",
"]",
"[",
"n",
"]",
"=",
"[",
"parsed",
"[",
"\"metadata\"",
"]",
"[",
"n",
"]",
"]",
"parsed",
"[",
"\"metadata\"",
"]",
"[",
"n",
"]",
".",
"append",
"(",
"js",
"[",
"n",
"]",
")",
"else",
":",
"parsed",
"[",
"\"metadata\"",
"]",
"[",
"n",
"]",
"=",
"js",
"[",
"n",
"]",
"return",
"parsed"
] | 27.864865 | 19.972973 |
def drp(points, epsilon):
    """ Douglas ramer peucker

    Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm

    Args:
        points (:obj:`list` of :obj:`Point`)
        epsilon (float): drp threshold
    Returns:
        :obj:`list` of :obj:`Point`
    """
    # Find the interior point farthest from the chord between the endpoints.
    farthest_idx = 0
    farthest_dist = 0.0
    for idx in range(1, len(points) - 1):
        d = point_line_distance(points[idx], points[0], points[-1])
        if d > farthest_dist:
            farthest_idx = idx
            farthest_dist = d
    if farthest_dist <= epsilon:
        # Every interior point is close enough: keep only the endpoints.
        return [points[0], points[-1]]
    # Recurse on both halves; drop the duplicated split point once.
    left = drp(points[:farthest_idx + 1], epsilon)
    right = drp(points[farthest_idx:], epsilon)
    return left[:-1] + right
"def",
"drp",
"(",
"points",
",",
"epsilon",
")",
":",
"dmax",
"=",
"0.0",
"index",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"points",
")",
"-",
"1",
")",
":",
"dist",
"=",
"point_line_distance",
"(",
"points",
"[",
"i",
"]",
",",
"points",
"[",
"0",
"]",
",",
"points",
"[",
"-",
"1",
"]",
")",
"if",
"dist",
">",
"dmax",
":",
"index",
"=",
"i",
"dmax",
"=",
"dist",
"if",
"dmax",
">",
"epsilon",
":",
"return",
"drp",
"(",
"points",
"[",
":",
"index",
"+",
"1",
"]",
",",
"epsilon",
")",
"[",
":",
"-",
"1",
"]",
"+",
"drp",
"(",
"points",
"[",
"index",
":",
"]",
",",
"epsilon",
")",
"else",
":",
"return",
"[",
"points",
"[",
"0",
"]",
",",
"points",
"[",
"-",
"1",
"]",
"]"
] | 26.583333 | 21.833333 |
def discover(timeout=DISCOVERY_TIMEOUT):
    """ Discover devices on the local network.

    :param timeout: Optional timeout in seconds.
    :returns: Mapping of discovered host address -> device info dict
        (keys: 'mac', 'imac', 'next', 'st', 'time', 'serverTime').
    """
    hosts = {}
    payload = MAGIC + DISCOVERY
    # Broadcast the discovery probe several times to compensate for UDP loss.
    for _ in range(RETRIES):
        _SOCKET.sendto(bytearray(payload), ('255.255.255.255', PORT))
    start = time.time()
    # Poll the shared receive buffer until the timeout elapses; responses
    # arrive asynchronously into _BUFFER (filled elsewhere — copy() avoids
    # mutation during iteration).
    while time.time() < start + timeout:
        for host, data in _BUFFER.copy().items():
            if not _is_discovery_response(data):
                continue
            if host not in hosts:
                _LOGGER.debug("Discovered device at %s", host)
                # Decode fixed-offset fields of the discovery response.
                entry = {}
                entry['mac'] = data[7:13]
                entry['imac'] = data[19:25]
                entry['next'] = 0
                entry['st'] = int(data[-1])
                entry['time'] = _device_time(data[37:41])
                entry['serverTime'] = int(time.time())
                hosts[host] = entry
    return hosts
"def",
"discover",
"(",
"timeout",
"=",
"DISCOVERY_TIMEOUT",
")",
":",
"hosts",
"=",
"{",
"}",
"payload",
"=",
"MAGIC",
"+",
"DISCOVERY",
"for",
"_",
"in",
"range",
"(",
"RETRIES",
")",
":",
"_SOCKET",
".",
"sendto",
"(",
"bytearray",
"(",
"payload",
")",
",",
"(",
"'255.255.255.255'",
",",
"PORT",
")",
")",
"start",
"=",
"time",
".",
"time",
"(",
")",
"while",
"time",
".",
"time",
"(",
")",
"<",
"start",
"+",
"timeout",
":",
"for",
"host",
",",
"data",
"in",
"_BUFFER",
".",
"copy",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"not",
"_is_discovery_response",
"(",
"data",
")",
":",
"continue",
"if",
"host",
"not",
"in",
"hosts",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Discovered device at %s\"",
",",
"host",
")",
"entry",
"=",
"{",
"}",
"entry",
"[",
"'mac'",
"]",
"=",
"data",
"[",
"7",
":",
"13",
"]",
"entry",
"[",
"'imac'",
"]",
"=",
"data",
"[",
"19",
":",
"25",
"]",
"entry",
"[",
"'next'",
"]",
"=",
"0",
"entry",
"[",
"'st'",
"]",
"=",
"int",
"(",
"data",
"[",
"-",
"1",
"]",
")",
"entry",
"[",
"'time'",
"]",
"=",
"_device_time",
"(",
"data",
"[",
"37",
":",
"41",
"]",
")",
"entry",
"[",
"'serverTime'",
"]",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"hosts",
"[",
"host",
"]",
"=",
"entry",
"return",
"hosts"
] | 39.115385 | 11.923077 |
def get_users(self):
"""Return the configuration of the users."""
users = {}
_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = {
"super-user": 15,
"superuser": 15,
"operator": 5,
"read-only": 1,
"unauthorized": 0,
}
_DEFAULT_USER_DETAILS = {"level": 0, "password": "", "sshkeys": []}
users_table = junos_views.junos_users_table(self.device)
users_table.get()
users_items = users_table.items()
root_user = self._get_root()
for user_entry in users_items:
username = user_entry[0]
user_details = _DEFAULT_USER_DETAILS.copy()
user_details.update({d[0]: d[1] for d in user_entry[1] if d[1]})
user_class = user_details.pop("class", "")
user_details = {
key: py23_compat.text_type(user_details[key])
for key in user_details.keys()
}
level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0)
user_details.update({"level": level})
user_details["sshkeys"] = [
user_details.pop(key)
for key in ["ssh_rsa", "ssh_dsa", "ssh_ecdsa"]
if user_details.get(key, "")
]
users[username] = user_details
users.update(root_user)
return users | [
"def",
"get_users",
"(",
"self",
")",
":",
"users",
"=",
"{",
"}",
"_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP",
"=",
"{",
"\"super-user\"",
":",
"15",
",",
"\"superuser\"",
":",
"15",
",",
"\"operator\"",
":",
"5",
",",
"\"read-only\"",
":",
"1",
",",
"\"unauthorized\"",
":",
"0",
",",
"}",
"_DEFAULT_USER_DETAILS",
"=",
"{",
"\"level\"",
":",
"0",
",",
"\"password\"",
":",
"\"\"",
",",
"\"sshkeys\"",
":",
"[",
"]",
"}",
"users_table",
"=",
"junos_views",
".",
"junos_users_table",
"(",
"self",
".",
"device",
")",
"users_table",
".",
"get",
"(",
")",
"users_items",
"=",
"users_table",
".",
"items",
"(",
")",
"root_user",
"=",
"self",
".",
"_get_root",
"(",
")",
"for",
"user_entry",
"in",
"users_items",
":",
"username",
"=",
"user_entry",
"[",
"0",
"]",
"user_details",
"=",
"_DEFAULT_USER_DETAILS",
".",
"copy",
"(",
")",
"user_details",
".",
"update",
"(",
"{",
"d",
"[",
"0",
"]",
":",
"d",
"[",
"1",
"]",
"for",
"d",
"in",
"user_entry",
"[",
"1",
"]",
"if",
"d",
"[",
"1",
"]",
"}",
")",
"user_class",
"=",
"user_details",
".",
"pop",
"(",
"\"class\"",
",",
"\"\"",
")",
"user_details",
"=",
"{",
"key",
":",
"py23_compat",
".",
"text_type",
"(",
"user_details",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"user_details",
".",
"keys",
"(",
")",
"}",
"level",
"=",
"_JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP",
".",
"get",
"(",
"user_class",
",",
"0",
")",
"user_details",
".",
"update",
"(",
"{",
"\"level\"",
":",
"level",
"}",
")",
"user_details",
"[",
"\"sshkeys\"",
"]",
"=",
"[",
"user_details",
".",
"pop",
"(",
"key",
")",
"for",
"key",
"in",
"[",
"\"ssh_rsa\"",
",",
"\"ssh_dsa\"",
",",
"\"ssh_ecdsa\"",
"]",
"if",
"user_details",
".",
"get",
"(",
"key",
",",
"\"\"",
")",
"]",
"users",
"[",
"username",
"]",
"=",
"user_details",
"users",
".",
"update",
"(",
"root_user",
")",
"return",
"users"
] | 35.552632 | 17.184211 |
def smooth_image(image, sigma, sigma_in_physical_coordinates=True, FWHM=False, max_kernel_width=32):
"""
Smooth an image
ANTsR function: `smoothImage`
Arguments
---------
image
Image to smooth
sigma
Smoothing factor. Can be scalar, in which case the same sigma is applied to each dimension, or a vector of length dim(inimage) to specify a unique smoothness for each dimension.
sigma_in_physical_coordinates : boolean
If true, the smoothing factor is in millimeters; if false, it is in pixels.
FWHM : boolean
If true, sigma is interpreted as the full-width-half-max (FWHM) of the filter, not the sigma of a Gaussian kernel.
max_kernel_width : scalar
Maximum kernel width
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> image = ants.image_read( ants.get_ants_data('r16'))
>>> simage = ants.smooth_image(image, (1.2,1.5))
"""
if image.components == 1:
return _smooth_image_helper(image, sigma, sigma_in_physical_coordinates, FWHM, max_kernel_width)
else:
imagelist = utils.split_channels(image)
newimages = []
for image in imagelist:
newimage = _smooth_image_helper(image, sigma, sigma_in_physical_coordinates, FWHM, max_kernel_width)
newimages.append(newimage)
return utils.merge_channels(newimages) | [
"def",
"smooth_image",
"(",
"image",
",",
"sigma",
",",
"sigma_in_physical_coordinates",
"=",
"True",
",",
"FWHM",
"=",
"False",
",",
"max_kernel_width",
"=",
"32",
")",
":",
"if",
"image",
".",
"components",
"==",
"1",
":",
"return",
"_smooth_image_helper",
"(",
"image",
",",
"sigma",
",",
"sigma_in_physical_coordinates",
",",
"FWHM",
",",
"max_kernel_width",
")",
"else",
":",
"imagelist",
"=",
"utils",
".",
"split_channels",
"(",
"image",
")",
"newimages",
"=",
"[",
"]",
"for",
"image",
"in",
"imagelist",
":",
"newimage",
"=",
"_smooth_image_helper",
"(",
"image",
",",
"sigma",
",",
"sigma_in_physical_coordinates",
",",
"FWHM",
",",
"max_kernel_width",
")",
"newimages",
".",
"append",
"(",
"newimage",
")",
"return",
"utils",
".",
"merge_channels",
"(",
"newimages",
")"
] | 33.47619 | 29.47619 |
def clean_pid_file(pidfile):
"""clean pid file.
"""
if pidfile and os.path.exists(pidfile):
os.unlink(pidfile) | [
"def",
"clean_pid_file",
"(",
"pidfile",
")",
":",
"if",
"pidfile",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"pidfile",
")",
":",
"os",
".",
"unlink",
"(",
"pidfile",
")"
] | 25.2 | 5.8 |
def get_redirect_args(self, request, callback):
"Get request parameters for redirect url."
callback = request.build_absolute_uri(callback)
args = {
'client_id': self.provider.consumer_key,
'redirect_uri': callback,
'response_type': 'code',
}
state = self.get_application_state(request, callback)
if state is not None:
args['state'] = state
request.session[self.session_key] = state
return args | [
"def",
"get_redirect_args",
"(",
"self",
",",
"request",
",",
"callback",
")",
":",
"callback",
"=",
"request",
".",
"build_absolute_uri",
"(",
"callback",
")",
"args",
"=",
"{",
"'client_id'",
":",
"self",
".",
"provider",
".",
"consumer_key",
",",
"'redirect_uri'",
":",
"callback",
",",
"'response_type'",
":",
"'code'",
",",
"}",
"state",
"=",
"self",
".",
"get_application_state",
"(",
"request",
",",
"callback",
")",
"if",
"state",
"is",
"not",
"None",
":",
"args",
"[",
"'state'",
"]",
"=",
"state",
"request",
".",
"session",
"[",
"self",
".",
"session_key",
"]",
"=",
"state",
"return",
"args"
] | 38.230769 | 13.769231 |
def get_host(self):
"""
Gets the host name or IP address.
:return: the host name or IP address.
"""
host = self.get_as_nullable_string("host")
host = host if host != None else self.get_as_nullable_string("ip")
return host | [
"def",
"get_host",
"(",
"self",
")",
":",
"host",
"=",
"self",
".",
"get_as_nullable_string",
"(",
"\"host\"",
")",
"host",
"=",
"host",
"if",
"host",
"!=",
"None",
"else",
"self",
".",
"get_as_nullable_string",
"(",
"\"ip\"",
")",
"return",
"host"
] | 30 | 14.666667 |
def alias(self):
"""
Returns the device's alias (name).
"""
try:
return self._properties.Get('org.bluez.Device1', 'Alias')
except dbus.exceptions.DBusException as e:
if e.get_dbus_name() == 'org.freedesktop.DBus.Error.UnknownObject':
# BlueZ sometimes doesn't provide an alias, we then simply return `None`.
# Might occur when device was deleted as the following issue points out:
# https://github.com/blueman-project/blueman/issues/460
return None
else:
raise _error_from_dbus_error(e) | [
"def",
"alias",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_properties",
".",
"Get",
"(",
"'org.bluez.Device1'",
",",
"'Alias'",
")",
"except",
"dbus",
".",
"exceptions",
".",
"DBusException",
"as",
"e",
":",
"if",
"e",
".",
"get_dbus_name",
"(",
")",
"==",
"'org.freedesktop.DBus.Error.UnknownObject'",
":",
"# BlueZ sometimes doesn't provide an alias, we then simply return `None`.",
"# Might occur when device was deleted as the following issue points out:",
"# https://github.com/blueman-project/blueman/issues/460",
"return",
"None",
"else",
":",
"raise",
"_error_from_dbus_error",
"(",
"e",
")"
] | 44.928571 | 21.642857 |
def yield_amd_require_string_arguments(
node, pos,
reserved_module=reserved_module, wrapped=define_wrapped):
"""
This yields only strings within the lists provided in the argument
list at the specified position from a function call.
Originally, this was implemented for yield a list of module names to
be imported as represented by this given node, which must be of the
FunctionCall type.
"""
for i, child in enumerate(node.args.items[pos]):
if isinstance(child, asttypes.String):
result = to_str(child)
if ((result not in reserved_module) and (
result != define_wrapped.get(i))):
yield result | [
"def",
"yield_amd_require_string_arguments",
"(",
"node",
",",
"pos",
",",
"reserved_module",
"=",
"reserved_module",
",",
"wrapped",
"=",
"define_wrapped",
")",
":",
"for",
"i",
",",
"child",
"in",
"enumerate",
"(",
"node",
".",
"args",
".",
"items",
"[",
"pos",
"]",
")",
":",
"if",
"isinstance",
"(",
"child",
",",
"asttypes",
".",
"String",
")",
":",
"result",
"=",
"to_str",
"(",
"child",
")",
"if",
"(",
"(",
"result",
"not",
"in",
"reserved_module",
")",
"and",
"(",
"result",
"!=",
"define_wrapped",
".",
"get",
"(",
"i",
")",
")",
")",
":",
"yield",
"result"
] | 38.555556 | 17.666667 |
def path(self, path):
"""
Creates a path based on the location attribute of the backend and the path argument
of the function. If the path argument is an absolute path the path is returned.
:param path: The path that should be joined with the backends location.
"""
if os.path.isabs(path):
return path
return os.path.join(self.location, path) | [
"def",
"path",
"(",
"self",
",",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"isabs",
"(",
"path",
")",
":",
"return",
"path",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"location",
",",
"path",
")"
] | 40.2 | 23 |
def send_media(self, media_id, user_ids, text='', thread_id=None):
"""
:param media_id:
:param self: bot
:param text: text of message
:param user_ids: list of user_ids for creating group or one user_id for send to one person
:param thread_id: thread_id
"""
user_ids = _get_user_ids(self, user_ids)
if not isinstance(text, str) and not isinstance(user_ids, (list, str)):
self.logger.error('Text must be an string, user_ids must be an list or string')
return False
if self.reached_limit('messages'):
self.logger.info("Out of messages for today.")
return False
media = self.get_media_info(media_id)
media = media[0] if isinstance(media, list) else media
self.delay('message')
if self.api.send_direct_item(
'media_share',
user_ids,
text=text,
thread=thread_id,
media_type=media.get('media_type'),
media_id=media.get('id')
):
self.total['messages'] += 1
return True
self.logger.info("Message to {user_ids} wasn't sent".format(user_ids=user_ids))
return False | [
"def",
"send_media",
"(",
"self",
",",
"media_id",
",",
"user_ids",
",",
"text",
"=",
"''",
",",
"thread_id",
"=",
"None",
")",
":",
"user_ids",
"=",
"_get_user_ids",
"(",
"self",
",",
"user_ids",
")",
"if",
"not",
"isinstance",
"(",
"text",
",",
"str",
")",
"and",
"not",
"isinstance",
"(",
"user_ids",
",",
"(",
"list",
",",
"str",
")",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'Text must be an string, user_ids must be an list or string'",
")",
"return",
"False",
"if",
"self",
".",
"reached_limit",
"(",
"'messages'",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"\"Out of messages for today.\"",
")",
"return",
"False",
"media",
"=",
"self",
".",
"get_media_info",
"(",
"media_id",
")",
"media",
"=",
"media",
"[",
"0",
"]",
"if",
"isinstance",
"(",
"media",
",",
"list",
")",
"else",
"media",
"self",
".",
"delay",
"(",
"'message'",
")",
"if",
"self",
".",
"api",
".",
"send_direct_item",
"(",
"'media_share'",
",",
"user_ids",
",",
"text",
"=",
"text",
",",
"thread",
"=",
"thread_id",
",",
"media_type",
"=",
"media",
".",
"get",
"(",
"'media_type'",
")",
",",
"media_id",
"=",
"media",
".",
"get",
"(",
"'id'",
")",
")",
":",
"self",
".",
"total",
"[",
"'messages'",
"]",
"+=",
"1",
"return",
"True",
"self",
".",
"logger",
".",
"info",
"(",
"\"Message to {user_ids} wasn't sent\"",
".",
"format",
"(",
"user_ids",
"=",
"user_ids",
")",
")",
"return",
"False"
] | 32.969697 | 19.878788 |
def main():
"""Handles external calling for this module
Execute this python module and provide the args shown below to
external call this module to send Slack messages with attachments!
:return: None
"""
log = logging.getLogger(mod_logger + '.main')
parser = argparse.ArgumentParser(description='This Python module allows '
'sending Slack messages.')
parser.add_argument('-u', '--url', help='Slack webhook URL', required=True)
parser.add_argument('-t', '--text', help='Text of the message', required=True)
parser.add_argument('-n', '--channel', help='Slack channel', required=True)
parser.add_argument('-i', '--icon', help='URL for the Slack icon', required=False)
parser.add_argument('-c', '--color', help='Color of the Slack post', required=False)
parser.add_argument('-a', '--attachment', help='Text for the Slack Attachment', required=False)
parser.add_argument('-p', '--pretext', help='Pretext for the Slack attachment', required=False)
args = parser.parse_args()
# Create the SlackMessage object
try:
slack_msg = SlackMessage(args.url, channel=args.channel, icon_url=args.icon, text=args.text)
except ValueError as e:
msg = 'Unable to create slack message\n{ex}'.format(ex=e)
log.error(msg)
print(msg)
return
# If provided, create the SlackAttachment object
if args.attachment:
try:
slack_att = SlackAttachment(fallback=args.attachment, color=args.color,
pretext=args.pretext, text=args.attachment)
except ValueError:
_, ex, trace = sys.exc_info()
log.error('Unable to create slack attachment\n{e}'.format(e=str(ex)))
return
slack_msg.add_attachment(slack_att)
# Send Slack message
try:
slack_msg.send()
except(TypeError, ValueError, IOError):
_, ex, trace = sys.exc_info()
log.error('Unable to send Slack message\n{e}'.format(e=str(ex)))
return
log.debug('Your message has been Slacked successfully!') | [
"def",
"main",
"(",
")",
":",
"log",
"=",
"logging",
".",
"getLogger",
"(",
"mod_logger",
"+",
"'.main'",
")",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'This Python module allows '",
"'sending Slack messages.'",
")",
"parser",
".",
"add_argument",
"(",
"'-u'",
",",
"'--url'",
",",
"help",
"=",
"'Slack webhook URL'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--text'",
",",
"help",
"=",
"'Text of the message'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-n'",
",",
"'--channel'",
",",
"help",
"=",
"'Slack channel'",
",",
"required",
"=",
"True",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--icon'",
",",
"help",
"=",
"'URL for the Slack icon'",
",",
"required",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-c'",
",",
"'--color'",
",",
"help",
"=",
"'Color of the Slack post'",
",",
"required",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-a'",
",",
"'--attachment'",
",",
"help",
"=",
"'Text for the Slack Attachment'",
",",
"required",
"=",
"False",
")",
"parser",
".",
"add_argument",
"(",
"'-p'",
",",
"'--pretext'",
",",
"help",
"=",
"'Pretext for the Slack attachment'",
",",
"required",
"=",
"False",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"# Create the SlackMessage object",
"try",
":",
"slack_msg",
"=",
"SlackMessage",
"(",
"args",
".",
"url",
",",
"channel",
"=",
"args",
".",
"channel",
",",
"icon_url",
"=",
"args",
".",
"icon",
",",
"text",
"=",
"args",
".",
"text",
")",
"except",
"ValueError",
"as",
"e",
":",
"msg",
"=",
"'Unable to create slack message\\n{ex}'",
".",
"format",
"(",
"ex",
"=",
"e",
")",
"log",
".",
"error",
"(",
"msg",
")",
"print",
"(",
"msg",
")",
"return",
"# If provided, create the SlackAttachment object",
"if",
"args",
".",
"attachment",
":",
"try",
":",
"slack_att",
"=",
"SlackAttachment",
"(",
"fallback",
"=",
"args",
".",
"attachment",
",",
"color",
"=",
"args",
".",
"color",
",",
"pretext",
"=",
"args",
".",
"pretext",
",",
"text",
"=",
"args",
".",
"attachment",
")",
"except",
"ValueError",
":",
"_",
",",
"ex",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"log",
".",
"error",
"(",
"'Unable to create slack attachment\\n{e}'",
".",
"format",
"(",
"e",
"=",
"str",
"(",
"ex",
")",
")",
")",
"return",
"slack_msg",
".",
"add_attachment",
"(",
"slack_att",
")",
"# Send Slack message",
"try",
":",
"slack_msg",
".",
"send",
"(",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"IOError",
")",
":",
"_",
",",
"ex",
",",
"trace",
"=",
"sys",
".",
"exc_info",
"(",
")",
"log",
".",
"error",
"(",
"'Unable to send Slack message\\n{e}'",
".",
"format",
"(",
"e",
"=",
"str",
"(",
"ex",
")",
")",
")",
"return",
"log",
".",
"debug",
"(",
"'Your message has been Slacked successfully!'",
")"
] | 43.645833 | 27.145833 |
def loads(s, cls=BinaryQuadraticModel, vartype=None):
"""Load a COOrdinate formatted binary quadratic model from a string."""
return load(s.split('\n'), cls=cls, vartype=vartype) | [
"def",
"loads",
"(",
"s",
",",
"cls",
"=",
"BinaryQuadraticModel",
",",
"vartype",
"=",
"None",
")",
":",
"return",
"load",
"(",
"s",
".",
"split",
"(",
"'\\n'",
")",
",",
"cls",
"=",
"cls",
",",
"vartype",
"=",
"vartype",
")"
] | 61.333333 | 9.666667 |
def _set_snps(self, snps, build=37):
""" Set `_snps` and `_build` properties of this ``Individual``.
Notes
-----
Intended to be used internally to `lineage`.
Parameters
----------
snps : pandas.DataFrame
individual's genetic data normalized for use with `lineage`
build : int
build of this ``Individual``'s SNPs
"""
self._snps = snps
self._build = build | [
"def",
"_set_snps",
"(",
"self",
",",
"snps",
",",
"build",
"=",
"37",
")",
":",
"self",
".",
"_snps",
"=",
"snps",
"self",
".",
"_build",
"=",
"build"
] | 28.25 | 18.125 |
def parse_frog(lines):
"""
Interpret the output of the frog parser.
Input should be an iterable of lines (i.e. the output of call_frog)
Result is a sequence of dicts representing the tokens
"""
sid = 0
for i, line in enumerate(lines):
if not line:
# end of sentence marker
sid += 1
else:
parts = line.split("\t")
tid, token, lemma, morph, pos, conf, ne, _, parent, rel = parts
if rel:
rel = (rel, int(parent) - 1)
word = u' '.join(token.split(u'_'))
result = dict(id=i, sentence=sid, word=word, lemma=lemma, pos=pos,
pos_confidence=float(conf), rel=rel)
if ne != 'O':
# NER label from BIO tags
result["ne"] = ne.split('_', 1)[0][2:]
yield result | [
"def",
"parse_frog",
"(",
"lines",
")",
":",
"sid",
"=",
"0",
"for",
"i",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"not",
"line",
":",
"# end of sentence marker",
"sid",
"+=",
"1",
"else",
":",
"parts",
"=",
"line",
".",
"split",
"(",
"\"\\t\"",
")",
"tid",
",",
"token",
",",
"lemma",
",",
"morph",
",",
"pos",
",",
"conf",
",",
"ne",
",",
"_",
",",
"parent",
",",
"rel",
"=",
"parts",
"if",
"rel",
":",
"rel",
"=",
"(",
"rel",
",",
"int",
"(",
"parent",
")",
"-",
"1",
")",
"word",
"=",
"u' '",
".",
"join",
"(",
"token",
".",
"split",
"(",
"u'_'",
")",
")",
"result",
"=",
"dict",
"(",
"id",
"=",
"i",
",",
"sentence",
"=",
"sid",
",",
"word",
"=",
"word",
",",
"lemma",
"=",
"lemma",
",",
"pos",
"=",
"pos",
",",
"pos_confidence",
"=",
"float",
"(",
"conf",
")",
",",
"rel",
"=",
"rel",
")",
"if",
"ne",
"!=",
"'O'",
":",
"# NER label from BIO tags",
"result",
"[",
"\"ne\"",
"]",
"=",
"ne",
".",
"split",
"(",
"'_'",
",",
"1",
")",
"[",
"0",
"]",
"[",
"2",
":",
"]",
"yield",
"result"
] | 36.913043 | 15.26087 |
def trim_baseline_of_removed_secrets(results, baseline, filelist):
"""
NOTE: filelist is not a comprehensive list of all files in the repo
(because we can't be sure whether --all-files is passed in as a
parameter to pre-commit).
:type results: SecretsCollection
:type baseline: SecretsCollection
:type filelist: list(str)
:param filelist: filenames that are scanned.
:rtype: bool
:returns: True if baseline was updated
"""
updated = False
for filename in filelist:
if filename not in baseline.data:
# Nothing to modify, because not even there in the first place.
continue
if filename not in results.data:
# All secrets relating to that file was removed.
# We know this because:
# 1. It's a file that was scanned (in filelist)
# 2. It was in the baseline
# 3. It has no results now.
del baseline.data[filename]
updated = True
continue
# We clone the baseline, so that we can modify the baseline,
# without messing up the iteration.
for baseline_secret in baseline.data[filename].copy():
new_secret_found = results.get_secret(
filename,
baseline_secret.secret_hash,
baseline_secret.type,
)
if not new_secret_found:
# No longer in results, so can remove from baseline
old_secret_to_delete = baseline.get_secret(
filename,
baseline_secret.secret_hash,
baseline_secret.type,
)
del baseline.data[filename][old_secret_to_delete]
updated = True
elif new_secret_found.lineno != baseline_secret.lineno:
# Secret moved around, should update baseline with new location
old_secret_to_update = baseline.get_secret(
filename,
baseline_secret.secret_hash,
baseline_secret.type,
)
old_secret_to_update.lineno = new_secret_found.lineno
updated = True
return updated | [
"def",
"trim_baseline_of_removed_secrets",
"(",
"results",
",",
"baseline",
",",
"filelist",
")",
":",
"updated",
"=",
"False",
"for",
"filename",
"in",
"filelist",
":",
"if",
"filename",
"not",
"in",
"baseline",
".",
"data",
":",
"# Nothing to modify, because not even there in the first place.",
"continue",
"if",
"filename",
"not",
"in",
"results",
".",
"data",
":",
"# All secrets relating to that file was removed.",
"# We know this because:",
"# 1. It's a file that was scanned (in filelist)",
"# 2. It was in the baseline",
"# 3. It has no results now.",
"del",
"baseline",
".",
"data",
"[",
"filename",
"]",
"updated",
"=",
"True",
"continue",
"# We clone the baseline, so that we can modify the baseline,",
"# without messing up the iteration.",
"for",
"baseline_secret",
"in",
"baseline",
".",
"data",
"[",
"filename",
"]",
".",
"copy",
"(",
")",
":",
"new_secret_found",
"=",
"results",
".",
"get_secret",
"(",
"filename",
",",
"baseline_secret",
".",
"secret_hash",
",",
"baseline_secret",
".",
"type",
",",
")",
"if",
"not",
"new_secret_found",
":",
"# No longer in results, so can remove from baseline",
"old_secret_to_delete",
"=",
"baseline",
".",
"get_secret",
"(",
"filename",
",",
"baseline_secret",
".",
"secret_hash",
",",
"baseline_secret",
".",
"type",
",",
")",
"del",
"baseline",
".",
"data",
"[",
"filename",
"]",
"[",
"old_secret_to_delete",
"]",
"updated",
"=",
"True",
"elif",
"new_secret_found",
".",
"lineno",
"!=",
"baseline_secret",
".",
"lineno",
":",
"# Secret moved around, should update baseline with new location",
"old_secret_to_update",
"=",
"baseline",
".",
"get_secret",
"(",
"filename",
",",
"baseline_secret",
".",
"secret_hash",
",",
"baseline_secret",
".",
"type",
",",
")",
"old_secret_to_update",
".",
"lineno",
"=",
"new_secret_found",
".",
"lineno",
"updated",
"=",
"True",
"return",
"updated"
] | 35.95082 | 17.491803 |
def update_vertices(self, vertices):
"""
Update the triangle vertices.
"""
vertices = np.array(vertices, dtype=np.float32)
self._vbo_v.set_data(vertices) | [
"def",
"update_vertices",
"(",
"self",
",",
"vertices",
")",
":",
"vertices",
"=",
"np",
".",
"array",
"(",
"vertices",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"self",
".",
"_vbo_v",
".",
"set_data",
"(",
"vertices",
")"
] | 26.857143 | 9.142857 |
def find_team(self, color: str = None):
"""Find the :class:`~.Team` with the given properties
Returns the team whose attributes match the given properties, or
``None`` if no match is found.
:param color: The :class:`~.Team.Color` of the Team
"""
if color != None:
if color is Team.Color.BLUE:
return self.blue_team
else:
return self.orange_team
else:
return None | [
"def",
"find_team",
"(",
"self",
",",
"color",
":",
"str",
"=",
"None",
")",
":",
"if",
"color",
"!=",
"None",
":",
"if",
"color",
"is",
"Team",
".",
"Color",
".",
"BLUE",
":",
"return",
"self",
".",
"blue_team",
"else",
":",
"return",
"self",
".",
"orange_team",
"else",
":",
"return",
"None"
] | 31.6 | 14.666667 |
def _put_bucket_policy(self):
"""Attach a bucket policy to app bucket."""
if self.s3props['bucket_policy']:
policy_str = json.dumps(self.s3props['bucket_policy'])
_response = self.s3client.put_bucket_policy(Bucket=self.bucket, Policy=policy_str)
else:
_response = self.s3client.delete_bucket_policy(Bucket=self.bucket)
LOG.debug('Response adding bucket policy: %s', _response)
LOG.info('S3 Bucket Policy Attached') | [
"def",
"_put_bucket_policy",
"(",
"self",
")",
":",
"if",
"self",
".",
"s3props",
"[",
"'bucket_policy'",
"]",
":",
"policy_str",
"=",
"json",
".",
"dumps",
"(",
"self",
".",
"s3props",
"[",
"'bucket_policy'",
"]",
")",
"_response",
"=",
"self",
".",
"s3client",
".",
"put_bucket_policy",
"(",
"Bucket",
"=",
"self",
".",
"bucket",
",",
"Policy",
"=",
"policy_str",
")",
"else",
":",
"_response",
"=",
"self",
".",
"s3client",
".",
"delete_bucket_policy",
"(",
"Bucket",
"=",
"self",
".",
"bucket",
")",
"LOG",
".",
"debug",
"(",
"'Response adding bucket policy: %s'",
",",
"_response",
")",
"LOG",
".",
"info",
"(",
"'S3 Bucket Policy Attached'",
")"
] | 53.555556 | 20.777778 |
def list_semod():
'''
Return a structure listing all of the selinux modules on the system and
what state they are in
CLI Example:
.. code-block:: bash
salt '*' selinux.list_semod
.. versionadded:: 2016.3.0
'''
helptext = __salt__['cmd.run']('semodule -h').splitlines()
semodule_version = ''
for line in helptext:
if line.strip().startswith('full'):
semodule_version = 'new'
if semodule_version == 'new':
mdata = __salt__['cmd.run']('semodule -lfull').splitlines()
ret = {}
for line in mdata:
if not line.strip():
continue
comps = line.split()
if len(comps) == 4:
ret[comps[1]] = {'Enabled': False,
'Version': None}
else:
ret[comps[1]] = {'Enabled': True,
'Version': None}
else:
mdata = __salt__['cmd.run']('semodule -l').splitlines()
ret = {}
for line in mdata:
if not line.strip():
continue
comps = line.split()
if len(comps) == 3:
ret[comps[0]] = {'Enabled': False,
'Version': comps[1]}
else:
ret[comps[0]] = {'Enabled': True,
'Version': comps[1]}
return ret | [
"def",
"list_semod",
"(",
")",
":",
"helptext",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'semodule -h'",
")",
".",
"splitlines",
"(",
")",
"semodule_version",
"=",
"''",
"for",
"line",
"in",
"helptext",
":",
"if",
"line",
".",
"strip",
"(",
")",
".",
"startswith",
"(",
"'full'",
")",
":",
"semodule_version",
"=",
"'new'",
"if",
"semodule_version",
"==",
"'new'",
":",
"mdata",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'semodule -lfull'",
")",
".",
"splitlines",
"(",
")",
"ret",
"=",
"{",
"}",
"for",
"line",
"in",
"mdata",
":",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"comps",
")",
"==",
"4",
":",
"ret",
"[",
"comps",
"[",
"1",
"]",
"]",
"=",
"{",
"'Enabled'",
":",
"False",
",",
"'Version'",
":",
"None",
"}",
"else",
":",
"ret",
"[",
"comps",
"[",
"1",
"]",
"]",
"=",
"{",
"'Enabled'",
":",
"True",
",",
"'Version'",
":",
"None",
"}",
"else",
":",
"mdata",
"=",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'semodule -l'",
")",
".",
"splitlines",
"(",
")",
"ret",
"=",
"{",
"}",
"for",
"line",
"in",
"mdata",
":",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"continue",
"comps",
"=",
"line",
".",
"split",
"(",
")",
"if",
"len",
"(",
"comps",
")",
"==",
"3",
":",
"ret",
"[",
"comps",
"[",
"0",
"]",
"]",
"=",
"{",
"'Enabled'",
":",
"False",
",",
"'Version'",
":",
"comps",
"[",
"1",
"]",
"}",
"else",
":",
"ret",
"[",
"comps",
"[",
"0",
"]",
"]",
"=",
"{",
"'Enabled'",
":",
"True",
",",
"'Version'",
":",
"comps",
"[",
"1",
"]",
"}",
"return",
"ret"
] | 29.826087 | 18.521739 |
def add_l2_normalize(self, name, input_name, output_name, epsilon = 1e-5):
"""
Add L2 normalize layer. Normalizes the input by the L2 norm, i.e. divides by the
the square root of the sum of squares of all elements of the input along C, H and W dimensions.
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
epsilon: float
small bias to avoid division by zero.
See Also
--------
add_mvn, add_lrn
"""
spec = self.spec
nn_spec = self.nn_spec
# Add a new layer
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name)
spec_layer_params = spec_layer.l2normalize
spec_layer_params.epsilon = epsilon | [
"def",
"add_l2_normalize",
"(",
"self",
",",
"name",
",",
"input_name",
",",
"output_name",
",",
"epsilon",
"=",
"1e-5",
")",
":",
"spec",
"=",
"self",
".",
"spec",
"nn_spec",
"=",
"self",
".",
"nn_spec",
"# Add a new layer",
"spec_layer",
"=",
"nn_spec",
".",
"layers",
".",
"add",
"(",
")",
"spec_layer",
".",
"name",
"=",
"name",
"spec_layer",
".",
"input",
".",
"append",
"(",
"input_name",
")",
"spec_layer",
".",
"output",
".",
"append",
"(",
"output_name",
")",
"spec_layer_params",
"=",
"spec_layer",
".",
"l2normalize",
"spec_layer_params",
".",
"epsilon",
"=",
"epsilon"
] | 27.8 | 21.342857 |
def _cut_tree(tree, n_clusters, membs):
""" Cut the tree to get desired number of clusters as n_clusters
2 <= n_desired <= n_clusters
"""
## starting from root,
## a node is added to the cut_set or
## its children are added to node_set
assert(n_clusters >= 2)
assert(n_clusters <= len(tree.leaves()))
cut_centers = dict() #np.empty(shape=(n_clusters, ndim), dtype=float)
for i in range(n_clusters-1):
if i==0:
search_set = set(tree.children(0))
node_set,cut_set = set(), set()
else:
search_set = node_set.union(cut_set)
node_set,cut_set = set(), set()
if i+2 == n_clusters:
cut_set = search_set
else:
for _ in range(len(search_set)):
n = search_set.pop()
if n.data['ilev'] is None or n.data['ilev']>i+2:
cut_set.add(n)
else:
nid = n.identifier
if n.data['ilev']-2==i:
node_set = node_set.union(set(tree.children(nid)))
conv_membs = membs.copy()
for node in cut_set:
nid = node.identifier
label = node.data['label']
cut_centers[label] = node.data['center']
sub_leaves = tree.leaves(nid)
for leaf in sub_leaves:
indx = np.where(conv_membs == leaf)[0]
conv_membs[indx] = nid
return(conv_membs, cut_centers) | [
"def",
"_cut_tree",
"(",
"tree",
",",
"n_clusters",
",",
"membs",
")",
":",
"## starting from root,",
"## a node is added to the cut_set or ",
"## its children are added to node_set",
"assert",
"(",
"n_clusters",
">=",
"2",
")",
"assert",
"(",
"n_clusters",
"<=",
"len",
"(",
"tree",
".",
"leaves",
"(",
")",
")",
")",
"cut_centers",
"=",
"dict",
"(",
")",
"#np.empty(shape=(n_clusters, ndim), dtype=float)",
"for",
"i",
"in",
"range",
"(",
"n_clusters",
"-",
"1",
")",
":",
"if",
"i",
"==",
"0",
":",
"search_set",
"=",
"set",
"(",
"tree",
".",
"children",
"(",
"0",
")",
")",
"node_set",
",",
"cut_set",
"=",
"set",
"(",
")",
",",
"set",
"(",
")",
"else",
":",
"search_set",
"=",
"node_set",
".",
"union",
"(",
"cut_set",
")",
"node_set",
",",
"cut_set",
"=",
"set",
"(",
")",
",",
"set",
"(",
")",
"if",
"i",
"+",
"2",
"==",
"n_clusters",
":",
"cut_set",
"=",
"search_set",
"else",
":",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"search_set",
")",
")",
":",
"n",
"=",
"search_set",
".",
"pop",
"(",
")",
"if",
"n",
".",
"data",
"[",
"'ilev'",
"]",
"is",
"None",
"or",
"n",
".",
"data",
"[",
"'ilev'",
"]",
">",
"i",
"+",
"2",
":",
"cut_set",
".",
"add",
"(",
"n",
")",
"else",
":",
"nid",
"=",
"n",
".",
"identifier",
"if",
"n",
".",
"data",
"[",
"'ilev'",
"]",
"-",
"2",
"==",
"i",
":",
"node_set",
"=",
"node_set",
".",
"union",
"(",
"set",
"(",
"tree",
".",
"children",
"(",
"nid",
")",
")",
")",
"conv_membs",
"=",
"membs",
".",
"copy",
"(",
")",
"for",
"node",
"in",
"cut_set",
":",
"nid",
"=",
"node",
".",
"identifier",
"label",
"=",
"node",
".",
"data",
"[",
"'label'",
"]",
"cut_centers",
"[",
"label",
"]",
"=",
"node",
".",
"data",
"[",
"'center'",
"]",
"sub_leaves",
"=",
"tree",
".",
"leaves",
"(",
"nid",
")",
"for",
"leaf",
"in",
"sub_leaves",
":",
"indx",
"=",
"np",
".",
"where",
"(",
"conv_membs",
"==",
"leaf",
")",
"[",
"0",
"]",
"conv_membs",
"[",
"indx",
"]",
"=",
"nid",
"return",
"(",
"conv_membs",
",",
"cut_centers",
")"
] | 32.863636 | 13.477273 |
def get_article(doi, output_format='txt'):
    """Get the full body of an article from Elsevier.

    Parameters
    ----------
    doi : str
        The doi for the desired article.
    output_format : 'txt' or 'xml'
        The desired format for the output. Selecting 'txt' (default) strips
        all xml tags and joins the pieces of text in the main text, while
        'xml' simply takes the tag containing the body of the article and
        returns it as is. In the latter case, downstream code needs to be
        able to interpret Elsevier's XML format.

    Returns
    -------
    content : str or None
        Either text content or xml, as described above, for the given doi;
        None when the download produced nothing.
    """
    raw_xml = download_article(doi)
    # Only strip to plain text when requested and a document was actually fetched.
    if raw_xml is None or output_format != 'txt':
        return raw_xml
    return extract_text(raw_xml)
"def",
"get_article",
"(",
"doi",
",",
"output_format",
"=",
"'txt'",
")",
":",
"xml_string",
"=",
"download_article",
"(",
"doi",
")",
"if",
"output_format",
"==",
"'txt'",
"and",
"xml_string",
"is",
"not",
"None",
":",
"text",
"=",
"extract_text",
"(",
"xml_string",
")",
"return",
"text",
"return",
"xml_string"
] | 35.25 | 20.25 |
def parse_properties(node):
    """Parse a Tiled xml node and return a dict that represents a tiled "property".

    :param node: etree element that may contain <properties> children
    :return: dict mapping property name -> value; when a property carries a
        ``type`` attribute naming a Python built-in (``int``, ``float``, ...)
        the value is cast to that type, otherwise the raw string is kept
    """
    d = dict()
    for child in node.findall('properties'):
        for subnode in child.findall('property'):
            cls = None
            try:
                if "type" in subnode.keys():
                    module = importlib.import_module('builtins')
                    cls = getattr(module, subnode.get("type"))
            except AttributeError:
                # BUGFIX: the previous message contained a broken "[}"
                # placeholder and never said which type failed to resolve.
                logger.info("Type %s not a built-in type. Defaulting to "
                            "string-cast.", subnode.get("type"))
            d[subnode.get('name')] = cls(subnode.get('value')) if cls is not None else subnode.get('value')
    return d
"def",
"parse_properties",
"(",
"node",
")",
":",
"d",
"=",
"dict",
"(",
")",
"for",
"child",
"in",
"node",
".",
"findall",
"(",
"'properties'",
")",
":",
"for",
"subnode",
"in",
"child",
".",
"findall",
"(",
"'property'",
")",
":",
"cls",
"=",
"None",
"try",
":",
"if",
"\"type\"",
"in",
"subnode",
".",
"keys",
"(",
")",
":",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"'builtins'",
")",
"cls",
"=",
"getattr",
"(",
"module",
",",
"subnode",
".",
"get",
"(",
"\"type\"",
")",
")",
"except",
"AttributeError",
":",
"logger",
".",
"info",
"(",
"\"Type [} Not a built-in type. Defaulting to string-cast.\"",
")",
"d",
"[",
"subnode",
".",
"get",
"(",
"'name'",
")",
"]",
"=",
"cls",
"(",
"subnode",
".",
"get",
"(",
"'value'",
")",
")",
"if",
"cls",
"is",
"not",
"None",
"else",
"subnode",
".",
"get",
"(",
"'value'",
")",
"return",
"d"
] | 39.888889 | 20.222222 |
def full_installation(self, location=None):
    """Return the full details of the installation.

    :param location: optional location identifier; when omitted, the
        default resolved by ``self._get_location`` is used
    :return: parsed JSON body describing the installation, including its
        temperature control systems
    """
    endpoint = ("https://tccna.honeywell.com/WebAPI/emea/api/v1/location"
                "/%s/installationInfo?includeTemperatureControlSystems=True")
    url = endpoint % self._get_location(location)
    reply = requests.get(url, headers=self._headers())
    # Surface HTTP errors to the caller instead of returning bad JSON.
    reply.raise_for_status()
    return reply.json()
"def",
"full_installation",
"(",
"self",
",",
"location",
"=",
"None",
")",
":",
"url",
"=",
"(",
"\"https://tccna.honeywell.com/WebAPI/emea/api/v1/location\"",
"\"/%s/installationInfo?includeTemperatureControlSystems=True\"",
"%",
"self",
".",
"_get_location",
"(",
"location",
")",
")",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"headers",
"=",
"self",
".",
"_headers",
"(",
")",
")",
"response",
".",
"raise_for_status",
"(",
")",
"return",
"response",
".",
"json",
"(",
")"
] | 42 | 19.2 |
def _parse_bands(lines, n_start):
"""Parse band structure from cp2k output"""
kpoints = []
labels = []
bands_s1 = []
bands_s2 = []
known_kpoints = {}
pattern = re.compile(".*?Nr.*?Spin.*?K-Point.*?", re.DOTALL)
selected_lines = lines[n_start:]
for current_line, line in enumerate(selected_lines):
splitted = line.split()
if "KPOINTS| Special K-Point" in line:
kpoint = tuple(map(float, splitted[-3:]))
if " ".join(splitted[-5:-3]) != "not specified":
label = splitted[-4]
known_kpoints[kpoint] = label
elif pattern.match(line):
spin = int(splitted[3])
kpoint = tuple(map(float, splitted[-3:]))
kpoint_n_lines = int(math.ceil(int(selected_lines[current_line + 1]) / 4.))
band = list(
map(float, ' '.join(selected_lines[current_line + 2:current_line + 2 + kpoint_n_lines]).split()))
if spin == 1:
if kpoint in known_kpoints:
labels.append((len(kpoints), known_kpoints[kpoint]))
kpoints.append(kpoint)
bands_s1.append(band)
elif spin == 2:
bands_s2.append(band)
if bands_s2:
bands = [bands_s1, bands_s2]
else:
bands = bands_s1
return np.array(kpoints), labels, np.array(bands) | [
"def",
"_parse_bands",
"(",
"lines",
",",
"n_start",
")",
":",
"kpoints",
"=",
"[",
"]",
"labels",
"=",
"[",
"]",
"bands_s1",
"=",
"[",
"]",
"bands_s2",
"=",
"[",
"]",
"known_kpoints",
"=",
"{",
"}",
"pattern",
"=",
"re",
".",
"compile",
"(",
"\".*?Nr.*?Spin.*?K-Point.*?\"",
",",
"re",
".",
"DOTALL",
")",
"selected_lines",
"=",
"lines",
"[",
"n_start",
":",
"]",
"for",
"current_line",
",",
"line",
"in",
"enumerate",
"(",
"selected_lines",
")",
":",
"splitted",
"=",
"line",
".",
"split",
"(",
")",
"if",
"\"KPOINTS| Special K-Point\"",
"in",
"line",
":",
"kpoint",
"=",
"tuple",
"(",
"map",
"(",
"float",
",",
"splitted",
"[",
"-",
"3",
":",
"]",
")",
")",
"if",
"\" \"",
".",
"join",
"(",
"splitted",
"[",
"-",
"5",
":",
"-",
"3",
"]",
")",
"!=",
"\"not specified\"",
":",
"label",
"=",
"splitted",
"[",
"-",
"4",
"]",
"known_kpoints",
"[",
"kpoint",
"]",
"=",
"label",
"elif",
"pattern",
".",
"match",
"(",
"line",
")",
":",
"spin",
"=",
"int",
"(",
"splitted",
"[",
"3",
"]",
")",
"kpoint",
"=",
"tuple",
"(",
"map",
"(",
"float",
",",
"splitted",
"[",
"-",
"3",
":",
"]",
")",
")",
"kpoint_n_lines",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"int",
"(",
"selected_lines",
"[",
"current_line",
"+",
"1",
"]",
")",
"/",
"4.",
")",
")",
"band",
"=",
"list",
"(",
"map",
"(",
"float",
",",
"' '",
".",
"join",
"(",
"selected_lines",
"[",
"current_line",
"+",
"2",
":",
"current_line",
"+",
"2",
"+",
"kpoint_n_lines",
"]",
")",
".",
"split",
"(",
")",
")",
")",
"if",
"spin",
"==",
"1",
":",
"if",
"kpoint",
"in",
"known_kpoints",
":",
"labels",
".",
"append",
"(",
"(",
"len",
"(",
"kpoints",
")",
",",
"known_kpoints",
"[",
"kpoint",
"]",
")",
")",
"kpoints",
".",
"append",
"(",
"kpoint",
")",
"bands_s1",
".",
"append",
"(",
"band",
")",
"elif",
"spin",
"==",
"2",
":",
"bands_s2",
".",
"append",
"(",
"band",
")",
"if",
"bands_s2",
":",
"bands",
"=",
"[",
"bands_s1",
",",
"bands_s2",
"]",
"else",
":",
"bands",
"=",
"bands_s1",
"return",
"np",
".",
"array",
"(",
"kpoints",
")",
",",
"labels",
",",
"np",
".",
"array",
"(",
"bands",
")"
] | 42.514286 | 15.914286 |
def push_results(self):
    """Push the checks/actions results to our schedulers

    :return: None
    """
    # Walk every known scheduler and hand back any pending results.
    for link_uuid in self.schedulers:
        link = self.schedulers[link_uuid]
        if not link.active:
            logger.warning("My scheduler '%s' is not active currently", link.name)
            continue
        results = link.wait_homerun
        if not results:
            # Nothing to push back...
            continue
        # NB: it's **mostly** safe not to lock around 'results' /
        # sched['wait_homerun']: new values are only added by the very
        # thread running this function (the main satellite thread), in
        # self.manage_action_return().  The dict may also be cleared
        # within/by ISchedulers.get_results() ->
        # Satelitte.get_return_for_passive() from an (http) client
        # thread; at worst some results reach the scheduler twice, which
        # is harmless since they are indexed by their "action_id".
        logger.debug("Pushing %d results to '%s'", len(results), link.name)
        link.push_results(list(results.values()), self.name)
        results.clear()
"def",
"push_results",
"(",
"self",
")",
":",
"# For all schedulers, we check for wait_homerun",
"# and we send back results",
"for",
"scheduler_link_uuid",
"in",
"self",
".",
"schedulers",
":",
"scheduler_link",
"=",
"self",
".",
"schedulers",
"[",
"scheduler_link_uuid",
"]",
"if",
"not",
"scheduler_link",
".",
"active",
":",
"logger",
".",
"warning",
"(",
"\"My scheduler '%s' is not active currently\"",
",",
"scheduler_link",
".",
"name",
")",
"continue",
"if",
"not",
"scheduler_link",
".",
"wait_homerun",
":",
"# Nothing to push back...",
"continue",
"# NB: it's **mostly** safe for us to not use some lock around",
"# this 'results' / sched['wait_homerun'].",
"# Because it can only be modified (for adding new values) by the",
"# same thread running this function (that is the main satellite",
"# thread), and this occurs exactly in self.manage_action_return().",
"# Another possibility is for the sched['wait_homerun'] to be",
"# cleared within/by :",
"# ISchedulers.get_results() -> Satelitte.get_return_for_passive()",
"# This can so happen in an (http) client thread.",
"results",
"=",
"scheduler_link",
".",
"wait_homerun",
"logger",
".",
"debug",
"(",
"\"Pushing %d results to '%s'\"",
",",
"len",
"(",
"results",
")",
",",
"scheduler_link",
".",
"name",
")",
"# So, at worst, some results would be received twice on the",
"# scheduler level, which shouldn't be a problem given they are",
"# indexed by their \"action_id\".",
"scheduler_link",
".",
"push_results",
"(",
"list",
"(",
"results",
".",
"values",
"(",
")",
")",
",",
"self",
".",
"name",
")",
"results",
".",
"clear",
"(",
")"
] | 46.2 | 23.257143 |
def tokenized_texts_to_sequences_generator(self, tok_texts):
    """Transforms tokenized text to a sequence of integers.

    Only top "num_words" most frequent words will be taken into account.
    Only words known by the tokenizer will be taken into account.

    # Arguments
        tok_texts: List[List[str]]

    # Yields
        Yields individual sequences.
    """
    lookup = self.word_index
    for tokens in tok_texts:
        # Words missing from the vocabulary map to the OOV index (1).
        yield [lookup.get(token, 1) for token in tokens]
"def",
"tokenized_texts_to_sequences_generator",
"(",
"self",
",",
"tok_texts",
")",
":",
"for",
"seq",
"in",
"tok_texts",
":",
"vect",
"=",
"[",
"]",
"for",
"w",
"in",
"seq",
":",
"# if the word is missing you get oov_index",
"i",
"=",
"self",
".",
"word_index",
".",
"get",
"(",
"w",
",",
"1",
")",
"vect",
".",
"append",
"(",
"i",
")",
"yield",
"vect"
] | 39.3125 | 14.4375 |
def load_reference_data(name=None):
    """
    Fetch LAtools reference data from online repository.

    Parameters
    ----------
    name : str
        Which data to download. Can be one of 'culture_reference',
        'culture_test', 'downcore_reference', 'downcore_test',
        'iolite_reference' or 'zircon_reference'.
        If None, all are downloaded and returned as a dict.

    Returns
    -------
    pandas.DataFrame or dict
        A single table when `name` is given, otherwise a dict mapping
        each dataset name to its table.
    """
    base_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQJfCeuqrtFFMAeSpA9rguzLAo9OVuw50AHhAULuqjMJzbd3h46PK1KjF69YiJAeNAAjjMDkJK7wMpG/pub?gid={:}&single=true&output=csv'
    # Sheet ids of the individual datasets within the published spreadsheet.
    gids = {'culture_reference': '0',
            'culture_test': '1170065442',
            'downcore_reference': '190752797',
            'downcore_test': '721359794',
            'iolite_reference': '483581945',
            'zircon_reference': '1355554964'}
    if name is None:
        out = {}
        for nm, gid in gids.items():
            url = base_url.format(gid)
            tmp = pd.read_csv(url, header=[0], index_col=[0, 1])
            tmp.index.names = ['sample', 'rep']
            tmp.columns.names = ['analyte']
            # BUGFIX: pass 'axis' by keyword -- the positional form
            # sort_index(1, ...) was deprecated and removed in pandas 2.0.
            tmp.sort_index(axis=1, inplace=True)
            out[nm] = tmp
    else:
        gid = gids[name]
        url = base_url.format(gid)
        out = pd.read_csv(url, index_col=[0, 1])
        out.columns.names = ['analyte']
        out.sort_index(axis=1, inplace=True)
    return out
"def",
"load_reference_data",
"(",
"name",
"=",
"None",
")",
":",
"base_url",
"=",
"'https://docs.google.com/spreadsheets/d/e/2PACX-1vQJfCeuqrtFFMAeSpA9rguzLAo9OVuw50AHhAULuqjMJzbd3h46PK1KjF69YiJAeNAAjjMDkJK7wMpG/pub?gid={:}&single=true&output=csv'",
"gids",
"=",
"{",
"'culture_reference'",
":",
"'0'",
",",
"'culture_test'",
":",
"'1170065442'",
",",
"'downcore_reference'",
":",
"'190752797'",
",",
"'downcore_test'",
":",
"'721359794'",
",",
"'iolite_reference'",
":",
"'483581945'",
",",
"'zircon_reference'",
":",
"'1355554964'",
"}",
"if",
"name",
"is",
"None",
":",
"out",
"=",
"{",
"}",
"for",
"nm",
",",
"gid",
"in",
"gids",
".",
"items",
"(",
")",
":",
"url",
"=",
"base_url",
".",
"format",
"(",
"gid",
")",
"tmp",
"=",
"pd",
".",
"read_csv",
"(",
"url",
",",
"header",
"=",
"[",
"0",
"]",
",",
"index_col",
"=",
"[",
"0",
",",
"1",
"]",
")",
"tmp",
".",
"index",
".",
"names",
"=",
"[",
"'sample'",
",",
"'rep'",
"]",
"tmp",
".",
"columns",
".",
"names",
"=",
"[",
"'analyte'",
"]",
"tmp",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"out",
"[",
"nm",
"]",
"=",
"tmp",
"else",
":",
"gid",
"=",
"gids",
"[",
"name",
"]",
"url",
"=",
"base_url",
".",
"format",
"(",
"gid",
")",
"out",
"=",
"pd",
".",
"read_csv",
"(",
"url",
",",
"index_col",
"=",
"[",
"0",
",",
"1",
"]",
")",
"out",
".",
"columns",
".",
"names",
"=",
"[",
"'analyte'",
"]",
"out",
".",
"sort_index",
"(",
"1",
",",
"inplace",
"=",
"True",
")",
"return",
"out"
] | 35.175 | 18.325 |
def main():
"""
main method
"""
# initialize parser
usage = "usage: %prog [-u USER] [-p PASSWORD] [-t TITLE] [-s selection] url"
parser = OptionParser(usage, version="%prog "+instapaperlib.__version__)
parser.add_option("-u", "--user", action="store", dest="user",
metavar="USER", help="instapaper username")
parser.add_option("-p", "--password", action="store", dest="password",
metavar="USER", help="instapaper password")
parser.add_option("-t", "--title", action="store", dest="title",
metavar="TITLE", help="title of the link to add")
parser.add_option("-s", "--selection", action="store", dest="selection",
metavar="SELECTION", help="short text for description")
(options, args) = parser.parse_args()
if not len(args) > 0:
parser.error("What do you want to read later?")
if not options.user:
# auth regex
login = re.compile("(.+?):(.+)")
try:
config = open(os.path.expanduser("~") + "/.instapaperrc")
for line in config:
matches = login.match(line)
if matches:
user = matches.group(1).strip()
password = matches.group(2).strip()
except IOError:
parser.error("No login information present.")
sys.exit(-1)
else:
user = options.user
# make sure all parameters are present
if not options.password:
password = getpass()
else:
password = options.password
(status, text) = instapaperlib.add_item(user, password, args[0],
options.title, options.selection)
print text | [
"def",
"main",
"(",
")",
":",
"# initialize parser",
"usage",
"=",
"\"usage: %prog [-u USER] [-p PASSWORD] [-t TITLE] [-s selection] url\"",
"parser",
"=",
"OptionParser",
"(",
"usage",
",",
"version",
"=",
"\"%prog \"",
"+",
"instapaperlib",
".",
"__version__",
")",
"parser",
".",
"add_option",
"(",
"\"-u\"",
",",
"\"--user\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"user\"",
",",
"metavar",
"=",
"\"USER\"",
",",
"help",
"=",
"\"instapaper username\"",
")",
"parser",
".",
"add_option",
"(",
"\"-p\"",
",",
"\"--password\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"password\"",
",",
"metavar",
"=",
"\"USER\"",
",",
"help",
"=",
"\"instapaper password\"",
")",
"parser",
".",
"add_option",
"(",
"\"-t\"",
",",
"\"--title\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"title\"",
",",
"metavar",
"=",
"\"TITLE\"",
",",
"help",
"=",
"\"title of the link to add\"",
")",
"parser",
".",
"add_option",
"(",
"\"-s\"",
",",
"\"--selection\"",
",",
"action",
"=",
"\"store\"",
",",
"dest",
"=",
"\"selection\"",
",",
"metavar",
"=",
"\"SELECTION\"",
",",
"help",
"=",
"\"short text for description\"",
")",
"(",
"options",
",",
"args",
")",
"=",
"parser",
".",
"parse_args",
"(",
")",
"if",
"not",
"len",
"(",
"args",
")",
">",
"0",
":",
"parser",
".",
"error",
"(",
"\"What do you want to read later?\"",
")",
"if",
"not",
"options",
".",
"user",
":",
"# auth regex",
"login",
"=",
"re",
".",
"compile",
"(",
"\"(.+?):(.+)\"",
")",
"try",
":",
"config",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"+",
"\"/.instapaperrc\"",
")",
"for",
"line",
"in",
"config",
":",
"matches",
"=",
"login",
".",
"match",
"(",
"line",
")",
"if",
"matches",
":",
"user",
"=",
"matches",
".",
"group",
"(",
"1",
")",
".",
"strip",
"(",
")",
"password",
"=",
"matches",
".",
"group",
"(",
"2",
")",
".",
"strip",
"(",
")",
"except",
"IOError",
":",
"parser",
".",
"error",
"(",
"\"No login information present.\"",
")",
"sys",
".",
"exit",
"(",
"-",
"1",
")",
"else",
":",
"user",
"=",
"options",
".",
"user",
"# make sure all parameters are present",
"if",
"not",
"options",
".",
"password",
":",
"password",
"=",
"getpass",
"(",
")",
"else",
":",
"password",
"=",
"options",
".",
"password",
"(",
"status",
",",
"text",
")",
"=",
"instapaperlib",
".",
"add_item",
"(",
"user",
",",
"password",
",",
"args",
"[",
"0",
"]",
",",
"options",
".",
"title",
",",
"options",
".",
"selection",
")",
"print",
"text"
] | 38.644444 | 21.222222 |
def merge_conf(to_hash, other_hash, path=None):
    """Recursively merge `other_hash` into `to_hash` (in place).

    Nested dicts present in both inputs are merged key by key; any other
    clash is resolved in favour of `other_hash`.

    :param to_hash: destination dict, mutated in place
    :param other_hash: source dict whose values win on conflicts
    :param path: breadcrumb of keys leading to the current level (used by
        the recursion; callers normally omit it)
    :return: `to_hash`, for convenience
    """
    # BUGFIX: avoid a mutable default argument; build a fresh list per call.
    if path is None:
        path = []
    for key in other_hash:
        if (key in to_hash and isinstance(to_hash[key], dict)
                and isinstance(other_hash[key], dict)):
            merge_conf(to_hash[key], other_hash[key], path + [str(key)])
        else:
            to_hash[key] = other_hash[key]
    return to_hash
"def",
"merge_conf",
"(",
"to_hash",
",",
"other_hash",
",",
"path",
"=",
"[",
"]",
")",
":",
"for",
"key",
"in",
"other_hash",
":",
"if",
"(",
"key",
"in",
"to_hash",
"and",
"isinstance",
"(",
"to_hash",
"[",
"key",
"]",
",",
"dict",
")",
"and",
"isinstance",
"(",
"other_hash",
"[",
"key",
"]",
",",
"dict",
")",
")",
":",
"merge_conf",
"(",
"to_hash",
"[",
"key",
"]",
",",
"other_hash",
"[",
"key",
"]",
",",
"path",
"+",
"[",
"str",
"(",
"key",
")",
"]",
")",
"else",
":",
"to_hash",
"[",
"key",
"]",
"=",
"other_hash",
"[",
"key",
"]",
"return",
"to_hash"
] | 40.888889 | 15.777778 |
def __numero_tres_cifras(self, number, indice=None, sing=False):
    """Spell out a number of up to three digits in Spanish.

    `sing` selects the apocopated form for numbers below 30; `indice` is
    accepted for interface compatibility but is unused here.
    """
    number = int(number)
    if number < 30:
        # Numbers up to 29 have dedicated word forms.
        table = especiales_apocopado if sing else especiales_masculino
        return table[number]
    if number < 100:
        words = decenas[number // 10]
        remainder = number % 10
        if remainder:
            words += ' y %s' % self.__numero_tres_cifras(remainder, None, sing)
        return words
    if number == 100:
        return 'cien'
    if number < 1000:
        words = centena_masculino[number // 100]
        remainder = number % 100
        if remainder:
            words += ' %s' % self.__numero_tres_cifras(remainder, None, sing)
        return words
"def",
"__numero_tres_cifras",
"(",
"self",
",",
"number",
",",
"indice",
"=",
"None",
",",
"sing",
"=",
"False",
")",
":",
"number",
"=",
"int",
"(",
"number",
")",
"if",
"number",
"<",
"30",
":",
"if",
"sing",
":",
"return",
"especiales_apocopado",
"[",
"number",
"]",
"else",
":",
"return",
"especiales_masculino",
"[",
"number",
"]",
"elif",
"number",
"<",
"100",
":",
"texto",
"=",
"decenas",
"[",
"number",
"//",
"10",
"]",
"resto",
"=",
"number",
"%",
"10",
"if",
"resto",
":",
"texto",
"+=",
"' y %s'",
"%",
"self",
".",
"__numero_tres_cifras",
"(",
"resto",
",",
"None",
",",
"sing",
")",
"return",
"texto",
"if",
"number",
"==",
"100",
":",
"return",
"'cien'",
"if",
"number",
"<",
"1000",
":",
"texto",
"=",
"centena_masculino",
"[",
"number",
"//",
"100",
"]",
"resto",
"=",
"number",
"%",
"100",
"if",
"resto",
":",
"texto",
"+=",
"' %s'",
"%",
"self",
".",
"__numero_tres_cifras",
"(",
"resto",
",",
"None",
",",
"sing",
")",
"return",
"texto"
] | 31.192308 | 19.730769 |
def compose(*decorators):
    """Helper to compose decorators::

        @a
        @b
        def f():
            pass

    Is equivalent to::

        @compose(a, b)
        def f():
            ...
    """
    def composed(func):
        # Apply right-to-left so the first decorator ends up outermost,
        # matching stacked @-syntax.
        result = func
        for decorator in reversed(decorators):
            result = decorator(result)
        return result
    return composed
"def",
"compose",
"(",
"*",
"decorators",
")",
":",
"def",
"composed",
"(",
"f",
")",
":",
"for",
"decor",
"in",
"reversed",
"(",
"decorators",
")",
":",
"f",
"=",
"decor",
"(",
"f",
")",
"return",
"f",
"return",
"composed"
] | 16.684211 | 21.631579 |
def job_success(self, job, queue, job_result):
    """Handle a job whose execute call just succeeded.

    Marks the job finished (SUCCESS status with an end timestamp),
    records it on the queue's success list, logs the outcome, and fires
    the job's optional `on_success` hook.  `job_result` is the value
    returned by the callback, if any.
    """
    # The job is no longer queued.
    job.queued.delete()
    finished_at = str(datetime.utcnow())
    job.hmset(end=finished_at, status=STATUSES.SUCCESS)
    queue.success.rpush(job.ident)
    message = self.job_success_message(job, queue, job_result)
    self.log(message)
    # Optional per-job callback.
    if hasattr(job, 'on_success'):
        job.on_success(queue, job_result)
"def",
"job_success",
"(",
"self",
",",
"job",
",",
"queue",
",",
"job_result",
")",
":",
"job",
".",
"queued",
".",
"delete",
"(",
")",
"job",
".",
"hmset",
"(",
"end",
"=",
"str",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
",",
"status",
"=",
"STATUSES",
".",
"SUCCESS",
")",
"queue",
".",
"success",
".",
"rpush",
"(",
"job",
".",
"ident",
")",
"self",
".",
"log",
"(",
"self",
".",
"job_success_message",
"(",
"job",
",",
"queue",
",",
"job_result",
")",
")",
"if",
"hasattr",
"(",
"job",
",",
"'on_success'",
")",
":",
"job",
".",
"on_success",
"(",
"queue",
",",
"job_result",
")"
] | 43.090909 | 11.454545 |
def make_job_graph(infiles, fragfiles, blastcmds):
    """Return a job dependency graph, based on the passed input sequence files.

    - infiles - a list of paths to input FASTA files
    - fragfiles - a list of paths to fragmented input FASTA files

    By default, will run ANIb - it *is* possible to make a mess of passing
    the wrong executable for the mode you're using.

    All items in the returned graph list are BLAST executable jobs that
    must be run *after* the corresponding database creation. The Job
    objects corresponding to the database creation are contained as
    dependencies.  How those jobs are scheduled depends on the scheduler
    (see run_multiprocessing.py, run_sge.py)
    """
    joblist = []  # job dependency graphs to return
    # One database-building job per input file, keyed by input path.
    dbjobdict = build_db_jobs(infiles, blastcmds)
    jobnum = len(dbjobdict)
    # Upper-triangle all-vs-all over the fragment files: each pair gets a
    # forward and a reverse BLAST comparison.
    for idx, frag_a in enumerate(fragfiles[:-1]):
        for frag_b in fragfiles[idx + 1:]:
            jobnum += 1
            whole_a = frag_a.replace("-fragments", "")
            whole_b = frag_b.replace("-fragments", "")
            fwd = pyani_jobs.Job(
                "%s_exe_%06d_a" % (blastcmds.prefix, jobnum),
                blastcmds.build_blast_cmd(frag_a, whole_b),
            )
            rev = pyani_jobs.Job(
                "%s_exe_%06d_b" % (blastcmds.prefix, jobnum),
                blastcmds.build_blast_cmd(frag_b, whole_a),
            )
            # Each comparison depends on its query's database being built.
            fwd.add_dependency(dbjobdict[whole_a])
            rev.add_dependency(dbjobdict[whole_b])
            joblist.extend([fwd, rev])
    return joblist
"def",
"make_job_graph",
"(",
"infiles",
",",
"fragfiles",
",",
"blastcmds",
")",
":",
"joblist",
"=",
"[",
"]",
"# Holds list of job dependency graphs",
"# Get dictionary of database-building jobs",
"dbjobdict",
"=",
"build_db_jobs",
"(",
"infiles",
",",
"blastcmds",
")",
"# Create list of BLAST executable jobs, with dependencies",
"jobnum",
"=",
"len",
"(",
"dbjobdict",
")",
"for",
"idx",
",",
"fname1",
"in",
"enumerate",
"(",
"fragfiles",
"[",
":",
"-",
"1",
"]",
")",
":",
"for",
"fname2",
"in",
"fragfiles",
"[",
"idx",
"+",
"1",
":",
"]",
":",
"jobnum",
"+=",
"1",
"jobs",
"=",
"[",
"pyani_jobs",
".",
"Job",
"(",
"\"%s_exe_%06d_a\"",
"%",
"(",
"blastcmds",
".",
"prefix",
",",
"jobnum",
")",
",",
"blastcmds",
".",
"build_blast_cmd",
"(",
"fname1",
",",
"fname2",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
")",
",",
")",
",",
"pyani_jobs",
".",
"Job",
"(",
"\"%s_exe_%06d_b\"",
"%",
"(",
"blastcmds",
".",
"prefix",
",",
"jobnum",
")",
",",
"blastcmds",
".",
"build_blast_cmd",
"(",
"fname2",
",",
"fname1",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
")",
",",
")",
",",
"]",
"jobs",
"[",
"0",
"]",
".",
"add_dependency",
"(",
"dbjobdict",
"[",
"fname1",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
"]",
")",
"jobs",
"[",
"1",
"]",
".",
"add_dependency",
"(",
"dbjobdict",
"[",
"fname2",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
"]",
")",
"joblist",
".",
"extend",
"(",
"jobs",
")",
"# Return the dependency graph",
"return",
"joblist"
] | 42.195122 | 22.390244 |
def init_states(self,
                source_encoded: mx.sym.Symbol,
                source_encoded_lengths: mx.sym.Symbol,
                source_encoded_max_length: int) -> List[mx.sym.Symbol]:
    """
    Returns a list of symbolic states that represent the initial states of this decoder.
    Used for inference.

    Abstract hook: this base implementation does nothing (returns None);
    concrete decoder subclasses are expected to override it.

    :param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
    :param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
    :param source_encoded_max_length: Size of encoder time dimension.
    :return: List of symbolic initial states.
    """
    pass
"def",
"init_states",
"(",
"self",
",",
"source_encoded",
":",
"mx",
".",
"sym",
".",
"Symbol",
",",
"source_encoded_lengths",
":",
"mx",
".",
"sym",
".",
"Symbol",
",",
"source_encoded_max_length",
":",
"int",
")",
"->",
"List",
"[",
"mx",
".",
"sym",
".",
"Symbol",
"]",
":",
"pass"
] | 48.928571 | 27.357143 |
def parse_table(document, tbl):
    """Parse a w:tbl element into a doc.Table.

    :param document: parsing context, forwarded to the property/paragraph parsers
    :param tbl: etree element for the table (w:tbl)
    :returns: doc.Table whose ``rows`` is a list of lists of doc.TableCell
    """
    def _change(rows, pos_x):
        # Extend the row span of the cell at grid column `pos_x` in the
        # previous row (used when a cell continues a vertical merge).
        if len(rows) == 1:
            return rows
        count_x = 1
        for x in rows[-1]:
            if count_x == pos_x:
                x.row_span += 1
            count_x += x.grid_span
        return rows
    table = doc.Table()
    # Table-level properties (w:tblPr), if present.
    tbl_pr = tbl.find(_name('{{{w}}}tblPr'))
    if tbl_pr is not None:
        parse_table_properties(document, table, tbl_pr)
    for tr in tbl.xpath('./w:tr', namespaces=NAMESPACES):
        columns = []
        pos_x = 0  # grid column of the current cell (advanced by grid_span)
        for tc in tr.xpath('./w:tc', namespaces=NAMESPACES):
            cell = doc.TableCell()
            tc_pr = tc.find(_name('{{{w}}}tcPr'))
            if tc_pr is not None:
                # NOTE(review): table properties above receive `document`,
                # but cell properties receive the module `doc` -- confirm
                # this asymmetry is intentional.
                parse_table_column_properties(doc, cell, tc_pr)
            # maybe after
            pos_x += cell.grid_span
            if cell.vmerge is not None and cell.vmerge == "":
                # Continuation of a vertical merge: grow the span of the
                # cell above instead of emitting this cell.
                table.rows = _change(table.rows, pos_x)
            else:
                for p in tc.xpath('./w:p', namespaces=NAMESPACES):
                    cell.elements.append(parse_paragraph(document, p))
                columns.append(cell)
        table.rows.append(columns)
    return table
"def",
"parse_table",
"(",
"document",
",",
"tbl",
")",
":",
"def",
"_change",
"(",
"rows",
",",
"pos_x",
")",
":",
"if",
"len",
"(",
"rows",
")",
"==",
"1",
":",
"return",
"rows",
"count_x",
"=",
"1",
"for",
"x",
"in",
"rows",
"[",
"-",
"1",
"]",
":",
"if",
"count_x",
"==",
"pos_x",
":",
"x",
".",
"row_span",
"+=",
"1",
"count_x",
"+=",
"x",
".",
"grid_span",
"return",
"rows",
"table",
"=",
"doc",
".",
"Table",
"(",
")",
"tbl_pr",
"=",
"tbl",
".",
"find",
"(",
"_name",
"(",
"'{{{w}}}tblPr'",
")",
")",
"if",
"tbl_pr",
"is",
"not",
"None",
":",
"parse_table_properties",
"(",
"document",
",",
"table",
",",
"tbl_pr",
")",
"for",
"tr",
"in",
"tbl",
".",
"xpath",
"(",
"'./w:tr'",
",",
"namespaces",
"=",
"NAMESPACES",
")",
":",
"columns",
"=",
"[",
"]",
"pos_x",
"=",
"0",
"for",
"tc",
"in",
"tr",
".",
"xpath",
"(",
"'./w:tc'",
",",
"namespaces",
"=",
"NAMESPACES",
")",
":",
"cell",
"=",
"doc",
".",
"TableCell",
"(",
")",
"tc_pr",
"=",
"tc",
".",
"find",
"(",
"_name",
"(",
"'{{{w}}}tcPr'",
")",
")",
"if",
"tc_pr",
"is",
"not",
"None",
":",
"parse_table_column_properties",
"(",
"doc",
",",
"cell",
",",
"tc_pr",
")",
"# maybe after",
"pos_x",
"+=",
"cell",
".",
"grid_span",
"if",
"cell",
".",
"vmerge",
"is",
"not",
"None",
"and",
"cell",
".",
"vmerge",
"==",
"\"\"",
":",
"table",
".",
"rows",
"=",
"_change",
"(",
"table",
".",
"rows",
",",
"pos_x",
")",
"else",
":",
"for",
"p",
"in",
"tc",
".",
"xpath",
"(",
"'./w:p'",
",",
"namespaces",
"=",
"NAMESPACES",
")",
":",
"cell",
".",
"elements",
".",
"append",
"(",
"parse_paragraph",
"(",
"document",
",",
"p",
")",
")",
"columns",
".",
"append",
"(",
"cell",
")",
"table",
".",
"rows",
".",
"append",
"(",
"columns",
")",
"return",
"table"
] | 23.84 | 23.36 |
def skip_prepare(func):
    """
    A convenience decorator for indicating the raw data should not be prepared.
    """
    @wraps(func)
    def _wrapper(self, *args, **kwargs):
        # Wrap the raw return value so downstream code skips preparation.
        return Data(func(self, *args, **kwargs), should_prepare=False)
    return _wrapper
"def",
"skip_prepare",
"(",
"func",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"value",
"=",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"Data",
"(",
"value",
",",
"should_prepare",
"=",
"False",
")",
"return",
"_wrapper"
] | 31.333333 | 12.444444 |
def btc_bitcoind_tx_serialize( tx ):
    """
    Convert a *Bitcoind*-given transaction into its hex string.

    tx format is {'vin': [...], 'vout': [...], 'locktime': ..., 'version': ...},
    with the same formatting rules as getrawtransaction.
    (in particular, each value in vout is a Decimal, in BTC)

    Raises KeyError if a required field is missing and the transaction is
    not a coinbase transaction with a precomputed 'hex' field.
    """
    # NOTE: Python 2 code (`except KeyError, ke` syntax below).
    tx_ins = []
    tx_outs = []
    try:
        # Normalize each input into the internal outpoint/script layout
        # expected by btc_tx_serialize().
        for inp in tx['vin']:
            next_inp = {
                "outpoint": {
                    "index": int(inp['vout']),
                    "hash": str(inp['txid'])
                }
            }
            if 'sequence' in inp:
                next_inp['sequence'] = int(inp['sequence'])
            else:
                # Default sequence when bitcoind omits it.
                next_inp['sequence'] = UINT_MAX
            if 'scriptSig' in inp:
                next_inp['script'] = str(inp['scriptSig']['hex'])
            else:
                next_inp['script'] = ""
            if 'txinwitness' in inp:
                next_inp['witness_script'] = btc_witness_script_serialize(inp['txinwitness'])
            tx_ins.append(next_inp)
        for out in tx['vout']:
            # Sanity guard against absurd amounts (value is in BTC).
            assert out['value'] < 1000, "High transaction value\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True)
            next_out = {
                # Convert BTC (Decimal, per docstring) to satoshis.
                'value': int(Decimal(out['value'] * 10**8)),
                'script': str(out['scriptPubKey']['hex'])
            }
            tx_outs.append(next_out)
        tx_fields = {
            "locktime": int(tx['locktime']),
            "version": int(tx['version']),
            "ins": tx_ins,
            "outs": tx_outs
        }
        tx_serialized = btc_tx_serialize( tx_fields )
        return str(tx_serialized)
    except KeyError, ke:
        # Coinbase inputs lack txid/vout; fall back to the precomputed hex
        # when bitcoind supplied one.
        if btc_bitcoind_tx_is_coinbase(tx) and 'hex' in tx.keys():
            tx_serialized = tx['hex']
            return str(tx_serialized)
        log.error("Key error in:\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True))
        traceback.print_exc()
        raise ke
"def",
"btc_bitcoind_tx_serialize",
"(",
"tx",
")",
":",
"tx_ins",
"=",
"[",
"]",
"tx_outs",
"=",
"[",
"]",
"try",
":",
"for",
"inp",
"in",
"tx",
"[",
"'vin'",
"]",
":",
"next_inp",
"=",
"{",
"\"outpoint\"",
":",
"{",
"\"index\"",
":",
"int",
"(",
"inp",
"[",
"'vout'",
"]",
")",
",",
"\"hash\"",
":",
"str",
"(",
"inp",
"[",
"'txid'",
"]",
")",
"}",
"}",
"if",
"'sequence'",
"in",
"inp",
":",
"next_inp",
"[",
"'sequence'",
"]",
"=",
"int",
"(",
"inp",
"[",
"'sequence'",
"]",
")",
"else",
":",
"next_inp",
"[",
"'sequence'",
"]",
"=",
"UINT_MAX",
"if",
"'scriptSig'",
"in",
"inp",
":",
"next_inp",
"[",
"'script'",
"]",
"=",
"str",
"(",
"inp",
"[",
"'scriptSig'",
"]",
"[",
"'hex'",
"]",
")",
"else",
":",
"next_inp",
"[",
"'script'",
"]",
"=",
"\"\"",
"if",
"'txinwitness'",
"in",
"inp",
":",
"next_inp",
"[",
"'witness_script'",
"]",
"=",
"btc_witness_script_serialize",
"(",
"inp",
"[",
"'txinwitness'",
"]",
")",
"tx_ins",
".",
"append",
"(",
"next_inp",
")",
"for",
"out",
"in",
"tx",
"[",
"'vout'",
"]",
":",
"assert",
"out",
"[",
"'value'",
"]",
"<",
"1000",
",",
"\"High transaction value\\n%s\"",
"%",
"simplejson",
".",
"dumps",
"(",
"tx",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
"next_out",
"=",
"{",
"'value'",
":",
"int",
"(",
"Decimal",
"(",
"out",
"[",
"'value'",
"]",
"*",
"10",
"**",
"8",
")",
")",
",",
"'script'",
":",
"str",
"(",
"out",
"[",
"'scriptPubKey'",
"]",
"[",
"'hex'",
"]",
")",
"}",
"tx_outs",
".",
"append",
"(",
"next_out",
")",
"tx_fields",
"=",
"{",
"\"locktime\"",
":",
"int",
"(",
"tx",
"[",
"'locktime'",
"]",
")",
",",
"\"version\"",
":",
"int",
"(",
"tx",
"[",
"'version'",
"]",
")",
",",
"\"ins\"",
":",
"tx_ins",
",",
"\"outs\"",
":",
"tx_outs",
"}",
"tx_serialized",
"=",
"btc_tx_serialize",
"(",
"tx_fields",
")",
"return",
"str",
"(",
"tx_serialized",
")",
"except",
"KeyError",
",",
"ke",
":",
"if",
"btc_bitcoind_tx_is_coinbase",
"(",
"tx",
")",
"and",
"'hex'",
"in",
"tx",
".",
"keys",
"(",
")",
":",
"tx_serialized",
"=",
"tx",
"[",
"'hex'",
"]",
"return",
"str",
"(",
"tx_serialized",
")",
"log",
".",
"error",
"(",
"\"Key error in:\\n%s\"",
"%",
"simplejson",
".",
"dumps",
"(",
"tx",
",",
"indent",
"=",
"4",
",",
"sort_keys",
"=",
"True",
")",
")",
"traceback",
".",
"print_exc",
"(",
")",
"raise",
"ke"
] | 31.677419 | 21.129032 |
def search_and_extract_nucleotides_matching_nucleotide_database(self,
                                                                unpack,
                                                                euk_check,
                                                                search_method,
                                                                maximum_range,
                                                                threads,
                                                                evalue,
                                                                hmmsearch_output_table,
                                                                hit_reads_fasta):
        '''As per nt_db_search() except slightly lower level. Search an
        input read set (unpack) and then extract the sequences that hit.

        Parameters
        ----------
        unpack: object
            read-set accessor; provides the raw read file path
            (``unpack.read_file``) and its format (``unpack.format()``),
            and is handed to the nhmmer search
        euk_check: bool
            when True, reads flagged as eukaryotic contamination are
            removed from the hit set
        search_method: str
            'hmmsearch' runs nhmmer; 'diamond' is not supported for
            nucleotide databases and raises an Exception
        maximum_range: int or falsy
            when truthy, hit read names are grouped into spans via
            _get_read_names (presumably the maximum span length --
            TODO confirm); otherwise per-read directions are used
        threads: int
            thread count forwarded to the search tool
        evalue:
            e-value cutoff forwarded to the search tool
        hmmsearch_output_table: str
            path to hmmsearch output table
        hit_reads_fasta: str
            path to hit nucleotide sequences

        Returns
        -------
        result: DBSearchResult object containing file locations and hit
            information
        direction_information: dict
            {read_1: False
             ...
             read n: True}
            where True = Forward direction
            and False = Reverse direction
        '''
        if search_method == "hmmsearch":
            # First search the reads using the HMM
            search_result, table_list = self.nhmmer(
                hmmsearch_output_table,
                unpack,
                threads,
                evalue
            )
        elif search_method == 'diamond':
            raise Exception("Diamond searches not supported for nucelotide databases yet")
        # Collect hit read names (and strand/span info) from the search output.
        if maximum_range:
            hits = self._get_read_names(
                search_result, # define the span of hits
                maximum_range
            )
        else:
            hits = self._get_sequence_directions(search_result)
        hit_readnames = hits.keys()
        if euk_check:
            # Drop reads that look like eukaryotic contamination.
            euk_reads = self._check_euk_contamination(table_list)
            hit_readnames = set([read for read in hit_readnames if read not in euk_reads])
            hits = {key:item for key, item in hits.iteritems() if key in hit_readnames}
            # [number of euk-flagged reads, number of hits kept]
            hit_read_count = [len(euk_reads), len(hit_readnames)]
        else:
            hit_read_count = [0, len(hit_readnames)]
        # Pull the hit sequences out of the raw read file.
        hit_reads_fasta, direction_information = self._extract_from_raw_reads(
            hit_reads_fasta,
            hit_readnames,
            unpack.read_file,
            unpack.format(),
            hits
        )
        if not hit_readnames:
            # No hits at all: record the (empty) search outcome.
            result = DBSearchResult(None,
                                    search_result,
                                    hit_read_count,
                                    None)
        else:
            slash_endings=self._check_for_slash_endings(hit_readnames)
            result = DBSearchResult(hit_reads_fasta,
                                    search_result,
                                    hit_read_count,
                                    slash_endings)
        if maximum_range:
            # Each entry may contain several strand hits within its span.
            n_hits = sum([len(x["strand"]) for x in hits.values()])
        else:
            n_hits = len(hits)
        logging.info("%s read(s) detected" % n_hits)
        return result, direction_information
"def",
"search_and_extract_nucleotides_matching_nucleotide_database",
"(",
"self",
",",
"unpack",
",",
"euk_check",
",",
"search_method",
",",
"maximum_range",
",",
"threads",
",",
"evalue",
",",
"hmmsearch_output_table",
",",
"hit_reads_fasta",
")",
":",
"if",
"search_method",
"==",
"\"hmmsearch\"",
":",
"# First search the reads using the HMM",
"search_result",
",",
"table_list",
"=",
"self",
".",
"nhmmer",
"(",
"hmmsearch_output_table",
",",
"unpack",
",",
"threads",
",",
"evalue",
")",
"elif",
"search_method",
"==",
"'diamond'",
":",
"raise",
"Exception",
"(",
"\"Diamond searches not supported for nucelotide databases yet\"",
")",
"if",
"maximum_range",
":",
"hits",
"=",
"self",
".",
"_get_read_names",
"(",
"search_result",
",",
"# define the span of hits",
"maximum_range",
")",
"else",
":",
"hits",
"=",
"self",
".",
"_get_sequence_directions",
"(",
"search_result",
")",
"hit_readnames",
"=",
"hits",
".",
"keys",
"(",
")",
"if",
"euk_check",
":",
"euk_reads",
"=",
"self",
".",
"_check_euk_contamination",
"(",
"table_list",
")",
"hit_readnames",
"=",
"set",
"(",
"[",
"read",
"for",
"read",
"in",
"hit_readnames",
"if",
"read",
"not",
"in",
"euk_reads",
"]",
")",
"hits",
"=",
"{",
"key",
":",
"item",
"for",
"key",
",",
"item",
"in",
"hits",
".",
"iteritems",
"(",
")",
"if",
"key",
"in",
"hit_readnames",
"}",
"hit_read_count",
"=",
"[",
"len",
"(",
"euk_reads",
")",
",",
"len",
"(",
"hit_readnames",
")",
"]",
"else",
":",
"hit_read_count",
"=",
"[",
"0",
",",
"len",
"(",
"hit_readnames",
")",
"]",
"hit_reads_fasta",
",",
"direction_information",
"=",
"self",
".",
"_extract_from_raw_reads",
"(",
"hit_reads_fasta",
",",
"hit_readnames",
",",
"unpack",
".",
"read_file",
",",
"unpack",
".",
"format",
"(",
")",
",",
"hits",
")",
"if",
"not",
"hit_readnames",
":",
"result",
"=",
"DBSearchResult",
"(",
"None",
",",
"search_result",
",",
"hit_read_count",
",",
"None",
")",
"else",
":",
"slash_endings",
"=",
"self",
".",
"_check_for_slash_endings",
"(",
"hit_readnames",
")",
"result",
"=",
"DBSearchResult",
"(",
"hit_reads_fasta",
",",
"search_result",
",",
"hit_read_count",
",",
"slash_endings",
")",
"if",
"maximum_range",
":",
"n_hits",
"=",
"sum",
"(",
"[",
"len",
"(",
"x",
"[",
"\"strand\"",
"]",
")",
"for",
"x",
"in",
"hits",
".",
"values",
"(",
")",
"]",
")",
"else",
":",
"n_hits",
"=",
"len",
"(",
"hits",
")",
"logging",
".",
"info",
"(",
"\"%s read(s) detected\"",
"%",
"n_hits",
")",
"return",
"result",
",",
"direction_information"
] | 41.202128 | 21.989362 |
def set_split_extents_by_indices_per_axis(self):
        """
        Compute :attr:`split_shape`, :attr:`split_begs` and
        :attr:`split_ends` from the cut positions stored in
        :attr:`indices_per_axis`.

        An axis with cut indices ``[i0, i1, ...]`` is divided into slices
        ``[0, i0), [i0, i1), ..., [ik, axis_length)``; an axis without
        indices becomes a single full-width slice.
        """
        if self.indices_per_axis is None:
            raise ValueError("Got None for self.indices_per_axis")
        self.logger.debug("self.array_shape=%s", self.array_shape)
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
        ndim = len(self.array_shape)
        self.indices_per_axis = pad_with_none(self.indices_per_axis, ndim)
        # One slice per axis until cut indices say otherwise.
        self.split_shape = _np.ones(ndim, dtype="int64")
        self.split_begs = [[] for _ in range(ndim)]
        self.split_ends = [[] for _ in range(ndim)]
        for axis, cut_indices in enumerate(self.indices_per_axis):
            if (cut_indices is None) or (len(cut_indices) <= 0):
                # No cuts: one slice spanning the whole axis.
                self.split_begs[axis] = [0, ]
                self.split_ends[axis] = [self.array_shape[axis], ]
                continue
            num_slices = len(cut_indices) + 1
            self.split_shape[axis] = num_slices
            # Slice starts: 0 followed by each cut index.
            begs = _np.zeros((num_slices,), dtype="int64")
            begs[1:] = cut_indices
            # Slice stops: the next slice's start, then the axis length.
            ends = _np.zeros((num_slices,), dtype="int64")
            ends[0:-1] = begs[1:]
            ends[-1] = self.array_shape[axis]
            self.split_begs[axis] = begs
            self.split_ends[axis] = ends
        self.logger.debug("self.indices_per_axis=%s", self.indices_per_axis)
"def",
"set_split_extents_by_indices_per_axis",
"(",
"self",
")",
":",
"if",
"self",
".",
"indices_per_axis",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Got None for self.indices_per_axis\"",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"self.array_shape=%s\"",
",",
"self",
".",
"array_shape",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"self.indices_per_axis=%s\"",
",",
"self",
".",
"indices_per_axis",
")",
"self",
".",
"indices_per_axis",
"=",
"pad_with_none",
"(",
"self",
".",
"indices_per_axis",
",",
"len",
"(",
"self",
".",
"array_shape",
")",
")",
"# Define the start and stop indices (extents) for each axis slice",
"self",
".",
"split_shape",
"=",
"_np",
".",
"ones",
"(",
"len",
"(",
"self",
".",
"array_shape",
")",
",",
"dtype",
"=",
"\"int64\"",
")",
"self",
".",
"split_begs",
"=",
"[",
"[",
"]",
",",
"]",
"*",
"len",
"(",
"self",
".",
"array_shape",
")",
"self",
".",
"split_ends",
"=",
"[",
"[",
"]",
",",
"]",
"*",
"len",
"(",
"self",
".",
"array_shape",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"indices_per_axis",
")",
")",
":",
"indices",
"=",
"self",
".",
"indices_per_axis",
"[",
"i",
"]",
"if",
"(",
"indices",
"is",
"not",
"None",
")",
"and",
"(",
"len",
"(",
"indices",
")",
">",
"0",
")",
":",
"self",
".",
"split_shape",
"[",
"i",
"]",
"=",
"len",
"(",
"indices",
")",
"+",
"1",
"self",
".",
"split_begs",
"[",
"i",
"]",
"=",
"_np",
".",
"zeros",
"(",
"(",
"len",
"(",
"indices",
")",
"+",
"1",
",",
")",
",",
"dtype",
"=",
"\"int64\"",
")",
"self",
".",
"split_begs",
"[",
"i",
"]",
"[",
"1",
":",
"]",
"=",
"indices",
"self",
".",
"split_ends",
"[",
"i",
"]",
"=",
"_np",
".",
"zeros",
"(",
"(",
"len",
"(",
"self",
".",
"split_begs",
"[",
"i",
"]",
")",
",",
")",
",",
"dtype",
"=",
"\"int64\"",
")",
"self",
".",
"split_ends",
"[",
"i",
"]",
"[",
"0",
":",
"-",
"1",
"]",
"=",
"self",
".",
"split_begs",
"[",
"i",
"]",
"[",
"1",
":",
"]",
"self",
".",
"split_ends",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
"=",
"self",
".",
"array_shape",
"[",
"i",
"]",
"else",
":",
"# start and stop is the full width of the axis",
"self",
".",
"split_begs",
"[",
"i",
"]",
"=",
"[",
"0",
",",
"]",
"self",
".",
"split_ends",
"[",
"i",
"]",
"=",
"[",
"self",
".",
"array_shape",
"[",
"i",
"]",
",",
"]",
"self",
".",
"logger",
".",
"debug",
"(",
"\"self.indices_per_axis=%s\"",
",",
"self",
".",
"indices_per_axis",
")"
] | 50.121212 | 20.969697 |
def get_stdout(self, workflow_id, task_id):
        """Fetch the stdout text of a single task within a workflow.

        Args:
            workflow_id (str): Workflow id.
            task_id (str): Task id.

        Returns:
            str: stdout of the task.  Whatever ``raise_for_status``
            raises on the underlying response propagates to the caller.
        """
        fields = {
            'wf_url': self.workflows_url, 'wf_id': workflow_id, 'task_id': task_id
        }
        url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % fields
        response = self.gbdx_connection.get(url)
        response.raise_for_status()
        return response.text
"def",
"get_stdout",
"(",
"self",
",",
"workflow_id",
",",
"task_id",
")",
":",
"url",
"=",
"'%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout'",
"%",
"{",
"'wf_url'",
":",
"self",
".",
"workflows_url",
",",
"'wf_id'",
":",
"workflow_id",
",",
"'task_id'",
":",
"task_id",
"}",
"r",
"=",
"self",
".",
"gbdx_connection",
".",
"get",
"(",
"url",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"r",
".",
"text"
] | 29.176471 | 18.294118 |
def add_channel(self, chname, workspace=None,
                num_images=None, settings=None,
                settings_template=None,
                settings_share=None, share_keylist=None):
        """Create a new Ginga channel.

        Parameters
        ----------
        chname : str
            The name of the channel to create.

        workspace : str or None
            The name of the workspace in which to create the channel

        num_images : int or None
            The cache size for the number of images to keep in memory

        settings : `~ginga.misc.Settings.SettingGroup` or `None`
            Viewer preferences. If not given, one will be created.

        settings_template : `~ginga.misc.Settings.SettingGroup` or `None`
            Viewer preferences template

        settings_share : `~ginga.misc.Settings.SettingGroup` or `None`
            Viewer preferences instance to share with newly created settings

        share_keylist : list of str
            List of names of settings that should be shared

        Returns
        -------
        channel : `~ginga.misc.Bunch.Bunch`
            The channel info bunch.
        """
        with self.lock:
            # Idempotent: if the channel already exists, just return it.
            if self.has_channel(chname):
                return self.get_channel(chname)
            if chname in self.ds.get_tabnames(None):
                raise ValueError("Tab name already in use: '%s'" % (chname))
            name = chname
            if settings is None:
                # No settings supplied: create (and try to load) a saved
                # preferences category named after the channel.
                settings = self.prefs.create_category('channel_' + name)
                try:
                    settings.load(onError='raise')
                except Exception as e:
                    self.logger.warning("no saved preferences found for channel "
                                        "'%s': %s" % (name, str(e)))
                # copy template settings to new channel
                if settings_template is not None:
                    osettings = settings_template
                    osettings.copy_settings(settings)
                else:
                    try:
                        # use channel_Image as a template if one was not
                        # provided
                        osettings = self.prefs.get_settings('channel_Image')
                        self.logger.debug("Copying settings from 'Image' to "
                                          "'%s'" % (name))
                        osettings.copy_settings(settings)
                    except KeyError:
                        pass
            if (share_keylist is not None) and (settings_share is not None):
                # caller wants us to share settings with another viewer
                settings_share.share_settings(settings, keylist=share_keylist)
            # Make sure these preferences are at least defined
            if num_images is None:
                num_images = settings.get('numImages',
                                          self.settings.get('numImages', 1))
            settings.set_defaults(switchnew=True, numImages=num_images,
                                  raisenew=True, genthumb=True,
                                  focus_indicator=False,
                                  preload_images=False, sort_order='loadtime')
            self.logger.debug("Adding channel '%s'" % (chname))
            channel = Channel(chname, self, datasrc=None,
                              settings=settings)
            # Build the viewer widget for this channel in the workspace.
            bnch = self.add_viewer(chname, settings,
                                   workspace=workspace)
            # for debugging
            bnch.image_viewer.set_name('channel:%s' % (chname))
            opmon = self.get_plugin_manager(self.logger, self,
                                            self.ds, self.mm)
            # Wire the viewer/widget objects into the channel record.
            channel.widget = bnch.widget
            channel.container = bnch.container
            channel.workspace = bnch.workspace
            channel.connect_viewer(bnch.image_viewer)
            channel.viewer = bnch.image_viewer
            # older name, should eventually be deprecated
            channel.fitsimage = bnch.image_viewer
            channel.opmon = opmon
            # Channels are keyed case-insensitively (lower-cased name).
            name = chname.lower()
            self.channel[name] = channel
            # Update the channels control
            self.channel_names.append(chname)
            self.channel_names.sort()
            # The first channel created becomes the current one.
            if len(self.channel_names) == 1:
                self.cur_channel = channel
            # Prepare local plugins for this channel
            for spec in self.get_plugins():
                opname = spec.get('klass', spec.get('module'))
                if spec.get('ptype', 'global') == 'local':
                    opmon.load_plugin(opname, spec, chinfo=channel)
            self.make_gui_callback('add-channel', channel)
            return channel
"def",
"add_channel",
"(",
"self",
",",
"chname",
",",
"workspace",
"=",
"None",
",",
"num_images",
"=",
"None",
",",
"settings",
"=",
"None",
",",
"settings_template",
"=",
"None",
",",
"settings_share",
"=",
"None",
",",
"share_keylist",
"=",
"None",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"self",
".",
"has_channel",
"(",
"chname",
")",
":",
"return",
"self",
".",
"get_channel",
"(",
"chname",
")",
"if",
"chname",
"in",
"self",
".",
"ds",
".",
"get_tabnames",
"(",
"None",
")",
":",
"raise",
"ValueError",
"(",
"\"Tab name already in use: '%s'\"",
"%",
"(",
"chname",
")",
")",
"name",
"=",
"chname",
"if",
"settings",
"is",
"None",
":",
"settings",
"=",
"self",
".",
"prefs",
".",
"create_category",
"(",
"'channel_'",
"+",
"name",
")",
"try",
":",
"settings",
".",
"load",
"(",
"onError",
"=",
"'raise'",
")",
"except",
"Exception",
"as",
"e",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"no saved preferences found for channel \"",
"\"'%s': %s\"",
"%",
"(",
"name",
",",
"str",
"(",
"e",
")",
")",
")",
"# copy template settings to new channel",
"if",
"settings_template",
"is",
"not",
"None",
":",
"osettings",
"=",
"settings_template",
"osettings",
".",
"copy_settings",
"(",
"settings",
")",
"else",
":",
"try",
":",
"# use channel_Image as a template if one was not",
"# provided",
"osettings",
"=",
"self",
".",
"prefs",
".",
"get_settings",
"(",
"'channel_Image'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Copying settings from 'Image' to \"",
"\"'%s'\"",
"%",
"(",
"name",
")",
")",
"osettings",
".",
"copy_settings",
"(",
"settings",
")",
"except",
"KeyError",
":",
"pass",
"if",
"(",
"share_keylist",
"is",
"not",
"None",
")",
"and",
"(",
"settings_share",
"is",
"not",
"None",
")",
":",
"# caller wants us to share settings with another viewer",
"settings_share",
".",
"share_settings",
"(",
"settings",
",",
"keylist",
"=",
"share_keylist",
")",
"# Make sure these preferences are at least defined",
"if",
"num_images",
"is",
"None",
":",
"num_images",
"=",
"settings",
".",
"get",
"(",
"'numImages'",
",",
"self",
".",
"settings",
".",
"get",
"(",
"'numImages'",
",",
"1",
")",
")",
"settings",
".",
"set_defaults",
"(",
"switchnew",
"=",
"True",
",",
"numImages",
"=",
"num_images",
",",
"raisenew",
"=",
"True",
",",
"genthumb",
"=",
"True",
",",
"focus_indicator",
"=",
"False",
",",
"preload_images",
"=",
"False",
",",
"sort_order",
"=",
"'loadtime'",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"\"Adding channel '%s'\"",
"%",
"(",
"chname",
")",
")",
"channel",
"=",
"Channel",
"(",
"chname",
",",
"self",
",",
"datasrc",
"=",
"None",
",",
"settings",
"=",
"settings",
")",
"bnch",
"=",
"self",
".",
"add_viewer",
"(",
"chname",
",",
"settings",
",",
"workspace",
"=",
"workspace",
")",
"# for debugging",
"bnch",
".",
"image_viewer",
".",
"set_name",
"(",
"'channel:%s'",
"%",
"(",
"chname",
")",
")",
"opmon",
"=",
"self",
".",
"get_plugin_manager",
"(",
"self",
".",
"logger",
",",
"self",
",",
"self",
".",
"ds",
",",
"self",
".",
"mm",
")",
"channel",
".",
"widget",
"=",
"bnch",
".",
"widget",
"channel",
".",
"container",
"=",
"bnch",
".",
"container",
"channel",
".",
"workspace",
"=",
"bnch",
".",
"workspace",
"channel",
".",
"connect_viewer",
"(",
"bnch",
".",
"image_viewer",
")",
"channel",
".",
"viewer",
"=",
"bnch",
".",
"image_viewer",
"# older name, should eventually be deprecated",
"channel",
".",
"fitsimage",
"=",
"bnch",
".",
"image_viewer",
"channel",
".",
"opmon",
"=",
"opmon",
"name",
"=",
"chname",
".",
"lower",
"(",
")",
"self",
".",
"channel",
"[",
"name",
"]",
"=",
"channel",
"# Update the channels control",
"self",
".",
"channel_names",
".",
"append",
"(",
"chname",
")",
"self",
".",
"channel_names",
".",
"sort",
"(",
")",
"if",
"len",
"(",
"self",
".",
"channel_names",
")",
"==",
"1",
":",
"self",
".",
"cur_channel",
"=",
"channel",
"# Prepare local plugins for this channel",
"for",
"spec",
"in",
"self",
".",
"get_plugins",
"(",
")",
":",
"opname",
"=",
"spec",
".",
"get",
"(",
"'klass'",
",",
"spec",
".",
"get",
"(",
"'module'",
")",
")",
"if",
"spec",
".",
"get",
"(",
"'ptype'",
",",
"'global'",
")",
"==",
"'local'",
":",
"opmon",
".",
"load_plugin",
"(",
"opname",
",",
"spec",
",",
"chinfo",
"=",
"channel",
")",
"self",
".",
"make_gui_callback",
"(",
"'add-channel'",
",",
"channel",
")",
"return",
"channel"
] | 39.789916 | 21.07563 |
def to_dict(self):
        """
        Return this ModuleDoc as a dict. In addition to `CommentDoc` defaults,
        this has:

        - **name**: The module name.
        - **dependencies**: A list of immediate dependencies.
        - **all_dependencies**: A list of all dependencies.
        """
        # Renamed from `vars`, which shadowed the builtin of the same name.
        result = super(ModuleDoc, self).to_dict()
        result['dependencies'] = self.dependencies
        result['name'] = self.name
        try:
            # Copy so callers cannot mutate the stored dependency list.
            result['all_dependencies'] = self.all_dependencies[:]
        except AttributeError:
            # all_dependencies has not been computed/assigned yet.
            result['all_dependencies'] = []
        return result
"def",
"to_dict",
"(",
"self",
")",
":",
"vars",
"=",
"super",
"(",
"ModuleDoc",
",",
"self",
")",
".",
"to_dict",
"(",
")",
"vars",
"[",
"'dependencies'",
"]",
"=",
"self",
".",
"dependencies",
"vars",
"[",
"'name'",
"]",
"=",
"self",
".",
"name",
"try",
":",
"vars",
"[",
"'all_dependencies'",
"]",
"=",
"self",
".",
"all_dependencies",
"[",
":",
"]",
"except",
"AttributeError",
":",
"vars",
"[",
"'all_dependencies'",
"]",
"=",
"[",
"]",
"return",
"vars"
] | 35.058824 | 16.352941 |
def known(self, object):
        """Get the type specified in the object's metadata.

        Args:
            object: Any object; it may carry a ``__metadata__`` attribute
                whose ``sxtype`` field names the type.

        Returns:
            The value of ``object.__metadata__.sxtype``, or ``None`` when
            the object has no metadata or the metadata has no ``sxtype``.
        """
        # Catch only the expected failure mode (missing attribute) rather
        # than a bare ``except``, which would also hide unrelated bugs.
        try:
            return object.__metadata__.sxtype
        except AttributeError:
            return None
"def",
"known",
"(",
"self",
",",
"object",
")",
":",
"try",
":",
"md",
"=",
"object",
".",
"__metadata__",
"known",
"=",
"md",
".",
"sxtype",
"return",
"known",
"except",
":",
"pass"
] | 27.375 | 15.5 |
def get_distance(F, x):
    """Return the pairwise Euclidean distance matrix of the rows of *x*.

    Helper for margin-based loss.  Uses the expansion
    ``||a - b||^2 = ||a||^2 + ||b||^2 - 2*a.b``, then adds the identity
    matrix before taking the square root (the diagonal would otherwise
    be zero, which the original notes breaks ``sqrt``).
    """
    n = x.shape[0]
    row_sq_norms = F.sum(x ** 2.0, axis=1, keepdims=True)
    gram = F.dot(x, x.transpose())
    sq_distances = row_sq_norms + row_sq_norms.transpose() - (2.0 * gram)
    # Adding identity to make sqrt work.
    return F.sqrt(sq_distances + F.array(np.identity(n)))
return F.sqrt(distance_square + F.array(np.identity(n))) | [
"def",
"get_distance",
"(",
"F",
",",
"x",
")",
":",
"n",
"=",
"x",
".",
"shape",
"[",
"0",
"]",
"square",
"=",
"F",
".",
"sum",
"(",
"x",
"**",
"2.0",
",",
"axis",
"=",
"1",
",",
"keepdims",
"=",
"True",
")",
"distance_square",
"=",
"square",
"+",
"square",
".",
"transpose",
"(",
")",
"-",
"(",
"2.0",
"*",
"F",
".",
"dot",
"(",
"x",
",",
"x",
".",
"transpose",
"(",
")",
")",
")",
"# Adding identity to make sqrt work.",
"return",
"F",
".",
"sqrt",
"(",
"distance_square",
"+",
"F",
".",
"array",
"(",
"np",
".",
"identity",
"(",
"n",
")",
")",
")"
] | 40.444444 | 21.444444 |
def decode(self, val):
        """Decode *val*, trying the custom date decoder before plain JSON.

        ``decode_date`` returning anything different from its input means
        it recognised a date value; that result wins.  Otherwise decoding
        is delegated to the stock :class:`json.JSONDecoder`.
        """
        decoded = self.decode_date(val)
        if decoded != val:
            # The date decoder transformed the value -- use its result.
            return decoded
        # Not a date: fall back to standard JSON decoding.
        return json.JSONDecoder.decode(self, val)
"def",
"decode",
"(",
"self",
",",
"val",
")",
":",
"# First try the date decoder.",
"new_val",
"=",
"self",
".",
"decode_date",
"(",
"val",
")",
"if",
"val",
"!=",
"new_val",
":",
"return",
"new_val",
"# Fall back to the default decoder.",
"return",
"json",
".",
"JSONDecoder",
".",
"decode",
"(",
"self",
",",
"val",
")"
] | 36.375 | 9.25 |
def uninstall(ctx, state, all_dev=False, all=False, **kwargs):
    """Un-installs a provided package and removes it from Pipfile."""
    # Import deferred to call time, as in the original module layout.
    from ..core import do_uninstall

    exit_code = do_uninstall(
        packages=state.installstate.packages,
        editable_packages=state.installstate.editables,
        three=state.three,
        python=state.python,
        system=state.system,
        lock=not state.installstate.skip_lock,
        all_dev=all_dev,
        all=all,
        keep_outdated=state.installstate.keep_outdated,
        pypi_mirror=state.pypi_mirror,
        ctx=ctx
    )
    # A truthy return code means failure; propagate it as the exit status.
    if exit_code:
        sys.exit(exit_code)
"def",
"uninstall",
"(",
"ctx",
",",
"state",
",",
"all_dev",
"=",
"False",
",",
"all",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
".",
".",
"core",
"import",
"do_uninstall",
"retcode",
"=",
"do_uninstall",
"(",
"packages",
"=",
"state",
".",
"installstate",
".",
"packages",
",",
"editable_packages",
"=",
"state",
".",
"installstate",
".",
"editables",
",",
"three",
"=",
"state",
".",
"three",
",",
"python",
"=",
"state",
".",
"python",
",",
"system",
"=",
"state",
".",
"system",
",",
"lock",
"=",
"not",
"state",
".",
"installstate",
".",
"skip_lock",
",",
"all_dev",
"=",
"all_dev",
",",
"all",
"=",
"all",
",",
"keep_outdated",
"=",
"state",
".",
"installstate",
".",
"keep_outdated",
",",
"pypi_mirror",
"=",
"state",
".",
"pypi_mirror",
",",
"ctx",
"=",
"ctx",
")",
"if",
"retcode",
":",
"sys",
".",
"exit",
"(",
"retcode",
")"
] | 26.25 | 18.375 |
def render_image(self, rgbobj, dst_x, dst_y):
        """Draw *rgbobj* into the offscreen pixmap at (dst_x, dst_y).

        Returns whatever ``_render_offscreen`` returns, or None when no
        pixmap exists yet.
        """
        self.logger.debug("redraw pixmap=%s" % (self.pixmap))
        # No offscreen pixmap to draw into yet -- nothing to do.
        if self.pixmap is None:
            return
        self.logger.debug("drawing to pixmap")
        # Convert the RGB object to a uint8 array in this renderer's
        # channel order before blitting.
        data = rgbobj.get_array(self.rgb_order, dtype=np.uint8)
        height, width = data.shape[:2]
        return self._render_offscreen(self.pixmap, data, dst_x, dst_y,
                                      width, height)
"def",
"render_image",
"(",
"self",
",",
"rgbobj",
",",
"dst_x",
",",
"dst_y",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"redraw pixmap=%s\"",
"%",
"(",
"self",
".",
"pixmap",
")",
")",
"if",
"self",
".",
"pixmap",
"is",
"None",
":",
"return",
"self",
".",
"logger",
".",
"debug",
"(",
"\"drawing to pixmap\"",
")",
"# Prepare array for rendering",
"arr",
"=",
"rgbobj",
".",
"get_array",
"(",
"self",
".",
"rgb_order",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
"(",
"height",
",",
"width",
")",
"=",
"arr",
".",
"shape",
"[",
":",
"2",
"]",
"return",
"self",
".",
"_render_offscreen",
"(",
"self",
".",
"pixmap",
",",
"arr",
",",
"dst_x",
",",
"dst_y",
",",
"width",
",",
"height",
")"
] | 38 | 14.533333 |
def next_frame_basic_stochastic_discrete():
  """Basic 2-frame conv model with stochastic discrete latent."""
  hparams = basic_deterministic_params.next_frame_sampling()
  # Override hyperparameters that already exist on the base config.
  overrides = [
      ("batch_size", 4),
      ("video_num_target_frames", 6),
      ("scheduled_sampling_mode", "prob_inverse_lin"),
      ("scheduled_sampling_decay_steps", 40000),
      ("scheduled_sampling_max_prob", 1.0),
      ("dropout", 0.15),
      ("filter_double_steps", 3),
      ("hidden_size", 96),
      ("learning_rate_constant", 0.002),
      ("learning_rate_warmup_steps", 2000),
      ("learning_rate_schedule", "linear_warmup * constant"),
      ("concat_internal_states", True),
      ("video_modality_loss_cutoff", 0.03),
  ]
  for key, value in overrides:
    setattr(hparams, key, value)
  # Register hyperparameters that are new for this model variant.
  additions = [
      ("bottleneck_bits", 128),
      ("bottleneck_noise", 0.1),
      ("discretize_warmup_steps", 40000),
      ("latent_rnn_warmup_steps", 40000),
      ("latent_rnn_max_sampling", 0.5),
      ("latent_use_max_probability", 0.8),
      ("full_latent_tower", False),
      ("latent_predictor_state_size", 128),
      ("latent_predictor_temperature", 1.0),
      ("complex_addn", True),
      ("recurrent_state_size", 64),
  ]
  for key, value in additions:
    hparams.add_hparam(key, value)
  return hparams
"def",
"next_frame_basic_stochastic_discrete",
"(",
")",
":",
"hparams",
"=",
"basic_deterministic_params",
".",
"next_frame_sampling",
"(",
")",
"hparams",
".",
"batch_size",
"=",
"4",
"hparams",
".",
"video_num_target_frames",
"=",
"6",
"hparams",
".",
"scheduled_sampling_mode",
"=",
"\"prob_inverse_lin\"",
"hparams",
".",
"scheduled_sampling_decay_steps",
"=",
"40000",
"hparams",
".",
"scheduled_sampling_max_prob",
"=",
"1.0",
"hparams",
".",
"dropout",
"=",
"0.15",
"hparams",
".",
"filter_double_steps",
"=",
"3",
"hparams",
".",
"hidden_size",
"=",
"96",
"hparams",
".",
"learning_rate_constant",
"=",
"0.002",
"hparams",
".",
"learning_rate_warmup_steps",
"=",
"2000",
"hparams",
".",
"learning_rate_schedule",
"=",
"\"linear_warmup * constant\"",
"hparams",
".",
"concat_internal_states",
"=",
"True",
"hparams",
".",
"video_modality_loss_cutoff",
"=",
"0.03",
"hparams",
".",
"add_hparam",
"(",
"\"bottleneck_bits\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"bottleneck_noise\"",
",",
"0.1",
")",
"hparams",
".",
"add_hparam",
"(",
"\"discretize_warmup_steps\"",
",",
"40000",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_rnn_warmup_steps\"",
",",
"40000",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_rnn_max_sampling\"",
",",
"0.5",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_use_max_probability\"",
",",
"0.8",
")",
"hparams",
".",
"add_hparam",
"(",
"\"full_latent_tower\"",
",",
"False",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_predictor_state_size\"",
",",
"128",
")",
"hparams",
".",
"add_hparam",
"(",
"\"latent_predictor_temperature\"",
",",
"1.0",
")",
"hparams",
".",
"add_hparam",
"(",
"\"complex_addn\"",
",",
"True",
")",
"hparams",
".",
"add_hparam",
"(",
"\"recurrent_state_size\"",
",",
"64",
")",
"return",
"hparams"
] | 44.785714 | 9.678571 |
def get_users(self):
        """
        Read all user records from the device.

        Fetches the raw user buffer (CMD_USERTEMP_RRQ / FCT_USER) and
        parses it as fixed-size records of either 28 or 72 bytes,
        depending on the per-record size the device reports.  Also
        updates :attr:`next_uid` and :attr:`next_user_id` to the next
        free values.

        :return: list of User object
        """
        self.read_sizes()
        if self.users == 0:
            # Device reports no users; reset the "next" counters.
            self.next_uid = 1
            self.next_user_id='1'
            return []
        users = []
        max_uid = 0
        userdata, size = self.read_with_buffer(const.CMD_USERTEMP_RRQ, const.FCT_USER)
        if self.verbose: print("user size {} (= {})".format(size, len(userdata)))
        if size <= 4:
            # The first 4 bytes are a size header; anything smaller
            # carries no user records.
            print("WRN: missing user data")
            return []
        total_size = unpack("I",userdata[:4])[0]
        # Per-record size: 28 (short records) or 72 (long records).
        self.user_packet_size = total_size / self.users
        if not self.user_packet_size in [28, 72]:
            if self.verbose: print("WRN packet size would be %i" % self.user_packet_size)
        userdata = userdata[4:]
        if self.user_packet_size == 28:
            while len(userdata) >= 28:
                # <HB5s8sIxBhI: uid, privilege, password, name, card,
                # (pad), group_id, timezone, user_id
                uid, privilege, password, name, card, group_id, timezone, user_id = unpack('<HB5s8sIxBhI',userdata.ljust(28, b'\x00')[:28])
                if uid > max_uid: max_uid = uid
                # Fixed-width fields are NUL-padded; cut at the first NUL
                # and decode with the device encoding.
                password = (password.split(b'\x00')[0]).decode(self.encoding, errors='ignore')
                name = (name.split(b'\x00')[0]).decode(self.encoding, errors='ignore').strip()
                group_id = str(group_id)
                user_id = str(user_id)
                #TODO: check card value and find in ver8
                if not name:
                    # Fall back to a synthetic name based on the user id.
                    name = "NN-%s" % user_id
                user = User(uid, name, privilege, password, group_id, user_id, card)
                users.append(user)
                if self.verbose: print("[6]user:",uid, privilege, password, name, card, group_id, timezone, user_id)
                userdata = userdata[28:]
        else:
            while len(userdata) >= 72:
                # <HB8s24sIx7sx24s: uid, privilege, password, name, card,
                # (pad), group_id, (pad), user_id
                uid, privilege, password, name, card, group_id, user_id = unpack('<HB8s24sIx7sx24s', userdata.ljust(72, b'\x00')[:72])
                password = (password.split(b'\x00')[0]).decode(self.encoding, errors='ignore')
                name = (name.split(b'\x00')[0]).decode(self.encoding, errors='ignore').strip()
                group_id = (group_id.split(b'\x00')[0]).decode(self.encoding, errors='ignore').strip()
                user_id = (user_id.split(b'\x00')[0]).decode(self.encoding, errors='ignore')
                if uid > max_uid: max_uid = uid
                if not name:
                    name = "NN-%s" % user_id
                user = User(uid, name, privilege, password, group_id, user_id, card)
                users.append(user)
                userdata = userdata[72:]
        # Advance next_uid past the highest uid seen, then bump
        # next_user_id until it does not collide with an existing user_id.
        max_uid += 1
        self.next_uid = max_uid
        self.next_user_id = str(max_uid)
        while True:
            if any(u for u in users if u.user_id == self.next_user_id):
                max_uid += 1
                self.next_user_id = str(max_uid)
            else:
                break
        return users
"def",
"get_users",
"(",
"self",
")",
":",
"self",
".",
"read_sizes",
"(",
")",
"if",
"self",
".",
"users",
"==",
"0",
":",
"self",
".",
"next_uid",
"=",
"1",
"self",
".",
"next_user_id",
"=",
"'1'",
"return",
"[",
"]",
"users",
"=",
"[",
"]",
"max_uid",
"=",
"0",
"userdata",
",",
"size",
"=",
"self",
".",
"read_with_buffer",
"(",
"const",
".",
"CMD_USERTEMP_RRQ",
",",
"const",
".",
"FCT_USER",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"user size {} (= {})\"",
".",
"format",
"(",
"size",
",",
"len",
"(",
"userdata",
")",
")",
")",
"if",
"size",
"<=",
"4",
":",
"print",
"(",
"\"WRN: missing user data\"",
")",
"return",
"[",
"]",
"total_size",
"=",
"unpack",
"(",
"\"I\"",
",",
"userdata",
"[",
":",
"4",
"]",
")",
"[",
"0",
"]",
"self",
".",
"user_packet_size",
"=",
"total_size",
"/",
"self",
".",
"users",
"if",
"not",
"self",
".",
"user_packet_size",
"in",
"[",
"28",
",",
"72",
"]",
":",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"WRN packet size would be %i\"",
"%",
"self",
".",
"user_packet_size",
")",
"userdata",
"=",
"userdata",
"[",
"4",
":",
"]",
"if",
"self",
".",
"user_packet_size",
"==",
"28",
":",
"while",
"len",
"(",
"userdata",
")",
">=",
"28",
":",
"uid",
",",
"privilege",
",",
"password",
",",
"name",
",",
"card",
",",
"group_id",
",",
"timezone",
",",
"user_id",
"=",
"unpack",
"(",
"'<HB5s8sIxBhI'",
",",
"userdata",
".",
"ljust",
"(",
"28",
",",
"b'\\x00'",
")",
"[",
":",
"28",
"]",
")",
"if",
"uid",
">",
"max_uid",
":",
"max_uid",
"=",
"uid",
"password",
"=",
"(",
"password",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
"name",
"=",
"(",
"name",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
".",
"strip",
"(",
")",
"group_id",
"=",
"str",
"(",
"group_id",
")",
"user_id",
"=",
"str",
"(",
"user_id",
")",
"#TODO: check card value and find in ver8",
"if",
"not",
"name",
":",
"name",
"=",
"\"NN-%s\"",
"%",
"user_id",
"user",
"=",
"User",
"(",
"uid",
",",
"name",
",",
"privilege",
",",
"password",
",",
"group_id",
",",
"user_id",
",",
"card",
")",
"users",
".",
"append",
"(",
"user",
")",
"if",
"self",
".",
"verbose",
":",
"print",
"(",
"\"[6]user:\"",
",",
"uid",
",",
"privilege",
",",
"password",
",",
"name",
",",
"card",
",",
"group_id",
",",
"timezone",
",",
"user_id",
")",
"userdata",
"=",
"userdata",
"[",
"28",
":",
"]",
"else",
":",
"while",
"len",
"(",
"userdata",
")",
">=",
"72",
":",
"uid",
",",
"privilege",
",",
"password",
",",
"name",
",",
"card",
",",
"group_id",
",",
"user_id",
"=",
"unpack",
"(",
"'<HB8s24sIx7sx24s'",
",",
"userdata",
".",
"ljust",
"(",
"72",
",",
"b'\\x00'",
")",
"[",
":",
"72",
"]",
")",
"password",
"=",
"(",
"password",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
"name",
"=",
"(",
"name",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
".",
"strip",
"(",
")",
"group_id",
"=",
"(",
"group_id",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
".",
"strip",
"(",
")",
"user_id",
"=",
"(",
"user_id",
".",
"split",
"(",
"b'\\x00'",
")",
"[",
"0",
"]",
")",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'ignore'",
")",
"if",
"uid",
">",
"max_uid",
":",
"max_uid",
"=",
"uid",
"if",
"not",
"name",
":",
"name",
"=",
"\"NN-%s\"",
"%",
"user_id",
"user",
"=",
"User",
"(",
"uid",
",",
"name",
",",
"privilege",
",",
"password",
",",
"group_id",
",",
"user_id",
",",
"card",
")",
"users",
".",
"append",
"(",
"user",
")",
"userdata",
"=",
"userdata",
"[",
"72",
":",
"]",
"max_uid",
"+=",
"1",
"self",
".",
"next_uid",
"=",
"max_uid",
"self",
".",
"next_user_id",
"=",
"str",
"(",
"max_uid",
")",
"while",
"True",
":",
"if",
"any",
"(",
"u",
"for",
"u",
"in",
"users",
"if",
"u",
".",
"user_id",
"==",
"self",
".",
"next_user_id",
")",
":",
"max_uid",
"+=",
"1",
"self",
".",
"next_user_id",
"=",
"str",
"(",
"max_uid",
")",
"else",
":",
"break",
"return",
"users"
] | 48.559322 | 22.186441 |
def register(self, command: str, handler: Any):
    """Attach a callback to a slash command.

    Args:
        command: Slash command name; a leading "/" is prepended if missing.
        handler: Callable to invoke when the command is received.
    """
    normalized = command if command.startswith("/") else f"/{command}"
    LOG.info("Registering %s to %s", normalized, handler)
    self._routes[normalized].append(handler)
"def",
"register",
"(",
"self",
",",
"command",
":",
"str",
",",
"handler",
":",
"Any",
")",
":",
"if",
"not",
"command",
".",
"startswith",
"(",
"\"/\"",
")",
":",
"command",
"=",
"f\"/{command}\"",
"LOG",
".",
"info",
"(",
"\"Registering %s to %s\"",
",",
"command",
",",
"handler",
")",
"self",
".",
"_routes",
"[",
"command",
"]",
".",
"append",
"(",
"handler",
")"
] | 27.214286 | 15.642857 |
def _find_orm(self, pnq):
    """Build an ORM query for partitions matching a PartitionNameQuery.

    A SQLAlchemy query over OrmPartition is returned (not materialized),
    so the caller can fetch ORM objects and persist changes to them.

    Args:
        pnq: PartitionNameQuery describing the partition(s) to match.

    Returns:
        SQLAlchemy Query over OrmPartition, restricted to this bundle's
        dataset and ordered by vid then segment, with the related table
        eagerly loaded.

    Raises:
        ValueError: if ``pnq.table`` names a table not found in this bundle.
    """
    from ambry.orm import Partition as OrmPartition  # , Table
    from sqlalchemy.orm import joinedload  # , joinedload_all

    assert isinstance(pnq, PartitionNameQuery), "Expected PartitionNameQuery, got {}".format(type(pnq))

    pnq = pnq.with_none()

    q = self.bundle.dataset._database.session.query(OrmPartition)

    # Identity fields are checked most-specific first; only the first
    # non-ANY one narrows the query. Component fields (time/space/...)
    # are only consulted when no identity field is given.
    if pnq.fqname is not NameQuery.ANY:
        q = q.filter(OrmPartition.fqname == pnq.fqname)
    elif pnq.vname is not NameQuery.ANY:
        q = q.filter(OrmPartition.vname == pnq.vname)
    elif pnq.name is not NameQuery.ANY:
        q = q.filter(OrmPartition.name == str(pnq.name))
    else:
        if pnq.time is not NameQuery.ANY:
            q = q.filter(OrmPartition.time == pnq.time)

        if pnq.space is not NameQuery.ANY:
            q = q.filter(OrmPartition.space == pnq.space)

        if pnq.grain is not NameQuery.ANY:
            q = q.filter(OrmPartition.grain == pnq.grain)

        if pnq.format is not NameQuery.ANY:
            q = q.filter(OrmPartition.format == pnq.format)

        if pnq.segment is not NameQuery.ANY:
            q = q.filter(OrmPartition.segment == pnq.segment)

    if pnq.table is not NameQuery.ANY:
        if pnq.table is None:
            # BUG FIX: the original filtered on the Python expression
            # `OrmPartition.t_id is None`, which evaluates to the constant
            # False rather than emitting SQL `t_id IS NULL`. Use the
            # SQLAlchemy `is_()` operator to generate the NULL test.
            # NOTE(review): the other branch filters on t_vid — confirm
            # t_id is the intended column here.
            q = q.filter(OrmPartition.t_id.is_(None))
        else:
            tr = self.bundle.table(pnq.table)

            if not tr:
                raise ValueError("Didn't find table named {} in {} bundle path = {}".format(
                    pnq.table, pnq.vname, self.bundle.database.path))

            q = q.filter(OrmPartition.t_vid == tr.vid)

    ds = self.bundle.dataset

    q = q.filter(OrmPartition.d_vid == ds.vid)

    q = q.order_by(
        OrmPartition.vid.asc()).order_by(
        OrmPartition.segment.asc())

    q = q.options(joinedload(OrmPartition.table))

    return q
"def",
"_find_orm",
"(",
"self",
",",
"pnq",
")",
":",
"# import sqlalchemy.orm.exc",
"from",
"ambry",
".",
"orm",
"import",
"Partition",
"as",
"OrmPartition",
"# , Table",
"from",
"sqlalchemy",
".",
"orm",
"import",
"joinedload",
"# , joinedload_all",
"assert",
"isinstance",
"(",
"pnq",
",",
"PartitionNameQuery",
")",
",",
"\"Expected PartitionNameQuery, got {}\"",
".",
"format",
"(",
"type",
"(",
"pnq",
")",
")",
"pnq",
"=",
"pnq",
".",
"with_none",
"(",
")",
"q",
"=",
"self",
".",
"bundle",
".",
"dataset",
".",
"_database",
".",
"session",
".",
"query",
"(",
"OrmPartition",
")",
"if",
"pnq",
".",
"fqname",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"fqname",
"==",
"pnq",
".",
"fqname",
")",
"elif",
"pnq",
".",
"vname",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"vname",
"==",
"pnq",
".",
"vname",
")",
"elif",
"pnq",
".",
"name",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"name",
"==",
"str",
"(",
"pnq",
".",
"name",
")",
")",
"else",
":",
"if",
"pnq",
".",
"time",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"time",
"==",
"pnq",
".",
"time",
")",
"if",
"pnq",
".",
"space",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"space",
"==",
"pnq",
".",
"space",
")",
"if",
"pnq",
".",
"grain",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"grain",
"==",
"pnq",
".",
"grain",
")",
"if",
"pnq",
".",
"format",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"format",
"==",
"pnq",
".",
"format",
")",
"if",
"pnq",
".",
"segment",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"segment",
"==",
"pnq",
".",
"segment",
")",
"if",
"pnq",
".",
"table",
"is",
"not",
"NameQuery",
".",
"ANY",
":",
"if",
"pnq",
".",
"table",
"is",
"None",
":",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"t_id",
"is",
"None",
")",
"else",
":",
"tr",
"=",
"self",
".",
"bundle",
".",
"table",
"(",
"pnq",
".",
"table",
")",
"if",
"not",
"tr",
":",
"raise",
"ValueError",
"(",
"\"Didn't find table named {} in {} bundle path = {}\"",
".",
"format",
"(",
"pnq",
".",
"table",
",",
"pnq",
".",
"vname",
",",
"self",
".",
"bundle",
".",
"database",
".",
"path",
")",
")",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"t_vid",
"==",
"tr",
".",
"vid",
")",
"ds",
"=",
"self",
".",
"bundle",
".",
"dataset",
"q",
"=",
"q",
".",
"filter",
"(",
"OrmPartition",
".",
"d_vid",
"==",
"ds",
".",
"vid",
")",
"q",
"=",
"q",
".",
"order_by",
"(",
"OrmPartition",
".",
"vid",
".",
"asc",
"(",
")",
")",
".",
"order_by",
"(",
"OrmPartition",
".",
"segment",
".",
"asc",
"(",
")",
")",
"q",
"=",
"q",
".",
"options",
"(",
"joinedload",
"(",
"OrmPartition",
".",
"table",
")",
")",
"return",
"q"
] | 34.693548 | 23.580645 |
def init_host(self):
    """Push this object's connection settings into the global fabric env."""
    settings = (
        ("host_string", self.host_string),
        ("user", self.host_user),
        ("password", self.host_passwd),
        ("key_filename", self.host_keyfile),
    )
    for attr, value in settings:
        setattr(env, attr, value)
"def",
"init_host",
"(",
"self",
")",
":",
"env",
".",
"host_string",
"=",
"self",
".",
"host_string",
"env",
".",
"user",
"=",
"self",
".",
"host_user",
"env",
".",
"password",
"=",
"self",
".",
"host_passwd",
"env",
".",
"key_filename",
"=",
"self",
".",
"host_keyfile"
] | 28 | 6.25 |
def iter_compress(item_iter, flag_iter):
    """
    iter_compress - like numpy compress

    Lazily selects the items of ``item_iter`` whose corresponding flag in
    ``flag_iter`` is truthy, stopping at the shorter of the two inputs.

    Args:
        item_iter (list):
        flag_iter (list): of bools

    Returns:
        iterator: true_items

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_iter import *  # NOQA
        >>> item_iter = [1, 2, 3, 4, 5]
        >>> flag_iter = [False, True, True, False, True]
        >>> true_items = iter_compress(item_iter, flag_iter)
        >>> result = list(true_items)
        >>> print(result)
        [2, 3, 5]
    """
    # Resolves the old TODO: itertools.compress has exactly the same
    # truthy-selection semantics as the previous generator expression,
    # but iterates at C speed.
    from itertools import compress
    return compress(item_iter, flag_iter)
"def",
"iter_compress",
"(",
"item_iter",
",",
"flag_iter",
")",
":",
"# TODO: Just use it.compress",
"true_items",
"=",
"(",
"item",
"for",
"(",
"item",
",",
"flag",
")",
"in",
"zip",
"(",
"item_iter",
",",
"flag_iter",
")",
"if",
"flag",
")",
"return",
"true_items"
] | 27.083333 | 17 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.