text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def check_all(self, all_entries, *args, **kwargs):
"""
Go through lists of entries, find overlaps among each, return the total
"""
all_overlaps = 0
while True:
try:
user_entries = all_entries.next()
except StopIteration:
return all_overlaps
else:
user_total_overlaps = self.check_entry(
user_entries, *args, **kwargs)
all_overlaps += user_total_overlaps | [
"def",
"check_all",
"(",
"self",
",",
"all_entries",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"all_overlaps",
"=",
"0",
"while",
"True",
":",
"try",
":",
"user_entries",
"=",
"all_entries",
".",
"next",
"(",
")",
"except",
"StopIteration",
... | 35.714286 | 13.571429 |
async def connect(self):
"""
Connects to the voice channel associated with this Player.
"""
await self.node.join_voice_channel(self.channel.guild.id, self.channel.id) | [
"async",
"def",
"connect",
"(",
"self",
")",
":",
"await",
"self",
".",
"node",
".",
"join_voice_channel",
"(",
"self",
".",
"channel",
".",
"guild",
".",
"id",
",",
"self",
".",
"channel",
".",
"id",
")"
] | 38.8 | 16.8 |
def get_version(version):
"""Dynamically calculate the version based on VERSION tuple."""
if len(version) > 2 and version[2] is not None:
if isinstance(version[2], int):
str_version = "%s.%s.%s" % version[:3]
else:
str_version = "%s.%s_%s" % version[:3]
else:
str_version = "%s.%s" % version[:2]
return str_version | [
"def",
"get_version",
"(",
"version",
")",
":",
"if",
"len",
"(",
"version",
")",
">",
"2",
"and",
"version",
"[",
"2",
"]",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"version",
"[",
"2",
"]",
",",
"int",
")",
":",
"str_version",
"=",
"\... | 33.545455 | 15.090909 |
def add_weights(self, name, nin, nout, mean=0, std=0, sparsity=0, diagonal=0):
'''Helper method to create a new weight matrix.
Parameters
----------
name : str
Name of the parameter to add.
nin : int
Size of "input" for this weight matrix.
nout : int
Size of "output" for this weight matrix.
mean : float, optional
Mean value for randomly-initialized weights. Defaults to 0.
std : float, optional
Standard deviation of initial matrix values. Defaults to
:math:`1 / sqrt(n_i + n_o)`.
sparsity : float, optional
Fraction of weights to be set to zero. Defaults to 0.
diagonal : float, optional
Initialize weights to a matrix of zeros with this value along the
diagonal. Defaults to None, which initializes all weights randomly.
'''
glorot = 1 / np.sqrt(nin + nout)
m = self.kwargs.get(
'mean_{}'.format(name), self.kwargs.get('mean', mean))
s = self.kwargs.get(
'std_{}'.format(name), self.kwargs.get('std', std or glorot))
p = self.kwargs.get(
'sparsity_{}'.format(name), self.kwargs.get('sparsity', sparsity))
d = self.kwargs.get(
'diagonal_{}'.format(name), self.kwargs.get('diagonal', diagonal))
self._params.append(theano.shared(
util.random_matrix(nin, nout, mean=m, std=s, sparsity=p,
diagonal=d, rng=self.rng),
name=self._fmt(name))) | [
"def",
"add_weights",
"(",
"self",
",",
"name",
",",
"nin",
",",
"nout",
",",
"mean",
"=",
"0",
",",
"std",
"=",
"0",
",",
"sparsity",
"=",
"0",
",",
"diagonal",
"=",
"0",
")",
":",
"glorot",
"=",
"1",
"/",
"np",
".",
"sqrt",
"(",
"nin",
"+",... | 44.342857 | 19.6 |
def envs(ignore_cache=False):
'''
Return a list of refs that can be used as environments
'''
if not ignore_cache:
env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p')
cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
if cache_match is not None:
return cache_match
ret = set()
for repo in init():
trunk = os.path.join(repo['repo'], repo['trunk'])
if os.path.isdir(trunk):
# Add base as the env for trunk
ret.add('base')
else:
log.error(
'svnfs trunk path \'%s\' does not exist in repo %s, no base '
'environment will be provided by this remote',
repo['trunk'], repo['url']
)
branches = os.path.join(repo['repo'], repo['branches'])
if os.path.isdir(branches):
ret.update(os.listdir(branches))
else:
log.error(
'svnfs branches path \'%s\' does not exist in repo %s',
repo['branches'], repo['url']
)
tags = os.path.join(repo['repo'], repo['tags'])
if os.path.isdir(tags):
ret.update(os.listdir(tags))
else:
log.error(
'svnfs tags path \'%s\' does not exist in repo %s',
repo['tags'], repo['url']
)
return [x for x in sorted(ret) if _env_is_exposed(x)] | [
"def",
"envs",
"(",
"ignore_cache",
"=",
"False",
")",
":",
"if",
"not",
"ignore_cache",
":",
"env_cache",
"=",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'cachedir'",
"]",
",",
"'svnfs/envs.p'",
")",
"cache_match",
"=",
"salt",
".",
"fileser... | 35.125 | 19.175 |
def params(self):
""" Read self params from configuration. """
parser = JinjaInterpolationNamespace()
parser.read(self.configuration)
return dict(parser['params'] or {}) | [
"def",
"params",
"(",
"self",
")",
":",
"parser",
"=",
"JinjaInterpolationNamespace",
"(",
")",
"parser",
".",
"read",
"(",
"self",
".",
"configuration",
")",
"return",
"dict",
"(",
"parser",
"[",
"'params'",
"]",
"or",
"{",
"}",
")"
] | 39.4 | 6.6 |
def create_sparse_dtm(vocab, doc_labels, docs_terms, sum_uniques_per_doc):
"""
Create a sparse document-term-matrix (DTM) as scipy "coo_matrix" from vocabulary array `vocab`, document
IDs/labels array `doc_labels`, dict of doc_label -> document terms `docs_terms` and the sum of unique terms
per document `sum_uniques_per_doc`.
The DTM's rows are document names, its columns are indices in `vocab`, hence a value `DTM[j, k]` is the
term frequency of term `vocab[k]` in `docnames[j]`.
Memory requirement: about 3 * <sum_uniques_per_doc>.
"""
vocab_sorter = np.argsort(vocab) # indices that sort <vocab>
nvocab = len(vocab)
ndocs = len(doc_labels)
# create arrays for sparse matrix
data = np.empty(sum_uniques_per_doc, dtype=np.intc) # all non-zero term frequencies at data[k]
cols = np.empty(sum_uniques_per_doc, dtype=np.intc) # column index for kth data item (kth term freq.)
rows = np.empty(sum_uniques_per_doc, dtype=np.intc) # row index for kth data item (kth term freq.)
ind = 0 # current index in the sparse matrix data
# go through all documents with their terms
for i, (doc_label, terms) in enumerate(docs_terms.items()):
if len(terms) == 0: continue # skip empty documents
# find indices into `vocab` such that, if the corresponding elements in `terms` were
# inserted before the indices, the order of `vocab` would be preserved
# -> array of indices of `terms` in `vocab`
term_indices = vocab_sorter[np.searchsorted(vocab, terms, sorter=vocab_sorter)]
# count the unique terms of the document and get their vocabulary indices
uniq_indices, counts = np.unique(term_indices, return_counts=True)
n_vals = len(uniq_indices)
ind_end = ind + n_vals
data[ind:ind_end] = counts # save the counts (term frequencies)
cols[ind:ind_end] = uniq_indices # save the column index: index in <vocab>
doc_idx = np.where(doc_labels == doc_label) # get the document index for the document name
assert len(doc_idx) == 1
rows[ind:ind_end] = np.repeat(doc_idx, n_vals) # save it as repeated value
ind = ind_end
assert ind == len(data)
return coo_matrix((data, (rows, cols)), shape=(ndocs, nvocab), dtype=np.intc) | [
"def",
"create_sparse_dtm",
"(",
"vocab",
",",
"doc_labels",
",",
"docs_terms",
",",
"sum_uniques_per_doc",
")",
":",
"vocab_sorter",
"=",
"np",
".",
"argsort",
"(",
"vocab",
")",
"# indices that sort <vocab>",
"nvocab",
"=",
"len",
"(",
"vocab",
")",
"ndocs",
... | 49.434783 | 32.173913 |
def hincr(self, name, key, amount=1):
"""
Increase the value of ``key`` in hash ``name`` by ``amount``. If no key
exists, the value will be initialized as ``amount``
Like **Redis.HINCR**
:param string name: the hash name
:param string key: the key name
:param int amount: increments
:return: the integer value of ``key`` in hash ``name``
:rtype: int
>>> ssdb.hincr('hash_2', 'key1', 7)
49
>>> ssdb.hincr('hash_2', 'key2', 3)
6
>>> ssdb.hincr('hash_2', 'key_not_exists', 101)
101
>>> ssdb.hincr('hash_not_exists', 'key_not_exists', 8848)
8848
"""
amount = get_integer('amount', amount)
return self.execute_command('hincr', name, key, amount) | [
"def",
"hincr",
"(",
"self",
",",
"name",
",",
"key",
",",
"amount",
"=",
"1",
")",
":",
"amount",
"=",
"get_integer",
"(",
"'amount'",
",",
"amount",
")",
"return",
"self",
".",
"execute_command",
"(",
"'hincr'",
",",
"name",
",",
"key",
",",
"amoun... | 34.5 | 18.333333 |
def run(self):
"""
The body of the tread: read lines and put them on the queue.
"""
try:
for line in iter(self._fd.readline, False):
if line is not None:
if self._althandler:
if self._althandler(line):
# If the althandler returns True
# then don't process this as usual
continue
self._queue.put(line)
if not line:
time.sleep(0.1)
except ValueError: # This can happen if we are closed during readline - TODO - better fix.
if not self._fd.closed:
raise | [
"def",
"run",
"(",
"self",
")",
":",
"try",
":",
"for",
"line",
"in",
"iter",
"(",
"self",
".",
"_fd",
".",
"readline",
",",
"False",
")",
":",
"if",
"line",
"is",
"not",
"None",
":",
"if",
"self",
".",
"_althandler",
":",
"if",
"self",
".",
"_... | 39.444444 | 14.444444 |
def access_array(self, id_, lineno, scope=None, default_type=None):
"""
Called whenever an accessed variable is expected to be an array.
ZX BASIC requires arrays to be declared before usage, so they're
checked.
Also checks for class array.
"""
if not self.check_is_declared(id_, lineno, 'array', scope):
return None
if not self.check_class(id_, CLASS.array, lineno, scope):
return None
return self.access_id(id_, lineno, scope=scope, default_type=default_type) | [
"def",
"access_array",
"(",
"self",
",",
"id_",
",",
"lineno",
",",
"scope",
"=",
"None",
",",
"default_type",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"check_is_declared",
"(",
"id_",
",",
"lineno",
",",
"'array'",
",",
"scope",
")",
":",
"r... | 36.333333 | 24.466667 |
def set_size(self, size):
""" choose a preset size for the plot
:param size: 'small' for documents or 'large' for presentations
"""
if size == 'small':
self._set_size_small()
elif size == 'large':
self._set_size_large()
else:
raise ValueError('Size must be large or small') | [
"def",
"set_size",
"(",
"self",
",",
"size",
")",
":",
"if",
"size",
"==",
"'small'",
":",
"self",
".",
"_set_size_small",
"(",
")",
"elif",
"size",
"==",
"'large'",
":",
"self",
".",
"_set_size_large",
"(",
")",
"else",
":",
"raise",
"ValueError",
"("... | 34.8 | 12.8 |
def status(self, order_id):
'''Checks imagery order status. There can be more than one image per
order and this function returns the status of all images
within the order.
Args:
order_id (str): The id of the order placed.
Returns:
List of dictionaries, one per image. Each dictionary consists
of the keys 'acquisition_id', 'location' and 'state'.
'''
self.logger.debug('Get status of order ' + order_id)
url = '%(base_url)s/order/%(order_id)s' % {
'base_url': self.base_url, 'order_id': order_id
}
r = self.gbdx_connection.get(url)
r.raise_for_status()
return r.json().get("acquisitions", {}) | [
"def",
"status",
"(",
"self",
",",
"order_id",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Get status of order '",
"+",
"order_id",
")",
"url",
"=",
"'%(base_url)s/order/%(order_id)s'",
"%",
"{",
"'base_url'",
":",
"self",
".",
"base_url",
",",
"'o... | 37.05 | 23.25 |
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d['to_jimage'] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
for edges_to_remove in edges_to_remove:
self.graph.remove_edge(*edges_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d) | [
"def",
"sort",
"(",
"self",
",",
"key",
"=",
"None",
",",
"reverse",
"=",
"False",
")",
":",
"old_molecule",
"=",
"self",
".",
"molecule",
".",
"copy",
"(",
")",
"# sort Molecule",
"self",
".",
"molecule",
".",
"_sites",
"=",
"sorted",
"(",
"self",
"... | 35.25 | 18 |
def a_unexpected_prompt(ctx):
"""Provide message when received humphost prompt."""
prompt = ctx.ctrl.match.group(0)
ctx.msg = "Received the jump host prompt: '{}'".format(prompt)
ctx.device.connected = False
ctx.finished = True
raise ConnectionError("Unable to connect to the device.", ctx.ctrl.hostname) | [
"def",
"a_unexpected_prompt",
"(",
"ctx",
")",
":",
"prompt",
"=",
"ctx",
".",
"ctrl",
".",
"match",
".",
"group",
"(",
"0",
")",
"ctx",
".",
"msg",
"=",
"\"Received the jump host prompt: '{}'\"",
".",
"format",
"(",
"prompt",
")",
"ctx",
".",
"device",
... | 46 | 15.142857 |
def _set_server_ip(self, v, load=False):
"""
Setter method for server_ip, mapped from YANG variable /nas/server_ip (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_server_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_server_ip() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("server_ip",server_ip.server_ip, yang_name="server-ip", rest_name="server-ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='server-ip', extensions={u'tailf-common': {u'info': u'NAS server', u'cli-suppress-mode': None, u'cli-no-key-completion': None, u'callpoint': u'qos_nas_serverip', u'cli-suppress-list-no': None}}), is_container='list', yang_name="server-ip", rest_name="server-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NAS server', u'cli-suppress-mode': None, u'cli-no-key-completion': None, u'callpoint': u'qos_nas_serverip', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """server_ip must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("server_ip",server_ip.server_ip, yang_name="server-ip", rest_name="server-ip", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='server-ip', extensions={u'tailf-common': {u'info': u'NAS server', u'cli-suppress-mode': None, u'cli-no-key-completion': None, u'callpoint': u'qos_nas_serverip', u'cli-suppress-list-no': None}}), is_container='list', yang_name="server-ip", rest_name="server-ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NAS server', u'cli-suppress-mode': None, u'cli-no-key-completion': None, u'callpoint': u'qos_nas_serverip', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='list', is_config=True)""",
})
self.__server_ip = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_server_ip",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base... | 110.909091 | 52.636364 |
def check_format(format, subtype=None, endian=None):
"""Check if the combination of format/subtype/endian is valid.
Examples
--------
>>> import soundfile as sf
>>> sf.check_format('WAV', 'PCM_24')
True
>>> sf.check_format('FLAC', 'VORBIS')
False
"""
try:
return bool(_format_int(format, subtype, endian))
except (ValueError, TypeError):
return False | [
"def",
"check_format",
"(",
"format",
",",
"subtype",
"=",
"None",
",",
"endian",
"=",
"None",
")",
":",
"try",
":",
"return",
"bool",
"(",
"_format_int",
"(",
"format",
",",
"subtype",
",",
"endian",
")",
")",
"except",
"(",
"ValueError",
",",
"TypeEr... | 24.8125 | 18.5 |
def set_default_args(self, default_args):
"""Set default args for commands in collection.
Default args are used when the corresponding args aren't passed
on the command line or in a direct call.
"""
for name, args in default_args.items():
command = self[name]
command.default_args = default_args.get(command.name) or {} | [
"def",
"set_default_args",
"(",
"self",
",",
"default_args",
")",
":",
"for",
"name",
",",
"args",
"in",
"default_args",
".",
"items",
"(",
")",
":",
"command",
"=",
"self",
"[",
"name",
"]",
"command",
".",
"default_args",
"=",
"default_args",
".",
"get... | 37.6 | 16.6 |
def generateTarball(self, file_object):
''' Write a tarball of the current component/target to the file object
"file_object", which must already be open for writing at position 0
'''
archive_name = '%s-%s' % (self.getName(), self.getVersion())
def filterArchive(tarinfo):
if tarinfo.name.find(archive_name) == 0 :
unprefixed_name = tarinfo.name[len(archive_name)+1:]
tarinfo.mode &= 0o775
else:
unprefixed_name = tarinfo.name
if self.ignores(unprefixed_name):
return None
else:
return tarinfo
with tarfile.open(fileobj=file_object, mode='w:gz') as tf:
logger.info('generate archive extracting to "%s"' % archive_name)
tf.add(self.path, arcname=archive_name, filter=filterArchive) | [
"def",
"generateTarball",
"(",
"self",
",",
"file_object",
")",
":",
"archive_name",
"=",
"'%s-%s'",
"%",
"(",
"self",
".",
"getName",
"(",
")",
",",
"self",
".",
"getVersion",
"(",
")",
")",
"def",
"filterArchive",
"(",
"tarinfo",
")",
":",
"if",
"tar... | 48.111111 | 20 |
def get_sample_contacts_formatted_emails(self, sample):
"""Returns a list with the formatted emails from sample contacts
"""
contacts = list(set([sample.getContact()] + sample.getCCContact()))
return map(self.get_contact_formatted_email, contacts) | [
"def",
"get_sample_contacts_formatted_emails",
"(",
"self",
",",
"sample",
")",
":",
"contacts",
"=",
"list",
"(",
"set",
"(",
"[",
"sample",
".",
"getContact",
"(",
")",
"]",
"+",
"sample",
".",
"getCCContact",
"(",
")",
")",
")",
"return",
"map",
"(",
... | 55 | 14.4 |
def mugshot2to3(old):
"""
Upgrader for L{Mugshot} from version 2 to version 3, which re-thumbnails
the mugshot to take into account the new value of L{Mugshot.smallerSize}.
"""
new = old.upgradeVersion(Mugshot.typeName, 2, 3,
person=old.person,
type=old.type,
body=old.body,
smallerBody=old.smallerBody)
new.smallerBody = new.makeThumbnail(
new.body.open(), new.person, new.type[len('image/'):], smaller=True)
return new | [
"def",
"mugshot2to3",
"(",
"old",
")",
":",
"new",
"=",
"old",
".",
"upgradeVersion",
"(",
"Mugshot",
".",
"typeName",
",",
"2",
",",
"3",
",",
"person",
"=",
"old",
".",
"person",
",",
"type",
"=",
"old",
".",
"type",
",",
"body",
"=",
"old",
".... | 43.076923 | 15.076923 |
def push_fbo(self, fbo, offset, csize):
""" Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
The framebuffer object .
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
"""
self._fb_stack.append((fbo, offset, csize))
try:
fbo.activate()
h, w = fbo.color_buffer.shape[:2]
self.push_viewport((0, 0, w, h))
except Exception:
self._fb_stack.pop()
raise
self._update_transforms() | [
"def",
"push_fbo",
"(",
"self",
",",
"fbo",
",",
"offset",
",",
"csize",
")",
":",
"self",
".",
"_fb_stack",
".",
"append",
"(",
"(",
"fbo",
",",
"offset",
",",
"csize",
")",
")",
"try",
":",
"fbo",
".",
"activate",
"(",
")",
"h",
",",
"w",
"="... | 34.793103 | 18.586207 |
def invalidate_object(self, address, state = 'stale'):
"""Force cache item state change (to 'worse' state only).
:Parameters:
- `state`: the new state requested.
:Types:
- `state`: `str`"""
self._lock.acquire()
try:
item = self.get_item(address)
if item and item.state_value<_state_values[state]:
item.state=state
item.update_state()
self._items_list.sort()
finally:
self._lock.release() | [
"def",
"invalidate_object",
"(",
"self",
",",
"address",
",",
"state",
"=",
"'stale'",
")",
":",
"self",
".",
"_lock",
".",
"acquire",
"(",
")",
"try",
":",
"item",
"=",
"self",
".",
"get_item",
"(",
"address",
")",
"if",
"item",
"and",
"item",
".",
... | 33.0625 | 13.4375 |
def save_to_local(self, callback_etat=print):
"""
Saved current in memory base to local file.
It's a backup, not a convenient way to update datas
:param callback_etat: state callback, taking str,int,int as args
"""
callback_etat("Aquisition...", 0, 3)
d = self.dumps()
s = json.dumps(d, indent=4, cls=formats.JsonEncoder)
callback_etat("Chiffrement...", 1, 3)
s = security.protege_data(s, True)
callback_etat("Enregistrement...", 2, 3)
try:
with open(self.LOCAL_DB_PATH, 'wb') as f:
f.write(s)
except (FileNotFoundError):
logging.exception(self.__class__.__name__)
raise StructureError("Chemin de sauvegarde introuvable !") | [
"def",
"save_to_local",
"(",
"self",
",",
"callback_etat",
"=",
"print",
")",
":",
"callback_etat",
"(",
"\"Aquisition...\"",
",",
"0",
",",
"3",
")",
"d",
"=",
"self",
".",
"dumps",
"(",
")",
"s",
"=",
"json",
".",
"dumps",
"(",
"d",
",",
"indent",
... | 40.157895 | 14.052632 |
def request_session(token, url=None):
"""
Requests a WebSocket session for the Real-Time Messaging API.
Returns a SessionMetadata object containing the information retrieved from
the API call.
"""
if url is None:
api = SlackApi()
else:
api = SlackApi(url)
response = api.rtm.start(token=token)
return SessionMetadata(response, api, token) | [
"def",
"request_session",
"(",
"token",
",",
"url",
"=",
"None",
")",
":",
"if",
"url",
"is",
"None",
":",
"api",
"=",
"SlackApi",
"(",
")",
"else",
":",
"api",
"=",
"SlackApi",
"(",
"url",
")",
"response",
"=",
"api",
".",
"rtm",
".",
"start",
"... | 24.357143 | 19.357143 |
def isHandlerPresent(self, event_name):
"""Check if an event has an handler."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
return self.handlers[event_name] is not None | [
"def",
"isHandlerPresent",
"(",
"self",
",",
"event_name",
")",
":",
"if",
"event_name",
"not",
"in",
"self",
".",
"handlers",
":",
"raise",
"ValueError",
"(",
"'{} is not a valid event'",
".",
"format",
"(",
"event_name",
")",
")",
"return",
"self",
".",
"h... | 51 | 10 |
def library_pg(args, l, config):
"""Report on the operation of a Postgres Library database"""
import tabulate
import terminaltables
from textwrap import fill
from ambry.util.text import getTerminalSize
import sys
if args.connect:
try:
l.database.connection.execute('SELECT * FROM pg_stat_activity;')
sys.exit(0)
except Exception as e:
prt(str(e))
sys.exit(1)
db = l.database
(x, y) = getTerminalSize()
if args.processes:
headers = None
rows = []
for row in db.connection.execute('SELECT pid, client_addr, application_name ass, query FROM pg_stat_activity '):
if not headers:
headers = row.keys()
row = list(str(e) for e in row)
row[3] = fill(row[3],x-50)
rows.append(row)
#print tabulate.tabulate(rows, headers)
table = terminaltables.UnixTable([headers]+rows)
print table.table
if args.blocks:
headers = None
rows = []
q1 = """
SELECT pid, database, mode, locktype, mode, relation, tuple, virtualxid FROM pg_locks order by pid;
"""
q2 = """
SELECT blocked_locks.pid AS blocked_pid,
-- blocked_activity.usename AS blocked_user,
blocking_locks.pid AS blocking_pid,
-- blocking_activity.usename AS blocking_user,
blocked_activity.query AS blocked_statement,
blocking_activity.query AS current_statement_in_blocking_process
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.GRANTED;
"""
for row in db.connection.execute(q2):
if not headers:
headers = row.keys()
row = list(str(e) for e in row)
row[2] = fill(row[2],(x-50)/2)
row[3] = fill(row[3], (x-50)/2)
rows.append(row)
if rows:
table = terminaltables.UnixTable([headers] + rows)
print table.table
if args.locks:
headers = None
rows = []
q = """
SELECT pid, database, mode, locktype, mode, relation::regclass, tuple, virtualxid
FROM pg_locks
ORDER BY pid
;
"""
for row in db.connection.execute(q):
if not headers:
headers = row.keys()
row = list(str(e) for e in row)
row[2] = fill(row[2], (x - 50) / 2)
row[3] = fill(row[3], (x - 50) / 2)
rows.append(row)
if rows:
table = terminaltables.UnixTable([headers] + rows)
print table.table | [
"def",
"library_pg",
"(",
"args",
",",
"l",
",",
"config",
")",
":",
"import",
"tabulate",
"import",
"terminaltables",
"from",
"textwrap",
"import",
"fill",
"from",
"ambry",
".",
"util",
".",
"text",
"import",
"getTerminalSize",
"import",
"sys",
"if",
"args"... | 32.571429 | 23.178571 |
def _count_fields_recursive(dataset, fields):
"""Cuenta la información de campos optativos/recomendados/requeridos
desde 'fields', y cuenta la ocurrencia de los mismos en 'dataset'.
Args:
dataset (dict): diccionario con claves a ser verificadas.
fields (dict): diccionario con los campos a verificar en dataset
como claves, y 'optativo', 'recomendado', o 'requerido' como
valores. Puede tener objetios anidados pero no arrays.
Returns:
dict: diccionario con las claves 'recomendado', 'optativo',
'requerido', 'recomendado_total', 'optativo_total',
'requerido_total', con la cantidad como valores.
"""
key_count = {
'recomendado': 0,
'optativo': 0,
'requerido': 0,
'total_optativo': 0,
'total_recomendado': 0,
'total_requerido': 0
}
for k, v in fields.items():
# Si la clave es un diccionario se implementa recursivamente el
# mismo algoritmo
if isinstance(v, dict):
# dataset[k] puede ser o un dict o una lista, ej 'dataset' es
# list, 'publisher' no. Si no es lista, lo metemos en una.
# Si no es ninguno de los dos, dataset[k] es inválido
# y se pasa un diccionario vacío para poder comparar
elements = dataset.get(k)
if not isinstance(elements, (list, dict)):
elements = [{}]
if isinstance(elements, dict):
elements = [dataset[k].copy()]
for element in elements:
# Llamada recursiva y suma del resultado al nuestro
result = _count_fields_recursive(element, v)
for key in result:
key_count[key] += result[key]
# Es un elemento normal (no iterable), se verifica si está en
# dataset o no. Se suma 1 siempre al total de su tipo
else:
# total_requerido, total_recomendado, o total_optativo
key_count['total_' + v] += 1
if k in dataset:
key_count[v] += 1
return key_count | [
"def",
"_count_fields_recursive",
"(",
"dataset",
",",
"fields",
")",
":",
"key_count",
"=",
"{",
"'recomendado'",
":",
"0",
",",
"'optativo'",
":",
"0",
",",
"'requerido'",
":",
"0",
",",
"'total_optativo'",
":",
"0",
",",
"'total_recomendado'",
":",
"0",
... | 38.425926 | 20.703704 |
def view_cookies(self):
"""
View current cookies in the `requests.Session()` object
**Returns:** List of Dicts, one cookie per Dict.
"""
return_list = []
for cookie in self._session.cookies:
return_list.append(vars(cookie))
return return_list | [
"def",
"view_cookies",
"(",
"self",
")",
":",
"return_list",
"=",
"[",
"]",
"for",
"cookie",
"in",
"self",
".",
"_session",
".",
"cookies",
":",
"return_list",
".",
"append",
"(",
"vars",
"(",
"cookie",
")",
")",
"return",
"return_list"
] | 27.454545 | 15.818182 |
def remove_user(name, profile='github'):
'''
Remove a Github user by name.
name
The user for which to obtain information.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_user github-handle
'''
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
try:
git_user = client.get_user(name)
except UnknownObjectException:
log.exception("Resource not found")
return False
if organization.has_in_members(git_user):
organization.remove_from_members(git_user)
return not organization.has_in_members(git_user) | [
"def",
"remove_user",
"(",
"name",
",",
"profile",
"=",
"'github'",
")",
":",
"client",
"=",
"_get_client",
"(",
"profile",
")",
"organization",
"=",
"client",
".",
"get_organization",
"(",
"_get_config_value",
"(",
"profile",
",",
"'org_name'",
")",
")",
"t... | 23.28125 | 22.90625 |
def all_faces_with_verts(self, v_indices, as_boolean=False):
'''
returns all of the faces that contain at least one of the vertices in v_indices
'''
import numpy as np
included_vertices = np.zeros(self.v.shape[0], dtype=bool)
included_vertices[np.array(v_indices, dtype=np.uint32)] = True
faces_with_verts = included_vertices[self.f].all(axis=1)
if as_boolean:
return faces_with_verts
return np.nonzero(faces_with_verts)[0] | [
"def",
"all_faces_with_verts",
"(",
"self",
",",
"v_indices",
",",
"as_boolean",
"=",
"False",
")",
":",
"import",
"numpy",
"as",
"np",
"included_vertices",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"v",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",... | 45.181818 | 22.454545 |
def _get_stack(self, orchestration_client, stack_name):
"""Get the ID for the current deployed overcloud stack if it exists."""
try:
stack = orchestration_client.stacks.get(stack_name)
self.log.info("Stack found, will be doing a stack update")
return stack
except HTTPNotFound:
self.log.info("No stack found, will be doing a stack create") | [
"def",
"_get_stack",
"(",
"self",
",",
"orchestration_client",
",",
"stack_name",
")",
":",
"try",
":",
"stack",
"=",
"orchestration_client",
".",
"stacks",
".",
"get",
"(",
"stack_name",
")",
"self",
".",
"log",
".",
"info",
"(",
"\"Stack found, will be doing... | 44.888889 | 21.888889 |
def fire_hooks(ctx, document, elem, element, hooks):
"""Fire hooks on newly created element.
For each newly created element we will try to find defined hooks and execute them.
:Args:
- ctx (:class:`Context`): Context object
- document (:class:`ooxml.doc.Document`): Document object
- elem (:class:`ooxml.doc.Element`): Element which we serialized
- element (Element): lxml element which we created
- hooks (list): List of hooks
"""
if not hooks:
return
for hook in hooks:
hook(ctx, document, elem, element) | [
"def",
"fire_hooks",
"(",
"ctx",
",",
"document",
",",
"elem",
",",
"element",
",",
"hooks",
")",
":",
"if",
"not",
"hooks",
":",
"return",
"for",
"hook",
"in",
"hooks",
":",
"hook",
"(",
"ctx",
",",
"document",
",",
"elem",
",",
"element",
")"
] | 31.277778 | 22.055556 |
def host(self):
    # pylint: disable=too-many-branches
    """Build passive-check external commands for an host and its services.

    Reads the PATCH/POST JSON body from ``cherrypy.request.json``; it must
    contain at least the host ``name``, and may contain a ``livestate``
    (dict or list of dicts) and a ``services`` list (each with a ``name``
    and optional ``livestate``). For every valid livestate an external
    command is built and forwarded to the Alignak arbiter.

    :return: dict with '_status', '_result' and '_issues' keys (or
        '_status'/'_error' on request-level failures)
    """
    logger.debug("Host status...")
    # Only PATCH/POST may change state; anything else is rejected with 405.
    if cherrypy.request.method not in ["PATCH", "POST"]:
        cherrypy.response.status = 405
        return {'_status': 'ERR',
                '_error': 'You must only PATCH or POST on this endpoint.'}
    # Update an host
    # ---
    if not cherrypy.request.json:
        return {'_status': 'ERR',
                '_error': 'You must send parameters on this endpoint.'}
    host_name = None
    if cherrypy.request.json.get('name', None) is not None:
        host_name = cherrypy.request.json.get('name', None)
    if not host_name:
        return {'_status': 'ERR',
                '_error': 'Missing targeted host name.'}
    # Get provided data
    # ---
    logger.debug("Posted data: %s", cherrypy.request.json)
    # Check if the host exist in Alignak
    # ---
    # todo: Not mandatory but it would be clean...
    # Prepare response
    # ---
    ws_result = {'_status': 'OK',
                 '_result': ['%s is alive :)' % host_name],
                 '_issues': []}
    # Manage the host livestate
    # ---
    # Alert on unordered livestate if several information exist
    now = int(time.time())
    livestate = cherrypy.request.json.get('livestate', None)
    if not livestate:
        # No livestate provided: default to a simple "host is UP" state.
        livestate = {'state': "UP"}
    if not isinstance(livestate, list):
        livestate = [livestate]
    last_ts = 0
    # First pass: normalize each livestate entry and detect out-of-order
    # timestamps (which the scheduler may refuse to process).
    for ls in livestate:
        if ls.get('state', None) is None:
            ws_result['_issues'].append("Missing state for the host '%s' livestate, "
                                        "assuming host is UP!" % host_name)
            ls['state'] = 'UP'
        # Tag our own timestamp
        ls['_ws_timestamp'] = now
        try:
            # 'ABC' default deliberately forces a ValueError when no
            # timestamp was provided, skipping the ordering check.
            timestamp = int(ls.get('timestamp', 'ABC'))
            if timestamp < last_ts:
                logger.info("Got unordered timestamp for the host '%s'. "
                            "The Alignak scheduler may not handle the check result!",
                            host_name)
            last_ts = timestamp
        except ValueError:
            pass
    # Second pass: validate each state and raise the external commands.
    for ls in livestate:
        state = ls.get('state').upper()
        if state not in ['UP', 'DOWN', 'UNREACHABLE']:
            ws_result['_issues'].append("Host state should be UP, DOWN or UNREACHABLE"
                                        ", and not '%s'." % (state))
        else:
            # Create an host live state command
            command = self._build_host_livestate(host_name, ls)
            ws_result['_result'].append("Raised: %s" % command)
            # Notify the external command to our Arbiter daemon
            self.app.add(ExternalCommand(command))
    services = cherrypy.request.json.get('services', None)
    if not services:
        return ws_result
    # Same normalize/validate/raise sequence, per service of the host.
    for service in services:
        service_name = service.get('name', None)
        if service_name is None:
            ws_result['_issues'].append("A service does not have a 'name' property")
            continue
        livestate = service.get('livestate', None)
        if not livestate:
            # No livestate provided: default to a simple "service is OK".
            livestate = {'state': "OK"}
        if not isinstance(livestate, list):
            livestate = [livestate]
        last_ts = 0
        for ls in livestate:
            if ls.get('state', None) is None:
                ws_result['_issues'].append("Missing state for the service %s/%s livestate, "
                                            "assuming service is OK!"
                                            % (host_name, service_name))
                ls['state'] = 'OK'
            # Tag our own timestamp
            ls['_ws_timestamp'] = now
            try:
                timestamp = int(ls.get('timestamp', 'ABC'))
                if timestamp < last_ts:
                    logger.info("Got unordered timestamp for the service: %s/%s. "
                                "The Alignak scheduler may not handle the check result!",
                                host_name, service_name)
                last_ts = timestamp
            except ValueError:
                pass
        for ls in livestate:
            state = ls.get('state').upper()
            if state not in ['OK', 'WARNING', 'CRITICAL', 'UNKNOWN', 'UNREACHABLE']:
                ws_result['_issues'].append("Service %s/%s state must be OK, WARNING, "
                                            "CRITICAL, UNKNOWN or UNREACHABLE, and not %s."
                                            % (host_name, service_name, state))
            else:
                # Create a service live state command
                command = self._build_service_livestate(host_name, service_name, ls)
                ws_result['_result'].append("Raised: %s" % command)
                # Notify the external command to our Arbiter daemon
                self.app.add(ExternalCommand(command))
    return ws_result
"def",
"host",
"(",
"self",
")",
":",
"# pylint: disable=too-many-branches",
"logger",
".",
"debug",
"(",
"\"Host status...\"",
")",
"if",
"cherrypy",
".",
"request",
".",
"method",
"not",
"in",
"[",
"\"PATCH\"",
",",
"\"POST\"",
"]",
":",
"cherrypy",
".",
"... | 40.804348 | 21.217391 |
def pad(text, bits=32):
    """
    Pads the inputted text to ensure it fits the proper block length
    for encryption.

    PKCS#7-style: appends between 1 and `bits` copies of chr(pad_len),
    so already-aligned input gains a full extra block.

    :param      text | <str>
                bits | <int>

    :return     <str>
    """
    pad_len = bits - len(text) % bits
    return text + chr(pad_len) * pad_len
"def",
"pad",
"(",
"text",
",",
"bits",
"=",
"32",
")",
":",
"return",
"text",
"+",
"(",
"bits",
"-",
"len",
"(",
"text",
")",
"%",
"bits",
")",
"*",
"chr",
"(",
"bits",
"-",
"len",
"(",
"text",
")",
"%",
"bits",
")"
] | 25.727273 | 19.545455 |
def apply(self, func, *args, **kwargs):
    """Apply the provided function and combine the results together in the
    same way as apply from groupby in pandas.

    :param func: function applied to each per-key pandas group; extra
        ``*args``/``**kwargs`` are forwarded to it.
    :returns: a sparklingpandas ``DataFrame`` built from the re-keyed rows.
    """
    self._prep_pandas_groupby()

    def key_by_index(data):
        """Key each row by its index.
        """
        # TODO: Is there a better way to do this?
        for key, row in data.iterrows():
            yield (key, pd.DataFrame.from_dict(
                dict([(key, row)]), orient='index'))

    # Bind to locals so the lambdas below close over plain values rather
    # than over self — presumably to keep the shipped closures small and
    # picklable for Spark (TODO confirm).
    myargs = self._myargs
    mykwargs = self._mykwargs
    # Re-create the pandas groupby per partition value, then apply func.
    regroupedRDD = self._distributedRDD.mapValues(
        lambda data: data.groupby(*myargs, **mykwargs))
    appliedRDD = regroupedRDD.map(
        lambda key_data: key_data[1].apply(func, *args, **kwargs))
    # Explode results back into (index, single-row frame) pairs.
    reKeyedRDD = appliedRDD.flatMap(key_by_index)
    dataframe = self._sortIfNeeded(reKeyedRDD).values()
    return DataFrame.fromDataFrameRDD(dataframe, self.sql_ctx)
"def",
"apply",
"(",
"self",
",",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_prep_pandas_groupby",
"(",
")",
"def",
"key_by_index",
"(",
"data",
")",
":",
"\"\"\"Key each row by its index.\n \"\"\"",
"# TODO: Is there ... | 39.88 | 13.44 |
def create_ecdsa_public_and_private_from_pem(pem, password=None):
    """
    <Purpose>
    Create public and private ECDSA keys from a private 'pem'.  The public and
    private keys are strings in PEM format:

    public: '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
    private: '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}

    >>> junk, private = generate_public_and_private()
    >>> public, private = create_ecdsa_public_and_private_from_pem(private)
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
    True
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
    True
    >>> passphrase = 'secret'
    >>> encrypted_pem = create_ecdsa_encrypted_pem(private, passphrase)
    >>> public, private = create_ecdsa_public_and_private_from_pem(encrypted_pem, passphrase)
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(public)
    True
    >>> securesystemslib.formats.PEMECDSA_SCHEMA.matches(private)
    True

    <Arguments>
    pem:
    A string in PEM format.  The private key is extracted and returned in
    an ecdsakey object.

    password: (optional)
    The password, or passphrase, to decrypt the private part of the ECDSA key
    if it is encrypted.  'password' is not used directly as the encryption
    key, a stronger encryption key is derived from it.

    <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.UnsupportedAlgorithmError, if the ECDSA key
    pair could not be extracted, possibly due to an unsupported algorithm.

    <Side Effects>
    None.

    <Returns>
    A (public, private) tuple of PEM strings.  Conforms to
    'securesystemslib.formats.ECDSAKEY_SCHEMA'.
    """
    # Does 'pem' have the correct format?
    # This check will ensure 'pem' conforms to
    # 'securesystemslib.formats.ECDSARSA_SCHEMA'.
    securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)
    if password is not None:
        securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
        # pyca/cryptography expects the passphrase as bytes.
        password = password.encode('utf-8')
    else:
        logger.debug('The password/passphrase is unset. The PEM is expected'
                     ' to be unencrypted.')
    public = None
    private = None
    # Generate the public and private ECDSA keys. The pyca/cryptography library
    # performs the actual import operation.
    try:
        private = load_pem_private_key(pem.encode('utf-8'), password=password,
                                       backend=default_backend())
    except (ValueError, cryptography.exceptions.UnsupportedAlgorithm) as e:
        raise securesystemslib.exceptions.CryptoError('Could not import private'
                                                      ' PEM.\n' + str(e))
    public = private.public_key()
    # Serialize public and private keys to PEM format.  Note: the returned
    # private PEM is always unencrypted, regardless of the input password.
    private = private.private_bytes(encoding=serialization.Encoding.PEM,
                                    format=serialization.PrivateFormat.TraditionalOpenSSL,
                                    encryption_algorithm=serialization.NoEncryption())
    public = public.public_bytes(encoding=serialization.Encoding.PEM,
                                 format=serialization.PublicFormat.SubjectPublicKeyInfo)
    return public.decode('utf-8'), private.decode('utf-8')
"def",
"create_ecdsa_public_and_private_from_pem",
"(",
"pem",
",",
"password",
"=",
"None",
")",
":",
"# Does 'pem' have the correct format?",
"# This check will ensure 'pem' conforms to",
"# 'securesystemslib.formats.ECDSARSA_SCHEMA'.",
"securesystemslib",
".",
"formats",
".",
"P... | 36 | 27.176471 |
def cross_lists(*sets):
    """Yield the cross product of the arguments, one list per combination.

    Combinations are produced in odometer order (rightmost argument varies
    fastest), matching ``itertools.product``, but each combination is a
    fresh list rather than a tuple.

    Fixes two defects of the original odometer implementation:
    - an empty input set made the initial ``next()`` raise StopIteration
      inside the generator, which PEP 479 turns into RuntimeError; now an
      empty set simply yields nothing;
    - inputs are materialized once, so one-shot iterators work too (the
      original re-called ``iter()`` on them when a wheel wrapped around).

    With no arguments at all, a single empty list is yielded (the empty
    product), as before.
    """
    pools = [list(s) for s in sets]
    # Any empty pool makes the whole product empty.
    if not all(pools):
        return
    indices = [0] * len(pools)
    while True:
        yield [pool[i] for pool, i in zip(pools, indices)]
        # Advance like an odometer: bump the rightmost index, carrying
        # left whenever a wheel wraps around.
        for i in range(len(indices) - 1, -1, -1):
            indices[i] += 1
            if indices[i] < len(pools[i]):
                break
            indices[i] = 0
        else:
            # Every wheel wrapped: all combinations have been produced.
            break
"def",
"cross_lists",
"(",
"*",
"sets",
")",
":",
"wheels",
"=",
"[",
"iter",
"(",
"_",
")",
"for",
"_",
"in",
"sets",
"]",
"digits",
"=",
"[",
"next",
"(",
"it",
")",
"for",
"it",
"in",
"wheels",
"]",
"while",
"True",
":",
"yield",
"digits",
"... | 30.733333 | 11.733333 |
def get_global_parameters(config_names=("core", "scheduler", "worker", "retcode")):
    """
    Returns a list of global, luigi-internal configuration parameters. Each list item is a 4-tuple
    containing the configuration class, the parameter instance, the parameter name, and the full
    parameter name in the cli. When *config_names* is set, it should be a list of configuration
    class names that are exclusively taken into account.
    """
    params = []
    # Walk all registered luigi Config subclasses, optionally restricted
    # to the given class names.
    for cls in luigi.task.Config.__subclasses__():
        if config_names and cls.__name__ not in config_names:
            continue
        # Collect every luigi.Parameter attribute defined on the class.
        for attr in dir(cls):
            param = getattr(cls, attr)
            if not isinstance(param, luigi.Parameter):
                continue
            # CLI convention: underscores become dashes; prefix with the
            # (dash-ified) section/class name unless the class opts out.
            full_name = attr.replace("_", "-")
            if getattr(cls, "use_cmdline_section", True):
                full_name = "{}-{}".format(cls.__name__.replace("_", "-"), full_name)
            params.append((cls, param, attr, full_name))
    return params
"def",
"get_global_parameters",
"(",
"config_names",
"=",
"(",
"\"core\"",
",",
"\"scheduler\"",
",",
"\"worker\"",
",",
"\"retcode\"",
")",
")",
":",
"params",
"=",
"[",
"]",
"for",
"cls",
"in",
"luigi",
".",
"task",
".",
"Config",
".",
"__subclasses__",
... | 41.416667 | 25.583333 |
def exe_cmd(*cmds):
    """Executes commands in a new shell. Directing stderr to PIPE.

    This is fastboot's own exe_cmd because of its peculiar way of writing
    non-error info to stderr.

    Args:
    cmds: A sequence of commands and arguments.

    Returns:
    The output of the command run.

    Raises:
    Exception: An error occurred during the command execution.
    """
    # NOTE: joins args into one shell string (shell=True) — callers must
    # not pass untrusted input. stderr content, when any, is returned in
    # place of stdout rather than raised.
    joined = ' '.join(cmds)
    process = Popen(joined, stdout=PIPE, stderr=PIPE, shell=True)
    stdout_data, stderr_data = process.communicate()
    return stderr_data if stderr_data else stdout_data
"def",
"exe_cmd",
"(",
"*",
"cmds",
")",
":",
"cmd",
"=",
"' '",
".",
"join",
"(",
"cmds",
")",
"proc",
"=",
"Popen",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"shell",
"=",
"True",
")",
"(",
"out",
",",
"err",
... | 26 | 22.142857 |
def getvalue(x):
    """Return the single value of x or raise TypeError if more than one value.

    Returns None when x has no values at all.
    """
    if isrepeating(x):
        raise TypeError(
            "Ambiguous call to getvalue for %r which has more than one value."
            % x)
    # First value if any, else None — same as the for/return idiom.
    return next(iter(getvalues(x)), None)
"def",
"getvalue",
"(",
"x",
")",
":",
"if",
"isrepeating",
"(",
"x",
")",
":",
"raise",
"TypeError",
"(",
"\"Ambiguous call to getvalue for %r which has more than one value.\"",
"%",
"x",
")",
"for",
"value",
"in",
"getvalues",
"(",
"x",
")",
":",
"return",
"... | 31.777778 | 21.111111 |
def _get_codename(self, pathname, basename):
    """Return (filename, archivename) for the path.

    Given a module name path, return the correct file path and
    archive name, compiling if necessary.  For example, given
    /python/lib/string, return (/python/lib/string.pyc, string).

    Note: assumes the ``.py`` source exists — its mtime is stat'ed
    unconditionally.
    """
    file_py = pathname + ".py"
    file_pyc = pathname + ".pyc"
    file_pyo = pathname + ".pyo"
    # Prefer an up-to-date .pyo; otherwise (re)compile to .pyc when the
    # .pyc is missing or older than the source.
    if os.path.isfile(file_pyo) and \
            os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
        fname = file_pyo  # Use .pyo file
    elif not os.path.isfile(file_pyc) or \
            os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
        import py_compile
        if self.debug:
            print("Compiling", file_py)
        try:
            py_compile.compile(file_py, file_pyc, None, True)
        except py_compile.PyCompileError as err:
            # Report but still archive the (possibly stale) .pyc path.
            print(err.msg)
        fname = file_pyc
    else:
        fname = file_pyc
    # Archive name is the bare filename, optionally under basename/.
    archivename = os.path.split(fname)[1]
    if basename:
        archivename = "%s/%s" % (basename, archivename)
    return (fname, archivename)
"def",
"_get_codename",
"(",
"self",
",",
"pathname",
",",
"basename",
")",
":",
"file_py",
"=",
"pathname",
"+",
"\".py\"",
"file_pyc",
"=",
"pathname",
"+",
"\".pyc\"",
"file_pyo",
"=",
"pathname",
"+",
"\".pyo\"",
"if",
"os",
".",
"path",
".",
"isfile",... | 41.068966 | 14.517241 |
def complete_abstract_value(
    exe_context,  # type: ExecutionContext
    return_type,  # type: Union[GraphQLInterfaceType, GraphQLUnionType]
    field_asts,  # type: List[Field]
    info,  # type: ResolveInfo
    path,  # type: List[Union[int, str]]
    result,  # type: Any
):
    # type: (...) -> Dict[str, Any]
    """
    Complete a value of an abstract type by determining the runtime type of
    that value, then completing based on that type.

    Raises GraphQLError when the runtime type cannot be resolved to an
    Object type, or is not a possible type of the abstract return_type.
    """
    runtime_type = None  # type: Union[str, GraphQLObjectType, None]
    # Field type must be Object, Interface or Union and expect sub-selections.
    if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
        # Prefer the type's own resolve_type; fall back to the default
        # isTypeOf-based resolution.
        if return_type.resolve_type:
            runtime_type = return_type.resolve_type(result, info)
        else:
            runtime_type = get_default_resolve_type_fn(result, info, return_type)
    # resolve_type may return a type name instead of a type object.
    if isinstance(runtime_type, string_types):
        runtime_type = info.schema.get_type(runtime_type)  # type: ignore
    if not isinstance(runtime_type, GraphQLObjectType):
        raise GraphQLError(
            (
                "Abstract type {} must resolve to an Object type at runtime "
                + 'for field {}.{} with value "{}", received "{}".'
            ).format(
                return_type, info.parent_type, info.field_name, result, runtime_type
            ),
            field_asts,
        )
    if not exe_context.schema.is_possible_type(return_type, runtime_type):
        raise GraphQLError(
            u'Runtime Object type "{}" is not a possible type for "{}".'.format(
                runtime_type, return_type
            ),
            field_asts,
        )
    return complete_object_value(
        exe_context, runtime_type, field_asts, info, path, result
    )
"def",
"complete_abstract_value",
"(",
"exe_context",
",",
"# type: ExecutionContext",
"return_type",
",",
"# type: Union[GraphQLInterfaceType, GraphQLUnionType]",
"field_asts",
",",
"# type: List[Field]",
"info",
",",
"# type: ResolveInfo",
"path",
",",
"# type: List[Union[int, st... | 37.085106 | 24.06383 |
def unicode_from_html(content):
    """Attempts to decode an HTML string into unicode.

    Tries every encoding declared in the content itself (as found by
    get_encodings_from_content), in order. If unsuccessful, the original
    content is returned unchanged.

    Note: Python 2 only — uses the ``unicode`` builtin.
    """
    encodings = get_encodings_from_content(content)
    for encoding in encodings:
        try:
            return unicode(content, encoding)
        except (UnicodeError, TypeError):
            # Bad/declared-but-wrong encoding: try the next candidate.
            pass
    return content
"def",
"unicode_from_html",
"(",
"content",
")",
":",
"encodings",
"=",
"get_encodings_from_content",
"(",
"content",
")",
"for",
"encoding",
"in",
"encodings",
":",
"try",
":",
"return",
"unicode",
"(",
"content",
",",
"encoding",
")",
"except",
"(",
"Unicode... | 24.2 | 18.666667 |
def declare_example(self, source):
    """Execute the given code, adding it to the runner's namespace.

    :param source: Python source text; compiled with filename ``<docs>``
        (shown in tracebacks) and exec'd into ``self.namespace`` while
        module patching from ``patch_modules`` is active.
    """
    with patch_modules():
        code = compile(source, "<docs>", "exec")
        exec(code, self.namespace)
"def",
"declare_example",
"(",
"self",
",",
"source",
")",
":",
"with",
"patch_modules",
"(",
")",
":",
"code",
"=",
"compile",
"(",
"source",
",",
"\"<docs>\"",
",",
"\"exec\"",
")",
"exec",
"(",
"code",
",",
"self",
".",
"namespace",
")"
] | 45.4 | 6.2 |
def _get_library_metadata(self, date_range):
    """
    Retrieve the libraries for the given date range, the assumption is that the date ranges do not overlap and
    they are CLOSED_CLOSED.

    At the moment the date range is mandatory

    :param date_range: DateRange with both start and end set; naive
        datetimes are localized with mktz() before querying.
    :returns: list of TickStoreLibrary, sorted by start date.
    :raises Exception: when date_range is None or missing start/end.
    """
    if date_range is None:
        raise Exception("A date range must be provided")
    if not (date_range.start and date_range.end):
        raise Exception("The date range {0} must contain a start and end date".format(date_range))
    # Localize naive bounds so the mongo comparison is tz-consistent.
    start = date_range.start if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz())
    end = date_range.end if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz())
    # A stored library matches when it overlaps [start, end]: it straddles
    # the start, is fully contained, or straddles the end.
    query = {'$or': [{'start': {'$lte': start}, 'end': {'$gte': start}},
                     {'start': {'$gte': start}, 'end': {'$lte': end}},
                     {'start': {'$lte': end}, 'end': {'$gte': end}}]}
    cursor = self._collection.find(query,
                                   projection={'library_name': 1, 'start': 1, 'end': 1},
                                   sort=[('start', pymongo.ASCENDING)])
    results = []
    for res in cursor:
        # Stored datetimes come back naive; re-attach UTC then convert to
        # the caller's timezone when the request was tz-aware.
        start = res['start']
        if date_range.start.tzinfo is not None and start.tzinfo is None:
            start = start.replace(tzinfo=mktz("UTC")).astimezone(tz=date_range.start.tzinfo)
        end = res['end']
        if date_range.end.tzinfo is not None and end.tzinfo is None:
            end = end.replace(tzinfo=mktz("UTC")).astimezone(tz=date_range.end.tzinfo)
        results.append(TickStoreLibrary(res['library_name'], DateRange(start, end, CLOSED_CLOSED)))
    return results
"def",
"_get_library_metadata",
"(",
"self",
",",
"date_range",
")",
":",
"if",
"date_range",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"A date range must be provided\"",
")",
"if",
"not",
"(",
"date_range",
".",
"start",
"and",
"date_range",
".",
"end",
... | 50.852941 | 32.029412 |
def invoke_consumer(self, message):
    """Wrap the actual processor processing bits

    Serializes message processing (one at a time via consumer_lock),
    dispatches to the consumer when idle, and requeues the message when a
    shutdown is in progress.

    :param rejected.data.Message message: The message to process
    """
    # Only allow for a single message to be processed at a time
    with (yield self.consumer_lock.acquire()):
        if self.is_idle:
            # A closed channel means the broker already gave up on this
            # delivery; just drop our local copy and move on.
            if message.channel.is_closed:
                LOGGER.warning('Channel %s is closed on '
                               'connection "%s", discarding '
                               'local copy of message %s',
                               message.channel.channel_number,
                               message.connection,
                               utils.message_info(message.exchange,
                                                  message.routing_key,
                                                  message.properties))
                self.counters[self.CLOSED_ON_START] += 1
                self.maybe_get_next_message()
                return
            self.set_state(self.STATE_PROCESSING)
            self.delivery_time = start_time = time.time()
            self.active_message = message
            self.measurement = data.Measurement()
            if message.method.redelivered:
                self.counters[self.REDELIVERED] += 1
                self.measurement.set_tag(self.REDELIVERED, True)
            try:
                result = yield self.consumer.execute(message,
                                                     self.measurement)
            except Exception as error:
                # Consumer.execute is expected to catch its own errors;
                # anything escaping here is requeued for another attempt.
                LOGGER.exception('Unhandled exception from consumer in '
                                 'process. This should not happen. %s',
                                 error)
                result = data.MESSAGE_REQUEUE
            LOGGER.debug('Finished processing message: %r', result)
            self.on_processed(message, result, start_time)
        elif self.is_waiting_to_shutdown:
            LOGGER.info(
                'Requeueing pending message due to pending shutdown')
            self.reject(message, True)
            self.shutdown_connections()
        elif self.is_shutting_down:
            LOGGER.info('Requeueing pending message due to shutdown')
            self.reject(message, True)
            self.on_ready_to_stop()
        else:
            # Defensive: lock held while in an unexpected state.
            LOGGER.warning('Exiting invoke_consumer without processing, '
                           'this should not happen. State: %s',
                           self.state_description)
    self.maybe_get_next_message()
"def",
"invoke_consumer",
"(",
"self",
",",
"message",
")",
":",
"# Only allow for a single message to be processed at a time",
"with",
"(",
"yield",
"self",
".",
"consumer_lock",
".",
"acquire",
"(",
")",
")",
":",
"if",
"self",
".",
"is_idle",
":",
"if",
"mess... | 46.614035 | 19.824561 |
def from_httplib(cls, message, duplicates=('set-cookie',)):  # Python 2
    """Read headers from a Python 2 httplib message object.

    ``message.items()`` keeps only the last line of each duplicated
    header; rather than re-doing the raw parsing, only the headers of
    interest (the cookies, by default) are re-imported with all their
    duplicates from the message.
    """
    headers = cls(message.items())
    for name in duplicates:
        # Drop the single surviving line, then add back every occurrence.
        headers.discard(name)
        for value in message.getheaders(name):
            headers.add(name, value)
    return headers
"def",
"from_httplib",
"(",
"cls",
",",
"message",
",",
"duplicates",
"=",
"(",
"'set-cookie'",
",",
")",
")",
":",
"# Python 2",
"ret",
"=",
"cls",
"(",
"message",
".",
"items",
"(",
")",
")",
"# ret now contains only the last header line for each duplicate.",
... | 50.714286 | 19.142857 |
def base64_decode(nb):
    """Restore all bytes objects in the notebook from base64-encoded strings.

    Walks every code cell's outputs and decodes png/jpeg payloads in
    place, returning the same notebook object.

    Note: This is never used.  Python 2 only — uses the ``unicode``
    builtin and the deprecated ``decodestring``.
    """
    for ws in nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type == 'code':
                for output in cell.outputs:
                    if 'png' in output:
                        # decodestring expects bytes, not unicode.
                        if isinstance(output.png, unicode):
                            output.png = output.png.encode('ascii')
                        output.png = decodestring(output.png)
                    if 'jpeg' in output:
                        if isinstance(output.jpeg, unicode):
                            output.jpeg = output.jpeg.encode('ascii')
                        output.jpeg = decodestring(output.jpeg)
    return nb
"def",
"base64_decode",
"(",
"nb",
")",
":",
"for",
"ws",
"in",
"nb",
".",
"worksheets",
":",
"for",
"cell",
"in",
"ws",
".",
"cells",
":",
"if",
"cell",
".",
"cell_type",
"==",
"'code'",
":",
"for",
"output",
"in",
"cell",
".",
"outputs",
":",
"if... | 41.611111 | 14.388889 |
def get_title(self):
    """Default title for plot

    Assembles a comma-separated summary of the Q-transform parameters
    (Q value, time resolution, optional q-range, whitening flag,
    frequency range, and energy range).
    """
    def fformat(x):  # float format
        # Recursively format sequences as "[a, b, ...]".
        if isinstance(x, (list, tuple)):
            return '[{0}]'.format(', '.join(map(fformat, x)))
        if isinstance(x, Quantity):
            x = x.value
        elif isinstance(x, str):
            warnings.warn('WARNING: fformat called with a' +
                          ' string. This has ' +
                          'been depricated and may disappear ' +
                          'in a future release.')
            x = float(x)
        return '{0:.2f}'.format(x)

    bits = [('Q', fformat(self.result.q))]
    bits.append(('tres', '{:.3g}'.format(self.qxfrm_args['tres'])))
    if self.qxfrm_args.get('qrange'):
        bits.append(('q-range', fformat(self.qxfrm_args['qrange'])))
    if self.qxfrm_args['whiten']:
        # Single-element tuple: rendered without a value below.
        bits.append(('whitened',))
    bits.extend([
        ('f-range', fformat(self.result.yspan)),
        ('e-range', '[{:.3g}, {:.3g}]'.format(self.result.min(),
                                              self.result.max())),
    ])
    return ', '.join([': '.join(bit) for bit in bits])
"def",
"get_title",
"(",
"self",
")",
":",
"def",
"fformat",
"(",
"x",
")",
":",
"# float format",
"if",
"isinstance",
"(",
"x",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"'[{0}]'",
".",
"format",
"(",
"', '",
".",
"join",
"(",
"map"... | 42.892857 | 14.678571 |
def load_chunk(filename, bounds, encoding='utf8', slow=False):
    """
    Load a chunk from file using Bounds info.

    Seeks to ``bounds.byte_start`` and reads through ``bounds.byte_end``,
    decoding with *encoding*.  Pass 'slow=True' for an alternative loading
    method based on line numbers.
    """
    if slow:
        return _load_chunk_slow(filename, bounds, encoding)
    length = bounds.byte_end - bounds.byte_start
    with open(filename, 'rb') as handle:
        handle.seek(bounds.byte_start)
        return handle.read(length).decode(encoding)
"def",
"load_chunk",
"(",
"filename",
",",
"bounds",
",",
"encoding",
"=",
"'utf8'",
",",
"slow",
"=",
"False",
")",
":",
"if",
"slow",
":",
"return",
"_load_chunk_slow",
"(",
"filename",
",",
"bounds",
",",
"encoding",
")",
"with",
"open",
"(",
"filenam... | 35.916667 | 14.75 |
def start(st_reg_number):
    """Checks the number validity for the Acre state

    Verifies both modulo-11 check digits of a (13-digit) Acre state
    registration number; returns True only when both match.
    """
    if len(st_reg_number) > 13:
        return False
    modulus = 11
    weights = [4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
    body = st_reg_number[:len(st_reg_number) - 2]
    check_digits = st_reg_number[-2:]

    def check_digit(digits, digit_weights):
        # Weighted sum mod 11; remainders 0 and 1 map the digit to 0.
        remainder = sum(int(d) * w
                        for d, w in zip(digits, digit_weights)) % modulus
        digit = modulus - remainder
        return 0 if digit in (10, 11) else digit

    first = check_digit(body, weights)
    if str(first) != check_digits[0]:
        return False
    # Second digit is computed over the body plus the first check digit,
    # with an extra leading weight of 5.
    second = check_digit(body + str(first), [5] + weights)
    return str(first) + str(second) == check_digits
"def",
"start",
"(",
"st_reg_number",
")",
":",
"#st_reg_number = str(st_reg_number)",
"weights",
"=",
"[",
"4",
",",
"3",
",",
"2",
",",
"9",
",",
"8",
",",
"7",
",",
"6",
",",
"5",
",",
"4",
",",
"3",
",",
"2",
"]",
"digits",
"=",
"st_reg_number"... | 26.384615 | 19.051282 |
def get_values(text):
    """
    Accept a string such as BACKGROUNDCOLOR [r] [g] [b]
    and return ['r', 'g', 'b']

    Bracketed groups may contain '|'-separated alternatives, which are
    flattened into the result; everything is lowercased.
    """
    tokens = []
    for match in re.findall(r"\[(.*?)\]", text):
        # split('|') on pipe-free text yields [match], so one path
        # covers both the plain and the alternative cases.
        tokens.extend(match.split("|"))
    return [str(token.lower()) for token in tokens]
"def",
"get_values",
"(",
"text",
")",
":",
"res",
"=",
"re",
".",
"findall",
"(",
"r\"\\[(.*?)\\]\"",
",",
"text",
")",
"values",
"=",
"[",
"]",
"for",
"r",
"in",
"res",
":",
"if",
"\"|\"",
"in",
"r",
":",
"params",
"=",
"r",
".",
"split",
"(",
... | 20.4 | 18.3 |
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    Lazily built on first access and memoized in ``self._context``.

    :returns: WorkersRealTimeStatisticsContext for this WorkersRealTimeStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_real_time_statistics.WorkersRealTimeStatisticsContext
    """
    if self._context is None:
        self._context = WorkersRealTimeStatisticsContext(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
        )
    return self._context
"def",
"_proxy",
"(",
"self",
")",
":",
"if",
"self",
".",
"_context",
"is",
"None",
":",
"self",
".",
"_context",
"=",
"WorkersRealTimeStatisticsContext",
"(",
"self",
".",
"_version",
",",
"workspace_sid",
"=",
"self",
".",
"_solution",
"[",
"'workspace_si... | 45.928571 | 27.071429 |
def _create_rates(self, dist='uniform', size=None, eps=1e-4):
    '''Create a rate parameter (usually for a recurrent network layer).

    Parameters
    ----------
    dist : {'uniform', 'log'}, optional
    Distribution of rate values. Defaults to ``'uniform'``.
    size : int, optional
    Number of rates to create. Defaults to ``self.output_size``.
    eps : float, optional
    A "buffer" preventing rate values from getting too close to 0 or 1.
    Defaults to 1e-4.

    Returns
    -------
    rates : theano shared or None
    A vector of rate parameters for certain types of recurrent layers.
    Returns None for any unrecognized ``dist``.
    '''
    if size is None:
        size = self.output_size
    if dist == 'uniform':
        # Rates drawn uniformly in (eps, 1 - eps).
        z = np.random.uniform(eps, 1 - eps, size=size).astype(util.FLOAT)
        return theano.shared(z, name=self._fmt('rate'))
    if dist == 'log':
        # Log-uniform: exp of uniform(-6, -eps) gives rates in
        # roughly (e^-6, 1), concentrated near 0.
        z = np.random.uniform(-6, -eps, size=size).astype(util.FLOAT)
        return theano.shared(np.exp(z), name=self._fmt('rate'))
    return None
"def",
"_create_rates",
"(",
"self",
",",
"dist",
"=",
"'uniform'",
",",
"size",
"=",
"None",
",",
"eps",
"=",
"1e-4",
")",
":",
"if",
"size",
"is",
"None",
":",
"size",
"=",
"self",
".",
"output_size",
"if",
"dist",
"==",
"'uniform'",
":",
"z",
"=... | 40.111111 | 22.925926 |
def store_records_for_package(self, entry_point, records):
    """
    Store the records in a way that permit lookup by package
    """
    # Appending (rather than replacing) means a package that declared
    # multiple keys for the same namespace simply accumulates records;
    # this default implementation makes no assumption about whether or
    # not that is permitted.
    self._dist_to_package_module_map(entry_point).extend(records)
"def",
"store_records_for_package",
"(",
"self",
",",
"entry_point",
",",
"records",
")",
":",
"# If provided records already exist in the module mapping list,",
"# it likely means that a package declared multiple keys for the",
"# same package namespace; while normally this does not happen,... | 48.166667 | 20.5 |
def _event_monitor_loop(region_name, vpc_id,
                        watcher_plugin, health_plugin,
                        iterations, sleep_time,
                        route_check_time_interval=30):
    """
    Monitor queues to receive updates about new route specs or any detected
    failed IPs.

    If any of those have updates, notify the health-monitor thread with a
    message on a special queue and also re-process the entire routing table.

    The 'iterations' argument allows us to limit the running time of the watch
    loop for test purposes. Not used during normal operation. Also, for faster
    tests, sleep_time can be set to values less than 1.

    The 'route_check_time_interval' arguments specifies the number of seconds
    we allow to elapse before forcing a re-check of the VPC routes. This is so
    that accidentally deleted routes or manually broken route tables can be
    fixed back up again on their own.
    """
    q_route_spec = watcher_plugin.get_route_spec_queue()
    q_monitor_ips, q_failed_ips, q_questionable_ips = \
        health_plugin.get_queues()
    time.sleep(sleep_time)  # Wait to allow monitor to report results
    current_route_spec = {}  # The last route spec we have seen
    all_ips = []  # Cache of IP addresses we currently know about
    # Occasionally we want to recheck VPC routes even without other updates.
    # That way, if a route is manually deleted by someone, it will be
    # re-created on its own.
    last_route_check_time = time.time()
    while not CURRENT_STATE._stop_all:
        try:
            # Get the latest messages from the route-spec monitor and the
            # health-check monitor. At system start the route-spec queue should
            # immediately have been initialized with a first message.
            failed_ips = utils.read_last_msg_from_queue(q_failed_ips)
            questnbl_ips = utils.read_last_msg_from_queue(q_questionable_ips)
            new_route_spec = utils.read_last_msg_from_queue(q_route_spec)
            if failed_ips:
                # Store the failed IPs in the shared state
                CURRENT_STATE.failed_ips = failed_ips
            if questnbl_ips:
                # Store the questionable IPs in the shared state
                # NOTE(review): attribute is spelled 'questionble_ips' (no
                # 'a') — presumably intentional/matching CURRENT_STATE's
                # definition elsewhere; confirm before renaming.
                CURRENT_STATE.questionble_ips = questnbl_ips
            if new_route_spec:
                # Store the new route spec in the shared state
                CURRENT_STATE.route_spec = new_route_spec
                current_route_spec = new_route_spec
                # Need to communicate a new set of IPs to the health
                # monitoring thread, in case the list changed. The list of
                # addresses is extracted from the route spec. Pass in the old
                # version of the address list, so that this function can
                # compare to see if there are any changes to the host list.
                all_ips = _update_health_monitor_with_new_ips(new_route_spec,
                                                             all_ips,
                                                             q_monitor_ips)
            # Spec or list of failed or questionable IPs changed? Update
            # routes...
            # We pass in the last route spec we have seen, since we are also
            # here in case we only have failed/questionable IPs, but no new
            # route spec. This is also called occasionally on its own, so that
            # we can repair any damaged route tables in VPC.
            now = time.time()
            time_for_regular_recheck = \
                (now - last_route_check_time) > route_check_time_interval
            if new_route_spec or failed_ips or questnbl_ips or \
                    time_for_regular_recheck:
                if not new_route_spec and not (failed_ips or questnbl_ips):
                    # Only reason we are here is due to expired timer.
                    logging.debug("Time for regular route check")
                last_route_check_time = now
                vpc.handle_spec(region_name, vpc_id, current_route_spec,
                                failed_ips if failed_ips else [],
                                questnbl_ips if questnbl_ips else [])
            # If iterations are provided, count down and exit
            if iterations is not None:
                iterations -= 1
                if iterations == 0:
                    break
            time.sleep(sleep_time)
        except KeyboardInterrupt:
            # Allow exit via keyboard interrupt, useful during development
            return
        except Exception as e:
            # Of course we should never get here, but if we do, better to log
            # it and keep operating best we can...
            import traceback
            traceback.print_exc()
            logging.error("*** Uncaught exception 1: %s" % str(e))
            return
    logging.debug("event_monitor_loop ended: Global stop")
"def",
"_event_monitor_loop",
"(",
"region_name",
",",
"vpc_id",
",",
"watcher_plugin",
",",
"health_plugin",
",",
"iterations",
",",
"sleep_time",
",",
"route_check_time_interval",
"=",
"30",
")",
":",
"q_route_spec",
"=",
"watcher_plugin",
".",
"get_route_spec_queue... | 48.038835 | 25.203883 |
def hold_time(self, datetime=None):
    """Holding time per instrument code.

    For each code with a non-zero net amount, returns the elapsed time
    between its latest trade and the account's current datetime.

    Keyword Arguments:
    datetime {[type]} -- cutoff: only history up to this datetime is
    considered (default: {None}, meaning the full history)
    """
    def weights(x):
        # Only codes still held (non-zero total amount) get a value;
        # others become NaN and are dropped below.
        if sum(x['amount']) != 0:
            return pd.Timestamp(self.datetime
                                ) - pd.to_datetime(x.datetime.max())
        else:
            return np.nan

    if datetime is None:
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().groupby('code').apply(weights).dropna()
    else:
        # Slice the (sorted) datetime index up to the cutoff first.
        return self.history_table.set_index(
            'datetime',
            drop=False
        ).sort_index().loc[:datetime].groupby('code').apply(weights
                                                            ).dropna()
"def",
"hold_time",
"(",
"self",
",",
"datetime",
"=",
"None",
")",
":",
"def",
"weights",
"(",
"x",
")",
":",
"if",
"sum",
"(",
"x",
"[",
"'amount'",
"]",
")",
"!=",
"0",
":",
"return",
"pd",
".",
"Timestamp",
"(",
"self",
".",
"datetime",
")",
... | 33.2 | 18.24 |
def get_sync_binding_cmds(self, switch_bindings, expected_bindings):
    """Build the CLI command list that reconciles switch ACL bindings.

    Unexpected bindings are removed first, then any missing bindings
    are added.

    :param switch_bindings: set of (interface, acl, direction) currently applied
    :param expected_bindings: set of (interface, acl, direction) that should exist
    :returns: flat list of config-mode command strings
    """
    commands = []
    stale = switch_bindings - expected_bindings
    missing = expected_bindings - switch_bindings
    for interface, acl, direction in stale:
        commands += ['interface %s' % interface,
                     'no ip access-group %s %s' %
                     (acl, direction),
                     'exit']
    for interface, acl, direction in missing:
        commands += ['interface %s' % interface,
                     'ip access-group %s %s' % (acl, direction),
                     'exit']
    return commands
"def",
"get_sync_binding_cmds",
"(",
"self",
",",
"switch_bindings",
",",
"expected_bindings",
")",
":",
"switch_cmds",
"=",
"list",
"(",
")",
"# Update any necessary switch interface ACLs",
"bindings_to_delete",
"=",
"switch_bindings",
"-",
"expected_bindings",
"bindings_t... | 44.809524 | 15.047619 |
def get_buy(self, buy_id, **params):
    """Fetch a single buy belonging to this account.

    See https://developers.coinbase.com/api/v2#show-a-buy
    """
    client = self.api_client
    return client.get_buy(self.id, buy_id, **params)
"def",
"get_buy",
"(",
"self",
",",
"buy_id",
",",
"*",
"*",
"params",
")",
":",
"return",
"self",
".",
"api_client",
".",
"get_buy",
"(",
"self",
".",
"id",
",",
"buy_id",
",",
"*",
"*",
"params",
")"
] | 54.666667 | 9.666667 |
def document_examples(p):
    """
    Document example programs with purpose (and intent)
    """
    # (filename, purpose) pairs, kept in the original logging order.
    examples = [
        ('maths_ml_algorithms.py', 'machine learning algorithms for toolbox in AIKIF'),
        ('algebra.py', 'toolbox module for based evaluation of maths problems'),
        ('crypt_utils.py', 'scripts to encode / decode data'),
        ('game_board_utils.py', 'board game rules'),
        ('solve_knapsack.py ', 'toolbox - solves knapsack (using trivial algorithms)'),
        ('example_solve_happiness.py', 'toy problem - finding a world to keep all people happy (O^n^n^n complexity :) )'),
        ('finance_example.py', 'toy problem - logging finance data [TODO]'),
        ('maths_fermat_brute_force.py', 'sample script to calculate long running process'),
        ('puzzle_N_queens.py', 'stub only - not implemented///'),
        ('puzzle_missions_canninballs.py', 'calculates the path to solve problem'),
        ('solve_travelling_salesman.py', 'stub only'),
        ('solve_knapsack.py', 'functions to solve the knapsack problem'),
        ('load_ABS_data.py', 'old example showing how to map reading a file to the aikif'),
        ('createMindOntology.py', 'script to parse website wiki page of OpenCog into a simple CSV structure'),
        ('cyc_extract.py', 'script to read OpenCyc dataset and extract data (STUB only)'),
        ('read_opencyc.py', 'script to read OpenCyc dataset'),
        ('read_wordnet.py', 'script to read WordNet dataset'),
        ('review_ontology.py', '[DATA] program to document details of ontology review'),
        ('form_example_simple.py', 'creates a TKinter form to show how sql generation works = TOK'),
        ('worlds.py', 'generates a 2d grid world'),
        ('autobackup.py', 'example showing automatic file backups via filelists'),
        ('document_AIKIF.py', 'this program - collect a list of programs and add commments / progress on what is actually implemented'),
        ('ex_index_mydocs.py', 'example showing what to index'),
        ('ex_project_management.py', 'example on using aikif for project management (poor example - needs work)'),
        ('game_of_life_console.py', 'example showing a game of life = TOK'),
    ]
    for filename, purpose in examples:
        p.comment(filename, purpose)
"def",
"document_examples",
"(",
"p",
")",
":",
"p",
".",
"comment",
"(",
"'maths_ml_algorithms.py'",
",",
"'machine learning algorithms for toolbox in AIKIF'",
")",
"p",
".",
"comment",
"(",
"'algebra.py'",
",",
"'toolbox module for based evaluation of maths problems'",
")... | 75.1 | 40.9 |
def install_node_modules(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False):
    """Return a Command for managing an node_modules installation.

    Note: The command is skipped if the `--skip-yarn` flag is used.

    Parameters
    ----------
    path: str, optional
        The base path of the node package. Defaults to the repo root.
    build_dir: str, optional
        The target build directory. If this and source_dir are given,
        the JavaScript will only be build if necessary.
    source_dir: str, optional
        The source code directory.
    build_cmd: str, optional
        The yarn command to build assets to the build_dir.
    """
    class Yarn(BaseCommand):
        description = 'install package.json dependencies using yarn'

        def run(self):
            if skip_yarn:
                log.info('Skipping yarn-installation')
                return
            pkg_dir = path or here
            modules_dir = os.path.join(pkg_dir, 'node_modules')
            if not which("yarn"):
                log.error("`yarn` unavailable. If you're running this command "
                          "using sudo, make sure `yarn` is availble to sudo")
                return
            # Reinstall only when forced or when package.json is newer
            # than the node_modules tree.
            if force or is_stale(modules_dir, os.path.join(pkg_dir, 'package.json')):
                log.info('Installing build dependencies with yarn. This may '
                         'take a while...')
                run(['yarn', 'install'], cwd=pkg_dir)
            should_build = True
            if build_dir and source_dir and not force:
                should_build = is_stale(build_dir, source_dir)
            if should_build:
                run(['yarn', 'run', build_cmd], cwd=pkg_dir)

    return Yarn
"def",
"install_node_modules",
"(",
"path",
"=",
"None",
",",
"build_dir",
"=",
"None",
",",
"source_dir",
"=",
"None",
",",
"build_cmd",
"=",
"'build'",
",",
"force",
"=",
"False",
")",
":",
"class",
"Yarn",
"(",
"BaseCommand",
")",
":",
"description",
... | 40.767442 | 22.325581 |
def bootstrap(force=False):
    '''
    Download and install the latest version of the Chocolatey package manager
    via the official bootstrap.
    Chocolatey requires Windows PowerShell and the .NET v4.0 runtime. Depending
    on the host's version of Windows, chocolatey.bootstrap will attempt to
    ensure these prerequisites are met by downloading and executing the
    appropriate installers from Microsoft.
    Note that if PowerShell is installed, you may have to restart the host
    machine for Chocolatey to work.
    force
        Run the bootstrap process even if Chocolatey is found in the path.
    CLI Example:
    .. code-block:: bash
        salt '*' chocolatey.bootstrap
        salt '*' chocolatey.bootstrap force=True
    '''
    # Check if Chocolatey is already present in the path
    try:
        choc_path = _find_chocolatey(__context__, __salt__)
    except CommandExecutionError:
        choc_path = None
    # Already bootstrapped and no force requested -- nothing to do.
    if choc_path and not force:
        return 'Chocolatey found at {0}'.format(choc_path)
    # The following lookup tables are required to determine the correct
    # download required to install PowerShell. That's right, there's more
    # than one! You're welcome.
    # Keyed by (osrelease, cpuarch) as reported by the minion grains.
    ps_downloads = {
        ('Vista', 'x86'): 'http://download.microsoft.com/download/A/7/5/A75BC017-63CE-47D6-8FA4-AFB5C21BAC54/Windows6.0-KB968930-x86.msu',
        ('Vista', 'AMD64'): 'http://download.microsoft.com/download/3/C/8/3C8CF51E-1D9D-4DAA-AAEA-5C48D1CD055C/Windows6.0-KB968930-x64.msu',
        ('2008Server', 'x86'): 'http://download.microsoft.com/download/F/9/E/F9EF6ACB-2BA8-4845-9C10-85FC4A69B207/Windows6.0-KB968930-x86.msu',
        ('2008Server', 'AMD64'): 'http://download.microsoft.com/download/2/8/6/28686477-3242-4E96-9009-30B16BED89AF/Windows6.0-KB968930-x64.msu'
    }
    # It took until .NET v4.0 for Microsoft got the hang of making installers,
    # this should work under any version of Windows
    net4_url = 'http://download.microsoft.com/download/1/B/E/1BE39E79-7E39-46A3-96FF-047F95396215/dotNetFx40_Full_setup.exe'
    temp_dir = tempfile.gettempdir()
    # Check if PowerShell is installed. This should be the case for every
    # Windows release following Server 2008.
    ps_path = 'C:\\Windows\\SYSTEM32\\WindowsPowerShell\\v1.0\\powershell.exe'
    if not __salt__['cmd.has_exec'](ps_path):
        if (__grains__['osrelease'], __grains__['cpuarch']) in ps_downloads:
            # Install the appropriate release of PowerShell v2.0
            url = ps_downloads[(__grains__['osrelease'], __grains__['cpuarch'])]
            dest = os.path.join(temp_dir, 'powershell.exe')
            __salt__['cp.get_url'](url, dest)
            cmd = [dest, '/quiet', '/norestart']
            result = __salt__['cmd.run_all'](cmd, python_shell=False)
            if result['retcode'] != 0:
                err = ('Installing Windows PowerShell failed. Please run the '
                       'installer GUI on the host to get a more specific '
                       'reason.')
                raise CommandExecutionError(err)
        else:
            # No download known for this Windows release/architecture.
            err = 'Windows PowerShell not found'
            raise CommandNotFoundError(err)
    # Run the .NET Framework 4 web installer
    dest = os.path.join(temp_dir, 'dotnet4.exe')
    __salt__['cp.get_url'](net4_url, dest)
    cmd = [dest, '/q', '/norestart']
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if result['retcode'] != 0:
        err = ('Installing .NET v4.0 failed. Please run the installer GUI on '
               'the host to get a more specific reason.')
        raise CommandExecutionError(err)
    # Run the Chocolatey bootstrap.
    # python_shell=True is required below so that the '&&' PATH update
    # actually runs in cmd.exe after the PowerShell one-liner succeeds.
    cmd = (
        '{0} -NoProfile -ExecutionPolicy unrestricted '
        '-Command "iex ((new-object net.webclient).'
        'DownloadString(\'https://chocolatey.org/install.ps1\'))" '
        '&& SET PATH=%PATH%;%systemdrive%\\chocolatey\\bin'
        .format(ps_path)
    )
    result = __salt__['cmd.run_all'](cmd, python_shell=True)
    if result['retcode'] != 0:
        raise CommandExecutionError(
            'Bootstrapping Chocolatey failed: {0}'.format(result['stderr'])
        )
    return result['stdout']
"def",
"bootstrap",
"(",
"force",
"=",
"False",
")",
":",
"# Check if Chocolatey is already present in the path",
"try",
":",
"choc_path",
"=",
"_find_chocolatey",
"(",
"__context__",
",",
"__salt__",
")",
"except",
"CommandExecutionError",
":",
"choc_path",
"=",
"Non... | 43.531915 | 26.765957 |
def distance_similarity(a, b, p, T=CLOSE_DISTANCE_THRESHOLD):
    """Score how close a point lies to a line segment.

    Args:
        a ([float, float]): x and y coordinates. Line start
        b ([float, float]): x and y coordinates. Line end
        p ([float, float]): x and y coordinates. Point to score
        T (float): distance at which the similarity decays to zero

    Returns:
        float: 1 for a point on the line, falling linearly to 0 once
            the point is T or farther away.
    """
    gap = distance_to_line(a, b, p)
    # Linear falloff: 1 at zero distance, 0 at distance >= T.
    score = (-1 / float(T)) * abs(gap) + 1
    return score if score > 0 else 0
"def",
"distance_similarity",
"(",
"a",
",",
"b",
",",
"p",
",",
"T",
"=",
"CLOSE_DISTANCE_THRESHOLD",
")",
":",
"d",
"=",
"distance_to_line",
"(",
"a",
",",
"b",
",",
"p",
")",
"r",
"=",
"(",
"-",
"1",
"/",
"float",
"(",
"T",
")",
")",
"*",
"a... | 36 | 21.933333 |
def _compute_distance_fast(self):
    """Run edit_distance and cache its results on the instance.

    When a distance or match count is already cached, assert that the
    freshly computed values agree with it before overwriting.
    """
    distance, matches = edit_distance(self.seq1, self.seq2,
                                      action_function=self.action_function,
                                      test=self.test)
    if self.dist:
        assert distance == self.dist
    if self._matches:
        assert matches == self._matches
    self.dist = distance
    self._matches = matches
"def",
"_compute_distance_fast",
"(",
"self",
")",
":",
"d",
",",
"m",
"=",
"edit_distance",
"(",
"self",
".",
"seq1",
",",
"self",
".",
"seq2",
",",
"action_function",
"=",
"self",
".",
"action_function",
",",
"test",
"=",
"self",
".",
"test",
")",
"i... | 40.166667 | 10.416667 |
def click_on_pat(pat: str, mousebutton: int=1, offset: (float, float)=None, tolerance: int=0, restore_pos: bool=False) -> None:
    """
    Requires imagemagick, xautomation, xwd.
    Click on a pattern at an offset (x, y) expressed in percent of the
    pattern's dimensions, measured from its top-left corner. The default
    offset (50, 50) targets the center of the match.
    Raises PatternNotFound when the pattern is not visible on the screen.
    :param pat: path of pattern image (PNG) to click on.
    :param mousebutton: mouse button number used for the click
    :param offset: offset from the top left point of the match. (float,float)
    :param tolerance: An integer ≥ 0 to specify the level of tolerance for 'fuzzy' matches. If negative or not convertible to int, raises ValueError.
    :param restore_pos: return to the initial mouse position after the click.
    """
    start_x, start_y = mouse_pos()
    move_to_pat(pat, offset, tolerance)
    mouse_click(mousebutton)
    if restore_pos:
        mouse_move(start_x, start_y)
"def",
"click_on_pat",
"(",
"pat",
":",
"str",
",",
"mousebutton",
":",
"int",
"=",
"1",
",",
"offset",
":",
"(",
"float",
",",
"float",
")",
"=",
"None",
",",
"tolerance",
":",
"int",
"=",
"0",
",",
"restore_pos",
":",
"bool",
"=",
"False",
")",
... | 69.875 | 42 |
def load_module(full_path):
    """
    Load and execute a Python module given its file path.

    Args:
        full_path: path (with ``/`` or ``\\`` separators) to a ``.py`` file
    Returns:
        the freshly executed module object
    Raises:
        ImportError: if the path does not end in ``.py``
    References:
        https://stackoverflow.com/a/67692/1332656
    """
    from importlib import util

    # Normalise separators so Windows paths split the same way.
    leaf = full_path.replace('\\', '/').split('/')[-1]
    if not leaf.endswith('.py'):
        raise ImportError(f'not a python file: {leaf}')
    module_name = leaf[:-3]
    spec = util.spec_from_file_location(name=module_name, location=full_path)
    module = util.module_from_spec(spec=spec)
    spec.loader.exec_module(module=module)
    return module
"def",
"load_module",
"(",
"full_path",
")",
":",
"from",
"importlib",
"import",
"util",
"file_name",
"=",
"full_path",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"if",
"file_name",
"[",
"-",
"3... | 31.34375 | 17.96875 |
def with_random_weights(cls, options):
    """
    Build an instance whose options carry uniformly random weights.

    Each option is paired with a random integer weight drawn from
    ``1`` to ``len(options)`` inclusive.

    Args:
        options (list): The list of options of any type this object
            can return with the ``get()`` method.

    Returns:
        SoftOptions: A newly constructed instance
    """
    top = len(options)
    weighted = [(choice, random.randint(1, top)) for choice in options]
    return cls(weighted)
for value in options]) | [
"def",
"with_random_weights",
"(",
"cls",
",",
"options",
")",
":",
"return",
"cls",
"(",
"[",
"(",
"value",
",",
"random",
".",
"randint",
"(",
"1",
",",
"len",
"(",
"options",
")",
")",
")",
"for",
"value",
"in",
"options",
"]",
")"
] | 34.1875 | 19.4375 |
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.

    Solves the trend-filtering problem at `lambda_bins` penalty values
    spaced log-uniformly between `max_lambda` and `min_lambda` (largest
    first, warm-starting each solve from the previous one), scores every
    fit with AIC/AICc/BIC, and reports the fit with the lowest BIC.

    NOTE(review): assumes `self.y`, `self.D`, `self.Dk`, `self.Dk_minus_one`,
    `self.k` and `self.solve` were initialised by the enclosing class --
    confirm against the constructor.
    '''
    self.u = np.zeros(self.Dk.shape[0], dtype='double')
    # Sweep from the strongest penalty down to the weakest.
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None
    if self.edges is None:
        # Lazily build adjacency lists from the difference operator D.
        self.edges = defaultdict(list)
        elist = csr_matrix(self.D).indices.reshape((self.D.shape[0], 2))
        for n1, n2 in elist:
            self.edges[n1].append(n2)
            self.edges[n2].append(n1)
    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))
        # Fit to the final values
        beta = self.solve(lam)
        if verbose:
            print('Calculating degrees of freedom')
        # Count the number of free parameters in the grid (dof) -- TODO: the graph trend filtering paper seems to imply we shouldn't multiply by (k+1)?
        dof_vals = self.Dk_minus_one.dot(beta) if self.k > 0 else beta
        # Even k: count connected plateaus over the graph; odd k: count
        # distinct values (both up to a relative tolerance).
        plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=0.01) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=0.03)
        dof_trace[i] = max(1,len(plateaus)) #* (k+1)
        if verbose:
            print('Calculating Information Criteria')
        # Get the negative log-likelihood
        log_likelihood_trace[i] = -0.5 * ((self.y - beta)**2).sum()
        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
        # Track the best model thus far
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus
        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))
        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}\n'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
    return {'aic': aic_trace,
            'aicc': aicc_trace,
            'bic': bic_trace,
            'dof': dof_trace,
            'loglikelihood': log_likelihood_trace,
            'beta': np.array(beta_trace),
            'lambda': lambda_grid,
            'best_idx': best_idx,
            'best': beta_trace[best_idx],
            'plateaus': best_plateaus}
"def",
"solution_path",
"(",
"self",
",",
"min_lambda",
",",
"max_lambda",
",",
"lambda_bins",
",",
"verbose",
"=",
"0",
")",
":",
"self",
".",
"u",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"Dk",
".",
"shape",
"[",
"0",
"]",
",",
"dtype",
"=",
... | 47.84 | 28.666667 |
def cumulative_distribution(self, X):
    """Computes the cumulative distribution function for the copula, :math:`C(u, v)`

    Evaluates :math:`(u^{-\\theta} + v^{-\\theta} - 1)^{-1/\\theta}` per row,
    with zero probability wherever a margin is zero.

    Args:
        X: `np.ndarray` with one (u, v) pair per row

    Returns:
        np.array: cumulative probability per row
    """
    self.check_fit()
    U, V = self.split_matrix(X)
    # Degenerate margins: all mass at zero probability.
    if (V == 0).all() or (U == 0).all():
        return np.zeros(V.shape[0])
    values = []
    for u, v in zip(U, V):
        if u > 0 and v > 0:
            base = np.power(u, -self.theta) + np.power(v, -self.theta) - 1
            values.append(np.power(base, -1.0 / self.theta))
        else:
            values.append(0)
    # Numerical error can push results slightly negative; clamp at zero.
    return np.array([max(value, 0) for value in values])
"def",
"cumulative_distribution",
"(",
"self",
",",
"X",
")",
":",
"self",
".",
"check_fit",
"(",
")",
"U",
",",
"V",
"=",
"self",
".",
"split_matrix",
"(",
"X",
")",
"if",
"(",
"V",
"==",
"0",
")",
".",
"all",
"(",
")",
"or",
"(",
"U",
"==",
... | 26.888889 | 19.222222 |
def _to_linear(M, N, L, q):
"Converts a qubit in chimera coordinates to its linear index."
(x, y, u, k) = q
return 2 * L * N * x + 2 * L * y + L * u + k | [
"def",
"_to_linear",
"(",
"M",
",",
"N",
",",
"L",
",",
"q",
")",
":",
"(",
"x",
",",
"y",
",",
"u",
",",
"k",
")",
"=",
"q",
"return",
"2",
"*",
"L",
"*",
"N",
"*",
"x",
"+",
"2",
"*",
"L",
"*",
"y",
"+",
"L",
"*",
"u",
"+",
"k"
] | 40.25 | 16.75 |
def fields_metadata(self):
    """
    Return one metadata row per field as a dataframe, sorted by
    step number, frame, label and position.
    """
    per_field = [field.metadata() for field in self.fields]
    table = pd.concat(per_field, axis=1).transpose()
    return table.sort_values(["step_num", "frame", "label", "position"])
"def",
"fields_metadata",
"(",
"self",
")",
":",
"return",
"(",
"pd",
".",
"concat",
"(",
"[",
"f",
".",
"metadata",
"(",
")",
"for",
"f",
"in",
"self",
".",
"fields",
"]",
",",
"axis",
"=",
"1",
")",
".",
"transpose",
"(",
")",
".",
"sort_values... | 35.142857 | 12.857143 |
def normalize(expr):
    """Rebuild *expr* bottom-up with each argument normalized; no elimination."""
    rebuilt = list(map(normalize, expr.args))
    return type(expr)(expr.func, *rebuilt, start=expr.start, end=expr.end)
"def",
"normalize",
"(",
"expr",
")",
":",
"args",
"=",
"[",
"normalize",
"(",
"arg",
")",
"for",
"arg",
"in",
"expr",
".",
"args",
"]",
"return",
"type",
"(",
"expr",
")",
"(",
"expr",
".",
"func",
",",
"*",
"args",
",",
"start",
"=",
"expr",
... | 37.8 | 19.8 |
def resource_created_response(resource):
    """Return HTTP response with status code *201*, signaling a created
    *resource*

    :param resource: resource created as a result of current request
    :type resource: :class:`sandman.model.Model`
    :rtype: :class:`flask.Response`
    """
    if _get_acceptable_response_type() == JSON:
        builder = _single_resource_json_response
    else:
        builder = _single_resource_html_response
    response = builder(resource)
    response.status_code = 201
    # NOTE(review): host/port is hard-coded here -- confirm it should not be
    # derived from the active request instead.
    response.headers['Location'] = 'http://localhost:5000/{}'.format(
        resource.resource_uri())
    return response
"def",
"resource_created_response",
"(",
"resource",
")",
":",
"if",
"_get_acceptable_response_type",
"(",
")",
"==",
"JSON",
":",
"response",
"=",
"_single_resource_json_response",
"(",
"resource",
")",
"else",
":",
"response",
"=",
"_single_resource_html_response",
... | 35.705882 | 17.117647 |
def insert(self, row, ensure=None, types=None):
    """Add a ``row`` dict by inserting it into the table.
    If ``ensure`` is set, any of the keys of the row are not
    table columns, they will be created automatically.
    During column creation, ``types`` will be checked for a key
    matching the name of a column to be created, and the given
    SQLAlchemy column type will be used. Otherwise, the type is
    guessed from the row value, defaulting to a simple unicode
    field.
    ::
        data = dict(title='I am a banana!')
        table.insert(data)
    Returns the inserted row's primary key.
    """
    prepared = self._sync_columns(row, ensure, types=types)
    result = self.db.executable.execute(self.table.insert(prepared))
    primary_key = result.inserted_primary_key
    # Tables without a primary key still report success.
    return primary_key[0] if len(primary_key) > 0 else True
"def",
"insert",
"(",
"self",
",",
"row",
",",
"ensure",
"=",
"None",
",",
"types",
"=",
"None",
")",
":",
"row",
"=",
"self",
".",
"_sync_columns",
"(",
"row",
",",
"ensure",
",",
"types",
"=",
"types",
")",
"res",
"=",
"self",
".",
"db",
".",
... | 38.565217 | 20.391304 |
def conditional_jit(function=None, **kwargs):  # noqa: D202
    """Apply numba's jit decorator when numba is importable.

    Usable both as a bare decorator::

        @conditional_jit
        def my_func(): ...

    and with keyword arguments forwarded to ``numba.jit``::

        @conditional_jit(nopython=True)
        def my_func(): ...

    When numba is not installed, the function is returned untouched.
    """
    def decorate(target):
        try:
            numba = importlib.import_module("numba")
            return numba.jit(**kwargs)(target)
        except ImportError:
            return target

    return decorate(function) if function else decorate
"def",
"conditional_jit",
"(",
"function",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: D202",
"def",
"wrapper",
"(",
"function",
")",
":",
"try",
":",
"numba",
"=",
"importlib",
".",
"import_module",
"(",
"\"numba\"",
")",
"return",
"numba",
... | 21.064516 | 22.548387 |
def parse(readDataInstance):
    """
    Returns a new L{ImageBoundImportDescriptorEntry} object.

    @type readDataInstance: L{ReadData}
    @param readDataInstance: A L{ReadData} object containing data to create a new L{ImageBoundImportDescriptorEntry}.

    @rtype: L{ImageBoundImportDescriptorEntry}
    @return: A new {ImageBoundImportDescriptorEntry} object.
    """
    entry = ImageBoundImportDescriptorEntry()
    entry.timeDateStamp.value = readDataInstance.readDword()
    entry.offsetModuleName.value = readDataInstance.readWord()
    entry.numberOfModuleForwarderRefs.value = readDataInstance.readWord()
    forwarder_count = entry.numberOfModuleForwarderRefs.value
    if forwarder_count:
        # Forwarder-ref entries have a fixed size; read them all in one chunk.
        chunk_size = forwarder_count * ImageBoundForwarderRefEntry().sizeof()
        reader = utils.ReadData(readDataInstance.read(chunk_size))
        entry.forwarderRefsList = ImageBoundForwarderRef.parse(reader, forwarder_count)
    return entry
"def",
"parse",
"(",
"readDataInstance",
")",
":",
"boundEntry",
"=",
"ImageBoundImportDescriptorEntry",
"(",
")",
"boundEntry",
".",
"timeDateStamp",
".",
"value",
"=",
"readDataInstance",
".",
"readDword",
"(",
")",
"boundEntry",
".",
"offsetModuleName",
".",
"v... | 50.954545 | 27.318182 |
def _copy_replace(src, dst, replacements):
"""Copies the src file into dst applying the replacements dict"""
with src.open() as infile, dst.open('w') as outfile:
outfile.write(re.sub(
'|'.join(re.escape(k) for k in replacements),
lambda m: str(replacements[m.group(0)]),
infile.read()
)) | [
"def",
"_copy_replace",
"(",
"src",
",",
"dst",
",",
"replacements",
")",
":",
"with",
"src",
".",
"open",
"(",
")",
"as",
"infile",
",",
"dst",
".",
"open",
"(",
"'w'",
")",
"as",
"outfile",
":",
"outfile",
".",
"write",
"(",
"re",
".",
"sub",
"... | 42.5 | 12.875 |
def get_child_value(parent, name, allow_missing=0):
    """Return the value of the child element with name in the parent Element.

    When the child is absent, returns ``np.nan`` if *allow_missing* is
    truthy, otherwise raises.
    """
    if parent.HasElement(name):
        return XmlHelper.as_value(parent.GetElement(name))
    if allow_missing:
        return np.nan
    # NOTE(review): a generic Exception is hard for callers to target --
    # consider a more specific type if callers can be audited.
    raise Exception('failed to find child element %s in parent' % name)
"def",
"get_child_value",
"(",
"parent",
",",
"name",
",",
"allow_missing",
"=",
"0",
")",
":",
"if",
"not",
"parent",
".",
"HasElement",
"(",
"name",
")",
":",
"if",
"allow_missing",
":",
"return",
"np",
".",
"nan",
"else",
":",
"raise",
"Exception",
... | 45.333333 | 16.555556 |
def scale(text="", value=0, min=0, max=100, step=1, draw_value=True, title="",
          width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
    """
    Select a number with a range widget

    :param text: text inside window
    :type text: str
    :param value: current value
    :type value: int
    :param min: minimum value
    :type min: int
    :param max: maximum value
    :type max: int
    :param step: incrementation value
    :type step: int
    :param draw_value: hide/show cursor value
    :type draw_value: bool
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    :return: The value selected by the user
    :rtype: float
    """
    widget = ZScale(text, value, min, max, step,
                    draw_value, title, width, height, timeout)
    widget.run()
    return widget.response
"def",
"scale",
"(",
"text",
"=",
"\"\"",
",",
"value",
"=",
"0",
",",
"min",
"=",
"0",
",",
"max",
"=",
"100",
",",
"step",
"=",
"1",
",",
"draw_value",
"=",
"True",
",",
"title",
"=",
"\"\"",
",",
"width",
"=",
"DEFAULT_WIDTH",
",",
"height",
... | 30.34375 | 14.84375 |
def calc_v_qa_v1(self):
    """Update the stored water volume based on the equation of continuity.

    Basic equation: :math:`\\frac{dV}{dt} = QZ - QA`

    The outflow ``QA`` is first trimmed so that, over one internal
    calculation step (``Seconds / NmbSubsteps``), it can never drain more
    than the inflow plus the currently stored volume provides; the volume
    is then advanced in time and clipped at zero.

    Required derived parameters: |Seconds|, |NmbSubsteps|
    Required flux sequence: |QZ|
    Updated aide sequences: |llake_aides.QA|, |llake_aides.V|

    Note that the results depend on the (inner) calculation step size
    defined by parameter `maxdt`, not on the (outer) simulation step.
    """
    derived = self.parameters.derived.fastaccess
    fluxes = self.sequences.fluxes.fastaccess
    aides = self.sequences.aides.fastaccess
    # Trim the outflow so the lake cannot be over-drained.
    aides.qa = min(aides.qa, fluxes.qz + derived.nmbsubsteps / derived.seconds * aides.v)
    # Continuity update; the volume is never allowed to go negative.
    aides.v = max(aides.v + derived.seconds / derived.nmbsubsteps * (fluxes.qz - aides.qa), 0.)
"def",
"calc_v_qa_v1",
"(",
"self",
")",
":",
"der",
"=",
"self",
".",
"parameters",
".",
"derived",
".",
"fastaccess",
"flu",
"=",
"self",
".",
"sequences",
".",
"fluxes",
".",
"fastaccess",
"aid",
"=",
"self",
".",
"sequences",
".",
"aides",
".",
"fa... | 28.848485 | 20.984848 |
def _extract_options(orig_script):
    """
    Pull the option string off the script's first line, if any.
    """
    first_line = (orig_script + '\n').splitlines()[0]
    match = _first_line_re().match(first_line)
    if match:
        options = match.group(1) or ''
    else:
        options = ''
    return options.strip()
"def",
"_extract_options",
"(",
"orig_script",
")",
":",
"first",
"=",
"(",
"orig_script",
"+",
"'\\n'",
")",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"match",
"=",
"_first_line_re",
"(",
")",
".",
"match",
"(",
"first",
")",
"options",
"=",
"match"... | 37.5 | 8.75 |
def step(g, n1, n2, inbound=False, backward=False, continue_fn=None):
    """
    Step along a path through a directed graph unless there is an intersection

    Given the edge (n1, n2), return the unique next node after n2 that is
    not n1. When there is no unique continuation (a dead end or an
    intersection), delegate to *continue_fn* if given, otherwise return None.

    Parameters
    ----------
    g : networkx DiGraph
    n1 : node id in g
    n2 : node id in g
        (n1, n2) must be an edge in g
    inbound : bool (default False)
        whether incoming edges should be considered
    backward : bool (default False)
        whether edges are in reverse order (i.e., point from n2 to n1)
    continue_fn : callable (optional)
        called at an intersection as f(g, n1, n2, backward); should return
        a successor of n2, or None if there is no way forward

    Returns
    -------
    The next node in the path from n1 to n2. Returns None if there
    are no edges from n2 or multiple edges from n2
    """
    outgoing = g.successors
    incoming = g.predecessors
    if backward:
        outgoing, incoming = incoming, outgoing
    neighbors = outgoing(n2)
    if inbound:
        neighbors = set(neighbors + incoming(n2))
    candidates = [node for node in neighbors if node != n1]
    if len(candidates) == 1:
        return candidates[0]
    if continue_fn:
        return continue_fn(g, n1, n2, backward)
    return None
"def",
"step",
"(",
"g",
",",
"n1",
",",
"n2",
",",
"inbound",
"=",
"False",
",",
"backward",
"=",
"False",
",",
"continue_fn",
"=",
"None",
")",
":",
"forw",
"=",
"g",
".",
"successors",
"back",
"=",
"g",
".",
"predecessors",
"if",
"backward",
":"... | 25.051948 | 22.220779 |
def get_items(self, project=None, scope_path=None, recursion_level=None, include_links=None, version_descriptor=None):
    """GetItems.
    Get a list of Tfvc items
    :param str project: Project ID or project name
    :param str scope_path: Version control path of a folder to return multiple items.
    :param str recursion_level: None (just the item), or OneLevel (contents of a folder).
    :param bool include_links: True to include links.
    :param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor:
    :rtype: [TfvcItem]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    # Serialize the scalar query parameters in one data-driven pass.
    scalar_params = (
        ('scope_path', 'scopePath', scope_path, 'str'),
        ('recursion_level', 'recursionLevel', recursion_level, 'str'),
        ('include_links', 'includeLinks', include_links, 'bool'),
    )
    for py_name, wire_name, value, type_name in scalar_params:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(py_name, value, type_name)
    # The version descriptor expands into up to three dotted query keys.
    if version_descriptor is not None:
        descriptor_fields = (
            ('version_option', 'versionDescriptor.versionOption'),
            ('version_type', 'versionDescriptor.versionType'),
            ('version', 'versionDescriptor.version'),
        )
        for attr_name, wire_name in descriptor_fields:
            value = getattr(version_descriptor, attr_name)
            if value is not None:
                query_parameters[wire_name] = value
    response = self._send(http_method='GET',
                          location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TfvcItem]', self._unwrap_collection(response))
"def",
"get_items",
"(",
"self",
",",
"project",
"=",
"None",
",",
"scope_path",
"=",
"None",
",",
"recursion_level",
"=",
"None",
",",
"include_links",
"=",
"None",
",",
"version_descriptor",
"=",
"None",
")",
":",
"route_values",
"=",
"{",
"}",
"if",
"... | 63.666667 | 29.212121 |
def _parse(self, content):
"""
Parse data request to data from python.
@param content: Context of request.
@raise ParseError:
"""
if content:
stream = BytesIO(str(content))
data = json.loads(stream.getvalue())
return data | [
"def",
"_parse",
"(",
"self",
",",
"content",
")",
":",
"if",
"content",
":",
"stream",
"=",
"BytesIO",
"(",
"str",
"(",
"content",
")",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"stream",
".",
"getvalue",
"(",
")",
")",
"return",
"data"
] | 22 | 17.857143 |
def query_segdb(cls, flag, *args, **kwargs):
    """Query the initial LIGO segment database for the given flag
    Parameters
    ----------
    flag : `str`
        The name of the flag for which to query
    *args
        Either, two `float`-like numbers indicating the
        GPS [start, stop) interval, or a `SegmentList`
        defining a number of summary segments
    url : `str`, optional
        URL of the segment database, defaults to
        ``$DEFAULT_SEGMENT_SERVER`` environment variable, or
        ``'https://segments.ligo.org'``
    Returns
    -------
    flag : `DataQualityFlag`
        A new `DataQualityFlag`, with the `known` and `active` lists
        filled appropriately.
    """
    warnings.warn("query_segdb is deprecated and will be removed in a "
                  "future release", DeprecationWarning)
    # parse arguments
    qsegs = _parse_query_segments(args, cls.query_segdb)
    # process query
    # delegate to the dict-level implementation with a single-flag list
    try:
        flags = DataQualityDict.query_segdb([flag], qsegs, **kwargs)
    except TypeError as exc:
        # rewrite dict-level TypeErrors so the message names this class,
        # matching the API the caller actually used; re-raise anything else
        if 'DataQualityDict' in str(exc):
            raise TypeError(str(exc).replace('DataQualityDict',
                                             cls.__name__))
        else:
            raise
    # a single-flag query must return exactly one flag; anything else
    # indicates a server/parsing problem
    if len(flags) > 1:
        raise RuntimeError("Multiple flags returned for single query, "
                           "something went wrong:\n    %s"
                           % '\n    '.join(flags.keys()))
    elif len(flags) == 0:
        raise RuntimeError("No flags returned for single query, "
                           "something went wrong.")
    return flags[flag] | [
"def",
"query_segdb",
"(",
"cls",
",",
"flag",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"\"query_segdb is deprecated and will be removed in a \"",
"\"future release\"",
",",
"DeprecationWarning",
")",
"# parse arguments",
"q... | 37 | 20.659574 |
async def dispatch(self, request, view=None, **kwargs):
    """Resolve and invoke the handler for *request* (or the explicitly
    named *view*), then wrap its result via ``make_response``.

    Raises ``HTTPMethodNotAllowed`` when no view is given and the request
    method is not in ``self.methods``.
    """
    if view is None and request.method not in self.methods:
        raise HTTPMethodNotAllowed(request.method, self.methods)
    handler = getattr(self, view or request.method.lower())
    raw_response = await handler(request, **kwargs)
    return await self.make_response(request, raw_response)
"async",
"def",
"dispatch",
"(",
"self",
",",
"request",
",",
"view",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"view",
"is",
"None",
"and",
"request",
".",
"method",
"not",
"in",
"self",
".",
"methods",
":",
"raise",
"HTTPMethodNotAllowed"... | 48.375 | 19.5 |
def iter_xCharts(self):
    """
    Generate each xChart child element in document.
    """
    xchart_tagnames = (
        'c:area3DChart', 'c:areaChart', 'c:bar3DChart', 'c:barChart',
        'c:bubbleChart', 'c:doughnutChart', 'c:line3DChart', 'c:lineChart',
        'c:ofPieChart', 'c:pie3DChart', 'c:pieChart', 'c:radarChart',
        'c:scatterChart', 'c:stockChart', 'c:surface3DChart',
        'c:surfaceChart',
    )
    # Resolve the clark-notation tags once, then filter children by them.
    plot_tags = tuple(qn(tagname) for tagname in xchart_tagnames)
    for child in self.iterchildren():
        if child.tag in plot_tags:
            yield child
"def",
"iter_xCharts",
"(",
"self",
")",
":",
"plot_tags",
"=",
"(",
"qn",
"(",
"'c:area3DChart'",
")",
",",
"qn",
"(",
"'c:areaChart'",
")",
",",
"qn",
"(",
"'c:bar3DChart'",
")",
",",
"qn",
"(",
"'c:barChart'",
")",
",",
"qn",
"(",
"'c:bubbleChart'",
... | 38.411765 | 19.235294 |
def get_tokens(line: str) -> Iterator[str]:
    """
    Yield the whitespace-separated tokens of the input string.
    :param line: Input string.
    :return: Iterator over (non-empty) tokens.
    """
    # str.split() with no separator already drops empty fields, so no
    # explicit length check is required.
    yield from line.rstrip().split()
"def",
"get_tokens",
"(",
"line",
":",
"str",
")",
"->",
"Iterator",
"[",
"str",
"]",
":",
"for",
"token",
"in",
"line",
".",
"rstrip",
"(",
")",
".",
"split",
"(",
")",
":",
"if",
"len",
"(",
"token",
")",
">",
"0",
":",
"yield",
"token"
] | 24.5 | 9.5 |
def get_log_file_from_item(history):
    """
    Return the log file based on provided history item.

    A history item is a log-file path optionally followed by a (quoted)
    description; only the path is returned. Items that do not split into
    exactly two shell-style tokens fall back to the stripped raw value.
    """
    parts = None
    try:
        parts = shlex.split(history)
    except ValueError:
        # Unbalanced quoting etc. -- fall through to the raw value.
        pass
    if parts is not None and len(parts) == 2:
        return parts[0]
    return history.strip()
"def",
"get_log_file_from_item",
"(",
"history",
")",
":",
"try",
":",
"log_file",
",",
"description",
"=",
"shlex",
".",
"split",
"(",
"history",
")",
"except",
"ValueError",
":",
"log_file",
"=",
"history",
".",
"strip",
"(",
")",
"return",
"log_file"
] | 22.333333 | 16.666667 |
def addVars(filename, varNamesStr, outOf=None):
    r"""Like `saveVars`, but appends additional variables to file."""
    filename, varnames, outOf = __saveVarsHelper(filename, varNamesStr, outOf)
    # Load the existing pickled mapping, merge in the new variables, then
    # rewrite the whole file; the with-blocks guarantee the handles close.
    with open(filename, "rb") as infile:
        stored = cPickle.load(infile)
    stored.update(dict(zip(varnames, atIndices(outOf, varnames))))
    with open(filename, "wb") as outfile:
        cPickle.dump(stored, outfile, 1)
"def",
"addVars",
"(",
"filename",
",",
"varNamesStr",
",",
"outOf",
"=",
"None",
")",
":",
"filename",
",",
"varnames",
",",
"outOf",
"=",
"__saveVarsHelper",
"(",
"filename",
",",
"varNamesStr",
",",
"outOf",
")",
"f",
"=",
"None",
"try",
":",
"f",
"... | 32.428571 | 19.642857 |
def map_grounding():
    """Map grounding on a list of INDRA Statements."""
    # CORS preflight: answer OPTIONS with an empty body.
    if request.method == 'OPTIONS':
        return {}
    raw_body = request.body.read().decode('utf-8')
    payload = json.loads(raw_body)
    statements = stmts_from_json(payload.get('statements'))
    mapped = ac.map_grounding(statements)
    return _return_stmts(mapped)
"def",
"map_grounding",
"(",
")",
":",
"if",
"request",
".",
"method",
"==",
"'OPTIONS'",
":",
"return",
"{",
"}",
"response",
"=",
"request",
".",
"body",
".",
"read",
"(",
")",
".",
"decode",
"(",
"'utf-8'",
")",
"body",
"=",
"json",
".",
"loads",
... | 35.9 | 7.5 |
def traverse_next(page, nextx, results, tabular_data_headers=None, verbosity=0):
    """
    Recursive generator to traverse through the next attribute and \
    crawl through the links to be followed.
    :param page: The current page being parsed
    :param nextx: The next attribute of the current scraping dict
    :param results: The current extracted content, stored in a dict
    :param tabular_data_headers: Accumulated list of table headers, shared
        down the recursion; a fresh list is created per top-level call
    :param verbosity: 0 = silent, 1 = page progress, 2 = per-attribute detail
    :return: (tabular_data_headers, result) tuples, through a generator
    """
    # The original signature used a mutable default ([]), which leaked
    # headers between independent top-level calls; default to None instead.
    if tabular_data_headers is None:
        tabular_data_headers = []
    for link in page.extract_links(selector=nextx['follow_link']):
        if verbosity > 0:
            print('\n')
            print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
        r = results.copy()
        # Extract every named field from the followed page.
        for attribute in nextx['scraping'].get('data'):
            if attribute['field'] != "":
                if verbosity > 1:
                    print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
                r[attribute['field']] = link.extract_content(**attribute)
        if not nextx['scraping'].get('table'):
            result_list = [r]
        else:
            # Tabular extraction: each table contributes headers (accumulated)
            # and replaces result_list with its row results.
            tables = nextx['scraping'].get('table', [])
            for table in tables:
                table.update({
                    'result': r,
                    'verbosity': verbosity
                })
                table_headers, result_list = link.extract_tabular(**table)
                tabular_data_headers.extend(table_headers)
        if not nextx['scraping'].get('next'):
            # Leaf level: yield the collected results.
            for r in result_list:
                yield (tabular_data_headers, r)
        else:
            # Recurse into the next level of links, sharing the header list.
            for nextx2 in nextx['scraping'].get('next'):
                for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
                    yield (tdh, result)
"def",
"traverse_next",
"(",
"page",
",",
"nextx",
",",
"results",
",",
"tabular_data_headers",
"=",
"[",
"]",
",",
"verbosity",
"=",
"0",
")",
":",
"for",
"link",
"in",
"page",
".",
"extract_links",
"(",
"selector",
"=",
"nextx",
"[",
"'follow_link'",
"... | 45.205128 | 19.923077 |
def haversine(lat1, lon1, lat2, lon2):
    """
    Compute the great-circle distance in meters between two lat/lon points
    using the haversine formula.

    Parameters
    ----------
    lat1, lon1 : int or float
        latitude and longitude of point 1, in degrees
    lat2, lon2 : int or float
        latitude and longitude of point 2, in degrees

    Returns
    -------
    float
        the distance between point 1 and point 2 in meters
    """
    earth_radius_m = 6371000
    phi1, lam1, phi2, lam2 = (math.radians(v) for v in (lat1, lon1, lat2, lon2))
    sin_half_dphi = math.sin((phi2 - phi1) / 2)
    sin_half_dlam = math.sin((lam2 - lam1) / 2)
    h = sin_half_dphi ** 2 + math.cos(phi1) * math.cos(phi2) * sin_half_dlam ** 2
    central_angle = 2 * math.asin(math.sqrt(h))
    return earth_radius_m * central_angle
"def",
"haversine",
"(",
"lat1",
",",
"lon1",
",",
"lat2",
",",
"lon2",
")",
":",
"radius",
"=",
"6371000",
"lat1",
",",
"lon1",
",",
"lat2",
",",
"lon2",
"=",
"map",
"(",
"math",
".",
"radians",
",",
"[",
"lat1",
",",
"lon1",
",",
"lat2",
",",
... | 27.038462 | 21.346154 |
def lyricsmode(song):
    """
    Returns the lyrics found in lyricsmode.com for the specified mp3 file or an
    empty string if not found.
    """
    translate = {
        URLESCAPE: '',
        ' ': '_'
    }
    artist = song.artist.lower()
    title = song.title.lower()
    # Strip a leading article and pick the URL prefix letter *before*
    # normalization: the original checks for 'the ' / 'a ' ran after spaces
    # had already been rewritten to underscores, so they could never match.
    if artist.startswith('the '):
        artist = artist[4:]
    if artist.startswith('a '):
        prefix = artist[2]
    else:
        prefix = artist[0]
    artist = normalize(artist, translate)
    title = normalize(title, translate)
    artist = re.sub(r'\_{2,}', '_', artist)
    title = re.sub(r'\_{2,}', '_', title)
    url = 'http://www.lyricsmode.com/lyrics/{}/{}/{}.html'
    url = url.format(prefix, artist, title)
    soup = get_url(url)
    content = soup.find(id='lyrics_text')
    # Honor the documented contract: empty string when the lyrics container
    # is missing (the original raised AttributeError on a None find result).
    if content is None:
        return ''
    return content.get_text().strip()
"def",
"lyricsmode",
"(",
"song",
")",
":",
"translate",
"=",
"{",
"URLESCAPE",
":",
"''",
",",
"' '",
":",
"'_'",
"}",
"artist",
"=",
"song",
".",
"artist",
".",
"lower",
"(",
")",
"artist",
"=",
"normalize",
"(",
"artist",
",",
"translate",
")",
... | 25.548387 | 16.580645 |
async def fetch_room(self, room_id):
    """Lookup details for a given room id via the Plum API."""
    endpoint = "https://production.plum.technology/v2/getRoom"
    payload = {"rid": room_id}
    return await self.__post(endpoint, payload)
"async",
"def",
"fetch_room",
"(",
"self",
",",
"room_id",
")",
":",
"url",
"=",
"\"https://production.plum.technology/v2/getRoom\"",
"data",
"=",
"{",
"\"rid\"",
":",
"room_id",
"}",
"return",
"await",
"self",
".",
"__post",
"(",
"url",
",",
"data",
")"
] | 43.8 | 7.4 |
def act(self, *args, **kwargs):
    """Run this rule's predicate on the given arguments and, only when it
    holds, run the action function as well.

    returns:
        a tuple describing what happened:
        (False, None) - the predicate failed; the action was not run
        (True, True)  - the predicate and action functions succeeded
        (True, False) - the predicate succeeded, but the action failed
    """
    if not self.predicate(*args, **kwargs):
        return (False, None)
    return (True, self.action(*args, **kwargs))
"def",
"act",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"predicate",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"bool_result",
"=",
"self",
".",
"action",
"(",
"*",
"args",
",",
"*",
"*",
"... | 46 | 19 |
def _edges_in_tri_except(self, tri, edge):
"""Return the edges in *tri*, excluding *edge*.
"""
edges = [(tri[i], tri[(i+1) % 3]) for i in range(3)]
try:
edges.remove(tuple(edge))
except ValueError:
edges.remove(tuple(edge[::-1]))
return edges | [
"def",
"_edges_in_tri_except",
"(",
"self",
",",
"tri",
",",
"edge",
")",
":",
"edges",
"=",
"[",
"(",
"tri",
"[",
"i",
"]",
",",
"tri",
"[",
"(",
"i",
"+",
"1",
")",
"%",
"3",
"]",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
"]",
"try",... | 34 | 10 |
def read_file(self, location):
    """Read in a yaml file and return as a python object.

    Raises ``self.BadFileErrorKls`` when the file is not valid YAML.
    """
    try:
        # with-block closes the handle (the original leaked an open file),
        # and safe_load avoids arbitrary-object construction while working
        # on PyYAML >= 5, where plain load() requires an explicit Loader.
        # NOTE(review): if configs rely on custom YAML tags, this needs
        # yaml.full_load or an explicit Loader instead -- confirm.
        with open(location) as fh:
            return yaml.safe_load(fh)
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as error:
        raise self.BadFileErrorKls("Failed to read yaml", location=location, error_type=error.__class__.__name__, error="{0}{1}".format(error.problem, error.problem_mark))
"def",
"read_file",
"(",
"self",
",",
"location",
")",
":",
"try",
":",
"return",
"yaml",
".",
"load",
"(",
"open",
"(",
"location",
")",
")",
"except",
"(",
"yaml",
".",
"parser",
".",
"ParserError",
",",
"yaml",
".",
"scanner",
".",
"ScannerError",
... | 66.833333 | 35.666667 |
def vbar_stack(self, stackers, **kw):
    ''' Generate multiple ``VBar`` renderers for levels stacked bottom
    to top.
    Args:
        stackers (seq[str]) : a list of data source field names to stack
            successively for the ``bottom`` and ``top`` bar coordinates.
            Additionally, the ``name`` of the renderer will be set to
            the value of each successive stacker (this is useful with the
            special hover variable ``$name``)
    Any additional keyword arguments are passed to each call to ``vbar``.
    If a keyword value is a list or tuple, then each call will get one
    value from the sequence.
    Returns:
        list[GlyphRenderer]
    Examples:
        Assuming a ``ColumnDataSource`` named ``source`` with columns
        *2016* and *2017*, then the following call to ``vbar_stack`` will
        will create two ``VBar`` renderers that stack:
        .. code-block:: python
            p.vbar_stack(['2016', '2017'], x=10, width=0.9, color=['blue', 'red'], source=source)
        This is equivalent to the following two separate calls:
        .. code-block:: python
            p.vbar(bottom=stack(), top=stack('2016'), x=10, width=0.9, color='blue', source=source, name='2016')
            p.vbar(bottom=stack('2016'), top=stack('2016', '2017'), x=10, width=0.9, color='red', source=source, name='2017')
    '''
    # Note: a distinct loop-variable name avoids shadowing the **kw param.
    renderers = []
    for stacked_kw in _double_stack(stackers, "bottom", "top", **kw):
        renderers.append(self.vbar(**stacked_kw))
    return renderers
"def",
"vbar_stack",
"(",
"self",
",",
"stackers",
",",
"*",
"*",
"kw",
")",
":",
"result",
"=",
"[",
"]",
"for",
"kw",
"in",
"_double_stack",
"(",
"stackers",
",",
"\"bottom\"",
",",
"\"top\"",
",",
"*",
"*",
"kw",
")",
":",
"result",
".",
"append... | 38.902439 | 32.073171 |
def _unpack_obs(obs, space, tensorlib=tf):
    """Unpack a flattened Dict or Tuple observation array/tensor.
    Arguments:
        obs: The flattened observation tensor; must be 2-D
            ``[batch, flattened_size]`` (validated below)
        space: The original space prior to flattening
        tensorlib: The library used to unflatten (reshape) the array/tensor
    """
    # Only composite (Dict/Tuple) spaces were flattened; anything else
    # passes through unchanged.
    if (isinstance(space, gym.spaces.Dict)
            or isinstance(space, gym.spaces.Tuple)):
        # The preprocessor for this space describes the flattened layout:
        # total size (prep.shape[0]) and one sub-preprocessor per subspace.
        prep = get_preprocessor(space)(space)
        if len(obs.shape) != 2 or obs.shape[1] != prep.shape[0]:
            raise ValueError(
                "Expected flattened obs shape of [None, {}], got {}".format(
                    prep.shape[0], obs.shape))
        assert len(prep.preprocessors) == len(space.spaces), \
            (len(prep.preprocessors) == len(space.spaces))
        # Walk the flat axis, carving out each subspace's slice in order
        # and recursively unpacking it back to the subspace's shape.
        offset = 0
        if isinstance(space, gym.spaces.Tuple):
            u = []
            for p, v in zip(prep.preprocessors, space.spaces):
                obs_slice = obs[:, offset:offset + p.size]
                offset += p.size
                u.append(
                    _unpack_obs(
                        tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
                        v,
                        tensorlib=tensorlib))
        else:
            # Dict spaces unpack to an OrderedDict keyed like space.spaces.
            u = OrderedDict()
            for p, (k, v) in zip(prep.preprocessors, space.spaces.items()):
                obs_slice = obs[:, offset:offset + p.size]
                offset += p.size
                u[k] = _unpack_obs(
                    tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
                    v,
                    tensorlib=tensorlib)
        return u
    else:
        return obs | [
"def",
"_unpack_obs",
"(",
"obs",
",",
"space",
",",
"tensorlib",
"=",
"tf",
")",
":",
"if",
"(",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Dict",
")",
"or",
"isinstance",
"(",
"space",
",",
"gym",
".",
"spaces",
".",
"Tuple",
")... | 39.780488 | 17.146341 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.