code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def tan_rand(q, seed=9):
"""Find a random vector in the tangent space of the n sphere
This function will find a random orthogonal vector to q.
Parameters
----------
q
(n+1,) array which is in the n-sphere
Returns
-------
qd
(n+1,) array which is orthogonal to n-sphere and also random
"""
# probably need a check in case we get a parallel vector
rs = np.random.RandomState(seed)
rvec = rs.rand(q.shape[0])
qd = np.cross(rvec, q)
qd = qd / np.linalg.norm(qd)
while np.dot(q, qd) > 1e-6:
rvec = rs.rand(q.shape[0])
qd = np.cross(rvec, q)
qd = qd / np.linalg.norm(qd)
return qd | Find a random vector in the tangent space of the n sphere
This function will find a random orthogonal vector to q.
Parameters
----------
q
(n+1,) array which is in the n-sphere
Returns
-------
qd
(n+1,) array which is orthogonal to n-sphere and also random | Below is the the instruction that describes the task:
### Input:
Find a random vector in the tangent space of the n sphere
This function will find a random orthogonal vector to q.
Parameters
----------
q
(n+1,) array which is in the n-sphere
Returns
-------
qd
(n+1,) array which is orthogonal to n-sphere and also random
### Response:
def tan_rand(q, seed=9):
"""Find a random vector in the tangent space of the n sphere
This function will find a random orthogonal vector to q.
Parameters
----------
q
(n+1,) array which is in the n-sphere
Returns
-------
qd
(n+1,) array which is orthogonal to n-sphere and also random
"""
# probably need a check in case we get a parallel vector
rs = np.random.RandomState(seed)
rvec = rs.rand(q.shape[0])
qd = np.cross(rvec, q)
qd = qd / np.linalg.norm(qd)
while np.dot(q, qd) > 1e-6:
rvec = rs.rand(q.shape[0])
qd = np.cross(rvec, q)
qd = qd / np.linalg.norm(qd)
return qd |
def resetSession(self, username=None, password=None, verify=True) :
"""resets the session"""
self.disconnectSession()
self.session = AikidoSession(username, password, verify) | resets the session | Below is the the instruction that describes the task:
### Input:
resets the session
### Response:
def resetSession(self, username=None, password=None, verify=True) :
"""resets the session"""
self.disconnectSession()
self.session = AikidoSession(username, password, verify) |
def purgeRelationship(self, pid, subject, predicate, object, isLiteral=False,
datatype=None):
'''Remove a relationship from an object.
Wrapper function for
`Fedora REST API purgeRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-purgeRelationship>`_
:param pid: object pid
:param subject: relationship subject
:param predicate: relationship predicate
:param object: relationship object
:param isLiteral: boolean (default: false)
:param datatype: optional datatype
:returns: boolean; indicates whether or not a relationship was
removed
'''
http_args = {'subject': subject, 'predicate': predicate,
'object': object, 'isLiteral': isLiteral}
if datatype is not None:
http_args['datatype'] = datatype
url = 'objects/%(pid)s/relationships' % {'pid': pid}
response = self.delete(url, params=http_args)
# should have a status code of 200;
# response body text indicates if a relationship was purged or not
return response.status_code == requests.codes.ok and response.content == b'true' | Remove a relationship from an object.
Wrapper function for
`Fedora REST API purgeRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-purgeRelationship>`_
:param pid: object pid
:param subject: relationship subject
:param predicate: relationship predicate
:param object: relationship object
:param isLiteral: boolean (default: false)
:param datatype: optional datatype
:returns: boolean; indicates whether or not a relationship was
removed | Below is the the instruction that describes the task:
### Input:
Remove a relationship from an object.
Wrapper function for
`Fedora REST API purgeRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-purgeRelationship>`_
:param pid: object pid
:param subject: relationship subject
:param predicate: relationship predicate
:param object: relationship object
:param isLiteral: boolean (default: false)
:param datatype: optional datatype
:returns: boolean; indicates whether or not a relationship was
removed
### Response:
def purgeRelationship(self, pid, subject, predicate, object, isLiteral=False,
datatype=None):
'''Remove a relationship from an object.
Wrapper function for
`Fedora REST API purgeRelationship <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-purgeRelationship>`_
:param pid: object pid
:param subject: relationship subject
:param predicate: relationship predicate
:param object: relationship object
:param isLiteral: boolean (default: false)
:param datatype: optional datatype
:returns: boolean; indicates whether or not a relationship was
removed
'''
http_args = {'subject': subject, 'predicate': predicate,
'object': object, 'isLiteral': isLiteral}
if datatype is not None:
http_args['datatype'] = datatype
url = 'objects/%(pid)s/relationships' % {'pid': pid}
response = self.delete(url, params=http_args)
# should have a status code of 200;
# response body text indicates if a relationship was purged or not
return response.status_code == requests.codes.ok and response.content == b'true' |
def fileInfo(self, index):
"""
Gets the file info of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: QFileInfo
"""
return self._fs_model_source.fileInfo(
self._fs_model_proxy.mapToSource(index)) | Gets the file info of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: QFileInfo | Below is the the instruction that describes the task:
### Input:
Gets the file info of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: QFileInfo
### Response:
def fileInfo(self, index):
"""
Gets the file info of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: QFileInfo
"""
return self._fs_model_source.fileInfo(
self._fs_model_proxy.mapToSource(index)) |
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line | Tails a file | Below is the the instruction that describes the task:
### Input:
Tails a file
### Response:
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line |
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt] | Parse one line of the XFS info output. | Below is the the instruction that describes the task:
### Input:
Parse one line of the XFS info output.
### Response:
def _xfs_info_get_kv(serialized):
'''
Parse one line of the XFS info output.
'''
# No need to know sub-elements here
if serialized.startswith("="):
serialized = serialized[1:].strip()
serialized = serialized.replace(" = ", "=*** ").replace(" =", "=")
# Keywords has no spaces, values do
opt = []
for tkn in serialized.split(" "):
if not opt or "=" in tkn:
opt.append(tkn)
else:
opt[len(opt) - 1] = opt[len(opt) - 1] + " " + tkn
# Preserve ordering
return [tuple(items.split("=")) for items in opt] |
def middleware(self, args):
"""
Appends a Middleware to the route which is to be executed before the route runs
"""
if self.url[(len(self.url) - 1)] == (self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=None)):
self.url.pop()
self.url.append((self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=args)))
return self | Appends a Middleware to the route which is to be executed before the route runs | Below is the the instruction that describes the task:
### Input:
Appends a Middleware to the route which is to be executed before the route runs
### Response:
def middleware(self, args):
"""
Appends a Middleware to the route which is to be executed before the route runs
"""
if self.url[(len(self.url) - 1)] == (self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=None)):
self.url.pop()
self.url.append((self.url_, self.controller, dict(method=self.method, request_type=self.request_type, middleware=args)))
return self |
def init_poolmanager(self, connections, maxsize,
block=requests.adapters.DEFAULT_POOLBLOCK,
**pool_kwargs):
"""Initialize poolmanager with cipher and Tlsv1"""
context = create_urllib3_context(ciphers=self.CIPHERS,
ssl_version=ssl.PROTOCOL_TLSv1)
pool_kwargs['ssl_context'] = context
return super(TLSv1Adapter, self).init_poolmanager(connections, maxsize,
block, **pool_kwargs) | Initialize poolmanager with cipher and Tlsv1 | Below is the the instruction that describes the task:
### Input:
Initialize poolmanager with cipher and Tlsv1
### Response:
def init_poolmanager(self, connections, maxsize,
block=requests.adapters.DEFAULT_POOLBLOCK,
**pool_kwargs):
"""Initialize poolmanager with cipher and Tlsv1"""
context = create_urllib3_context(ciphers=self.CIPHERS,
ssl_version=ssl.PROTOCOL_TLSv1)
pool_kwargs['ssl_context'] = context
return super(TLSv1Adapter, self).init_poolmanager(connections, maxsize,
block, **pool_kwargs) |
def mass1_from_tau0_tau3(tau0, tau3, f_lower):
r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
return mass1_from_mtotal_eta(mtotal, eta) | r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`. | Below is the the instruction that describes the task:
### Input:
r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`.
### Response:
def mass1_from_tau0_tau3(tau0, tau3, f_lower):
r"""Returns the primary mass from the given :math:`\tau_0, \tau_3`."""
mtotal = mtotal_from_tau0_tau3(tau0, tau3, f_lower)
eta = eta_from_tau0_tau3(tau0, tau3, f_lower)
return mass1_from_mtotal_eta(mtotal, eta) |
def parse(cls, parser, token):
"""
Parse the "as var" syntax.
"""
bits, as_var = parse_as_var(parser, token)
tag_name, args, kwargs = parse_token_kwargs(parser, bits, ('template',) + cls.allowed_kwargs, compile_args=cls.compile_args, compile_kwargs=cls.compile_kwargs)
# Pass through standard chain
cls.validate_args(tag_name, *args)
return cls(tag_name, as_var, *args, **kwargs) | Parse the "as var" syntax. | Below is the the instruction that describes the task:
### Input:
Parse the "as var" syntax.
### Response:
def parse(cls, parser, token):
"""
Parse the "as var" syntax.
"""
bits, as_var = parse_as_var(parser, token)
tag_name, args, kwargs = parse_token_kwargs(parser, bits, ('template',) + cls.allowed_kwargs, compile_args=cls.compile_args, compile_kwargs=cls.compile_kwargs)
# Pass through standard chain
cls.validate_args(tag_name, *args)
return cls(tag_name, as_var, *args, **kwargs) |
def add_back_ref(self, back_ref, attr=None):
"""Add reference from back_ref to self
:param back_ref: back_ref to add
:type back_ref: Resource
:rtype: Resource
"""
back_ref.add_ref(self, attr)
return self.fetch() | Add reference from back_ref to self
:param back_ref: back_ref to add
:type back_ref: Resource
:rtype: Resource | Below is the the instruction that describes the task:
### Input:
Add reference from back_ref to self
:param back_ref: back_ref to add
:type back_ref: Resource
:rtype: Resource
### Response:
def add_back_ref(self, back_ref, attr=None):
"""Add reference from back_ref to self
:param back_ref: back_ref to add
:type back_ref: Resource
:rtype: Resource
"""
back_ref.add_ref(self, attr)
return self.fetch() |
def annotate(self, sent):
"""Annotate a squence of words with entity tags.
Args:
sent: sequence of strings/words.
"""
preds = []
words = []
for word, fv in self.sent2examples(sent):
probs = self.predictor(fv)
tags = probs.argsort()
tag = self.ID_TAG[tags[-1]]
words.append(word)
preds.append(tag)
# fix_chunks(preds)
annotations = zip(words, preds)
return annotations | Annotate a squence of words with entity tags.
Args:
sent: sequence of strings/words. | Below is the the instruction that describes the task:
### Input:
Annotate a squence of words with entity tags.
Args:
sent: sequence of strings/words.
### Response:
def annotate(self, sent):
"""Annotate a squence of words with entity tags.
Args:
sent: sequence of strings/words.
"""
preds = []
words = []
for word, fv in self.sent2examples(sent):
probs = self.predictor(fv)
tags = probs.argsort()
tag = self.ID_TAG[tags[-1]]
words.append(word)
preds.append(tag)
# fix_chunks(preds)
annotations = zip(words, preds)
return annotations |
def fromOpenIDRequest(cls, request):
"""Instantiate a Request object from the arguments in a
C{checkid_*} OpenID message
"""
self = cls()
args = request.message.getArgs(self.ns_uri)
is_openid1 = request.message.isOpenID1()
if args == {}:
return None
self.parseExtensionArgs(args, is_openid1)
return self | Instantiate a Request object from the arguments in a
C{checkid_*} OpenID message | Below is the the instruction that describes the task:
### Input:
Instantiate a Request object from the arguments in a
C{checkid_*} OpenID message
### Response:
def fromOpenIDRequest(cls, request):
"""Instantiate a Request object from the arguments in a
C{checkid_*} OpenID message
"""
self = cls()
args = request.message.getArgs(self.ns_uri)
is_openid1 = request.message.isOpenID1()
if args == {}:
return None
self.parseExtensionArgs(args, is_openid1)
return self |
def _gi_build_stub(parent):
"""
Inspect the passed module recursively and build stubs for functions,
classes, etc.
"""
classes = {}
functions = {}
constants = {}
methods = {}
for name in dir(parent):
if name.startswith("__"):
continue
# Check if this is a valid name in python
if not re.match(_identifier_re, name):
continue
try:
obj = getattr(parent, name)
except:
continue
if inspect.isclass(obj):
classes[name] = obj
elif inspect.isfunction(obj) or inspect.isbuiltin(obj):
functions[name] = obj
elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
methods[name] = obj
elif (
str(obj).startswith("<flags")
or str(obj).startswith("<enum ")
or str(obj).startswith("<GType ")
or inspect.isdatadescriptor(obj)
):
constants[name] = 0
elif isinstance(obj, (int, str)):
constants[name] = obj
elif callable(obj):
# Fall back to a function for anything callable
functions[name] = obj
else:
# Assume everything else is some manner of constant
constants[name] = 0
ret = ""
if constants:
ret += "# %s constants\n\n" % parent.__name__
for name in sorted(constants):
if name[0].isdigit():
# GDK has some busted constant names like
# Gdk.EventType.2BUTTON_PRESS
continue
val = constants[name]
strval = str(val)
if isinstance(val, str):
strval = '"%s"' % str(val).replace("\\", "\\\\")
ret += "%s = %s\n" % (name, strval)
if ret:
ret += "\n\n"
if functions:
ret += "# %s functions\n\n" % parent.__name__
for name in sorted(functions):
ret += "def %s(*args, **kwargs):\n" % name
ret += " pass\n"
if ret:
ret += "\n\n"
if methods:
ret += "# %s methods\n\n" % parent.__name__
for name in sorted(methods):
ret += "def %s(self, *args, **kwargs):\n" % name
ret += " pass\n"
if ret:
ret += "\n\n"
if classes:
ret += "# %s classes\n\n" % parent.__name__
for name, obj in sorted(classes.items()):
base = "object"
if issubclass(obj, Exception):
base = "Exception"
ret += "class %s(%s):\n" % (name, base)
classret = _gi_build_stub(obj)
if not classret:
classret = "pass\n"
for line in classret.splitlines():
ret += " " + line + "\n"
ret += "\n"
return ret | Inspect the passed module recursively and build stubs for functions,
classes, etc. | Below is the the instruction that describes the task:
### Input:
Inspect the passed module recursively and build stubs for functions,
classes, etc.
### Response:
def _gi_build_stub(parent):
"""
Inspect the passed module recursively and build stubs for functions,
classes, etc.
"""
classes = {}
functions = {}
constants = {}
methods = {}
for name in dir(parent):
if name.startswith("__"):
continue
# Check if this is a valid name in python
if not re.match(_identifier_re, name):
continue
try:
obj = getattr(parent, name)
except:
continue
if inspect.isclass(obj):
classes[name] = obj
elif inspect.isfunction(obj) or inspect.isbuiltin(obj):
functions[name] = obj
elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj):
methods[name] = obj
elif (
str(obj).startswith("<flags")
or str(obj).startswith("<enum ")
or str(obj).startswith("<GType ")
or inspect.isdatadescriptor(obj)
):
constants[name] = 0
elif isinstance(obj, (int, str)):
constants[name] = obj
elif callable(obj):
# Fall back to a function for anything callable
functions[name] = obj
else:
# Assume everything else is some manner of constant
constants[name] = 0
ret = ""
if constants:
ret += "# %s constants\n\n" % parent.__name__
for name in sorted(constants):
if name[0].isdigit():
# GDK has some busted constant names like
# Gdk.EventType.2BUTTON_PRESS
continue
val = constants[name]
strval = str(val)
if isinstance(val, str):
strval = '"%s"' % str(val).replace("\\", "\\\\")
ret += "%s = %s\n" % (name, strval)
if ret:
ret += "\n\n"
if functions:
ret += "# %s functions\n\n" % parent.__name__
for name in sorted(functions):
ret += "def %s(*args, **kwargs):\n" % name
ret += " pass\n"
if ret:
ret += "\n\n"
if methods:
ret += "# %s methods\n\n" % parent.__name__
for name in sorted(methods):
ret += "def %s(self, *args, **kwargs):\n" % name
ret += " pass\n"
if ret:
ret += "\n\n"
if classes:
ret += "# %s classes\n\n" % parent.__name__
for name, obj in sorted(classes.items()):
base = "object"
if issubclass(obj, Exception):
base = "Exception"
ret += "class %s(%s):\n" % (name, base)
classret = _gi_build_stub(obj)
if not classret:
classret = "pass\n"
for line in classret.splitlines():
ret += " " + line + "\n"
ret += "\n"
return ret |
def transRbend(theta=None, rho=None, gamma=None, incsym=-1):
""" Transport matrix of rectangle dipole
:param theta: bending angle in [RAD]
:param incsym: incident symmetry, -1 by default,
available options:
* -1: left half symmetry,
* 0: full symmetry,
* 1: right half symmetry
:param rho: bending radius in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
if None in (theta, rho, gamma):
print("warning: 'theta', 'rho', 'gamma' should be positive float numbers.")
m = np.eye(6, 6, dtype=np.float64)
return m
else:
beta12d = {'-1': (0, theta), '0': (theta * 0.5, theta * 0.5), '1': (theta, 0)}
(beta1, beta2) = beta12d[str(incsym)]
mf1 = transFringe(beta=beta1, rho=rho)
mf2 = transFringe(beta=beta2, rho=rho)
ms = transSect(theta=theta, rho=rho, gamma=gamma)
m = reduce(np.dot, [mf1, ms, mf2])
return m | Transport matrix of rectangle dipole
:param theta: bending angle in [RAD]
:param incsym: incident symmetry, -1 by default,
available options:
* -1: left half symmetry,
* 0: full symmetry,
* 1: right half symmetry
:param rho: bending radius in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array | Below is the the instruction that describes the task:
### Input:
Transport matrix of rectangle dipole
:param theta: bending angle in [RAD]
:param incsym: incident symmetry, -1 by default,
available options:
* -1: left half symmetry,
* 0: full symmetry,
* 1: right half symmetry
:param rho: bending radius in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
### Response:
def transRbend(theta=None, rho=None, gamma=None, incsym=-1):
""" Transport matrix of rectangle dipole
:param theta: bending angle in [RAD]
:param incsym: incident symmetry, -1 by default,
available options:
* -1: left half symmetry,
* 0: full symmetry,
* 1: right half symmetry
:param rho: bending radius in [m]
:param gamma: electron energy, gamma value
:return: 6x6 numpy array
"""
if None in (theta, rho, gamma):
print("warning: 'theta', 'rho', 'gamma' should be positive float numbers.")
m = np.eye(6, 6, dtype=np.float64)
return m
else:
beta12d = {'-1': (0, theta), '0': (theta * 0.5, theta * 0.5), '1': (theta, 0)}
(beta1, beta2) = beta12d[str(incsym)]
mf1 = transFringe(beta=beta1, rho=rho)
mf2 = transFringe(beta=beta2, rho=rho)
ms = transSect(theta=theta, rho=rho, gamma=gamma)
m = reduce(np.dot, [mf1, ms, mf2])
return m |
def cmd(command, *args, **kwargs):
'''
NOTE: This function is preserved for backwards compatibilty. This allows
commands to be executed using either of the following syntactic forms.
salt '*' nxos.cmd <function>
or
salt '*' nxos.<function>
command
function from `salt.modules.nxos` to run
args
positional args to pass to `command` function
kwargs
key word arguments to pass to `command` function
.. code-block:: bash
salt '*' nxos.cmd sendline 'show ver'
salt '*' nxos.cmd show_run
salt '*' nxos.cmd check_password username=admin password='$5$lkjsdfoi$blahblahblah' encrypted=True
'''
for k in list(kwargs):
if k.startswith('__pub_'):
kwargs.pop(k)
local_command = '.'.join(['nxos', command])
log.info('local command: %s', local_command)
if local_command not in __salt__:
return False
return __salt__[local_command](*args, **kwargs) | NOTE: This function is preserved for backwards compatibilty. This allows
commands to be executed using either of the following syntactic forms.
salt '*' nxos.cmd <function>
or
salt '*' nxos.<function>
command
function from `salt.modules.nxos` to run
args
positional args to pass to `command` function
kwargs
key word arguments to pass to `command` function
.. code-block:: bash
salt '*' nxos.cmd sendline 'show ver'
salt '*' nxos.cmd show_run
salt '*' nxos.cmd check_password username=admin password='$5$lkjsdfoi$blahblahblah' encrypted=True | Below is the the instruction that describes the task:
### Input:
NOTE: This function is preserved for backwards compatibilty. This allows
commands to be executed using either of the following syntactic forms.
salt '*' nxos.cmd <function>
or
salt '*' nxos.<function>
command
function from `salt.modules.nxos` to run
args
positional args to pass to `command` function
kwargs
key word arguments to pass to `command` function
.. code-block:: bash
salt '*' nxos.cmd sendline 'show ver'
salt '*' nxos.cmd show_run
salt '*' nxos.cmd check_password username=admin password='$5$lkjsdfoi$blahblahblah' encrypted=True
### Response:
def cmd(command, *args, **kwargs):
'''
NOTE: This function is preserved for backwards compatibilty. This allows
commands to be executed using either of the following syntactic forms.
salt '*' nxos.cmd <function>
or
salt '*' nxos.<function>
command
function from `salt.modules.nxos` to run
args
positional args to pass to `command` function
kwargs
key word arguments to pass to `command` function
.. code-block:: bash
salt '*' nxos.cmd sendline 'show ver'
salt '*' nxos.cmd show_run
salt '*' nxos.cmd check_password username=admin password='$5$lkjsdfoi$blahblahblah' encrypted=True
'''
for k in list(kwargs):
if k.startswith('__pub_'):
kwargs.pop(k)
local_command = '.'.join(['nxos', command])
log.info('local command: %s', local_command)
if local_command not in __salt__:
return False
return __salt__[local_command](*args, **kwargs) |
def add_agent_cloud(self, agent_cloud):
"""AddAgentCloud.
[Preview API]
:param :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>` agent_cloud:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>`
"""
content = self._serialize.body(agent_cloud, 'TaskAgentCloud')
response = self._send(http_method='POST',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='5.0-preview.1',
content=content)
return self._deserialize('TaskAgentCloud', response) | AddAgentCloud.
[Preview API]
:param :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>` agent_cloud:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>` | Below is the the instruction that describes the task:
### Input:
AddAgentCloud.
[Preview API]
:param :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>` agent_cloud:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>`
### Response:
def add_agent_cloud(self, agent_cloud):
"""AddAgentCloud.
[Preview API]
:param :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>` agent_cloud:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v5_0.task_agent.models.TaskAgentCloud>`
"""
content = self._serialize.body(agent_cloud, 'TaskAgentCloud')
response = self._send(http_method='POST',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='5.0-preview.1',
content=content)
return self._deserialize('TaskAgentCloud', response) |
def main():
""" The main entrypoint for the Opentrons robot API server stack.
This function
- creates and starts the server for both the RPC routes
handled by :py:mod:`opentrons.server.rpc` and the HTTP routes handled
by :py:mod:`opentrons.server.http`
- initializes the hardware interaction handled by either
:py:mod:`opentrons.legacy_api` or :py:mod:`opentrons.hardware_control`
This function does not return until the server is brought down.
"""
arg_parser = ArgumentParser(
description="Opentrons robot software",
parents=[build_arg_parser()])
args = arg_parser.parse_args()
run(**vars(args))
arg_parser.exit(message="Stopped\n") | The main entrypoint for the Opentrons robot API server stack.
This function
- creates and starts the server for both the RPC routes
handled by :py:mod:`opentrons.server.rpc` and the HTTP routes handled
by :py:mod:`opentrons.server.http`
- initializes the hardware interaction handled by either
:py:mod:`opentrons.legacy_api` or :py:mod:`opentrons.hardware_control`
This function does not return until the server is brought down. | Below is the the instruction that describes the task:
### Input:
The main entrypoint for the Opentrons robot API server stack.
This function
- creates and starts the server for both the RPC routes
handled by :py:mod:`opentrons.server.rpc` and the HTTP routes handled
by :py:mod:`opentrons.server.http`
- initializes the hardware interaction handled by either
:py:mod:`opentrons.legacy_api` or :py:mod:`opentrons.hardware_control`
This function does not return until the server is brought down.
### Response:
def main():
""" The main entrypoint for the Opentrons robot API server stack.
This function
- creates and starts the server for both the RPC routes
handled by :py:mod:`opentrons.server.rpc` and the HTTP routes handled
by :py:mod:`opentrons.server.http`
- initializes the hardware interaction handled by either
:py:mod:`opentrons.legacy_api` or :py:mod:`opentrons.hardware_control`
This function does not return until the server is brought down.
"""
arg_parser = ArgumentParser(
description="Opentrons robot software",
parents=[build_arg_parser()])
args = arg_parser.parse_args()
run(**vars(args))
arg_parser.exit(message="Stopped\n") |
def medlineRecordParser(record):
"""The parser [`MedlineRecord`](../classes/MedlineRecord.html#metaknowledge.medline.MedlineRecord) use. This takes an entry from [medlineParser()](#metaknowledge.medline.medlineHandlers.medlineParser) and parses it a part of the creation of a `MedlineRecord`.
# Parameters
_record_ : `enumerate object`
> a file wrapped by `enumerate()`
# Returns
`collections.OrderedDict`
> An ordered dictionary of the key-vaue pairs in the entry
"""
tagDict = collections.OrderedDict()
tag = 'PMID'
mostRecentAuthor = None
for lineNum, line in record:
tmptag = line[:4].rstrip()
contents = line[6:-1]
if tmptag.isalpha() and line[4] == '-':
tag = tmptag
if tag == 'AU':
mostRecentAuthor = contents
if tag in authorBasedTags:
contents = "{} : {}".format(mostRecentAuthor, contents)
try:
tagDict[tag].append(contents)
except KeyError:
tagDict[tag] = [contents]
elif line[:6] == ' ':
tagDict[tag][-1] += '\n' + line[6:-1]
elif line == '\n':
break
else:
raise BadPubmedRecord("Tag not formed correctly on line {}: '{}'".format(lineNum, line))
return tagDict | The parser [`MedlineRecord`](../classes/MedlineRecord.html#metaknowledge.medline.MedlineRecord) use. This takes an entry from [medlineParser()](#metaknowledge.medline.medlineHandlers.medlineParser) and parses it a part of the creation of a `MedlineRecord`.
# Parameters
_record_ : `enumerate object`
> a file wrapped by `enumerate()`
# Returns
`collections.OrderedDict`
> An ordered dictionary of the key-vaue pairs in the entry | Below is the the instruction that describes the task:
### Input:
The parser [`MedlineRecord`](../classes/MedlineRecord.html#metaknowledge.medline.MedlineRecord) use. This takes an entry from [medlineParser()](#metaknowledge.medline.medlineHandlers.medlineParser) and parses it a part of the creation of a `MedlineRecord`.
# Parameters
_record_ : `enumerate object`
> a file wrapped by `enumerate()`
# Returns
`collections.OrderedDict`
> An ordered dictionary of the key-vaue pairs in the entry
### Response:
def medlineRecordParser(record):
"""The parser [`MedlineRecord`](../classes/MedlineRecord.html#metaknowledge.medline.MedlineRecord) use. This takes an entry from [medlineParser()](#metaknowledge.medline.medlineHandlers.medlineParser) and parses it a part of the creation of a `MedlineRecord`.
# Parameters
_record_ : `enumerate object`
> a file wrapped by `enumerate()`
# Returns
`collections.OrderedDict`
> An ordered dictionary of the key-vaue pairs in the entry
"""
tagDict = collections.OrderedDict()
tag = 'PMID'
mostRecentAuthor = None
for lineNum, line in record:
tmptag = line[:4].rstrip()
contents = line[6:-1]
if tmptag.isalpha() and line[4] == '-':
tag = tmptag
if tag == 'AU':
mostRecentAuthor = contents
if tag in authorBasedTags:
contents = "{} : {}".format(mostRecentAuthor, contents)
try:
tagDict[tag].append(contents)
except KeyError:
tagDict[tag] = [contents]
elif line[:6] == ' ':
tagDict[tag][-1] += '\n' + line[6:-1]
elif line == '\n':
break
else:
raise BadPubmedRecord("Tag not formed correctly on line {}: '{}'".format(lineNum, line))
return tagDict |
def compile_dictionary(self, lang, wordlists, encoding, output):
"""Compile user dictionary."""
cmd = [
self.binary,
'--lang', lang,
'--encoding', codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(encoding, encoding).lower()).name,
'create',
'master', output
]
wordlist = ''
try:
output_location = os.path.dirname(output)
if not os.path.exists(output_location):
os.makedirs(output_location)
if os.path.exists(output):
os.remove(output)
self.log("Compiling Dictionary...", 1)
# Read word lists and create a unique set of words
words = set()
for wordlist in wordlists:
with open(wordlist, 'rb') as src:
for word in src.read().split(b'\n'):
words.add(word.replace(b'\r', b''))
# Compile wordlist against language
util.call(
[
self.binary,
'--lang', lang,
'--encoding=utf-8',
'create',
'master', output
],
input_text=b'\n'.join(sorted(words)) + b'\n'
)
except Exception:
self.log(cmd, 0)
self.log("Current wordlist: '%s'" % wordlist, 0)
self.log("Problem compiling dictionary. Check the binary path and options.", 0)
raise | Compile user dictionary. | Below is the the instruction that describes the task:
### Input:
Compile user dictionary.
### Response:
def compile_dictionary(self, lang, wordlists, encoding, output):
    """Compile user word lists into an Aspell master dictionary.

    Args:
        lang: Aspell language code the dictionary is compiled for.
        wordlists: iterable of word-list file paths whose words are merged.
        encoding: requested encoding; normalized through
            ``filters.PYTHON_ENCODING_NAMES`` and ``codecs.lookup``.
        output: destination path of the compiled master dictionary.

    Raises:
        Exception: re-raised after logging if any step fails.
    """
    # Single source of truth for the Aspell command. Previously a second,
    # hard-coded '--encoding=utf-8' command was rebuilt for the actual
    # call, silently ignoring the `encoding` argument and making the
    # logged command differ from the executed one.
    cmd = [
        self.binary,
        '--lang', lang,
        '--encoding', codecs.lookup(filters.PYTHON_ENCODING_NAMES.get(encoding, encoding).lower()).name,
        'create',
        'master', output
    ]
    wordlist = ''
    try:
        output_location = os.path.dirname(output)
        if not os.path.exists(output_location):
            os.makedirs(output_location)
        if os.path.exists(output):
            os.remove(output)
        self.log("Compiling Dictionary...", 1)
        # Read word lists and build a unique set of words (strip CR so
        # Windows line endings don't produce duplicate entries).
        words = set()
        for wordlist in wordlists:
            with open(wordlist, 'rb') as src:
                for word in src.read().split(b'\n'):
                    words.add(word.replace(b'\r', b''))
        # Compile the merged, sorted word list against the language.
        util.call(cmd, input_text=b'\n'.join(sorted(words)) + b'\n')
    except Exception:
        self.log(cmd, 0)
        self.log("Current wordlist: '%s'" % wordlist, 0)
        self.log("Problem compiling dictionary. Check the binary path and options.", 0)
        raise
def cleanup_containers(self, include_initial=False, exclude=None, **kwargs):
    """
    Identical to :meth:`dockermap.client.docker_util.DockerUtilityMixin.cleanup_containers`,
    except that a log message is emitted before delegating to the base class.
    """
    self.push_log("Generating list of stopped containers.")
    # Errors are reported rather than raised; force raise_on_error off.
    set_raise_on_error(kwargs, False)
    base = super(DockerFabricClient, self)
    return base.cleanup_containers(include_initial=include_initial,
                                   exclude=exclude, **kwargs)
### Input:
Identical to :meth:`dockermap.client.docker_util.DockerUtilityMixin.cleanup_containers` with additional logging.
### Response:
def cleanup_containers(self, include_initial=False, exclude=None, **kwargs):
    """
    Identical to :meth:`dockermap.client.docker_util.DockerUtilityMixin.cleanup_containers`,
    except that a log message is emitted before delegating to the base class.
    """
    self.push_log("Generating list of stopped containers.")
    # Errors are reported rather than raised; force raise_on_error off.
    set_raise_on_error(kwargs, False)
    base = super(DockerFabricClient, self)
    return base.cleanup_containers(include_initial=include_initial,
                                   exclude=exclude, **kwargs)
def account(self, key=None, address=None, name=None):
    """Fetch a single account, looked up by key, address, or name.

    Exactly one of the three lookup arguments should be provided; a key
    lookup is delegated to the client, while address/name lookups go
    through an account query. Raises TypeError when none is given.
    """
    if key:
        # Key lookups bypass the query endpoint entirely.
        return self.client.account(key, wallet=self)
    if address:
        query = dict(address=address)
    elif name:
        query = dict(name=name)
    else:
        raise TypeError("Missing param: key, address, or name is required.")
    record = self.resource.account_query(query).get()
    return Account(record, self.client, wallet=self)
### Input:
Query for an account by key, address, or name.
### Response:
def account(self, key=None, address=None, name=None):
    """Fetch a single account, looked up by key, address, or name.

    Exactly one of the three lookup arguments should be provided; a key
    lookup is delegated to the client, while address/name lookups go
    through an account query. Raises TypeError when none is given.
    """
    if key:
        # Key lookups bypass the query endpoint entirely.
        return self.client.account(key, wallet=self)
    if address:
        query = dict(address=address)
    elif name:
        query = dict(name=name)
    else:
        raise TypeError("Missing param: key, address, or name is required.")
    record = self.resource.account_query(query).get()
    return Account(record, self.client, wallet=self)
def plot_props(self, prop_y, prop_x, prop_z='temp',
               output='avg_eigs', dop_type='n', doping=None,
               temps=None, xlim=(-2, 2), ax=None):
    """
    Function to plot the transport properties.
    Args:
        prop_y: property to plot among ("Conductivity","Seebeck","Kappa","Carrier_conc","Hall_carrier_conc_trace"). Abbreviations are possible, like "S" for "Seebeck"
        prop_x: independent variable in the x-axis among ('mu','doping','temp')
        prop_z: third variable to plot multiple curves ('doping','temp')
        output: 'avg_eigs' to plot the average of the eigenvalues of the properties
            tensors; 'eigs' to plot the three eigenvalues of the properties
            tensors.
        dop_type: 'n' or 'p' to specify the doping type in plots that use doping
            levels as prop_x or prop_z
        doping: list of doping levels to plot, useful to reduce the number of curves
            when prop_z='doping'
        temps: list of temperatures to plot, useful to reduce the number of curves
            when prop_z='temp'
        xlim: chemical potential range, useful when prop_x='mu'
        ax: figure.axes where to plot. If None, a new figure is produced.
    Example:
        bztPlotter.plot_props('S','mu','temp',temps=[600,900,1200]).show()
    more examples are provided in the notebook
    "How to use Boltztra2 interface.ipynb".
    """
    props = ("Conductivity", "Seebeck", "Kappa", "Effective_mass",
             "Power_Factor", "Carrier_conc", "Hall_carrier_conc_trace")
    props_lbl = ("Conductivity", "Seebeck", "$K_{el}$", "Effective mass",
                 "Power Factor", "Carrier concentration", "Hall carrier conc.")
    props_unit = (r"$(\mathrm{kS\,m^{-1}})$", r"($\mu$V/K)", r"$(W / (m \cdot K))$",
                  r"$(m_e)$", r"$( mW / (m\cdot K^2)$", r"$(cm^{-3})$", r"$(cm^{-3})$")
    # Match prop_y by prefix so abbreviations like "S" select "Seebeck".
    props_short = [p[:len(prop_y)] for p in props]
    if prop_y not in props_short:
        raise BoltztrapError("prop_y not valid")
    if prop_x not in ('mu', 'doping', 'temp'):
        raise BoltztrapError("prop_x not valid")
    if prop_z not in ('doping', 'temp'):
        raise BoltztrapError("prop_z not valid")
    idx_prop = props_short.index(prop_y)
    leg_title = ""
    mu = self.bzt_transP.mu_r_eV
    # Resolve the data array by attribute name; getattr replaces the
    # previous eval() call, which was both unsafe and slower.
    if prop_z == 'doping' and prop_x == 'temp':
        p_array = getattr(self.bzt_transP, props[idx_prop] + '_' + prop_z)
    else:
        p_array = getattr(self.bzt_transP, props[idx_prop] + '_' + prop_x)
    if ax is None:
        plt.figure(figsize=(10, 8))
    temps_all = self.bzt_transP.temp_r.tolist()
    if temps is None:
        temps = self.bzt_transP.temp_r.tolist()
    doping_all = self.bzt_transP.doping.tolist()
    if doping is None:
        doping = self.bzt_transP.doping.tolist()
    # Special case: carrier and Hall carrier concentration are 2D arrays
    # indexed by (temp, mu) and have no tensor eigenvalues to decompose.
    if idx_prop in [5, 6]:
        if prop_z == 'temp' and prop_x == 'mu':
            for temp in temps:
                ti = temps_all.index(temp)
                # Carrier concentration (idx 5) is plotted as an absolute
                # value; Hall carrier concentration (idx 6) keeps its sign.
                prop_out = p_array[ti] if idx_prop == 6 else np.abs(p_array[ti])
                plt.semilogy(mu, prop_out, label=str(temp) + ' K')
            plt.xlabel(r"$\mu$ (eV)", fontsize=30)
            plt.xlim(xlim)
        else:
            raise BoltztrapError("only prop_x=mu and prop_z=temp are available for c.c. and Hall c.c.!")
    elif prop_z == 'temp' and prop_x == 'mu':
        for temp in temps:
            ti = temps_all.index(temp)
            # Eigenvalues of the property tensor at each chemical potential.
            prop_out = np.linalg.eigh(p_array[ti])[0]
            if output == 'avg_eigs':
                plt.plot(mu, prop_out.mean(axis=1), label=str(temp) + ' K')
            elif output == 'eigs':
                for i in range(3):
                    plt.plot(mu, prop_out[:, i],
                             label='eig ' + str(i) + ' ' + str(temp) + ' K')
        plt.xlabel(r"$\mu$ (eV)", fontsize=30)
        plt.xlim(xlim)
    elif prop_z == 'temp' and prop_x == 'doping':
        for temp in temps:
            ti = temps_all.index(temp)
            prop_out = np.linalg.eigh(p_array[dop_type][ti])[0]
            if output == 'avg_eigs':
                plt.semilogx(doping_all, prop_out.mean(axis=1), 's-',
                             label=str(temp) + ' K')
            elif output == 'eigs':
                for i in range(3):
                    plt.plot(doping_all, prop_out[:, i], 's-',
                             label='eig ' + str(i) + ' ' + str(temp) + ' K')
        plt.xlabel(r"Carrier conc. $cm^{-3}$", fontsize=30)
        leg_title = dop_type + "-type"
    elif prop_z == 'doping' and prop_x == 'temp':
        for dop in doping:
            di = doping_all.index(dop)
            prop_out = np.linalg.eigh(p_array[dop_type][:, di])[0]
            if output == 'avg_eigs':
                plt.plot(temps_all, prop_out.mean(axis=1),
                         's-', label=str(dop) + ' $cm^{-3}$')
            elif output == 'eigs':
                for i in range(3):
                    plt.plot(temps_all, prop_out[:, i], 's-',
                             label='eig ' + str(i) + ' ' + str(dop) + ' $cm^{-3}$')
        plt.xlabel(r"Temperature (K)", fontsize=30)
        leg_title = dop_type + "-type"
    plt.ylabel(props_lbl[idx_prop] + ' ' + props_unit[idx_prop], fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.legend(title=leg_title if leg_title != "" else "", fontsize=15)
    plt.tight_layout()
    plt.grid()
    return plt
Args:
prop_y: property to plot among ("Conductivity","Seebeck","Kappa","Carrier_conc","Hall_carrier_conc_trace"). Abbreviations are possible, like "S" for "Seebeck"
prop_x: independent variable in the x-axis among ('mu','doping','temp')
prop_z: third variable to plot multiple curves ('doping','temp')
output: 'avg_eigs' to plot the average of the eigenvalues of the properties
tensors; 'eigs' to plot the three eigenvalues of the properties
tensors.
dop_type: 'n' or 'p' to specify the doping type in plots that use doping
levels as prop_x or prop_z
doping: list of doping level to plot, useful to reduce the number of curves
when prop_z='doping'
temps: list of temperatures to plot, useful to reduce the number of curves
when prop_z='temp'
xlim: chemical potential range, useful when prop_x='mu'
ax: figure.axes where to plot. If None, a new figure is produced.
Example:
bztPlotter.plot_props('S','mu','temp',temps=[600,900,1200]).show()
more examples are provided in the notebook
"How to use Boltztra2 interface.ipynb". | Below is the the instruction that describes the task:
### Input:
Function to plot the transport properties.
Args:
prop_y: property to plot among ("Conductivity","Seebeck","Kappa","Carrier_conc","Hall_carrier_conc_trace"). Abbreviations are possible, like "S" for "Seebeck"
prop_x: independent variable in the x-axis among ('mu','doping','temp')
prop_z: third variable to plot multiple curves ('doping','temp')
output: 'avg_eigs' to plot the average of the eigenvalues of the properties
tensors; 'eigs' to plot the three eigenvalues of the properties
tensors.
dop_type: 'n' or 'p' to specify the doping type in plots that use doping
levels as prop_x or prop_z
doping: list of doping level to plot, useful to reduce the number of curves
when prop_z='doping'
temps: list of temperatures to plot, useful to reduce the number of curves
when prop_z='temp'
xlim: chemical potential range, useful when prop_x='mu'
ax: figure.axes where to plot. If None, a new figure is produced.
Example:
bztPlotter.plot_props('S','mu','temp',temps=[600,900,1200]).show()
more example are provided in the notebook
"How to use Boltztra2 interface.ipynb".
### Response:
def plot_props(self, prop_y, prop_x, prop_z='temp',
               output='avg_eigs', dop_type='n', doping=None,
               temps=None, xlim=(-2, 2), ax=None):
    """
    Function to plot the transport properties.
    Args:
        prop_y: property to plot among ("Conductivity","Seebeck","Kappa","Carrier_conc","Hall_carrier_conc_trace"). Abbreviations are possible, like "S" for "Seebeck"
        prop_x: independent variable in the x-axis among ('mu','doping','temp')
        prop_z: third variable to plot multiple curves ('doping','temp')
        output: 'avg_eigs' to plot the average of the eigenvalues of the properties
            tensors; 'eigs' to plot the three eigenvalues of the properties
            tensors.
        dop_type: 'n' or 'p' to specify the doping type in plots that use doping
            levels as prop_x or prop_z
        doping: list of doping levels to plot, useful to reduce the number of curves
            when prop_z='doping'
        temps: list of temperatures to plot, useful to reduce the number of curves
            when prop_z='temp'
        xlim: chemical potential range, useful when prop_x='mu'
        ax: figure.axes where to plot. If None, a new figure is produced.
    Example:
        bztPlotter.plot_props('S','mu','temp',temps=[600,900,1200]).show()
    more examples are provided in the notebook
    "How to use Boltztra2 interface.ipynb".
    """
    props = ("Conductivity", "Seebeck", "Kappa", "Effective_mass",
             "Power_Factor", "Carrier_conc", "Hall_carrier_conc_trace")
    props_lbl = ("Conductivity", "Seebeck", "$K_{el}$", "Effective mass",
                 "Power Factor", "Carrier concentration", "Hall carrier conc.")
    props_unit = (r"$(\mathrm{kS\,m^{-1}})$", r"($\mu$V/K)", r"$(W / (m \cdot K))$",
                  r"$(m_e)$", r"$( mW / (m\cdot K^2)$", r"$(cm^{-3})$", r"$(cm^{-3})$")
    # Match prop_y by prefix so abbreviations like "S" select "Seebeck".
    props_short = [p[:len(prop_y)] for p in props]
    if prop_y not in props_short:
        raise BoltztrapError("prop_y not valid")
    if prop_x not in ('mu', 'doping', 'temp'):
        raise BoltztrapError("prop_x not valid")
    if prop_z not in ('doping', 'temp'):
        raise BoltztrapError("prop_z not valid")
    idx_prop = props_short.index(prop_y)
    leg_title = ""
    mu = self.bzt_transP.mu_r_eV
    # Resolve the data array by attribute name; getattr replaces the
    # previous eval() call, which was both unsafe and slower.
    if prop_z == 'doping' and prop_x == 'temp':
        p_array = getattr(self.bzt_transP, props[idx_prop] + '_' + prop_z)
    else:
        p_array = getattr(self.bzt_transP, props[idx_prop] + '_' + prop_x)
    if ax is None:
        plt.figure(figsize=(10, 8))
    temps_all = self.bzt_transP.temp_r.tolist()
    if temps is None:
        temps = self.bzt_transP.temp_r.tolist()
    doping_all = self.bzt_transP.doping.tolist()
    if doping is None:
        doping = self.bzt_transP.doping.tolist()
    # Special case: carrier and Hall carrier concentration are 2D arrays
    # indexed by (temp, mu) and have no tensor eigenvalues to decompose.
    if idx_prop in [5, 6]:
        if prop_z == 'temp' and prop_x == 'mu':
            for temp in temps:
                ti = temps_all.index(temp)
                # Carrier concentration (idx 5) is plotted as an absolute
                # value; Hall carrier concentration (idx 6) keeps its sign.
                prop_out = p_array[ti] if idx_prop == 6 else np.abs(p_array[ti])
                plt.semilogy(mu, prop_out, label=str(temp) + ' K')
            plt.xlabel(r"$\mu$ (eV)", fontsize=30)
            plt.xlim(xlim)
        else:
            raise BoltztrapError("only prop_x=mu and prop_z=temp are available for c.c. and Hall c.c.!")
    elif prop_z == 'temp' and prop_x == 'mu':
        for temp in temps:
            ti = temps_all.index(temp)
            # Eigenvalues of the property tensor at each chemical potential.
            prop_out = np.linalg.eigh(p_array[ti])[0]
            if output == 'avg_eigs':
                plt.plot(mu, prop_out.mean(axis=1), label=str(temp) + ' K')
            elif output == 'eigs':
                for i in range(3):
                    plt.plot(mu, prop_out[:, i],
                             label='eig ' + str(i) + ' ' + str(temp) + ' K')
        plt.xlabel(r"$\mu$ (eV)", fontsize=30)
        plt.xlim(xlim)
    elif prop_z == 'temp' and prop_x == 'doping':
        for temp in temps:
            ti = temps_all.index(temp)
            prop_out = np.linalg.eigh(p_array[dop_type][ti])[0]
            if output == 'avg_eigs':
                plt.semilogx(doping_all, prop_out.mean(axis=1), 's-',
                             label=str(temp) + ' K')
            elif output == 'eigs':
                for i in range(3):
                    plt.plot(doping_all, prop_out[:, i], 's-',
                             label='eig ' + str(i) + ' ' + str(temp) + ' K')
        plt.xlabel(r"Carrier conc. $cm^{-3}$", fontsize=30)
        leg_title = dop_type + "-type"
    elif prop_z == 'doping' and prop_x == 'temp':
        for dop in doping:
            di = doping_all.index(dop)
            prop_out = np.linalg.eigh(p_array[dop_type][:, di])[0]
            if output == 'avg_eigs':
                plt.plot(temps_all, prop_out.mean(axis=1),
                         's-', label=str(dop) + ' $cm^{-3}$')
            elif output == 'eigs':
                for i in range(3):
                    plt.plot(temps_all, prop_out[:, i], 's-',
                             label='eig ' + str(i) + ' ' + str(dop) + ' $cm^{-3}$')
        plt.xlabel(r"Temperature (K)", fontsize=30)
        leg_title = dop_type + "-type"
    plt.ylabel(props_lbl[idx_prop] + ' ' + props_unit[idx_prop], fontsize=30)
    plt.xticks(fontsize=25)
    plt.yticks(fontsize=25)
    plt.legend(title=leg_title if leg_title != "" else "", fontsize=15)
    plt.tight_layout()
    plt.grid()
    return plt
def generate_fixtures(datasets, reuses):
    '''Build sample fixture data (users, datasets and reuses).'''
    owner = UserFactory()
    log.info('Generated user "{user.email}".'.format(user=owner))
    org = OrganizationFactory(members=[Member(user=owner)])
    log.info('Generated organization "{org.name}".'.format(org=org))
    for _ in range(datasets):
        # Each dataset gets one discussion and a batch of reuses.
        ds = VisibleDatasetFactory(organization=org)
        DiscussionFactory(subject=ds, user=owner)
        ReuseFactory.create_batch(reuses, datasets=[ds], owner=owner)
    log.info('Generated {datasets} dataset(s) with {reuses} reuse(s) each.'
             .format(datasets=datasets, reuses=reuses))
### Input:
Build sample fixture data (users, datasets and reuses).
### Response:
def generate_fixtures(datasets, reuses):
    '''Build sample fixture data (users, datasets and reuses).'''
    owner = UserFactory()
    log.info('Generated user "{user.email}".'.format(user=owner))
    org = OrganizationFactory(members=[Member(user=owner)])
    log.info('Generated organization "{org.name}".'.format(org=org))
    for _ in range(datasets):
        # Each dataset gets one discussion and a batch of reuses.
        ds = VisibleDatasetFactory(organization=org)
        DiscussionFactory(subject=ds, user=owner)
        ReuseFactory.create_batch(reuses, datasets=[ds], owner=owner)
    log.info('Generated {datasets} dataset(s) with {reuses} reuse(s) each.'
             .format(datasets=datasets, reuses=reuses))
async def get(self, target_format, target_size, size_tolerance_prct, out_filepath):
    """Download the cover image(s), post-process if needed, and write the result.

    Args:
        target_format: desired output image format.
        target_size: desired cover size (compared against the max dimension).
        size_tolerance_prct: tolerated size deviation, in percent, before resizing.
        out_filepath: filesystem path the final image bytes are written to.
    """
    if self.source_quality.value <= CoverSourceQuality.LOW.value:
        logging.getLogger("Cover").warning("Cover is from a potentially unreliable source and may be unrelated to the search")
    images_data = []
    for i, url in enumerate(self.urls):
        # download each part (covers may be split across several URLs)
        logging.getLogger("Cover").info("Downloading cover '%s' (part %u/%u)..." % (url, i + 1, len(self.urls)))
        headers = {}
        self.source.updateHttpHeaders(headers)
        # crunch (optimize) the image before it enters the cache
        async def pre_cache_callback(img_data):
            return await __class__.crunch(img_data, self.format)
        store_in_cache_callback, image_data = await self.source.http.query(url,
                                                                           headers=headers,
                                                                           verify=False,
                                                                           cache=__class__.image_cache,
                                                                           pre_cache_callback=pre_cache_callback)
        # store immediately in cache
        await store_in_cache_callback()
        # append for multi images
        images_data.append(image_data)
    need_format_change = (self.format != target_format)
    # resize only when the cover is larger than the target AND the
    # difference exceeds the configured tolerance percentage
    need_size_change = ((max(self.size) > target_size) and
                        (abs(max(self.size) - target_size) >
                         target_size * size_tolerance_prct / 100))
    need_join = len(images_data) > 1
    if need_join or need_format_change or need_size_change:
        # post process (join parts, convert format, and/or resize)
        image_data = self.postProcess(images_data,
                                      target_format if need_format_change else None,
                                      target_size if need_size_change else None)
        # crunch image again
        image_data = await __class__.crunch(image_data, target_format)
    # write it
    with open(out_filepath, "wb") as file:
        file.write(image_data)
### Input:
Download cover and process it.
### Response:
async def get(self, target_format, target_size, size_tolerance_prct, out_filepath):
    """Download the cover image(s), post-process if needed, and write the result.

    Args:
        target_format: desired output image format.
        target_size: desired cover size (compared against the max dimension).
        size_tolerance_prct: tolerated size deviation, in percent, before resizing.
        out_filepath: filesystem path the final image bytes are written to.
    """
    if self.source_quality.value <= CoverSourceQuality.LOW.value:
        logging.getLogger("Cover").warning("Cover is from a potentially unreliable source and may be unrelated to the search")
    images_data = []
    for i, url in enumerate(self.urls):
        # download each part (covers may be split across several URLs)
        logging.getLogger("Cover").info("Downloading cover '%s' (part %u/%u)..." % (url, i + 1, len(self.urls)))
        headers = {}
        self.source.updateHttpHeaders(headers)
        # crunch (optimize) the image before it enters the cache
        async def pre_cache_callback(img_data):
            return await __class__.crunch(img_data, self.format)
        store_in_cache_callback, image_data = await self.source.http.query(url,
                                                                           headers=headers,
                                                                           verify=False,
                                                                           cache=__class__.image_cache,
                                                                           pre_cache_callback=pre_cache_callback)
        # store immediately in cache
        await store_in_cache_callback()
        # append for multi images
        images_data.append(image_data)
    need_format_change = (self.format != target_format)
    # resize only when the cover is larger than the target AND the
    # difference exceeds the configured tolerance percentage
    need_size_change = ((max(self.size) > target_size) and
                        (abs(max(self.size) - target_size) >
                         target_size * size_tolerance_prct / 100))
    need_join = len(images_data) > 1
    if need_join or need_format_change or need_size_change:
        # post process (join parts, convert format, and/or resize)
        image_data = self.postProcess(images_data,
                                      target_format if need_format_change else None,
                                      target_size if need_size_change else None)
        # crunch image again
        image_data = await __class__.crunch(image_data, target_format)
    # write it
    with open(out_filepath, "wb") as file:
        file.write(image_data)
def attach(self, dwProcessId):
    """
    Attaches to an existing process for debugging.
    @see: L{detach}, L{execv}, L{execl}
    @type dwProcessId: int
    @param dwProcessId: Global ID of a process to attach to.
    @rtype: L{Process}
    @return: A new Process object. Normally you don't need to use it now,
        it's best to interact with the process from the event handler.
    @raise WindowsError: Raises an exception on error.
        Depending on the circumstances, the debugger may or may not have
        attached to the target process.
    """
    # Get the Process object from the snapshot,
    # if missing create a new one.
    try:
        aProcess = self.system.get_process(dwProcessId)
    except KeyError:
        # Not yet tracked by the snapshot; create a fresh Process object.
        aProcess = Process(dwProcessId)
    # Warn when mixing 32 and 64 bits.
    # This also allows the user to stop attaching altogether,
    # depending on how the warnings are configured.
    if System.bits != aProcess.get_bits():
        msg = "Mixture of 32 and 64 bits is considered experimental." \
              " Use at your own risk!"
        warnings.warn(msg, MixedBitsWarning)
    # Attach to the process. This is the point of no return: from here on
    # the OS considers us the debugger of the target process.
    win32.DebugActiveProcess(dwProcessId)
    # Add the new PID to the set of debugees.
    self.__attachedDebugees.add(dwProcessId)
    # Match the system kill-on-exit flag to our own.
    self.__setSystemKillOnExitMode()
    # If the Process object was not in the snapshot, add it now.
    if not self.system.has_process(dwProcessId):
        self.system._add_process(aProcess)
    # Scan the process threads and loaded modules.
    # This is preferred because the thread and library events do not
    # properly give some information, like the filename for each module.
    aProcess.scan_threads()
    aProcess.scan_modules()
    # Return the Process object, like the execv() and execl() methods.
    return aProcess
@see: L{detach}, L{execv}, L{execl}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to attach to.
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
Depending on the circumstances, the debugger may or may not have
attached to the target process. | Below is the the instruction that describes the task:
### Input:
Attaches to an existing process for debugging.
@see: L{detach}, L{execv}, L{execl}
@type dwProcessId: int
@param dwProcessId: Global ID of a process to attach to.
@rtype: L{Process}
@return: A new Process object. Normally you don't need to use it now,
it's best to interact with the process from the event handler.
@raise WindowsError: Raises an exception on error.
Depending on the circumstances, the debugger may or may not have
attached to the target process.
### Response:
def attach(self, dwProcessId):
    """
    Attaches to an existing process for debugging.
    @see: L{detach}, L{execv}, L{execl}
    @type dwProcessId: int
    @param dwProcessId: Global ID of a process to attach to.
    @rtype: L{Process}
    @return: A new Process object. Normally you don't need to use it now,
        it's best to interact with the process from the event handler.
    @raise WindowsError: Raises an exception on error.
        Depending on the circumstances, the debugger may or may not have
        attached to the target process.
    """
    # Get the Process object from the snapshot,
    # if missing create a new one.
    try:
        aProcess = self.system.get_process(dwProcessId)
    except KeyError:
        # Not yet tracked by the snapshot; create a fresh Process object.
        aProcess = Process(dwProcessId)
    # Warn when mixing 32 and 64 bits.
    # This also allows the user to stop attaching altogether,
    # depending on how the warnings are configured.
    if System.bits != aProcess.get_bits():
        msg = "Mixture of 32 and 64 bits is considered experimental." \
              " Use at your own risk!"
        warnings.warn(msg, MixedBitsWarning)
    # Attach to the process. This is the point of no return: from here on
    # the OS considers us the debugger of the target process.
    win32.DebugActiveProcess(dwProcessId)
    # Add the new PID to the set of debugees.
    self.__attachedDebugees.add(dwProcessId)
    # Match the system kill-on-exit flag to our own.
    self.__setSystemKillOnExitMode()
    # If the Process object was not in the snapshot, add it now.
    if not self.system.has_process(dwProcessId):
        self.system._add_process(aProcess)
    # Scan the process threads and loaded modules.
    # This is preferred because the thread and library events do not
    # properly give some information, like the filename for each module.
    aProcess.scan_threads()
    aProcess.scan_modules()
    # Return the Process object, like the execv() and execl() methods.
    return aProcess
def update_key(
        self, vault_base_url, key_name, key_version, key_ops=None, key_attributes=None, tags=None, custom_headers=None, raw=False, **operation_config):
    """The update key operation changes specified attributes of a stored key
    and can be applied to any key type and key version stored in Azure Key
    Vault.
    In order to perform this operation, the key must already exist in the
    Key Vault. Note: The cryptographic material of a key itself cannot be
    changed. This operation requires the keys/update permission.
    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param key_name: The name of key to update.
    :type key_name: str
    :param key_version: The version of the key to update.
    :type key_version: str
    :param key_ops: Json web key operations. For more information on
     possible key operations, see JsonWebKeyOperation.
    :type key_ops: list[str or
     ~azure.keyvault.v2016_10_01.models.JsonWebKeyOperation]
    :param key_attributes:
    :type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes
    :param tags: Application specific metadata in the form of key-value
     pairs.
    :type tags: dict[str, str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: KeyBundle or ClientRawResponse if raw=true
    :rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>`
    """
    # Bundle the optional updates into the request model object.
    parameters = models.KeyUpdateParameters(key_ops=key_ops, key_attributes=key_attributes, tags=tags)
    # Construct URL
    url = self.update_key.metadata['url']
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'key-name': self._serialize.url("key_name", key_name, 'str'),
        'key-version': self._serialize.url("key_version", key_version, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(parameters, 'KeyUpdateParameters')
    # Construct and send request (partial updates use HTTP PATCH)
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    # 200 is the only documented success status for this operation.
    if response.status_code not in [200]:
        raise models.KeyVaultErrorException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('KeyBundle', response)
    # raw=True returns the wire response alongside the deserialized model.
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
and can be applied to any key type and key version stored in Azure Key
Vault.
In order to perform this operation, the key must already exist in the
Key Vault. Note: The cryptographic material of a key itself cannot be
changed. This operation requires the keys/update permission.
:param vault_base_url: The vault name, for example
https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of key to update.
:type key_name: str
:param key_version: The version of the key to update.
:type key_version: str
:param key_ops: Json web key operations. For more information on
possible key operations, see JsonWebKeyOperation.
:type key_ops: list[str or
~azure.keyvault.v2016_10_01.models.JsonWebKeyOperation]
:param key_attributes:
:type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: KeyBundle or ClientRawResponse if raw=true
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>` | Below is the the instruction that describes the task:
### Input:
The update key operation changes specified attributes of a stored key
and can be applied to any key type and key version stored in Azure Key
Vault.
In order to perform this operation, the key must already exist in the
Key Vault. Note: The cryptographic material of a key itself cannot be
changed. This operation requires the keys/update permission.
:param vault_base_url: The vault name, for example
https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of key to update.
:type key_name: str
:param key_version: The version of the key to update.
:type key_version: str
:param key_ops: Json web key operations. For more information on
possible key operations, see JsonWebKeyOperation.
:type key_ops: list[str or
~azure.keyvault.v2016_10_01.models.JsonWebKeyOperation]
:param key_attributes:
:type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: KeyBundle or ClientRawResponse if raw=true
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>`
### Response:
def update_key(
        self, vault_base_url, key_name, key_version, key_ops=None, key_attributes=None, tags=None, custom_headers=None, raw=False, **operation_config):
    """The update key operation changes specified attributes of a stored key
    and can be applied to any key type and key version stored in Azure Key
    Vault.
    In order to perform this operation, the key must already exist in the
    Key Vault. Note: The cryptographic material of a key itself cannot be
    changed. This operation requires the keys/update permission.
    :param vault_base_url: The vault name, for example
     https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param key_name: The name of key to update.
    :type key_name: str
    :param key_version: The version of the key to update.
    :type key_version: str
    :param key_ops: Json web key operations. For more information on
     possible key operations, see JsonWebKeyOperation.
    :type key_ops: list[str or
     ~azure.keyvault.v2016_10_01.models.JsonWebKeyOperation]
    :param key_attributes:
    :type key_attributes: ~azure.keyvault.v2016_10_01.models.KeyAttributes
    :param tags: Application specific metadata in the form of key-value
     pairs.
    :type tags: dict[str, str]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: KeyBundle or ClientRawResponse if raw=true
    :rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle or
     ~msrest.pipeline.ClientRawResponse
    :raises:
     :class:`KeyVaultErrorException<azure.keyvault.v2016_10_01.models.KeyVaultErrorException>`
    """
    # Bundle the optional updates into the request model object.
    parameters = models.KeyUpdateParameters(key_ops=key_ops, key_attributes=key_attributes, tags=tags)
    # Construct URL
    url = self.update_key.metadata['url']
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'key-name': self._serialize.url("key_name", key_name, 'str'),
        'key-version': self._serialize.url("key_version", key_version, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
    # Construct body
    body_content = self._serialize.body(parameters, 'KeyUpdateParameters')
    # Construct and send request (partial updates use HTTP PATCH)
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)
    # 200 is the only documented success status for this operation.
    if response.status_code not in [200]:
        raise models.KeyVaultErrorException(self._deserialize, response)
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('KeyBundle', response)
    # raw=True returns the wire response alongside the deserialized model.
    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def _on_message(self, _, msg):
"""Sends and receives data to the callback function."""
result = json.loads(msg)
if result["cmd"] == "chat":
data = {"type": "message", "nick": result["nick"],
"text": result["text"]}
if "trip" in result:
data["trip"] = result["trip"]
self._callback(self, data)
elif result["cmd"] == "onlineSet":
self._onlineUsers += result["nicks"]
elif result["cmd"] == "onlineAdd":
self._onlineUsers.append(result["nick"])
self._callback(self, {"type": "online add",
"nick": result["nick"]})
elif result["cmd"] == "onlineRemove":
self._onlineUsers.remove(result["nick"])
self._callback(self, {"type": "online remove",
"nick": result["nick"]})
elif result["cmd"] == "info" and " invited " in result["text"]:
if "You invited " in result["text"]:
name = self._nick
else:
space = re.search(r"\s", result["text"])
name = result["text"][:space.start()]
link = re.search(r"\?", result["text"])
channel = result["text"][link.end():]
self._callback(self, {"type": "invite", "nick": name,
"channel": channel})
elif result["cmd"] == "info" and " IPs " in result["text"]:
data = result["text"].split()
self._callback(self, {"type": "stats", "IPs": data[0],
"channels": data[4]})
elif result["cmd"] == "info" and "Banned " in result["text"]:
nick = result["text"][len("Banned "):]
self._callback(self, {"type": "banned", "nick": nick})
elif result["cmd"] == "info" and "Unbanned " in result["text"]:
ip = result["text"][len("Unbanned "):]
self._callback(self, {"type": "unbanned", "ip": ip})
elif (result["cmd"] == "info"
and "Server broadcast: " in result["text"]):
txt = result["text"][len("Server broadcast: "):]
self._callback(self, {"type": "broadcast", "text": txt})
elif result["cmd"] == "info":
self._callback(self, {"type": "list users",
"text": result["text"]})
elif result["cmd"] == "warn":
data = {"type": "warn", "warning": result["text"]}
if "Could not find " in result["text"]:
data["warning"] = "user to ban not found"
data["nick"] = result["text"][len("Could not find "):]
            self._callback(self, data) | Sends and receives data to the callback function. | Below is the instruction that describes the task:
### Input:
Sends and receives data to the callback function.
### Response:
def _on_message(self, _, msg):
"""Sends and receives data to the callback function."""
result = json.loads(msg)
if result["cmd"] == "chat":
data = {"type": "message", "nick": result["nick"],
"text": result["text"]}
if "trip" in result:
data["trip"] = result["trip"]
self._callback(self, data)
elif result["cmd"] == "onlineSet":
self._onlineUsers += result["nicks"]
elif result["cmd"] == "onlineAdd":
self._onlineUsers.append(result["nick"])
self._callback(self, {"type": "online add",
"nick": result["nick"]})
elif result["cmd"] == "onlineRemove":
self._onlineUsers.remove(result["nick"])
self._callback(self, {"type": "online remove",
"nick": result["nick"]})
elif result["cmd"] == "info" and " invited " in result["text"]:
if "You invited " in result["text"]:
name = self._nick
else:
space = re.search(r"\s", result["text"])
name = result["text"][:space.start()]
link = re.search(r"\?", result["text"])
channel = result["text"][link.end():]
self._callback(self, {"type": "invite", "nick": name,
"channel": channel})
elif result["cmd"] == "info" and " IPs " in result["text"]:
data = result["text"].split()
self._callback(self, {"type": "stats", "IPs": data[0],
"channels": data[4]})
elif result["cmd"] == "info" and "Banned " in result["text"]:
nick = result["text"][len("Banned "):]
self._callback(self, {"type": "banned", "nick": nick})
elif result["cmd"] == "info" and "Unbanned " in result["text"]:
ip = result["text"][len("Unbanned "):]
self._callback(self, {"type": "unbanned", "ip": ip})
elif (result["cmd"] == "info"
and "Server broadcast: " in result["text"]):
txt = result["text"][len("Server broadcast: "):]
self._callback(self, {"type": "broadcast", "text": txt})
elif result["cmd"] == "info":
self._callback(self, {"type": "list users",
"text": result["text"]})
elif result["cmd"] == "warn":
data = {"type": "warn", "warning": result["text"]}
if "Could not find " in result["text"]:
data["warning"] = "user to ban not found"
data["nick"] = result["text"][len("Could not find "):]
self._callback(self, data) |
def encode_events(self, duration, events, values, dtype=np.bool):
'''Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
'''
frames = time_to_frames(events, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill_value(dtype))
values = values.astype(dtype)
for column, event in zip(values, frames):
target[event] += column
return target[:n_total] | Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values) | Below is the the instruction that describes the task:
### Input:
Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
### Response:
def encode_events(self, duration, events, values, dtype=np.bool):
'''Encode labeled events as a time-series matrix.
Parameters
----------
duration : number
The duration of the track
events : ndarray, shape=(n,)
Time index of the events
values : ndarray, shape=(n, m)
Values array. Must have the same first index as `events`.
dtype : numpy data type
Returns
-------
target : ndarray, shape=(n_frames, n_values)
'''
frames = time_to_frames(events, sr=self.sr,
hop_length=self.hop_length)
n_total = int(time_to_frames(duration, sr=self.sr,
hop_length=self.hop_length))
n_alloc = n_total
if np.any(frames):
n_alloc = max(n_total, 1 + int(frames.max()))
target = np.empty((n_alloc, values.shape[1]),
dtype=dtype)
target.fill(fill_value(dtype))
values = values.astype(dtype)
for column, event in zip(values, frames):
target[event] += column
return target[:n_total] |
def elbv2_load_balancer_arn_suffix(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*`
"""
try:
elb = self._elbv2_load_balancer(lookup)
m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', elb['LoadBalancerArn'])
return m.group(1)
except ClientError:
return default | Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*` | Below is the the instruction that describes the task:
### Input:
Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*`
### Response:
def elbv2_load_balancer_arn_suffix(self, lookup, default=None):
"""
Args:
lookup: the friendly name of the v2 elb to look up
default: value to return in case of no match
Returns:
The shorthand fragment of the ALB's ARN, of the form `app/*/*`
"""
try:
elb = self._elbv2_load_balancer(lookup)
m = re.search(r'.+?(app\/[^\/]+\/[^\/]+)$', elb['LoadBalancerArn'])
return m.group(1)
except ClientError:
return default |
def upgrade_database(
alembic_config_filename: str,
alembic_base_dir: str = None,
starting_revision: str = None,
destination_revision: str = "head",
version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE,
as_sql: bool = False) -> None:
"""
Use Alembic to upgrade our database.
See http://alembic.readthedocs.org/en/latest/api/runtime.html
but also, in particular, ``site-packages/alembic/command.py``
Arguments:
alembic_config_filename:
config filename
alembic_base_dir:
directory to start in, so relative paths in the config file work
starting_revision:
revision to start at (typically ``None`` to ask the database)
destination_revision:
revision to aim for (typically ``"head"`` to migrate to the latest
structure)
version_table: table name for Alembic versions
as_sql:
run in "offline" mode: print the migration SQL, rather than
modifying the database. See
http://alembic.zzzcomputing.com/en/latest/offline.html
"""
if alembic_base_dir is None:
alembic_base_dir = os.path.dirname(alembic_config_filename)
os.chdir(alembic_base_dir) # so the directory in the config file works
config = Config(alembic_config_filename)
script = ScriptDirectory.from_config(config)
# noinspection PyUnusedLocal,PyProtectedMember
def upgrade(rev, context):
return script._upgrade_revs(destination_revision, rev)
log.info("Upgrading database to revision {!r} using Alembic",
destination_revision)
with EnvironmentContext(config,
script,
fn=upgrade,
as_sql=as_sql,
starting_rev=starting_revision,
destination_rev=destination_revision,
tag=None,
version_table=version_table):
script.run_env()
log.info("Database upgrade completed") | Use Alembic to upgrade our database.
See http://alembic.readthedocs.org/en/latest/api/runtime.html
but also, in particular, ``site-packages/alembic/command.py``
Arguments:
alembic_config_filename:
config filename
alembic_base_dir:
directory to start in, so relative paths in the config file work
starting_revision:
revision to start at (typically ``None`` to ask the database)
destination_revision:
revision to aim for (typically ``"head"`` to migrate to the latest
structure)
version_table: table name for Alembic versions
as_sql:
run in "offline" mode: print the migration SQL, rather than
modifying the database. See
http://alembic.zzzcomputing.com/en/latest/offline.html | Below is the the instruction that describes the task:
### Input:
Use Alembic to upgrade our database.
See http://alembic.readthedocs.org/en/latest/api/runtime.html
but also, in particular, ``site-packages/alembic/command.py``
Arguments:
alembic_config_filename:
config filename
alembic_base_dir:
directory to start in, so relative paths in the config file work
starting_revision:
revision to start at (typically ``None`` to ask the database)
destination_revision:
revision to aim for (typically ``"head"`` to migrate to the latest
structure)
version_table: table name for Alembic versions
as_sql:
run in "offline" mode: print the migration SQL, rather than
modifying the database. See
http://alembic.zzzcomputing.com/en/latest/offline.html
### Response:
def upgrade_database(
alembic_config_filename: str,
alembic_base_dir: str = None,
starting_revision: str = None,
destination_revision: str = "head",
version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE,
as_sql: bool = False) -> None:
"""
Use Alembic to upgrade our database.
See http://alembic.readthedocs.org/en/latest/api/runtime.html
but also, in particular, ``site-packages/alembic/command.py``
Arguments:
alembic_config_filename:
config filename
alembic_base_dir:
directory to start in, so relative paths in the config file work
starting_revision:
revision to start at (typically ``None`` to ask the database)
destination_revision:
revision to aim for (typically ``"head"`` to migrate to the latest
structure)
version_table: table name for Alembic versions
as_sql:
run in "offline" mode: print the migration SQL, rather than
modifying the database. See
http://alembic.zzzcomputing.com/en/latest/offline.html
"""
if alembic_base_dir is None:
alembic_base_dir = os.path.dirname(alembic_config_filename)
os.chdir(alembic_base_dir) # so the directory in the config file works
config = Config(alembic_config_filename)
script = ScriptDirectory.from_config(config)
# noinspection PyUnusedLocal,PyProtectedMember
def upgrade(rev, context):
return script._upgrade_revs(destination_revision, rev)
log.info("Upgrading database to revision {!r} using Alembic",
destination_revision)
with EnvironmentContext(config,
script,
fn=upgrade,
as_sql=as_sql,
starting_rev=starting_revision,
destination_rev=destination_revision,
tag=None,
version_table=version_table):
script.run_env()
log.info("Database upgrade completed") |
def to_json(self):
"""
Returns the JSON Representation of the content type field.
"""
result = {
'name': self.name,
'id': self._real_id(),
'type': self.type,
'localized': self.localized,
'omitted': self.omitted,
'required': self.required,
'disabled': self.disabled,
'validations': [v.to_json() for v in self.validations]
}
if self.type == 'Array':
result['items'] = self.items
if self.type == 'Link':
result['linkType'] = self.link_type
        return result | Returns the JSON Representation of the content type field. | Below is the instruction that describes the task:
### Input:
Returns the JSON Representation of the content type field.
### Response:
def to_json(self):
"""
Returns the JSON Representation of the content type field.
"""
result = {
'name': self.name,
'id': self._real_id(),
'type': self.type,
'localized': self.localized,
'omitted': self.omitted,
'required': self.required,
'disabled': self.disabled,
'validations': [v.to_json() for v in self.validations]
}
if self.type == 'Array':
result['items'] = self.items
if self.type == 'Link':
result['linkType'] = self.link_type
return result |
def rgba_floats_tuple(self, x):
"""Provides the color corresponding to value `x` in the
form of a tuple (R,G,B,A) with float values between 0. and 1.
"""
if x <= self.index[0]:
return self.colors[0]
if x >= self.index[-1]:
return self.colors[-1]
i = len([u for u in self.index if u < x]) # 0 < i < n.
if self.index[i-1] < self.index[i]:
p = (x - self.index[i-1])*1./(self.index[i]-self.index[i-1])
elif self.index[i-1] == self.index[i]:
p = 1.
else:
raise ValueError('Thresholds are not sorted.')
return tuple((1.-p) * self.colors[i-1][j] + p*self.colors[i][j] for j
in range(4)) | Provides the color corresponding to value `x` in the
form of a tuple (R,G,B,A) with float values between 0. and 1. | Below is the the instruction that describes the task:
### Input:
Provides the color corresponding to value `x` in the
form of a tuple (R,G,B,A) with float values between 0. and 1.
### Response:
def rgba_floats_tuple(self, x):
"""Provides the color corresponding to value `x` in the
form of a tuple (R,G,B,A) with float values between 0. and 1.
"""
if x <= self.index[0]:
return self.colors[0]
if x >= self.index[-1]:
return self.colors[-1]
i = len([u for u in self.index if u < x]) # 0 < i < n.
if self.index[i-1] < self.index[i]:
p = (x - self.index[i-1])*1./(self.index[i]-self.index[i-1])
elif self.index[i-1] == self.index[i]:
p = 1.
else:
raise ValueError('Thresholds are not sorted.')
return tuple((1.-p) * self.colors[i-1][j] + p*self.colors[i][j] for j
in range(4)) |
def smartResizeColumnsToContents( self ):
"""
Resizes the columns to the contents based on the user preferences.
"""
self.blockSignals(True)
self.setUpdatesEnabled(False)
header = self.header()
header.blockSignals(True)
columns = range(self.columnCount())
sizes = [self.columnWidth(c) for c in columns]
header.resizeSections(header.ResizeToContents)
for col in columns:
width = self.columnWidth(col)
if ( width < sizes[col] ):
self.setColumnWidth(col, sizes[col])
header.blockSignals(False)
self.setUpdatesEnabled(True)
        self.blockSignals(False) | Resizes the columns to the contents based on the user preferences. | Below is the instruction that describes the task:
### Input:
Resizes the columns to the contents based on the user preferences.
### Response:
def smartResizeColumnsToContents( self ):
"""
Resizes the columns to the contents based on the user preferences.
"""
self.blockSignals(True)
self.setUpdatesEnabled(False)
header = self.header()
header.blockSignals(True)
columns = range(self.columnCount())
sizes = [self.columnWidth(c) for c in columns]
header.resizeSections(header.ResizeToContents)
for col in columns:
width = self.columnWidth(col)
if ( width < sizes[col] ):
self.setColumnWidth(col, sizes[col])
header.blockSignals(False)
self.setUpdatesEnabled(True)
self.blockSignals(False) |
def ffmpeg_works():
"""Tries to encode images with ffmpeg to check if it works."""
images = np.zeros((2, 32, 32, 3), dtype=np.uint8)
try:
_encode_gif(images, 2)
return True
except (IOError, OSError):
    return False | Tries to encode images with ffmpeg to check if it works. | Below is the instruction that describes the task:
### Input:
Tries to encode images with ffmpeg to check if it works.
### Response:
def ffmpeg_works():
"""Tries to encode images with ffmpeg to check if it works."""
images = np.zeros((2, 32, 32, 3), dtype=np.uint8)
try:
_encode_gif(images, 2)
return True
except (IOError, OSError):
return False |
def obj_from_file(filename='annotation.yaml', filetype='auto'):
''' Read object from file '''
if filetype == 'auto':
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ('yaml', 'yml'):
from ruamel.yaml import YAML
yaml = YAML(typ="unsafe")
with open(filename, encoding="utf-8") as f:
obj = yaml.load(f)
if obj is None:
obj = {}
# import yaml
# with open(filename, encoding="utf-8") as f:
# intext = f.read()
# obj = yaml.load(intext)
elif filetype in ('pickle', 'pkl', 'pklz', 'picklezip'):
fcontent = read_pkl_and_pklz(filename)
# import pickle
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
# import sPickle as pickle
if sys.version_info.major == 2:
obj = pickle.loads(fcontent)
else:
obj = pickle.loads(fcontent, encoding="latin1")
else:
logger.error('Unknown filetype ' + filetype)
    return obj | Read object from file | Below is the instruction that describes the task:
### Input:
Read object from file
### Response:
def obj_from_file(filename='annotation.yaml', filetype='auto'):
''' Read object from file '''
if filetype == 'auto':
_, ext = os.path.splitext(filename)
filetype = ext[1:]
if filetype in ('yaml', 'yml'):
from ruamel.yaml import YAML
yaml = YAML(typ="unsafe")
with open(filename, encoding="utf-8") as f:
obj = yaml.load(f)
if obj is None:
obj = {}
# import yaml
# with open(filename, encoding="utf-8") as f:
# intext = f.read()
# obj = yaml.load(intext)
elif filetype in ('pickle', 'pkl', 'pklz', 'picklezip'):
fcontent = read_pkl_and_pklz(filename)
# import pickle
if sys.version_info[0] < 3:
import cPickle as pickle
else:
import _pickle as pickle
# import sPickle as pickle
if sys.version_info.major == 2:
obj = pickle.loads(fcontent)
else:
obj = pickle.loads(fcontent, encoding="latin1")
else:
logger.error('Unknown filetype ' + filetype)
return obj |
def commit(self):
""" Store metadata on this Job to the backend. """
logger.debug('Committing job {0}'.format(self.name))
self.backend.commit_job(self._serialize())
        self.parent.commit() | Store metadata on this Job to the backend. | Below is the instruction that describes the task:
### Input:
Store metadata on this Job to the backend.
### Response:
def commit(self):
""" Store metadata on this Job to the backend. """
logger.debug('Committing job {0}'.format(self.name))
self.backend.commit_job(self._serialize())
self.parent.commit() |
def graph(self, name, *args, **kwargs):
"""
Creates a :py:class:`Graph` instance.
:param str name: The namespace for the graph metadata.
:returns: a :py:class:`Graph` instance.
"""
return Graph(self, name, *args, **kwargs) | Creates a :py:class:`Graph` instance.
:param str name: The namespace for the graph metadata.
:returns: a :py:class:`Graph` instance. | Below is the the instruction that describes the task:
### Input:
Creates a :py:class:`Graph` instance.
:param str name: The namespace for the graph metadata.
:returns: a :py:class:`Graph` instance.
### Response:
def graph(self, name, *args, **kwargs):
"""
Creates a :py:class:`Graph` instance.
:param str name: The namespace for the graph metadata.
:returns: a :py:class:`Graph` instance.
"""
return Graph(self, name, *args, **kwargs) |
def add_data(self, minimum_address, maximum_address, data, overwrite):
"""Add given data to this segment. The added data must be adjacent to
the current segment data, otherwise an exception is thrown.
"""
if minimum_address == self.maximum_address:
self.maximum_address = maximum_address
self.data += data
elif maximum_address == self.minimum_address:
self.minimum_address = minimum_address
self.data = data + self.data
elif (overwrite
and minimum_address < self.maximum_address
and maximum_address > self.minimum_address):
self_data_offset = minimum_address - self.minimum_address
# Prepend data.
if self_data_offset < 0:
self_data_offset *= -1
self.data = data[:self_data_offset] + self.data
del data[:self_data_offset]
self.minimum_address = minimum_address
# Overwrite overlapping part.
self_data_left = len(self.data) - self_data_offset
if len(data) <= self_data_left:
self.data[self_data_offset:self_data_offset + len(data)] = data
data = bytearray()
else:
self.data[self_data_offset:] = data[:self_data_left]
data = data[self_data_left:]
# Append data.
if len(data) > 0:
self.data += data
self.maximum_address = maximum_address
else:
raise AddDataError(
'data added to a segment must be adjacent to or overlapping '
'with the original segment data') | Add given data to this segment. The added data must be adjacent to
the current segment data, otherwise an exception is thrown. | Below is the the instruction that describes the task:
### Input:
Add given data to this segment. The added data must be adjacent to
the current segment data, otherwise an exception is thrown.
### Response:
def add_data(self, minimum_address, maximum_address, data, overwrite):
"""Add given data to this segment. The added data must be adjacent to
the current segment data, otherwise an exception is thrown.
"""
if minimum_address == self.maximum_address:
self.maximum_address = maximum_address
self.data += data
elif maximum_address == self.minimum_address:
self.minimum_address = minimum_address
self.data = data + self.data
elif (overwrite
and minimum_address < self.maximum_address
and maximum_address > self.minimum_address):
self_data_offset = minimum_address - self.minimum_address
# Prepend data.
if self_data_offset < 0:
self_data_offset *= -1
self.data = data[:self_data_offset] + self.data
del data[:self_data_offset]
self.minimum_address = minimum_address
# Overwrite overlapping part.
self_data_left = len(self.data) - self_data_offset
if len(data) <= self_data_left:
self.data[self_data_offset:self_data_offset + len(data)] = data
data = bytearray()
else:
self.data[self_data_offset:] = data[:self_data_left]
data = data[self_data_left:]
# Append data.
if len(data) > 0:
self.data += data
self.maximum_address = maximum_address
else:
raise AddDataError(
'data added to a segment must be adjacent to or overlapping '
'with the original segment data') |
def move(self, loc, yoff=None):
""" Moves cursor to specified location. Accepts the following arguments:
* ``move(loc)`` - Move cursor to ``Location``
* ``move(xoff, yoff)`` - Move cursor to offset from current location
"""
from .Geometry import Location
self._lock.acquire()
if isinstance(loc, Location):
mouse.move(loc.x, loc.y)
elif yoff is not None:
xoff = loc
mouse.move(xoff, yoff)
else:
raise ValueError("Invalid argument. Expected either move(loc) or move(xoff, yoff).")
self._last_position = loc
self._lock.release() | Moves cursor to specified location. Accepts the following arguments:
* ``move(loc)`` - Move cursor to ``Location``
* ``move(xoff, yoff)`` - Move cursor to offset from current location | Below is the the instruction that describes the task:
### Input:
Moves cursor to specified location. Accepts the following arguments:
* ``move(loc)`` - Move cursor to ``Location``
* ``move(xoff, yoff)`` - Move cursor to offset from current location
### Response:
def move(self, loc, yoff=None):
""" Moves cursor to specified location. Accepts the following arguments:
* ``move(loc)`` - Move cursor to ``Location``
* ``move(xoff, yoff)`` - Move cursor to offset from current location
"""
from .Geometry import Location
self._lock.acquire()
if isinstance(loc, Location):
mouse.move(loc.x, loc.y)
elif yoff is not None:
xoff = loc
mouse.move(xoff, yoff)
else:
raise ValueError("Invalid argument. Expected either move(loc) or move(xoff, yoff).")
self._last_position = loc
self._lock.release() |
def get_gravatar_profile_url(email, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build url
url = '{base}{hash}'.format(base=url_base, hash=email_hash)
return url | Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http | Below is the the instruction that describes the task:
### Input:
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
### Response:
def get_gravatar_profile_url(email, secure=GRAVATAR_DEFAULT_SECURE):
"""
Builds a url to a gravatar profile from an email address.
:param email: The email to fetch the gravatar for
:param secure: If True use https, otherwise plain http
"""
if secure:
url_base = GRAVATAR_SECURE_URL
else:
url_base = GRAVATAR_URL
# Calculate the email hash
email_hash = calculate_gravatar_hash(email)
# Build url
url = '{base}{hash}'.format(base=url_base, hash=email_hash)
return url |
def clean_line_profile_text(text):
"""
Sorts the output from line profile by execution time
Removes entries which were not run
"""
#
profile_block_list = parse_rawprofile_blocks(text)
#profile_block_list = fix_rawprofile_blocks(profile_block_list)
#---
# FIXME can be written much nicer
prefix_list, timemap = parse_timemap_from_blocks(profile_block_list)
# Sort the blocks by time
sorted_lists = sorted(six.iteritems(timemap), key=operator.itemgetter(0))
newlist = prefix_list[:]
for key, val in sorted_lists:
newlist.extend(val)
# Rejoin output text
output_text = '\n'.join(newlist)
#---
# Hack in a profile summary
summary_text = get_summary(profile_block_list)
output_text = output_text
return output_text, summary_text | Sorts the output from line profile by execution time
Removes entries which were not run | Below is the the instruction that describes the task:
### Input:
Sorts the output from line profile by execution time
Removes entries which were not run
### Response:
def clean_line_profile_text(text):
"""
Sorts the output from line profile by execution time
Removes entries which were not run
"""
#
profile_block_list = parse_rawprofile_blocks(text)
#profile_block_list = fix_rawprofile_blocks(profile_block_list)
#---
# FIXME can be written much nicer
prefix_list, timemap = parse_timemap_from_blocks(profile_block_list)
# Sort the blocks by time
sorted_lists = sorted(six.iteritems(timemap), key=operator.itemgetter(0))
newlist = prefix_list[:]
for key, val in sorted_lists:
newlist.extend(val)
# Rejoin output text
output_text = '\n'.join(newlist)
#---
# Hack in a profile summary
summary_text = get_summary(profile_block_list)
output_text = output_text
return output_text, summary_text |
def group(self):
"""
| Comment: The id of a group
"""
if self.api and self.group_id:
            return self.api._get_group(self.group_id) | | Comment: The id of a group | Below is the instruction that describes the task:
### Input:
| Comment: The id of a group
### Response:
def group(self):
"""
| Comment: The id of a group
"""
if self.api and self.group_id:
return self.api._get_group(self.group_id) |
def encode(self, inputs, attention_bias):
"""Generate continuous representation for inputs.
Args:
inputs: int tensor with shape [batch_size, input_length].
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
"""
with tf.name_scope("encode"):
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
embedded_inputs = self.embedding_softmax_layer(inputs)
inputs_padding = model_utils.get_padding(inputs)
with tf.name_scope("add_pos_encoding"):
length = tf.shape(embedded_inputs)[1]
pos_encoding = model_utils.get_position_encoding(
length, self.params.hidden_size)
encoder_inputs = embedded_inputs + pos_encoding
if self.train:
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=self.params.layer_postprocess_dropout)
encoder_inputs = tf.nn.dropout(
encoder_inputs, 1 - self.params.layer_postprocess_dropout)
return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding) | Generate continuous representation for inputs.
Args:
inputs: int tensor with shape [batch_size, input_length].
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float tensor with shape [batch_size, input_length, hidden_size] | Below is the the instruction that describes the task:
### Input:
Generate continuous representation for inputs.
Args:
inputs: int tensor with shape [batch_size, input_length].
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
### Response:
def encode(self, inputs, attention_bias):
"""Generate continuous representation for inputs.
Args:
inputs: int tensor with shape [batch_size, input_length].
attention_bias: float tensor with shape [batch_size, 1, 1, input_length]
Returns:
float tensor with shape [batch_size, input_length, hidden_size]
"""
with tf.name_scope("encode"):
# Prepare inputs to the layer stack by adding positional encodings and
# applying dropout.
embedded_inputs = self.embedding_softmax_layer(inputs)
inputs_padding = model_utils.get_padding(inputs)
with tf.name_scope("add_pos_encoding"):
length = tf.shape(embedded_inputs)[1]
pos_encoding = model_utils.get_position_encoding(
length, self.params.hidden_size)
encoder_inputs = embedded_inputs + pos_encoding
if self.train:
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
value=self.params.layer_postprocess_dropout)
encoder_inputs = tf.nn.dropout(
encoder_inputs, 1 - self.params.layer_postprocess_dropout)
return self.encoder_stack(encoder_inputs, attention_bias, inputs_padding) |
def annealing(self):
'''
Annealing.
'''
self.__predicted_log_list = []
for cycle in range(self.__cycles_num):
for mc_step in range(self.__mc_step):
self.__move()
self.__gammma *= self.__fractional_reduction
if isinstance(self.__tolerance_diff_e, float) and len(self.__predicted_log_list) > 1:
diff = abs(self.__predicted_log_list[-1][5] - self.__predicted_log_list[-2][5])
if diff < self.__tolerance_diff_e:
break
        self.predicted_log_arr = np.array(self.__predicted_log_list) | Annealing. | Below is the instruction that describes the task:
### Input:
Annealing.
### Response:
def annealing(self):
'''
Annealing.
'''
self.__predicted_log_list = []
for cycle in range(self.__cycles_num):
for mc_step in range(self.__mc_step):
self.__move()
self.__gammma *= self.__fractional_reduction
if isinstance(self.__tolerance_diff_e, float) and len(self.__predicted_log_list) > 1:
diff = abs(self.__predicted_log_list[-1][5] - self.__predicted_log_list[-2][5])
if diff < self.__tolerance_diff_e:
break
self.predicted_log_arr = np.array(self.__predicted_log_list) |
def align(w,signals,mode):
""" Align various waves in a specific point.
Given a mode of alignment, this function computes the specific time point of
a wave where all the waves would be aligned. With the difference between the
time point of the reference wave and the time points of all the other waves,
we have the amount of samples the waves will move to align in the specific
point computed.
Parameters
----------
w: array-like
the input signal to use as a reference of alignment (all the other signals
will be aligned with this one).
signals: array-like or matrix-like
the input signals to align.
mode: string
the mode used in the alignment, from 'max', 'min', 'peak', 'peak_neg',
'infMaxAlign' and 'infMinAlign'.
Returns
-------
nw: a masked array
a new set of aligned signals in a masked array (some cells have NAN values
due to the alignment).
Example
-------
>>> align([6,3,4,5,2,2],[10,30,28,26,13,20],'max')
[masked_array(data = [30.0 28.0 26.0 13.0 20.0 --],
mask = [False False False False False True],
fill_value = 1e+20)
]
>>> align([6,3,4,5,2,2],[10,30,28,26,13,20],'peak')
[masked_array(data = [-- -- 10.0 30.0 28.0 26.0],
mask = [ True True False False False False],
fill_value = 1e+20)
]
>>> align([34,4,8],[[100,550,278,67,613,120],[10,470,230,189,856,420]],'min')
[masked_array(data = [278.0 67.0 613.0 120.0 -- --],
mask = [False False False False True True],
fill_value = 1e+20)
, masked_array(data = [-- 10.0 470.0 230.0 189.0 856.0],
mask = [ True False False False False False],
fill_value = 1e+20)
]
"""
nw = []
if len(shape(signals))==1:
signals = [signals]
for i in range(len(signals)):
if (mode == 'max'):
al = maxAlign(w,signals[i])
elif (mode == 'min'):
al = minAlign(w,signals[i])
elif (mode == 'peak'):
al = peakAlign(w,signals[i])
elif (mode == 'peak_neg'):
al = peakNegAlign(w,signals[i])
elif (mode == 'infMaxAlign'):
al = infMaxAlign(w,signals[i])
elif (mode == 'infMinAlign'):
al = infMinAlign(w,signals[i])
nw += [ moveWave(signals[i],al) ]
return nw | Align various waves in a specific point.
Given a mode of alignment, this function computes the specific time point of
a wave where all the waves would be aligned. With the difference between the
time point of the reference wave and the time points of all the other waves,
we have the amount of samples the waves will move to align in the specific
point computed.
Parameters
----------
w: array-like
the input signal to use as a reference of alignment (all the other signals
will be aligned with this one).
signals: array-like or matrix-like
the input signals to align.
mode: string
the mode used in the alignment, from 'max', 'min', 'peak', 'peak_neg',
'infMaxAlign' and 'infMinAlign'.
Returns
-------
nw: a masked array
a new set of aligned signals in a masked array (some cells have NAN values
due to the alignment).
Example
-------
>>> align([6,3,4,5,2,2],[10,30,28,26,13,20],'max')
[masked_array(data = [30.0 28.0 26.0 13.0 20.0 --],
mask = [False False False False False True],
fill_value = 1e+20)
]
>>> align([6,3,4,5,2,2],[10,30,28,26,13,20],'peak')
[masked_array(data = [-- -- 10.0 30.0 28.0 26.0],
mask = [ True True False False False False],
fill_value = 1e+20)
]
>>> align([34,4,8],[[100,550,278,67,613,120],[10,470,230,189,856,420]],'min')
[masked_array(data = [278.0 67.0 613.0 120.0 -- --],
mask = [False False False False True True],
fill_value = 1e+20)
, masked_array(data = [-- 10.0 470.0 230.0 189.0 856.0],
mask = [ True False False False False False],
fill_value = 1e+20)
] | Below is the the instruction that describes the task:
### Input:
Align various waves in a specific point.
Given a mode of alignment, this function computes the specific time point of
a wave where all the waves would be aligned. With the difference between the
time point of the reference wave and the time points of all the other waves,
we have the amount of samples the waves will move to align in the specific
point computed.
Parameters
----------
w: array-like
the input signal to use as a reference of alignment (all the other signals
will be aligned with this one).
signals: array-like or matrix-like
the input signals to align.
mode: string
the mode used in the alignment, from 'max', 'min', 'peak', 'peak_neg',
'infMaxAlign' and 'infMinAlign'.
Returns
-------
nw: a masked array
a new set of aligned signals in a masked array (some cells have NAN values
due to the alignment).
Example
-------
>>> align([6,3,4,5,2,2],[10,30,28,26,13,20],'max')
[masked_array(data = [30.0 28.0 26.0 13.0 20.0 --],
mask = [False False False False False True],
fill_value = 1e+20)
]
>>> align([6,3,4,5,2,2],[10,30,28,26,13,20],'peak')
[masked_array(data = [-- -- 10.0 30.0 28.0 26.0],
mask = [ True True False False False False],
fill_value = 1e+20)
]
>>> align([34,4,8],[[100,550,278,67,613,120],[10,470,230,189,856,420]],'min')
[masked_array(data = [278.0 67.0 613.0 120.0 -- --],
mask = [False False False False True True],
fill_value = 1e+20)
, masked_array(data = [-- 10.0 470.0 230.0 189.0 856.0],
mask = [ True False False False False False],
fill_value = 1e+20)
]
### Response:
def align(w,signals,mode):
    """ Align various waves in a specific point.
    Given a mode of alignment, this function computes the specific time point of
    a wave where all the waves would be aligned. With the difference between the
    time point of the reference wave and the time points of all the other waves,
    we have the amount of samples the waves will move to align in the specific
    point computed.
    Parameters
    ----------
    w: array-like
      the input signal to use as a reference of alignment (all the other signals
      will be aligned with this one).
    signals: array-like or matrix-like
      the input signals to align.
    mode: string
      the mode used in the alignment, from 'max', 'min', 'peak', 'peak_neg',
      'infMaxAlign' and 'infMinAlign'.
    Returns
    -------
    nw: a masked array
      a new set of aligned signals in a masked array (some cells have NAN values
      due to the alignment).
    Raises
    ------
    ValueError
      if ``mode`` is not one of the supported alignment modes.
    """
    # Map each mode name to its alignment-point function.
    dispatch = {
        'max': maxAlign,
        'min': minAlign,
        'peak': peakAlign,
        'peak_neg': peakNegAlign,
        'infMaxAlign': infMaxAlign,
        'infMinAlign': infMinAlign,
    }
    if mode not in dispatch:
        # BUG FIX: an unknown mode previously fell through every elif and
        # crashed with UnboundLocalError on 'al'; fail loudly instead.
        raise ValueError("unknown alignment mode: %r" % (mode,))
    aligner = dispatch[mode]
    if len(shape(signals)) == 1:
        # A single wave was passed in; promote it to a one-row matrix.
        signals = [signals]
    nw = []
    for sig in signals:
        # Shift each signal by the offset its aligner computes against w.
        nw += [moveWave(sig, aligner(w, sig))]
    return nw
def load_friend_chains(chain, friend_chains, txt, nfiles=None):
"""Load a list of trees from a file and add them as friends to the
chain."""
if re.search('.root?', txt) is not None:
c = ROOT.TChain(chain.GetName())
c.SetDirectory(0)
c.Add(txt)
friend_chains.append(c)
chain.AddFriend(c, rand_str())
return
files = np.loadtxt(txt, unpack=True, dtype='str')
if files.ndim == 0:
files = np.array([files])
if nfiles is not None:
files = files[:nfiles]
print("Loading %i files..." % len(files))
c = ROOT.TChain(chain.GetName())
c.SetDirectory(0)
for f in files:
c.Add(f)
friend_chains.append(c)
chain.AddFriend(c, rand_str())
return | Load a list of trees from a file and add them as friends to the
chain. | Below is the instruction that describes the task:
### Input:
Load a list of trees from a file and add them as friends to the
chain.
### Response:
def load_friend_chains(chain, friend_chains, txt, nfiles=None):
    """Load a list of trees from a file and add them as friends to the
    chain.

    :param chain: the ROOT.TChain the friend chains are attached to
    :param friend_chains: list the created friend chains are appended to
        (keeps them referenced on the caller's side)
    :param txt: either a ROOT file name itself, or a text file listing one
        ROOT file per line
    :param nfiles: optional cap on the number of files taken from the list
    """
    def _attach(file_names):
        # Build one friend chain over file_names and register it on
        # ``chain`` under a random alias.
        c = ROOT.TChain(chain.GetName())
        c.SetDirectory(0)
        for f in file_names:
            c.Add(f)
        friend_chains.append(c)
        chain.AddFriend(c, rand_str())

    # ``txt`` may itself be a ROOT file instead of a list of files.
    # BUG FIX: the old pattern '.root?' left the dot unescaped and made the
    # final 't' optional, so it also matched names like 'xroo'; use a raw,
    # escaped pattern instead.
    if re.search(r'\.root', txt) is not None:
        _attach([txt])
        return
    files = np.loadtxt(txt, unpack=True, dtype='str')
    if files.ndim == 0:
        # A single entry comes back as a 0-d array; make it iterable.
        files = np.array([files])
    if nfiles is not None:
        files = files[:nfiles]
    print("Loading %i files..." % len(files))
    _attach(files)
def system(**kwargs):
"""
Generally, this will automatically be added to a newly initialized
:class:`phoebe.frontend.bundle.Bundle`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
params = []
params += [FloatParameter(qualifier='t0', value=kwargs.get('t0', 0.0), default_unit=u.d, description='Time at which all values are provided')]
# TODO: re-enable these once they're incorporated into orbits (dynamics) correctly.
params += [FloatParameter(qualifier='ra', value=kwargs.get('ra', 0.0), default_unit=u.deg, description='Right ascension')]
params += [FloatParameter(qualifier='dec', value=kwargs.get('dec', 0.0), default_unit=u.deg, description='Declination')]
params += [StringParameter(qualifier='epoch', value=kwargs.get('epoch', 'J2000'), description='Epoch of coordinates')]
#params += [FloatParameter(qualifier='pmra', value=kwargs.get('pmra', 0.0), default_unit=u.mas/u.yr, description='Proper motion in right ascension')]
#params += [FloatParameter(qualifier='pmdec', value=kwargs.get('pmdec', 0.0), default_unit=u.mas/u.yr, description='Proper motion in declination')]
params += [FloatParameter(qualifier='distance', value=kwargs.get('distance', 1.0), default_unit=u.m, description='Distance to the system')]
params += [FloatParameter(qualifier='vgamma', value=kwargs.get('vgamma', 0.0), default_unit=u.km/u.s, description='Systemic velocity (in the direction of positive RV or negative vz)')]
return ParameterSet(params) | Generally, this will automatically be added to a newly initialized
:class:`phoebe.frontend.bundle.Bundle`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s | Below is the instruction that describes the task:
### Input:
Generally, this will automatically be added to a newly initialized
:class:`phoebe.frontend.bundle.Bundle`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
### Response:
def system(**kwargs):
    """
    Generally, this will automatically be added to a newly initialized
    :class:`phoebe.frontend.bundle.Bundle`

    :parameter **kwargs: defaults for the values of any of the parameters
    :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
        created :class:`phoebe.parameters.parameters.Parameter`s
    """
    def _float(qualifier, default, unit, description):
        # Every float parameter follows the same construction pattern.
        return FloatParameter(qualifier=qualifier,
                              value=kwargs.get(qualifier, default),
                              default_unit=unit,
                              description=description)

    # TODO: re-enable pmra/pmdec proper-motion parameters once they're
    # incorporated into orbits (dynamics) correctly.
    params = [
        _float('t0', 0.0, u.d, 'Time at which all values are provided'),
        _float('ra', 0.0, u.deg, 'Right ascension'),
        _float('dec', 0.0, u.deg, 'Declination'),
        StringParameter(qualifier='epoch',
                        value=kwargs.get('epoch', 'J2000'),
                        description='Epoch of coordinates'),
        _float('distance', 1.0, u.m, 'Distance to the system'),
        _float('vgamma', 0.0, u.km/u.s,
               'Systemic velocity (in the direction of positive RV or negative vz)'),
    ]
    return ParameterSet(params)
def reindex(self, newIndexIDs=None, newIndexNames=None, newIndexClassNames=None, newIndexTagNames=None):
'''
reindex - reindex the tree. Optionally, change what fields are indexed.
@param newIndexIDs <bool/None> - None to leave same, otherwise new value to index IDs
@parma newIndexNames <bool/None> - None to leave same, otherwise new value to index names
@param newIndexClassNames <bool/None> - None to leave same, otherwise new value to index class names
@param newIndexTagNames <bool/None> - None to leave same, otherwise new value to index tag names
'''
if newIndexIDs is not None:
self.indexIDs = newIndexIDs
if newIndexNames is not None:
self.indexNames = newIndexNames
if newIndexClassNames is not None:
self.newIndexClassNames = newIndexClassNames
if newIndexTagNames is not None:
self.newIndexTagNames = newIndexTagNames
self._resetIndexInternal()
self._indexTagRecursive(self.root) | reindex - reindex the tree. Optionally, change what fields are indexed.
@param newIndexIDs <bool/None> - None to leave same, otherwise new value to index IDs
@parma newIndexNames <bool/None> - None to leave same, otherwise new value to index names
@param newIndexClassNames <bool/None> - None to leave same, otherwise new value to index class names
@param newIndexTagNames <bool/None> - None to leave same, otherwise new value to index tag names | Below is the the instruction that describes the task:
### Input:
reindex - reindex the tree. Optionally, change what fields are indexed.
@param newIndexIDs <bool/None> - None to leave same, otherwise new value to index IDs
@parma newIndexNames <bool/None> - None to leave same, otherwise new value to index names
@param newIndexClassNames <bool/None> - None to leave same, otherwise new value to index class names
@param newIndexTagNames <bool/None> - None to leave same, otherwise new value to index tag names
### Response:
def reindex(self, newIndexIDs=None, newIndexNames=None, newIndexClassNames=None, newIndexTagNames=None):
    '''
    reindex - reindex the tree. Optionally, change what fields are indexed.

    @param newIndexIDs <bool/None> - None to leave same, otherwise new value to index IDs
    @param newIndexNames <bool/None> - None to leave same, otherwise new value to index names
    @param newIndexClassNames <bool/None> - None to leave same, otherwise new value to index class names
    @param newIndexTagNames <bool/None> - None to leave same, otherwise new value to index tag names
    '''
    if newIndexIDs is not None:
        self.indexIDs = newIndexIDs
    if newIndexNames is not None:
        self.indexNames = newIndexNames
    # BUG FIX: these two previously assigned self.newIndexClassNames and
    # self.newIndexTagNames (the parameter names), so toggling class-name /
    # tag-name indexing silently had no effect on the rebuilt index.
    if newIndexClassNames is not None:
        self.indexClassNames = newIndexClassNames
    if newIndexTagNames is not None:
        self.indexTagNames = newIndexTagNames
    # Drop the old index contents, then rebuild by walking from the root.
    self._resetIndexInternal()
    self._indexTagRecursive(self.root)
def setContent(self, type_, value):
"""
Sets the content that's going to be sent to analyze according to its type
:param type_:
Type of the content (text, file or url)
:param value:
Value of the content
"""
if type_ in [self.CONTENT_TYPE_TXT, self.CONTENT_TYPE_URL,
self.CONTENT_TYPE_FILE]:
if type_ == self.CONTENT_TYPE_FILE:
self._file = {}
self._file = {'doc': open(value, 'rb')}
else:
self.addParam(type_, value) | Sets the content that's going to be sent to analyze according to its type
:param type_:
Type of the content (text, file or url)
:param value:
Value of the content | Below is the the instruction that describes the task:
### Input:
Sets the content that's going to be sent to analyze according to its type
:param type_:
Type of the content (text, file or url)
:param value:
Value of the content
### Response:
def setContent(self, type_, value):
    """
    Sets the content that's going to be sent to analyze according to its type

    :param type_:
        Type of the content (text, file or url); any other value is ignored
    :param value:
        Value of the content (for files: the path, opened in binary mode)
    """
    valid_types = (self.CONTENT_TYPE_TXT,
                   self.CONTENT_TYPE_URL,
                   self.CONTENT_TYPE_FILE)
    if type_ not in valid_types:
        # Unknown content types are silently ignored.
        return
    if type_ == self.CONTENT_TYPE_FILE:
        # Files are uploaded as the 'doc' part of the request.
        self._file = {'doc': open(value, 'rb')}
    else:
        self.addParam(type_, value)
def listFiles(self):
"""Gets all files in the temp file tree (which may be dirs).
"""
def fn(dirName, level, files):
if level == self.levelNo-1:
for fileName in os.listdir(dirName):
if fileName != "lock":
absFileName = os.path.join(dirName, fileName)
files.append(absFileName)
else:
for subDir in os.listdir(dirName):
if subDir != "lock":
absDirName = os.path.join(dirName, subDir)
assert os.path.isdir(absDirName)
fn(absDirName, level+1, files)
files = []
fn(self.rootDir, 0, files)
return files | Gets all files in the temp file tree (which may be dirs). | Below is the the instruction that describes the task:
### Input:
Gets all files in the temp file tree (which may be dirs).
### Response:
def listFiles(self):
    """Return all entries at the bottom level of the temp file tree
    (entries may themselves be directories).

    Walks ``self.rootDir`` down ``self.levelNo`` levels; every level above
    the last must contain only directories.  Entries named "lock" are
    skipped at every level.
    """
    found = []

    def _walk(dir_name, level):
        entries = [e for e in os.listdir(dir_name) if e != "lock"]
        if level == self.levelNo - 1:
            # Bottom level: collect the absolute paths.
            found.extend(os.path.join(dir_name, e) for e in entries)
            return
        for entry in entries:
            sub_dir = os.path.join(dir_name, entry)
            assert os.path.isdir(sub_dir)
            _walk(sub_dir, level + 1)

    _walk(self.rootDir, 0)
    return found
def connect(self, address):
"""
Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return:
"""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setblocking(False)
# Disable the Nagle algorithm.
self.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
yield from self._loop.sock_connect(self.s, address)
yield from self._notify._connect(address) | Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return: | Below is the the instruction that describes the task:
### Input:
Connect to a remote or local gpiod daemon.
:param address: a pair (address, port), the address must be already
resolved (for example an ip address)
:return:
### Response:
def connect(self, address):
    """
    Connect to a remote or local gpiod daemon.

    This is a generator-based coroutine (``yield from``): it must be
    driven by the event loop stored in ``self._loop``.

    :param address: a pair (address, port), the address must be already
    resolved (for example an ip address)
    :return:
    """
    # Plain TCP socket, switched to non-blocking so the event loop's
    # sock_connect() below can drive the connection without stalling.
    self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.s.setblocking(False)
    # Disable the Nagle algorithm.
    # NOTE(review): presumably because the protocol exchanges many small
    # request/response messages where batching would only add latency.
    self.s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    yield from self._loop.sock_connect(self.s, address)
    # Also bring up the companion notification channel to the same daemon.
    yield from self._notify._connect(address)
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False):
"""Return a list of dictionaries fit for AnalysisSpecsResultsField
consumption.
If neither hidemin nor hidemax are specified, only services which have
float()able entries in result,min and max field will be included. If
hidemin and/or hidemax specified, results might contain empty min
and/or max fields.
"""
values = []
# selected services
service_uids = form.get("uids", [])
if not service_uids:
# Inject empty fields for the validator
values = [dict.fromkeys(field.getSubfields())]
for uid in service_uids:
s_min = self._get_spec_value(form, uid, "min")
s_max = self._get_spec_value(form, uid, "max")
if not s_min and not s_max:
# If user has not set value neither for min nor max, omit this
# record. Otherwise, since 'min' and 'max' are defined as
# mandatory subfields, the following message will appear after
# submission: "Specifications is required, please correct."
continue
# TODO: disallow this case in the UI
if s_min and s_max:
if float(s_min) > float(s_max):
logger.warn("Min({}) > Max({}) is not allowed"
.format(s_min, s_max))
continue
min_operator = self._get_spec_value(
form, uid, "min_operator", check_floatable=False)
max_operator = self._get_spec_value(
form, uid, "max_operator", check_floatable=False)
service = api.get_object_by_uid(uid)
values.append({
"keyword": service.getKeyword(),
"uid": uid,
"min_operator": min_operator,
"min": s_min,
"max_operator": max_operator,
"max": s_max,
"warn_min": self._get_spec_value(form, uid, "warn_min"),
"warn_max": self._get_spec_value(form, uid, "warn_max"),
"hidemin": self._get_spec_value(form, uid, "hidemin"),
"hidemax": self._get_spec_value(form, uid, "hidemax"),
"rangecomment": self._get_spec_value(form, uid, "rangecomment",
check_floatable=False)})
return values, {} | Return a list of dictionaries fit for AnalysisSpecsResultsField
consumption.
If neither hidemin nor hidemax are specified, only services which have
float()able entries in result,min and max field will be included. If
hidemin and/or hidemax specified, results might contain empty min
and/or max fields. | Below is the the instruction that describes the task:
### Input:
Return a list of dictionaries fit for AnalysisSpecsResultsField
consumption.
If neither hidemin nor hidemax are specified, only services which have
float()able entries in result,min and max field will be included. If
hidemin and/or hidemax specified, results might contain empty min
and/or max fields.
### Response:
def process_form(self, instance, field, form, empty_marker=None,
                 emptyReturnsMarker=False):
    """Return a list of dictionaries fit for AnalysisSpecsResultsField
    consumption.

    If neither hidemin nor hidemax are specified, only services which have
    float()able entries in result,min and max field will be included. If
    hidemin and/or hidemax specified, results might contain empty min
    and/or max fields.

    Returns a ``(values, kwargs)`` tuple; ``instance``, ``empty_marker``
    and ``emptyReturnsMarker`` are accepted for interface compatibility
    but not used in this implementation.
    """
    values = []
    # selected services
    service_uids = form.get("uids", [])
    if not service_uids:
        # Inject empty fields for the validator
        values = [dict.fromkeys(field.getSubfields())]
    for uid in service_uids:
        # NOTE(review): _get_spec_value appears to return a falsy value
        # when the form entry is missing or not floatable -- confirm in
        # its definition.
        s_min = self._get_spec_value(form, uid, "min")
        s_max = self._get_spec_value(form, uid, "max")
        if not s_min and not s_max:
            # If user has not set value neither for min nor max, omit this
            # record. Otherwise, since 'min' and 'max' are defined as
            # mandatory subfields, the following message will appear after
            # submission: "Specifications is required, please correct."
            continue
        # TODO: disallow this case in the UI
        if s_min and s_max:
            if float(s_min) > float(s_max):
                # Inverted range: log a warning and drop the record.
                logger.warn("Min({}) > Max({}) is not allowed"
                            .format(s_min, s_max))
                continue
        # Operator values are passed through without the floatable check.
        min_operator = self._get_spec_value(
            form, uid, "min_operator", check_floatable=False)
        max_operator = self._get_spec_value(
            form, uid, "max_operator", check_floatable=False)
        service = api.get_object_by_uid(uid)
        values.append({
            "keyword": service.getKeyword(),
            "uid": uid,
            "min_operator": min_operator,
            "min": s_min,
            "max_operator": max_operator,
            "max": s_max,
            "warn_min": self._get_spec_value(form, uid, "warn_min"),
            "warn_max": self._get_spec_value(form, uid, "warn_max"),
            "hidemin": self._get_spec_value(form, uid, "hidemin"),
            "hidemax": self._get_spec_value(form, uid, "hidemax"),
            "rangecomment": self._get_spec_value(form, uid, "rangecomment",
                                                 check_floatable=False)})
    return values, {}
def delistify(a, b=None):
"""
If a single element list, extract the element as an object, otherwise
leave as it is.
Examples
--------
>>> delistify('string')
'string'
>>> delistify(['string'])
'string'
>>> delistify(['string', 'other'])
['string', 'other']
>>> delistify(np.array([1.0]))
1.0
>>> delistify([1, 2, 3])
[1, 2, 3]
"""
if isinstance(b, (tuple, list, np.ndarray)):
if isinstance(a, (tuple, list, np.ndarray)):
return type(b)(a)
return type(b)([a])
else:
if isinstance(a, (tuple, list, np.ndarray)) and len(a) == 1:
return a[0]
return a
return a | If a single element list, extract the element as an object, otherwise
leave as it is.
Examples
--------
>>> delistify('string')
'string'
>>> delistify(['string'])
'string'
>>> delistify(['string', 'other'])
['string', 'other']
>>> delistify(np.array([1.0]))
1.0
>>> delistify([1, 2, 3])
[1, 2, 3] | Below is the the instruction that describes the task:
### Input:
If a single element list, extract the element as an object, otherwise
leave as it is.
Examples
--------
>>> delistify('string')
'string'
>>> delistify(['string'])
'string'
>>> delistify(['string', 'other'])
['string', 'other']
>>> delistify(np.array([1.0]))
1.0
>>> delistify([1, 2, 3])
[1, 2, 3]
### Response:
def delistify(a, b=None):
    """
    If a single element list, extract the element as an object, otherwise
    leave as it is.  When ``b`` is a sequence, coerce ``a`` into the same
    container kind as ``b`` instead (wrapping scalars in a one-element
    container).

    Examples
    --------
    >>> delistify('string')
    'string'
    >>> delistify(['string'])
    'string'
    >>> delistify(['string', 'other'])
    ['string', 'other']
    >>> delistify(np.array([1.0]))
    1.0
    >>> delistify([1, 2, 3])
    [1, 2, 3]
    """
    if isinstance(b, (tuple, list, np.ndarray)):
        # Coerce ``a`` into b's container kind; wrap scalars first.
        elems = a if isinstance(a, (tuple, list, np.ndarray)) else [a]
        if isinstance(b, np.ndarray):
            # BUG FIX: type(b)(elems) invoked the low-level np.ndarray
            # constructor, which interprets a sequence argument as a
            # *shape* and returns uninitialized memory.
            return np.asarray(elems)
        return type(b)(elems)
    # No template container: unwrap single-element sequences only.
    if isinstance(a, (tuple, list, np.ndarray)) and len(a) == 1:
        return a[0]
    return a
def complete_hosts(self, text, line, begidx, endidx):
"Tab-complete 'creds' commands."
commands = ["add", "remove", "dc"]
mline = line.partition(' ')[2]
offs = len(mline) - len(text)
return [s[offs:] for s in commands if s.startswith(mline)] | Tab-complete 'creds' commands. | Below is the the instruction that describes the task:
### Input:
Tab-complete 'creds' commands.
### Response:
def complete_hosts(self, text, line, begidx, endidx):
    """Tab-complete the 'hosts' subcommands (add/remove/dc)."""
    subcommands = ("add", "remove", "dc")
    # Everything typed after the command word so far.
    typed = line.partition(' ')[2]
    start = len(typed) - len(text)
    return [cmd[start:] for cmd in subcommands if cmd.startswith(typed)]
def on_channel_click(self, channel_id=None, key=None, button=None):
"""Respond to the click on a channel."""
channels = self.channel_ids
if channels is None:
return
if len(channels) == 1:
self.on_select()
return
assert len(channels) >= 2
# Get the axis from the pressed button (1, 2, etc.)
# axis = 'x' if button == 1 else 'y'
d = 0 if button == 1 else 1
# Change the first or second best channel.
old = channels[d]
# Avoid updating the view if the channel doesn't change.
if channel_id == old:
return
channels[d] = channel_id
# Ensure that the first two channels are different.
if channels[1 - d] == channel_id:
channels[1 - d] = old
assert channels[0] != channels[1]
# Remove duplicate channels.
self.channel_ids = _uniq(channels)
logger.debug("Choose channels %d and %d in feature view.",
*channels[:2])
# Fix the channels temporarily.
self.on_select(fixed_channels=True) | Respond to the click on a channel. | Below is the the instruction that describes the task:
### Input:
Respond to the click on a channel.
### Response:
def on_channel_click(self, channel_id=None, key=None, button=None):
    """Respond to the click on a channel."""
    channels = self.channel_ids
    if channels is None:
        return
    if len(channels) == 1:
        self.on_select()
        return
    assert len(channels) >= 2
    # Button 1 replaces the primary (x) channel, any other button the
    # secondary (y) channel.
    slot = 0 if button == 1 else 1
    previous = channels[slot]
    # Avoid updating the view if the channel doesn't change.
    if channel_id == previous:
        return
    channels[slot] = channel_id
    other = 1 - slot
    if channels[other] == channel_id:
        # Keep the two displayed channels distinct by swapping.
        channels[other] = previous
    assert channels[0] != channels[1]
    # Remove duplicate channels.
    self.channel_ids = _uniq(channels)
    logger.debug("Choose channels %d and %d in feature view.",
                 *channels[:2])
    # Fix the channels temporarily.
    self.on_select(fixed_channels=True)
def sun_zenith_angle(utc_time, lon, lat):
"""Sun-zenith angle for *lon*, *lat* at *utc_time*.
lon,lat in degrees.
The angle returned is given in degrees
"""
return np.rad2deg(np.arccos(cos_zen(utc_time, lon, lat))) | Sun-zenith angle for *lon*, *lat* at *utc_time*.
lon,lat in degrees.
The angle returned is given in degrees | Below is the the instruction that describes the task:
### Input:
Sun-zenith angle for *lon*, *lat* at *utc_time*.
lon,lat in degrees.
The angle returned is given in degrees
### Response:
def sun_zenith_angle(utc_time, lon, lat):
    """Sun-zenith angle for *lon*, *lat* at *utc_time*.
    lon,lat in degrees.
    The angle returned is given in degrees
    """
    cos_theta = cos_zen(utc_time, lon, lat)
    return np.degrees(np.arccos(cos_theta))
def _parse_directive(self, directive):
"""
Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns
"""
words = directive.split()
if len(words) == 1 and words[0] not in ('include', 'exclude',
'global-include',
'global-exclude',
'recursive-include',
'recursive-exclude',
'graft', 'prune'):
# no action given, let's use the default 'include'
words.insert(0, 'include')
action = words[0]
patterns = thedir = dir_pattern = None
if action in ('include', 'exclude',
'global-include', 'global-exclude'):
if len(words) < 2:
raise DistlibException(
'%r expects <pattern1> <pattern2> ...' % action)
patterns = [convert_path(word) for word in words[1:]]
elif action in ('recursive-include', 'recursive-exclude'):
if len(words) < 3:
raise DistlibException(
'%r expects <dir> <pattern1> <pattern2> ...' % action)
thedir = convert_path(words[1])
patterns = [convert_path(word) for word in words[2:]]
elif action in ('graft', 'prune'):
if len(words) != 2:
raise DistlibException(
'%r expects a single <dir_pattern>' % action)
dir_pattern = convert_path(words[1])
else:
raise DistlibException('unknown action %r' % action)
return action, patterns, thedir, dir_pattern | Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns | Below is the the instruction that describes the task:
### Input:
Validate a directive.
:param directive: The directive to validate.
:return: A tuple of action, patterns, thedir, dir_patterns
### Response:
def _parse_directive(self, directive):
    """
    Validate a directive.

    :param directive: The directive to validate.
    :return: A tuple of action, patterns, thedir, dir_patterns
    """
    known_actions = ('include', 'exclude',
                     'global-include', 'global-exclude',
                     'recursive-include', 'recursive-exclude',
                     'graft', 'prune')
    words = directive.split()
    if len(words) == 1 and words[0] not in known_actions:
        # A bare pattern defaults to the 'include' action.
        words = ['include'] + words
    action = words[0]
    patterns = thedir = dir_pattern = None
    if action in ('include', 'exclude',
                  'global-include', 'global-exclude'):
        # These take one or more file patterns.
        if len(words) < 2:
            raise DistlibException(
                '%r expects <pattern1> <pattern2> ...' % action)
        patterns = [convert_path(word) for word in words[1:]]
    elif action in ('recursive-include', 'recursive-exclude'):
        # These take a directory followed by one or more patterns.
        if len(words) < 3:
            raise DistlibException(
                '%r expects <dir> <pattern1> <pattern2> ...' % action)
        thedir = convert_path(words[1])
        patterns = [convert_path(word) for word in words[2:]]
    elif action in ('graft', 'prune'):
        # These take exactly one directory pattern.
        if len(words) != 2:
            raise DistlibException(
                '%r expects a single <dir_pattern>' % action)
        dir_pattern = convert_path(words[1])
    else:
        raise DistlibException('unknown action %r' % action)
    return action, patterns, thedir, dir_pattern
def do_interval( sources, index, out, ref_src, start, end, seq_db, missing_data, strand ):
    """
    Join together alignment blocks to create a semi human projected local
    alignment (small reference sequence deletions are kept as supported by
    the local alignment).

    Parameters (inferred from usage -- confirm against callers):
      sources       -- ordered source names; sources[0] must share its
                       species prefix with ``ref_src`` (asserted below)
      index         -- MAF index supporting ``.get( src, start, end )``
      out           -- writer supporting ``.write( alignment )``
      ref_src       -- reference source name, e.g. "hg18.chr1"
      start, end    -- half-open interval on the reference (+ strand coords)
      seq_db        -- maps a source name to a nib sequence file path
      missing_data  -- NOTE(review): accepted but never used in this body
      strand        -- '+' or '-'; '-' reverse-complements the result
    """
    ref_src_size = None
    # Make sure the reference component is also the first in the source list
    assert sources[0].split('.')[0] == ref_src.split('.')[0], "%s != %s" \
        % ( sources[0].split('.')[0], ref_src.split('.')[0] )
    # Determine the overall length of the interval
    base_len = end - start
    # Counter for the last reference species base we have processed
    last_stop = start
    # Rows in maf blocks come in arbitrary order, we'll convert things
    # to the desired order of the tiled block
    source_to_index = dict( ( name, i ) for ( i, name ) in enumerate( sources ) )
    # This gets all the maf blocks overlapping our interval of interest
    # NOTE: Unlike maf_tile we're expecting
    # things to be single coverage in the reference species, so we won't
    # sort by score and lay down.
    blocks = index.get( ref_src, start, end )
    # The last component seen for each species onto which we are tiling
    last_components = [ None ] * len( sources )
    last_status = [ None ] * len( sources )
    cols_needing_fill = [ 0 ] * len( sources )
    # The list of strings in which we build up the tiled alignment
    tiled_rows = [ "" for i in range( len( sources ) ) ]
    # Enumerate the (ordered) list of blocks
    for i, block in enumerate( blocks ):
        # Check for overlap in reference species
        ref = block.get_component_by_src_start( ref_src )
        if ref.start < last_stop:
            if ref.end < last_stop:
                continue
            block = block.slice_by_component( ref, last_stop, min( end, ref.end ) )
            ref = block.get_component_by_src_start( ref_src )
        block = block.slice_by_component( ref, max( start, ref.start ), min( end, ref.end ) )
        ref = block.get_component_by_src_start( ref_src )
        # print block
        assert last_components[0] is None or ref.start >= last_components[0].end, \
            "MAF must be sorted and single coverage in reference species!"
        assert ref.strand == "+", \
            "MAF must have all reference species blocks on the plus strand"
        # Store the size of the reference sequence for building fake block
        if ref_src_size is None:
            ref_src_size = ref.src_size
        # Handle the reference component separately, it has no synteny status
        # but we will try to fill in missing sequence
        if ref.start > last_stop:
            # Need to fill in some reference sequence
            chunk_len = ref.start - last_stop
            text = bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( last_stop, chunk_len )
            tiled_rows[0] += text
            for source in sources[1:]:
                cols_needing_fill[ source_to_index[ source ] ] += chunk_len
        # Do reference component
        chunk_len = len( ref.text )
        tiled_rows[0] += ref.text
        # Do each other component
        for source in sources[1:]:
            source_index = source_to_index[ source ]
            comp = block.get_component_by_src_start( source )
            if comp:
                if comp.synteny_left is None:
                    left_status, left_length = None, -1
                else:
                    left_status, left_length = comp.synteny_left
                if comp.synteny_right is None:
                    right_status, right_length = None, -1
                else:
                    right_status, right_length = comp.synteny_right
                # We have a component, do we need to do some filling?
                cols_to_fill = cols_needing_fill[ source_index ]
                if cols_to_fill > 0:
                    # Adjacent components should have matching status
                    ## assert last_status[ source_index ] is None or last_status[ source_index ] == left_status, \
                    ##     "left status (%s) does not match right status (%s) of last component for %s" \
                    ##     % ( left_status, last_status[ source_index ], source )
                    if left_status is None:
                        fill_char = guess_fill_char( last_components[source_index], comp )
                    else:
                        fill_char = get_fill_char( left_status )
                    tiled_rows[ source_index ] += ( fill_char * cols_to_fill )
                    cols_needing_fill[ source_index ] = 0
                # Okay, filled up to current position, now append the text
                tiled_rows[ source_index ] += comp.text
                assert len( tiled_rows[ source_index ] ) == len( tiled_rows[ 0 ] ), \
                    "length of tiled row should match reference row"
                last_components[ source_index ] = comp
                last_status[ source_index ] = right_status
            else:
                # No component, we'll have to fill this region when we know
                # the status
                cols_needing_fill[ source_index ] += chunk_len
        last_stop = ref.end
    # No more components, clean up the ends
    if last_stop < end:
        # Need to fill in some reference sequence
        chunk_len = end - last_stop
        tiled_rows[0] += bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( last_stop, chunk_len )
        for source in sources[1:]:
            cols_needing_fill[ source_to_index[ source ] ] += chunk_len
    # Any final filling that needs to be done?
    for source in sources[1:]:
        source_index = source_to_index[ source ]
        fill_needed = cols_needing_fill[ source_index ]
        if fill_needed > 0:
            if last_components[ source_index ] is None:
                # print >>sys.stderr, "Never saw any components for %s, filling with @" % source
                fill_char = '@'
            else:
                if last_status[ source_index ] is None:
                    fill_char = '*'
                else:
                    fill_char = get_fill_char( last_status[ source_index ] )
            tiled_rows[ source_index ] += fill_char * fill_needed
        assert len( tiled_rows[ source_index ] ) == len( tiled_rows[ 0 ] ), \
            "length of tiled row should match reference row"
    # Okay, now make up the fake alignment from the tiled rows.
    tiled_rows = remove_all_gap_columns( tiled_rows )
    a = align.Alignment()
    for i, name in enumerate( sources ):
        text = "".join( tiled_rows[i] )
        size = len( text ) - text.count( "-" )
        if i == 0:
            if ref_src_size is None: ref_src_size = bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).length
            c = align.Component( ref_src, start, end-start, "+", ref_src_size, text )
        else:
            c = align.Component( name + ".fake", 0, size, "?", size, text )
        a.add_component( c )
    if strand == '-':
        a = a.reverse_complement()
out.write( a ) | Join together alignment blocks to create a semi human projected local
alignment (small reference sequence deletions are kept as supported by
    the local alignment). | Below is the instruction that describes the task:
### Input:
Join together alignment blocks to create a semi human projected local
alignment (small reference sequence deletions are kept as supported by
the local alignment).
### Response:
def do_interval( sources, index, out, ref_src, start, end, seq_db, missing_data, strand ):
    """
    Join together alignment blocks to create a semi human projected local
    alignment (small reference sequence deletions are kept as supported by
    the local alignment).

    Parameters (inferred from usage -- confirm against callers):
      sources       -- ordered source names; sources[0] must share its
                       species prefix with ``ref_src`` (asserted below)
      index         -- MAF index supporting ``.get( src, start, end )``
      out           -- writer supporting ``.write( alignment )``
      ref_src       -- reference source name, e.g. "hg18.chr1"
      start, end    -- half-open interval on the reference (+ strand coords)
      seq_db        -- maps a source name to a nib sequence file path
      missing_data  -- NOTE(review): accepted but never used in this body
      strand        -- '+' or '-'; '-' reverse-complements the result
    """
    ref_src_size = None
    # Make sure the reference component is also the first in the source list
    assert sources[0].split('.')[0] == ref_src.split('.')[0], "%s != %s" \
        % ( sources[0].split('.')[0], ref_src.split('.')[0] )
    # Determine the overall length of the interval
    base_len = end - start
    # Counter for the last reference species base we have processed
    last_stop = start
    # Rows in maf blocks come in arbitrary order, we'll convert things
    # to the desired order of the tiled block
    source_to_index = dict( ( name, i ) for ( i, name ) in enumerate( sources ) )
    # This gets all the maf blocks overlapping our interval of interest
    # NOTE: Unlike maf_tile we're expecting
    # things to be single coverage in the reference species, so we won't
    # sort by score and lay down.
    blocks = index.get( ref_src, start, end )
    # The last component seen for each species onto which we are tiling
    last_components = [ None ] * len( sources )
    last_status = [ None ] * len( sources )
    cols_needing_fill = [ 0 ] * len( sources )
    # The list of strings in which we build up the tiled alignment
    tiled_rows = [ "" for i in range( len( sources ) ) ]
    # Enumerate the (ordered) list of blocks
    for i, block in enumerate( blocks ):
        # Check for overlap in reference species
        ref = block.get_component_by_src_start( ref_src )
        if ref.start < last_stop:
            if ref.end < last_stop:
                continue
            block = block.slice_by_component( ref, last_stop, min( end, ref.end ) )
            ref = block.get_component_by_src_start( ref_src )
        block = block.slice_by_component( ref, max( start, ref.start ), min( end, ref.end ) )
        ref = block.get_component_by_src_start( ref_src )
        # print block
        assert last_components[0] is None or ref.start >= last_components[0].end, \
            "MAF must be sorted and single coverage in reference species!"
        assert ref.strand == "+", \
            "MAF must have all reference species blocks on the plus strand"
        # Store the size of the reference sequence for building fake block
        if ref_src_size is None:
            ref_src_size = ref.src_size
        # Handle the reference component separately, it has no synteny status
        # but we will try to fill in missing sequence
        if ref.start > last_stop:
            # Need to fill in some reference sequence
            chunk_len = ref.start - last_stop
            text = bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( last_stop, chunk_len )
            tiled_rows[0] += text
            for source in sources[1:]:
                cols_needing_fill[ source_to_index[ source ] ] += chunk_len
        # Do reference component
        chunk_len = len( ref.text )
        tiled_rows[0] += ref.text
        # Do each other component
        for source in sources[1:]:
            source_index = source_to_index[ source ]
            comp = block.get_component_by_src_start( source )
            if comp:
                if comp.synteny_left is None:
                    left_status, left_length = None, -1
                else:
                    left_status, left_length = comp.synteny_left
                if comp.synteny_right is None:
                    right_status, right_length = None, -1
                else:
                    right_status, right_length = comp.synteny_right
                # We have a component, do we need to do some filling?
                cols_to_fill = cols_needing_fill[ source_index ]
                if cols_to_fill > 0:
                    # Adjacent components should have matching status
                    ## assert last_status[ source_index ] is None or last_status[ source_index ] == left_status, \
                    ##     "left status (%s) does not match right status (%s) of last component for %s" \
                    ##     % ( left_status, last_status[ source_index ], source )
                    if left_status is None:
                        fill_char = guess_fill_char( last_components[source_index], comp )
                    else:
                        fill_char = get_fill_char( left_status )
                    tiled_rows[ source_index ] += ( fill_char * cols_to_fill )
                    cols_needing_fill[ source_index ] = 0
                # Okay, filled up to current position, now append the text
                tiled_rows[ source_index ] += comp.text
                assert len( tiled_rows[ source_index ] ) == len( tiled_rows[ 0 ] ), \
                    "length of tiled row should match reference row"
                last_components[ source_index ] = comp
                last_status[ source_index ] = right_status
            else:
                # No component, we'll have to fill this region when we know
                # the status
                cols_needing_fill[ source_index ] += chunk_len
        last_stop = ref.end
    # No more components, clean up the ends
    if last_stop < end:
        # Need to fill in some reference sequence
        chunk_len = end - last_stop
        tiled_rows[0] += bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( last_stop, chunk_len )
        for source in sources[1:]:
            cols_needing_fill[ source_to_index[ source ] ] += chunk_len
    # Any final filling that needs to be done?
    for source in sources[1:]:
        source_index = source_to_index[ source ]
        fill_needed = cols_needing_fill[ source_index ]
        if fill_needed > 0:
            if last_components[ source_index ] is None:
                # print >>sys.stderr, "Never saw any components for %s, filling with @" % source
                fill_char = '@'
            else:
                if last_status[ source_index ] is None:
                    fill_char = '*'
                else:
                    fill_char = get_fill_char( last_status[ source_index ] )
            tiled_rows[ source_index ] += fill_char * fill_needed
        assert len( tiled_rows[ source_index ] ) == len( tiled_rows[ 0 ] ), \
            "length of tiled row should match reference row"
    # Okay, now make up the fake alignment from the tiled rows.
    tiled_rows = remove_all_gap_columns( tiled_rows )
    a = align.Alignment()
    for i, name in enumerate( sources ):
        text = "".join( tiled_rows[i] )
        size = len( text ) - text.count( "-" )
        if i == 0:
            if ref_src_size is None: ref_src_size = bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).length
            c = align.Component( ref_src, start, end-start, "+", ref_src_size, text )
        else:
            c = align.Component( name + ".fake", 0, size, "?", size, text )
        a.add_component( c )
    if strand == '-':
        a = a.reverse_complement()
    out.write( a )
def GetTSKFileByPathSpec(self, path_spec):
    """Retrieves the SleuthKit file object for a path specification.
    Args:
      path_spec (PathSpec): path specification.
    Returns:
      pytsk3.File: TSK file.
    Raises:
      PathSpecError: if the path specification is missing inode and location.
    """
    # Opening a file by inode number is faster than opening a file
    # by location.
    # When both inode and location are present, the inode wins.
    inode = getattr(path_spec, 'inode', None)
    location = getattr(path_spec, 'location', None)
    if inode is not None:
      tsk_file = self._tsk_file_system.open_meta(inode=inode)
    elif location is not None:
      tsk_file = self._tsk_file_system.open(location)
    else:
      raise errors.PathSpecError(
          'Path specification missing inode and location.')
return tsk_file | Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
      PathSpecError: if the path specification is missing inode and location. | Below is the instruction that describes the task:
### Input:
Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
PathSpecError: if the path specification is missing inode and location.
### Response:
def GetTSKFileByPathSpec(self, path_spec):
    """Retrieves the SleuthKit file object for a path specification.
    Args:
      path_spec (PathSpec): path specification.
    Returns:
      pytsk3.File: TSK file.
    Raises:
      PathSpecError: if the path specification is missing inode and location.
    """
    inode = getattr(path_spec, 'inode', None)
    location = getattr(path_spec, 'location', None)
    # Resolving by inode number is faster than resolving by location,
    # so prefer the inode whenever the path specification carries one.
    if inode is not None:
        return self._tsk_file_system.open_meta(inode=inode)
    if location is not None:
        return self._tsk_file_system.open(location)
    raise errors.PathSpecError(
        'Path specification missing inode and location.')
def read_header(self, file_handle, nextdata_offset=0):
        """Read the header of the FCS file.
        The header specifies where the annotation, data and analysis are located inside the binary
        file.
        Args:
            file_handle: buffer containing FCS file.
            nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
        """
        header = {'FCS format': file_handle.read(6)}
        file_handle.read(4) # 4 space characters after the FCS format
        for field in ('text start', 'text end', 'data start', 'data end', 'analysis start',
                      'analysis end'):
            s = file_handle.read(8)
            try:
                field_value = int(s)
            except ValueError:
                # Offset not specified in the header; treat it as 0.
                field_value = 0
            header[field] = field_value + nextdata_offset
        # Checking that the location of the TEXT segment is specified
        for k in ('text start', 'text end'):
            if header[k] == 0:
                raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate '
                                 u'information about the "{}" segment.)'.format(self.path, k))
            elif header[k] > self._file_size:
                raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment '
                                 u'is larger than file size'.format(self.path, k))
            else:
                # All OK
                pass
        self._data_start = header['data start']
        # NOTE(review): this stores 'data start' into _data_end -- almost
        # certainly should be header['data end']; confirm and fix.
        self._data_end = header['data start']
        if header['analysis end'] - header['analysis start'] != 0:
            warnings.warn(u'There appears to be some information in the ANALYSIS segment of file '
                          u'{0}. However, it might not be read correctly.'.format(self.path))
self.annotation['__header__'] = header | Read the header of the FCS file.
The header specifies where the annotation, data and analysis are located inside the binary
file.
Args:
file_handle: buffer containing FCS file.
        nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA | Below is the instruction that describes the task:
### Input:
Read the header of the FCS file.
The header specifies where the annotation, data and analysis are located inside the binary
file.
Args:
file_handle: buffer containing FCS file.
nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
### Response:
def read_header(self, file_handle, nextdata_offset=0):
    """Read the header of the FCS file.

    The header specifies where the annotation, data and analysis are located
    inside the binary file.

    Args:
        file_handle: buffer containing FCS file.
        nextdata_offset: byte offset of a set header from file start specified by $NEXTDATA
    """
    header = {'FCS format': file_handle.read(6)}
    file_handle.read(4)  # 4 space characters after the FCS format
    for field in ('text start', 'text end', 'data start', 'data end', 'analysis start',
                  'analysis end'):
        s = file_handle.read(8)
        try:
            field_value = int(s)
        except ValueError:
            # Offset not specified in the header; treat it as 0.
            field_value = 0
        header[field] = field_value + nextdata_offset
    # Checking that the location of the TEXT segment is specified
    for k in ('text start', 'text end'):
        if header[k] == 0:
            raise ValueError(u'The FCS file "{}" seems corrupted. (Parser cannot locate '
                             u'information about the "{}" segment.)'.format(self.path, k))
        elif header[k] > self._file_size:
            raise ValueError(u'The FCS file "{}" is corrupted. "{}" segment '
                             u'is larger than file size'.format(self.path, k))
        else:
            # All OK
            pass
    self._data_start = header['data start']
    # BUG FIX: previously assigned header['data start'] here, which made
    # _data_end point at the beginning of the DATA segment instead of its end.
    self._data_end = header['data end']
    if header['analysis end'] - header['analysis start'] != 0:
        warnings.warn(u'There appears to be some information in the ANALYSIS segment of file '
                      u'{0}. However, it might not be read correctly.'.format(self.path))
    self.annotation['__header__'] = header
def _name_for_command(command):
    r"""Craft a simple command name from the command.
    The best command strings for this are going to be those where a simple
    command was given; we will use the command to derive the name.
    We won't always be able to figure something out and the caller should just
    specify a "--name" on the command-line.
    For example, commands like "export VAR=val\necho ${VAR}", this function would
    return "export".
    If the command starts space or a comment, then we'll skip to the first code
    we can find.
    If we find nothing, just return "command".
    >>> _name_for_command('samtools index "${BAM}"')
    'samtools'
    >>> _name_for_command('/usr/bin/sort "${INFILE}" > "${OUTFILE}"')
    'sort'
    >>> _name_for_command('# This should be ignored')
    'command'
    >>> _name_for_command('\\\n\\\n# Bad continuations, but ignore.\necho hello.')
    'echo'
    Arguments:
      command: the user-provided command
    Returns:
      a proposed name for the task.
    """
    lines = command.splitlines()
    for line in lines:
        line = line.strip()
        # Skip blank lines, comments, and bare line-continuation backslashes;
        # the first remaining line's first word (basename) becomes the name.
        if line and not line.startswith('#') and line != '\\':
            return os.path.basename(re.split(r'\s', line)[0])
return 'command' | r"""Craft a simple command name from the command.
The best command strings for this are going to be those where a simple
command was given; we will use the command to derive the name.
We won't always be able to figure something out and the caller should just
specify a "--name" on the command-line.
For example, commands like "export VAR=val\necho ${VAR}", this function would
return "export".
If the command starts space or a comment, then we'll skip to the first code
we can find.
If we find nothing, just return "command".
>>> _name_for_command('samtools index "${BAM}"')
'samtools'
>>> _name_for_command('/usr/bin/sort "${INFILE}" > "${OUTFILE}"')
'sort'
>>> _name_for_command('# This should be ignored')
'command'
>>> _name_for_command('\\\n\\\n# Bad continuations, but ignore.\necho hello.')
'echo'
Arguments:
command: the user-provided command
Returns:
    a proposed name for the task. | Below is the instruction that describes the task:
### Input:
r"""Craft a simple command name from the command.
The best command strings for this are going to be those where a simple
command was given; we will use the command to derive the name.
We won't always be able to figure something out and the caller should just
specify a "--name" on the command-line.
For example, commands like "export VAR=val\necho ${VAR}", this function would
return "export".
If the command starts space or a comment, then we'll skip to the first code
we can find.
If we find nothing, just return "command".
>>> _name_for_command('samtools index "${BAM}"')
'samtools'
>>> _name_for_command('/usr/bin/sort "${INFILE}" > "${OUTFILE}"')
'sort'
>>> _name_for_command('# This should be ignored')
'command'
>>> _name_for_command('\\\n\\\n# Bad continuations, but ignore.\necho hello.')
'echo'
Arguments:
command: the user-provided command
Returns:
a proposed name for the task.
### Response:
def _name_for_command(command):
r"""Craft a simple command name from the command.
The best command strings for this are going to be those where a simple
command was given; we will use the command to derive the name.
We won't always be able to figure something out and the caller should just
specify a "--name" on the command-line.
For example, commands like "export VAR=val\necho ${VAR}", this function would
return "export".
If the command starts space or a comment, then we'll skip to the first code
we can find.
If we find nothing, just return "command".
>>> _name_for_command('samtools index "${BAM}"')
'samtools'
>>> _name_for_command('/usr/bin/sort "${INFILE}" > "${OUTFILE}"')
'sort'
>>> _name_for_command('# This should be ignored')
'command'
>>> _name_for_command('\\\n\\\n# Bad continuations, but ignore.\necho hello.')
'echo'
Arguments:
command: the user-provided command
Returns:
a proposed name for the task.
"""
lines = command.splitlines()
for line in lines:
line = line.strip()
if line and not line.startswith('#') and line != '\\':
return os.path.basename(re.split(r'\s', line)[0])
return 'command' |
def inverse(self):
        """Take the inverse of the similarity transform.
        Returns
        -------
        :obj:`SimilarityTransform`
            The inverse of this SimilarityTransform.
        """
        # Assuming the forward map is x' = scale * R x + t (confirm against
        # the class docs), the inverse uses R^-1 at scale 1/s with
        # translation -(1/s) * R^-1 t, and swaps the frame labels.
        inv_rot = np.linalg.inv(self.rotation)
        inv_scale = 1.0 / self.scale
        inv_trans = -inv_scale * inv_rot.dot(self.translation)
        return SimilarityTransform(inv_rot, inv_trans, inv_scale,
                                   from_frame=self._to_frame,
to_frame=self._from_frame) | Take the inverse of the similarity transform.
Returns
-------
:obj:`SimilarityTransform`
        The inverse of this SimilarityTransform. | Below is the instruction that describes the task:
### Input:
Take the inverse of the similarity transform.
Returns
-------
:obj:`SimilarityTransform`
The inverse of this SimilarityTransform.
### Response:
def inverse(self):
    """Take the inverse of the similarity transform.

    Returns
    -------
    :obj:`SimilarityTransform`
        The inverse of this SimilarityTransform.
    """
    rotation_inv = np.linalg.inv(self.rotation)
    scale_inv = 1.0 / self.scale
    translation_inv = -scale_inv * rotation_inv.dot(self.translation)
    # The inverse maps in the opposite direction, so the frames swap.
    return SimilarityTransform(rotation_inv,
                               translation_inv,
                               scale_inv,
                               from_frame=self._to_frame,
                               to_frame=self._from_frame)
def _buffer_extract(self, data):
        """
        Extract takes an arbitrary string of input data and returns an array of
        tokenized entities, provided there were any available to extract. This
        makes for easy processing of datagrams using a pattern like:
        tokenizer.extract(data).map { |entity| Decode(entity) }.each do ..."""
        # Extract token-delimited entities from the input string with the split command.
        # There's a bit of craftiness here with the -1 parameter. Normally split would
        # behave no differently regardless of if the token lies at the very end of the
        # input buffer or not (i.e. a literal edge case) Specifying -1 forces split to
        # return "" in this case, meaning that the last entry in the list represents a
        # new segment of data where the token has not been encountered
        entities = collections.deque(data.split(self._delimiter, -1))
        # Check to see if the buffer has exceeded capacity, if we're imposing a limit
        # NOTE(review): this reads the public `input_size` (presumably a property
        # over `_input_size`) but writes `_input_size` below -- confirm they agree.
        if self._size_limit:
            if self.input_size + len(entities[0]) > self._size_limit:
                raise Exception('input buffer full')
            self._input_size += len(entities[0])
        # Move the first entry in the resulting array into the input buffer. It represents
        # the last segment of a token-delimited entity unless it's the only entry in the list.
        first_entry = entities.popleft()
        if len(first_entry) > 0:
            self._input.append(first_entry)
        # If the resulting array from the split is empty, the token was not encountered
        # (not even at the end of the buffer). Since we've encountered no token-delimited
        # entities this go-around, return an empty array.
        if len(entities) == 0:
            return []
        # At this point, we've hit a token, or potentially multiple tokens. Now we can bring
        # together all the data we've buffered from earlier calls without hitting a token,
        # and add it to our list of discovered entities.
        entities.appendleft(''.join(self._input))
        # Now that we've hit a token, joined the input buffer and added it to the entities
        # list, we can go ahead and clear the input buffer. All of the segments that were
        # stored before the join can now be garbage collected.
        self._input.clear()
        # The last entity in the list is not token delimited, however, thanks to the -1
        # passed to split. It represents the beginning of a new list of as-yet-untokenized
        # data, so we add it to the start of the list.
        self._input.append(entities.pop())
        # Set the new input buffer size, provided we're keeping track
        if self._size_limit:
            self._input_size = len(self._input[0])
        # Now we're left with the list of extracted token-delimited entities we wanted
        # in the first place. Hooray!
return entities | Extract takes an arbitrary string of input data and returns an array of
tokenized entities, provided there were any available to extract. This
makes for easy processing of datagrams using a pattern like:
    tokenizer.extract(data).map { |entity| Decode(entity) }.each do ... | Below is the instruction that describes the task:
### Input:
Extract takes an arbitrary string of input data and returns an array of
tokenized entities, provided there were any available to extract. This
makes for easy processing of datagrams using a pattern like:
tokenizer.extract(data).map { |entity| Decode(entity) }.each do ...
### Response:
def _buffer_extract(self, data):
"""
Extract takes an arbitrary string of input data and returns an array of
tokenized entities, provided there were any available to extract. This
makes for easy processing of datagrams using a pattern like:
tokenizer.extract(data).map { |entity| Decode(entity) }.each do ..."""
# Extract token-delimited entities from the input string with the split command.
# There's a bit of craftiness here with the -1 parameter. Normally split would
# behave no differently regardless of if the token lies at the very end of the
# input buffer or not (i.e. a literal edge case) Specifying -1 forces split to
# return "" in this case, meaning that the last entry in the list represents a
# new segment of data where the token has not been encountered
entities = collections.deque(data.split(self._delimiter, -1))
# Check to see if the buffer has exceeded capacity, if we're imposing a limit
if self._size_limit:
if self.input_size + len(entities[0]) > self._size_limit:
raise Exception('input buffer full')
self._input_size += len(entities[0])
# Move the first entry in the resulting array into the input buffer. It represents
# the last segment of a token-delimited entity unless it's the only entry in the list.
first_entry = entities.popleft()
if len(first_entry) > 0:
self._input.append(first_entry)
# If the resulting array from the split is empty, the token was not encountered
# (not even at the end of the buffer). Since we've encountered no token-delimited
# entities this go-around, return an empty array.
if len(entities) == 0:
return []
# At this point, we've hit a token, or potentially multiple tokens. Now we can bring
# together all the data we've buffered from earlier calls without hitting a token,
# and add it to our list of discovered entities.
entities.appendleft(''.join(self._input))
# Now that we've hit a token, joined the input buffer and added it to the entities
# list, we can go ahead and clear the input buffer. All of the segments that were
# stored before the join can now be garbage collected.
self._input.clear()
# The last entity in the list is not token delimited, however, thanks to the -1
# passed to split. It represents the beginning of a new list of as-yet-untokenized
# data, so we add it to the start of the list.
self._input.append(entities.pop())
# Set the new input buffer size, provided we're keeping track
if self._size_limit:
self._input_size = len(self._input[0])
# Now we're left with the list of extracted token-delimited entities we wanted
# in the first place. Hooray!
return entities |
def _graph_reduction(adj, x, g, f):
    """we can go ahead and remove any simplicial or almost-simplicial vertices from adj.
    """
    as_list = set()
    as_nodes = {v for v in adj if len(adj[v]) <= f and is_almost_simplicial(adj, v)}
    while as_nodes:
        # NOTE(review): set.union() returns a NEW set and does not mutate
        # as_list; the result is discarded here, so as_list stays empty.
        # Probably meant as_list.update(as_nodes) / as_list |= as_nodes.
        as_list.union(as_nodes)
        for n in as_nodes:
            # update g and f
            dv = len(adj[n])
            if dv > g:
                g = dv
            if g > f:
                f = g
            # eliminate v
            x.append(n)
            _elim_adj(adj, n)
        # see if we have any more simplicial nodes
        as_nodes = {v for v in adj if len(adj[v]) <= f and is_almost_simplicial(adj, v)}
return g, f, as_list | we can go ahead and remove any simplicial or almost-simplicial vertices from adj. | Below is the the instruction that describes the task:
### Input:
we can go ahead and remove any simplicial or almost-simplicial vertices from adj.
### Response:
def _graph_reduction(adj, x, g, f):
    """Eliminate simplicial / almost-simplicial vertices from ``adj`` in place.

    Repeatedly removes every vertex whose degree is at most ``f`` and that is
    almost simplicial, appending each eliminated vertex to the elimination
    order ``x`` and updating the bound parameters.

    Args:
        adj: mutable adjacency mapping {vertex: neighbours}; vertices are
            eliminated in place via ``_elim_adj``.
        x: elimination order accumulated so far; eliminated vertices appended.
        g: lower bound parameter; raised to the largest eliminated degree.
        f: upper bound parameter; raised so that ``f >= g`` is maintained.

    Returns:
        Tuple ``(g, f, as_list)`` with the updated bounds and the set of
        vertices eliminated by this call.
    """
    as_list = set()
    as_nodes = {v for v in adj if len(adj[v]) <= f and is_almost_simplicial(adj, v)}
    while as_nodes:
        # BUG FIX: set.union() returns a new set without mutating as_list,
        # so the original discarded the result and always returned an empty
        # set.  update() actually records the eliminated vertices.
        as_list.update(as_nodes)
        for n in as_nodes:
            # update g and f
            dv = len(adj[n])
            if dv > g:
                g = dv
            if g > f:
                f = g
            # eliminate v
            x.append(n)
            _elim_adj(adj, n)
        # see if we have any more simplicial nodes
        as_nodes = {v for v in adj if len(adj[v]) <= f and is_almost_simplicial(adj, v)}
    return g, f, as_list
def apply_raw(self):
        """ apply to the values as a numpy array """
        try:
            result = reduction.reduce(self.values, self.f, axis=self.axis)
        except Exception:
            # Any failure in the fast cython reducer (unsupported function,
            # dtype, etc.) falls back to the pure-numpy implementation.
            result = np.apply_along_axis(self.f, self.axis, self.values)
        # TODO: mixed type case
        # A 2-d result becomes a frame; anything else becomes a series.
        if result.ndim == 2:
            return self.obj._constructor(result,
                                         index=self.index,
                                         columns=self.columns)
        else:
            return self.obj._constructor_sliced(result,
index=self.agg_axis) | apply to the values as a numpy array | Below is the the instruction that describes the task:
### Input:
apply to the values as a numpy array
### Response:
def apply_raw(self):
    """Apply the function to the values as a raw numpy array.

    Tries the fast cython reduction first and falls back to
    ``np.apply_along_axis`` whenever it cannot handle the input.
    A 2-d result is wrapped as a frame, anything else as a series.
    """
    try:
        result = reduction.reduce(self.values, self.f, axis=self.axis)
    except Exception:
        result = np.apply_along_axis(self.f, self.axis, self.values)
    # TODO: mixed type case
    if result.ndim != 2:
        return self.obj._constructor_sliced(result, index=self.agg_axis)
    return self.obj._constructor(result,
                                 index=self.index,
                                 columns=self.columns)
def clicks(times, fs, click=None, length=None):
    """Returns a signal with the signal 'click' placed at each specified time
    Parameters
    ----------
    times : np.ndarray
        times to place clicks, in seconds
    fs : int
        desired sampling rate of the output signal
    click : np.ndarray
        click signal, defaults to a 1 kHz blip
    length : int
        desired number of samples in the output signal,
        defaults to ``times.max()*fs + click.shape[0] + 1``
    Returns
    -------
    click_signal : np.ndarray
        Synthesized click signal
    """
    # Create default click signal
    if click is None:
        # 1 kHz tone, 100ms
        click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
        # Exponential decay (time constant of 10 ms)
        click *= np.exp(-np.arange(fs*.1)/(fs*.01))
    # Set default length
    if length is None:
        length = int(times.max()*fs + click.shape[0] + 1)
    # Pre-allocate click signal
    click_signal = np.zeros(length)
    # Place clicks
    for time in times:
        # Compute the boundaries of the click
        start = int(time*fs)
        end = start + click.shape[0]
        # Make sure we don't try to output past the end of the signal
        if start >= length:
            break
        if end >= length:
            # Truncate the final click at the signal boundary
            click_signal[start:] = click[:length - start]
            break
        # Normally, just add a click here
        click_signal[start:end] = click
return click_signal | Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
        Synthesized click signal | Below is the instruction that describes the task:
### Input:
Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
### Response:
def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal |
def _morphy_best(word, pos=None):
"""
Get the most likely stem for a word using Morphy, once the input has been
pre-processed by morphy_stem().
"""
results = []
if pos is None:
pos = 'nvar'
for pos_item in pos:
results.extend(morphy(word, pos_item))
if not results:
return None
results.sort(key=lambda x: _word_badness(x))
return results[0] | Get the most likely stem for a word using Morphy, once the input has been
pre-processed by morphy_stem(). | Below is the the instruction that describes the task:
### Input:
Get the most likely stem for a word using Morphy, once the input has been
pre-processed by morphy_stem().
### Response:
def _morphy_best(word, pos=None):
"""
Get the most likely stem for a word using Morphy, once the input has been
pre-processed by morphy_stem().
"""
results = []
if pos is None:
pos = 'nvar'
for pos_item in pos:
results.extend(morphy(word, pos_item))
if not results:
return None
results.sort(key=lambda x: _word_badness(x))
return results[0] |
def format_diff_pyxb(a_pyxb, b_pyxb):
"""Create a diff between two PyXB objects.
Args:
a_pyxb: PyXB object
b_pyxb: PyXB object
Returns:
str : `Differ`-style delta
"""
return '\n'.join(
difflib.ndiff(
serialize_to_xml_str(a_pyxb).splitlines(),
serialize_to_xml_str(b_pyxb).splitlines(),
)
) | Create a diff between two PyXB objects.
Args:
a_pyxb: PyXB object
b_pyxb: PyXB object
Returns:
str : `Differ`-style delta | Below is the the instruction that describes the task:
### Input:
Create a diff between two PyXB objects.
Args:
a_pyxb: PyXB object
b_pyxb: PyXB object
Returns:
str : `Differ`-style delta
### Response:
def format_diff_pyxb(a_pyxb, b_pyxb):
"""Create a diff between two PyXB objects.
Args:
a_pyxb: PyXB object
b_pyxb: PyXB object
Returns:
str : `Differ`-style delta
"""
return '\n'.join(
difflib.ndiff(
serialize_to_xml_str(a_pyxb).splitlines(),
serialize_to_xml_str(b_pyxb).splitlines(),
)
) |
def send_shift_handoff(
self,
parent,
recipients,
subject,
cc=None,
notes_content_type=None,
notes_content=None,
incidents=None,
preview_only=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sends a summary of the shift for oncall handoff.
Example:
>>> from google.cloud import irm_v1alpha2
>>>
>>> client = irm_v1alpha2.IncidentServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `recipients`:
>>> recipients = []
>>>
>>> # TODO: Initialize `subject`:
>>> subject = ''
>>>
>>> response = client.send_shift_handoff(parent, recipients, subject)
Args:
parent (str): The resource name of the Stackdriver project that the handoff is being
sent from. for example, ``projects/{project_id}``
recipients (list[str]): Email addresses of the recipients of the handoff, for example,
"user@example.com". Must contain at least one entry.
subject (str): The subject of the email. Required.
cc (list[str]): Email addresses that should be CC'd on the handoff. Optional.
notes_content_type (str): Content type string, for example, 'text/plain' or 'text/html'.
notes_content (str): Additional notes to be included in the handoff. Optional.
incidents (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Incident]]): The set of incidents that should be included in the handoff. Optional.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.irm_v1alpha2.types.Incident`
preview_only (bool): If set to true a ShiftHandoffResponse will be returned but the handoff
will not actually be sent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.irm_v1alpha2.types.SendShiftHandoffResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "send_shift_handoff" not in self._inner_api_calls:
self._inner_api_calls[
"send_shift_handoff"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.send_shift_handoff,
default_retry=self._method_configs["SendShiftHandoff"].retry,
default_timeout=self._method_configs["SendShiftHandoff"].timeout,
client_info=self._client_info,
)
request = incidents_service_pb2.SendShiftHandoffRequest(
parent=parent,
recipients=recipients,
subject=subject,
cc=cc,
notes_content_type=notes_content_type,
notes_content=notes_content,
incidents=incidents,
preview_only=preview_only,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["send_shift_handoff"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Sends a summary of the shift for oncall handoff.
Example:
>>> from google.cloud import irm_v1alpha2
>>>
>>> client = irm_v1alpha2.IncidentServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `recipients`:
>>> recipients = []
>>>
>>> # TODO: Initialize `subject`:
>>> subject = ''
>>>
>>> response = client.send_shift_handoff(parent, recipients, subject)
Args:
parent (str): The resource name of the Stackdriver project that the handoff is being
sent from. for example, ``projects/{project_id}``
recipients (list[str]): Email addresses of the recipients of the handoff, for example,
"user@example.com". Must contain at least one entry.
subject (str): The subject of the email. Required.
cc (list[str]): Email addresses that should be CC'd on the handoff. Optional.
notes_content_type (str): Content type string, for example, 'text/plain' or 'text/html'.
notes_content (str): Additional notes to be included in the handoff. Optional.
incidents (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Incident]]): The set of incidents that should be included in the handoff. Optional.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.irm_v1alpha2.types.Incident`
preview_only (bool): If set to true a ShiftHandoffResponse will be returned but the handoff
will not actually be sent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.irm_v1alpha2.types.SendShiftHandoffResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the the instruction that describes the task:
### Input:
Sends a summary of the shift for oncall handoff.
Example:
>>> from google.cloud import irm_v1alpha2
>>>
>>> client = irm_v1alpha2.IncidentServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `recipients`:
>>> recipients = []
>>>
>>> # TODO: Initialize `subject`:
>>> subject = ''
>>>
>>> response = client.send_shift_handoff(parent, recipients, subject)
Args:
parent (str): The resource name of the Stackdriver project that the handoff is being
sent from. for example, ``projects/{project_id}``
recipients (list[str]): Email addresses of the recipients of the handoff, for example,
"user@example.com". Must contain at least one entry.
subject (str): The subject of the email. Required.
cc (list[str]): Email addresses that should be CC'd on the handoff. Optional.
notes_content_type (str): Content type string, for example, 'text/plain' or 'text/html'.
notes_content (str): Additional notes to be included in the handoff. Optional.
incidents (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Incident]]): The set of incidents that should be included in the handoff. Optional.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.irm_v1alpha2.types.Incident`
preview_only (bool): If set to true a ShiftHandoffResponse will be returned but the handoff
will not actually be sent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.irm_v1alpha2.types.SendShiftHandoffResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def send_shift_handoff(
self,
parent,
recipients,
subject,
cc=None,
notes_content_type=None,
notes_content=None,
incidents=None,
preview_only=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sends a summary of the shift for oncall handoff.
Example:
>>> from google.cloud import irm_v1alpha2
>>>
>>> client = irm_v1alpha2.IncidentServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `recipients`:
>>> recipients = []
>>>
>>> # TODO: Initialize `subject`:
>>> subject = ''
>>>
>>> response = client.send_shift_handoff(parent, recipients, subject)
Args:
parent (str): The resource name of the Stackdriver project that the handoff is being
sent from. for example, ``projects/{project_id}``
recipients (list[str]): Email addresses of the recipients of the handoff, for example,
"user@example.com". Must contain at least one entry.
subject (str): The subject of the email. Required.
cc (list[str]): Email addresses that should be CC'd on the handoff. Optional.
notes_content_type (str): Content type string, for example, 'text/plain' or 'text/html'.
notes_content (str): Additional notes to be included in the handoff. Optional.
incidents (list[Union[dict, ~google.cloud.irm_v1alpha2.types.Incident]]): The set of incidents that should be included in the handoff. Optional.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.irm_v1alpha2.types.Incident`
preview_only (bool): If set to true a ShiftHandoffResponse will be returned but the handoff
will not actually be sent.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.irm_v1alpha2.types.SendShiftHandoffResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "send_shift_handoff" not in self._inner_api_calls:
self._inner_api_calls[
"send_shift_handoff"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.send_shift_handoff,
default_retry=self._method_configs["SendShiftHandoff"].retry,
default_timeout=self._method_configs["SendShiftHandoff"].timeout,
client_info=self._client_info,
)
request = incidents_service_pb2.SendShiftHandoffRequest(
parent=parent,
recipients=recipients,
subject=subject,
cc=cc,
notes_content_type=notes_content_type,
notes_content=notes_content,
incidents=incidents,
preview_only=preview_only,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["send_shift_handoff"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def configure_once(config=None, bind_in_runtime=True):
"""Create an injector with a callable config if not present, otherwise, do nothing."""
with _INJECTOR_LOCK:
if _INJECTOR:
return _INJECTOR
return configure(config, bind_in_runtime=bind_in_runtime) | Create an injector with a callable config if not present, otherwise, do nothing. | Below is the the instruction that describes the task:
### Input:
Create an injector with a callable config if not present, otherwise, do nothing.
### Response:
def configure_once(config=None, bind_in_runtime=True):
"""Create an injector with a callable config if not present, otherwise, do nothing."""
with _INJECTOR_LOCK:
if _INJECTOR:
return _INJECTOR
return configure(config, bind_in_runtime=bind_in_runtime) |
def format_baseline_list(baseline_list):
"""Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
Single string containing the relevent data
"""
uniq_base = dict()
uniq_delta = dict()
for bline in baseline_list:
bsplit = bline.split()
bdate = " ".join(bsplit[2:])
if bsplit[0] not in uniq_base.keys():
uniq_base[bsplit[0]] = ""
if bsplit[1] not in uniq_delta.keys():
uniq_delta[bsplit[1]] = ""
uniq_base[bsplit[0]] += "{:s}, ".format(bdate)
uniq_delta[bsplit[1]] += "{:s}, ".format(bdate)
if len(uniq_base.items()) == 1:
base_string = "Baseline {:s}".format(list(uniq_base.keys())[0])
else:
base_string = "Baseline "
for i,kk in enumerate(uniq_base.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_base[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_base[kk][:-2])
else:
base_string += "unknown"
if len(uniq_delta.items()) == 1:
base_string += "\nDelta {:s}".format(list(uniq_delta.keys())[0])
else:
base_string += "\nDelta "
for i,kk in enumerate(uniq_delta.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_delta[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_delta[kk][:-2])
else:
base_string += "unknown"
return base_string | Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
Single string containing the relevent data | Below is the the instruction that describes the task:
### Input:
Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
Single string containing the relevent data
### Response:
def format_baseline_list(baseline_list):
"""Format the list of baseline information from the loaded files into a
cohesive, informative string
Parameters
------------
baseline_list : (list)
List of strings specifying the baseline information for each
SuperMAG file
Returns
---------
base_string : (str)
Single string containing the relevent data
"""
uniq_base = dict()
uniq_delta = dict()
for bline in baseline_list:
bsplit = bline.split()
bdate = " ".join(bsplit[2:])
if bsplit[0] not in uniq_base.keys():
uniq_base[bsplit[0]] = ""
if bsplit[1] not in uniq_delta.keys():
uniq_delta[bsplit[1]] = ""
uniq_base[bsplit[0]] += "{:s}, ".format(bdate)
uniq_delta[bsplit[1]] += "{:s}, ".format(bdate)
if len(uniq_base.items()) == 1:
base_string = "Baseline {:s}".format(list(uniq_base.keys())[0])
else:
base_string = "Baseline "
for i,kk in enumerate(uniq_base.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_base[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_base[kk][:-2])
else:
base_string += "unknown"
if len(uniq_delta.items()) == 1:
base_string += "\nDelta {:s}".format(list(uniq_delta.keys())[0])
else:
base_string += "\nDelta "
for i,kk in enumerate(uniq_delta.keys()):
if i == 1:
base_string += "{:s}: {:s}".format(kk, uniq_delta[kk][:-2])
else:
base_string += " {:s}: {:s}".format(kk,
uniq_delta[kk][:-2])
else:
base_string += "unknown"
return base_string |
def kalman_transition(filtered_mean, filtered_cov,
transition_matrix, transition_noise):
"""Propagate a filtered distribution through a transition model."""
predicted_mean = _propagate_mean(filtered_mean,
transition_matrix,
transition_noise)
predicted_cov = _propagate_cov(filtered_cov,
transition_matrix,
transition_noise)
return predicted_mean, predicted_cov | Propagate a filtered distribution through a transition model. | Below is the the instruction that describes the task:
### Input:
Propagate a filtered distribution through a transition model.
### Response:
def kalman_transition(filtered_mean, filtered_cov,
transition_matrix, transition_noise):
"""Propagate a filtered distribution through a transition model."""
predicted_mean = _propagate_mean(filtered_mean,
transition_matrix,
transition_noise)
predicted_cov = _propagate_cov(filtered_cov,
transition_matrix,
transition_noise)
return predicted_mean, predicted_cov |
def _format_issue_url(self):
"""Format full issue URL."""
query = urlencode({
'title': self._format_issue_title(),
'body': self._format_issue_body(),
})
return self.REPO_URL + self.ISSUE_SUFFIX + '?' + query | Format full issue URL. | Below is the the instruction that describes the task:
### Input:
Format full issue URL.
### Response:
def _format_issue_url(self):
"""Format full issue URL."""
query = urlencode({
'title': self._format_issue_title(),
'body': self._format_issue_body(),
})
return self.REPO_URL + self.ISSUE_SUFFIX + '?' + query |
async def _wrap_http(self, handler, *args, **kwargs):
''' wraps a handler with an HTTP request-response cycle'''
try:
method = self.request_method()
# support preflight requests when CORS is enabled
if method == 'OPTIONS':
return self.build_http_response(None, status=NO_CONTENT)
data = await handler(self, *args, **kwargs)
# format the response object
formatted = self.format(method, data)
status = self.responses.get(method, OK)
return self.build_http_response(formatted, status=status)
except Exception as ex:
return self.dispatch_error(ex) | wraps a handler with an HTTP request-response cycle | Below is the the instruction that describes the task:
### Input:
wraps a handler with an HTTP request-response cycle
### Response:
async def _wrap_http(self, handler, *args, **kwargs):
''' wraps a handler with an HTTP request-response cycle'''
try:
method = self.request_method()
# support preflight requests when CORS is enabled
if method == 'OPTIONS':
return self.build_http_response(None, status=NO_CONTENT)
data = await handler(self, *args, **kwargs)
# format the response object
formatted = self.format(method, data)
status = self.responses.get(method, OK)
return self.build_http_response(formatted, status=status)
except Exception as ex:
return self.dispatch_error(ex) |
def handle_subports(self, subports, event_type):
"""Subport data model change from the server."""
LOG.debug("Subports event received: %(event_type)s. "
"Subports: %(subports)s",
{'event_type': event_type, 'subports': subports})
# update the cache.
if event_type == events.CREATED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk:
trunk.sub_ports.append(subport)
elif event_type == events.DELETED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk and subport in trunk.sub_ports:
trunk.sub_ports.remove(subport)
# update the bound trunks.
affected_trunk_ids = set([s['trunk_id'] for s in subports])
for trunk_id in affected_trunk_ids:
trunk = self._trunks.get(trunk_id)
if trunk:
self._setup_trunk(trunk) | Subport data model change from the server. | Below is the the instruction that describes the task:
### Input:
Subport data model change from the server.
### Response:
def handle_subports(self, subports, event_type):
"""Subport data model change from the server."""
LOG.debug("Subports event received: %(event_type)s. "
"Subports: %(subports)s",
{'event_type': event_type, 'subports': subports})
# update the cache.
if event_type == events.CREATED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk:
trunk.sub_ports.append(subport)
elif event_type == events.DELETED:
for subport in subports:
trunk = self._trunks.get(subport['trunk_id'])
if trunk and subport in trunk.sub_ports:
trunk.sub_ports.remove(subport)
# update the bound trunks.
affected_trunk_ids = set([s['trunk_id'] for s in subports])
for trunk_id in affected_trunk_ids:
trunk = self._trunks.get(trunk_id)
if trunk:
self._setup_trunk(trunk) |
def chebyshev(point1, point2):
"""Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float
"""
return max(abs(point1[0] - point2[0]), abs(point1[1] - point2[1])) | Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float | Below is the the instruction that describes the task:
### Input:
Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float
### Response:
def chebyshev(point1, point2):
"""Computes distance between 2D points using chebyshev metric
:param point1: 1st point
:type point1: list
:param point2: 2nd point
:type point2: list
:returns: Distance between point1 and point2
:rtype: float
"""
return max(abs(point1[0] - point2[0]), abs(point1[1] - point2[1])) |
async def get(self):
"""
Returns (batch, data) if one or more items could be retrieved.
If the cancellation occurs or only invalid items were in the
queue, (None, None) will be returned instead.
"""
if not self._deque:
self._ready.clear()
await self._ready.wait()
buffer = io.BytesIO()
batch = []
size = 0
# Fill a new batch to return while the size is small enough,
# as long as we don't exceed the maximum length of messages.
while self._deque and len(batch) <= MessageContainer.MAXIMUM_LENGTH:
state = self._deque.popleft()
size += len(state.data) + TLMessage.SIZE_OVERHEAD
if size <= MessageContainer.MAXIMUM_SIZE:
state.msg_id = self._state.write_data_as_message(
buffer, state.data, isinstance(state.request, TLRequest),
after_id=state.after.msg_id if state.after else None
)
batch.append(state)
self._log.debug('Assigned msg_id = %d to %s (%x)',
state.msg_id, state.request.__class__.__name__,
id(state.request))
continue
if batch:
# Put the item back since it can't be sent in this batch
self._deque.appendleft(state)
break
# If a single message exceeds the maximum size, then the
# message payload cannot be sent. Telegram would forcibly
# close the connection; message would never be confirmed.
#
# We don't put the item back because it can never be sent.
# If we did, we would loop again and reach this same path.
# Setting the exception twice results in `InvalidStateError`
# and this method should never return with error, which we
# really want to avoid.
self._log.warning(
'Message payload for %s is too long (%d) and cannot be sent',
state.request.__class__.__name__, len(state.data)
)
state.future.set_exception(
ValueError('Request payload is too big'))
size = 0
continue
if not batch:
return None, None
if len(batch) > 1:
# Inlined code to pack several messages into a container
data = struct.pack(
'<Ii', MessageContainer.CONSTRUCTOR_ID, len(batch)
) + buffer.getvalue()
buffer = io.BytesIO()
container_id = self._state.write_data_as_message(
buffer, data, content_related=False
)
for s in batch:
s.container_id = container_id
data = buffer.getvalue()
return batch, data | Returns (batch, data) if one or more items could be retrieved.
If the cancellation occurs or only invalid items were in the
queue, (None, None) will be returned instead. | Below is the the instruction that describes the task:
### Input:
Returns (batch, data) if one or more items could be retrieved.
If the cancellation occurs or only invalid items were in the
queue, (None, None) will be returned instead.
### Response:
async def get(self):
"""
Returns (batch, data) if one or more items could be retrieved.
If the cancellation occurs or only invalid items were in the
queue, (None, None) will be returned instead.
"""
if not self._deque:
self._ready.clear()
await self._ready.wait()
buffer = io.BytesIO()
batch = []
size = 0
# Fill a new batch to return while the size is small enough,
# as long as we don't exceed the maximum length of messages.
while self._deque and len(batch) <= MessageContainer.MAXIMUM_LENGTH:
state = self._deque.popleft()
size += len(state.data) + TLMessage.SIZE_OVERHEAD
if size <= MessageContainer.MAXIMUM_SIZE:
state.msg_id = self._state.write_data_as_message(
buffer, state.data, isinstance(state.request, TLRequest),
after_id=state.after.msg_id if state.after else None
)
batch.append(state)
self._log.debug('Assigned msg_id = %d to %s (%x)',
state.msg_id, state.request.__class__.__name__,
id(state.request))
continue
if batch:
# Put the item back since it can't be sent in this batch
self._deque.appendleft(state)
break
# If a single message exceeds the maximum size, then the
# message payload cannot be sent. Telegram would forcibly
# close the connection; message would never be confirmed.
#
# We don't put the item back because it can never be sent.
# If we did, we would loop again and reach this same path.
# Setting the exception twice results in `InvalidStateError`
# and this method should never return with error, which we
# really want to avoid.
self._log.warning(
'Message payload for %s is too long (%d) and cannot be sent',
state.request.__class__.__name__, len(state.data)
)
state.future.set_exception(
ValueError('Request payload is too big'))
size = 0
continue
if not batch:
return None, None
if len(batch) > 1:
# Inlined code to pack several messages into a container
data = struct.pack(
'<Ii', MessageContainer.CONSTRUCTOR_ID, len(batch)
) + buffer.getvalue()
buffer = io.BytesIO()
container_id = self._state.write_data_as_message(
buffer, data, content_related=False
)
for s in batch:
s.container_id = container_id
data = buffer.getvalue()
return batch, data |
def is_connection_error(e):
"""
Checks if error e pertains to a connection issue
"""
return (isinstance(e, err.InterfaceError) and e.args[0] == "(0, '')") or\
(isinstance(e, err.OperationalError) and e.args[0] in operation_error_codes.values()) | Checks if error e pertains to a connection issue | Below is the the instruction that describes the task:
### Input:
Checks if error e pertains to a connection issue
### Response:
def is_connection_error(e):
"""
Checks if error e pertains to a connection issue
"""
return (isinstance(e, err.InterfaceError) and e.args[0] == "(0, '')") or\
(isinstance(e, err.OperationalError) and e.args[0] in operation_error_codes.values()) |
def endpoint_delete(auth=None, **kwargs):
'''
Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_endpoint(**kwargs) | Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9 | Below is the the instruction that describes the task:
### Input:
Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9
### Response:
def endpoint_delete(auth=None, **kwargs):
'''
Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_endpoint(**kwargs) |
def get_effective_agent_id_with_proxy(proxy):
"""Given a Proxy, returns the Id of the effective Agent"""
if is_authenticated_with_proxy(proxy):
if proxy.has_effective_agent():
return proxy.get_effective_agent_id()
else:
return proxy.get_authentication().get_agent_id()
else:
return Id(
identifier='MC3GUE$T@MIT.EDU',
namespace='authentication.Agent',
authority='MIT-ODL') | Given a Proxy, returns the Id of the effective Agent | Below is the the instruction that describes the task:
### Input:
Given a Proxy, returns the Id of the effective Agent
### Response:
def get_effective_agent_id_with_proxy(proxy):
"""Given a Proxy, returns the Id of the effective Agent"""
if is_authenticated_with_proxy(proxy):
if proxy.has_effective_agent():
return proxy.get_effective_agent_id()
else:
return proxy.get_authentication().get_agent_id()
else:
return Id(
identifier='MC3GUE$T@MIT.EDU',
namespace='authentication.Agent',
authority='MIT-ODL') |
def decompress_G2(p: G2Compressed) -> G2Uncompressed:
"""
Recovers x and y coordinates from the compressed point (z1, z2).
"""
z1, z2 = p
# b_flag == 1 indicates the infinity point
b_flag1 = (z1 % POW_2_383) // POW_2_382
if b_flag1 == 1:
return Z2
x1 = z1 % POW_2_381
x2 = z2
# x1 is the imaginary part, x2 is the real part
x = FQ2([x2, x1])
y = modular_squareroot_in_FQ2(x**3 + b2)
if y is None:
raise ValueError("Failed to find a modular squareroot")
# Choose the y whose leftmost bit of the imaginary part is equal to the a_flag1
# If y_im happens to be zero, then use the bit of y_re
a_flag1 = (z1 % POW_2_382) // POW_2_381
y_re, y_im = y.coeffs
if (y_im > 0 and (y_im * 2) // q != a_flag1) or (y_im == 0 and (y_re * 2) // q != a_flag1):
y = FQ2((y * -1).coeffs)
if not is_on_curve((x, y, FQ2([1, 0])), b2):
raise ValueError(
"The given point is not on the twisted curve over FQ**2"
)
return (x, y, FQ2([1, 0])) | Recovers x and y coordinates from the compressed point (z1, z2). | Below is the the instruction that describes the task:
### Input:
Recovers x and y coordinates from the compressed point (z1, z2).
### Response:
def decompress_G2(p: G2Compressed) -> G2Uncompressed:
    """
    Recovers x and y coordinates from the compressed point (z1, z2).

    ``z1`` carries the flag bits: the b-flag marks the point at infinity
    and the a-flag selects which of the two square roots of
    ``x**3 + b2`` to use for ``y``.

    Raises:
        ValueError: if no modular square root exists, or if the
            reconstructed point is not on the twisted curve over FQ**2.
    """
    z1, z2 = p
    # b_flag == 1 indicates the infinity point
    b_flag1 = (z1 % POW_2_383) // POW_2_382
    if b_flag1 == 1:
        return Z2
    # Drop the top flag bits of z1 to recover the raw coordinate value.
    x1 = z1 % POW_2_381
    x2 = z2
    # x1 is the imaginary part, x2 is the real part
    x = FQ2([x2, x1])
    y = modular_squareroot_in_FQ2(x**3 + b2)
    if y is None:
        raise ValueError("Failed to find a modular squareroot")
    # Choose the y whose leftmost bit of the imaginary part is equal to the a_flag1
    # If y_im happens to be zero, then use the bit of y_re
    a_flag1 = (z1 % POW_2_382) // POW_2_381
    y_re, y_im = y.coeffs
    if (y_im > 0 and (y_im * 2) // q != a_flag1) or (y_im == 0 and (y_re * 2) // q != a_flag1):
        # Wrong root was returned: negate y to flip the encoded sign bit.
        y = FQ2((y * -1).coeffs)
    if not is_on_curve((x, y, FQ2([1, 0])), b2):
        raise ValueError(
            "The given point is not on the twisted curve over FQ**2"
        )
    return (x, y, FQ2([1, 0]))
def list_rbac_policies(self, retrieve_all=True, **_params):
"""Fetch a list of all RBAC policies for a project."""
return self.list('rbac_policies', self.rbac_policies_path,
retrieve_all, **_params) | Fetch a list of all RBAC policies for a project. | Below is the the instruction that describes the task:
### Input:
Fetch a list of all RBAC policies for a project.
### Response:
def list_rbac_policies(self, retrieve_all=True, **_params):
    """Fetch a list of all RBAC policies for a project.

    Extra query parameters are forwarded unchanged to the underlying
    ``list`` call.
    """
    collection = 'rbac_policies'
    return self.list(collection, self.rbac_policies_path,
                     retrieve_all, **_params)
def read_exif_from_file(filename):
"""Slices JPEG meta data into a list from JPEG binary data.
"""
f = open(filename, "rb")
data = f.read(6)
if data[0:2] != b"\xff\xd8":
raise InvalidImageDataError("Given data isn't JPEG.")
head = data[2:6]
HEAD_LENGTH = 4
exif = None
while 1:
length = struct.unpack(">H", head[2: 4])[0]
if head[:2] == b"\xff\xe1":
segment_data = f.read(length - 2)
if segment_data[:4] != b'Exif':
head = f.read(HEAD_LENGTH)
continue
exif = head + segment_data
break
elif head[0:1] == b"\xff":
f.read(length - 2)
head = f.read(HEAD_LENGTH)
else:
break
f.close()
    return exif | Slices JPEG meta data into a list from JPEG binary data. | Below is the instruction that describes the task:
### Input:
Slices JPEG meta data into a list from JPEG binary data.
### Response:
def read_exif_from_file(filename):
    """Return the raw APP1/Exif segment of a JPEG file, or None.

    Scans the JPEG marker stream for an APP1 (0xFFE1) segment whose
    payload starts with ``b'Exif'`` and returns the whole segment
    (marker + 2-byte big-endian length + payload). Non-Exif APP1
    segments (e.g. XMP) and all other marker segments are skipped.

    Args:
        filename: path to a JPEG file.

    Returns:
        bytes with the Exif segment, or None if no Exif data is found.

    Raises:
        InvalidImageDataError: if the file does not start with the JPEG
            SOI marker (0xFFD8).
    """
    HEAD_LENGTH = 4
    # Use a context manager so the handle is closed on every path --
    # the original implementation leaked it when an exception was raised.
    with open(filename, "rb") as f:
        data = f.read(6)
        if data[0:2] != b"\xff\xd8":
            raise InvalidImageDataError("Given data isn't JPEG.")
        # `head` holds a 2-byte marker followed by its 2-byte length field.
        head = data[2:6]
        exif = None
        while True:
            length = struct.unpack(">H", head[2: 4])[0]
            if head[:2] == b"\xff\xe1":
                segment_data = f.read(length - 2)
                if segment_data[:4] != b'Exif':
                    # APP1 segment that is not Exif -- keep scanning.
                    head = f.read(HEAD_LENGTH)
                    continue
                exif = head + segment_data
                break
            elif head[0:1] == b"\xff":
                # Some other marker segment: skip its payload.
                f.read(length - 2)
                head = f.read(HEAD_LENGTH)
            else:
                # Not a marker -- corrupt stream or entropy-coded data; stop.
                break
    return exif
def n_jobs(self):
""" Returns number of jobs/threads to use during assignment of data.
Returns
-------
If None it will return the setting of 'PYEMMA_NJOBS' or
'SLURM_CPUS_ON_NODE' environment variable. If none of these environment variables exist,
the number of processors /or cores is returned.
Notes
-----
    This setting will effectively be multiplied by the number of threads used by NumPy for
algorithms which use multiple processes. So take care if you choose this manually.
"""
if not hasattr(self, '_n_jobs'):
self._n_jobs = get_n_jobs(logger=getattr(self, 'logger'))
return self._n_jobs | Returns number of jobs/threads to use during assignment of data.
Returns
-------
If None it will return the setting of 'PYEMMA_NJOBS' or
'SLURM_CPUS_ON_NODE' environment variable. If none of these environment variables exist,
the number of processors /or cores is returned.
Notes
-----
This setting will effectively be multiplied by the number of threads used by NumPy for
algorithms which use multiple processes. So take care if you choose this manually. | Below is the the instruction that describes the task:
### Input:
Returns number of jobs/threads to use during assignment of data.
Returns
-------
If None it will return the setting of 'PYEMMA_NJOBS' or
'SLURM_CPUS_ON_NODE' environment variable. If none of these environment variables exist,
the number of processors /or cores is returned.
Notes
-----
This setting will effectively be multiplied by the number of threads used by NumPy for
algorithms which use multiple processes. So take care if you choose this manually.
### Response:
def n_jobs(self):
    """Number of jobs/threads to use during assignment of data.

    Returns
    -------
    If None it will return the setting of 'PYEMMA_NJOBS' or
    'SLURM_CPUS_ON_NODE' environment variable. If none of these
    environment variables exist, the number of processors/cores is
    returned.

    Notes
    -----
    This setting will effectively be multiplied by the number of threads
    used by NumPy for algorithms which use multiple processes. So take
    care if you choose this manually.
    """
    try:
        return self._n_jobs
    except AttributeError:
        # Resolve lazily on first access and cache on the instance.
        self._n_jobs = get_n_jobs(logger=getattr(self, 'logger'))
        return self._n_jobs
def unwrap(s, node_indent):
"""Group lines of a docstring to blocks.
For now, only groups markdown list sections.
    A block designates a list of consecutive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
    JWT, encoded using a URL safe flavor of Base64.
"""
def get_indent():
if line_str.startswith('"""'):
return node_indent
return len(re.match(r"^( *)", line_str).group(1))
def finish_block():
if block_list:
unwrap_list.append(
(block_indent, (" ".join([v.strip() for v in block_list])).strip())
)
block_list.clear()
unwrap_list = []
block_indent = None
block_list = []
for line_str in s.splitlines():
line_str = line_str.rstrip()
line_indent = get_indent()
# A new block has been started. Record the indent of the first line in that
# block to use as the indent for all the lines that will be put in this block.
if not block_list:
block_indent = line_indent
# A blank line always starts a new block.
if line_str == "":
finish_block()
# Indent any lines that are less indentend than the docstr node
# if line_indent < node_indent:
# line_indent = block_indent
# A line that is indented less than the current block starts a new block.
if line_indent < block_indent:
finish_block()
# A line that is the start of a markdown list starts a new block.
elif line_str.strip().startswith(("- ", "* ")):
finish_block()
# A markdown title always starts a new block.
elif line_str.strip().endswith(":"):
finish_block()
block_list.append(line_str)
# Only make blocks for markdown list items. Write everything else as single line items.
if not block_list[0].strip().startswith(("- ", "* ")):
finish_block()
# Finish the block that was in progress when the end of the docstring was reached.
finish_block()
return unwrap_list | Group lines of a docstring to blocks.
For now, only groups markdown list sections.
A block designates a list of consecutive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
JWT, encoded using a URL safe flavor of Base64. | Below is the instruction that describes the task:
### Input:
Group lines of a docstring to blocks.
For now, only groups markdown list sections.
A block designates a list of consecutive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently
in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
JWT, encoded using a URL safe flavor of Base64.
### Response:
def unwrap(s, node_indent):
    """Group lines of a docstring to blocks.
    For now, only groups markdown list sections.
    A block designates a list of consecutive lines that all start at the same
    indentation level.
    The lines of the docstring are iterated top to bottom. Each line is added to
    `block_list` until a line is encountered that breaks sufficiently with the previous
    line to be deemed to be the start of a new block. At that point, all lines
    currently
    in `block_list` are stripped and joined to a single line, which is added to
    `unwrap_list`.
    Some of the block breaks are easy to determine. E.g., a line that starts with "- "
    is the start of a new markdown style list item, so is always the start of a new
    block. But then there are things like this, which is a single block:
    - An example list with a second line
    And this, which is 3 single line blocks (due to the different indentation levels):
    Args:
    jwt_bu64: bytes
    JWT, encoded using a URL safe flavor of Base64.
    Returns a list of ``(block_indent, joined_text)`` tuples.
    """
    # NOTE: both closures below read `line_str` / `block_list` /
    # `block_indent` from the enclosing loop scope, so they must only be
    # called while the loop variables are in a consistent state.
    def get_indent():
        # Docstring delimiters are treated as sitting at the node's indent.
        if line_str.startswith('"""'):
            return node_indent
        return len(re.match(r"^( *)", line_str).group(1))
    def finish_block():
        # Flush the accumulated lines (if any) as one unwrapped block.
        if block_list:
            unwrap_list.append(
                (block_indent, (" ".join([v.strip() for v in block_list])).strip())
            )
        block_list.clear()
    unwrap_list = []
    block_indent = None
    block_list = []
    for line_str in s.splitlines():
        line_str = line_str.rstrip()
        line_indent = get_indent()
        # A new block has been started. Record the indent of the first line in that
        # block to use as the indent for all the lines that will be put in this block.
        if not block_list:
            block_indent = line_indent
        # A blank line always starts a new block.
        if line_str == "":
            finish_block()
        # Indent any lines that are less indentend than the docstr node
        # if line_indent < node_indent:
        # line_indent = block_indent
        # A line that is indented less than the current block starts a new block.
        if line_indent < block_indent:
            finish_block()
        # A line that is the start of a markdown list starts a new block.
        elif line_str.strip().startswith(("- ", "* ")):
            finish_block()
        # A markdown title always starts a new block.
        elif line_str.strip().endswith(":"):
            finish_block()
        block_list.append(line_str)
        # Only make blocks for markdown list items. Write everything else as single line items.
        if not block_list[0].strip().startswith(("- ", "* ")):
            finish_block()
    # Finish the block that was in progress when the end of the docstring was reached.
    finish_block()
    return unwrap_list
def _compute_mean(self, C, mag, rrup):
"""
Compute mean value according to equation 18, page 32.
"""
# see table 3, page 14
R1 = 90.
R2 = 150.
# see equation 19, page 32
m_ref = mag - 4
r1 = R1 + C['c8'] * m_ref
r2 = R2 + C['c11'] * m_ref
assert r1 > 0
assert r2 > 0
g0 = np.log10(
np.sqrt(np.minimum(rrup, r1) ** 2 + (1 + C['c5'] * m_ref) ** 2)
)
g1 = np.maximum(np.log10(rrup / r1), 0)
g2 = np.maximum(np.log10(rrup / r2), 0)
mean = (C['c0'] + C['c1'] * m_ref + C['c2'] * m_ref ** 2 +
(C['c3'] + C['c4'] * m_ref) * g0 +
(C['c6'] + C['c7'] * m_ref) * g1 +
(C['c9'] + C['c10'] * m_ref) * g2)
# convert from log10 to ln and units from cm/s2 to g
mean = np.log((10 ** mean) * 1e-2 / g)
return mean | Compute mean value according to equation 18, page 32. | Below is the the instruction that describes the task:
### Input:
Compute mean value according to equation 18, page 32.
### Response:
def _compute_mean(self, C, mag, rrup):
"""
Compute mean value according to equation 18, page 32.
"""
# see table 3, page 14
R1 = 90.
R2 = 150.
# see equation 19, page 32
m_ref = mag - 4
r1 = R1 + C['c8'] * m_ref
r2 = R2 + C['c11'] * m_ref
assert r1 > 0
assert r2 > 0
g0 = np.log10(
np.sqrt(np.minimum(rrup, r1) ** 2 + (1 + C['c5'] * m_ref) ** 2)
)
g1 = np.maximum(np.log10(rrup / r1), 0)
g2 = np.maximum(np.log10(rrup / r2), 0)
mean = (C['c0'] + C['c1'] * m_ref + C['c2'] * m_ref ** 2 +
(C['c3'] + C['c4'] * m_ref) * g0 +
(C['c6'] + C['c7'] * m_ref) * g1 +
(C['c9'] + C['c10'] * m_ref) * g2)
# convert from log10 to ln and units from cm/s2 to g
mean = np.log((10 ** mean) * 1e-2 / g)
return mean |
def _request(self, form, url=None, **kwargs):
"""Extract input data from the form to pass to a Requests session."""
method = str(form.get("method", "get"))
action = form.get("action")
url = urllib.parse.urljoin(url, action)
if url is None: # This happens when both `action` and `url` are None.
raise ValueError('no URL to submit to')
# read https://www.w3.org/TR/html52/sec-forms.html
data = kwargs.pop("data", dict())
files = kwargs.pop("files", dict())
# Use a list of 2-tuples to better reflect the behavior of browser QSL.
# Requests also retains order when encoding form data in 2-tuple lists.
data = [(k, v) for k, v in data.items()]
# Process form tags in the order that they appear on the page,
# skipping those tags that do not have a name-attribute.
selector = ",".join("{}[name]".format(i) for i in
("input", "button", "textarea", "select"))
for tag in form.select(selector):
name = tag.get("name") # name-attribute of tag
# Skip disabled elements, since they should not be submitted.
if tag.has_attr('disabled'):
continue
if tag.name == "input":
if tag.get("type", "").lower() in ("radio", "checkbox"):
if "checked" not in tag.attrs:
continue
value = tag.get("value", "on")
else:
# browsers use empty string for inputs with missing values
value = tag.get("value", "")
if tag.get("type", "").lower() == "file":
# read http://www.cs.tut.fi/~jkorpela/forms/file.html
# in browsers, file upload only happens if the form
# (or submit button) enctype attribute is set to
# "multipart/form-data". We don't care, simplify.
filename = value
if filename != "" and isinstance(filename, string_types):
content = open(filename, "rb")
else:
content = ""
# If value is the empty string, we still pass it for
# consistency with browsers (see #250).
files[name] = (filename, content)
else:
data.append((name, value))
elif tag.name == "button":
if tag.get("type", "").lower() in ("button", "reset"):
continue
else:
data.append((name, tag.get("value", "")))
elif tag.name == "textarea":
data.append((name, tag.text))
elif tag.name == "select":
# If the value attribute is not specified, the content will
# be passed as a value instead.
options = tag.select("option")
selected_values = [i.get("value", i.text) for i in options
if "selected" in i.attrs]
if "multiple" in tag.attrs:
for value in selected_values:
data.append((name, value))
elif selected_values:
# A standard select element only allows one option to be
# selected, but browsers pick last if somehow multiple.
data.append((name, selected_values[-1]))
elif options:
# Selects the first option if none are selected
first_value = options[0].get("value", options[0].text)
data.append((name, first_value))
if method.lower() == "get":
kwargs["params"] = data
else:
kwargs["data"] = data
return self.session.request(method, url, files=files, **kwargs) | Extract input data from the form to pass to a Requests session. | Below is the the instruction that describes the task:
### Input:
Extract input data from the form to pass to a Requests session.
### Response:
def _request(self, form, url=None, **kwargs):
    """Extract input data from the form to pass to a Requests session.

    Walks the form's named, enabled controls (input, button, textarea,
    select) in document order, collects their submittable values, and
    dispatches the request using the form's method and resolved action.

    :param form: parsed <form> element (bs4-style tag -- confirm against
        the surrounding class).
    :param url: URL of the page the form came from; used to resolve a
        relative ``action``.
    :param kwargs: forwarded to ``Session.request``; any ``data`` /
        ``files`` entries given here are merged with the form's values.
    :raises ValueError: if neither ``action`` nor ``url`` yields a
        target URL.
    """
    method = str(form.get("method", "get"))
    action = form.get("action")
    url = urllib.parse.urljoin(url, action)
    if url is None: # This happens when both `action` and `url` are None.
        raise ValueError('no URL to submit to')
    # read https://www.w3.org/TR/html52/sec-forms.html
    data = kwargs.pop("data", dict())
    files = kwargs.pop("files", dict())
    # Use a list of 2-tuples to better reflect the behavior of browser QSL.
    # Requests also retains order when encoding form data in 2-tuple lists.
    data = [(k, v) for k, v in data.items()]
    # Process form tags in the order that they appear on the page,
    # skipping those tags that do not have a name-attribute.
    selector = ",".join("{}[name]".format(i) for i in
                        ("input", "button", "textarea", "select"))
    for tag in form.select(selector):
        name = tag.get("name") # name-attribute of tag
        # Skip disabled elements, since they should not be submitted.
        if tag.has_attr('disabled'):
            continue
        if tag.name == "input":
            if tag.get("type", "").lower() in ("radio", "checkbox"):
                # Unchecked radio/checkbox controls are never submitted.
                if "checked" not in tag.attrs:
                    continue
                value = tag.get("value", "on")
            else:
                # browsers use empty string for inputs with missing values
                value = tag.get("value", "")
            if tag.get("type", "").lower() == "file":
                # read http://www.cs.tut.fi/~jkorpela/forms/file.html
                # in browsers, file upload only happens if the form
                # (or submit button) enctype attribute is set to
                # "multipart/form-data". We don't care, simplify.
                filename = value
                if filename != "" and isinstance(filename, string_types):
                    content = open(filename, "rb")
                else:
                    content = ""
                # If value is the empty string, we still pass it for
                # consistency with browsers (see #250).
                files[name] = (filename, content)
            else:
                data.append((name, value))
        elif tag.name == "button":
            # Non-submit buttons do not contribute to the form data set.
            if tag.get("type", "").lower() in ("button", "reset"):
                continue
            else:
                data.append((name, tag.get("value", "")))
        elif tag.name == "textarea":
            data.append((name, tag.text))
        elif tag.name == "select":
            # If the value attribute is not specified, the content will
            # be passed as a value instead.
            options = tag.select("option")
            selected_values = [i.get("value", i.text) for i in options
                               if "selected" in i.attrs]
            if "multiple" in tag.attrs:
                for value in selected_values:
                    data.append((name, value))
            elif selected_values:
                # A standard select element only allows one option to be
                # selected, but browsers pick last if somehow multiple.
                data.append((name, selected_values[-1]))
            elif options:
                # Selects the first option if none are selected
                first_value = options[0].get("value", options[0].text)
                data.append((name, first_value))
    # GET requests carry the form data in the query string; everything
    # else sends it as the request body.
    if method.lower() == "get":
        kwargs["params"] = data
    else:
        kwargs["data"] = data
    return self.session.request(method, url, files=files, **kwargs)
def get_available_storage_system(self, **kwargs):
"""
Retrieves a specific storage system and its associated volumes available to the server profile based
on the given server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
storageSystemId (str):
The storage system ID associated with the resource.
Returns:
dict: Available storage system.
"""
uri = self._helper.build_uri_with_query_string(kwargs, '/available-storage-system')
return self._helper.do_get(uri) | Retrieves a specific storage system and its associated volumes available to the server profile based
on the given server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
storageSystemId (str):
The storage system ID associated with the resource.
Returns:
dict: Available storage system. | Below is the the instruction that describes the task:
### Input:
Retrieves a specific storage system and its associated volumes available to the server profile based
on the given server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
storageSystemId (str):
The storage system ID associated with the resource.
Returns:
dict: Available storage system.
### Response:
def get_available_storage_system(self, **kwargs):
    """Retrieve one storage system (and its volumes) usable by the profile.

    The lookup is scoped by the given server hardware type and
    enclosure group.

    Keyword Args:
        enclosureGroupUri (str): URI of the associated enclosure group.
        serverHardwareTypeUri (str): URI of the associated server
            hardware type.
        storageSystemId (str): ID of the storage system.

    Returns:
        dict: Available storage system.
    """
    endpoint = '/available-storage-system'
    query_uri = self._helper.build_uri_with_query_string(kwargs, endpoint)
    return self._helper.do_get(query_uri)
def month_name(self, locale=None):
"""
Return the month names of the DateTimeIndex with specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
"""
if self.tz is not None and not timezones.is_utc(self.tz):
values = self._local_timestamps()
else:
values = self.asi8
result = fields.get_date_name_field(values, 'month_name',
locale=locale)
result = self._maybe_mask_results(result, fill_value=None)
return result | Return the month names of the DateTimeIndex with specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object') | Below is the the instruction that describes the task:
### Input:
Return the month names of the DateTimeIndex with specified locale.
.. versionadded:: 0.23.0
Parameters
----------
locale : str, optional
Locale determining the language in which to return the month name.
Default is English locale.
Returns
-------
Index
Index of month names.
Examples
--------
>>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
>>> idx
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
dtype='datetime64[ns]', freq='M')
>>> idx.month_name()
Index(['January', 'February', 'March'], dtype='object')
### Response:
def month_name(self, locale=None):
    """
    Return the month names of the DateTimeIndex with specified locale.
    .. versionadded:: 0.23.0
    Parameters
    ----------
    locale : str, optional
        Locale determining the language in which to return the month name.
        Default is English locale.
    Returns
    -------
    Index
        Index of month names.
    Examples
    --------
    >>> idx = pd.date_range(start='2018-01', freq='M', periods=3)
    >>> idx
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'],
                   dtype='datetime64[ns]', freq='M')
    >>> idx.month_name()
    Index(['January', 'February', 'March'], dtype='object')
    """
    if self.tz is not None and not timezones.is_utc(self.tz):
        # Non-UTC zones need wall-clock timestamps so the month reflects
        # the local calendar rather than UTC.
        values = self._local_timestamps()
    else:
        values = self.asi8
    result = fields.get_date_name_field(values, 'month_name',
                                        locale=locale)
    # Fill masked positions (presumably NaT entries) with None --
    # confirm against _maybe_mask_results.
    result = self._maybe_mask_results(result, fill_value=None)
    return result
def convert_other_format(filename, format):
"""
转换为png图片
:param filename: 图片文件名
:return:
"""
img = Image.open(filename)
rp = ReParser()
c_filename = rp.replace(r'\..*$', format, filename)
img.save(c_filename) | 转换为png图片
:param filename: 图片文件名
:return: | Below is the the instruction that describes the task:
### Input:
转换为png图片
:param filename: 图片文件名
:return:
### Response:
def convert_other_format(filename, format):
    """
    Convert an image file to another format.

    :param filename: path of the source image file
    :param format: replacement for the file's extension, e.g. ``'.png'``
        (presumably includes the leading dot, since the whole ``\\..*$``
        suffix is replaced -- TODO confirm against callers)
    :return: None; the converted image is saved next to the source file
    """
    img = Image.open(filename)
    rp = ReParser()
    # NOTE(review): ReParser.replace appears to be an re.sub-style helper
    # taking (pattern, replacement, subject) -- confirm argument order.
    c_filename = rp.replace(r'\..*$', format, filename)
    # Image.save infers the output format from the new file extension.
    img.save(c_filename)
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
'''
Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
scsi address
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. This argument is optional.
'''
if not hostname:
hostname = get_managed_object_name(host_ref)
si = get_service_instance_from_managed_object(host_ref, name=hostname)
if not storage_system:
storage_system = get_storage_system(si, host_ref, hostname)
lun_ids_to_scsi_addr_map = \
_get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
hostname)
luns_to_key_map = {d.key: d for d in
get_all_luns(host_ref, storage_system, hostname)}
return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
six.iteritems(lun_ids_to_scsi_addr_map)} | Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
scsi address
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. This argument is optional. | Below is the the instruction that describes the task:
### Input:
Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
scsi address
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. This argument is optional.
### Response:
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None (retrieved from the
        host when not given).
    hostname
        Name of the host. This argument is optional (retrieved from the
        host when not given).
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # Mapping of scsi address -> lun key for this host (resolved to full
    # vim.ScsiLun objects below).
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    # Index all luns by their key so each address can be resolved to the
    # corresponding vim.ScsiLun object.
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
def get_term_frequency(self, term, document, normalized=False):
"""
Returns the frequency of the term specified in the document.
"""
if document not in self._documents:
raise IndexError(DOCUMENT_DOES_NOT_EXIST)
if term not in self._terms:
raise IndexError(TERM_DOES_NOT_EXIST)
result = self._terms[term].get(document, 0)
if normalized:
result /= self.get_document_length(document)
    return float(result) | Returns the frequency of the term specified in the document. | Below is the instruction that describes the task:
### Input:
Returns the frequency of the term specified in the document.
### Response:
def get_term_frequency(self, term, document, normalized=False):
    """Return how often *term* occurs in *document*.

    When *normalized* is true the raw count is divided by the document
    length. Raises IndexError when the document or term is unknown.
    """
    if document not in self._documents:
        raise IndexError(DOCUMENT_DOES_NOT_EXIST)
    if term not in self._terms:
        raise IndexError(TERM_DOES_NOT_EXIST)
    # A known term may still be absent from this document; count as 0.
    frequency = self._terms[term].get(document, 0)
    if normalized:
        frequency = frequency / self.get_document_length(document)
    return float(frequency)
def history(self, channel, oldest=None, latest=None):
"""Fetch the history of a channel."""
resource = self.RCHANNEL_HISTORY
params = {
self.PCHANNEL: channel,
self.PCOUNT: self.max_items
}
if oldest is not None:
params[self.POLDEST] = oldest
if latest is not None:
params[self.PLATEST] = latest
response = self._fetch(resource, params)
return response | Fetch the history of a channel. | Below is the the instruction that describes the task:
### Input:
Fetch the history of a channel.
### Response:
def history(self, channel, oldest=None, latest=None):
    """Fetch the history of a channel.

    The optional *oldest* / *latest* bounds are only included in the
    query when they are provided.
    """
    query = {
        self.PCHANNEL: channel,
        self.PCOUNT: self.max_items,
    }
    if oldest is not None:
        query[self.POLDEST] = oldest
    if latest is not None:
        query[self.PLATEST] = latest
    return self._fetch(self.RCHANNEL_HISTORY, query)
def delete_country_by_id(cls, country_id, **kwargs):
"""Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_country_by_id_with_http_info(country_id, **kwargs)
else:
(data) = cls._delete_country_by_id_with_http_info(country_id, **kwargs)
return data | Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Delete Country
Delete an instance of Country by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_country_by_id(country_id, async=True)
>>> result = thread.get()
:param async bool
:param str country_id: ID of country to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_country_by_id(cls, country_id, **kwargs):
    """Delete an instance of Country by its ID.

    Synchronous by default; pass ``async=True`` to get the request
    thread back instead of the result.

    :param str country_id: ID of country to delete. (required)
    :return: None, or the request thread when called asynchronously.
    """
    # Callers of this helper never need the full HTTP envelope.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths return the delegate's result as-is.
    result = cls._delete_country_by_id_with_http_info(country_id, **kwargs)
    return result
def _get_supported_for_any_abi(version=None, platform=None, impl=None, force_manylinux=False):
"""Generates supported tags for unspecified ABI types to support more intuitive cross-platform
resolution."""
unique_tags = {
tag for abi in _gen_all_abis(impl, version)
for tag in _get_supported(version=version,
platform=platform,
impl=impl,
abi=abi,
force_manylinux=force_manylinux)
}
return list(unique_tags) | Generates supported tags for unspecified ABI types to support more intuitive cross-platform
resolution. | Below is the the instruction that describes the task:
### Input:
Generates supported tags for unspecified ABI types to support more intuitive cross-platform
resolution.
### Response:
def _get_supported_for_any_abi(version=None, platform=None, impl=None, force_manylinux=False):
  """Generates supported tags for unspecified ABI types to support more intuitive cross-platform
  resolution."""
  # Collect tags across all ABIs reported by _gen_all_abis (helper defined
  # elsewhere in this module); the set comprehension removes duplicates.
  unique_tags = {
    tag for abi in _gen_all_abis(impl, version)
    for tag in _get_supported(version=version,
                              platform=platform,
                              impl=impl,
                              abi=abi,
                              force_manylinux=force_manylinux)
  }
  # NOTE(review): order of the returned list is unspecified (comes from a set).
  return list(unique_tags) |
def intersect(left, *rights, **kwargs):
    """
    Calc intersection among datasets.
    :param left: collection
    :param rights: collection or list of collections
    :param distinct: if True, de-duplicate the result; if False (default),
                     duplicate entries are preserved
    :return: collection
    :Examples:
    >>> import pandas as pd
    >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
    >>> df2 = DataFrame(pd.DataFrame({'a': [1, 3, 3], 'b': [1, 3, 3]}))
    >>> df1.intersect(df2)
       a  b
    0  1  1
    1  3  3
    2  3  3
    >>> df1.intersect(df2, distinct=True)
       a  b
    0  1  1
    1  3  3
    """
    import time
    from ..utils import output
    distinct = kwargs.get('distinct', False)
    # Allow the right-hand collections to be passed as one list argument.
    if isinstance(rights[0], list):
        rights = rights[0]
    cols = [n for n in left.schema.names]
    types = [n for n in left.schema.types]
    collections = (left, ) + rights
    # Timestamp-suffixed helper column names to avoid clashing with user columns.
    idx_col_name = 'idx_%d' % int(time.time())
    counter_col_name = 'exc_counter_%d' % int(time.time())
    # Tag every row with the index of the collection it came from, then union all.
    collections = [c[c, Scalar(idx).rename(idx_col_name)] for idx, c in enumerate(collections)]
    unioned = reduce(lambda a, b: a.union(b), collections)
    # Count occurrences of each row value per source collection.
    src_agg = unioned.groupby(*(cols + [idx_col_name])) \
        .agg(**{counter_col_name: unioned.count()})
    # Per distinct row value: how many sources it appears in, and its minimum
    # multiplicity across those sources (= multiplicity in the intersection).
    aggregators = {
        idx_col_name: src_agg[idx_col_name].nunique(),
        counter_col_name: src_agg[counter_col_name].min(),
    }
    final_agg = src_agg.groupby(*cols).agg(**aggregators)
    # Keep only row values present in every input collection.
    final_agg = final_agg.filter(final_agg[idx_col_name] == len(collections))
    if distinct:
        return final_agg.filter(final_agg[counter_col_name] > 0).select(*cols)
    else:
        # Re-expand each surviving row `min count` times to preserve duplicates.
        @output(cols, types)
        def exploder(row):
            import sys
            irange = xrange if sys.version_info[0] < 3 else range
            for _ in irange(getattr(row, counter_col_name)):
                yield row[:-2]  # drop the two helper columns
        return final_agg.map_reduce(mapper=exploder).select(*cols) | Calc intersection among datasets,
:param left: collection
:param rights: collection or list of collections
:param distinct: whether to preserve duplicate entries
:return: collection
:Examples:
>>> import pandas as pd
>>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
>>> df2 = DataFrame(pd.DataFrame({'a': [1, 3, 3], 'b': [1, 3, 3]}))
>>> df1.intersect(df2)
a b
0 1 1
1 3 3
2 3 3
>>> df1.intersect(df2, distinct=True)
a b
0 1 1
1 3 3 | Below is the instruction that describes the task:
### Input:
Calc intersection among datasets,
:param left: collection
:param rights: collection or list of collections
:param distinct: whether to preserve duplicate entries
:return: collection
:Examples:
>>> import pandas as pd
>>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
>>> df2 = DataFrame(pd.DataFrame({'a': [1, 3, 3], 'b': [1, 3, 3]}))
>>> df1.intersect(df2)
a b
0 1 1
1 3 3
2 3 3
>>> df1.intersect(df2, distinct=True)
a b
0 1 1
1 3 3
### Response:
def intersect(left, *rights, **kwargs):
    """
    Calc intersection among datasets.
    :param left: collection
    :param rights: collection or list of collections
    :param distinct: if True, de-duplicate the result; if False (default),
                     duplicate entries are preserved
    :return: collection
    :Examples:
    >>> import pandas as pd
    >>> df1 = DataFrame(pd.DataFrame({'a': [1, 2, 3, 3, 3], 'b': [1, 2, 3, 3, 3]}))
    >>> df2 = DataFrame(pd.DataFrame({'a': [1, 3, 3], 'b': [1, 3, 3]}))
    >>> df1.intersect(df2)
       a  b
    0  1  1
    1  3  3
    2  3  3
    >>> df1.intersect(df2, distinct=True)
       a  b
    0  1  1
    1  3  3
    """
    import time
    from ..utils import output
    distinct = kwargs.get('distinct', False)
    # A single list argument is accepted in place of varargs.
    if isinstance(rights[0], list):
        rights = rights[0]
    cols = [n for n in left.schema.names]
    types = [n for n in left.schema.types]
    collections = (left, ) + rights
    # Helper column names are timestamp-suffixed to dodge user column names.
    idx_col_name = 'idx_%d' % int(time.time())
    counter_col_name = 'exc_counter_%d' % int(time.time())
    # Append each collection's index as a column, then union everything.
    collections = [c[c, Scalar(idx).rename(idx_col_name)] for idx, c in enumerate(collections)]
    unioned = reduce(lambda a, b: a.union(b), collections)
    # Per-source occurrence count of each row value.
    src_agg = unioned.groupby(*(cols + [idx_col_name])) \
        .agg(**{counter_col_name: unioned.count()})
    # nunique(idx) = number of sources containing the row;
    # min(count) = its multiplicity in the intersection.
    aggregators = {
        idx_col_name: src_agg[idx_col_name].nunique(),
        counter_col_name: src_agg[counter_col_name].min(),
    }
    final_agg = src_agg.groupby(*cols).agg(**aggregators)
    # Rows must appear in all input collections to survive.
    final_agg = final_agg.filter(final_agg[idx_col_name] == len(collections))
    if distinct:
        return final_agg.filter(final_agg[counter_col_name] > 0).select(*cols)
    else:
        # Non-distinct mode: emit each row `min count` times again.
        @output(cols, types)
        def exploder(row):
            import sys
            irange = xrange if sys.version_info[0] < 3 else range
            for _ in irange(getattr(row, counter_col_name)):
                yield row[:-2]  # strip the two helper columns
        return final_agg.map_reduce(mapper=exploder).select(*cols) |
def add(self, fig, title, minX, maxX, offsetAdjuster=None,
        sequenceFetcher=None):
    """
    Find the features for a sequence title. If there aren't too many, add
    the features to C{fig}. Return information about the features, as
    described below.
    @param fig: A matplotlib figure.
    @param title: A C{str} sequence title from a BLAST hit. Of the form
        'gi|63148399|gb|DQ011818.1| Description...'.
    @param minX: The smallest x coordinate.
    @param maxX: The largest x coordinate.
    @param offsetAdjuster: a function for adjusting feature X axis offsets
        for plotting.
    @param sequenceFetcher: A function that takes a sequence title and a
        database name and returns a C{Bio.SeqIO} instance. If C{None}, use
        L{dark.entrez.getSequence}.
    @return: If we seem to be offline, return C{None}. Otherwise, return
        a L{FeatureList} instance.
    """
    # Fall back to an identity adjustment when no adjuster is supplied.
    offsetAdjuster = offsetAdjuster or (lambda x: x)
    fig.set_title('Target sequence features', fontsize=self.TITLE_FONTSIZE)
    fig.set_yticks([])
    features = FeatureList(title, self.DATABASE, self.WANTED_TYPES,
                           sequenceFetcher=sequenceFetcher)
    # Offline lookups: show a message instead of features and bail out.
    if features.offline:
        fig.text(minX + (maxX - minX) / 3.0, 0,
                 'You (or Genbank) appear to be offline.',
                 fontsize=self.FONTSIZE)
        fig.axis([minX, maxX, -1, 1])
        return None
    # If no interesting features were found, display a message saying
    # so in the figure. Otherwise, if we don't have too many features
    # to plot, add the feature info to the figure.
    nFeatures = len(features)
    if nFeatures == 0:
        # fig.text(minX + (maxX - minX) / 3.0, 0, 'No features found',
        # fontsize=self.FONTSIZE)
        fig.text(0.5, 0.5, 'No features found',
                 horizontalalignment='center', verticalalignment='center',
                 transform=fig.transAxes, fontsize=self.FONTSIZE)
        fig.axis([minX, maxX, -1, 1])
    elif nFeatures <= self.MAX_FEATURES_TO_DISPLAY:
        # Call the method in our subclass to do the figure display.
        self._displayFeatures(fig, features, minX, maxX, offsetAdjuster)
    else:
        # Flag that the display limit was hit; presumably read by the
        # caller — confirm against the owning class (outside this view).
        self.tooManyFeaturesToPlot = True
        # fig.text(minX + (maxX - minX) / 3.0, 0,
        # 'Too many features to plot.', fontsize=self.FONTSIZE)
        fig.text(0.5, 0.5, 'Too many features to plot',
                 horizontalalignment='center', verticalalignment='center',
                 fontsize=self.FONTSIZE, transform=fig.transAxes)
        fig.axis([minX, maxX, -1, 1])
    return features | Find the features for a sequence title. If there aren't too many, add
the features to C{fig}. Return information about the features, as
described below.
@param fig: A matplotlib figure.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
@param sequenceFetcher: A function that takes a sequence title and a
database name and returns a C{Bio.SeqIO} instance. If C{None}, use
L{dark.entrez.getSequence}.
@return: If we seem to be offline, return C{None}. Otherwise, return
a L{FeatureList} instance. | Below is the instruction that describes the task:
### Input:
Find the features for a sequence title. If there aren't too many, add
the features to C{fig}. Return information about the features, as
described below.
@param fig: A matplotlib figure.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param minX: The smallest x coordinate.
@param maxX: The largest x coordinate.
@param offsetAdjuster: a function for adjusting feature X axis offsets
for plotting.
@param sequenceFetcher: A function that takes a sequence title and a
database name and returns a C{Bio.SeqIO} instance. If C{None}, use
L{dark.entrez.getSequence}.
@return: If we seem to be offline, return C{None}. Otherwise, return
a L{FeatureList} instance.
### Response:
def add(self, fig, title, minX, maxX, offsetAdjuster=None,
        sequenceFetcher=None):
    """
    Find the features for a sequence title. If there aren't too many, add
    the features to C{fig}. Return information about the features, as
    described below.
    @param fig: A matplotlib figure.
    @param title: A C{str} sequence title from a BLAST hit. Of the form
        'gi|63148399|gb|DQ011818.1| Description...'.
    @param minX: The smallest x coordinate.
    @param maxX: The largest x coordinate.
    @param offsetAdjuster: a function for adjusting feature X axis offsets
        for plotting.
    @param sequenceFetcher: A function that takes a sequence title and a
        database name and returns a C{Bio.SeqIO} instance. If C{None}, use
        L{dark.entrez.getSequence}.
    @return: If we seem to be offline, return C{None}. Otherwise, return
        a L{FeatureList} instance.
    """
    # No adjuster supplied -> identity function.
    offsetAdjuster = offsetAdjuster or (lambda x: x)
    fig.set_title('Target sequence features', fontsize=self.TITLE_FONTSIZE)
    fig.set_yticks([])
    features = FeatureList(title, self.DATABASE, self.WANTED_TYPES,
                           sequenceFetcher=sequenceFetcher)
    # When the feature lookup reports being offline, render a notice and
    # return None (the documented offline contract).
    if features.offline:
        fig.text(minX + (maxX - minX) / 3.0, 0,
                 'You (or Genbank) appear to be offline.',
                 fontsize=self.FONTSIZE)
        fig.axis([minX, maxX, -1, 1])
        return None
    # If no interesting features were found, display a message saying
    # so in the figure. Otherwise, if we don't have too many features
    # to plot, add the feature info to the figure.
    nFeatures = len(features)
    if nFeatures == 0:
        # fig.text(minX + (maxX - minX) / 3.0, 0, 'No features found',
        # fontsize=self.FONTSIZE)
        fig.text(0.5, 0.5, 'No features found',
                 horizontalalignment='center', verticalalignment='center',
                 transform=fig.transAxes, fontsize=self.FONTSIZE)
        fig.axis([minX, maxX, -1, 1])
    elif nFeatures <= self.MAX_FEATURES_TO_DISPLAY:
        # Call the method in our subclass to do the figure display.
        self._displayFeatures(fig, features, minX, maxX, offsetAdjuster)
    else:
        # Record that the plot limit was exceeded; NOTE(review): presumably
        # consulted by the caller — confirm in the enclosing class.
        self.tooManyFeaturesToPlot = True
        # fig.text(minX + (maxX - minX) / 3.0, 0,
        # 'Too many features to plot.', fontsize=self.FONTSIZE)
        fig.text(0.5, 0.5, 'Too many features to plot',
                 horizontalalignment='center', verticalalignment='center',
                 fontsize=self.FONTSIZE, transform=fig.transAxes)
        fig.axis([minX, maxX, -1, 1])
    return features |
def getTextualNode(self, subreference: CtsReference=None):
    """ Special GetPassage implementation for SimplePassage (Simple is True by default)
    :param subreference: passage reference; non-CtsReference values (e.g. plain
        strings) are wrapped in CtsReference before use
    :return: the textual node returned by self.textObject.getTextualNode()
    """
    # Coerce anything that is not already a CtsReference.
    # NOTE(review): the None default is also wrapped (CtsReference(None)) —
    # confirm that is intended.
    if not isinstance(subreference, CtsReference):
        subreference = CtsReference(subreference)
    return self.textObject.getTextualNode(subreference) | Special GetPassage implementation for SimplePassage (Simple is True by default)
:param subreference:
:return: | Below is the instruction that describes the task:
### Input:
Special GetPassage implementation for SimplePassage (Simple is True by default)
:param subreference:
:return:
### Response:
def getTextualNode(self, subreference: CtsReference=None):
    """ Special GetPassage implementation for SimplePassage (Simple is True by default)
    :param subreference: passage reference; coerced to CtsReference if needed
    :return: result of delegating to self.textObject.getTextualNode()
    """
    # Wrap non-CtsReference inputs (including the None default — NOTE(review):
    # confirm CtsReference(None) is intended) before delegating.
    if not isinstance(subreference, CtsReference):
        subreference = CtsReference(subreference)
    return self.textObject.getTextualNode(subreference) |
def balance_sheet(self, end=datetime.max,
                  format=ReportFormat.printout, output_path=None):
    """
    Generate a balance sheet report.
    :param end: The end date to generate the report for.
    :param format: The format of the report.
    :param output_path: The path to the file the report is written to.
        If None, then the report is not written to a file.
    :returns: The generated report.
    """
    # Build the BalanceSheet over this object up to `end`, then render it
    # in the requested format (writing to output_path when one is given).
    rpt = BalanceSheet(self, end, output_path)
    return rpt.render(format) | Generate a transaction list report.
:param end: The end date to generate the report for.
:param format: The format of the report.
:param output_path: The path to the file the report is written to.
If None, then the report is not written to a file.
:returns: The generated report. | Below is the instruction that describes the task:
### Input:
Generate a transaction list report.
:param end: The end date to generate the report for.
:param format: The format of the report.
:param output_path: The path to the file the report is written to.
If None, then the report is not written to a file.
:returns: The generated report.
### Response:
def balance_sheet(self, end=datetime.max,
                  format=ReportFormat.printout, output_path=None):
    """
    Generate a balance sheet report.
    :param end: The end date to generate the report for.
    :param format: The format of the report.
    :param output_path: The path to the file the report is written to.
        If None, then the report is not written to a file.
    :returns: The generated report.
    """
    # Construct and render a BalanceSheet report; `format` selects the
    # output style and `output_path`, when set, is where it is written.
    rpt = BalanceSheet(self, end, output_path)
    return rpt.render(format) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.