code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def handle_debug_backtrace(self, call_id, payload):
"""Handle responses `DebugBacktrace`."""
frames = payload["frames"]
fd, path = tempfile.mkstemp('.json', text=True, dir=self.tmp_diff_folder)
tmpfile = os.fdopen(fd, 'w')
tmpfile.write(json.dumps(frames, indent=2))
opts = {'readonly': True, 'bufhidden': 'wipe',
'buflisted': False, 'swapfile': False}
self.editor.split_window(path, size=20, bufopts=opts)
tmpfile.close() | Handle responses `DebugBacktrace`. | Below is the the instruction that describes the task:
### Input:
Handle responses `DebugBacktrace`.
### Response:
def handle_debug_backtrace(self, call_id, payload):
"""Handle responses `DebugBacktrace`."""
frames = payload["frames"]
fd, path = tempfile.mkstemp('.json', text=True, dir=self.tmp_diff_folder)
tmpfile = os.fdopen(fd, 'w')
tmpfile.write(json.dumps(frames, indent=2))
opts = {'readonly': True, 'bufhidden': 'wipe',
'buflisted': False, 'swapfile': False}
self.editor.split_window(path, size=20, bufopts=opts)
tmpfile.close() |
def chunked(self):
"""Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members) | Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members. | Below is the the instruction that describes the task:
### Input:
Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
### Response:
def chunked(self):
"""Returns a boolean indicating if the guild is "chunked".
A chunked guild means that :attr:`member_count` is equal to the
number of members stored in the internal :attr:`members` cache.
If this value returns ``False``, then you should request for
offline members.
"""
count = getattr(self, '_member_count', None)
if count is None:
return False
return count == len(self._members) |
def to_json(self):
"""
Prepare data for the initial state of the admin-on-rest
"""
endpoints = []
for endpoint in self.endpoints:
list_fields = endpoint.fields
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
data = endpoint.to_dict()
data['fields'] = resource_type.get_type_of_fields(
list_fields,
table,
)
endpoints.append(data)
data = {
'title': self.title,
'endpoints': sorted(endpoints, key=lambda x: x['name']),
}
return json.dumps(data) | Prepare data for the initial state of the admin-on-rest | Below is the the instruction that describes the task:
### Input:
Prepare data for the initial state of the admin-on-rest
### Response:
def to_json(self):
"""
Prepare data for the initial state of the admin-on-rest
"""
endpoints = []
for endpoint in self.endpoints:
list_fields = endpoint.fields
resource_type = endpoint.Meta.resource_type
table = endpoint.Meta.table
data = endpoint.to_dict()
data['fields'] = resource_type.get_type_of_fields(
list_fields,
table,
)
endpoints.append(data)
data = {
'title': self.title,
'endpoints': sorted(endpoints, key=lambda x: x['name']),
}
return json.dumps(data) |
def _get_connection(self):
'''
_get_connection - Maybe get a new connection, or reuse if passed in.
Will share a connection with a model
internal
'''
if self._connection is None:
self._connection = self._get_new_connection()
return self._connection | _get_connection - Maybe get a new connection, or reuse if passed in.
Will share a connection with a model
internal | Below is the the instruction that describes the task:
### Input:
_get_connection - Maybe get a new connection, or reuse if passed in.
Will share a connection with a model
internal
### Response:
def _get_connection(self):
'''
_get_connection - Maybe get a new connection, or reuse if passed in.
Will share a connection with a model
internal
'''
if self._connection is None:
self._connection = self._get_new_connection()
return self._connection |
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version} | Install a "package" on the REST server | Below is the the instruction that describes the task:
### Input:
Install a "package" on the REST server
### Response:
def package_install(name, **kwargs):
'''
Install a "package" on the REST server
'''
DETAILS = _load_state()
if kwargs.get('version', False):
version = kwargs['version']
else:
version = '1.0'
DETAILS['packages'][name] = version
_save_state(DETAILS)
return {name: version} |
def _run_and_return(self, cmd, sudo=None):
''' Run a command, show the message to the user if quiet isn't set,
and return the return code. This is a wrapper for the OCI client
to run a command and easily return the return code value (what
the user is ultimately interested in).
Parameters
==========
cmd: the command (list) to run.
sudo: whether to add sudo or not.
'''
sudo = self._get_sudo(sudo)
result = self._run_command(cmd,
sudo=sudo,
quiet=True,
return_result=True)
# Successful return with no output
if len(result) == 0:
return
# Show the response to the user, only if not quiet.
elif not self.quiet:
bot.println(result['message'])
# Return the state object to the user
return result['return_code'] | Run a command, show the message to the user if quiet isn't set,
and return the return code. This is a wrapper for the OCI client
to run a command and easily return the return code value (what
the user is ultimately interested in).
Parameters
==========
cmd: the command (list) to run.
sudo: whether to add sudo or not. | Below is the the instruction that describes the task:
### Input:
Run a command, show the message to the user if quiet isn't set,
and return the return code. This is a wrapper for the OCI client
to run a command and easily return the return code value (what
the user is ultimately interested in).
Parameters
==========
cmd: the command (list) to run.
sudo: whether to add sudo or not.
### Response:
def _run_and_return(self, cmd, sudo=None):
''' Run a command, show the message to the user if quiet isn't set,
and return the return code. This is a wrapper for the OCI client
to run a command and easily return the return code value (what
the user is ultimately interested in).
Parameters
==========
cmd: the command (list) to run.
sudo: whether to add sudo or not.
'''
sudo = self._get_sudo(sudo)
result = self._run_command(cmd,
sudo=sudo,
quiet=True,
return_result=True)
# Successful return with no output
if len(result) == 0:
return
# Show the response to the user, only if not quiet.
elif not self.quiet:
bot.println(result['message'])
# Return the state object to the user
return result['return_code'] |
def is_leaf(self, name):
r"""
Test if a node is a leaf node (node with no children).
:param name: Node name
:type name: :ref:`NodeName`
:rtype: boolean
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return not self._db[name]["children"] | r"""
Test if a node is a leaf node (node with no children).
:param name: Node name
:type name: :ref:`NodeName`
:rtype: boolean
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree) | Below is the the instruction that describes the task:
### Input:
r"""
Test if a node is a leaf node (node with no children).
:param name: Node name
:type name: :ref:`NodeName`
:rtype: boolean
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
### Response:
def is_leaf(self, name):
r"""
Test if a node is a leaf node (node with no children).
:param name: Node name
:type name: :ref:`NodeName`
:rtype: boolean
:raises:
* RuntimeError (Argument \`name\` is not valid)
* RuntimeError (Node *[name]* not in tree)
"""
if self._validate_node_name(name):
raise RuntimeError("Argument `name` is not valid")
self._node_in_tree(name)
return not self._db[name]["children"] |
def wait(self):
""" Wait until all in progress and queued items are processed """
self._wait_called = True
while self.tracked_coordinator_count() > 0 or \
self.waiting_coordinator_count() > 0:
time.sleep(1)
super(AsperaTransferCoordinatorController, self).wait()
self._wait_called = False | Wait until all in progress and queued items are processed | Below is the the instruction that describes the task:
### Input:
Wait until all in progress and queued items are processed
### Response:
def wait(self):
""" Wait until all in progress and queued items are processed """
self._wait_called = True
while self.tracked_coordinator_count() > 0 or \
self.waiting_coordinator_count() > 0:
time.sleep(1)
super(AsperaTransferCoordinatorController, self).wait()
self._wait_called = False |
def progressbar(iterable, length=23):
"""Print a simple progress bar while processing the given iterable.
Function |progressbar| does print the progress bar when option
`printprogress` is activted:
>>> from hydpy import pub
>>> pub.options.printprogress = True
You can pass an iterable object. Say you want to calculate the the sum
of all integer values from 1 to 100 and print the progress of the
calculation. Using function |range| (which returns a list in Python 2
and an iterator in Python3, but both are fine), one just has to
interpose function |progressbar|:
>>> from hydpy.core.printtools import progressbar
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
|---------------------|
***********************
>>> x_sum
5050
To prevent possible interim print commands from dismembering the status
bar, they are delayed until the status bar is complete. For intermediate
print outs of each fiftieth calculation, the result looks as follows:
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
... if not x % 50:
... print(x, x_sum)
|---------------------|
***********************
50 1275
100 5050
The number of characters of the progress bar can be changed:
>>> for i in progressbar(range(100), length=50):
... continue
|------------------------------------------------|
**************************************************
But its maximum number of characters is restricted by the length of the
given iterable:
>>> for i in progressbar(range(10), length=50):
... continue
|--------|
**********
The smallest possible progress bar has two characters:
>>> for i in progressbar(range(2)):
... continue
||
**
For iterables of length one or zero, no progress bar is plottet:
>>> for i in progressbar(range(1)):
... continue
The same is True when the `printprogress` option is inactivated:
>>> pub.options.printprogress = False
>>> for i in progressbar(range(100)):
... continue
"""
if hydpy.pub.options.printprogress and (len(iterable) > 1):
temp_name = os.path.join(tempfile.gettempdir(),
'HydPy_progressbar_stdout')
temp_stdout = open(temp_name, 'w')
real_stdout = sys.stdout
try:
sys.stdout = temp_stdout
nmbstars = min(len(iterable), length)
nmbcounts = len(iterable)/nmbstars
indentation = ' '*max(_printprogress_indentation, 0)
with PrintStyle(color=36, font=1, file=real_stdout):
print(' %s|%s|\n%s ' % (indentation,
'-'*(nmbstars-2),
indentation),
end='',
file=real_stdout)
counts = 1.
for next_ in iterable:
counts += 1.
if counts >= nmbcounts:
print(end='*', file=real_stdout)
counts -= nmbcounts
yield next_
finally:
try:
temp_stdout.close()
except BaseException:
pass
sys.stdout = real_stdout
print()
with open(temp_name, 'r') as temp_stdout:
sys.stdout.write(temp_stdout.read())
sys.stdout.flush()
else:
for next_ in iterable:
yield next_ | Print a simple progress bar while processing the given iterable.
Function |progressbar| does print the progress bar when option
`printprogress` is activted:
>>> from hydpy import pub
>>> pub.options.printprogress = True
You can pass an iterable object. Say you want to calculate the the sum
of all integer values from 1 to 100 and print the progress of the
calculation. Using function |range| (which returns a list in Python 2
and an iterator in Python3, but both are fine), one just has to
interpose function |progressbar|:
>>> from hydpy.core.printtools import progressbar
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
|---------------------|
***********************
>>> x_sum
5050
To prevent possible interim print commands from dismembering the status
bar, they are delayed until the status bar is complete. For intermediate
print outs of each fiftieth calculation, the result looks as follows:
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
... if not x % 50:
... print(x, x_sum)
|---------------------|
***********************
50 1275
100 5050
The number of characters of the progress bar can be changed:
>>> for i in progressbar(range(100), length=50):
... continue
|------------------------------------------------|
**************************************************
But its maximum number of characters is restricted by the length of the
given iterable:
>>> for i in progressbar(range(10), length=50):
... continue
|--------|
**********
The smallest possible progress bar has two characters:
>>> for i in progressbar(range(2)):
... continue
||
**
For iterables of length one or zero, no progress bar is plottet:
>>> for i in progressbar(range(1)):
... continue
The same is True when the `printprogress` option is inactivated:
>>> pub.options.printprogress = False
>>> for i in progressbar(range(100)):
... continue | Below is the the instruction that describes the task:
### Input:
Print a simple progress bar while processing the given iterable.
Function |progressbar| does print the progress bar when option
`printprogress` is activted:
>>> from hydpy import pub
>>> pub.options.printprogress = True
You can pass an iterable object. Say you want to calculate the the sum
of all integer values from 1 to 100 and print the progress of the
calculation. Using function |range| (which returns a list in Python 2
and an iterator in Python3, but both are fine), one just has to
interpose function |progressbar|:
>>> from hydpy.core.printtools import progressbar
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
|---------------------|
***********************
>>> x_sum
5050
To prevent possible interim print commands from dismembering the status
bar, they are delayed until the status bar is complete. For intermediate
print outs of each fiftieth calculation, the result looks as follows:
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
... if not x % 50:
... print(x, x_sum)
|---------------------|
***********************
50 1275
100 5050
The number of characters of the progress bar can be changed:
>>> for i in progressbar(range(100), length=50):
... continue
|------------------------------------------------|
**************************************************
But its maximum number of characters is restricted by the length of the
given iterable:
>>> for i in progressbar(range(10), length=50):
... continue
|--------|
**********
The smallest possible progress bar has two characters:
>>> for i in progressbar(range(2)):
... continue
||
**
For iterables of length one or zero, no progress bar is plottet:
>>> for i in progressbar(range(1)):
... continue
The same is True when the `printprogress` option is inactivated:
>>> pub.options.printprogress = False
>>> for i in progressbar(range(100)):
... continue
### Response:
def progressbar(iterable, length=23):
"""Print a simple progress bar while processing the given iterable.
Function |progressbar| does print the progress bar when option
`printprogress` is activted:
>>> from hydpy import pub
>>> pub.options.printprogress = True
You can pass an iterable object. Say you want to calculate the the sum
of all integer values from 1 to 100 and print the progress of the
calculation. Using function |range| (which returns a list in Python 2
and an iterator in Python3, but both are fine), one just has to
interpose function |progressbar|:
>>> from hydpy.core.printtools import progressbar
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
|---------------------|
***********************
>>> x_sum
5050
To prevent possible interim print commands from dismembering the status
bar, they are delayed until the status bar is complete. For intermediate
print outs of each fiftieth calculation, the result looks as follows:
>>> x_sum = 0
>>> for x in progressbar(range(1, 101)):
... x_sum += x
... if not x % 50:
... print(x, x_sum)
|---------------------|
***********************
50 1275
100 5050
The number of characters of the progress bar can be changed:
>>> for i in progressbar(range(100), length=50):
... continue
|------------------------------------------------|
**************************************************
But its maximum number of characters is restricted by the length of the
given iterable:
>>> for i in progressbar(range(10), length=50):
... continue
|--------|
**********
The smallest possible progress bar has two characters:
>>> for i in progressbar(range(2)):
... continue
||
**
For iterables of length one or zero, no progress bar is plottet:
>>> for i in progressbar(range(1)):
... continue
The same is True when the `printprogress` option is inactivated:
>>> pub.options.printprogress = False
>>> for i in progressbar(range(100)):
... continue
"""
if hydpy.pub.options.printprogress and (len(iterable) > 1):
temp_name = os.path.join(tempfile.gettempdir(),
'HydPy_progressbar_stdout')
temp_stdout = open(temp_name, 'w')
real_stdout = sys.stdout
try:
sys.stdout = temp_stdout
nmbstars = min(len(iterable), length)
nmbcounts = len(iterable)/nmbstars
indentation = ' '*max(_printprogress_indentation, 0)
with PrintStyle(color=36, font=1, file=real_stdout):
print(' %s|%s|\n%s ' % (indentation,
'-'*(nmbstars-2),
indentation),
end='',
file=real_stdout)
counts = 1.
for next_ in iterable:
counts += 1.
if counts >= nmbcounts:
print(end='*', file=real_stdout)
counts -= nmbcounts
yield next_
finally:
try:
temp_stdout.close()
except BaseException:
pass
sys.stdout = real_stdout
print()
with open(temp_name, 'r') as temp_stdout:
sys.stdout.write(temp_stdout.read())
sys.stdout.flush()
else:
for next_ in iterable:
yield next_ |
def CheckInputArgs(*interfaces):
"""Must provide at least one interface, the last one may be repeated.
"""
l = len(interfaces)
def wrapper(func):
def check_args(self, *args, **kw):
for i in range(len(args)):
if (l > i and interfaces[i].providedBy(args[i])) or interfaces[-1].providedBy(args[i]):
continue
if l > i: raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[i])
raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[-1])
func(self, *args, **kw)
return check_args
return wrapper | Must provide at least one interface, the last one may be repeated. | Below is the the instruction that describes the task:
### Input:
Must provide at least one interface, the last one may be repeated.
### Response:
def CheckInputArgs(*interfaces):
"""Must provide at least one interface, the last one may be repeated.
"""
l = len(interfaces)
def wrapper(func):
def check_args(self, *args, **kw):
for i in range(len(args)):
if (l > i and interfaces[i].providedBy(args[i])) or interfaces[-1].providedBy(args[i]):
continue
if l > i: raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[i])
raise TypeError, 'arg %s does not implement %s' %(args[i], interfaces[-1])
func(self, *args, **kw)
return check_args
return wrapper |
def apply(self, method):
"""Applies the given ForecastingAlgorithm or SmoothingMethod from the :py:mod:`pycast.methods`
module to the TimeSeries.
:param BaseMethod method: Method that should be used with the TimeSeries.
For more information about the methods take a look into their corresponding documentation.
:raise: Raises a StandardError when the TimeSeries was not normalized and hte method requires a
normalized TimeSeries
"""
# check, if the methods requirements are fullfilled
if method.has_to_be_normalized() and not self._normalized:
raise StandardError("method requires a normalized TimeSeries instance.")
if method.has_to_be_sorted():
self.sort_timeseries()
return method.execute(self) | Applies the given ForecastingAlgorithm or SmoothingMethod from the :py:mod:`pycast.methods`
module to the TimeSeries.
:param BaseMethod method: Method that should be used with the TimeSeries.
For more information about the methods take a look into their corresponding documentation.
:raise: Raises a StandardError when the TimeSeries was not normalized and hte method requires a
normalized TimeSeries | Below is the the instruction that describes the task:
### Input:
Applies the given ForecastingAlgorithm or SmoothingMethod from the :py:mod:`pycast.methods`
module to the TimeSeries.
:param BaseMethod method: Method that should be used with the TimeSeries.
For more information about the methods take a look into their corresponding documentation.
:raise: Raises a StandardError when the TimeSeries was not normalized and hte method requires a
normalized TimeSeries
### Response:
def apply(self, method):
"""Applies the given ForecastingAlgorithm or SmoothingMethod from the :py:mod:`pycast.methods`
module to the TimeSeries.
:param BaseMethod method: Method that should be used with the TimeSeries.
For more information about the methods take a look into their corresponding documentation.
:raise: Raises a StandardError when the TimeSeries was not normalized and hte method requires a
normalized TimeSeries
"""
# check, if the methods requirements are fullfilled
if method.has_to_be_normalized() and not self._normalized:
raise StandardError("method requires a normalized TimeSeries instance.")
if method.has_to_be_sorted():
self.sort_timeseries()
return method.execute(self) |
def am_orb(m1,m2,a,e):
'''
orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity
'''
a_cm = a * rsun_cm
m1_g = m1 * msun_g
m2_g = m2 * msun_g
J_orb=np.sqrt(grav_const*a_cm*(old_div((m1_g**2*m2_g**2),(m1_g+m2_g))))*(1-e**2)
return J_orb | orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity | Below is the the instruction that describes the task:
### Input:
orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity
### Response:
def am_orb(m1,m2,a,e):
'''
orbital angular momentum.
e.g Ge etal2010
Parameters
----------
m1, m2 : float
Masses of both stars in Msun.
A : float
Separation in Rsun.
e : float
Eccentricity
'''
a_cm = a * rsun_cm
m1_g = m1 * msun_g
m2_g = m2 * msun_g
J_orb=np.sqrt(grav_const*a_cm*(old_div((m1_g**2*m2_g**2),(m1_g+m2_g))))*(1-e**2)
return J_orb |
def remove_listener(self, registration_id):
"""
Removes the specified membership listener.
:param registration_id: (str), registration id of the listener to be deleted.
:return: (bool), if the registration is removed, ``false`` otherwise.
"""
try:
self.listeners.pop(registration_id)
return True
except KeyError:
return False | Removes the specified membership listener.
:param registration_id: (str), registration id of the listener to be deleted.
:return: (bool), if the registration is removed, ``false`` otherwise. | Below is the the instruction that describes the task:
### Input:
Removes the specified membership listener.
:param registration_id: (str), registration id of the listener to be deleted.
:return: (bool), if the registration is removed, ``false`` otherwise.
### Response:
def remove_listener(self, registration_id):
"""
Removes the specified membership listener.
:param registration_id: (str), registration id of the listener to be deleted.
:return: (bool), if the registration is removed, ``false`` otherwise.
"""
try:
self.listeners.pop(registration_id)
return True
except KeyError:
return False |
def init_state_from_ledger(self, state: State, ledger: Ledger, reqHandler):
"""
If the trie is empty then initialize it by applying
txns from ledger.
"""
if state.isEmpty:
logger.info('{} found state to be empty, recreating from '
'ledger'.format(self))
for seq_no, txn in ledger.getAllTxn():
txn = self.update_txn_with_extra_data(txn)
reqHandler.updateState([txn, ], isCommitted=True)
state.commit(rootHash=state.headHash) | If the trie is empty then initialize it by applying
txns from ledger. | Below is the the instruction that describes the task:
### Input:
If the trie is empty then initialize it by applying
txns from ledger.
### Response:
def init_state_from_ledger(self, state: State, ledger: Ledger, reqHandler):
"""
If the trie is empty then initialize it by applying
txns from ledger.
"""
if state.isEmpty:
logger.info('{} found state to be empty, recreating from '
'ledger'.format(self))
for seq_no, txn in ledger.getAllTxn():
txn = self.update_txn_with_extra_data(txn)
reqHandler.updateState([txn, ], isCommitted=True)
state.commit(rootHash=state.headHash) |
def combine(self, merge_points=False):
"""Appends all blocks into a single unstructured grid.
Parameters
----------
merge_points : bool, optional
Merge coincidental points.
"""
alg = vtk.vtkAppendFilter()
for block in self:
alg.AddInputData(block)
alg.SetMergePoints(merge_points)
alg.Update()
return wrap(alg.GetOutputDataObject(0)) | Appends all blocks into a single unstructured grid.
Parameters
----------
merge_points : bool, optional
Merge coincidental points. | Below is the the instruction that describes the task:
### Input:
Appends all blocks into a single unstructured grid.
Parameters
----------
merge_points : bool, optional
Merge coincidental points.
### Response:
def combine(self, merge_points=False):
"""Appends all blocks into a single unstructured grid.
Parameters
----------
merge_points : bool, optional
Merge coincidental points.
"""
alg = vtk.vtkAppendFilter()
for block in self:
alg.AddInputData(block)
alg.SetMergePoints(merge_points)
alg.Update()
return wrap(alg.GetOutputDataObject(0)) |
def get_info(node_cfg):
"""Return a tuple with the verbal name of a node, and a dict of field names."""
node_cfg = node_cfg if isinstance(node_cfg, dict) else {"name": node_cfg}
return node_cfg.get("name"), node_cfg.get("fields", {}) | Return a tuple with the verbal name of a node, and a dict of field names. | Below is the the instruction that describes the task:
### Input:
Return a tuple with the verbal name of a node, and a dict of field names.
### Response:
def get_info(node_cfg):
"""Return a tuple with the verbal name of a node, and a dict of field names."""
node_cfg = node_cfg if isinstance(node_cfg, dict) else {"name": node_cfg}
return node_cfg.get("name"), node_cfg.get("fields", {}) |
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt | Converts a unix timestamp to a Python datetime object | Below is the the instruction that describes the task:
### Input:
Converts a unix timestamp to a Python datetime object
### Response:
def timestamp2date(tstamp):
"Converts a unix timestamp to a Python datetime object"
dt = datetime.fromtimestamp(tstamp)
if not dt.hour+dt.minute+dt.second+dt.microsecond:
return dt.date()
else:
return dt |
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
"""Asynchronously invokes the underlying RPC on the client.
Args:
request_iterator: An ASYNC iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the return Call-Future's result value will be the
response message of the RPC. Should the event terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError.
"""
return _utils.wrap_future_call(
self._inner.future(
_utils.WrappedAsyncIterator(request_iterator, self._loop),
timeout,
metadata,
credentials
),
self._loop,
self._executor) | Asynchronously invokes the underlying RPC on the client.
Args:
request_iterator: An ASYNC iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the return Call-Future's result value will be the
response message of the RPC. Should the event terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError. | Below is the the instruction that describes the task:
### Input:
Asynchronously invokes the underlying RPC on the client.
Args:
request_iterator: An ASYNC iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the return Call-Future's result value will be the
response message of the RPC. Should the event terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError.
### Response:
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
"""Asynchronously invokes the underlying RPC on the client.
Args:
request_iterator: An ASYNC iterator that yields request values for the RPC.
timeout: An optional duration of time in seconds to allow for the RPC.
If None, the timeout is considered infinite.
metadata: Optional :term:`metadata` to be transmitted to the
service-side of the RPC.
credentials: An optional CallCredentials for the RPC.
Returns:
An object that is both a Call for the RPC and a Future. In the event of
RPC completion, the return Call-Future's result value will be the
response message of the RPC. Should the event terminate with non-OK
status, the returned Call-Future's exception value will be an RpcError.
"""
return _utils.wrap_future_call(
self._inner.future(
_utils.WrappedAsyncIterator(request_iterator, self._loop),
timeout,
metadata,
credentials
),
self._loop,
self._executor) |
def none_coalesce_handle(tokens):
"""Process the None-coalescing operator."""
if len(tokens) == 1:
return tokens[0]
elif tokens[0].isalnum():
return "({b} if {a} is None else {a})".format(
a=tokens[0],
b=none_coalesce_handle(tokens[1:]),
)
else:
return "(lambda {x}: {b} if {x} is None else {x})({a})".format(
x=none_coalesce_var,
a=tokens[0],
b=none_coalesce_handle(tokens[1:]),
) | Process the None-coalescing operator. | Below is the the instruction that describes the task:
### Input:
Process the None-coalescing operator.
### Response:
def none_coalesce_handle(tokens):
"""Process the None-coalescing operator."""
if len(tokens) == 1:
return tokens[0]
elif tokens[0].isalnum():
return "({b} if {a} is None else {a})".format(
a=tokens[0],
b=none_coalesce_handle(tokens[1:]),
)
else:
return "(lambda {x}: {b} if {x} is None else {x})({a})".format(
x=none_coalesce_var,
a=tokens[0],
b=none_coalesce_handle(tokens[1:]),
) |
def configure_camera(self):
        """
        Enables access to the camera.
        http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
        https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/
        Afterwards, test with:
        /opt/vc/bin/raspistill --nopreview --output image.jpg
        Check for compatibility with:
        vcgencmd get_camera
        which should show:
        supported=1 detected=1
        """
        #TODO:check per OS? Works on Raspbian Jessie
        r = self.local_renderer
        if self.env.camera_enabled:
            r.pc('Enabling camera.')
            #TODO:fix, doesn't work on Ubuntu, which uses commented-out values
            # Set start_x=1
            #r.sudo('if grep "start_x=0" /boot/config.txt; then sed -i "s/start_x=0/start_x=1/g" /boot/config.txt; fi')
            #r.sudo('if grep "start_x" /boot/config.txt; then true; else echo "start_x=1" >> /boot/config.txt; fi')
            r.enable_attr(
                filename='/boot/config.txt',
                key='start_x',
                value=1,
                use_sudo=True,
            )
            # Set gpu_mem=128
            # r.sudo('if grep "gpu_mem" /boot/config.txt; then true; else echo "gpu_mem=128" >> /boot/config.txt; fi')
            r.enable_attr(
                filename='/boot/config.txt',
                key='gpu_mem',
                value=r.env.gpu_mem,
                use_sudo=True,
            )
            # Compile the Raspberry Pi binaries.
            #https://github.com/raspberrypi/userland
            r.run('cd ~; git clone https://github.com/raspberrypi/userland.git; cd userland; ./buildme')
            r.run('touch ~/.bash_aliases')
            #r.run("echo 'PATH=$PATH:/opt/vc/bin\nexport PATH' >> ~/.bash_aliases")
            r.append(r'PATH=$PATH:/opt/vc/bin\nexport PATH', '~/.bash_aliases')
            #r.run("echo 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH' >> ~/.bash_aliases")
            r.append(r'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH', '~/.bash_aliases')
            r.run('source ~/.bashrc')
            r.sudo('ldconfig')
            # Allow our user to access the video device.
            r.sudo("echo 'SUBSYSTEM==\"vchiq\",GROUP=\"video\",MODE=\"0660\"' > /etc/udev/rules.d/10-vchiq-permissions.rules")
            r.sudo("usermod -a -G video {user}")
            # Reboot so the new /boot/config.txt settings and group
            # membership take effect, then verify the camera works.
            r.reboot(wait=300, timeout=60)
            self.test_camera()
        else:
            # Camera disabled: strip both boot attributes and reboot to
            # apply the change.
            r.disable_attr(
                filename='/boot/config.txt',
                key='start_x',
                use_sudo=True,
            )
            r.disable_attr(
                filename='/boot/config.txt',
                key='gpu_mem',
                use_sudo=True,
            )
            r.reboot(wait=300, timeout=60) | Enables access to the camera.
http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/
Afterwards, test with:
/opt/vc/bin/raspistill --nopreview --output image.jpg
Check for compatibility with:
vcgencmd get_camera
which should show:
    supported=1 detected=1 | Below is the instruction that describes the task:
### Input:
Enables access to the camera.
http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/
Afterwards, test with:
/opt/vc/bin/raspistill --nopreview --output image.jpg
Check for compatibility with:
vcgencmd get_camera
which should show:
supported=1 detected=1
### Response:
def configure_camera(self):
"""
Enables access to the camera.
http://raspberrypi.stackexchange.com/questions/14229/how-can-i-enable-the-camera-without-using-raspi-config
https://mike632t.wordpress.com/2014/06/26/raspberry-pi-camera-setup/
Afterwards, test with:
/opt/vc/bin/raspistill --nopreview --output image.jpg
Check for compatibility with:
vcgencmd get_camera
which should show:
supported=1 detected=1
"""
#TODO:check per OS? Works on Raspbian Jessie
r = self.local_renderer
if self.env.camera_enabled:
r.pc('Enabling camera.')
#TODO:fix, doesn't work on Ubuntu, which uses commented-out values
# Set start_x=1
#r.sudo('if grep "start_x=0" /boot/config.txt; then sed -i "s/start_x=0/start_x=1/g" /boot/config.txt; fi')
#r.sudo('if grep "start_x" /boot/config.txt; then true; else echo "start_x=1" >> /boot/config.txt; fi')
r.enable_attr(
filename='/boot/config.txt',
key='start_x',
value=1,
use_sudo=True,
)
# Set gpu_mem=128
# r.sudo('if grep "gpu_mem" /boot/config.txt; then true; else echo "gpu_mem=128" >> /boot/config.txt; fi')
r.enable_attr(
filename='/boot/config.txt',
key='gpu_mem',
value=r.env.gpu_mem,
use_sudo=True,
)
# Compile the Raspberry Pi binaries.
#https://github.com/raspberrypi/userland
r.run('cd ~; git clone https://github.com/raspberrypi/userland.git; cd userland; ./buildme')
r.run('touch ~/.bash_aliases')
#r.run("echo 'PATH=$PATH:/opt/vc/bin\nexport PATH' >> ~/.bash_aliases")
r.append(r'PATH=$PATH:/opt/vc/bin\nexport PATH', '~/.bash_aliases')
#r.run("echo 'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH' >> ~/.bash_aliases")
r.append(r'LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/vc/lib\nexport LD_LIBRARY_PATH', '~/.bash_aliases')
r.run('source ~/.bashrc')
r.sudo('ldconfig')
# Allow our user to access the video device.
r.sudo("echo 'SUBSYSTEM==\"vchiq\",GROUP=\"video\",MODE=\"0660\"' > /etc/udev/rules.d/10-vchiq-permissions.rules")
r.sudo("usermod -a -G video {user}")
r.reboot(wait=300, timeout=60)
self.test_camera()
else:
r.disable_attr(
filename='/boot/config.txt',
key='start_x',
use_sudo=True,
)
r.disable_attr(
filename='/boot/config.txt',
key='gpu_mem',
use_sudo=True,
)
r.reboot(wait=300, timeout=60) |
def run(self, options):
        """
        .. todo::
            check network connection
        :param Namespace options: parse result from argparse
        :return:
        """
        self.logger.debug("debug enabled...")
        # External commands this environment check requires on PATH.
        depends = ['git']
        nil_tools = []
        self.logger.info("depends list: %s", depends)
        # Probe each tool; report the ones found, collect the missing.
        for v in depends:
            real_path = shutil.which(v)
            if real_path:
                self.print_message("Found {}:{}..."
                                   " {}".format(v,
                                                real_path,
                                                termcolor.colored(
                                                    '[OK]',
                                                    color='blue')))
            else:
                nil_tools.append(v)
                self.error_message(
                    'Missing tool:`{}`... {}'.format(v, '[ERR]'), prefix='',
                    suffix='')
            pass
        # Summarize: abort with an error message when anything was missing.
        if nil_tools:
            self.print_message('')
            self.error("please install missing tools...")
        else:
            self.print_message("\nNo error found,"
                               "you can use cliez in right way.")
        self.logger.debug("check finished...")
        pass
    pass | .. todo::
check network connection
:param Namespace options: parse result from argparse
    :return: | Below is the instruction that describes the task:
### Input:
.. todo::
check network connection
:param Namespace options: parse result from argparse
:return:
### Response:
def run(self, options):
"""
.. todo::
check network connection
:param Namespace options: parse result from argparse
:return:
"""
self.logger.debug("debug enabled...")
depends = ['git']
nil_tools = []
self.logger.info("depends list: %s", depends)
for v in depends:
real_path = shutil.which(v)
if real_path:
self.print_message("Found {}:{}..."
" {}".format(v,
real_path,
termcolor.colored(
'[OK]',
color='blue')))
else:
nil_tools.append(v)
self.error_message(
'Missing tool:`{}`... {}'.format(v, '[ERR]'), prefix='',
suffix='')
pass
if nil_tools:
self.print_message('')
self.error("please install missing tools...")
else:
self.print_message("\nNo error found,"
"you can use cliez in right way.")
self.logger.debug("check finished...")
pass
pass |
def libvlc_event_detach(p_event_manager, i_event_type, f_callback, p_user_data):
    '''Unregister an event notification.
    @param p_event_manager: the event manager.
    @param i_event_type: the desired event to which we want to unregister.
    @param f_callback: the function to call when i_event_type occurs.
    @param p_user_data: user provided data to carry with the event.
    '''
    # Reuse the cached ctypes binding when available; otherwise build it
    # via _Cfunction (the (1,) flags mark all four parameters as inputs)
    # and memoize it for subsequent calls.
    f = _Cfunctions.get('libvlc_event_detach', None) or \
        _Cfunction('libvlc_event_detach', ((1,), (1,), (1,), (1,),), None,
                    None, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, p_user_data) | Unregister an event notification.
@param p_event_manager: the event manager.
@param i_event_type: the desired event to which we want to unregister.
@param f_callback: the function to call when i_event_type occurs.
    @param p_user_data: user provided data to carry with the event. | Below is the instruction that describes the task:
### Input:
Unregister an event notification.
@param p_event_manager: the event manager.
@param i_event_type: the desired event to which we want to unregister.
@param f_callback: the function to call when i_event_type occurs.
@param p_user_data: user provided data to carry with the event.
### Response:
def libvlc_event_detach(p_event_manager, i_event_type, f_callback, p_user_data):
'''Unregister an event notification.
@param p_event_manager: the event manager.
@param i_event_type: the desired event to which we want to unregister.
@param f_callback: the function to call when i_event_type occurs.
@param p_user_data: user provided data to carry with the event.
'''
f = _Cfunctions.get('libvlc_event_detach', None) or \
_Cfunction('libvlc_event_detach', ((1,), (1,), (1,), (1,),), None,
None, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
return f(p_event_manager, i_event_type, f_callback, p_user_data) |
def delete(self, album, **kwds):
        """
        Endpoint: /album/<id>/delete.json
        Deletes an album.
        Returns True if successful.
        Raises a TroveboxError if not.
        """
        # Resolve the album to its id, POST to the delete endpoint, and
        # unwrap the boolean from the API's "result" field.
        return self._client.post("/album/%s/delete.json" %
                                 self._extract_id(album),
                                 **kwds)["result"] | Endpoint: /album/<id>/delete.json
Deletes an album.
Returns True if successful.
    Raises a TroveboxError if not. | Below is the instruction that describes the task:
### Input:
Endpoint: /album/<id>/delete.json
Deletes an album.
Returns True if successful.
Raises a TroveboxError if not.
### Response:
def delete(self, album, **kwds):
"""
Endpoint: /album/<id>/delete.json
Deletes an album.
Returns True if successful.
Raises a TroveboxError if not.
"""
return self._client.post("/album/%s/delete.json" %
self._extract_id(album),
**kwds)["result"] |
def group_by_types(self):
        """Iterate over species grouped by type"""
        # One full pass over the sites per type: emission is grouped by
        # type, preserving original site order within each group.
        for t in self.types_of_specie:
            for site in self:
                if site.specie == t:
                    yield site | Iterate over species grouped by type | Below is the the instruction that describes the task:
### Input:
Iterate over species grouped by type
### Response:
def group_by_types(self):
"""Iterate over species grouped by type"""
for t in self.types_of_specie:
for site in self:
if site.specie == t:
yield site |
def get(request, path, root=_METADATA_ROOT, recursive=False):
    """Fetch a resource from the metadata server.
    Args:
        request (google.auth.transport.Request): A callable used to make
            HTTP requests.
        path (str): The resource to retrieve. For example,
            ``'instance/service-accounts/default'``.
        root (str): The full path to the metadata server root.
        recursive (bool): Whether to do a recursive query of metadata. See
            https://cloud.google.com/compute/docs/metadata#aggcontents for more
            details.
    Returns:
        Union[Mapping, str]: If the metadata server returns JSON, a mapping of
            the decoded JSON is return. Otherwise, the response content is
            returned as a string.
    Raises:
        google.auth.exceptions.TransportError: if an error occurred while
            retrieving metadata.
    """
    base_url = urlparse.urljoin(root, path)
    query_params = {}
    # Recursive (aggregated) queries are requested via a query parameter.
    if recursive:
        query_params['recursive'] = 'true'
    url = _helpers.update_query(base_url, query_params)
    response = request(url=url, method='GET', headers=_METADATA_HEADERS)
    if response.status == http_client.OK:
        content = _helpers.from_bytes(response.data)
        # JSON bodies are decoded to a mapping; any other content type is
        # returned as the raw text.
        if response.headers['content-type'] == 'application/json':
            try:
                return json.loads(content)
            except ValueError as caught_exc:
                # NOTE(review): the adjacent string literals concatenate
                # without a separating space ("Enginemetadata") — message typo.
                new_exc = exceptions.TransportError(
                    'Received invalid JSON from the Google Compute Engine'
                    'metadata service: {:.20}'.format(content))
                six.raise_from(new_exc, caught_exc)
        else:
            return content
    else:
        # Non-200 status: surface it as a TransportError carrying the
        # response object for callers to inspect.
        # NOTE(review): same missing-space typo in the literals below.
        raise exceptions.TransportError(
            'Failed to retrieve {} from the Google Compute Engine'
            'metadata service. Status: {} Response:\n{}'.format(
                url, response.status, response.data), response)
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
path (str): The resource to retrieve. For example,
``'instance/service-accounts/default'``.
root (str): The full path to the metadata server root.
recursive (bool): Whether to do a recursive query of metadata. See
https://cloud.google.com/compute/docs/metadata#aggcontents for more
details.
Returns:
Union[Mapping, str]: If the metadata server returns JSON, a mapping of
the decoded JSON is return. Otherwise, the response content is
returned as a string.
Raises:
google.auth.exceptions.TransportError: if an error occurred while
        retrieving metadata. | Below is the instruction that describes the task:
### Input:
Fetch a resource from the metadata server.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
path (str): The resource to retrieve. For example,
``'instance/service-accounts/default'``.
root (str): The full path to the metadata server root.
recursive (bool): Whether to do a recursive query of metadata. See
https://cloud.google.com/compute/docs/metadata#aggcontents for more
details.
Returns:
Union[Mapping, str]: If the metadata server returns JSON, a mapping of
the decoded JSON is return. Otherwise, the response content is
returned as a string.
Raises:
google.auth.exceptions.TransportError: if an error occurred while
retrieving metadata.
### Response:
def get(request, path, root=_METADATA_ROOT, recursive=False):
"""Fetch a resource from the metadata server.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
path (str): The resource to retrieve. For example,
``'instance/service-accounts/default'``.
root (str): The full path to the metadata server root.
recursive (bool): Whether to do a recursive query of metadata. See
https://cloud.google.com/compute/docs/metadata#aggcontents for more
details.
Returns:
Union[Mapping, str]: If the metadata server returns JSON, a mapping of
the decoded JSON is return. Otherwise, the response content is
returned as a string.
Raises:
google.auth.exceptions.TransportError: if an error occurred while
retrieving metadata.
"""
base_url = urlparse.urljoin(root, path)
query_params = {}
if recursive:
query_params['recursive'] = 'true'
url = _helpers.update_query(base_url, query_params)
response = request(url=url, method='GET', headers=_METADATA_HEADERS)
if response.status == http_client.OK:
content = _helpers.from_bytes(response.data)
if response.headers['content-type'] == 'application/json':
try:
return json.loads(content)
except ValueError as caught_exc:
new_exc = exceptions.TransportError(
'Received invalid JSON from the Google Compute Engine'
'metadata service: {:.20}'.format(content))
six.raise_from(new_exc, caught_exc)
else:
return content
else:
raise exceptions.TransportError(
'Failed to retrieve {} from the Google Compute Engine'
'metadata service. Status: {} Response:\n{}'.format(
url, response.status, response.data), response) |
def resample(input_data, ratio, converter_type='sinc_best', verbose=False):
    """Resample the signal in `input_data` at once.
    Parameters
    ----------
    input_data : ndarray
        Input data. A single channel is provided as a 1D array of `num_frames` length.
        Input data with several channels is represented as a 2D array of shape
        (`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
        is converted to 32-bit float and C (row-major) memory order.
    ratio : float
        Conversion ratio = output sample rate / input sample rate.
    converter_type : ConverterType, str, or int
        Sample rate converter.
    verbose : bool
        If `True`, print additional information about the conversion.
    Returns
    -------
    output_data : ndarray
        Resampled input data.
    Note
    ----
    If samples are to be processed in chunks, `Resampler` and
    `CallbackResampler` will provide better results and allow for variable
    conversion ratios.
    """
    # Lazy imports of the low-level C binding and its error type.
    from samplerate.lowlevel import src_simple
    from samplerate.exceptions import ResamplingError
    # libsamplerate consumes C-contiguous 32-bit float buffers.
    input_data = np.require(input_data, requirements='C', dtype=np.float32)
    if input_data.ndim == 2:
        num_frames, channels = input_data.shape
        output_shape = (int(num_frames * ratio), channels)
    elif input_data.ndim == 1:
        num_frames, channels = input_data.size, 1
        output_shape = (int(num_frames * ratio), )
    else:
        raise ValueError('rank > 2 not supported')
    output_data = np.empty(output_shape, dtype=np.float32)
    converter_type = _get_converter_type(converter_type)
    (error, input_frames_used, output_frames_gen) \
        = src_simple(input_data, output_data, ratio,
                     converter_type.value, channels)
    # Non-zero return codes from libsamplerate are raised as exceptions.
    if error != 0:
        raise ResamplingError(error)
    if verbose:
        info = ('samplerate info:\n'
                '{} input frames used\n'
                '{} output frames generated\n'
                .format(input_frames_used, output_frames_gen))
        print(info)
    # Only the first `output_frames_gen` frames are valid; trim the
    # over-allocated buffer before returning.
    return (output_data[:output_frames_gen, :]
            if channels > 1 else output_data[:output_frames_gen]) | Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
    conversion ratios. | Below is the instruction that describes the task:
### Input:
Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios.
### Response:
def resample(input_data, ratio, converter_type='sinc_best', verbose=False):
"""Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios.
"""
from samplerate.lowlevel import src_simple
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
output_data = np.empty(output_shape, dtype=np.float32)
converter_type = _get_converter_type(converter_type)
(error, input_frames_used, output_frames_gen) \
= src_simple(input_data, output_data, ratio,
converter_type.value, channels)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen]) |
def read(cls, file):
        """Reads a WebVTT captions file."""
        # Delegate parsing to WebVTTParser, then wrap the parsed captions
        # and styles in a new instance bound to the source file.
        parser = WebVTTParser().read(file)
        return cls(file=file, captions=parser.captions, styles=parser.styles) | Reads a WebVTT captions file. | Below is the the instruction that describes the task:
### Input:
Reads a WebVTT captions file.
### Response:
def read(cls, file):
"""Reads a WebVTT captions file."""
parser = WebVTTParser().read(file)
return cls(file=file, captions=parser.captions, styles=parser.styles) |
def get_interface_detail_request(last_interface_name,
                                 last_interface_type):
    """ Creates a new Netconf request based on the last received
    interface name and type when the hasMore flag is true
    """
    request_interface = ET.Element(
        'get-interface-detail',
        xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
    )
    # An empty name means the first page of results: send the bare
    # request. Otherwise embed a <last-rcvd-interface> cursor so the
    # device resumes after the interface already returned.
    if last_interface_name != '':
        last_received_int = ET.SubElement(request_interface,
                                          "last-rcvd-interface")
        last_int_type_el = ET.SubElement(last_received_int,
                                         "interface-type")
        last_int_type_el.text = last_interface_type
        last_int_name_el = ET.SubElement(last_received_int,
                                         "interface-name")
        last_int_name_el.text = last_interface_name
    return request_interface | Creates a new Netconf request based on the last received
    interface name and type when the hasMore flag is true | Below is the instruction that describes the task:
### Input:
Creates a new Netconf request based on the last received
interface name and type when the hasMore flag is true
### Response:
def get_interface_detail_request(last_interface_name,
last_interface_type):
""" Creates a new Netconf request based on the last received
interface name and type when the hasMore flag is true
"""
request_interface = ET.Element(
'get-interface-detail',
xmlns="urn:brocade.com:mgmt:brocade-interface-ext"
)
if last_interface_name != '':
last_received_int = ET.SubElement(request_interface,
"last-rcvd-interface")
last_int_type_el = ET.SubElement(last_received_int,
"interface-type")
last_int_type_el.text = last_interface_type
last_int_name_el = ET.SubElement(last_received_int,
"interface-name")
last_int_name_el.text = last_interface_name
return request_interface |
def download_feature_values_and_metadata_files(self, mapobject_type_name,
                                               directory, parallel=1):
        '''Downloads all feature values for the given object type and stores the
        data as *CSV* files on disk.
        Parameters
        ----------
        mapobject_type_name: str
            type of the segmented objects
        directory: str
            absolute path to the directory on disk where the file should be
        parallel: int
            number of parallel processes to use for upload
        See also
        --------
        :meth:`tmclient.api.TmClient.download_feature_values`
        :meth:`tmclient.api.TmClient.download_object_metadata`
        '''
        # Worker invoked once per well (possibly in parallel): fetch the
        # feature-value CSV, then the metadata CSV, streaming each
        # response into a file inside `directory`.
        def download_per_well(well):
            logger.info(
                'download feature data at well: plate=%s, well=%s',
                well['plate_name'], well['name']
            )
            res = self._download_feature_values(
                mapobject_type_name, well['plate_name'], well['name']
            )
            filename = self._extract_filename_from_headers(res.headers)
            filepath = os.path.join(directory, os.path.basename(filename))
            logger.info('write feature values to file: %s', filepath)
            # Stream the body in chunks instead of buffering it whole.
            with open(filepath, 'wb') as f:
                for c in res.iter_content(chunk_size=1000):
                    f.write(c)
            logger.info(
                'download feature metadata at well: plate=%s, well=%s',
                well['plate_name'], well['name']
            )
            res = self._download_object_metadata(
                mapobject_type_name, well['plate_name'], well['name']
            )
            filename = self._extract_filename_from_headers(res.headers)
            filepath = os.path.join(directory, os.path.basename(filename))
            logger.info('write metadata to file: %s', filepath)
            with open(filepath, 'wb') as f:
                for c in res.iter_content(chunk_size=1000):
                    f.write(c)
        # Fan the wells out to the parallel executor.
        work = [(download_per_well, well) for well in self.get_wells()]
        self._parallelize(work, parallel) | Downloads all feature values for the given object type and stores the
data as *CSV* files on disk.
Parameters
----------
mapobject_type_name: str
type of the segmented objects
directory: str
absolute path to the directory on disk where the file should be
parallel: int
number of parallel processes to use for upload
See also
--------
:meth:`tmclient.api.TmClient.download_feature_values`
    :meth:`tmclient.api.TmClient.download_object_metadata` | Below is the instruction that describes the task:
### Input:
Downloads all feature values for the given object type and stores the
data as *CSV* files on disk.
Parameters
----------
mapobject_type_name: str
type of the segmented objects
directory: str
absolute path to the directory on disk where the file should be
parallel: int
number of parallel processes to use for upload
See also
--------
:meth:`tmclient.api.TmClient.download_feature_values`
:meth:`tmclient.api.TmClient.download_object_metadata`
### Response:
def download_feature_values_and_metadata_files(self, mapobject_type_name,
directory, parallel=1):
'''Downloads all feature values for the given object type and stores the
data as *CSV* files on disk.
Parameters
----------
mapobject_type_name: str
type of the segmented objects
directory: str
absolute path to the directory on disk where the file should be
parallel: int
number of parallel processes to use for upload
See also
--------
:meth:`tmclient.api.TmClient.download_feature_values`
:meth:`tmclient.api.TmClient.download_object_metadata`
'''
def download_per_well(well):
logger.info(
'download feature data at well: plate=%s, well=%s',
well['plate_name'], well['name']
)
res = self._download_feature_values(
mapobject_type_name, well['plate_name'], well['name']
)
filename = self._extract_filename_from_headers(res.headers)
filepath = os.path.join(directory, os.path.basename(filename))
logger.info('write feature values to file: %s', filepath)
with open(filepath, 'wb') as f:
for c in res.iter_content(chunk_size=1000):
f.write(c)
logger.info(
'download feature metadata at well: plate=%s, well=%s',
well['plate_name'], well['name']
)
res = self._download_object_metadata(
mapobject_type_name, well['plate_name'], well['name']
)
filename = self._extract_filename_from_headers(res.headers)
filepath = os.path.join(directory, os.path.basename(filename))
logger.info('write metadata to file: %s', filepath)
with open(filepath, 'wb') as f:
for c in res.iter_content(chunk_size=1000):
f.write(c)
work = [(download_per_well, well) for well in self.get_wells()]
self._parallelize(work, parallel) |
def user_absent(name, channel=14, **kwargs):
    '''
    Remove user
    Delete all user (uid) records having the matching name.
    name
        string name of user to delete
    channel
        channel to remove user access from defaults to 14 for auto.
    kwargs
        - api_host=localhost
        - api_user=admin
        - api_pass=
        - api_port=623
        - api_kg=None
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    # Every uid currently registered under this name on the channel.
    user_id_list = __salt__['ipmi.get_name_uids'](name, channel, **kwargs)
    if not user_id_list:
        # Nothing to do: report success with no changes.
        ret['result'] = True
        ret['comment'] = 'user already absent'
        return ret
    if __opts__['test']:
        # Dry run: report what would be deleted without touching the BMC.
        ret['comment'] = 'would delete user(s)'
        ret['result'] = None
        ret['changes'] = {'delete': user_id_list}
        return ret
    for uid in user_id_list:
        __salt__['ipmi.delete_user'](uid, channel, **kwargs)
    ret['comment'] = 'user(s) removed'
    ret['changes'] = {'old': user_id_list, 'new': 'None'}
    return ret | Remove user
Delete all user (uid) records having the matching name.
name
string name of user to delete
channel
channel to remove user access from defaults to 14 for auto.
kwargs
- api_host=localhost
- api_user=admin
- api_pass=
- api_port=623
        - api_kg=None | Below is the instruction that describes the task:
### Input:
Remove user
Delete all user (uid) records having the matching name.
name
string name of user to delete
channel
channel to remove user access from defaults to 14 for auto.
kwargs
- api_host=localhost
- api_user=admin
- api_pass=
- api_port=623
- api_kg=None
### Response:
def user_absent(name, channel=14, **kwargs):
'''
Remove user
Delete all user (uid) records having the matching name.
name
string name of user to delete
channel
channel to remove user access from defaults to 14 for auto.
kwargs
- api_host=localhost
- api_user=admin
- api_pass=
- api_port=623
- api_kg=None
'''
ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
user_id_list = __salt__['ipmi.get_name_uids'](name, channel, **kwargs)
if not user_id_list:
ret['result'] = True
ret['comment'] = 'user already absent'
return ret
if __opts__['test']:
ret['comment'] = 'would delete user(s)'
ret['result'] = None
ret['changes'] = {'delete': user_id_list}
return ret
for uid in user_id_list:
__salt__['ipmi.delete_user'](uid, channel, **kwargs)
ret['comment'] = 'user(s) removed'
ret['changes'] = {'old': user_id_list, 'new': 'None'}
return ret |
def dump(self, obj, many=None):
        """Serialize an object to native Python data types according to this
        Schema's fields.
        :param obj: The object to serialize.
        :param bool many: Whether to serialize `obj` as a collection. If `None`, the value
            for `self.many` is used.
        :return: A dict of serialized data
        :rtype: dict
        .. versionadded:: 1.0.0
        .. versionchanged:: 3.0.0b7
            This method returns the serialized data rather than a ``(data, errors)`` duple.
            A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
            if ``obj`` is invalid.
        """
        error_store = ErrorStore()
        errors = {}
        many = self.many if many is None else bool(many)
        if many and is_iterable_but_not_string(obj):
            obj = list(obj)
        # Run pre-dump hooks first; a ValidationError raised there skips
        # serialization entirely.
        if self._has_processors(PRE_DUMP):
            try:
                processed_obj = self._invoke_dump_processors(
                    PRE_DUMP,
                    obj,
                    many,
                    original_data=obj,
                )
            except ValidationError as error:
                errors = error.normalized_messages()
                result = None
        else:
            processed_obj = obj
        if not errors:
            result = self._serialize(
                processed_obj,
                self.fields,
                error_store,
                many=many,
                accessor=self.get_attribute,
                dict_class=self.dict_class,
                index_errors=self.opts.index_errors,
            )
            errors = error_store.errors
        # Post-dump hooks run only when serialization produced no errors.
        if not errors and self._has_processors(POST_DUMP):
            try:
                result = self._invoke_dump_processors(
                    POST_DUMP,
                    result,
                    many,
                    original_data=obj,
                )
            except ValidationError as error:
                errors = error.normalized_messages()
        if errors:
            # Partial output is carried on the exception as valid_data.
            exc = ValidationError(
                errors,
                data=obj,
                valid_data=result,
            )
            # User-defined error handler
            self.handle_error(exc, obj)
            raise exc
        return result | Serialize an object to native Python data types according to this
Schema's fields.
:param obj: The object to serialize.
:param bool many: Whether to serialize `obj` as a collection. If `None`, the value
for `self.many` is used.
:return: A dict of serialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the serialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
        if ``obj`` is invalid. | Below is the instruction that describes the task:
### Input:
Serialize an object to native Python data types according to this
Schema's fields.
:param obj: The object to serialize.
:param bool many: Whether to serialize `obj` as a collection. If `None`, the value
for `self.many` is used.
:return: A dict of serialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the serialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if ``obj`` is invalid.
### Response:
def dump(self, obj, many=None):
"""Serialize an object to native Python data types according to this
Schema's fields.
:param obj: The object to serialize.
:param bool many: Whether to serialize `obj` as a collection. If `None`, the value
for `self.many` is used.
:return: A dict of serialized data
:rtype: dict
.. versionadded:: 1.0.0
.. versionchanged:: 3.0.0b7
This method returns the serialized data rather than a ``(data, errors)`` duple.
A :exc:`ValidationError <marshmallow.exceptions.ValidationError>` is raised
if ``obj`` is invalid.
"""
error_store = ErrorStore()
errors = {}
many = self.many if many is None else bool(many)
if many and is_iterable_but_not_string(obj):
obj = list(obj)
if self._has_processors(PRE_DUMP):
try:
processed_obj = self._invoke_dump_processors(
PRE_DUMP,
obj,
many,
original_data=obj,
)
except ValidationError as error:
errors = error.normalized_messages()
result = None
else:
processed_obj = obj
if not errors:
result = self._serialize(
processed_obj,
self.fields,
error_store,
many=many,
accessor=self.get_attribute,
dict_class=self.dict_class,
index_errors=self.opts.index_errors,
)
errors = error_store.errors
if not errors and self._has_processors(POST_DUMP):
try:
result = self._invoke_dump_processors(
POST_DUMP,
result,
many,
original_data=obj,
)
except ValidationError as error:
errors = error.normalized_messages()
if errors:
exc = ValidationError(
errors,
data=obj,
valid_data=result,
)
# User-defined error handler
self.handle_error(exc, obj)
raise exc
return result |
def get_catalog_hierarchy_design_session(self, proxy):
"""Gets the catalog hierarchy design session.
arg: proxy (osid.proxy.Proxy): proxy
return: (osid.cataloging.CatalogHierarchyDesignSession) - a
``CatalogHierarchyDesignSession``
raise: NullArgument - ``proxy`` is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_catalog_hierarchy_design()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_catalog_hierarchy_design()`` is ``true``.*
"""
if not self.supports_catalog_hierarchy_design():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CatalogHierarchyDesignSession(proxy=proxy, runtime=self._runtime) | Gets the catalog hierarchy design session.
arg: proxy (osid.proxy.Proxy): proxy
return: (osid.cataloging.CatalogHierarchyDesignSession) - a
``CatalogHierarchyDesignSession``
raise: NullArgument - ``proxy`` is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_catalog_hierarchy_design()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_catalog_hierarchy_design()`` is ``true``.* | Below is the instruction that describes the task:
### Input:
Gets the catalog hierarchy design session.
arg: proxy (osid.proxy.Proxy): proxy
return: (osid.cataloging.CatalogHierarchyDesignSession) - a
``CatalogHierarchyDesignSession``
raise: NullArgument - ``proxy`` is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_catalog_hierarchy_design()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_catalog_hierarchy_design()`` is ``true``.*
### Response:
def get_catalog_hierarchy_design_session(self, proxy):
"""Gets the catalog hierarchy design session.
arg: proxy (osid.proxy.Proxy): proxy
return: (osid.cataloging.CatalogHierarchyDesignSession) - a
``CatalogHierarchyDesignSession``
raise: NullArgument - ``proxy`` is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_catalog_hierarchy_design()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_catalog_hierarchy_design()`` is ``true``.*
"""
if not self.supports_catalog_hierarchy_design():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.CatalogHierarchyDesignSession(proxy=proxy, runtime=self._runtime) |
def get_balance_context(self):
"""Get the high level balances"""
bank_account = Account.objects.get(name='Bank')
return dict(
bank=bank_account,
retained_earnings_accounts=Account.objects.filter(parent__name='Retained Earnings'),
) | Get the high level balances | Below is the instruction that describes the task:
### Input:
Get the high level balances
### Response:
def get_balance_context(self):
"""Get the high level balances"""
bank_account = Account.objects.get(name='Bank')
return dict(
bank=bank_account,
retained_earnings_accounts=Account.objects.filter(parent__name='Retained Earnings'),
) |
def add_arguments(cls, parser, sys_arg_list=None):
"""
Arguments for the configfile mode.
"""
parser.add_argument('-f', '--file', dest='file', required=True,
help="config file for routing groups "
"(only in configfile mode)")
return ["file"] | Arguments for the configfile mode. | Below is the the instruction that describes the task:
### Input:
Arguments for the configfile mode.
### Response:
def add_arguments(cls, parser, sys_arg_list=None):
"""
Arguments for the configfile mode.
"""
parser.add_argument('-f', '--file', dest='file', required=True,
help="config file for routing groups "
"(only in configfile mode)")
return ["file"] |
def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame:
'''generate a dataframe for saving
Parameters
----------
df_output_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
'''
# generate df_datetime for prepending
idx_dt = df_grid_group.index
ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year')
ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour')
ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min')
df_datetime = pd.concat([
ser_year,
ser_DOY,
ser_hour,
ser_min,
], axis=1)
df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta(
'd').total_seconds()/(24*60*60)
df_save = pd.concat([df_datetime, df_grid_group], axis=1)
return df_save | generate a dataframe for saving
Parameters
----------
df_output_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving | Below is the instruction that describes the task:
### Input:
generate a dataframe for saving
Parameters
----------
df_output_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
### Response:
def gen_df_save(df_grid_group: pd.DataFrame)->pd.DataFrame:
'''generate a dataframe for saving
Parameters
----------
df_output_grid_group : pd.DataFrame
an output dataframe of a single group and grid
Returns
-------
pd.DataFrame
a dataframe with date time info prepended for saving
'''
# generate df_datetime for prepending
idx_dt = df_grid_group.index
ser_year = pd.Series(idx_dt.year, index=idx_dt, name='Year')
ser_DOY = pd.Series(idx_dt.dayofyear, index=idx_dt, name='DOY')
ser_hour = pd.Series(idx_dt.hour, index=idx_dt, name='Hour')
ser_min = pd.Series(idx_dt.minute, index=idx_dt, name='Min')
df_datetime = pd.concat([
ser_year,
ser_DOY,
ser_hour,
ser_min,
], axis=1)
df_datetime['Dectime'] = ser_DOY-1+idx_dt.to_perioddelta(
'd').total_seconds()/(24*60*60)
df_save = pd.concat([df_datetime, df_grid_group], axis=1)
return df_save |
def duplicates(coll):
"""Return the duplicated items in the given collection
:param coll: a collection
:returns: a list of the duplicated items in the collection
>>> duplicates([1, 1, 2, 3, 3, 4, 1, 1])
[1, 3]
"""
return list(set(x for x in coll if coll.count(x) > 1)) | Return the duplicated items in the given collection
:param coll: a collection
:returns: a list of the duplicated items in the collection
>>> duplicates([1, 1, 2, 3, 3, 4, 1, 1])
[1, 3] | Below is the instruction that describes the task:
### Input:
Return the duplicated items in the given collection
:param coll: a collection
:returns: a list of the duplicated items in the collection
>>> duplicates([1, 1, 2, 3, 3, 4, 1, 1])
[1, 3]
### Response:
def duplicates(coll):
"""Return the duplicated items in the given collection
:param coll: a collection
:returns: a list of the duplicated items in the collection
>>> duplicates([1, 1, 2, 3, 3, 4, 1, 1])
[1, 3]
"""
return list(set(x for x in coll if coll.count(x) > 1)) |
def estimate_hessian(objective_func, parameters,
lower_bounds=None, upper_bounds=None,
step_ratio=2, nmr_steps=5,
max_step_sizes=None,
data=None, cl_runtime_info=None):
"""Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.
This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation
on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error,
taking into account outliers using a median filter.
The Hessian is evaluated at the steps:
.. math::
\quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) -
(f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) /
(4 d_j d_k)
where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero
and :math:`d_j` is a scalar spacing :math:`steps_j`.
Steps are generated according to an exponentially diminishing ratio, defined as:
steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1.
Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for
4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest
step size that fits between the Hessian point and the boundaries.
The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a
O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.
Args:
objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x, void* data);
The objective function has the same signature as the minimization function in MOT. For the numerical
hessian, the ``objective_list`` parameter is ignored.
parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
and p parameters
lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use -np.inf.
upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use np.inf.
step_ratio (float): the ratio at which the steps diminish.
nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these
step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
If a list is given, it should be of the same length as the number of parameters.
data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
Returns:
ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
This array can hold NaN's, for elements where the Hessian failed to approximate.
"""
if len(parameters.shape) == 1:
parameters = parameters[None, :]
nmr_voxels = parameters.shape[0]
nmr_params = parameters.shape[1]
nmr_derivatives = nmr_params * (nmr_params + 1) // 2
initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes)
kernel_data = {
'parameters': Array(parameters, ctype='mot_float_type'),
'initial_step': Array(initial_step, ctype='float'),
'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'),
'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'),
'x_tmp': LocalMemory('mot_float_type', nmr_params),
'data': data,
'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps)
}
hessian_kernel = SimpleCLFunction.from_string('''
void _numdiff_hessian(
global mot_float_type* parameters,
global float* initial_step,
global double* derivatives,
global double* errors,
local mot_float_type* x_tmp,
void* data,
local double* scratch){
if(get_local_id(0) == 0){
for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
x_tmp[i] = parameters[i];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data);
// upper triangle loop
uint coord_ind = 0;
for(int i = 0; i < ''' + str(nmr_params) + '''; i++){
for(int j = i; j < ''' + str(nmr_params) + '''; j++){
_numdiff_hessian_element(
data, x_tmp, f_x_input, i, j, initial_step,
derivatives + coord_ind, errors + coord_ind, scratch);
coord_ind++;
}
}
}
''', dependencies=[objective_func,
_get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)])
hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True, cl_runtime_info=cl_runtime_info)
return kernel_data['derivatives'].get_data() | Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.
This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation
on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error,
taking into account outliers using a median filter.
The Hessian is evaluated at the steps:
.. math::
\quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) -
(f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) /
(4 d_j d_k)
where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero
and :math:`d_j` is a scalar spacing :math:`steps_j`.
Steps are generated according to an exponentially diminishing ratio, defined as:
steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1.
Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for
4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest
step size that fits between the Hessian point and the boundaries.
The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a
O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.
Args:
objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x, void* data);
The objective function has the same signature as the minimization function in MOT. For the numerical
hessian, the ``objective_list`` parameter is ignored.
parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
and p parameters
lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use -np.inf.
upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use np.inf.
step_ratio (float): the ratio at which the steps diminish.
nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these
step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
If a list is given, it should be of the same length as the number of parameters.
data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
Returns:
ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
This array can hold NaN's, for elements where the Hessian failed to approximate. | Below is the instruction that describes the task:
### Input:
Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.
This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation
on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error,
taking into account outliers using a median filter.
The Hessian is evaluated at the steps:
.. math::
\quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) -
(f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) /
(4 d_j d_k)
where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero
and :math:`d_j` is a scalar spacing :math:`steps_j`.
Steps are generated according to an exponentially diminishing ratio, defined as:
steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1.
Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for
4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest
step size that fits between the Hessian point and the boundaries.
The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a
O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.
Args:
objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x, void* data);
The objective function has the same signature as the minimization function in MOT. For the numerical
hessian, the ``objective_list`` parameter is ignored.
parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
and p parameters
lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use -np.inf.
upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use np.inf.
step_ratio (float): the ratio at which the steps diminish.
nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these
step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
If a list is given, it should be of the same length as the number of parameters.
data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
Returns:
ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
This array can hold NaN's, for elements where the Hessian failed to approximate.
### Response:
def estimate_hessian(objective_func, parameters,
lower_bounds=None, upper_bounds=None,
step_ratio=2, nmr_steps=5,
max_step_sizes=None,
data=None, cl_runtime_info=None):
"""Estimate and return the upper triangular elements of the Hessian of the given function at the given parameters.
This calculates the Hessian using central difference (using a 2nd order Taylor expansion) with a Richardson
extrapolation over the proposed sequence of steps. If enough steps are given, we apply a Wynn epsilon extrapolation
on top of the Richardson extrapolated results. If more steps are left, we return the estimate with the lowest error,
taking into account outliers using a median filter.
The Hessian is evaluated at the steps:
.. math::
\quad ((f(x + d_j e_j + d_k e_k) - f(x + d_j e_j - d_k e_k)) -
(f(x - d_j e_j + d_k e_k) - f(x - d_j e_j - d_k e_k)) /
(4 d_j d_k)
where :math:`e_j` is a vector where element :math:`j` is one and the rest are zero
and :math:`d_j` is a scalar spacing :math:`steps_j`.
Steps are generated according to an exponentially diminishing ratio, defined as:
steps = max_step * step_ratio**-i, i = 0,1,..,nmr_steps-1.
Where the maximum step can be provided. For example, a maximum step of 2 with a step ratio of 2, computed for
4 steps gives: [2.0, 1.0, 0.5, 0.25]. If lower and upper bounds are given, we use as maximum step size the largest
step size that fits between the Hessian point and the boundaries.
The steps define the order of the estimation, with 2 steps resulting in a O(h^2) estimate, 3 steps resulting in a
O(h^4) estimate and 4 or more steps resulting in a O(h^6) derivative estimate.
Args:
objective_func (mot.lib.cl_function.CLFunction): The function we want to differentiate.
A CL function with the signature:
.. code-block:: c
double <func_name>(local const mot_float_type* const x, void* data);
The objective function has the same signature as the minimization function in MOT. For the numerical
hessian, the ``objective_list`` parameter is ignored.
parameters (ndarray): The parameters at which to evaluate the gradient. A (d, p) matrix with d problems,
and p parameters
lower_bounds (list or None): a list of length (p,) for p parameters with the lower bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use -np.inf.
upper_bounds (list or None): a list of length (p,) for p parameters with the upper bounds.
Each element of the list can be a scalar or a vector (of the same length as the number
of problem instances). To disable bounds for this parameter use np.inf.
step_ratio (float): the ratio at which the steps diminish.
nmr_steps (int): the number of steps we will generate. We will calculate the derivative for each of these
step sizes and extrapolate the best step size from among them. The minimum number of steps is 1.
max_step_sizes (float or ndarray or None): the maximum step size, or the maximum step size per parameter.
If None is given, we use 0.1 for all parameters. If a float is given, we use that for all parameters.
If a list is given, it should be of the same length as the number of parameters.
data (mot.lib.kernel_data.KernelData): the user provided data for the ``void* data`` pointer.
cl_runtime_info (mot.configuration.CLRuntimeInfo): the runtime information
Returns:
ndarray: per problem instance a vector with the upper triangular elements of the Hessian matrix.
This array can hold NaN's, for elements where the Hessian failed to approximate.
"""
if len(parameters.shape) == 1:
parameters = parameters[None, :]
nmr_voxels = parameters.shape[0]
nmr_params = parameters.shape[1]
nmr_derivatives = nmr_params * (nmr_params + 1) // 2
initial_step = _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes)
kernel_data = {
'parameters': Array(parameters, ctype='mot_float_type'),
'initial_step': Array(initial_step, ctype='float'),
'derivatives': Zeros((nmr_voxels, nmr_derivatives), 'double'),
'errors': Zeros((nmr_voxels, nmr_derivatives), 'double'),
'x_tmp': LocalMemory('mot_float_type', nmr_params),
'data': data,
'scratch': LocalMemory('double', nmr_steps + (nmr_steps - 1) + nmr_steps)
}
hessian_kernel = SimpleCLFunction.from_string('''
void _numdiff_hessian(
global mot_float_type* parameters,
global float* initial_step,
global double* derivatives,
global double* errors,
local mot_float_type* x_tmp,
void* data,
local double* scratch){
if(get_local_id(0) == 0){
for(uint i = 0; i < ''' + str(nmr_params) + '''; i++){
x_tmp[i] = parameters[i];
}
}
barrier(CLK_LOCAL_MEM_FENCE);
double f_x_input = ''' + objective_func.get_cl_function_name() + '''(x_tmp, data);
// upper triangle loop
uint coord_ind = 0;
for(int i = 0; i < ''' + str(nmr_params) + '''; i++){
for(int j = i; j < ''' + str(nmr_params) + '''; j++){
_numdiff_hessian_element(
data, x_tmp, f_x_input, i, j, initial_step,
derivatives + coord_ind, errors + coord_ind, scratch);
coord_ind++;
}
}
}
''', dependencies=[objective_func,
_get_numdiff_hessian_element_func(objective_func, nmr_steps, step_ratio)])
hessian_kernel.evaluate(kernel_data, nmr_voxels, use_local_reduction=True, cl_runtime_info=cl_runtime_info)
return kernel_data['derivatives'].get_data() |
def get_human_readable_time(time_ms):
"""
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
"""
millis = time_ms % 1000
secs = (time_ms // 1000) % 60
mins = (time_ms // 60000) % 60
hours = (time_ms // 3600000) % 24
days = (time_ms // 86400000)
res = ""
if days > 1:
res += "%d days" % days
elif days == 1:
res += "1 day"
if hours > 1 or (hours == 0 and res):
res += " %d hours" % hours
elif hours == 1:
res += " 1 hour"
if mins > 1 or (mins == 0 and res):
res += " %d mins" % mins
elif mins == 1:
res += " 1 min"
if days == 0 and hours == 0:
res += " %02d secs" % secs
if not res:
res = " %d ms" % millis
return res.strip() | Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration. | Below is the instruction that describes the task:
### Input:
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
### Response:
def get_human_readable_time(time_ms):
"""
Convert given duration in milliseconds into a human-readable representation, i.e. hours, minutes, seconds,
etc. More specifically, the returned string may look like following:
1 day 3 hours 12 mins
3 days 0 hours 0 mins
8 hours 12 mins
34 mins 02 secs
13 secs
541 ms
In particular, the following rules are applied:
* milliseconds are printed only if the duration is less than a second;
* seconds are printed only if the duration is less than an hour;
* for durations greater than 1 hour we print days, hours and minutes keeping zeros in the middle (i.e. we
return "4 days 0 hours 12 mins" instead of "4 days 12 mins").
:param time_ms: duration, as a number of elapsed milliseconds.
:return: human-readable string representation of the provided duration.
"""
millis = time_ms % 1000
secs = (time_ms // 1000) % 60
mins = (time_ms // 60000) % 60
hours = (time_ms // 3600000) % 24
days = (time_ms // 86400000)
res = ""
if days > 1:
res += "%d days" % days
elif days == 1:
res += "1 day"
if hours > 1 or (hours == 0 and res):
res += " %d hours" % hours
elif hours == 1:
res += " 1 hour"
if mins > 1 or (mins == 0 and res):
res += " %d mins" % mins
elif mins == 1:
res += " 1 min"
if days == 0 and hours == 0:
res += " %02d secs" % secs
if not res:
res = " %d ms" % millis
return res.strip() |
def load_connector_file(self):
"""load config from a JSON connector file,
at a *lower* priority than command-line/config files.
"""
self.log.info("Loading url_file %r", self.url_file)
config = self.config
with open(self.url_file) as f:
d = json.loads(f.read())
if 'exec_key' in d:
config.Session.key = cast_bytes(d['exec_key'])
try:
config.EngineFactory.location
except AttributeError:
config.EngineFactory.location = d['location']
d['url'] = disambiguate_url(d['url'], config.EngineFactory.location)
try:
config.EngineFactory.url
except AttributeError:
config.EngineFactory.url = d['url']
try:
config.EngineFactory.sshserver
except AttributeError:
config.EngineFactory.sshserver = d['ssh'] | load config from a JSON connector file,
at a *lower* priority than command-line/config files. | Below is the instruction that describes the task:
### Input:
load config from a JSON connector file,
at a *lower* priority than command-line/config files.
### Response:
def load_connector_file(self):
"""load config from a JSON connector file,
at a *lower* priority than command-line/config files.
"""
self.log.info("Loading url_file %r", self.url_file)
config = self.config
with open(self.url_file) as f:
d = json.loads(f.read())
if 'exec_key' in d:
config.Session.key = cast_bytes(d['exec_key'])
try:
config.EngineFactory.location
except AttributeError:
config.EngineFactory.location = d['location']
d['url'] = disambiguate_url(d['url'], config.EngineFactory.location)
try:
config.EngineFactory.url
except AttributeError:
config.EngineFactory.url = d['url']
try:
config.EngineFactory.sshserver
except AttributeError:
config.EngineFactory.sshserver = d['ssh'] |
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) | Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered. | Below is the instruction that describes the task:
### Input:
Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
### Response:
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
) |
def transform(self, X):
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
check_is_fitted(self, "encoded_columns_")
check_columns_exist(X.columns, self.feature_names_)
Xt = X.copy()
for col, cat in self.categories_.items():
Xt[col].cat.set_categories(cat, inplace=True)
new_data = self._encode(Xt, self.feature_names_)
return new_data.loc[:, self.encoded_columns_] | Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data. | Below is the instruction that describes the task:
### Input:
Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
### Response:
def transform(self, X):
"""Convert categorical columns to numeric values.
Parameters
----------
X : pandas.DataFrame
Data to encode.
Returns
-------
Xt : pandas.DataFrame
Encoded data.
"""
check_is_fitted(self, "encoded_columns_")
check_columns_exist(X.columns, self.feature_names_)
Xt = X.copy()
for col, cat in self.categories_.items():
Xt[col].cat.set_categories(cat, inplace=True)
new_data = self._encode(Xt, self.feature_names_)
return new_data.loc[:, self.encoded_columns_] |
def is_redundant_union_item(first, other):
# type: (AbstractType, AbstractType) -> bool
"""If union has both items, is the first one redundant?
For example, if first is 'str' and the other is 'Text', return True.
If items are equal, return False.
"""
if isinstance(first, ClassType) and isinstance(other, ClassType):
if first.name == 'str' and other.name == 'Text':
return True
elif first.name == 'bool' and other.name == 'int':
return True
elif first.name == 'int' and other.name == 'float':
return True
elif (first.name in ('List', 'Dict', 'Set') and
other.name == first.name):
if not first.args and other.args:
return True
elif len(first.args) == len(other.args) and first.args:
result = all(first_arg == other_arg or other_arg == AnyType()
for first_arg, other_arg
in zip(first.args, other.args))
return result
return False | If union has both items, is the first one redundant?
For example, if first is 'str' and the other is 'Text', return True.
If items are equal, return False. | Below is the instruction that describes the task:
### Input:
If union has both items, is the first one redundant?
For example, if first is 'str' and the other is 'Text', return True.
If items are equal, return False.
### Response:
def is_redundant_union_item(first, other):
# type: (AbstractType, AbstractType) -> bool
"""If union has both items, is the first one redundant?
For example, if first is 'str' and the other is 'Text', return True.
If items are equal, return False.
"""
if isinstance(first, ClassType) and isinstance(other, ClassType):
if first.name == 'str' and other.name == 'Text':
return True
elif first.name == 'bool' and other.name == 'int':
return True
elif first.name == 'int' and other.name == 'float':
return True
elif (first.name in ('List', 'Dict', 'Set') and
other.name == first.name):
if not first.args and other.args:
return True
elif len(first.args) == len(other.args) and first.args:
result = all(first_arg == other_arg or other_arg == AnyType()
for first_arg, other_arg
in zip(first.args, other.args))
return result
return False |
def run(**kwargs):
"""Command to run server, pass `--PORT 9000`, `--TEMPLATE_PATH templates`
as arguments to affect on global settings object.
"""
from sampleproject.app import app
app.update_settings(**kwargs)
app.run() | Command to run server, pass `--PORT 9000`, `--TEMPLATE_PATH templates`
as arguments to affect on global settings object. | Below is the instruction that describes the task:
### Input:
Command to run server, pass `--PORT 9000`, `--TEMPLATE_PATH templates`
as arguments to affect on global settings object.
### Response:
def run(**kwargs):
"""Command to run server, pass `--PORT 9000`, `--TEMPLATE_PATH templates`
as arguments to affect on global settings object.
"""
from sampleproject.app import app
app.update_settings(**kwargs)
app.run() |
def _vagrant_call(node, function, section, comment, status_when_done=None, **kwargs):
'''
Helper to call the vagrant functions. Wildcards supported.
:param node: The Salt-id or wildcard
:param function: the vagrant submodule to call
:param section: the name for the state call.
:param comment: what the state reply should say
:param status_when_done: the Vagrant status expected for this state
:return: the dictionary for the state reply
'''
ret = {'name': node, 'changes': {}, 'result': True, 'comment': ''}
targeted_nodes = []
if isinstance(node, six.string_types):
try: # use shortcut if a single node name
if __salt__['vagrant.get_vm_info'](node):
targeted_nodes = [node]
except SaltInvocationError:
pass
if not targeted_nodes: # the shortcut failed, do this the hard way
all_domains = __salt__['vagrant.list_domains']()
targeted_nodes = fnmatch.filter(all_domains, node)
changed_nodes = []
ignored_nodes = []
for node in targeted_nodes:
if status_when_done:
try:
present_state = __salt__['vagrant.vm_state'](node)[0]
if present_state['state'] == status_when_done:
continue # no change is needed
except (IndexError, SaltInvocationError, CommandExecutionError):
pass
try:
response = __salt__['vagrant.{0}'.format(function)](node, **kwargs)
if isinstance(response, dict):
response = response['name']
changed_nodes.append({'node': node, function: response})
except (SaltInvocationError, CommandExecutionError) as err:
ignored_nodes.append({'node': node, 'issue': six.text_type(err)})
if not changed_nodes:
ret['result'] = True
ret['comment'] = 'No changes seen'
if ignored_nodes:
ret['changes'] = {'ignored': ignored_nodes}
else:
ret['changes'] = {section: changed_nodes}
ret['comment'] = comment
return ret | Helper to call the vagrant functions. Wildcards supported.
:param node: The Salt-id or wildcard
:param function: the vagrant submodule to call
:param section: the name for the state call.
:param comment: what the state reply should say
:param status_when_done: the Vagrant status expected for this state
:return: the dictionary for the state reply | Below is the instruction that describes the task:
### Input:
Helper to call the vagrant functions. Wildcards supported.
:param node: The Salt-id or wildcard
:param function: the vagrant submodule to call
:param section: the name for the state call.
:param comment: what the state reply should say
:param status_when_done: the Vagrant status expected for this state
:return: the dictionary for the state reply
### Response:
def _vagrant_call(node, function, section, comment, status_when_done=None, **kwargs):
'''
Helper to call the vagrant functions. Wildcards supported.
:param node: The Salt-id or wildcard
:param function: the vagrant submodule to call
:param section: the name for the state call.
:param comment: what the state reply should say
:param status_when_done: the Vagrant status expected for this state
:return: the dictionary for the state reply
'''
ret = {'name': node, 'changes': {}, 'result': True, 'comment': ''}
targeted_nodes = []
if isinstance(node, six.string_types):
try: # use shortcut if a single node name
if __salt__['vagrant.get_vm_info'](node):
targeted_nodes = [node]
except SaltInvocationError:
pass
if not targeted_nodes: # the shortcut failed, do this the hard way
all_domains = __salt__['vagrant.list_domains']()
targeted_nodes = fnmatch.filter(all_domains, node)
changed_nodes = []
ignored_nodes = []
for node in targeted_nodes:
if status_when_done:
try:
present_state = __salt__['vagrant.vm_state'](node)[0]
if present_state['state'] == status_when_done:
continue # no change is needed
except (IndexError, SaltInvocationError, CommandExecutionError):
pass
try:
response = __salt__['vagrant.{0}'.format(function)](node, **kwargs)
if isinstance(response, dict):
response = response['name']
changed_nodes.append({'node': node, function: response})
except (SaltInvocationError, CommandExecutionError) as err:
ignored_nodes.append({'node': node, 'issue': six.text_type(err)})
if not changed_nodes:
ret['result'] = True
ret['comment'] = 'No changes seen'
if ignored_nodes:
ret['changes'] = {'ignored': ignored_nodes}
else:
ret['changes'] = {section: changed_nodes}
ret['comment'] = comment
return ret |
def average_data(self,ranges=[[None,None]],percentile=None):
"""
given a list of ranges, return single point averages for every sweep.
Units are in seconds. Expects something like:
ranges=[[1,2],[4,5],[7,7.5]]
None values will be replaced with maximum/minimum bounds.
For baseline subtraction, make a range baseline then sub it youtself.
returns datas[iSweep][iRange][AVorSD]
if a percentile is given, return that percentile rather than average.
percentile=50 is the median, but requires sorting, and is slower.
"""
ranges=copy.deepcopy(ranges) #TODO: make this cleaner. Why needed?
# clean up ranges, make them indexes
for i in range(len(ranges)):
if ranges[i][0] is None:
ranges[i][0] = 0
else:
ranges[i][0] = int(ranges[i][0]*self.rate)
if ranges[i][1] is None:
ranges[i][1] = -1
else:
ranges[i][1] = int(ranges[i][1]*self.rate)
# do the math
datas=np.empty((self.sweeps,len(ranges),2)) #[sweep][range]=[Av,Er]
for iSweep in range(self.sweeps):
self.setSweep(iSweep)
for iRange in range(len(ranges)):
I1=ranges[iRange][0]
I2=ranges[iRange][1]
if percentile:
datas[iSweep][iRange][0]=np.percentile(self.dataY[I1:I2],percentile)
else:
datas[iSweep][iRange][0]=np.average(self.dataY[I1:I2])
datas[iSweep][iRange][1]=np.std(self.dataY[I1:I2])
return datas | given a list of ranges, return single point averages for every sweep.
Units are in seconds. Expects something like:
ranges=[[1,2],[4,5],[7,7.5]]
None values will be replaced with maximum/minimum bounds.
For baseline subtraction, make a range baseline then sub it youtself.
returns datas[iSweep][iRange][AVorSD]
if a percentile is given, return that percentile rather than average.
percentile=50 is the median, but requires sorting, and is slower. | Below is the instruction that describes the task:
### Input:
given a list of ranges, return single point averages for every sweep.
Units are in seconds. Expects something like:
ranges=[[1,2],[4,5],[7,7.5]]
None values will be replaced with maximum/minimum bounds.
For baseline subtraction, make a range baseline then sub it youtself.
returns datas[iSweep][iRange][AVorSD]
if a percentile is given, return that percentile rather than average.
percentile=50 is the median, but requires sorting, and is slower.
### Response:
def average_data(self,ranges=[[None,None]],percentile=None):
"""
given a list of ranges, return single point averages for every sweep.
Units are in seconds. Expects something like:
ranges=[[1,2],[4,5],[7,7.5]]
None values will be replaced with maximum/minimum bounds.
For baseline subtraction, make a range baseline then sub it youtself.
returns datas[iSweep][iRange][AVorSD]
if a percentile is given, return that percentile rather than average.
percentile=50 is the median, but requires sorting, and is slower.
"""
ranges=copy.deepcopy(ranges) #TODO: make this cleaner. Why needed?
# clean up ranges, make them indexes
for i in range(len(ranges)):
if ranges[i][0] is None:
ranges[i][0] = 0
else:
ranges[i][0] = int(ranges[i][0]*self.rate)
if ranges[i][1] is None:
ranges[i][1] = -1
else:
ranges[i][1] = int(ranges[i][1]*self.rate)
# do the math
datas=np.empty((self.sweeps,len(ranges),2)) #[sweep][range]=[Av,Er]
for iSweep in range(self.sweeps):
self.setSweep(iSweep)
for iRange in range(len(ranges)):
I1=ranges[iRange][0]
I2=ranges[iRange][1]
if percentile:
datas[iSweep][iRange][0]=np.percentile(self.dataY[I1:I2],percentile)
else:
datas[iSweep][iRange][0]=np.average(self.dataY[I1:I2])
datas[iSweep][iRange][1]=np.std(self.dataY[I1:I2])
return datas |
def merge_entities(doc):
"""Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
"""
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs)
return doc | Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities | Below is the instruction that describes the task:
### Input:
Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
### Response:
def merge_entities(doc):
"""Merge entities into a single token.
doc (Doc): The Doc object.
RETURNS (Doc): The Doc object with merged entities.
DOCS: https://spacy.io/api/pipeline-functions#merge_entities
"""
with doc.retokenize() as retokenizer:
for ent in doc.ents:
attrs = {"tag": ent.root.tag, "dep": ent.root.dep, "ent_type": ent.label}
retokenizer.merge(ent, attrs=attrs)
return doc |
def place(slot_name, dttime):
"""
Set a timer to be published at the specified minute.
"""
dttime = datetime.strptime(dttime, '%Y-%m-%d %H:%M:%S')
dttime = dttime.replace(second=0, microsecond=0)
try:
area.context['timers'][dttime].add(slot_name)
except KeyError:
area.context['timers'][dttime] = {slot_name}
area.publish({'status': 'placed'}, slot=slot_name) | Set a timer to be published at the specified minute. | Below is the instruction that describes the task:
### Input:
Set a timer to be published at the specified minute.
### Response:
def place(slot_name, dttime):
"""
Set a timer to be published at the specified minute.
"""
dttime = datetime.strptime(dttime, '%Y-%m-%d %H:%M:%S')
dttime = dttime.replace(second=0, microsecond=0)
try:
area.context['timers'][dttime].add(slot_name)
except KeyError:
area.context['timers'][dttime] = {slot_name}
area.publish({'status': 'placed'}, slot=slot_name) |
def filter_objects(self, objects, perm=None):
""" Return only objects with specified permission in objects list. If perm not specified, 'view' perm will be used. """
if perm is None:
perm = build_permission_name(self.model_class, 'view')
return filter(lambda o: self.user.has_perm(perm, obj=o), objects) | Return only objects with specified permission in objects list. If perm not specified, 'view' perm will be used. | Below is the instruction that describes the task:
### Input:
Return only objects with specified permission in objects list. If perm not specified, 'view' perm will be used.
### Response:
def filter_objects(self, objects, perm=None):
""" Return only objects with specified permission in objects list. If perm not specified, 'view' perm will be used. """
if perm is None:
perm = build_permission_name(self.model_class, 'view')
return filter(lambda o: self.user.has_perm(perm, obj=o), objects) |
def edit(self, title=None, body=None, assignee=None, state=None,
milestone=None, labels=None):
"""Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc.
"""
json = None
data = {'title': title, 'body': body, 'assignee': assignee,
'state': state, 'milestone': milestone, 'labels': labels}
self._remove_none(data)
if data:
if 'milestone' in data and data['milestone'] == 0:
data['milestone'] = None
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_(json)
return True
return False | Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc. | Below is the instruction that describes the task:
### Input:
Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc.
### Response:
def edit(self, title=None, body=None, assignee=None, state=None,
milestone=None, labels=None):
"""Edit this issue.
:param str title: Title of the issue
:param str body: markdown formatted body (description) of the issue
:param str assignee: login name of user the issue should be assigned
to
:param str state: accepted values: ('open', 'closed')
:param int milestone: the NUMBER (not title) of the milestone to
assign this to [1]_, or 0 to remove the milestone
:param list labels: list of labels to apply this to
:returns: bool
.. [1] Milestone numbering starts at 1, i.e. the first milestone you
create is 1, the second is 2, etc.
"""
json = None
data = {'title': title, 'body': body, 'assignee': assignee,
'state': state, 'milestone': milestone, 'labels': labels}
self._remove_none(data)
if data:
if 'milestone' in data and data['milestone'] == 0:
data['milestone'] = None
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_(json)
return True
return False |
def is_subset(a, b):
"""Excluding same size"""
return b.left <= a.left and b.right > a.right or b.left < a.left and b.right >= a.right | Excluding same size | Below is the instruction that describes the task:
### Input:
Excluding same size
### Response:
def is_subset(a, b):
"""Excluding same size"""
return b.left <= a.left and b.right > a.right or b.left < a.left and b.right >= a.right |
def create_id_token(token, user, aud, nonce='', at_hash='', request=None, scope=None):
"""
Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic.
"""
if scope is None:
scope = []
sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)
expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')
# Convert datetimes into timestamps.
now = int(time.time())
iat_time = now
exp_time = int(now + expires_in)
user_auth_time = user.last_login or user.date_joined
auth_time = int(dateformat.format(user_auth_time, 'U'))
dic = {
'iss': get_issuer(request=request),
'sub': sub,
'aud': str(aud),
'exp': exp_time,
'iat': iat_time,
'auth_time': auth_time,
}
if nonce:
dic['nonce'] = str(nonce)
if at_hash:
dic['at_hash'] = at_hash
# Inlude (or not) user standard claims in the id_token.
if settings.get('OIDC_IDTOKEN_INCLUDE_CLAIMS'):
if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
custom_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)
claims = custom_claims.create_response_dic()
else:
claims = StandardScopeClaims(token).create_response_dic()
dic.update(claims)
dic = run_processing_hook(
dic, 'OIDC_IDTOKEN_PROCESSING_HOOK',
user=user, token=token, request=request)
return dic | Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic. | Below is the instruction that describes the task:
### Input:
Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic.
### Response:
def create_id_token(token, user, aud, nonce='', at_hash='', request=None, scope=None):
"""
Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic.
"""
if scope is None:
scope = []
sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)
expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')
# Convert datetimes into timestamps.
now = int(time.time())
iat_time = now
exp_time = int(now + expires_in)
user_auth_time = user.last_login or user.date_joined
auth_time = int(dateformat.format(user_auth_time, 'U'))
dic = {
'iss': get_issuer(request=request),
'sub': sub,
'aud': str(aud),
'exp': exp_time,
'iat': iat_time,
'auth_time': auth_time,
}
if nonce:
dic['nonce'] = str(nonce)
if at_hash:
dic['at_hash'] = at_hash
# Inlude (or not) user standard claims in the id_token.
if settings.get('OIDC_IDTOKEN_INCLUDE_CLAIMS'):
if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
custom_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)
claims = custom_claims.create_response_dic()
else:
claims = StandardScopeClaims(token).create_response_dic()
dic.update(claims)
dic = run_processing_hook(
dic, 'OIDC_IDTOKEN_PROCESSING_HOOK',
user=user, token=token, request=request)
return dic |
def getbyteslice(self, start, end):
"""Direct access to byte data."""
c = self._rawarray[start:end]
return c | Direct access to byte data. | Below is the instruction that describes the task:
### Input:
Direct access to byte data.
### Response:
def getbyteslice(self, start, end):
"""Direct access to byte data."""
c = self._rawarray[start:end]
return c |
def _validate_message_type(self):
"""Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException
"""
if self._unsupported_message_type():
self.logger.warning(
'Received unsupported message type: %s', self.message_type)
if self._drop_invalid:
if self._drop_exchange:
self._republish_dropped_message('invalid type')
raise DropMessage
raise MessageException | Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException | Below is the instruction that describes the task:
### Input:
Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException
### Response:
def _validate_message_type(self):
"""Check to see if the current message's AMQP type property is
supported and if not, raise either a :exc:`DropMessage` or
:exc:`MessageException`.
:raises: DropMessage
:raises: MessageException
"""
if self._unsupported_message_type():
self.logger.warning(
'Received unsupported message type: %s', self.message_type)
if self._drop_invalid:
if self._drop_exchange:
self._republish_dropped_message('invalid type')
raise DropMessage
raise MessageException |
def default_from_address(self):
"""
Cache the coinbase address so that we don't make two requests for every
single transaction.
"""
if self._coinbase_cache_til is not None:
if time.time - self._coinbase_cache_til > 30:
self._coinbase_cache_til = None
self._coinbase_cache = None
if self._coinbase_cache is None:
self._coinbase_cache = self.get_coinbase()
return self._coinbase_cache | Cache the coinbase address so that we don't make two requests for every
single transaction. | Below is the instruction that describes the task:
### Input:
Cache the coinbase address so that we don't make two requests for every
single transaction.
### Response:
def default_from_address(self):
"""
Cache the coinbase address so that we don't make two requests for every
single transaction.
"""
if self._coinbase_cache_til is not None:
if time.time - self._coinbase_cache_til > 30:
self._coinbase_cache_til = None
self._coinbase_cache = None
if self._coinbase_cache is None:
self._coinbase_cache = self.get_coinbase()
return self._coinbase_cache |
def _format_data(self, data, charset):
""" Format data into XML. """
if data is None or data == '':
return u''
stream = StringIO.StringIO()
xml = SimplerXMLGenerator(stream, charset)
xml.startDocument()
xml.startElement(self._root_element_name(), {})
self._to_xml(xml, data)
xml.endElement(self._root_element_name())
xml.endDocument()
return stream.getvalue() | Format data into XML. | Below is the instruction that describes the task:
### Input:
Format data into XML.
### Response:
def _format_data(self, data, charset):
""" Format data into XML. """
if data is None or data == '':
return u''
stream = StringIO.StringIO()
xml = SimplerXMLGenerator(stream, charset)
xml.startDocument()
xml.startElement(self._root_element_name(), {})
self._to_xml(xml, data)
xml.endElement(self._root_element_name())
xml.endDocument()
return stream.getvalue() |
def gen_string_table(n):
"""Generates the list of strings that will be used in the benchmarks.
All strings have repeated prefixes and suffices, and n specifies the
number of repetitions.
"""
strings = []
def append(s):
if USE_BYTES_IN_PY3K:
strings.append(s.encode('latin1'))
else:
strings.append(s)
append('-' * n + 'Perl' + '-' * n)
append('P' * n + 'Perl' + 'P' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('P' * n + 'Python' + 'P' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Perl' + '-' * n)
append('P' * n + 'Perl' + 'P' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'PythonPython' + '-' * n)
append('P' * n + 'PythonPython' + 'P' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'Python' + '-' * n)
return strings | Generates the list of strings that will be used in the benchmarks.
All strings have repeated prefixes and suffices, and n specifies the
number of repetitions. | Below is the instruction that describes the task:
### Input:
Generates the list of strings that will be used in the benchmarks.
All strings have repeated prefixes and suffices, and n specifies the
number of repetitions.
### Response:
def gen_string_table(n):
"""Generates the list of strings that will be used in the benchmarks.
All strings have repeated prefixes and suffices, and n specifies the
number of repetitions.
"""
strings = []
def append(s):
if USE_BYTES_IN_PY3K:
strings.append(s.encode('latin1'))
else:
strings.append(s)
append('-' * n + 'Perl' + '-' * n)
append('P' * n + 'Perl' + 'P' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('P' * n + 'Python' + 'P' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Python' + '-' * n)
append('-' * n + 'Perl' + '-' * n)
append('P' * n + 'Perl' + 'P' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'Perl' + '-' * n)
append('-' * n + 'PythonPython' + '-' * n)
append('P' * n + 'PythonPython' + 'P' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'a5,b7,c9,' + '-' * n)
append('-' * n + 'Python' + '-' * n)
return strings |
def get_cmdclass():
''' A ``cmdclass`` that works around a setuptools deficiency.
There is no need to build wheels when installing a package, however some
versions of setuptools seem to mandate this. This is a hacky workaround
that modifies the ``cmdclass`` returned by versioneer so that not having
wheel installed is not a fatal error.
'''
cmdclass = versioneer.get_cmdclass()
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
# pip is not claiming for bdist_wheel when wheel is not installed
bdist_wheel = None
if bdist_wheel is not None:
cmdclass["bdist_wheel"] = bdist_wheel
return cmdclass | A ``cmdclass`` that works around a setuptools deficiency.
There is no need to build wheels when installing a package, however some
versions of setuptools seem to mandate this. This is a hacky workaround
that modifies the ``cmdclass`` returned by versioneer so that not having
wheel installed is not a fatal error. | Below is the instruction that describes the task:
### Input:
A ``cmdclass`` that works around a setuptools deficiency.
There is no need to build wheels when installing a package, however some
versions of setuptools seem to mandate this. This is a hacky workaround
that modifies the ``cmdclass`` returned by versioneer so that not having
wheel installed is not a fatal error.
### Response:
def get_cmdclass():
''' A ``cmdclass`` that works around a setuptools deficiency.
There is no need to build wheels when installing a package, however some
versions of setuptools seem to mandate this. This is a hacky workaround
that modifies the ``cmdclass`` returned by versioneer so that not having
wheel installed is not a fatal error.
'''
cmdclass = versioneer.get_cmdclass()
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
# pip is not claiming for bdist_wheel when wheel is not installed
bdist_wheel = None
if bdist_wheel is not None:
cmdclass["bdist_wheel"] = bdist_wheel
return cmdclass |
def nlsq_fit(x, y, dy, func, params_init, verbose=False, **kwargs):
"""Perform a non-linear least squares fit
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. In the array
case, if any of its elements is NaN, the whole array is treated as
NaN (= no weighting)
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
`verbose`: if various messages useful for debugging should be printed on
stdout.
other optional keyword arguments will be passed to leastsq().
Outputs: p, dp, statdict where
p: list of fitted values of par1, par2 etc.
dp: list of estimated errors
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'R2': Coefficient of determination
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, scipy.optimize.leastsq() is used.
"""
if verbose:
t0 = time.monotonic()
print("nlsq_fit starting.")
else:
t0 = 0
func_orig = func
params_init_orig = params_init
func, params_init = hide_fixedparams(func_orig, params_init_orig)
if (dy is None) or (dy == np.nan).sum() > 0 or (dy <= 0).sum() > 0:
if verbose:
print("nlsq_fit: no weighting")
dy = None
def objectivefunc(params, x, y, dy):
"""The target function for leastsq()."""
if dy is None:
return (func(x, *(params.tolist())) - y)
else:
return (func(x, *(params.tolist())) - y) / dy
# do the fitting
if verbose:
print("nlsq_fit: now doing the fitting...")
t1 = time.monotonic()
else:
t1 = 0
par, cov, infodict, mesg, ier = leastsq(objectivefunc,
np.array(params_init),
(x, y, dy), full_output=True,
**kwargs)
if verbose:
print("nlsq_fit: fitting done in %.2f seconds." % (time.monotonic() - t1))
print("nlsq_fit: status from scipy.optimize.leastsq(): %d (%s)" % (ier, mesg))
print("nlsq_fit: extracting statistics.")
# test if the covariance was singular (cov is None)
if cov is None:
cov = np.ones((len(par), len(par))) * np.nan # set it to a NaN matrix
# calculate the Pearson's R^2 parameter (coefficient of determination)
if dy is None:
sserr = np.sum(((func(x, *(par.tolist())) - y)) ** 2)
sstot = np.sum((y - np.mean(y)) ** 2)
else:
sserr = np.sum(((func(x, *(par.tolist())) - y) / dy) ** 2)
sstot = np.sum((y - np.mean(y)) ** 2 / dy ** 2)
r2 = 1 - sserr / sstot
# assemble the statistics dictionary
statdict = {'DoF' : len(x) - len(par), # degrees of freedom
'Chi2' : (infodict['fvec'] ** 2).sum(),
'R2' : r2,
'num_func_eval' : infodict['nfev'],
'func_value' : func(x, *(par.tolist())),
'message' : mesg,
'error_flag' : ier,
}
statdict['Chi2_reduced'] = statdict['Chi2'] / statdict['DoF']
statdict['Covariance'] = cov * statdict['Chi2_reduced']
par, statdict['Covariance'] = resubstitute_fixedparams(par, params_init_orig, statdict['Covariance'])
# calculate the estimated errors of the fit parameters
dpar = np.sqrt(statdict['Covariance'].diagonal())
# Pearson's correlation coefficients (usually 'r') in a matrix.
statdict['Correlation_coeffs'] = statdict['Covariance'] / np.outer(dpar,
dpar)
if verbose:
print("nlsq_fit: returning with results.")
print("nlsq_fit: total time: %.2f sec." % (time.monotonic() - t0))
return par, dpar, statdict | Perform a non-linear least squares fit
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. In the array
case, if any of its elements is NaN, the whole array is treated as
NaN (= no weighting)
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
`verbose`: if various messages useful for debugging should be printed on
stdout.
other optional keyword arguments will be passed to leastsq().
Outputs: p, dp, statdict where
p: list of fitted values of par1, par2 etc.
dp: list of estimated errors
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'R2': Coefficient of determination
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, scipy.optimize.leastsq() is used. | Below is the the instruction that describes the task:
### Input:
Perform a non-linear least squares fit
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. In the array
case, if any of its elements is NaN, the whole array is treated as
NaN (= no weighting)
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
`verbose`: if various messages useful for debugging should be printed on
stdout.
other optional keyword arguments will be passed to leastsq().
Outputs: p, dp, statdict where
p: list of fitted values of par1, par2 etc.
dp: list of estimated errors
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'R2': Coefficient of determination
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, scipy.optimize.leastsq() is used.
### Response:
def nlsq_fit(x, y, dy, func, params_init, verbose=False, **kwargs):
"""Perform a non-linear least squares fit
Inputs:
x: one-dimensional numpy array of the independent variable
y: one-dimensional numpy array of the dependent variable
dy: absolute error (square root of the variance) of the dependent
variable. Either a one-dimensional numpy array or None. In the array
case, if any of its elements is NaN, the whole array is treated as
NaN (= no weighting)
func: a callable with the signature
func(x,par1,par2,par3,...)
params_init: list or tuple of the first estimates of the
parameters par1, par2, par3 etc. to be fitted
`verbose`: if various messages useful for debugging should be printed on
stdout.
other optional keyword arguments will be passed to leastsq().
Outputs: p, dp, statdict where
p: list of fitted values of par1, par2 etc.
dp: list of estimated errors
statdict: dictionary of various statistical parameters:
'DoF': Degrees of freedom
'Chi2': Chi-squared
'Chi2_reduced': Reduced Chi-squared
'R2': Coefficient of determination
'num_func_eval': number of function evaluations during fit.
'func_value': the function evaluated in the best fitting parameters
'message': status message from leastsq()
'error_flag': integer status flag from leastsq() ('ier')
'Covariance': covariance matrix (variances in the diagonal)
'Correlation_coeffs': Pearson's correlation coefficients (usually
denoted by 'r') in a matrix. The diagonal is unity.
Notes:
for the actual fitting, scipy.optimize.leastsq() is used.
"""
if verbose:
t0 = time.monotonic()
print("nlsq_fit starting.")
else:
t0 = 0
func_orig = func
params_init_orig = params_init
func, params_init = hide_fixedparams(func_orig, params_init_orig)
if (dy is None) or (dy == np.nan).sum() > 0 or (dy <= 0).sum() > 0:
if verbose:
print("nlsq_fit: no weighting")
dy = None
def objectivefunc(params, x, y, dy):
"""The target function for leastsq()."""
if dy is None:
return (func(x, *(params.tolist())) - y)
else:
return (func(x, *(params.tolist())) - y) / dy
# do the fitting
if verbose:
print("nlsq_fit: now doing the fitting...")
t1 = time.monotonic()
else:
t1 = 0
par, cov, infodict, mesg, ier = leastsq(objectivefunc,
np.array(params_init),
(x, y, dy), full_output=True,
**kwargs)
if verbose:
print("nlsq_fit: fitting done in %.2f seconds." % (time.monotonic() - t1))
print("nlsq_fit: status from scipy.optimize.leastsq(): %d (%s)" % (ier, mesg))
print("nlsq_fit: extracting statistics.")
# test if the covariance was singular (cov is None)
if cov is None:
cov = np.ones((len(par), len(par))) * np.nan # set it to a NaN matrix
# calculate the Pearson's R^2 parameter (coefficient of determination)
if dy is None:
sserr = np.sum(((func(x, *(par.tolist())) - y)) ** 2)
sstot = np.sum((y - np.mean(y)) ** 2)
else:
sserr = np.sum(((func(x, *(par.tolist())) - y) / dy) ** 2)
sstot = np.sum((y - np.mean(y)) ** 2 / dy ** 2)
r2 = 1 - sserr / sstot
# assemble the statistics dictionary
statdict = {'DoF' : len(x) - len(par), # degrees of freedom
'Chi2' : (infodict['fvec'] ** 2).sum(),
'R2' : r2,
'num_func_eval' : infodict['nfev'],
'func_value' : func(x, *(par.tolist())),
'message' : mesg,
'error_flag' : ier,
}
statdict['Chi2_reduced'] = statdict['Chi2'] / statdict['DoF']
statdict['Covariance'] = cov * statdict['Chi2_reduced']
par, statdict['Covariance'] = resubstitute_fixedparams(par, params_init_orig, statdict['Covariance'])
# calculate the estimated errors of the fit parameters
dpar = np.sqrt(statdict['Covariance'].diagonal())
# Pearson's correlation coefficients (usually 'r') in a matrix.
statdict['Correlation_coeffs'] = statdict['Covariance'] / np.outer(dpar,
dpar)
if verbose:
print("nlsq_fit: returning with results.")
print("nlsq_fit: total time: %.2f sec." % (time.monotonic() - t0))
return par, dpar, statdict |
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get("CONTENT_TYPE", "")
content_length = get_content_length(environ)
mimetype, options = parse_options_header(content_type)
return self.parse(get_input_stream(environ), mimetype, content_length, options) | Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``. | Below is the the instruction that describes the task:
### Input:
Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
### Response:
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get("CONTENT_TYPE", "")
content_length = get_content_length(environ)
mimetype, options = parse_options_header(content_type)
return self.parse(get_input_stream(environ), mimetype, content_length, options) |
def retrieve_comparison_image(self):
"""
Search the DB for a comparison image for this cutout.
"""
# selecting comparator when on a comparator should load a new one.
collectionID = self.comparison_image_list[self.comparison_image_index]['EXPNUM']
ref_ra = self.reading.ra * units.degree
ref_dec = self.reading.dec * units.degree
radius = self.radius is not None and self.radius or config.read('CUTOUTS.SINGLETS.RADIUS') * units.arcminute
try:
comparison = collectionID
hdu_list = storage.ra_dec_cutout(storage.dbimages_uri(comparison),
SkyCoord(ref_ra, ref_dec), radius)
ccd = int(hdu_list[-1].header.get('EXTVER', 0))
obs = Observation(str(comparison), 'p',
ccdnum=ccd)
x = hdu_list[-1].header.get('NAXIS1', 0) // 2.0
y = hdu_list[-1].header.get('NAXIS2', 0) // 2.0
ref_ra = self.reading.ra * units.degree
ref_dec = self.reading.dec * units.degree
reading = SourceReading(x, y, self.reading.x, self.reading.y,
ref_ra, ref_dec, self.reading.x, self.reading.y, obs)
self.comparison_image_list[self.comparison_image_index]["REFERENCE"] = SourceCutout(reading, hdu_list)
except Exception as ex:
print traceback.format_exc()
print ex
print "Failed to load comparison image;"
self.comparison_image_index = None
logger.error("{} {}".format(type(ex), str(ex)))
logger.error(traceback.format_exc()) | Search the DB for a comparison image for this cutout. | Below is the the instruction that describes the task:
### Input:
Search the DB for a comparison image for this cutout.
### Response:
def retrieve_comparison_image(self):
"""
Search the DB for a comparison image for this cutout.
"""
# selecting comparator when on a comparator should load a new one.
collectionID = self.comparison_image_list[self.comparison_image_index]['EXPNUM']
ref_ra = self.reading.ra * units.degree
ref_dec = self.reading.dec * units.degree
radius = self.radius is not None and self.radius or config.read('CUTOUTS.SINGLETS.RADIUS') * units.arcminute
try:
comparison = collectionID
hdu_list = storage.ra_dec_cutout(storage.dbimages_uri(comparison),
SkyCoord(ref_ra, ref_dec), radius)
ccd = int(hdu_list[-1].header.get('EXTVER', 0))
obs = Observation(str(comparison), 'p',
ccdnum=ccd)
x = hdu_list[-1].header.get('NAXIS1', 0) // 2.0
y = hdu_list[-1].header.get('NAXIS2', 0) // 2.0
ref_ra = self.reading.ra * units.degree
ref_dec = self.reading.dec * units.degree
reading = SourceReading(x, y, self.reading.x, self.reading.y,
ref_ra, ref_dec, self.reading.x, self.reading.y, obs)
self.comparison_image_list[self.comparison_image_index]["REFERENCE"] = SourceCutout(reading, hdu_list)
except Exception as ex:
print traceback.format_exc()
print ex
print "Failed to load comparison image;"
self.comparison_image_index = None
logger.error("{} {}".format(type(ex), str(ex)))
logger.error(traceback.format_exc()) |
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim) | Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str. | Below is the the instruction that describes the task:
### Input:
Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
### Response:
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim) |
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row) | Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1. | Below is the the instruction that describes the task:
### Input:
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
### Response:
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row) |
async def async_set_state(self, data):
"""Recall scene to group."""
field = self._deconz_id + '/recall'
await self._async_set_state_callback(field, data) | Recall scene to group. | Below is the the instruction that describes the task:
### Input:
Recall scene to group.
### Response:
async def async_set_state(self, data):
"""Recall scene to group."""
field = self._deconz_id + '/recall'
await self._async_set_state_callback(field, data) |
def remove_umis(adj_list, cluster, nodes):
'''removes the specified nodes from the cluster and returns
the remaining nodes '''
# list incomprehension: for x in nodes: for node in adj_list[x]: yield node
nodes_to_remove = set([node
for x in nodes
for node in adj_list[x]] + nodes)
return cluster - nodes_to_remove | removes the specified nodes from the cluster and returns
the remaining nodes | Below is the the instruction that describes the task:
### Input:
removes the specified nodes from the cluster and returns
the remaining nodes
### Response:
def remove_umis(adj_list, cluster, nodes):
'''removes the specified nodes from the cluster and returns
the remaining nodes '''
# list incomprehension: for x in nodes: for node in adj_list[x]: yield node
nodes_to_remove = set([node
for x in nodes
for node in adj_list[x]] + nodes)
return cluster - nodes_to_remove |
def getMonth(s):
"""
Known formats:
Month ("%b")
Month Day ("%b %d")
Month-Month ("%b-%b") --- this gets coerced to the first %b, dropping the month range
Season ("%s") --- this gets coerced to use the first month of the given season
Month Day Year ("%b %d %Y")
Month Year ("%b %Y")
Year Month Day ("%Y %m %d")
"""
monthOrSeason = s.split('-')[0].upper()
if monthOrSeason in monthDict:
return monthDict[monthOrSeason]
else:
monthOrSeason = s.split('-')[1].upper()
if monthOrSeason.isdigit():
return monthOrSeason
else:
return monthDict[monthOrSeason]
raise ValueError("Month format not recognized: " + s) | Known formats:
Month ("%b")
Month Day ("%b %d")
Month-Month ("%b-%b") --- this gets coerced to the first %b, dropping the month range
Season ("%s") --- this gets coerced to use the first month of the given season
Month Day Year ("%b %d %Y")
Month Year ("%b %Y")
Year Month Day ("%Y %m %d") | Below is the the instruction that describes the task:
### Input:
Known formats:
Month ("%b")
Month Day ("%b %d")
Month-Month ("%b-%b") --- this gets coerced to the first %b, dropping the month range
Season ("%s") --- this gets coerced to use the first month of the given season
Month Day Year ("%b %d %Y")
Month Year ("%b %Y")
Year Month Day ("%Y %m %d")
### Response:
def getMonth(s):
"""
Known formats:
Month ("%b")
Month Day ("%b %d")
Month-Month ("%b-%b") --- this gets coerced to the first %b, dropping the month range
Season ("%s") --- this gets coerced to use the first month of the given season
Month Day Year ("%b %d %Y")
Month Year ("%b %Y")
Year Month Day ("%Y %m %d")
"""
monthOrSeason = s.split('-')[0].upper()
if monthOrSeason in monthDict:
return monthDict[monthOrSeason]
else:
monthOrSeason = s.split('-')[1].upper()
if monthOrSeason.isdigit():
return monthOrSeason
else:
return monthDict[monthOrSeason]
raise ValueError("Month format not recognized: " + s) |
def add_parents(self, parents):
"""Adds new parent nodes after filtering for duplicates
Args:
parents (list): list of OmniTree nodes to add as parents
"""
self._parents += [p for p in parents if p not in self._parents] | Adds new parent nodes after filtering for duplicates
Args:
parents (list): list of OmniTree nodes to add as parents | Below is the the instruction that describes the task:
### Input:
Adds new parent nodes after filtering for duplicates
Args:
parents (list): list of OmniTree nodes to add as parents
### Response:
def add_parents(self, parents):
"""Adds new parent nodes after filtering for duplicates
Args:
parents (list): list of OmniTree nodes to add as parents
"""
self._parents += [p for p in parents if p not in self._parents] |
def serialize_metric(self, metric, m_name, keys, m_type):
"""Serialize and send available measures of a metric."""
return [
self.format_metric_string(m_name, getattr(metric, key), m_type)
for key in keys
] | Serialize and send available measures of a metric. | Below is the the instruction that describes the task:
### Input:
Serialize and send available measures of a metric.
### Response:
def serialize_metric(self, metric, m_name, keys, m_type):
"""Serialize and send available measures of a metric."""
return [
self.format_metric_string(m_name, getattr(metric, key), m_type)
for key in keys
] |
def Distribution(pos, size, counts, dtype):
"""
Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos. The non-zero indices
contain a normalized distribution based on the counts.
:param pos: A single integer or sequence of integers that specify
the position of ones to be set.
:param size: The total size of the array to be returned.
:param counts: The number of times we have observed each index.
:param dtype: The element type (compatible with NumPy array())
of the array to be returned.
:returns: An array of length size and element type dtype.
"""
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
# calculate normalization constant
total = 0
for i in pos:
total += counts[i]
total = float(total)
# set included positions to normalized probability
for i in pos:
x[i] = counts[i]/total
# If we don't have a set of positions, assume there's only one position
else: x[pos] = 1
return x | Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos. The non-zero indices
contain a normalized distribution based on the counts.
:param pos: A single integer or sequence of integers that specify
the position of ones to be set.
:param size: The total size of the array to be returned.
:param counts: The number of times we have observed each index.
:param dtype: The element type (compatible with NumPy array())
of the array to be returned.
:returns: An array of length size and element type dtype. | Below is the the instruction that describes the task:
### Input:
Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos. The non-zero indices
contain a normalized distribution based on the counts.
:param pos: A single integer or sequence of integers that specify
the position of ones to be set.
:param size: The total size of the array to be returned.
:param counts: The number of times we have observed each index.
:param dtype: The element type (compatible with NumPy array())
of the array to be returned.
:returns: An array of length size and element type dtype.
### Response:
def Distribution(pos, size, counts, dtype):
"""
Returns an array of length size and type dtype that is everywhere 0,
except in the indices listed in sequence pos. The non-zero indices
contain a normalized distribution based on the counts.
:param pos: A single integer or sequence of integers that specify
the position of ones to be set.
:param size: The total size of the array to be returned.
:param counts: The number of times we have observed each index.
:param dtype: The element type (compatible with NumPy array())
of the array to be returned.
:returns: An array of length size and element type dtype.
"""
x = numpy.zeros(size, dtype=dtype)
if hasattr(pos, '__iter__'):
# calculate normalization constant
total = 0
for i in pos:
total += counts[i]
total = float(total)
# set included positions to normalized probability
for i in pos:
x[i] = counts[i]/total
# If we don't have a set of positions, assume there's only one position
else: x[pos] = 1
return x |
def add_spectrum(self, compare_to_existing=True, **kwargs):
"""Add a `Spectrum` instance to this entry."""
spec_key = self._KEYS.SPECTRA
# Make sure that a source is given, and is valid (nor erroneous)
source = self._check_cat_dict_source(Spectrum, spec_key, **kwargs)
if source is None:
return None
# Try to create a new instance of `Spectrum`
new_spectrum = self._init_cat_dict(Spectrum, spec_key, **kwargs)
if new_spectrum is None:
return None
is_dupe = False
for item in self.get(spec_key, []):
# Only the `filename` should be compared for duplicates. If a
# duplicate is found, that means the previous `exclude` array
# should be saved to the new object, and the old deleted
if new_spectrum.is_duplicate_of(item):
if SPECTRUM.EXCLUDE in new_spectrum:
item[SPECTRUM.EXCLUDE] = new_spectrum[SPECTRUM.EXCLUDE]
elif SPECTRUM.EXCLUDE in item:
item.update(new_spectrum)
is_dupe = True
break
if not is_dupe:
self.setdefault(spec_key, []).append(new_spectrum)
return | Add a `Spectrum` instance to this entry. | Below is the the instruction that describes the task:
### Input:
Add a `Spectrum` instance to this entry.
### Response:
def add_spectrum(self, compare_to_existing=True, **kwargs):
"""Add a `Spectrum` instance to this entry."""
spec_key = self._KEYS.SPECTRA
# Make sure that a source is given, and is valid (nor erroneous)
source = self._check_cat_dict_source(Spectrum, spec_key, **kwargs)
if source is None:
return None
# Try to create a new instance of `Spectrum`
new_spectrum = self._init_cat_dict(Spectrum, spec_key, **kwargs)
if new_spectrum is None:
return None
is_dupe = False
for item in self.get(spec_key, []):
# Only the `filename` should be compared for duplicates. If a
# duplicate is found, that means the previous `exclude` array
# should be saved to the new object, and the old deleted
if new_spectrum.is_duplicate_of(item):
if SPECTRUM.EXCLUDE in new_spectrum:
item[SPECTRUM.EXCLUDE] = new_spectrum[SPECTRUM.EXCLUDE]
elif SPECTRUM.EXCLUDE in item:
item.update(new_spectrum)
is_dupe = True
break
if not is_dupe:
self.setdefault(spec_key, []).append(new_spectrum)
return |
def ridge_regress(self, cv = 20, alphas = None ):
"""perform k-folds cross-validated ridge regression on the design_matrix. To be used when the design matrix contains very collinear regressors. For cross-validation and ridge fitting, we use sklearn's RidgeCV functionality. Note: intercept is not fit, and data are not prenormalized.
:param cv: cross-validated folds, inherits RidgeCV cv argument's functionality.
:type cv: int, standard = 20
:param alphas: values of penalization parameter to be traversed by the procedure, inherits RidgeCV cv argument's functionality. Standard value, when parameter is None, is np.logspace(7, 0, 20)
:type alphas: numpy array, from >0 to 1.
:returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
"""
if alphas is None:
alphas = np.logspace(7, 0, 20)
self.rcv = linear_model.RidgeCV(alphas=alphas,
fit_intercept=False,
cv=cv)
self.rcv.fit(self.design_matrix.T, self.resampled_signal.T)
self.betas = self.rcv.coef_.T
self.residuals = self.resampled_signal - self.rcv.predict(self.design_matrix.T)
self.logger.debug('performed ridge regression on %s design_matrix and %s signal, resulting alpha value is %f' % (str(self.design_matrix.shape), str(self.resampled_signal.shape), self.rcv.alpha_)) | perform k-folds cross-validated ridge regression on the design_matrix. To be used when the design matrix contains very collinear regressors. For cross-validation and ridge fitting, we use sklearn's RidgeCV functionality. Note: intercept is not fit, and data are not prenormalized.
:param cv: cross-validated folds, inherits RidgeCV cv argument's functionality.
:type cv: int, standard = 20
:param alphas: values of penalization parameter to be traversed by the procedure, inherits RidgeCV cv argument's functionality. Standard value, when parameter is None, is np.logspace(7, 0, 20)
:type alphas: numpy array, from >0 to 1.
:returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created. | Below is the the instruction that describes the task:
### Input:
perform k-folds cross-validated ridge regression on the design_matrix. To be used when the design matrix contains very collinear regressors. For cross-validation and ridge fitting, we use sklearn's RidgeCV functionality. Note: intercept is not fit, and data are not prenormalized.
:param cv: cross-validated folds, inherits RidgeCV cv argument's functionality.
:type cv: int, standard = 20
:param alphas: values of penalization parameter to be traversed by the procedure, inherits RidgeCV cv argument's functionality. Standard value, when parameter is None, is np.logspace(7, 0, 20)
:type alphas: numpy array, from >0 to 1.
:returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
### Response:
def ridge_regress(self, cv = 20, alphas = None ):
"""perform k-folds cross-validated ridge regression on the design_matrix. To be used when the design matrix contains very collinear regressors. For cross-validation and ridge fitting, we use sklearn's RidgeCV functionality. Note: intercept is not fit, and data are not prenormalized.
:param cv: cross-validated folds, inherits RidgeCV cv argument's functionality.
:type cv: int, standard = 20
:param alphas: values of penalization parameter to be traversed by the procedure, inherits RidgeCV cv argument's functionality. Standard value, when parameter is None, is np.logspace(7, 0, 20)
:type alphas: numpy array, from >0 to 1.
:returns: instance variables 'betas' (nr_betas x nr_signals) and 'residuals' (nr_signals x nr_samples) are created.
"""
if alphas is None:
alphas = np.logspace(7, 0, 20)
self.rcv = linear_model.RidgeCV(alphas=alphas,
fit_intercept=False,
cv=cv)
self.rcv.fit(self.design_matrix.T, self.resampled_signal.T)
self.betas = self.rcv.coef_.T
self.residuals = self.resampled_signal - self.rcv.predict(self.design_matrix.T)
self.logger.debug('performed ridge regression on %s design_matrix and %s signal, resulting alpha value is %f' % (str(self.design_matrix.shape), str(self.resampled_signal.shape), self.rcv.alpha_)) |
def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0,
verbosity=None,
**kwargs):
"""get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs)
Fits a light curve to the data contained in *file* using
:func:`get_lightcurve`.
**Parameters**
file : str or file
File or filename to load data from.
use_cols : iterable or None, optional
Iterable of columns to read from data file, or None to read all columns
(default None).
skiprows : number, optional
Number of rows to skip at beginning of *file* (default 0)
**Returns**
out : dict
See :func:`get_lightcurve`.
"""
data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols)
if len(data) != 0:
masked_data = numpy.ma.array(data=data, mask=None, dtype=float)
return get_lightcurve(masked_data, *args,
verbosity=verbosity, **kwargs)
else:
verbose_print("{}: file contains no data points".format(file),
operation="coverage", verbosity=verbosity)
return | get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs)
Fits a light curve to the data contained in *file* using
:func:`get_lightcurve`.
**Parameters**
file : str or file
File or filename to load data from.
use_cols : iterable or None, optional
Iterable of columns to read from data file, or None to read all columns
(default None).
skiprows : number, optional
Number of rows to skip at beginning of *file* (default 0)
**Returns**
out : dict
See :func:`get_lightcurve`. | Below is the the instruction that describes the task:
### Input:
get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs)
Fits a light curve to the data contained in *file* using
:func:`get_lightcurve`.
**Parameters**
file : str or file
File or filename to load data from.
use_cols : iterable or None, optional
Iterable of columns to read from data file, or None to read all columns
(default None).
skiprows : number, optional
Number of rows to skip at beginning of *file* (default 0)
**Returns**
out : dict
See :func:`get_lightcurve`.
### Response:
def get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0,
verbosity=None,
**kwargs):
"""get_lightcurve_from_file(file, *args, use_cols=None, skiprows=0, **kwargs)
Fits a light curve to the data contained in *file* using
:func:`get_lightcurve`.
**Parameters**
file : str or file
File or filename to load data from.
use_cols : iterable or None, optional
Iterable of columns to read from data file, or None to read all columns
(default None).
skiprows : number, optional
Number of rows to skip at beginning of *file* (default 0)
**Returns**
out : dict
See :func:`get_lightcurve`.
"""
data = numpy.loadtxt(file, skiprows=skiprows, usecols=use_cols)
if len(data) != 0:
masked_data = numpy.ma.array(data=data, mask=None, dtype=float)
return get_lightcurve(masked_data, *args,
verbosity=verbosity, **kwargs)
else:
verbose_print("{}: file contains no data points".format(file),
operation="coverage", verbosity=verbosity)
return |
def date_struct_nn(year, month, day, tz="UTC"):
"""
Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates
"""
if not day:
day = 1
if not month:
month = 1
return date_struct(year, month, day, tz) | Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates | Below is the the instruction that describes the task:
### Input:
Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates
### Response:
def date_struct_nn(year, month, day, tz="UTC"):
"""
Assemble a date object but if day or month is none set them to 1
to make it easier to deal with partial dates
"""
if not day:
day = 1
if not month:
month = 1
return date_struct(year, month, day, tz) |
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
"""
self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE) | Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default). | Below is the the instruction that describes the task:
### Input:
Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
### Response:
def copy_pkg(self, filename, id_=-1):
"""Copy a package to the distribution server.
Bundle-style packages must be zipped prior to copying.
Args:
filename: Full path to file to upload.
id_: ID of Package object to associate with, or -1 for new
packages (default).
"""
self._copy(filename, id_=id_, file_type=PKG_FILE_TYPE) |
def start_heartbeat(self):
""" Reset hearbeat timer """
self.stop_heartbeat()
self._heartbeat_timer = task.LoopingCall(self._heartbeat)
self._heartbeat_timer.start(self._heartbeat_interval, False) | Reset hearbeat timer | Below is the the instruction that describes the task:
### Input:
Reset hearbeat timer
### Response:
def start_heartbeat(self):
""" Reset hearbeat timer """
self.stop_heartbeat()
self._heartbeat_timer = task.LoopingCall(self._heartbeat)
self._heartbeat_timer.start(self._heartbeat_interval, False) |
def partition(thelist, n):
"""
Break a list into ``n`` pieces. The last list may be larger than the rest if
the list doesn't break cleanly. That is::
>>> l = range(10)
>>> partition(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> partition(l, 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
>>> partition(l, 4)
[[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]]
>>> partition(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
p = len(thelist) / n
return [thelist[p*i:p*(i+1)] for i in range(n - 1)] + [thelist[p*(i+1):]] | Break a list into ``n`` pieces. The last list may be larger than the rest if
the list doesn't break cleanly. That is::
>>> l = range(10)
>>> partition(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> partition(l, 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
>>> partition(l, 4)
[[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]]
>>> partition(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]] | Below is the the instruction that describes the task:
### Input:
Break a list into ``n`` pieces. The last list may be larger than the rest if
the list doesn't break cleanly. That is::
>>> l = range(10)
>>> partition(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> partition(l, 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
>>> partition(l, 4)
[[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]]
>>> partition(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
### Response:
def partition(thelist, n):
"""
Break a list into ``n`` pieces. The last list may be larger than the rest if
the list doesn't break cleanly. That is::
>>> l = range(10)
>>> partition(l, 2)
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> partition(l, 3)
[[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
>>> partition(l, 4)
[[0, 1], [2, 3], [4, 5], [6, 7, 8, 9]]
>>> partition(l, 5)
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
"""
try:
n = int(n)
thelist = list(thelist)
except (ValueError, TypeError):
return [thelist]
p = len(thelist) / n
return [thelist[p*i:p*(i+1)] for i in range(n - 1)] + [thelist[p*(i+1):]] |
def make_event_filter(self):
"""Create a new event filter."""
event_filter = EventFilter(
self.event_name,
self.event,
self.filters,
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter | Create a new event filter. | Below is the the instruction that describes the task:
### Input:
Create a new event filter.
### Response:
def make_event_filter(self):
"""Create a new event filter."""
event_filter = EventFilter(
self.event_name,
self.event,
self.filters,
from_block=self.from_block,
to_block=self.to_block
)
event_filter.set_poll_interval(0.5)
return event_filter |
def _renew_token(self, retry=True):
"""Renew expired ThreatConnect Token."""
self.renewing = True
self.log.info('Renewing ThreatConnect Token')
self.log.info('Current Token Expiration: {}'.format(self._token_expiration))
try:
params = {'expiredToken': self._token}
url = '{}/appAuth'.format(self._token_url)
r = get(url, params=params, verify=self._session.verify)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
if (
r.status_code == 401
and 'application/json' in r.headers.get('content-type', '')
and 'Retry token is invalid' in r.json().get('message')
):
# TODO: remove this once token renewal issue is fixed
self.log.error('params: {}'.format(params))
self.log.error('url: {}'.format(r.url))
# log failure
err_reason = r.text or r.reason
err_msg = 'Token Retry Error. API status code: {}, API message: {}.'
raise RuntimeError(1042, err_msg.format(r.status_code, err_reason))
elif retry:
warn_msg = 'Token Retry Error. API status code: {}, API message: {}.'
self.log.warning(warn_msg.format(r.status_code, r.text))
# delay and retry token renewal
time.sleep(15)
self._renew_token(False)
else:
err_reason = r.text or r.reason
err_msg = 'Token Retry Error. API status code: {}, API message: {}.'
raise RuntimeError(1042, err_msg.format(r.status_code, err_reason))
data = r.json()
if retry and (data.get('apiToken') is None or data.get('apiTokenExpires') is None):
# add retry logic to handle case if the token renewal doesn't return valid data
warn_msg = 'Token Retry Error: no values for apiToken or apiTokenExpires ({}).'
self.log.warning(warn_msg.format(r.text))
self._renew_token(False)
else:
self._token = data.get('apiToken')
self._token_expiration = int(data.get('apiTokenExpires'))
self.log.info('New Token Expiration: {}'.format(self._token_expiration))
self.renewing = False
except exceptions.SSLError:
self.log.error(u'SSL Error during token renewal.')
self.renewing = False | Renew expired ThreatConnect Token. | Below is the the instruction that describes the task:
### Input:
Renew expired ThreatConnect Token.
### Response:
def _renew_token(self, retry=True):
"""Renew expired ThreatConnect Token."""
self.renewing = True
self.log.info('Renewing ThreatConnect Token')
self.log.info('Current Token Expiration: {}'.format(self._token_expiration))
try:
params = {'expiredToken': self._token}
url = '{}/appAuth'.format(self._token_url)
r = get(url, params=params, verify=self._session.verify)
if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
if (
r.status_code == 401
and 'application/json' in r.headers.get('content-type', '')
and 'Retry token is invalid' in r.json().get('message')
):
# TODO: remove this once token renewal issue is fixed
self.log.error('params: {}'.format(params))
self.log.error('url: {}'.format(r.url))
# log failure
err_reason = r.text or r.reason
err_msg = 'Token Retry Error. API status code: {}, API message: {}.'
raise RuntimeError(1042, err_msg.format(r.status_code, err_reason))
elif retry:
warn_msg = 'Token Retry Error. API status code: {}, API message: {}.'
self.log.warning(warn_msg.format(r.status_code, r.text))
# delay and retry token renewal
time.sleep(15)
self._renew_token(False)
else:
err_reason = r.text or r.reason
err_msg = 'Token Retry Error. API status code: {}, API message: {}.'
raise RuntimeError(1042, err_msg.format(r.status_code, err_reason))
data = r.json()
if retry and (data.get('apiToken') is None or data.get('apiTokenExpires') is None):
# add retry logic to handle case if the token renewal doesn't return valid data
warn_msg = 'Token Retry Error: no values for apiToken or apiTokenExpires ({}).'
self.log.warning(warn_msg.format(r.text))
self._renew_token(False)
else:
self._token = data.get('apiToken')
self._token_expiration = int(data.get('apiTokenExpires'))
self.log.info('New Token Expiration: {}'.format(self._token_expiration))
self.renewing = False
except exceptions.SSLError:
self.log.error(u'SSL Error during token renewal.')
self.renewing = False |
def placeholder_symbol_table(name, version, max_id):
"""Constructs a shared symbol table that consists symbols that all have no known text.
This is generally used for cases where a shared symbol table is not available by the
application.
Args:
name (unicode): The name of the shared symbol table.
version (int): The version of the shared symbol table.
max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``
Returns:
SymbolTable: The synthesized table.
"""
if version <= 0:
raise ValueError('Version must be grater than or equal to 1: %s' % version)
if max_id < 0:
raise ValueError('Max ID must be zero or positive: %s' % max_id)
return SymbolTable(
table_type=SHARED_TABLE_TYPE,
symbols=repeat(None, max_id),
name=name,
version=version,
is_substitute=True
) | Constructs a shared symbol table that consists symbols that all have no known text.
This is generally used for cases where a shared symbol table is not available by the
application.
Args:
name (unicode): The name of the shared symbol table.
version (int): The version of the shared symbol table.
max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``
Returns:
SymbolTable: The synthesized table. | Below is the the instruction that describes the task:
### Input:
Constructs a shared symbol table that consists symbols that all have no known text.
This is generally used for cases where a shared symbol table is not available by the
application.
Args:
name (unicode): The name of the shared symbol table.
version (int): The version of the shared symbol table.
max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``
Returns:
SymbolTable: The synthesized table.
### Response:
def placeholder_symbol_table(name, version, max_id):
"""Constructs a shared symbol table that consists symbols that all have no known text.
This is generally used for cases where a shared symbol table is not available by the
application.
Args:
name (unicode): The name of the shared symbol table.
version (int): The version of the shared symbol table.
max_id (int): The maximum ID allocated by this symbol table, must be ``>= 0``
Returns:
SymbolTable: The synthesized table.
"""
if version <= 0:
raise ValueError('Version must be grater than or equal to 1: %s' % version)
if max_id < 0:
raise ValueError('Max ID must be zero or positive: %s' % max_id)
return SymbolTable(
table_type=SHARED_TABLE_TYPE,
symbols=repeat(None, max_id),
name=name,
version=version,
is_substitute=True
) |
def _build_command_chain(self, command):
"""
Builds execution chain including all intercepters and the specified command.
:param command: the command to build a chain.
"""
next = command
for intercepter in reversed(self._intercepters):
next = InterceptedCommand(intercepter, next)
self._commands_by_name[next.get_name()] = next | Builds execution chain including all intercepters and the specified command.
:param command: the command to build a chain. | Below is the the instruction that describes the task:
### Input:
Builds execution chain including all intercepters and the specified command.
:param command: the command to build a chain.
### Response:
def _build_command_chain(self, command):
"""
Builds execution chain including all intercepters and the specified command.
:param command: the command to build a chain.
"""
next = command
for intercepter in reversed(self._intercepters):
next = InterceptedCommand(intercepter, next)
self._commands_by_name[next.get_name()] = next |
def gen_unique_id(serialized_name, args, kwargs):
"""
Generates and returns a hex-encoded 256-bit ID for the given task name and
args. Used to generate IDs for unique tasks or for task locks.
"""
return hashlib.sha256(json.dumps({
'func': serialized_name,
'args': args,
'kwargs': kwargs,
}, sort_keys=True).encode('utf8')).hexdigest() | Generates and returns a hex-encoded 256-bit ID for the given task name and
args. Used to generate IDs for unique tasks or for task locks. | Below is the the instruction that describes the task:
### Input:
Generates and returns a hex-encoded 256-bit ID for the given task name and
args. Used to generate IDs for unique tasks or for task locks.
### Response:
def gen_unique_id(serialized_name, args, kwargs):
"""
Generates and returns a hex-encoded 256-bit ID for the given task name and
args. Used to generate IDs for unique tasks or for task locks.
"""
return hashlib.sha256(json.dumps({
'func': serialized_name,
'args': args,
'kwargs': kwargs,
}, sort_keys=True).encode('utf8')).hexdigest() |
def get_all_draft_pages_from_space(self, space, start=0, limit=500, status='draft'):
"""
Get list of draft pages from space
Use case is cleanup old drafts from Confluence
:param space:
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of pages to return, this may be restricted by
fixed system limits. Default: 500
:param status:
:return:
"""
return self.get_all_pages_from_space(space, start, limit, status) | Get list of draft pages from space
Use case is cleanup old drafts from Confluence
:param space:
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of pages to return, this may be restricted by
fixed system limits. Default: 500
:param status:
:return: | Below is the the instruction that describes the task:
### Input:
Get list of draft pages from space
Use case is cleanup old drafts from Confluence
:param space:
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of pages to return, this may be restricted by
fixed system limits. Default: 500
:param status:
:return:
### Response:
def get_all_draft_pages_from_space(self, space, start=0, limit=500, status='draft'):
"""
Get list of draft pages from space
Use case is cleanup old drafts from Confluence
:param space:
:param start: OPTIONAL: The start point of the collection to return. Default: None (0).
:param limit: OPTIONAL: The limit of the number of pages to return, this may be restricted by
fixed system limits. Default: 500
:param status:
:return:
"""
return self.get_all_pages_from_space(space, start, limit, status) |
async def create_hlk_sw16_connection(port=None, host=None,
disconnect_callback=None,
reconnect_callback=None, loop=None,
logger=None, timeout=None,
reconnect_interval=None):
"""Create HLK-SW16 Client class."""
client = SW16Client(host, port=port,
disconnect_callback=disconnect_callback,
reconnect_callback=reconnect_callback,
loop=loop, logger=logger,
timeout=timeout, reconnect_interval=reconnect_interval)
await client.setup()
return client | Create HLK-SW16 Client class. | Below is the the instruction that describes the task:
### Input:
Create HLK-SW16 Client class.
### Response:
async def create_hlk_sw16_connection(port=None, host=None,
disconnect_callback=None,
reconnect_callback=None, loop=None,
logger=None, timeout=None,
reconnect_interval=None):
"""Create HLK-SW16 Client class."""
client = SW16Client(host, port=port,
disconnect_callback=disconnect_callback,
reconnect_callback=reconnect_callback,
loop=loop, logger=logger,
timeout=timeout, reconnect_interval=reconnect_interval)
await client.setup()
return client |
def component_on_date(self, date: datetime.date) -> Optional["Interval"]:
"""
Returns the part of this interval that falls on the date given, or
``None`` if the interval doesn't have any part during that date.
"""
return self.intersection(Interval.wholeday(date)) | Returns the part of this interval that falls on the date given, or
``None`` if the interval doesn't have any part during that date. | Below is the the instruction that describes the task:
### Input:
Returns the part of this interval that falls on the date given, or
``None`` if the interval doesn't have any part during that date.
### Response:
def component_on_date(self, date: datetime.date) -> Optional["Interval"]:
"""
Returns the part of this interval that falls on the date given, or
``None`` if the interval doesn't have any part during that date.
"""
return self.intersection(Interval.wholeday(date)) |
def polarity(self):
"""
Sets the polarity of the motor. With `normal` polarity, a positive duty
cycle will cause the motor to rotate clockwise. With `inversed` polarity,
a positive duty cycle will cause the motor to rotate counter-clockwise.
Valid values are `normal` and `inversed`.
"""
self._polarity, value = self.get_attr_string(self._polarity, 'polarity')
return value | Sets the polarity of the motor. With `normal` polarity, a positive duty
cycle will cause the motor to rotate clockwise. With `inversed` polarity,
a positive duty cycle will cause the motor to rotate counter-clockwise.
Valid values are `normal` and `inversed`. | Below is the the instruction that describes the task:
### Input:
Sets the polarity of the motor. With `normal` polarity, a positive duty
cycle will cause the motor to rotate clockwise. With `inversed` polarity,
a positive duty cycle will cause the motor to rotate counter-clockwise.
Valid values are `normal` and `inversed`.
### Response:
def polarity(self):
"""
Sets the polarity of the motor. With `normal` polarity, a positive duty
cycle will cause the motor to rotate clockwise. With `inversed` polarity,
a positive duty cycle will cause the motor to rotate counter-clockwise.
Valid values are `normal` and `inversed`.
"""
self._polarity, value = self.get_attr_string(self._polarity, 'polarity')
return value |
def watch_prefix_once(self, key_prefix, timeout=None, **kwargs):
"""Watches a range of keys with a prefix, similar to watch_once"""
kwargs['range_end'] = \
_increment_last_byte(key_prefix)
return self.watch_once(key_prefix, timeout=timeout, **kwargs) | Watches a range of keys with a prefix, similar to watch_once | Below is the the instruction that describes the task:
### Input:
Watches a range of keys with a prefix, similar to watch_once
### Response:
def watch_prefix_once(self, key_prefix, timeout=None, **kwargs):
"""Watches a range of keys with a prefix, similar to watch_once"""
kwargs['range_end'] = \
_increment_last_byte(key_prefix)
return self.watch_once(key_prefix, timeout=timeout, **kwargs) |
def verify(self, signature, data):
"""Verifies some data was signed by this private key.
:param signature: The signature to verify.
:type signature: ``bytes``
:param data: The data that was supposedly signed.
:type data: ``bytes``
:rtype: ``bool``
"""
return self._pk.public_key.verify(signature, data) | Verifies some data was signed by this private key.
:param signature: The signature to verify.
:type signature: ``bytes``
:param data: The data that was supposedly signed.
:type data: ``bytes``
:rtype: ``bool`` | Below is the the instruction that describes the task:
### Input:
Verifies some data was signed by this private key.
:param signature: The signature to verify.
:type signature: ``bytes``
:param data: The data that was supposedly signed.
:type data: ``bytes``
:rtype: ``bool``
### Response:
def verify(self, signature, data):
"""Verifies some data was signed by this private key.
:param signature: The signature to verify.
:type signature: ``bytes``
:param data: The data that was supposedly signed.
:type data: ``bytes``
:rtype: ``bool``
"""
return self._pk.public_key.verify(signature, data) |
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):
""" Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section.
"""
output_file = open_strings_file(file_path, "a")
write_section_header_to_file(output_file, section_name)
for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):
output_file.write(u'\n')
write_entry_to_file(output_file, entry_comment, entry_key)
output_file.close() | Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section. | Below is the the instruction that describes the task:
### Input:
Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section.
### Response:
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):
""" Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section.
"""
output_file = open_strings_file(file_path, "a")
write_section_header_to_file(output_file, section_name)
for entry_key, entry_comment in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):
output_file.write(u'\n')
write_entry_to_file(output_file, entry_comment, entry_key)
output_file.close() |
def _build_gadgets_rec(self, gadget_tree_root):
"""Build a gadgets from a gadgets tree.
"""
root = gadget_tree_root.get_root()
children = gadget_tree_root.get_children()
node_list = []
root_gadget_ins = root
if not children:
node_list += [[root_gadget_ins]]
else:
for child in children:
node_list_rec = self._build_gadgets_rec(child)
node_list += [n + [root_gadget_ins] for n in node_list_rec]
return node_list | Build a gadgets from a gadgets tree. | Below is the the instruction that describes the task:
### Input:
Build a gadgets from a gadgets tree.
### Response:
def _build_gadgets_rec(self, gadget_tree_root):
"""Build a gadgets from a gadgets tree.
"""
root = gadget_tree_root.get_root()
children = gadget_tree_root.get_children()
node_list = []
root_gadget_ins = root
if not children:
node_list += [[root_gadget_ins]]
else:
for child in children:
node_list_rec = self._build_gadgets_rec(child)
node_list += [n + [root_gadget_ins] for n in node_list_rec]
return node_list |
def pseudoinverse(self):
"""Return the pseudoinverse (Moore-Penrose-Inverse).
The singular value decomposition is used to calculate the pseudoinverse.
"""
transform = False
if self.get_width() > self.get_height():
transform = True
u, sigma, v = self.transform().svd()
else:
u, sigma, v = self.svd()
# calculate inverse of sigma
for i in xrange(min(sigma.get_height(), sigma.get_width())):
val = sigma.get_value(i, i)
# divide only if the value is not 0 or close to zero (rounding errors)
eps = 1.e-15
if eps < val or val < -eps:
sigma.set_value(i, i, 1 / val)
if transform:
return (v * sigma * u.transform()).transform()
else:
return v * sigma * u.transform() | Return the pseudoinverse (Moore-Penrose-Inverse).
The singular value decomposition is used to calculate the pseudoinverse. | Below is the the instruction that describes the task:
### Input:
Return the pseudoinverse (Moore-Penrose-Inverse).
The singular value decomposition is used to calculate the pseudoinverse.
### Response:
def pseudoinverse(self):
"""Return the pseudoinverse (Moore-Penrose-Inverse).
The singular value decomposition is used to calculate the pseudoinverse.
"""
transform = False
if self.get_width() > self.get_height():
transform = True
u, sigma, v = self.transform().svd()
else:
u, sigma, v = self.svd()
# calculate inverse of sigma
for i in xrange(min(sigma.get_height(), sigma.get_width())):
val = sigma.get_value(i, i)
# divide only if the value is not 0 or close to zero (rounding errors)
eps = 1.e-15
if eps < val or val < -eps:
sigma.set_value(i, i, 1 / val)
if transform:
return (v * sigma * u.transform()).transform()
else:
return v * sigma * u.transform() |
def get_full_text(tweet):
"""
Get the full text of a tweet dict.
Includes @-mention replies and long links.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: the untruncated text of a Tweet
(finds extended text if available)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_full_text
>>> # getting the text of a Tweet that is not truncated
>>> original_untruncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "truncated": False,
... "text": "some tweet text"
... }
>>> get_full_text(original_untruncated)
'some tweet text'
>>> activity_untruncated = {"postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text"
... }
>>> get_full_text(activity_untruncated)
'some tweet text'
>>> # getting the text of a truncated Tweet (has over 140 chars)
>>> original_truncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "text": "some tweet text, lorem ip...",
... "truncated": True,
... "extended_tweet":
... {"full_text":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(original_truncated)
'some tweet text, lorem ipsum dolor sit amet'
>>> activity_truncated = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text, lorem ip...",
... "long_object":
... {"body":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(activity_truncated)
'some tweet text, lorem ipsum dolor sit amet'
"""
if is_original_format(tweet):
if tweet["truncated"]:
return tweet["extended_tweet"]["full_text"]
else:
return tweet["text"]
else:
if "long_object" in tweet:
return tweet["long_object"]["body"]
else:
return tweet["body"] | Get the full text of a tweet dict.
Includes @-mention replies and long links.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: the untruncated text of a Tweet
(finds extended text if available)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_full_text
>>> # getting the text of a Tweet that is not truncated
>>> original_untruncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "truncated": False,
... "text": "some tweet text"
... }
>>> get_full_text(original_untruncated)
'some tweet text'
>>> activity_untruncated = {"postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text"
... }
>>> get_full_text(activity_untruncated)
'some tweet text'
>>> # getting the text of a truncated Tweet (has over 140 chars)
>>> original_truncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "text": "some tweet text, lorem ip...",
... "truncated": True,
... "extended_tweet":
... {"full_text":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(original_truncated)
'some tweet text, lorem ipsum dolor sit amet'
>>> activity_truncated = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text, lorem ip...",
... "long_object":
... {"body":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(activity_truncated)
'some tweet text, lorem ipsum dolor sit amet' | Below is the the instruction that describes the task:
### Input:
Get the full text of a tweet dict.
Includes @-mention replies and long links.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: the untruncated text of a Tweet
(finds extended text if available)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_full_text
>>> # getting the text of a Tweet that is not truncated
>>> original_untruncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "truncated": False,
... "text": "some tweet text"
... }
>>> get_full_text(original_untruncated)
'some tweet text'
>>> activity_untruncated = {"postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text"
... }
>>> get_full_text(activity_untruncated)
'some tweet text'
>>> # getting the text of a truncated Tweet (has over 140 chars)
>>> original_truncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "text": "some tweet text, lorem ip...",
... "truncated": True,
... "extended_tweet":
... {"full_text":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(original_truncated)
'some tweet text, lorem ipsum dolor sit amet'
>>> activity_truncated = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text, lorem ip...",
... "long_object":
... {"body":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(activity_truncated)
'some tweet text, lorem ipsum dolor sit amet'
### Response:
def get_full_text(tweet):
"""
Get the full text of a tweet dict.
Includes @-mention replies and long links.
Args:
tweet (Tweet or dict): A Tweet object or dictionary
Returns:
str: the untruncated text of a Tweet
(finds extended text if available)
Example:
>>> from tweet_parser.getter_methods.tweet_text import get_full_text
>>> # getting the text of a Tweet that is not truncated
>>> original_untruncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "truncated": False,
... "text": "some tweet text"
... }
>>> get_full_text(original_untruncated)
'some tweet text'
>>> activity_untruncated = {"postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text"
... }
>>> get_full_text(activity_untruncated)
'some tweet text'
>>> # getting the text of a truncated Tweet (has over 140 chars)
>>> original_truncated = {
... "created_at": "Wed May 24 20:17:19 +0000 2017",
... "text": "some tweet text, lorem ip...",
... "truncated": True,
... "extended_tweet":
... {"full_text":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(original_truncated)
'some tweet text, lorem ipsum dolor sit amet'
>>> activity_truncated = {
... "postedTime": "2017-05-24T20:17:19.000Z",
... "body": "some tweet text, lorem ip...",
... "long_object":
... {"body":
... "some tweet text, lorem ipsum dolor sit amet"}
... }
>>> get_full_text(activity_truncated)
'some tweet text, lorem ipsum dolor sit amet'
"""
if is_original_format(tweet):
if tweet["truncated"]:
return tweet["extended_tweet"]["full_text"]
else:
return tweet["text"]
else:
if "long_object" in tweet:
return tweet["long_object"]["body"]
else:
return tweet["body"] |
def idxstats(in_bam, data):
"""Return BAM index stats for the given file, using samtools idxstats.
"""
index(in_bam, data["config"], check_timestamp=False)
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
samtools = config_utils.get_program("samtools", data["config"])
idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode()
out = []
for line in idxstats_out.split("\n"):
if line.strip():
contig, length, aligned, unaligned = line.split("\t")
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
return out | Return BAM index stats for the given file, using samtools idxstats. | Below is the the instruction that describes the task:
### Input:
Return BAM index stats for the given file, using samtools idxstats.
### Response:
def idxstats(in_bam, data):
"""Return BAM index stats for the given file, using samtools idxstats.
"""
index(in_bam, data["config"], check_timestamp=False)
AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
samtools = config_utils.get_program("samtools", data["config"])
idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode()
out = []
for line in idxstats_out.split("\n"):
if line.strip():
contig, length, aligned, unaligned = line.split("\t")
out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
return out |
def einstein_radius_in_units(self, unit_length='arcsec', kpc_per_arcsec=None):
"""The Einstein Radius of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple ellipitcal profiles with different axis-ratios, this Einstein Radius \
may be inaccurate. This is because the differently oriented ellipses of each mass profile """
if self.has_mass_profile:
return sum(map(lambda p: p.einstein_radius_in_units(unit_length=unit_length, kpc_per_arcsec=kpc_per_arcsec),
self.mass_profiles))
else:
return None | The Einstein Radius of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple ellipitcal profiles with different axis-ratios, this Einstein Radius \
may be inaccurate. This is because the differently oriented ellipses of each mass profile | Below is the the instruction that describes the task:
### Input:
The Einstein Radius of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple ellipitcal profiles with different axis-ratios, this Einstein Radius \
may be inaccurate. This is because the differently oriented ellipses of each mass profile
### Response:
def einstein_radius_in_units(self, unit_length='arcsec', kpc_per_arcsec=None):
"""The Einstein Radius of this galaxy, which is the sum of Einstein Radii of its mass profiles.
If the galaxy is composed of multiple ellipitcal profiles with different axis-ratios, this Einstein Radius \
may be inaccurate. This is because the differently oriented ellipses of each mass profile """
if self.has_mass_profile:
return sum(map(lambda p: p.einstein_radius_in_units(unit_length=unit_length, kpc_per_arcsec=kpc_per_arcsec),
self.mass_profiles))
else:
return None |
def _decref_dependencies_recursive(self, term, refcounts, garbage):
"""
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
"""
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
self._decref_dependencies_recursive(parent, refcounts, garbage) | Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies` | Below is the the instruction that describes the task:
### Input:
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
### Response:
def _decref_dependencies_recursive(self, term, refcounts, garbage):
"""
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
"""
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
self._decref_dependencies_recursive(parent, refcounts, garbage) |
def delete_folder(self, id, force=None):
"""
Delete folder.
Remove the specified folder. You can only delete empty folders unless you
set the 'force' flag
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - force
"""Set to 'true' to allow deleting a non-empty folder"""
if force is not None:
params["force"] = force
self.logger.debug("DELETE /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/folders/{id}".format(**path), data=data, params=params, no_data=True) | Delete folder.
Remove the specified folder. You can only delete empty folders unless you
set the 'force' flag | Below is the the instruction that describes the task:
### Input:
Delete folder.
Remove the specified folder. You can only delete empty folders unless you
set the 'force' flag
### Response:
def delete_folder(self, id, force=None):
"""
Delete folder.
Remove the specified folder. You can only delete empty folders unless you
set the 'force' flag
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - force
"""Set to 'true' to allow deleting a non-empty folder"""
if force is not None:
params["force"] = force
self.logger.debug("DELETE /api/v1/folders/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/folders/{id}".format(**path), data=data, params=params, no_data=True) |
def default_malformed_message_handler(worker, exc_info, message_parts):
"""The default malformed message handler for :class:`Worker`. It warns
as a :exc:`MalformedMessage`.
"""
exc_type, exc, tb = exc_info
exc_strs = traceback.format_exception_only(exc_type, exc)
exc_str = exc_strs[0].strip()
if len(exc_strs) > 1:
exc_str += '...'
warn('<%s> occurred by %r' % (exc_str, message_parts), MalformedMessage) | The default malformed message handler for :class:`Worker`. It warns
as a :exc:`MalformedMessage`. | Below is the the instruction that describes the task:
### Input:
The default malformed message handler for :class:`Worker`. It warns
as a :exc:`MalformedMessage`.
### Response:
def default_malformed_message_handler(worker, exc_info, message_parts):
"""The default malformed message handler for :class:`Worker`. It warns
as a :exc:`MalformedMessage`.
"""
exc_type, exc, tb = exc_info
exc_strs = traceback.format_exception_only(exc_type, exc)
exc_str = exc_strs[0].strip()
if len(exc_strs) > 1:
exc_str += '...'
warn('<%s> occurred by %r' % (exc_str, message_parts), MalformedMessage) |
def split_kernel(kernel, kernel_subgrid, subsampling_size, subgrid_res):
"""
pixel kernel and subsampling kernel such that the convolution of both applied on an image can be
performed, i.e. smaller subsampling PSF and hole in larger PSF
:param kernel: PSF kernel of the size of the pixel
:param kernel_subgrid: subsampled kernel
:param subsampling_size: size of subsampling PSF in units of image pixels
:return: pixel and subsampling kernel
"""
n = len(kernel)
n_sub = len(kernel_subgrid)
if subsampling_size % 2 == 0:
subsampling_size += 1
if subsampling_size > n:
subsampling_size = n
kernel_hole = copy.deepcopy(kernel)
n_min = int((n-1)/2 - (subsampling_size-1)/2)
n_max = int((n-1)/2 + (subsampling_size-1)/2 + 1)
kernel_hole[n_min:n_max, n_min:n_max] = 0
n_min_sub = int((n_sub - 1) / 2 - (subsampling_size*subgrid_res - 1) / 2)
n_max_sub = int((n_sub - 1) / 2 + (subsampling_size * subgrid_res - 1) / 2 + 1)
kernel_subgrid_cut = kernel_subgrid[n_min_sub:n_max_sub, n_min_sub:n_max_sub]
flux_subsampled = np.sum(kernel_subgrid_cut)
flux_hole = np.sum(kernel_hole)
if flux_hole > 0:
kernel_hole *= (1. - flux_subsampled) / np.sum(kernel_hole)
else:
kernel_subgrid_cut /= np.sum(kernel_subgrid_cut)
return kernel_hole, kernel_subgrid_cut | pixel kernel and subsampling kernel such that the convolution of both applied on an image can be
performed, i.e. smaller subsampling PSF and hole in larger PSF
:param kernel: PSF kernel of the size of the pixel
:param kernel_subgrid: subsampled kernel
:param subsampling_size: size of subsampling PSF in units of image pixels
:return: pixel and subsampling kernel | Below is the the instruction that describes the task:
### Input:
pixel kernel and subsampling kernel such that the convolution of both applied on an image can be
performed, i.e. smaller subsampling PSF and hole in larger PSF
:param kernel: PSF kernel of the size of the pixel
:param kernel_subgrid: subsampled kernel
:param subsampling_size: size of subsampling PSF in units of image pixels
:return: pixel and subsampling kernel
### Response:
def split_kernel(kernel, kernel_subgrid, subsampling_size, subgrid_res):
"""
pixel kernel and subsampling kernel such that the convolution of both applied on an image can be
performed, i.e. smaller subsampling PSF and hole in larger PSF
:param kernel: PSF kernel of the size of the pixel
:param kernel_subgrid: subsampled kernel
:param subsampling_size: size of subsampling PSF in units of image pixels
:return: pixel and subsampling kernel
"""
n = len(kernel)
n_sub = len(kernel_subgrid)
if subsampling_size % 2 == 0:
subsampling_size += 1
if subsampling_size > n:
subsampling_size = n
kernel_hole = copy.deepcopy(kernel)
n_min = int((n-1)/2 - (subsampling_size-1)/2)
n_max = int((n-1)/2 + (subsampling_size-1)/2 + 1)
kernel_hole[n_min:n_max, n_min:n_max] = 0
n_min_sub = int((n_sub - 1) / 2 - (subsampling_size*subgrid_res - 1) / 2)
n_max_sub = int((n_sub - 1) / 2 + (subsampling_size * subgrid_res - 1) / 2 + 1)
kernel_subgrid_cut = kernel_subgrid[n_min_sub:n_max_sub, n_min_sub:n_max_sub]
flux_subsampled = np.sum(kernel_subgrid_cut)
flux_hole = np.sum(kernel_hole)
if flux_hole > 0:
kernel_hole *= (1. - flux_subsampled) / np.sum(kernel_hole)
else:
kernel_subgrid_cut /= np.sum(kernel_subgrid_cut)
return kernel_hole, kernel_subgrid_cut |
def as_dict(self):
"""Represent this dependency as a dict. For json compatibility."""
return dict(
dependencies=list(self),
all=self.all,
success=self.success,
failure=self.failure
) | Represent this dependency as a dict. For json compatibility. | Below is the the instruction that describes the task:
### Input:
Represent this dependency as a dict. For json compatibility.
### Response:
def as_dict(self):
"""Represent this dependency as a dict. For json compatibility."""
return dict(
dependencies=list(self),
all=self.all,
success=self.success,
failure=self.failure
) |
def stderr(self):
"""
Returns stream writer for stderr to output Lambda function errors to
Returns
-------
samcli.lib.utils.stream_writer.StreamWriter
Stream writer for stderr
"""
stream = self._log_file_handle if self._log_file_handle else osutils.stderr()
return StreamWriter(stream, self._is_debugging) | Returns stream writer for stderr to output Lambda function errors to
Returns
-------
samcli.lib.utils.stream_writer.StreamWriter
Stream writer for stderr | Below is the the instruction that describes the task:
### Input:
Returns stream writer for stderr to output Lambda function errors to
Returns
-------
samcli.lib.utils.stream_writer.StreamWriter
Stream writer for stderr
### Response:
def stderr(self):
"""
Returns stream writer for stderr to output Lambda function errors to
Returns
-------
samcli.lib.utils.stream_writer.StreamWriter
Stream writer for stderr
"""
stream = self._log_file_handle if self._log_file_handle else osutils.stderr()
return StreamWriter(stream, self._is_debugging) |
def reach_process_text():
"""Process text with REACH and return INDRA Statements."""
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
offline = True if body.get('offline') else False
rp = reach.process_text(text, offline=offline)
return _stmts_from_proc(rp) | Process text with REACH and return INDRA Statements. | Below is the the instruction that describes the task:
### Input:
Process text with REACH and return INDRA Statements.
### Response:
def reach_process_text():
"""Process text with REACH and return INDRA Statements."""
if request.method == 'OPTIONS':
return {}
response = request.body.read().decode('utf-8')
body = json.loads(response)
text = body.get('text')
offline = True if body.get('offline') else False
rp = reach.process_text(text, offline=offline)
return _stmts_from_proc(rp) |
def remove_bika_listing_resources(portal):
"""Remove all bika_listing resources
"""
logger.info("Removing bika_listing resouces")
REMOVE_JS = [
"++resource++bika.lims.js/bika.lims.bikalisting.js",
"++resource++bika.lims.js/bika.lims.bikalistingfilterbar.js",
]
REMOVE_CSS = [
"bika_listing.css",
]
for js in REMOVE_JS:
logger.info("********** Unregistering JS %s" % js)
portal.portal_javascripts.unregisterResource(js)
for css in REMOVE_CSS:
logger.info("********** Unregistering CSS %s" % css)
portal.portal_css.unregisterResource(css) | Remove all bika_listing resources | Below is the the instruction that describes the task:
### Input:
Remove all bika_listing resources
### Response:
def remove_bika_listing_resources(portal):
"""Remove all bika_listing resources
"""
logger.info("Removing bika_listing resouces")
REMOVE_JS = [
"++resource++bika.lims.js/bika.lims.bikalisting.js",
"++resource++bika.lims.js/bika.lims.bikalistingfilterbar.js",
]
REMOVE_CSS = [
"bika_listing.css",
]
for js in REMOVE_JS:
logger.info("********** Unregistering JS %s" % js)
portal.portal_javascripts.unregisterResource(js)
for css in REMOVE_CSS:
logger.info("********** Unregistering CSS %s" % css)
portal.portal_css.unregisterResource(css) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.