code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def action_print(reader, *args):
"""Simply print the Flow Log records to output."""
arg_count = len(args)
if arg_count == 0:
stop_after = 0
elif arg_count == 1:
stop_after = int(args[0])
else:
raise RuntimeError("0 or 1 arguments expected for action 'print'")
for i, record in enumerate(reader, 1):
print(record.to_message())
if i == stop_after:
break | Simply print the Flow Log records to output. | Below is the the instruction that describes the task:
### Input:
Simply print the Flow Log records to output.
### Response:
def action_print(reader, *args):
"""Simply print the Flow Log records to output."""
arg_count = len(args)
if arg_count == 0:
stop_after = 0
elif arg_count == 1:
stop_after = int(args[0])
else:
raise RuntimeError("0 or 1 arguments expected for action 'print'")
for i, record in enumerate(reader, 1):
print(record.to_message())
if i == stop_after:
break |
def add_node(self, node, weight=1):
"""Adds node to circle and rebuild it."""
self._nodes.add(node)
self._weights[node] = weight
self._rebuild_circle() | Adds node to circle and rebuild it. | Below is the the instruction that describes the task:
### Input:
Adds node to circle and rebuild it.
### Response:
def add_node(self, node, weight=1):
"""Adds node to circle and rebuild it."""
self._nodes.add(node)
self._weights[node] = weight
self._rebuild_circle() |
def remove_entry(data, entry):
'''
Remove an entry in place.
'''
file_field = entry['fields'].get('file')
if file_field:
try:
os.remove(file_field)
except IOError:
click.echo('This entry\'s file was missing')
data.remove(entry) | Remove an entry in place. | Below is the the instruction that describes the task:
### Input:
Remove an entry in place.
### Response:
def remove_entry(data, entry):
'''
Remove an entry in place.
'''
file_field = entry['fields'].get('file')
if file_field:
try:
os.remove(file_field)
except IOError:
click.echo('This entry\'s file was missing')
data.remove(entry) |
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol} | Makes Element obey the general json interface used in pymatgen for
easier serialization. | Below is the the instruction that describes the task:
### Input:
Makes Element obey the general json interface used in pymatgen for
easier serialization.
### Response:
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol} |
def moveforum_view(self, request, forum_id, direction):
""" Moves the given forum toward the requested direction. """
forum = get_object_or_404(Forum, pk=forum_id)
# Fetch the target
target, position = None, None
if direction == 'up':
target, position = forum.get_previous_sibling(), 'left'
elif direction == 'down':
target, position = forum.get_next_sibling(), 'right'
# Do the move
try:
assert target is not None
forum.move_to(target, position)
except (InvalidMove, AssertionError):
pass
self.message_user(request, _("'{}' forum successfully moved").format(forum.name))
return HttpResponseRedirect(reverse('admin:forum_forum_changelist')) | Moves the given forum toward the requested direction. | Below is the the instruction that describes the task:
### Input:
Moves the given forum toward the requested direction.
### Response:
def moveforum_view(self, request, forum_id, direction):
""" Moves the given forum toward the requested direction. """
forum = get_object_or_404(Forum, pk=forum_id)
# Fetch the target
target, position = None, None
if direction == 'up':
target, position = forum.get_previous_sibling(), 'left'
elif direction == 'down':
target, position = forum.get_next_sibling(), 'right'
# Do the move
try:
assert target is not None
forum.move_to(target, position)
except (InvalidMove, AssertionError):
pass
self.message_user(request, _("'{}' forum successfully moved").format(forum.name))
return HttpResponseRedirect(reverse('admin:forum_forum_changelist')) |
def stoch2array(self):
"""Return the stochastic objects as an array."""
a = np.empty(self.dim)
for stochastic in self.stochastics:
a[self._slices[stochastic]] = stochastic.value
return a | Return the stochastic objects as an array. | Below is the the instruction that describes the task:
### Input:
Return the stochastic objects as an array.
### Response:
def stoch2array(self):
"""Return the stochastic objects as an array."""
a = np.empty(self.dim)
for stochastic in self.stochastics:
a[self._slices[stochastic]] = stochastic.value
return a |
def _check_old_config_root():
"""
Prior versions of keyring would search for the config
in XDG_DATA_HOME, but should probably have been
searching for config in XDG_CONFIG_HOME. If the
config exists in the former but not in the latter,
raise a RuntimeError to force the change.
"""
# disable the check - once is enough and avoids infinite loop
globals()['_check_old_config_root'] = lambda: None
config_file_new = os.path.join(_config_root_Linux(), 'keyringrc.cfg')
config_file_old = os.path.join(_data_root_Linux(), 'keyringrc.cfg')
if os.path.isfile(config_file_old) and not os.path.isfile(config_file_new):
msg = ("Keyring config exists only in the old location "
"{config_file_old} and should be moved to {config_file_new} "
"to work with this version of keyring.")
raise RuntimeError(msg.format(**locals())) | Prior versions of keyring would search for the config
in XDG_DATA_HOME, but should probably have been
searching for config in XDG_CONFIG_HOME. If the
config exists in the former but not in the latter,
raise a RuntimeError to force the change. | Below is the the instruction that describes the task:
### Input:
Prior versions of keyring would search for the config
in XDG_DATA_HOME, but should probably have been
searching for config in XDG_CONFIG_HOME. If the
config exists in the former but not in the latter,
raise a RuntimeError to force the change.
### Response:
def _check_old_config_root():
"""
Prior versions of keyring would search for the config
in XDG_DATA_HOME, but should probably have been
searching for config in XDG_CONFIG_HOME. If the
config exists in the former but not in the latter,
raise a RuntimeError to force the change.
"""
# disable the check - once is enough and avoids infinite loop
globals()['_check_old_config_root'] = lambda: None
config_file_new = os.path.join(_config_root_Linux(), 'keyringrc.cfg')
config_file_old = os.path.join(_data_root_Linux(), 'keyringrc.cfg')
if os.path.isfile(config_file_old) and not os.path.isfile(config_file_new):
msg = ("Keyring config exists only in the old location "
"{config_file_old} and should be moved to {config_file_new} "
"to work with this version of keyring.")
raise RuntimeError(msg.format(**locals())) |
def filebrowser(request, file_type):
""" Trigger view for filebrowser """
template = 'filebrowser.html'
upload_form = FileUploadForm()
uploaded_file = None
upload_tab_active = False
is_images_dialog = (file_type == 'img')
is_documents_dialog = (file_type == 'doc')
files = FileBrowserFile.objects.filter(file_type=file_type)
if request.POST:
upload_form = FileUploadForm(request.POST, request.FILES)
upload_tab_active = True
if upload_form.is_valid():
uploaded_file = upload_form.save(commit=False)
uploaded_file.file_type = file_type
uploaded_file.save()
data = {
'upload_form': upload_form,
'uploaded_file': uploaded_file,
'upload_tab_active': upload_tab_active,
'is_images_dialog': is_images_dialog,
'is_documents_dialog': is_documents_dialog
}
per_page = getattr(settings, 'FILEBROWSER_PER_PAGE', 20)
return render_paginate(request, template, files, per_page, data) | Trigger view for filebrowser | Below is the the instruction that describes the task:
### Input:
Trigger view for filebrowser
### Response:
def filebrowser(request, file_type):
""" Trigger view for filebrowser """
template = 'filebrowser.html'
upload_form = FileUploadForm()
uploaded_file = None
upload_tab_active = False
is_images_dialog = (file_type == 'img')
is_documents_dialog = (file_type == 'doc')
files = FileBrowserFile.objects.filter(file_type=file_type)
if request.POST:
upload_form = FileUploadForm(request.POST, request.FILES)
upload_tab_active = True
if upload_form.is_valid():
uploaded_file = upload_form.save(commit=False)
uploaded_file.file_type = file_type
uploaded_file.save()
data = {
'upload_form': upload_form,
'uploaded_file': uploaded_file,
'upload_tab_active': upload_tab_active,
'is_images_dialog': is_images_dialog,
'is_documents_dialog': is_documents_dialog
}
per_page = getattr(settings, 'FILEBROWSER_PER_PAGE', 20)
return render_paginate(request, template, files, per_page, data) |
def ekopr(fname):
"""
Open an existing E-kernel file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.ekopr_c(fname, ctypes.byref(handle))
return handle.value | Open an existing E-kernel file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Open an existing E-kernel file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int
### Response:
def ekopr(fname):
"""
Open an existing E-kernel file for reading.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.ekopr_c(fname, ctypes.byref(handle))
return handle.value |
def lmeasure(reference_intervals_hier, reference_labels_hier,
estimated_intervals_hier, estimated_labels_hier,
frame_size=0.1, beta=1.0):
'''Computes the tree measures for hierarchical segment annotations.
Parameters
----------
reference_intervals_hier : list of ndarray
``reference_intervals_hier[i]`` contains the segment intervals
(in seconds) for the ``i`` th layer of the annotations. Layers are
ordered from top to bottom, so that the last list of intervals should
be the most specific.
reference_labels_hier : list of list of str
``reference_labels_hier[i]`` contains the segment labels for the
``i``th layer of the annotations
estimated_intervals_hier : list of ndarray
estimated_labels_hier : list of ndarray
Like ``reference_intervals_hier`` and ``reference_labels_hier``
but for the estimated annotation
frame_size : float > 0
length (in seconds) of frames. The frame size cannot be longer than
the window.
beta : float > 0
beta parameter for the F-measure.
Returns
-------
l_precision : number [0, 1]
L-measure Precision
l_recall : number [0, 1]
L-measure Recall
l_measure : number [0, 1]
F-beta measure for ``(l_precision, l_recall)``
Raises
------
ValueError
If either of the input hierarchies are inconsistent
If the input hierarchies have different time durations
If ``frame_size > window`` or ``frame_size <= 0``
'''
# Compute the number of frames in the window
if frame_size <= 0:
raise ValueError('frame_size ({:.2f}) must be a positive '
'number.'.format(frame_size))
# Validate the hierarchical segmentations
validate_hier_intervals(reference_intervals_hier)
validate_hier_intervals(estimated_intervals_hier)
# Build the least common ancestor matrices
ref_meet = _meet(reference_intervals_hier, reference_labels_hier,
frame_size)
est_meet = _meet(estimated_intervals_hier, estimated_labels_hier,
frame_size)
# Compute precision and recall
l_recall = _gauc(ref_meet, est_meet, True, None)
l_precision = _gauc(est_meet, ref_meet, True, None)
l_measure = util.f_measure(l_precision, l_recall, beta=beta)
return l_precision, l_recall, l_measure | Computes the tree measures for hierarchical segment annotations.
Parameters
----------
reference_intervals_hier : list of ndarray
``reference_intervals_hier[i]`` contains the segment intervals
(in seconds) for the ``i`` th layer of the annotations. Layers are
ordered from top to bottom, so that the last list of intervals should
be the most specific.
reference_labels_hier : list of list of str
``reference_labels_hier[i]`` contains the segment labels for the
``i``th layer of the annotations
estimated_intervals_hier : list of ndarray
estimated_labels_hier : list of ndarray
Like ``reference_intervals_hier`` and ``reference_labels_hier``
but for the estimated annotation
frame_size : float > 0
length (in seconds) of frames. The frame size cannot be longer than
the window.
beta : float > 0
beta parameter for the F-measure.
Returns
-------
l_precision : number [0, 1]
L-measure Precision
l_recall : number [0, 1]
L-measure Recall
l_measure : number [0, 1]
F-beta measure for ``(l_precision, l_recall)``
Raises
------
ValueError
If either of the input hierarchies are inconsistent
If the input hierarchies have different time durations
If ``frame_size > window`` or ``frame_size <= 0`` | Below is the the instruction that describes the task:
### Input:
Computes the tree measures for hierarchical segment annotations.
Parameters
----------
reference_intervals_hier : list of ndarray
``reference_intervals_hier[i]`` contains the segment intervals
(in seconds) for the ``i`` th layer of the annotations. Layers are
ordered from top to bottom, so that the last list of intervals should
be the most specific.
reference_labels_hier : list of list of str
``reference_labels_hier[i]`` contains the segment labels for the
``i``th layer of the annotations
estimated_intervals_hier : list of ndarray
estimated_labels_hier : list of ndarray
Like ``reference_intervals_hier`` and ``reference_labels_hier``
but for the estimated annotation
frame_size : float > 0
length (in seconds) of frames. The frame size cannot be longer than
the window.
beta : float > 0
beta parameter for the F-measure.
Returns
-------
l_precision : number [0, 1]
L-measure Precision
l_recall : number [0, 1]
L-measure Recall
l_measure : number [0, 1]
F-beta measure for ``(l_precision, l_recall)``
Raises
------
ValueError
If either of the input hierarchies are inconsistent
If the input hierarchies have different time durations
If ``frame_size > window`` or ``frame_size <= 0``
### Response:
def lmeasure(reference_intervals_hier, reference_labels_hier,
estimated_intervals_hier, estimated_labels_hier,
frame_size=0.1, beta=1.0):
'''Computes the tree measures for hierarchical segment annotations.
Parameters
----------
reference_intervals_hier : list of ndarray
``reference_intervals_hier[i]`` contains the segment intervals
(in seconds) for the ``i`` th layer of the annotations. Layers are
ordered from top to bottom, so that the last list of intervals should
be the most specific.
reference_labels_hier : list of list of str
``reference_labels_hier[i]`` contains the segment labels for the
``i``th layer of the annotations
estimated_intervals_hier : list of ndarray
estimated_labels_hier : list of ndarray
Like ``reference_intervals_hier`` and ``reference_labels_hier``
but for the estimated annotation
frame_size : float > 0
length (in seconds) of frames. The frame size cannot be longer than
the window.
beta : float > 0
beta parameter for the F-measure.
Returns
-------
l_precision : number [0, 1]
L-measure Precision
l_recall : number [0, 1]
L-measure Recall
l_measure : number [0, 1]
F-beta measure for ``(l_precision, l_recall)``
Raises
------
ValueError
If either of the input hierarchies are inconsistent
If the input hierarchies have different time durations
If ``frame_size > window`` or ``frame_size <= 0``
'''
# Compute the number of frames in the window
if frame_size <= 0:
raise ValueError('frame_size ({:.2f}) must be a positive '
'number.'.format(frame_size))
# Validate the hierarchical segmentations
validate_hier_intervals(reference_intervals_hier)
validate_hier_intervals(estimated_intervals_hier)
# Build the least common ancestor matrices
ref_meet = _meet(reference_intervals_hier, reference_labels_hier,
frame_size)
est_meet = _meet(estimated_intervals_hier, estimated_labels_hier,
frame_size)
# Compute precision and recall
l_recall = _gauc(ref_meet, est_meet, True, None)
l_precision = _gauc(est_meet, ref_meet, True, None)
l_measure = util.f_measure(l_precision, l_recall, beta=beta)
return l_precision, l_recall, l_measure |
def _ValidatePathInfos(path_infos):
"""Validates a sequence of path infos."""
precondition.AssertIterableType(path_infos, rdf_objects.PathInfo)
validated = set()
for path_info in path_infos:
_ValidatePathInfo(path_info)
path_key = (path_info.path_type, path_info.GetPathID())
if path_key in validated:
message = "Conflicting writes for path: '{path}' ({path_type})".format(
path="/".join(path_info.components), path_type=path_info.path_type)
raise ValueError(message)
if path_info.HasField("hash_entry"):
if path_info.hash_entry.sha256 is None:
message = "Path with hash entry without SHA256: {}".format(path_info)
raise ValueError(message)
validated.add(path_key) | Validates a sequence of path infos. | Below is the the instruction that describes the task:
### Input:
Validates a sequence of path infos.
### Response:
def _ValidatePathInfos(path_infos):
"""Validates a sequence of path infos."""
precondition.AssertIterableType(path_infos, rdf_objects.PathInfo)
validated = set()
for path_info in path_infos:
_ValidatePathInfo(path_info)
path_key = (path_info.path_type, path_info.GetPathID())
if path_key in validated:
message = "Conflicting writes for path: '{path}' ({path_type})".format(
path="/".join(path_info.components), path_type=path_info.path_type)
raise ValueError(message)
if path_info.HasField("hash_entry"):
if path_info.hash_entry.sha256 is None:
message = "Path with hash entry without SHA256: {}".format(path_info)
raise ValueError(message)
validated.add(path_key) |
def threaded(callback, listargs):
""" Returns the result of <callback> for each set of \*args in listargs. Each call
to <callback> is called concurrently in their own separate threads.
Parameters:
callback (func): Callback function to apply to each set of \*args.
listargs (list): List of lists; \*args to pass each thread.
"""
threads, results = [], []
job_is_done_event = Event()
for args in listargs:
args += [results, len(results)]
results.append(None)
threads.append(Thread(target=callback, args=args, kwargs=dict(job_is_done_event=job_is_done_event)))
threads[-1].setDaemon(True)
threads[-1].start()
while not job_is_done_event.is_set():
if all([not t.is_alive() for t in threads]):
break
time.sleep(0.05)
return [r for r in results if r is not None] | Returns the result of <callback> for each set of \*args in listargs. Each call
to <callback> is called concurrently in their own separate threads.
Parameters:
callback (func): Callback function to apply to each set of \*args.
listargs (list): List of lists; \*args to pass each thread. | Below is the the instruction that describes the task:
### Input:
Returns the result of <callback> for each set of \*args in listargs. Each call
to <callback> is called concurrently in their own separate threads.
Parameters:
callback (func): Callback function to apply to each set of \*args.
listargs (list): List of lists; \*args to pass each thread.
### Response:
def threaded(callback, listargs):
""" Returns the result of <callback> for each set of \*args in listargs. Each call
to <callback> is called concurrently in their own separate threads.
Parameters:
callback (func): Callback function to apply to each set of \*args.
listargs (list): List of lists; \*args to pass each thread.
"""
threads, results = [], []
job_is_done_event = Event()
for args in listargs:
args += [results, len(results)]
results.append(None)
threads.append(Thread(target=callback, args=args, kwargs=dict(job_is_done_event=job_is_done_event)))
threads[-1].setDaemon(True)
threads[-1].start()
while not job_is_done_event.is_set():
if all([not t.is_alive() for t in threads]):
break
time.sleep(0.05)
return [r for r in results if r is not None] |
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
'''
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
state = _get_state(target)
if not state:
ret[target]['comment'] = ('Package {0} not currently held.'
.format(target))
elif state != 'hold':
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set to be held.'
.format(target))
else:
result = _set_state(target, 'hold')
ret[target].update(changes=result[target], result=True)
ret[target]['comment'] = ('Package {0} is now being held.'
.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set to be held.'
.format(target))
return ret | Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]' | Below is the the instruction that describes the task:
### Input:
Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
### Response:
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
'''
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
state = _get_state(target)
if not state:
ret[target]['comment'] = ('Package {0} not currently held.'
.format(target))
elif state != 'hold':
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set to be held.'
.format(target))
else:
result = _set_state(target, 'hold')
ret[target].update(changes=result[target], result=True)
ret[target]['comment'] = ('Package {0} is now being held.'
.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set to be held.'
.format(target))
return ret |
async def issuer_create_credential_offer(wallet_handle: int,
cred_def_id: str) -> str:
"""
Create credential offer that will be used by Prover for
credential request creation. Offer includes nonce and key correctness proof
for authentication between protocol steps and integrity checking.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_def_id: id of credential definition stored in the wallet
:return:credential offer json:
{
"schema_id": string,
"cred_def_id": string,
// Fields below can depend on Cred Def type
"nonce": string,
"key_correctness_proof" : <key_correctness_proof>
}
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_credential_offer: >>> wallet_handle: %r, cred_def_id: %r",
wallet_handle,
cred_def_id)
if not hasattr(issuer_create_credential_offer, "cb"):
logger.debug("issuer_create_credential_offer: Creating callback")
issuer_create_credential_offer.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_def_id = c_char_p(cred_def_id.encode('utf-8'))
credential_offer_json = await do_call('indy_issuer_create_credential_offer',
c_wallet_handle,
c_cred_def_id,
issuer_create_credential_offer.cb)
res = credential_offer_json.decode()
logger.debug("issuer_create_credential_offer: <<< res: %r", res)
return res | Create credential offer that will be used by Prover for
credential request creation. Offer includes nonce and key correctness proof
for authentication between protocol steps and integrity checking.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_def_id: id of credential definition stored in the wallet
:return:credential offer json:
{
"schema_id": string,
"cred_def_id": string,
// Fields below can depend on Cred Def type
"nonce": string,
"key_correctness_proof" : <key_correctness_proof>
} | Below is the the instruction that describes the task:
### Input:
Create credential offer that will be used by Prover for
credential request creation. Offer includes nonce and key correctness proof
for authentication between protocol steps and integrity checking.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_def_id: id of credential definition stored in the wallet
:return:credential offer json:
{
"schema_id": string,
"cred_def_id": string,
// Fields below can depend on Cred Def type
"nonce": string,
"key_correctness_proof" : <key_correctness_proof>
}
### Response:
async def issuer_create_credential_offer(wallet_handle: int,
cred_def_id: str) -> str:
"""
Create credential offer that will be used by Prover for
credential request creation. Offer includes nonce and key correctness proof
for authentication between protocol steps and integrity checking.
:param wallet_handle: wallet handler (created by open_wallet).
:param cred_def_id: id of credential definition stored in the wallet
:return:credential offer json:
{
"schema_id": string,
"cred_def_id": string,
// Fields below can depend on Cred Def type
"nonce": string,
"key_correctness_proof" : <key_correctness_proof>
}
"""
logger = logging.getLogger(__name__)
logger.debug("issuer_create_credential_offer: >>> wallet_handle: %r, cred_def_id: %r",
wallet_handle,
cred_def_id)
if not hasattr(issuer_create_credential_offer, "cb"):
logger.debug("issuer_create_credential_offer: Creating callback")
issuer_create_credential_offer.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_wallet_handle = c_int32(wallet_handle)
c_cred_def_id = c_char_p(cred_def_id.encode('utf-8'))
credential_offer_json = await do_call('indy_issuer_create_credential_offer',
c_wallet_handle,
c_cred_def_id,
issuer_create_credential_offer.cb)
res = credential_offer_json.decode()
logger.debug("issuer_create_credential_offer: <<< res: %r", res)
return res |
def channelModeModifyAcknowledge():
"""CHANNEL MODE MODIFY ACKNOWLEDGE Section 9.1.6"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x17) # 00010111
c = ChannelDescription2()
d = ChannelMode()
packet = a / b / c / d
return packet | CHANNEL MODE MODIFY ACKNOWLEDGE Section 9.1.6 | Below is the the instruction that describes the task:
### Input:
CHANNEL MODE MODIFY ACKNOWLEDGE Section 9.1.6
### Response:
def channelModeModifyAcknowledge():
"""CHANNEL MODE MODIFY ACKNOWLEDGE Section 9.1.6"""
a = TpPd(pd=0x6)
b = MessageType(mesType=0x17) # 00010111
c = ChannelDescription2()
d = ChannelMode()
packet = a / b / c / d
return packet |
def update_publisher_asset(self, upload_stream, publisher_name, asset_type=None, file_name=None, **kwargs):
"""UpdatePublisherAsset.
[Preview API] Update publisher asset like logo. It accepts asset file as an octet stream and file name is passed in header values.
:param object upload_stream: Stream to upload
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
:param String file_name: Header to pass the filename of the uploaded data
:rtype: {str}
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('{str}', self._unwrap_collection(response)) | UpdatePublisherAsset.
[Preview API] Update publisher asset like logo. It accepts asset file as an octet stream and file name is passed in header values.
:param object upload_stream: Stream to upload
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
:param String file_name: Header to pass the filename of the uploaded data
:rtype: {str} | Below is the the instruction that describes the task:
### Input:
UpdatePublisherAsset.
[Preview API] Update publisher asset like logo. It accepts asset file as an octet stream and file name is passed in header values.
:param object upload_stream: Stream to upload
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
:param String file_name: Header to pass the filename of the uploaded data
:rtype: {str}
### Response:
def update_publisher_asset(self, upload_stream, publisher_name, asset_type=None, file_name=None, **kwargs):
"""UpdatePublisherAsset.
[Preview API] Update publisher asset like logo. It accepts asset file as an octet stream and file name is passed in header values.
:param object upload_stream: Stream to upload
:param str publisher_name: Internal name of the publisher
:param str asset_type: Type of asset. Default value is 'logo'.
:param String file_name: Header to pass the filename of the uploaded data
:rtype: {str}
"""
route_values = {}
if publisher_name is not None:
route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
query_parameters = {}
if asset_type is not None:
query_parameters['assetType'] = self._serialize.query('asset_type', asset_type, 'str')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
content = self._client.stream_upload(upload_stream, callback=callback)
response = self._send(http_method='PUT',
location_id='21143299-34f9-4c62-8ca8-53da691192f9',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content,
media_type='application/octet-stream')
return self._deserialize('{str}', self._unwrap_collection(response)) |
def query_symbol(self, asset: str) -> str:
"""
This interface is used to query the asset's symbol of ONT or ONG.
:param asset: a string which is used to indicate which asset's symbol we want to get.
:return: asset's symbol in the form of string.
"""
contract_address = self.get_asset_address(asset)
method = 'symbol'
invoke_code = build_native_invoke_code(contract_address, b'\x00', method, bytearray())
tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
symbol = ContractDataParser.to_utf8_str(response['Result'])
return symbol | This interface is used to query the asset's symbol of ONT or ONG.
:param asset: a string which is used to indicate which asset's symbol we want to get.
:return: asset's symbol in the form of string. | Below is the the instruction that describes the task:
### Input:
This interface is used to query the asset's symbol of ONT or ONG.
:param asset: a string which is used to indicate which asset's symbol we want to get.
:return: asset's symbol in the form of string.
### Response:
def query_symbol(self, asset: str) -> str:
"""
This interface is used to query the asset's symbol of ONT or ONG.
:param asset: a string which is used to indicate which asset's symbol we want to get.
:return: asset's symbol in the form of string.
"""
contract_address = self.get_asset_address(asset)
method = 'symbol'
invoke_code = build_native_invoke_code(contract_address, b'\x00', method, bytearray())
tx = Transaction(0, 0xd1, int(time()), 0, 0, None, invoke_code, bytearray(), list())
response = self.__sdk.rpc.send_raw_transaction_pre_exec(tx)
symbol = ContractDataParser.to_utf8_str(response['Result'])
return symbol |
def setup_sdk_logging(logfile=None, loglevel=logging.INFO):
"""
Setup a NullHandler to the root logger. If ``logfile`` is passed,
additionally add a FileHandler in ``loglevel`` level.
Args:
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
None
"""
logging.root.setLevel(logging.DEBUG)
logging.root.addHandler(logging.NullHandler())
if logfile:
fh = logging.FileHandler(logfile)
fh.setLevel(loglevel)
fh.setFormatter(get_default_log_formatter())
logging.root.addHandler(fh) | Setup a NullHandler to the root logger. If ``logfile`` is passed,
additionally add a FileHandler in ``loglevel`` level.
Args:
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
None | Below is the the instruction that describes the task:
### Input:
Setup a NullHandler to the root logger. If ``logfile`` is passed,
additionally add a FileHandler in ``loglevel`` level.
Args:
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
None
### Response:
def setup_sdk_logging(logfile=None, loglevel=logging.INFO):
"""
Setup a NullHandler to the root logger. If ``logfile`` is passed,
additionally add a FileHandler in ``loglevel`` level.
Args:
logfile(str): A path to setup a log file.
loglevel(int): :mod:`logging` log level.
Returns:
None
"""
logging.root.setLevel(logging.DEBUG)
logging.root.addHandler(logging.NullHandler())
if logfile:
fh = logging.FileHandler(logfile)
fh.setLevel(loglevel)
fh.setFormatter(get_default_log_formatter())
logging.root.addHandler(fh) |
def creator_type(self, creator_type):
"""Sets the creator_type of this Event.
:param creator_type: The creator_type of this Event. # noqa: E501
:type: list[str]
"""
allowed_values = ["USER", "ALERT", "SYSTEM"] # noqa: E501
if not set(creator_type).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `creator_type` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(creator_type) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._creator_type = creator_type | Sets the creator_type of this Event.
:param creator_type: The creator_type of this Event. # noqa: E501
:type: list[str] | Below is the the instruction that describes the task:
### Input:
Sets the creator_type of this Event.
:param creator_type: The creator_type of this Event. # noqa: E501
:type: list[str]
### Response:
def creator_type(self, creator_type):
"""Sets the creator_type of this Event.
:param creator_type: The creator_type of this Event. # noqa: E501
:type: list[str]
"""
allowed_values = ["USER", "ALERT", "SYSTEM"] # noqa: E501
if not set(creator_type).issubset(set(allowed_values)):
raise ValueError(
"Invalid values for `creator_type` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(creator_type) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._creator_type = creator_type |
def getFeatureReport(self, report_num=0, length=63):
"""
Receive a feature report.
Blocks, unless you configured provided file (descriptor) to be
non-blocking.
"""
length += 1
buf = bytearray(length)
buf[0] = report_num
self._ioctl(
_HIDIOCGFEATURE(length),
(ctypes.c_char * length).from_buffer(buf),
True,
)
return buf | Receive a feature report.
Blocks, unless you configured provided file (descriptor) to be
non-blocking. | Below is the the instruction that describes the task:
### Input:
Receive a feature report.
Blocks, unless you configured provided file (descriptor) to be
non-blocking.
### Response:
def getFeatureReport(self, report_num=0, length=63):
"""
Receive a feature report.
Blocks, unless you configured provided file (descriptor) to be
non-blocking.
"""
length += 1
buf = bytearray(length)
buf[0] = report_num
self._ioctl(
_HIDIOCGFEATURE(length),
(ctypes.c_char * length).from_buffer(buf),
True,
)
return buf |
def get_pipeline_yaml(file):
"""Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
"""
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition | Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml. | Below is the the instruction that describes the task:
### Input:
Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
### Response:
def get_pipeline_yaml(file):
"""Return pipeline yaml from open file object.
Use specific custom representers to model the custom pypyr pipeline yaml
format, to load in special literal types like py and sic strings.
If looking to extend the pypyr pipeline syntax with special types, add
these to the tag_representers list.
Args:
file: open file-like object.
Returns:
dict-like representation of loaded yaml.
"""
tag_representers = [PyString, SicString]
yaml_loader = get_yaml_parser_safe()
for representer in tag_representers:
yaml_loader.register_class(representer)
pipeline_definition = yaml_loader.load(file)
return pipeline_definition |
def add_tarball(self, tarball, package):
"""Add a tarball, possibly creating the directory if needed."""
if tarball is None:
logger.error(
"No tarball found for %s: probably a renamed project?",
package)
return
target_dir = os.path.join(self.root_directory, package)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
logger.info("Created %s", target_dir)
logger.info("Copying tarball to %s", target_dir)
shutil.copy(tarball, target_dir) | Add a tarball, possibly creating the directory if needed. | Below is the the instruction that describes the task:
### Input:
Add a tarball, possibly creating the directory if needed.
### Response:
def add_tarball(self, tarball, package):
"""Add a tarball, possibly creating the directory if needed."""
if tarball is None:
logger.error(
"No tarball found for %s: probably a renamed project?",
package)
return
target_dir = os.path.join(self.root_directory, package)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
logger.info("Created %s", target_dir)
logger.info("Copying tarball to %s", target_dir)
shutil.copy(tarball, target_dir) |
def corr(x, y, tail='two-sided', method='pearson'):
"""(Robust) correlation between two variables.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
'skipped' : skipped correlation (robust Spearman, requires sklearn)
Returns
-------
stats : pandas DataFrame
Test summary ::
'n' : Sample size (after NaN removal)
'outliers' : number of outliers (only for 'shepherd' or 'skipped')
'r' : Correlation coefficient
'CI95' : 95% parametric confidence intervals
'r2' : R-squared
'adj_r2' : Adjusted R-squared
'p-val' : one or two tailed p-value
'BF10' : Bayes Factor of the alternative hypothesis (Pearson only)
'power' : achieved power of the test (= 1 - type II error).
See also
--------
pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
partial_corr : Partial correlation
Notes
-----
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Correlations of -1 or +1 imply
an exact linear relationship.
The Spearman correlation is a nonparametric measure of the monotonicity of
the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Correlations of -1 or +1 imply an exact monotonic
relationship.
Kendall’s tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement.
The percentage bend correlation [1]_ is a robust method that
protects against univariate outliers.
The Shepherd's pi [2]_ and skipped [3]_, [4]_ correlations are both robust
methods that returns the Spearman's rho after bivariate outliers removal.
Note that the skipped correlation requires that the scikit-learn
package is installed (for computing the minimum covariance determinant).
Please note that rows with NaN are automatically removed.
If method='pearson', The JZS Bayes Factor is approximated using the
:py:func:`pingouin.bayesfactor_pearson` function.
References
----------
.. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395
.. [2] Schwarzkopf, D.S., De Haas, B., Rees, G., 2012. Better ways to
improve standards in brain-behavior correlation analysis. Front.
Hum. Neurosci. 6, 200. https://doi.org/10.3389/fnhum.2012.00200
.. [3] Rousselet, G.A., Pernet, C.R., 2012. Improving standards in
brain-behavior correlation analyses. Front. Hum. Neurosci. 6, 119.
https://doi.org/10.3389/fnhum.2012.00119
.. [4] Pernet, C.R., Wilcox, R., Rousselet, G.A., 2012. Robust correlation
analyses: false positive and power validation using a new open
source matlab toolbox. Front. Psychol. 3, 606.
https://doi.org/10.3389/fpsyg.2012.00606
Examples
--------
1. Pearson correlation
>>> import numpy as np
>>> # Generate random correlated samples
>>> np.random.seed(123)
>>> mean, cov = [4, 6], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 30).T
>>> # Compute Pearson correlation
>>> from pingouin import corr
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.491 [0.16, 0.72] 0.242 0.185 0.005813 6.135 0.809
2. Pearson correlation with two outliers
>>> x[3], y[5] = 12, -8
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
3. Spearman correlation
>>> corr(x, y, method="spearman")
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.028034 0.61
4. Percentage bend correlation (robust)
>>> corr(x, y, method='percbend')
n r CI95% r2 adj_r2 p-val power
percbend 30 0.389 [0.03, 0.66] 0.151 0.089 0.033508 0.581
5. Shepherd's pi correlation (robust)
>>> corr(x, y, method='shepherd')
n outliers r CI95% r2 adj_r2 p-val power
shepherd 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
6. Skipped spearman correlation (robust)
>>> corr(x, y, method='skipped')
n outliers r CI95% r2 adj_r2 p-val power
skipped 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
7. One-tailed Spearman correlation
>>> corr(x, y, tail="one-sided", method='spearman')
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.014017 0.726
8. Using columns of a pandas dataframe
>>> import pandas as pd
>>> data = pd.DataFrame({'x': x, 'y': y})
>>> corr(data['x'], data['y'])
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
"""
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
nx = x.size
# Compute correlation coefficient
if method == 'pearson':
r, pval = pearsonr(x, y)
elif method == 'spearman':
r, pval = spearmanr(x, y)
elif method == 'kendall':
r, pval = kendalltau(x, y)
elif method == 'percbend':
r, pval = percbend(x, y)
elif method == 'shepherd':
r, pval, outliers = shepherd(x, y)
elif method == 'skipped':
r, pval, outliers = skipped(x, y, method='spearman')
else:
raise ValueError('Method not recognized.')
assert not np.isnan(r), 'Correlation returned NaN. Check your data.'
# Compute r2 and adj_r2
r2 = r**2
adj_r2 = 1 - (((1 - r2) * (nx - 1)) / (nx - 3))
# Compute the parametric 95% confidence interval and power
if r2 < 1:
ci = compute_esci(stat=r, nx=nx, ny=nx, eftype='r')
pr = round(power_corr(r=r, n=nx, power=None, alpha=0.05, tail=tail), 3)
else:
ci = [1., 1.]
pr = np.inf
# Create dictionnary
stats = {'n': nx,
'r': round(r, 3),
'r2': round(r2, 3),
'adj_r2': round(adj_r2, 3),
'CI95%': [ci],
'p-val': pval if tail == 'two-sided' else .5 * pval,
'power': pr
}
if method in ['shepherd', 'skipped']:
stats['outliers'] = sum(outliers)
# Compute the BF10 for Pearson correlation only
if method == 'pearson' and nx < 1000:
if r2 < 1:
stats['BF10'] = bayesfactor_pearson(r, nx)
else:
stats['BF10'] = str(np.inf)
# Convert to DataFrame
stats = pd.DataFrame.from_records(stats, index=[method])
# Define order
col_keep = ['n', 'outliers', 'r', 'CI95%', 'r2', 'adj_r2', 'p-val',
'BF10', 'power']
col_order = [k for k in col_keep if k in stats.keys().tolist()]
return stats[col_order] | (Robust) correlation between two variables.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
'skipped' : skipped correlation (robust Spearman, requires sklearn)
Returns
-------
stats : pandas DataFrame
Test summary ::
'n' : Sample size (after NaN removal)
'outliers' : number of outliers (only for 'shepherd' or 'skipped')
'r' : Correlation coefficient
'CI95' : 95% parametric confidence intervals
'r2' : R-squared
'adj_r2' : Adjusted R-squared
'p-val' : one or two tailed p-value
'BF10' : Bayes Factor of the alternative hypothesis (Pearson only)
'power' : achieved power of the test (= 1 - type II error).
See also
--------
pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
partial_corr : Partial correlation
Notes
-----
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Correlations of -1 or +1 imply
an exact linear relationship.
The Spearman correlation is a nonparametric measure of the monotonicity of
the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Correlations of -1 or +1 imply an exact monotonic
relationship.
Kendall’s tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement.
The percentage bend correlation [1]_ is a robust method that
protects against univariate outliers.
The Shepherd's pi [2]_ and skipped [3]_, [4]_ correlations are both robust
methods that returns the Spearman's rho after bivariate outliers removal.
Note that the skipped correlation requires that the scikit-learn
package is installed (for computing the minimum covariance determinant).
Please note that rows with NaN are automatically removed.
If method='pearson', The JZS Bayes Factor is approximated using the
:py:func:`pingouin.bayesfactor_pearson` function.
References
----------
.. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395
.. [2] Schwarzkopf, D.S., De Haas, B., Rees, G., 2012. Better ways to
improve standards in brain-behavior correlation analysis. Front.
Hum. Neurosci. 6, 200. https://doi.org/10.3389/fnhum.2012.00200
.. [3] Rousselet, G.A., Pernet, C.R., 2012. Improving standards in
brain-behavior correlation analyses. Front. Hum. Neurosci. 6, 119.
https://doi.org/10.3389/fnhum.2012.00119
.. [4] Pernet, C.R., Wilcox, R., Rousselet, G.A., 2012. Robust correlation
analyses: false positive and power validation using a new open
source matlab toolbox. Front. Psychol. 3, 606.
https://doi.org/10.3389/fpsyg.2012.00606
Examples
--------
1. Pearson correlation
>>> import numpy as np
>>> # Generate random correlated samples
>>> np.random.seed(123)
>>> mean, cov = [4, 6], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 30).T
>>> # Compute Pearson correlation
>>> from pingouin import corr
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.491 [0.16, 0.72] 0.242 0.185 0.005813 6.135 0.809
2. Pearson correlation with two outliers
>>> x[3], y[5] = 12, -8
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
3. Spearman correlation
>>> corr(x, y, method="spearman")
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.028034 0.61
4. Percentage bend correlation (robust)
>>> corr(x, y, method='percbend')
n r CI95% r2 adj_r2 p-val power
percbend 30 0.389 [0.03, 0.66] 0.151 0.089 0.033508 0.581
5. Shepherd's pi correlation (robust)
>>> corr(x, y, method='shepherd')
n outliers r CI95% r2 adj_r2 p-val power
shepherd 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
6. Skipped spearman correlation (robust)
>>> corr(x, y, method='skipped')
n outliers r CI95% r2 adj_r2 p-val power
skipped 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
7. One-tailed Spearman correlation
>>> corr(x, y, tail="one-sided", method='spearman')
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.014017 0.726
8. Using columns of a pandas dataframe
>>> import pandas as pd
>>> data = pd.DataFrame({'x': x, 'y': y})
>>> corr(data['x'], data['y'])
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121 | Below is the the instruction that describes the task:
### Input:
(Robust) correlation between two variables.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
'skipped' : skipped correlation (robust Spearman, requires sklearn)
Returns
-------
stats : pandas DataFrame
Test summary ::
'n' : Sample size (after NaN removal)
'outliers' : number of outliers (only for 'shepherd' or 'skipped')
'r' : Correlation coefficient
'CI95' : 95% parametric confidence intervals
'r2' : R-squared
'adj_r2' : Adjusted R-squared
'p-val' : one or two tailed p-value
'BF10' : Bayes Factor of the alternative hypothesis (Pearson only)
'power' : achieved power of the test (= 1 - type II error).
See also
--------
pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
partial_corr : Partial correlation
Notes
-----
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Correlations of -1 or +1 imply
an exact linear relationship.
The Spearman correlation is a nonparametric measure of the monotonicity of
the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Correlations of -1 or +1 imply an exact monotonic
relationship.
Kendall’s tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement.
The percentage bend correlation [1]_ is a robust method that
protects against univariate outliers.
The Shepherd's pi [2]_ and skipped [3]_, [4]_ correlations are both robust
methods that returns the Spearman's rho after bivariate outliers removal.
Note that the skipped correlation requires that the scikit-learn
package is installed (for computing the minimum covariance determinant).
Please note that rows with NaN are automatically removed.
If method='pearson', The JZS Bayes Factor is approximated using the
:py:func:`pingouin.bayesfactor_pearson` function.
References
----------
.. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395
.. [2] Schwarzkopf, D.S., De Haas, B., Rees, G., 2012. Better ways to
improve standards in brain-behavior correlation analysis. Front.
Hum. Neurosci. 6, 200. https://doi.org/10.3389/fnhum.2012.00200
.. [3] Rousselet, G.A., Pernet, C.R., 2012. Improving standards in
brain-behavior correlation analyses. Front. Hum. Neurosci. 6, 119.
https://doi.org/10.3389/fnhum.2012.00119
.. [4] Pernet, C.R., Wilcox, R., Rousselet, G.A., 2012. Robust correlation
analyses: false positive and power validation using a new open
source matlab toolbox. Front. Psychol. 3, 606.
https://doi.org/10.3389/fpsyg.2012.00606
Examples
--------
1. Pearson correlation
>>> import numpy as np
>>> # Generate random correlated samples
>>> np.random.seed(123)
>>> mean, cov = [4, 6], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 30).T
>>> # Compute Pearson correlation
>>> from pingouin import corr
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.491 [0.16, 0.72] 0.242 0.185 0.005813 6.135 0.809
2. Pearson correlation with two outliers
>>> x[3], y[5] = 12, -8
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
3. Spearman correlation
>>> corr(x, y, method="spearman")
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.028034 0.61
4. Percentage bend correlation (robust)
>>> corr(x, y, method='percbend')
n r CI95% r2 adj_r2 p-val power
percbend 30 0.389 [0.03, 0.66] 0.151 0.089 0.033508 0.581
5. Shepherd's pi correlation (robust)
>>> corr(x, y, method='shepherd')
n outliers r CI95% r2 adj_r2 p-val power
shepherd 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
6. Skipped spearman correlation (robust)
>>> corr(x, y, method='skipped')
n outliers r CI95% r2 adj_r2 p-val power
skipped 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
7. One-tailed Spearman correlation
>>> corr(x, y, tail="one-sided", method='spearman')
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.014017 0.726
8. Using columns of a pandas dataframe
>>> import pandas as pd
>>> data = pd.DataFrame({'x': x, 'y': y})
>>> corr(data['x'], data['y'])
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
### Response:
def corr(x, y, tail='two-sided', method='pearson'):
"""(Robust) correlation between two variables.
Parameters
----------
x, y : array_like
First and second set of observations. x and y must be independent.
tail : string
Specify whether to return 'one-sided' or 'two-sided' p-value.
method : string
Specify which method to use for the computation of the correlation
coefficient. Available methods are ::
'pearson' : Pearson product-moment correlation
'spearman' : Spearman rank-order correlation
'kendall' : Kendall’s tau (ordinal data)
'percbend' : percentage bend correlation (robust)
'shepherd' : Shepherd's pi correlation (robust Spearman)
'skipped' : skipped correlation (robust Spearman, requires sklearn)
Returns
-------
stats : pandas DataFrame
Test summary ::
'n' : Sample size (after NaN removal)
'outliers' : number of outliers (only for 'shepherd' or 'skipped')
'r' : Correlation coefficient
'CI95' : 95% parametric confidence intervals
'r2' : R-squared
'adj_r2' : Adjusted R-squared
'p-val' : one or two tailed p-value
'BF10' : Bayes Factor of the alternative hypothesis (Pearson only)
'power' : achieved power of the test (= 1 - type II error).
See also
--------
pairwise_corr : Pairwise correlation between columns of a pandas DataFrame
partial_corr : Partial correlation
Notes
-----
The Pearson correlation coefficient measures the linear relationship
between two datasets. Strictly speaking, Pearson's correlation requires
that each dataset be normally distributed. Correlations of -1 or +1 imply
an exact linear relationship.
The Spearman correlation is a nonparametric measure of the monotonicity of
the relationship between two datasets. Unlike the Pearson correlation,
the Spearman correlation does not assume that both datasets are normally
distributed. Correlations of -1 or +1 imply an exact monotonic
relationship.
Kendall’s tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement.
The percentage bend correlation [1]_ is a robust method that
protects against univariate outliers.
The Shepherd's pi [2]_ and skipped [3]_, [4]_ correlations are both robust
methods that returns the Spearman's rho after bivariate outliers removal.
Note that the skipped correlation requires that the scikit-learn
package is installed (for computing the minimum covariance determinant).
Please note that rows with NaN are automatically removed.
If method='pearson', The JZS Bayes Factor is approximated using the
:py:func:`pingouin.bayesfactor_pearson` function.
References
----------
.. [1] Wilcox, R.R., 1994. The percentage bend correlation coefficient.
Psychometrika 59, 601–616. https://doi.org/10.1007/BF02294395
.. [2] Schwarzkopf, D.S., De Haas, B., Rees, G., 2012. Better ways to
improve standards in brain-behavior correlation analysis. Front.
Hum. Neurosci. 6, 200. https://doi.org/10.3389/fnhum.2012.00200
.. [3] Rousselet, G.A., Pernet, C.R., 2012. Improving standards in
brain-behavior correlation analyses. Front. Hum. Neurosci. 6, 119.
https://doi.org/10.3389/fnhum.2012.00119
.. [4] Pernet, C.R., Wilcox, R., Rousselet, G.A., 2012. Robust correlation
analyses: false positive and power validation using a new open
source matlab toolbox. Front. Psychol. 3, 606.
https://doi.org/10.3389/fpsyg.2012.00606
Examples
--------
1. Pearson correlation
>>> import numpy as np
>>> # Generate random correlated samples
>>> np.random.seed(123)
>>> mean, cov = [4, 6], [(1, .5), (.5, 1)]
>>> x, y = np.random.multivariate_normal(mean, cov, 30).T
>>> # Compute Pearson correlation
>>> from pingouin import corr
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.491 [0.16, 0.72] 0.242 0.185 0.005813 6.135 0.809
2. Pearson correlation with two outliers
>>> x[3], y[5] = 12, -8
>>> corr(x, y)
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
3. Spearman correlation
>>> corr(x, y, method="spearman")
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.028034 0.61
4. Percentage bend correlation (robust)
>>> corr(x, y, method='percbend')
n r CI95% r2 adj_r2 p-val power
percbend 30 0.389 [0.03, 0.66] 0.151 0.089 0.033508 0.581
5. Shepherd's pi correlation (robust)
>>> corr(x, y, method='shepherd')
n outliers r CI95% r2 adj_r2 p-val power
shepherd 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
6. Skipped spearman correlation (robust)
>>> corr(x, y, method='skipped')
n outliers r CI95% r2 adj_r2 p-val power
skipped 30 2 0.437 [0.09, 0.69] 0.191 0.131 0.020128 0.694
7. One-tailed Spearman correlation
>>> corr(x, y, tail="one-sided", method='spearman')
n r CI95% r2 adj_r2 p-val power
spearman 30 0.401 [0.05, 0.67] 0.161 0.099 0.014017 0.726
8. Using columns of a pandas dataframe
>>> import pandas as pd
>>> data = pd.DataFrame({'x': x, 'y': y})
>>> corr(data['x'], data['y'])
n r CI95% r2 adj_r2 p-val BF10 power
pearson 30 0.147 [-0.23, 0.48] 0.022 -0.051 0.439148 0.19 0.121
"""
x = np.asarray(x)
y = np.asarray(y)
# Check size
if x.size != y.size:
raise ValueError('x and y must have the same length.')
# Remove NA
x, y = remove_na(x, y, paired=True)
nx = x.size
# Compute correlation coefficient
if method == 'pearson':
r, pval = pearsonr(x, y)
elif method == 'spearman':
r, pval = spearmanr(x, y)
elif method == 'kendall':
r, pval = kendalltau(x, y)
elif method == 'percbend':
r, pval = percbend(x, y)
elif method == 'shepherd':
r, pval, outliers = shepherd(x, y)
elif method == 'skipped':
r, pval, outliers = skipped(x, y, method='spearman')
else:
raise ValueError('Method not recognized.')
assert not np.isnan(r), 'Correlation returned NaN. Check your data.'
# Compute r2 and adj_r2
r2 = r**2
adj_r2 = 1 - (((1 - r2) * (nx - 1)) / (nx - 3))
# Compute the parametric 95% confidence interval and power
if r2 < 1:
ci = compute_esci(stat=r, nx=nx, ny=nx, eftype='r')
pr = round(power_corr(r=r, n=nx, power=None, alpha=0.05, tail=tail), 3)
else:
ci = [1., 1.]
pr = np.inf
# Create dictionnary
stats = {'n': nx,
'r': round(r, 3),
'r2': round(r2, 3),
'adj_r2': round(adj_r2, 3),
'CI95%': [ci],
'p-val': pval if tail == 'two-sided' else .5 * pval,
'power': pr
}
if method in ['shepherd', 'skipped']:
stats['outliers'] = sum(outliers)
# Compute the BF10 for Pearson correlation only
if method == 'pearson' and nx < 1000:
if r2 < 1:
stats['BF10'] = bayesfactor_pearson(r, nx)
else:
stats['BF10'] = str(np.inf)
# Convert to DataFrame
stats = pd.DataFrame.from_records(stats, index=[method])
# Define order
col_keep = ['n', 'outliers', 'r', 'CI95%', 'r2', 'adj_r2', 'p-val',
'BF10', 'power']
col_order = [k for k in col_keep if k in stats.keys().tolist()]
return stats[col_order] |
def get_event_data(self, section, time_in_seconds=False):
    """ Get the template or complement event data.
    :param section: Either template, complement, or both.
    :param time_in_seconds: Return the start and length fields
        in seconds, rather than samples.
    :return: The event dataset for the section. If section=both
        then it returns a tuple with both sections. Returns None
        (a (None, None) tuple for section=both) if the section
        does not exist.
    """
    if section not in ['template', 'complement', 'both']:
        raise Exception('Unrecognized section: {} Expected: "template", "complement" or "both"'.format(section))
    results = self.get_results()
    if results is None:
        # Bug fix: the original "return None, None if section is 'both' else None"
        # parsed as "return (None, (None if ...))" and therefore always returned
        # a 2-tuple, even for single sections. It also compared strings with
        # "is" (identity) instead of "==" (equality).
        return (None, None) if section == 'both' else None
    if section == 'both':
        sections = ['template', 'complement']
    else:
        sections = [section]
    evdet_group, _ = self._find_event_data()
    with EventDetectionTools(self.handle, group_name=evdet_group) as evdet:
        event_data, _ = evdet.get_event_data(time_in_seconds=time_in_seconds)
    datasets = [None, None]
    for n, this_section in enumerate(sections):
        if not results['has_{}'.format(this_section)]:
            continue
        # Slice the event table between the start/end event indices recorded
        # for this section in the results summary.
        ev1 = results['start_event_{}'.format(this_section)]
        ev2 = results['end_event_{}'.format(this_section)]
        datasets[n] = event_data[ev1:ev2]
    if section == 'both':
        return tuple(datasets)
    return datasets[0]
:param section: Either template, complement, or both.
:param time_in_seconds: Return the start and length fields
in seconds, rather than samples.
:return: The event dataset for the section. If section=both
then it returns a tuple with both sections. Returns None
if the section does not exist. | Below is the the instruction that describes the task:
### Input:
Get the template or complement event data.
:param section: Either template, complement, or both.
:param time_in_seconds: Return the start and length fields
in seconds, rather than samples.
:return: The event dataset for the section. If section=both
then it returns a tuple with both sections. Returns None
if the section does not exist.
### Response:
def get_event_data(self, section, time_in_seconds=False):
""" Get the template or complement event data.
:param section: Either template, complement, or both.
:param time_in_seconds: Return the start and length fields
in seconds, rather than samples.
:return: The event dataset for the section. If section=both
then it returns a tuple with both sections. Returns None
if the section does not exist.
"""
if section not in ['template', 'complement', 'both']:
raise Exception('Unrecognized section: {} Expected: "template", "complement" or "both"'.format(section))
results = self.get_results()
if results is None:
return None, None if section is 'both' else None
if section == 'both':
sections = ['template', 'complement']
else:
sections = [section]
evdet_group, _ = self._find_event_data()
with EventDetectionTools(self.handle, group_name=evdet_group) as evdet:
event_data, _ = evdet.get_event_data(time_in_seconds=time_in_seconds)
datasets = [None, None]
for n, this_section in enumerate(sections):
if not results['has_{}'.format(this_section)]:
continue
ev1 = results['start_event_{}'.format(this_section)]
ev2 = results['end_event_{}'.format(this_section)]
datasets[n] = event_data[ev1:ev2]
if section == 'both':
return tuple(datasets)
return datasets[0] |
def master_srcmdl_xml(self, **kwargs):
    """ Return the name of a source model file. """
    # Overlay caller-supplied keys on top of the instance defaults.
    merged = self.base_dict.copy()
    merged.update(**kwargs)
    self._replace_none(merged)
    relative = NameFactory.master_srcmdl_xml_format.format(**merged)
    if not kwargs.get('fullpath', False):
        return relative
    return self.fullpath(localpath=relative)
### Input:
return the name of a source model file
### Response:
def master_srcmdl_xml(self, **kwargs):
""" return the name of a source model file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
self._replace_none(kwargs_copy)
localpath = NameFactory.master_srcmdl_xml_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath |
def class_name_to_resource_name(class_name: str) -> str:
    """Converts a camel case class name to a resource name with spaces.
    >>> class_name_to_resource_name('FooBarObject')
    'Foo Bar Object'
    :param class_name: The name to convert.
    :returns: The resource name.
    """
    # First pass: break before a capitalized word ("FooBar" -> "Foo Bar").
    camel_break = re.compile(r'(.)([A-Z][a-z]+)')
    # Second pass: break between a lowercase/digit and a capital ("v2X" -> "v2 X").
    acronym_break = re.compile(r'([a-z0-9])([A-Z])')
    with_words = camel_break.sub(r'\1 \2', class_name)
    return acronym_break.sub(r'\1 \2', with_words)
>>> class_name_to_resource_name('FooBarObject')
'Foo Bar Object'
:param class_name: The name to convert.
:returns: The resource name. | Below is the the instruction that describes the task:
### Input:
Converts a camel case class name to a resource name with spaces.
>>> class_name_to_resource_name('FooBarObject')
'Foo Bar Object'
:param class_name: The name to convert.
:returns: The resource name.
### Response:
def class_name_to_resource_name(class_name: str) -> str:
"""Converts a camel case class name to a resource name with spaces.
>>> class_name_to_resource_name('FooBarObject')
'Foo Bar Object'
:param class_name: The name to convert.
:returns: The resource name.
"""
s = re.sub('(.)([A-Z][a-z]+)', r'\1 \2', class_name)
return re.sub('([a-z0-9])([A-Z])', r'\1 \2', s) |
def clone(self, newname, config_path=None, flags=0, bdevtype=None,
          bdevdata=None, newsize=0, hookargs=()):
    """
    Clone the current container.
    """
    # Mandatory arguments for the low-level clone call; the optional
    # ones are added only when the caller supplied a truthy value.
    args = {'newname': newname, 'flags': flags,
            'newsize': newsize, 'hookargs': hookargs}
    for key, value in (('config_path', config_path),
                       ('bdevtype', bdevtype),
                       ('bdevdata', bdevdata)):
        if value:
            args[key] = value
    if not _lxc.Container.clone(self, **args):
        return False
    return Container(newname, config_path=config_path)
### Input:
Clone the current container.
### Response:
def clone(self, newname, config_path=None, flags=0, bdevtype=None,
bdevdata=None, newsize=0, hookargs=()):
"""
Clone the current container.
"""
args = {}
args['newname'] = newname
args['flags'] = flags
args['newsize'] = newsize
args['hookargs'] = hookargs
if config_path:
args['config_path'] = config_path
if bdevtype:
args['bdevtype'] = bdevtype
if bdevdata:
args['bdevdata'] = bdevdata
if _lxc.Container.clone(self, **args):
return Container(newname, config_path=config_path)
else:
return False |
def management_policies(self):
    """Instance depends on the API version:
    * 2018-07-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2018_07_01.operations.ManagementPoliciesOperations>`
    """
    api_version = self._get_api_version('management_policies')
    # Only one API version currently exposes this operation group.
    if api_version != '2018-07-01':
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    from .v2018_07_01.operations import ManagementPoliciesOperations as OperationClass
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
* 2018-07-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2018_07_01.operations.ManagementPoliciesOperations>` | Below is the the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2018-07-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2018_07_01.operations.ManagementPoliciesOperations>`
### Response:
def management_policies(self):
"""Instance depends on the API version:
* 2018-07-01: :class:`ManagementPoliciesOperations<azure.mgmt.storage.v2018_07_01.operations.ManagementPoliciesOperations>`
"""
api_version = self._get_api_version('management_policies')
if api_version == '2018-07-01':
from .v2018_07_01.operations import ManagementPoliciesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def get_ipv6_neighbors_table(self):
    """Return the IPv6 neighbors table."""
    raw_table = junos_views.junos_ipv6_neighbors_table(self.device)
    raw_table.get()
    neighbors = []
    for _, entry_fields in raw_table.items():
        # Each entry is a list of (field, value) pairs; normalize the MAC
        # and IP representations via the NAPALM helpers.
        entry = dict(entry_fields)
        entry["mac"] = napalm.base.helpers.mac(entry.get("mac"))
        entry["ip"] = napalm.base.helpers.ip(entry.get("ip"))
        neighbors.append(entry)
    return neighbors
### Input:
Return the IPv6 neighbors table.
### Response:
def get_ipv6_neighbors_table(self):
"""Return the IPv6 neighbors table."""
ipv6_neighbors_table = []
ipv6_neighbors_table_raw = junos_views.junos_ipv6_neighbors_table(self.device)
ipv6_neighbors_table_raw.get()
ipv6_neighbors_table_items = ipv6_neighbors_table_raw.items()
for ipv6_table_entry in ipv6_neighbors_table_items:
ipv6_entry = {elem[0]: elem[1] for elem in ipv6_table_entry[1]}
ipv6_entry["mac"] = napalm.base.helpers.mac(ipv6_entry.get("mac"))
ipv6_entry["ip"] = napalm.base.helpers.ip(ipv6_entry.get("ip"))
ipv6_neighbors_table.append(ipv6_entry)
return ipv6_neighbors_table |
def _should_split_column_cells(cls, column_cells: List[str]) -> bool:
"""
Returns true if there is any cell in this column that can be split.
"""
return any(cls._should_split_cell(cell_text) for cell_text in column_cells) | Returns true if there is any cell in this column that can be split. | Below is the the instruction that describes the task:
### Input:
Returns true if there is any cell in this column that can be split.
### Response:
def _should_split_column_cells(cls, column_cells: List[str]) -> bool:
"""
Returns true if there is any cell in this column that can be split.
"""
return any(cls._should_split_cell(cell_text) for cell_text in column_cells) |
def has_credentials_stored():
    """
    Return the stored 'auth token' string if the user credentials are
    already stored, otherwise False.
    """
    # Fixes: the original used the Python-2-only "except Exception, e"
    # syntax (a SyntaxError on Python 3), bound an unused exception
    # variable, and shadowed the builtin `id` with the second line read.
    try:
        with open(credentials_file, 'r') as f:
            token = f.readline().strip()
            f.readline()  # second line holds the stored id; not needed here
            return token
    except (IOError, OSError):
        # Credentials file missing or unreadable: nothing stored.
        return False
### Input:
Return 'auth token' string, if the user credentials are already stored
### Response:
def has_credentials_stored():
"""
Return 'auth token' string, if the user credentials are already stored
"""
try:
with open(credentials_file, 'r') as f:
token = f.readline().strip()
id = f.readline().strip()
return token
except Exception, e:
return False |
def do_permissionif(parser, token):
    """
    Permission if templatetag
    Examples
    --------
    ::
        {% if user has 'blogs.add_article' %}
        <p>This user have 'blogs.add_article' permission</p>
        {% elif user has 'blog.change_article' of object %}
        <p>This user have 'blogs.change_article' permission of {{object}}</p>
        {% endif %}
        {# If you set 'PERMISSION_REPLACE_BUILTIN_IF = False' in settings #}
        {% permission user has 'blogs.add_article' %}
        <p>This user have 'blogs.add_article' permission</p>
        {% elpermission user has 'blog.change_article' of object %}
        <p>This user have 'blogs.change_article' permission of {{object}}</p>
        {% endpermission %}
    """
    # Derive the companion tag names from the opening tag, so the same
    # parser works for both {% if %}/{% elif %}/{% endif %} and
    # {% permission %}/{% elpermission %}/{% endpermission %}.
    bits = token.split_contents()
    ELIF = "el%s" % bits[0]
    ELSE = "else"
    ENDIF = "end%s" % bits[0]
    # {% if ... %}
    # NOTE(review): do_permissionif.Parser is expected to be attached to this
    # function elsewhere in the module; it is not visible here — confirm.
    bits = bits[1:]
    condition = do_permissionif.Parser(parser, bits).parse()
    nodelist = parser.parse((ELIF, ELSE, ENDIF))
    conditions_nodelists = [(condition, nodelist)]
    token = parser.next_token()
    # {% elif ... %} (repeatable)
    while token.contents.startswith(ELIF):
        bits = token.split_contents()[1:]
        condition = do_permissionif.Parser(parser, bits).parse()
        nodelist = parser.parse((ELIF, ELSE, ENDIF))
        conditions_nodelists.append((condition, nodelist))
        token = parser.next_token()
    # {% else %} (optional) — a None condition marks the fallback branch.
    if token.contents == ELSE:
        nodelist = parser.parse((ENDIF,))
        conditions_nodelists.append((None, nodelist))
        token = parser.next_token()
    # {% endif %}
    assert token.contents == ENDIF
    return IfNode(conditions_nodelists)
Examples
--------
::
{% if user has 'blogs.add_article' %}
<p>This user have 'blogs.add_article' permission</p>
{% elif user has 'blog.change_article' of object %}
<p>This user have 'blogs.change_article' permission of {{object}}</p>
{% endif %}
{# If you set 'PERMISSION_REPLACE_BUILTIN_IF = False' in settings #}
{% permission user has 'blogs.add_article' %}
<p>This user have 'blogs.add_article' permission</p>
{% elpermission user has 'blog.change_article' of object %}
<p>This user have 'blogs.change_article' permission of {{object}}</p>
{% endpermission %} | Below is the the instruction that describes the task:
### Input:
Permission if templatetag
Examples
--------
::
{% if user has 'blogs.add_article' %}
<p>This user have 'blogs.add_article' permission</p>
{% elif user has 'blog.change_article' of object %}
<p>This user have 'blogs.change_article' permission of {{object}}</p>
{% endif %}
{# If you set 'PERMISSION_REPLACE_BUILTIN_IF = False' in settings #}
{% permission user has 'blogs.add_article' %}
<p>This user have 'blogs.add_article' permission</p>
{% elpermission user has 'blog.change_article' of object %}
<p>This user have 'blogs.change_article' permission of {{object}}</p>
{% endpermission %}
### Response:
def do_permissionif(parser, token):
"""
Permission if templatetag
Examples
--------
::
{% if user has 'blogs.add_article' %}
<p>This user have 'blogs.add_article' permission</p>
{% elif user has 'blog.change_article' of object %}
<p>This user have 'blogs.change_article' permission of {{object}}</p>
{% endif %}
{# If you set 'PERMISSION_REPLACE_BUILTIN_IF = False' in settings #}
{% permission user has 'blogs.add_article' %}
<p>This user have 'blogs.add_article' permission</p>
{% elpermission user has 'blog.change_article' of object %}
<p>This user have 'blogs.change_article' permission of {{object}}</p>
{% endpermission %}
"""
bits = token.split_contents()
ELIF = "el%s" % bits[0]
ELSE = "else"
ENDIF = "end%s" % bits[0]
# {% if ... %}
bits = bits[1:]
condition = do_permissionif.Parser(parser, bits).parse()
nodelist = parser.parse((ELIF, ELSE, ENDIF))
conditions_nodelists = [(condition, nodelist)]
token = parser.next_token()
# {% elif ... %} (repeatable)
while token.contents.startswith(ELIF):
bits = token.split_contents()[1:]
condition = do_permissionif.Parser(parser, bits).parse()
nodelist = parser.parse((ELIF, ELSE, ENDIF))
conditions_nodelists.append((condition, nodelist))
token = parser.next_token()
# {% else %} (optional)
if token.contents == ELSE:
nodelist = parser.parse((ENDIF,))
conditions_nodelists.append((None, nodelist))
token = parser.next_token()
# {% endif %}
assert token.contents == ENDIF
return IfNode(conditions_nodelists) |
def modify_prefix(channel, new_prefix):
    """
    Creates an embed UI containing the prefix modified message
    Args:
        channel (discord.Channel): The Discord channel to bind the embed to
        new_prefix (str): The value of the new prefix
    Returns:
        embed: The created embed
    """
    description = "Modis prefix is now `{}`".format(new_prefix)
    return ui_embed.UI(
        channel,
        "Prefix updated",
        description,
        modulename=modulename
    )
Args:
channel (discord.Channel): The Discord channel to bind the embed to
new_prefix (str): The value of the new prefix
Returns:
embed: The created embed | Below is the the instruction that describes the task:
### Input:
Creates an embed UI containing the prefix modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
new_prefix (str): The value of the new prefix
Returns:
embed: The created embed
### Response:
def modify_prefix(channel, new_prefix):
"""
Creates an embed UI containing the prefix modified message
Args:
channel (discord.Channel): The Discord channel to bind the embed to
new_prefix (str): The value of the new prefix
Returns:
embed: The created embed
"""
# Create embed UI object
gui = ui_embed.UI(
channel,
"Prefix updated",
"Modis prefix is now `{}`".format(new_prefix),
modulename=modulename
)
return gui |
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_eth_port_id(self, **kwargs):
    """Auto Generated Code
    """
    # Build the payload tree:
    # <fcoe_get_interface><output><fcoe-intf-list>
    #   <fcoe-intf-fcoe-port-id/><fcoe-intf-eth-port-id/>
    # </fcoe-intf-list></output></fcoe_get_interface>
    root = ET.Element("fcoe_get_interface")
    intf_list = ET.SubElement(ET.SubElement(root, "output"), "fcoe-intf-list")
    port_id_key = ET.SubElement(intf_list, "fcoe-intf-fcoe-port-id")
    port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    eth_port = ET.SubElement(intf_list, "fcoe-intf-eth-port-id")
    eth_port.text = kwargs.pop('fcoe_intf_eth_port_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
### Input:
Auto Generated Code
### Response:
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_eth_port_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_interface = ET.Element("fcoe_get_interface")
config = fcoe_get_interface
output = ET.SubElement(fcoe_get_interface, "output")
fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
fcoe_intf_eth_port_id = ET.SubElement(fcoe_intf_list, "fcoe-intf-eth-port-id")
fcoe_intf_eth_port_id.text = kwargs.pop('fcoe_intf_eth_port_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def grid_tiles(self, bbox, zoomlevel):
    """
    Return a grid of (x, y) tuples representing the juxtaposition
    of tiles on the specified ``bbox`` at the specified ``zoomlevel``.
    """
    # Bucket tile x coordinates by row (y).
    rows = {}
    for (_, x, y) in self.tileslist(bbox, [zoomlevel]):
        rows.setdefault(y, []).append(x)
    # TMS numbers rows bottom-up, so its grid is emitted in reverse y order.
    bottom_up = self.tile_scheme == 'tms'
    return [[(x, y) for x in sorted(rows[y])]
            for y in sorted(rows, reverse=bottom_up)]
of tiles on the specified ``bbox`` at the specified ``zoomlevel``. | Below is the the instruction that describes the task:
### Input:
Return a grid of (x, y) tuples representing the juxtaposition
of tiles on the specified ``bbox`` at the specified ``zoomlevel``.
### Response:
def grid_tiles(self, bbox, zoomlevel):
"""
Return a grid of (x, y) tuples representing the juxtaposition
of tiles on the specified ``bbox`` at the specified ``zoomlevel``.
"""
tiles = self.tileslist(bbox, [zoomlevel])
grid = {}
for (z, x, y) in tiles:
if not grid.get(y):
grid[y] = []
grid[y].append(x)
sortedgrid = []
for y in sorted(grid.keys(), reverse=self.tile_scheme == 'tms'):
sortedgrid.append([(x, y) for x in sorted(grid[y])])
return sortedgrid |
def allocate_correlation_matrix(self, iteration = None):
    """!
    @brief Allocate correlation matrix between oscillators at the specified step of simulation.
    @param[in] iteration (uint): Iteration of the simulation for which the matrix is built;
                when omitted, the last simulated step is used.
    @return (list) Correlation matrix between oscillators with size [number_oscillators x number_oscillators].
    """
    # Delegate to the CCORE backend when the native dynamic is available.
    if self._ccore_sync_dynamic_pointer is not None:
        return wrapper.sync_dynamic_allocate_correlation_matrix(self._ccore_sync_dynamic_pointer, iteration)
    if not self._dynamic:
        return []
    phases = self._dynamic[-1] if iteration is None else self._dynamic[iteration]
    size = len(self._dynamic[0])
    # Pairwise correlation is |sin(phase_i - phase_j)|.
    return [[abs(math.sin(phases[i] - phases[j])) for j in range(size)]
            for i in range(size)]
@brief Allocate correlation matrix between oscillators at the specified step of simulation.
@param[in] iteration (uint): Number of iteration of simulation for which correlation matrix should be allocated.
If iternation number is not specified, the last step of simulation is used for the matrix allocation.
@return (list) Correlation matrix between oscillators with size [number_oscillators x number_oscillators]. | Below is the the instruction that describes the task:
### Input:
!
@brief Allocate correlation matrix between oscillators at the specified step of simulation.
@param[in] iteration (uint): Number of iteration of simulation for which correlation matrix should be allocated.
If iternation number is not specified, the last step of simulation is used for the matrix allocation.
@return (list) Correlation matrix between oscillators with size [number_oscillators x number_oscillators].
### Response:
def allocate_correlation_matrix(self, iteration = None):
"""!
@brief Allocate correlation matrix between oscillators at the specified step of simulation.
@param[in] iteration (uint): Number of iteration of simulation for which correlation matrix should be allocated.
If iternation number is not specified, the last step of simulation is used for the matrix allocation.
@return (list) Correlation matrix between oscillators with size [number_oscillators x number_oscillators].
"""
if (self._ccore_sync_dynamic_pointer is not None):
return wrapper.sync_dynamic_allocate_correlation_matrix(self._ccore_sync_dynamic_pointer, iteration);
if ( (self._dynamic is None) or (len(self._dynamic) == 0) ):
return [];
dynamic = self._dynamic;
current_dynamic = dynamic[len(dynamic) - 1];
if (iteration is not None):
current_dynamic = dynamic[iteration];
number_oscillators = len(dynamic[0]);
affinity_matrix = [ [ 0.0 for i in range(number_oscillators) ] for j in range(number_oscillators) ];
for i in range(number_oscillators):
for j in range(number_oscillators):
phase1 = current_dynamic[i];
phase2 = current_dynamic[j];
affinity_matrix[i][j] = abs(math.sin(phase1 - phase2));
return affinity_matrix; |
def from_main(cls, signature=''):
    """
    A decorator that creates an instance and registers the decorated
    function as main function, see :meth:`main` for more information.
    .. versionadded:: 0.2
    """
    app = cls()
    def register(function):
        # Delegate registration to the instance's own main() decorator,
        # then hand the instance back in place of the function.
        app.main(signature)(function)
        return app
    return register
function as main function, see :meth:`main` for more information.
.. versionadded:: 0.2 | Below is the the instruction that describes the task:
### Input:
A decorator that creates an instance and registers the decorated
function as main function, see :meth:`main` for more information.
.. versionadded:: 0.2
### Response:
def from_main(cls, signature=''):
"""
A decorator that creates an instance and registers the decorated
function as main function, see :meth:`main` for more information.
.. versionadded:: 0.2
"""
instance = cls()
def decorate(function):
instance.main(signature)(function)
return instance
return decorate |
def on_key_press(self, symbol, modifiers):
    """
    Keyboard event handler handling only the escape key.
    If an escape key press is detected, mouse exclusivity is toggled via :py:meth:`PengWindow.toggle_exclusivity()`\ .
    """
    # Ignore every key except escape; returning None lets pyglet
    # propagate the event to other handlers.
    if symbol != key.ESCAPE:
        return None
    self.world.peng.window.toggle_exclusivity()
    return pyglet.event.EVENT_HANDLED
If an escape key press is detected, mouse exclusivity is toggled via :py:meth:`PengWindow.toggle_exclusivity()`\ . | Below is the the instruction that describes the task:
### Input:
Keyboard event handler handling only the escape key.
If an escape key press is detected, mouse exclusivity is toggled via :py:meth:`PengWindow.toggle_exclusivity()`\ .
### Response:
def on_key_press(self,symbol,modifiers):
"""
Keyboard event handler handling only the escape key.
If an escape key press is detected, mouse exclusivity is toggled via :py:meth:`PengWindow.toggle_exclusivity()`\ .
"""
if symbol == key.ESCAPE:
self.world.peng.window.toggle_exclusivity()
return pyglet.event.EVENT_HANDLED |
def _make_command_filename(self, exe):
"""The internal function to build up a filename based on a command."""
outfn = os.path.join(self.commons['cmddir'], self.name(),
self._mangle_command(exe))
# check for collisions
if os.path.exists(outfn):
inc = 2
while True:
newfn = "%s_%d" % (outfn, inc)
if not os.path.exists(newfn):
outfn = newfn
break
inc += 1
return outfn | The internal function to build up a filename based on a command. | Below is the the instruction that describes the task:
### Input:
The internal function to build up a filename based on a command.
### Response:
def _make_command_filename(self, exe):
"""The internal function to build up a filename based on a command."""
outfn = os.path.join(self.commons['cmddir'], self.name(),
self._mangle_command(exe))
# check for collisions
if os.path.exists(outfn):
inc = 2
while True:
newfn = "%s_%d" % (outfn, inc)
if not os.path.exists(newfn):
outfn = newfn
break
inc += 1
return outfn |
def get_value(self, index):
    """ Return the value of the given index
    The index stores the section as internal pointer.
    The row of the index determines the key.
    The key is used on the section to return the value
    :param index: The QModelIndex
    :type index: QModelIndex
    :returns: The value for the given index
    """
    section = index.internalPointer()
    key = self.get_key(section, index.row())
    return section[key]
The index stores the section as internal pointer.
The row of the index determines the key.
The key is used on the section to return the value
:param index: The QModelIndex
:type index: QModelIndex
:returns: The value for the given index | Below is the the instruction that describes the task:
### Input:
Return the value of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The key is used on the section to return the value
:param index: The QModelIndex
:type index: QModelIndex
:returns: The value for the given index
### Response:
def get_value(self, index):
""" Return the value of the given index
The index stores the section as internal pointer.
The row of the index determines the key.
The key is used on the section to return the value
:param index: The QModelIndex
:type index: QModelIndex
:returns: The value for the given index
"""
p = index.internalPointer()
k = self.get_key(p, index.row())
return p[k] |
def diffPrefsPrior(priorstring):
    """Parses `priorstring` and returns `prior` tuple.

    `priorstring` must look like "invquadratic,C1,C2" with C1 > 0 and
    C2 > 0; the returned tuple is ('invquadratic', C1, C2).

    Raises:
        ValueError: if `priorstring` is not a valid prior specification
            or the constants are not strictly positive.
    """
    # Fixes: the original validated input with `assert` (stripped under
    # python -O) and its message claimed "must be > 1" while the check
    # enforced "> 0"; the check is authoritative, so the message now
    # matches it and the errors are real ValueErrors.
    if not isinstance(priorstring, str):
        raise ValueError("priorstring must be a str, got {0}".format(type(priorstring)))
    parts = priorstring.split(',')
    if len(parts) == 3 and parts[0] == 'invquadratic':
        c1, c2 = (float(x) for x in parts[1:])
        if c1 <= 0 or c2 <= 0:
            raise ValueError("C1 and C2 must be > 0 for invquadratic prior")
        return ('invquadratic', c1, c2)
    raise ValueError("Invalid diffprefsprior: {0}".format(priorstring))
### Input:
Parses `priorstring` and returns `prior` tuple.
### Response:
def diffPrefsPrior(priorstring):
"""Parses `priorstring` and returns `prior` tuple."""
assert isinstance(priorstring, str)
prior = priorstring.split(',')
if len(prior) == 3 and prior[0] == 'invquadratic':
[c1, c2] = [float(x) for x in prior[1 : ]]
assert c1 > 0 and c2 > 0, "C1 and C2 must be > 1 for invquadratic prior"
return ('invquadratic', c1, c2)
else:
raise ValueError("Invalid diffprefsprior: {0}".format(priorstring)) |
def ToByteArray(self):
    """Serialize the command.
    Encodes the command as per the U2F specs, using the standard
    ISO 7816-4 extended encoding. All Commands expect data, so
    Le is always present.
    Returns:
      Python bytearray of the encoded command.
    """
    lc = self.InternalEncodeLc()
    # Header: CLA INS P1 P2, then the body and the extended Le field.
    encoded = bytearray([self.cla, self.ins, self.p1, self.p2])
    if self.data:
        encoded += lc
        encoded += self.data
        encoded += bytearray([0x00, 0x00])  # Le
    else:
        encoded += bytearray([0x00, 0x00, 0x00])  # Le
    return encoded
Encodes the command as per the U2F specs, using the standard
ISO 7816-4 extended encoding. All Commands expect data, so
Le is always present.
Returns:
Python bytearray of the encoded command. | Below is the the instruction that describes the task:
### Input:
Serialize the command.
Encodes the command as per the U2F specs, using the standard
ISO 7816-4 extended encoding. All Commands expect data, so
Le is always present.
Returns:
Python bytearray of the encoded command.
### Response:
def ToByteArray(self):
"""Serialize the command.
Encodes the command as per the U2F specs, using the standard
ISO 7816-4 extended encoding. All Commands expect data, so
Le is always present.
Returns:
Python bytearray of the encoded command.
"""
lc = self.InternalEncodeLc()
out = bytearray(4) # will extend
out[0] = self.cla
out[1] = self.ins
out[2] = self.p1
out[3] = self.p2
if self.data:
out.extend(lc)
out.extend(self.data)
out.extend([0x00, 0x00]) # Le
else:
out.extend([0x00, 0x00, 0x00]) # Le
return out |
def edit_conf(edit_config=False, load_config=None, **kwargs):
    """
    Edit the Andes config file which occurs first in the search path.
    Parameters
    ----------
    edit_config : str or None
        Editor selection. The empty string ``''`` means "not requested"
        and makes this function return immediately. ``None`` selects a
        platform default editor; any other string is used as the editor
        command itself.
    load_config : None or str, optional
        Path to the config file, which will be placed to the first in the
        search order.
    kwargs : dict
        Other keyword arguments (unused here).
    Returns
    -------
    bool
        ``False`` only when ``edit_config`` is the empty string.
        ``True`` when an editor was launched, and also when no config
        file was found (a hint to save one is logged instead).
    """
    ret = False
    # no `edit-config` supplied
    if edit_config == '':
        return ret
    # NOTE(review): misc.get_config_load_path is project code not visible
    # here; presumably it searches the standard config locations — confirm.
    conf_path = misc.get_config_load_path(load_config)
    if conf_path is not None:
        logger.info('Editing config file {}'.format(conf_path))
        if edit_config is None:
            # use the following default editors
            if platform.system() == 'Linux':
                editor = os.environ.get('EDITOR', 'gedit')
            elif platform.system() == 'Darwin':
                editor = os.environ.get('EDITOR', 'vim')
            elif platform.system() == 'Windows':
                editor = 'notepad.exe'
        else:
            # use `edit_config` as default editor
            editor = edit_config
        # Blocks until the editor process exits.
        call([editor, conf_path])
        ret = True
    else:
        logger.info('Config file does not exist. Save config with \'andes '
                    '--save-config\'')
        ret = True
    return ret
return ret | Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file.
Otherwise returns.
load_config : None or str, optional
Path to the config file, which will be placed to the first in the
search order.
kwargs : dict
Other keyword arguments.
Returns
-------
bool
``True`` is a config file is found and an editor is opened. ``False``
if ``edit_config`` is False. | Below is the the instruction that describes the task:
### Input:
Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file.
Otherwise returns.
load_config : None or str, optional
Path to the config file, which will be placed to the first in the
search order.
kwargs : dict
Other keyword arguments.
Returns
-------
bool
``True`` is a config file is found and an editor is opened. ``False``
if ``edit_config`` is False.
### Response:
def edit_conf(edit_config=False, load_config=None, **kwargs):
"""
Edit the Andes config file which occurs first in the search path.
Parameters
----------
edit_config : bool
If ``True``, try to open up an editor and edit the config file.
Otherwise returns.
load_config : None or str, optional
Path to the config file, which will be placed to the first in the
search order.
kwargs : dict
Other keyword arguments.
Returns
-------
bool
``True`` is a config file is found and an editor is opened. ``False``
if ``edit_config`` is False.
"""
ret = False
# no `edit-config` supplied
if edit_config == '':
return ret
conf_path = misc.get_config_load_path(load_config)
if conf_path is not None:
logger.info('Editing config file {}'.format(conf_path))
if edit_config is None:
# use the following default editors
if platform.system() == 'Linux':
editor = os.environ.get('EDITOR', 'gedit')
elif platform.system() == 'Darwin':
editor = os.environ.get('EDITOR', 'vim')
elif platform.system() == 'Windows':
editor = 'notepad.exe'
else:
# use `edit_config` as default editor
editor = edit_config
call([editor, conf_path])
ret = True
else:
logger.info('Config file does not exist. Save config with \'andes '
'--save-config\'')
ret = True
return ret |
def Y_ampl(self, new_y_scale):
"""Make scaling on Y axis using predefined values"""
self.parent.value('y_scale', new_y_scale)
self.parent.traces.display() | Make scaling on Y axis using predefined values | Below is the instruction that describes the task:
### Input:
Make scaling on Y axis using predefined values
### Response:
def Y_ampl(self, new_y_scale):
"""Make scaling on Y axis using predefined values"""
self.parent.value('y_scale', new_y_scale)
self.parent.traces.display() |
def GetSources(self, event):
"""Determines the the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
return self.SOURCE_SHORT, self.SOURCE_LONG | Determines the the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. | Below is the instruction that describes the task:
### Input:
Determines the the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
### Response:
def GetSources(self, event):
"""Determines the the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
return self.SOURCE_SHORT, self.SOURCE_LONG |
def _generate_namespace():
"""
Generate an LSID namespace based off Jupyter user or system user
:return: string
"""
raw_namespace = None
# Get the Jupyter user, if available
try:
raw_namespace = os.environ['JPY_USER']
except KeyError:
pass
# Otherwise get the current user
if raw_namespace is None or raw_namespace == '':
raw_namespace = getpass.getuser()
# Remove illegal characters and return
return re.sub(r'[^\w.-]', '-', raw_namespace) | Generate an LSID namespace based off Jupyter user or system user
:return: string | Below is the instruction that describes the task:
### Input:
Generate an LSID namespace based off Jupyter user or system user
:return: string
### Response:
def _generate_namespace():
"""
Generate an LSID namespace based off Jupyter user or system user
:return: string
"""
raw_namespace = None
# Get the Jupyter user, if available
try:
raw_namespace = os.environ['JPY_USER']
except KeyError:
pass
# Otherwise get the current user
if raw_namespace is None or raw_namespace == '':
raw_namespace = getpass.getuser()
# Remove illegal characters and return
return re.sub(r'[^\w.-]', '-', raw_namespace) |
def observer(self, component_type=ComponentType):
"""
You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`.
"""
def inner(func):
self.add_observer(func, component_type)
return func
return inner | You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`. | Below is the instruction that describes the task:
### Input:
You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`.
### Response:
def observer(self, component_type=ComponentType):
"""
You can use ``@broker.observer()`` as a decorator to your callback
instead of :func:`Broker.add_observer`.
"""
def inner(func):
self.add_observer(func, component_type)
return func
return inner |
def aggregate_weights(weights, drop_date=False):
"""
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
"""
dwts = pd.DataFrame(weights,
columns=["generic", "contract", "weight", "date"])
dwts = dwts.pivot_table(index=['date', 'contract'],
columns=['generic'], values='weight', fill_value=0)
dwts = dwts.astype(float)
dwts = dwts.sort_index()
if drop_date:
dwts.index = dwts.index.levels[-1]
return dwts | Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names. | Below is the instruction that describes the task:
### Input:
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
### Response:
def aggregate_weights(weights, drop_date=False):
"""
Transforms list of tuples of weights into pandas.DataFrame of weights.
Parameters:
-----------
weights: list
A list of tuples consisting of the generic instrument name,
the tradeable contract as a string, the weight on this contract as a
float and the date as a pandas.Timestamp.
drop_date: boolean
Whether to drop the date from the multiIndex
Returns
-------
A pandas.DataFrame of loadings of generic contracts on tradeable
instruments for a given date. The columns are generic instrument names and
the index is strings representing instrument names.
"""
dwts = pd.DataFrame(weights,
columns=["generic", "contract", "weight", "date"])
dwts = dwts.pivot_table(index=['date', 'contract'],
columns=['generic'], values='weight', fill_value=0)
dwts = dwts.astype(float)
dwts = dwts.sort_index()
if drop_date:
dwts.index = dwts.index.levels[-1]
return dwts |
def load_state(self, state_id, delete=True):
"""
Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:return: The deserialized state
:rtype: State
"""
return self._store.load_state(f'{self._prefix}{state_id:08x}{self._suffix}', delete=delete) | Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:return: The deserialized state
:rtype: State | Below is the instruction that describes the task:
### Input:
Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:return: The deserialized state
:rtype: State
### Response:
def load_state(self, state_id, delete=True):
"""
Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:return: The deserialized state
:rtype: State
"""
return self._store.load_state(f'{self._prefix}{state_id:08x}{self._suffix}', delete=delete) |
def new_space(
self,
name=None,
bases=None,
formula=None,
*,
refs=None,
source=None,
is_derived=False,
prefix=""
):
"""Create a new child space.
Args:
name (str): Name of the space. If omitted, the space is
created automatically.
bases: If specified, the new space becomes a derived space of
the `base` space.
formula: Function whose parameters used to set space parameters.
refs: a mapping of refs to be added.
arguments: ordered dict of space parameter names to their values.
source: A source module from which cell definitions are read.
prefix: Prefix to the autogenerated name when name is None.
"""
from modelx.core.space import StaticSpaceImpl
if name is None:
name = self.spacenamer.get_next(self.namespace, prefix)
if name in self.namespace:
raise ValueError("Name '%s' already exists." % name)
if not prefix and not is_valid_name(name):
raise ValueError("Invalid name '%s'." % name)
space = self._new_space(
name=name,
formula=formula,
refs=refs,
source=source,
is_derived=is_derived,
)
self._set_space(space)
self.model.spacegraph.add_space(space)
# Set up direct base spaces and mro
if bases is not None:
if isinstance(bases, StaticSpaceImpl):
bases = [bases]
space.add_bases(bases)
return space | Create a new child space.
Args:
name (str): Name of the space. If omitted, the space is
created automatically.
bases: If specified, the new space becomes a derived space of
the `base` space.
formula: Function whose parameters used to set space parameters.
refs: a mapping of refs to be added.
arguments: ordered dict of space parameter names to their values.
source: A source module from which cell definitions are read.
prefix: Prefix to the autogenerated name when name is None. | Below is the instruction that describes the task:
### Input:
Create a new child space.
Args:
name (str): Name of the space. If omitted, the space is
created automatically.
bases: If specified, the new space becomes a derived space of
the `base` space.
formula: Function whose parameters used to set space parameters.
refs: a mapping of refs to be added.
arguments: ordered dict of space parameter names to their values.
source: A source module from which cell definitions are read.
prefix: Prefix to the autogenerated name when name is None.
### Response:
def new_space(
self,
name=None,
bases=None,
formula=None,
*,
refs=None,
source=None,
is_derived=False,
prefix=""
):
"""Create a new child space.
Args:
name (str): Name of the space. If omitted, the space is
created automatically.
bases: If specified, the new space becomes a derived space of
the `base` space.
formula: Function whose parameters used to set space parameters.
refs: a mapping of refs to be added.
arguments: ordered dict of space parameter names to their values.
source: A source module from which cell definitions are read.
prefix: Prefix to the autogenerated name when name is None.
"""
from modelx.core.space import StaticSpaceImpl
if name is None:
name = self.spacenamer.get_next(self.namespace, prefix)
if name in self.namespace:
raise ValueError("Name '%s' already exists." % name)
if not prefix and not is_valid_name(name):
raise ValueError("Invalid name '%s'." % name)
space = self._new_space(
name=name,
formula=formula,
refs=refs,
source=source,
is_derived=is_derived,
)
self._set_space(space)
self.model.spacegraph.add_space(space)
# Set up direct base spaces and mro
if bases is not None:
if isinstance(bases, StaticSpaceImpl):
bases = [bases]
space.add_bases(bases)
return space |
def validate(metric_class):
"""
Does basic Metric option validation.
"""
if not hasattr(metric_class, 'label'):
raise ImproperlyConfigured("No 'label' attribute found for metric %s." % metric_class.__name__)
if not hasattr(metric_class, 'widget'):
raise ImproperlyConfigured("No 'widget' attribute found for metric %s." % metric_class.__name__) | Does basic Metric option validation. | Below is the the instruction that describes the task:
### Input:
Does basic Metric option validation.
### Response:
def validate(metric_class):
"""
Does basic Metric option validation.
"""
if not hasattr(metric_class, 'label'):
raise ImproperlyConfigured("No 'label' attribute found for metric %s." % metric_class.__name__)
if not hasattr(metric_class, 'widget'):
raise ImproperlyConfigured("No 'widget' attribute found for metric %s." % metric_class.__name__) |
def fetch_access_token(self, url, verifier=None, **request_kwargs):
"""Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
if verifier:
self._client.client.verifier = verifier
if not getattr(self._client.client, "verifier", None):
raise VerifierMissing("No client verifier has been set.")
token = self._fetch_token(url, **request_kwargs)
log.debug("Resetting verifier attribute, should not be used anymore.")
self._client.client.verifier = None
return token | Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
} | Below is the instruction that describes the task:
### Input:
Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
### Response:
def fetch_access_token(self, url, verifier=None, **request_kwargs):
"""Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
if verifier:
self._client.client.verifier = verifier
if not getattr(self._client.client, "verifier", None):
raise VerifierMissing("No client verifier has been set.")
token = self._fetch_token(url, **request_kwargs)
log.debug("Resetting verifier attribute, should not be used anymore.")
self._client.client.verifier = None
return token |
def load_file(self, path):
"""Load a YAML file with parameter data and other metadata.
:param path: Path to YAML file.
:type path: str
The data in the YAML file is used to set the properties for the instance.
The YAML file must contain a ``parameters`` key.
It may optionally include any keys defined in :attr:`.property_keys`.
.. code-block:: yaml
# data.yml
---
name: Shield frequencies
parameters:
- name: a
value: 24.50
- name: β
value: 42.10
meta:
phase_inverted: true
.. code-block:: python
parameters = Parameters('data.yml')
parameters.name #=> 'Shield frequencies'
parameters.get_value('a') #=> 24.50
parameters.get_meta('β')['phase_inverted'] #=> true
"""
data = yaml.load(open(path, 'r'))
for key in self.property_keys:
if key in data: setattr(self, key, data[key])
self.parameters = self.parameter_list(data['parameters']) | Load a YAML file with parameter data and other metadata.
:param path: Path to YAML file.
:type path: str
The data in the YAML file is used to set the properties for the instance.
The YAML file must contain a ``parameters`` key.
It may optionally include any keys defined in :attr:`.property_keys`.
.. code-block:: yaml
# data.yml
---
name: Shield frequencies
parameters:
- name: a
value: 24.50
- name: β
value: 42.10
meta:
phase_inverted: true
.. code-block:: python
parameters = Parameters('data.yml')
parameters.name #=> 'Shield frequencies'
parameters.get_value('a') #=> 24.50
parameters.get_meta('β')['phase_inverted'] #=> true | Below is the instruction that describes the task:
### Input:
Load a YAML file with parameter data and other metadata.
:param path: Path to YAML file.
:type path: str
The data in the YAML file is used to set the properties for the instance.
The YAML file must contain a ``parameters`` key.
It may optionally include any keys defined in :attr:`.property_keys`.
.. code-block:: yaml
# data.yml
---
name: Shield frequencies
parameters:
- name: a
value: 24.50
- name: β
value: 42.10
meta:
phase_inverted: true
.. code-block:: python
parameters = Parameters('data.yml')
parameters.name #=> 'Shield frequencies'
parameters.get_value('a') #=> 24.50
parameters.get_meta('β')['phase_inverted'] #=> true
### Response:
def load_file(self, path):
"""Load a YAML file with parameter data and other metadata.
:param path: Path to YAML file.
:type path: str
The data in the YAML file is used to set the properties for the instance.
The YAML file must contain a ``parameters`` key.
It may optionally include any keys defined in :attr:`.property_keys`.
.. code-block:: yaml
# data.yml
---
name: Shield frequencies
parameters:
- name: a
value: 24.50
- name: β
value: 42.10
meta:
phase_inverted: true
.. code-block:: python
parameters = Parameters('data.yml')
parameters.name #=> 'Shield frequencies'
parameters.get_value('a') #=> 24.50
parameters.get_meta('β')['phase_inverted'] #=> true
"""
data = yaml.load(open(path, 'r'))
for key in self.property_keys:
if key in data: setattr(self, key, data[key])
self.parameters = self.parameter_list(data['parameters']) |
def reverse_mapping(mapping):
"""
For every key, value pair, return the mapping for the
equivalent value, key pair
>>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
True
"""
keys, values = zip(*mapping.items())
return dict(zip(values, keys)) | For every key, value pair, return the mapping for the
equivalent value, key pair
>>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
True | Below is the instruction that describes the task:
### Input:
For every key, value pair, return the mapping for the
equivalent value, key pair
>>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
True
### Response:
def reverse_mapping(mapping):
"""
For every key, value pair, return the mapping for the
equivalent value, key pair
>>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
True
"""
keys, values = zip(*mapping.items())
return dict(zip(values, keys)) |
def show_fibrechannel_interface_info_output_show_fibrechannel_interface_portsgroup_rbridgeid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fibrechannel_interface_info = ET.Element("show_fibrechannel_interface_info")
config = show_fibrechannel_interface_info
output = ET.SubElement(show_fibrechannel_interface_info, "output")
show_fibrechannel_interface = ET.SubElement(output, "show-fibrechannel-interface")
portsgroup_rbridgeid = ET.SubElement(show_fibrechannel_interface, "portsgroup-rbridgeid")
portsgroup_rbridgeid.text = kwargs.pop('portsgroup_rbridgeid')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_fibrechannel_interface_info_output_show_fibrechannel_interface_portsgroup_rbridgeid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_fibrechannel_interface_info = ET.Element("show_fibrechannel_interface_info")
config = show_fibrechannel_interface_info
output = ET.SubElement(show_fibrechannel_interface_info, "output")
show_fibrechannel_interface = ET.SubElement(output, "show-fibrechannel-interface")
portsgroup_rbridgeid = ET.SubElement(show_fibrechannel_interface, "portsgroup-rbridgeid")
portsgroup_rbridgeid.text = kwargs.pop('portsgroup_rbridgeid')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_entries_for_scope(self, user_scope, scope_name, scope_value, key=None):
"""GetEntriesForScope.
[Preview API] Get all setting entries for the given named scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:param str key: Optional key under which to filter all the entries
:rtype: {object}
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if scope_name is not None:
route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str')
if scope_value is not None:
route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str')
if key is not None:
route_values['key'] = self._serialize.url('key', key, 'str')
response = self._send(http_method='GET',
location_id='4cbaafaf-e8af-4570-98d1-79ee99c56327',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('{object}', self._unwrap_collection(response)) | GetEntriesForScope.
[Preview API] Get all setting entries for the given named scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:param str key: Optional key under which to filter all the entries
:rtype: {object} | Below is the instruction that describes the task:
### Input:
GetEntriesForScope.
[Preview API] Get all setting entries for the given named scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:param str key: Optional key under which to filter all the entries
:rtype: {object}
### Response:
def get_entries_for_scope(self, user_scope, scope_name, scope_value, key=None):
"""GetEntriesForScope.
[Preview API] Get all setting entries for the given named scope
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:param str key: Optional key under which to filter all the entries
:rtype: {object}
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
if scope_name is not None:
route_values['scopeName'] = self._serialize.url('scope_name', scope_name, 'str')
if scope_value is not None:
route_values['scopeValue'] = self._serialize.url('scope_value', scope_value, 'str')
if key is not None:
route_values['key'] = self._serialize.url('key', key, 'str')
response = self._send(http_method='GET',
location_id='4cbaafaf-e8af-4570-98d1-79ee99c56327',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('{object}', self._unwrap_collection(response)) |
def get_assessments_by_query(self, assessment_query):
"""Gets a list of ``Assessments`` matching the given assessment query.
arg: assessment_query (osid.assessment.AssessmentQuery): the
assessment query
return: (osid.assessment.AssessmentList) - the returned
``AssessmentList``
raise: NullArgument - ``assessment_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
"""Gets a list of ``Assessments`` matching the given assessment query.
arg: assessment_query (osid.assessment.AssessmentQuery): the
assessment query
return: (osid.assessment.AssessmentList) - the returned
``AssessmentList``
raise: NullArgument - ``assessment_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
if 'assessmentOfferedId' in assessment_query._query_terms:
collection = JSONClientValidated('assessment',
collection='AssessmentOffered',
runtime=self._runtime)
match = '$in' in assessment_query._query_terms['assessmentOfferedId'].keys()
if match:
match_identifiers = [ObjectId(Id(i).identifier) for i in assessment_query._query_terms['assessmentOfferedId']['$in']]
query = {'$in': match_identifiers}
else:
match_identifiers = [ObjectId(Id(i).identifier) for i in assessment_query._query_terms['assessmentOfferedId']['$in']]
query = {'$nin': match_identifiers}
result = collection.find({
"_id": query
})
assessment_ids = [ObjectId(Id(r['assessmentId']).identifier) for r in result]
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find({
"_id": {"$in": assessment_ids}
})
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
else:
# and_list = list()
# or_list = list()
# for term in assessment_query._query_terms:
# and_list.append({term: assessment_query._query_terms[term]})
# for term in assessment_query._keyword_terms:
# or_list.append({term: assessment_query._keyword_terms[term]})
# if or_list:
# and_list.append({'$or': or_list})
# view_filter = self._view_filter()
# if view_filter:
# and_list.append(view_filter)
# if and_list:
# query_terms = {'$and': and_list}
#
# collection = JSONClientValidated('assessment',
# collection='Assessment',
# runtime=self._runtime)
# result = collection.find(query_terms).sort('_id', DESCENDING)
# else:
# result = []
# return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
and_list = list()
or_list = list()
for term in assessment_query._query_terms:
if '$in' in assessment_query._query_terms[term] and '$nin' in assessment_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': assessment_query._query_terms[term]['$in']}},
{term: {'$nin': assessment_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: assessment_query._query_terms[term]})
for term in assessment_query._keyword_terms:
or_list.append({term: assessment_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) | Gets a list of ``Assessments`` matching the given assessment query.
arg: assessment_query (osid.assessment.AssessmentQuery): the
assessment query
return: (osid.assessment.AssessmentList) - the returned
``AssessmentList``
raise: NullArgument - ``assessment_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets a list of ``Assessments`` matching the given assessment query.
arg: assessment_query (osid.assessment.AssessmentQuery): the
assessment query
return: (osid.assessment.AssessmentList) - the returned
``AssessmentList``
raise: NullArgument - ``assessment_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_assessments_by_query(self, assessment_query):
"""Gets a list of ``Assessments`` matching the given assessment query.
arg: assessment_query (osid.assessment.AssessmentQuery): the
assessment query
return: (osid.assessment.AssessmentList) - the returned
``AssessmentList``
raise: NullArgument - ``assessment_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
"""Gets a list of ``Assessments`` matching the given assessment query.
arg: assessment_query (osid.assessment.AssessmentQuery): the
assessment query
return: (osid.assessment.AssessmentList) - the returned
``AssessmentList``
raise: NullArgument - ``assessment_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
if 'assessmentOfferedId' in assessment_query._query_terms:
collection = JSONClientValidated('assessment',
collection='AssessmentOffered',
runtime=self._runtime)
match = '$in' in assessment_query._query_terms['assessmentOfferedId'].keys()
if match:
match_identifiers = [ObjectId(Id(i).identifier) for i in assessment_query._query_terms['assessmentOfferedId']['$in']]
query = {'$in': match_identifiers}
else:
match_identifiers = [ObjectId(Id(i).identifier) for i in assessment_query._query_terms['assessmentOfferedId']['$in']]
query = {'$nin': match_identifiers}
result = collection.find({
"_id": query
})
assessment_ids = [ObjectId(Id(r['assessmentId']).identifier) for r in result]
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find({
"_id": {"$in": assessment_ids}
})
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
else:
# and_list = list()
# or_list = list()
# for term in assessment_query._query_terms:
# and_list.append({term: assessment_query._query_terms[term]})
# for term in assessment_query._keyword_terms:
# or_list.append({term: assessment_query._keyword_terms[term]})
# if or_list:
# and_list.append({'$or': or_list})
# view_filter = self._view_filter()
# if view_filter:
# and_list.append(view_filter)
# if and_list:
# query_terms = {'$and': and_list}
#
# collection = JSONClientValidated('assessment',
# collection='Assessment',
# runtime=self._runtime)
# result = collection.find(query_terms).sort('_id', DESCENDING)
# else:
# result = []
# return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
and_list = list()
or_list = list()
for term in assessment_query._query_terms:
if '$in' in assessment_query._query_terms[term] and '$nin' in assessment_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': assessment_query._query_terms[term]['$in']}},
{term: {'$nin': assessment_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: assessment_query._query_terms[term]})
for term in assessment_query._keyword_terms:
or_list.append({term: assessment_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('assessment',
collection='Assessment',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy) |
def write(self, target, *args, **kwargs):
"""Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
-----"""
return io_registry.write(self, target, *args, **kwargs) | Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
----- | Below is the instruction that describes the task:
### Input:
Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
-----
### Response:
def write(self, target, *args, **kwargs):
"""Write this `SegmentList` to a file
Arguments and keywords depend on the output format, see the
online documentation for full details for each format.
Parameters
----------
target : `str`
output filename
Notes
-----"""
return io_registry.write(self, target, *args, **kwargs) |
def NewWalker(self, reader):
"""Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlReaderNewWalker(reader__o, self._o)
return ret | Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. | Below is the instruction that describes the task:
### Input:
Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader.
### Response:
def NewWalker(self, reader):
"""Setup an xmltextReader to parse a preparsed XML document.
This reuses the existing @reader xmlTextReader. """
if reader is None: reader__o = None
else: reader__o = reader._o
ret = libxml2mod.xmlReaderNewWalker(reader__o, self._o)
return ret |
def nic_remove(self, nic):
"""
Detach a nic from a bridge
:param nic: nic name to detach
"""
args = {
'nic': nic,
}
self._nic_remove_chk.check(args)
return self._client.json('bridge.nic-remove', args) | Detach a nic from a bridge
:param nic: nic name to detach | Below is the instruction that describes the task:
### Input:
Detach a nic from a bridge
:param nic: nic name to detach
### Response:
def nic_remove(self, nic):
"""
Detach a nic from a bridge
:param nic: nic name to detach
"""
args = {
'nic': nic,
}
self._nic_remove_chk.check(args)
return self._client.json('bridge.nic-remove', args) |
def allzeros(msg):
"""check if the data bits are all zeros
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False
"""
d = hex2bin(data(msg))
if bin2int(d) > 0:
return False
else:
return True | check if the data bits are all zeros
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False | Below is the instruction that describes the task:
### Input:
check if the data bits are all zeros
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False
### Response:
def allzeros(msg):
"""check if the data bits are all zeros
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
bool: True or False
"""
d = hex2bin(data(msg))
if bin2int(d) > 0:
return False
else:
return True |
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Gamma, kpts=[kpts],
kpts_shift=shift) | Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object | Below is the instruction that describes the task:
### Input:
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
### Response:
def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):
"""
Convenient static constructor for an automatic Gamma centered Kpoint
grid.
Args:
kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice
vectors. Defaults to (1,1,1)
shift: Shift to be applied to the kpoints. Defaults to (0,0,0).
Returns:
Kpoints object
"""
return Kpoints("Automatic kpoint scheme", 0,
Kpoints.supported_modes.Gamma, kpts=[kpts],
kpts_shift=shift) |
def isExploitable(self):
"""
Guess how likely is it that the bug causing the crash can be leveraged
into an exploitable vulnerability.
@note: Don't take this as an equivalent of a real exploitability
analysis, that can only be done by a human being! This is only
a guideline, useful for example to sort crashes - placing the most
interesting ones at the top.
@see: The heuristics are similar to those of the B{!exploitable}
extension for I{WinDBG}, which can be downloaded from here:
U{http://www.codeplex.com/msecdbg}
@rtype: tuple( str, str, str )
@return: The first element of the tuple is the result of the analysis,
being one of the following:
- Not an exception
- Not exploitable
- Not likely exploitable
- Unknown
- Probably exploitable
- Exploitable
The second element of the tuple is a code to identify the matched
heuristic rule.
The third element of the tuple is a description string of the
reason behind the result.
"""
# Terminal rules
if self.eventCode != win32.EXCEPTION_DEBUG_EVENT:
return ("Not an exception", "NotAnException", "The event is not an exception.")
if self.stackRange and self.pc is not None and self.stackRange[0] <= self.pc < self.stackRange[1]:
return ("Exploitable", "StackCodeExecution", "Code execution from the stack is considered exploitable.")
# This rule is NOT from !exploitable
if self.stackRange and self.sp is not None and not (self.stackRange[0] <= self.sp < self.stackRange[1]):
return ("Exploitable", "StackPointerCorruption", "Stack pointer corruption is considered exploitable.")
if self.exceptionCode == win32.EXCEPTION_ILLEGAL_INSTRUCTION:
return ("Exploitable", "IllegalInstruction", "An illegal instruction exception indicates that the attacker controls execution flow.")
if self.exceptionCode == win32.EXCEPTION_PRIV_INSTRUCTION:
return ("Exploitable", "PrivilegedInstruction", "A privileged instruction exception indicates that the attacker controls execution flow.")
if self.exceptionCode == win32.EXCEPTION_GUARD_PAGE:
return ("Exploitable", "GuardPage", "A guard page violation indicates a stack overflow has occured, and the stack of another thread was reached (possibly the overflow length is not controlled by the attacker).")
if self.exceptionCode == win32.STATUS_STACK_BUFFER_OVERRUN:
return ("Exploitable", "GSViolation", "An overrun of a protected stack buffer has been detected. This is considered exploitable, and must be fixed.")
if self.exceptionCode == win32.STATUS_HEAP_CORRUPTION:
return ("Exploitable", "HeapCorruption", "Heap Corruption has been detected. This is considered exploitable, and must be fixed.")
if self.exceptionCode == win32.EXCEPTION_ACCESS_VIOLATION:
nearNull = self.faultAddress is None or MemoryAddresses.align_address_to_page_start(self.faultAddress) == 0
controlFlow = self.__is_control_flow()
blockDataMove = self.__is_block_data_move()
if self.faultType == win32.EXCEPTION_EXECUTE_FAULT:
if nearNull:
return ("Probably exploitable", "DEPViolation", "User mode DEP access violations are probably exploitable if near NULL.")
else:
return ("Exploitable", "DEPViolation", "User mode DEP access violations are exploitable.")
elif self.faultType == win32.EXCEPTION_WRITE_FAULT:
if nearNull:
return ("Probably exploitable", "WriteAV", "User mode write access violations that are near NULL are probably exploitable.")
else:
return ("Exploitable", "WriteAV", "User mode write access violations that are not near NULL are exploitable.")
elif self.faultType == win32.EXCEPTION_READ_FAULT:
if self.faultAddress == self.pc:
if nearNull:
return ("Probably exploitable", "ReadAVonIP", "Access violations at the instruction pointer are probably exploitable if near NULL.")
else:
return ("Exploitable", "ReadAVonIP", "Access violations at the instruction pointer are exploitable if not near NULL.")
if controlFlow:
if nearNull:
return ("Probably exploitable", "ReadAVonControlFlow", "Access violations near null in control flow instructions are considered probably exploitable.")
else:
return ("Exploitable", "ReadAVonControlFlow", "Access violations not near null in control flow instructions are considered exploitable.")
if blockDataMove:
return ("Probably exploitable", "ReadAVonBlockMove", "This is a read access violation in a block data move, and is therefore classified as probably exploitable.")
# Rule: Tainted information used to control branch addresses is considered probably exploitable
# Rule: Tainted information used to control the target of a later write is probably exploitable
# Non terminal rules
# XXX TODO add rule to check if code is in writeable memory (probably exploitable)
# XXX TODO maybe we should be returning a list of tuples instead?
result = ("Unknown", "Unknown", "Exploitability unknown.")
if self.exceptionCode == win32.EXCEPTION_ACCESS_VIOLATION:
if self.faultType == win32.EXCEPTION_READ_FAULT:
if nearNull:
result = ("Not likely exploitable", "ReadAVNearNull", "This is a user mode read access violation near null, and is probably not exploitable.")
elif self.exceptionCode == win32.EXCEPTION_INT_DIVIDE_BY_ZERO:
result = ("Not likely exploitable", "DivideByZero", "This is an integer divide by zero, and is probably not exploitable.")
elif self.exceptionCode == win32.EXCEPTION_FLT_DIVIDE_BY_ZERO:
result = ("Not likely exploitable", "DivideByZero", "This is a floating point divide by zero, and is probably not exploitable.")
elif self.exceptionCode in (win32.EXCEPTION_BREAKPOINT, win32.STATUS_WX86_BREAKPOINT):
result = ("Unknown", "Breakpoint", "While a breakpoint itself is probably not exploitable, it may also be an indication that an attacker is testing a target. In either case breakpoints should not exist in production code.")
# Rule: If the stack contains unknown symbols in user mode, call that out
# Rule: Tainted information used to control the source of a later block move unknown, but called out explicitly
# Rule: Tainted information used as an argument to a function is an unknown risk, but called out explicitly
# Rule: Tainted information used to control branch selection is an unknown risk, but called out explicitly
return result | Guess how likely is it that the bug causing the crash can be leveraged
into an exploitable vulnerability.
@note: Don't take this as an equivalent of a real exploitability
analysis, that can only be done by a human being! This is only
a guideline, useful for example to sort crashes - placing the most
interesting ones at the top.
@see: The heuristics are similar to those of the B{!exploitable}
extension for I{WinDBG}, which can be downloaded from here:
U{http://www.codeplex.com/msecdbg}
@rtype: tuple( str, str, str )
@return: The first element of the tuple is the result of the analysis,
being one of the following:
- Not an exception
- Not exploitable
- Not likely exploitable
- Unknown
- Probably exploitable
- Exploitable
The second element of the tuple is a code to identify the matched
heuristic rule.
The third element of the tuple is a description string of the
reason behind the result. | Below is the instruction that describes the task:
### Input:
Guess how likely is it that the bug causing the crash can be leveraged
into an exploitable vulnerability.
@note: Don't take this as an equivalent of a real exploitability
analysis, that can only be done by a human being! This is only
a guideline, useful for example to sort crashes - placing the most
interesting ones at the top.
@see: The heuristics are similar to those of the B{!exploitable}
extension for I{WinDBG}, which can be downloaded from here:
U{http://www.codeplex.com/msecdbg}
@rtype: tuple( str, str, str )
@return: The first element of the tuple is the result of the analysis,
being one of the following:
- Not an exception
- Not exploitable
- Not likely exploitable
- Unknown
- Probably exploitable
- Exploitable
The second element of the tuple is a code to identify the matched
heuristic rule.
The third element of the tuple is a description string of the
reason behind the result.
### Response:
def isExploitable(self):
"""
Guess how likely is it that the bug causing the crash can be leveraged
into an exploitable vulnerability.
@note: Don't take this as an equivalent of a real exploitability
analysis, that can only be done by a human being! This is only
a guideline, useful for example to sort crashes - placing the most
interesting ones at the top.
@see: The heuristics are similar to those of the B{!exploitable}
extension for I{WinDBG}, which can be downloaded from here:
U{http://www.codeplex.com/msecdbg}
@rtype: tuple( str, str, str )
@return: The first element of the tuple is the result of the analysis,
being one of the following:
- Not an exception
- Not exploitable
- Not likely exploitable
- Unknown
- Probably exploitable
- Exploitable
The second element of the tuple is a code to identify the matched
heuristic rule.
The third element of the tuple is a description string of the
reason behind the result.
"""
# Terminal rules
if self.eventCode != win32.EXCEPTION_DEBUG_EVENT:
return ("Not an exception", "NotAnException", "The event is not an exception.")
if self.stackRange and self.pc is not None and self.stackRange[0] <= self.pc < self.stackRange[1]:
return ("Exploitable", "StackCodeExecution", "Code execution from the stack is considered exploitable.")
# This rule is NOT from !exploitable
if self.stackRange and self.sp is not None and not (self.stackRange[0] <= self.sp < self.stackRange[1]):
return ("Exploitable", "StackPointerCorruption", "Stack pointer corruption is considered exploitable.")
if self.exceptionCode == win32.EXCEPTION_ILLEGAL_INSTRUCTION:
return ("Exploitable", "IllegalInstruction", "An illegal instruction exception indicates that the attacker controls execution flow.")
if self.exceptionCode == win32.EXCEPTION_PRIV_INSTRUCTION:
return ("Exploitable", "PrivilegedInstruction", "A privileged instruction exception indicates that the attacker controls execution flow.")
if self.exceptionCode == win32.EXCEPTION_GUARD_PAGE:
return ("Exploitable", "GuardPage", "A guard page violation indicates a stack overflow has occured, and the stack of another thread was reached (possibly the overflow length is not controlled by the attacker).")
if self.exceptionCode == win32.STATUS_STACK_BUFFER_OVERRUN:
return ("Exploitable", "GSViolation", "An overrun of a protected stack buffer has been detected. This is considered exploitable, and must be fixed.")
if self.exceptionCode == win32.STATUS_HEAP_CORRUPTION:
return ("Exploitable", "HeapCorruption", "Heap Corruption has been detected. This is considered exploitable, and must be fixed.")
if self.exceptionCode == win32.EXCEPTION_ACCESS_VIOLATION:
nearNull = self.faultAddress is None or MemoryAddresses.align_address_to_page_start(self.faultAddress) == 0
controlFlow = self.__is_control_flow()
blockDataMove = self.__is_block_data_move()
if self.faultType == win32.EXCEPTION_EXECUTE_FAULT:
if nearNull:
return ("Probably exploitable", "DEPViolation", "User mode DEP access violations are probably exploitable if near NULL.")
else:
return ("Exploitable", "DEPViolation", "User mode DEP access violations are exploitable.")
elif self.faultType == win32.EXCEPTION_WRITE_FAULT:
if nearNull:
return ("Probably exploitable", "WriteAV", "User mode write access violations that are near NULL are probably exploitable.")
else:
return ("Exploitable", "WriteAV", "User mode write access violations that are not near NULL are exploitable.")
elif self.faultType == win32.EXCEPTION_READ_FAULT:
if self.faultAddress == self.pc:
if nearNull:
return ("Probably exploitable", "ReadAVonIP", "Access violations at the instruction pointer are probably exploitable if near NULL.")
else:
return ("Exploitable", "ReadAVonIP", "Access violations at the instruction pointer are exploitable if not near NULL.")
if controlFlow:
if nearNull:
return ("Probably exploitable", "ReadAVonControlFlow", "Access violations near null in control flow instructions are considered probably exploitable.")
else:
return ("Exploitable", "ReadAVonControlFlow", "Access violations not near null in control flow instructions are considered exploitable.")
if blockDataMove:
return ("Probably exploitable", "ReadAVonBlockMove", "This is a read access violation in a block data move, and is therefore classified as probably exploitable.")
# Rule: Tainted information used to control branch addresses is considered probably exploitable
# Rule: Tainted information used to control the target of a later write is probably exploitable
# Non terminal rules
# XXX TODO add rule to check if code is in writeable memory (probably exploitable)
# XXX TODO maybe we should be returning a list of tuples instead?
result = ("Unknown", "Unknown", "Exploitability unknown.")
if self.exceptionCode == win32.EXCEPTION_ACCESS_VIOLATION:
if self.faultType == win32.EXCEPTION_READ_FAULT:
if nearNull:
result = ("Not likely exploitable", "ReadAVNearNull", "This is a user mode read access violation near null, and is probably not exploitable.")
elif self.exceptionCode == win32.EXCEPTION_INT_DIVIDE_BY_ZERO:
result = ("Not likely exploitable", "DivideByZero", "This is an integer divide by zero, and is probably not exploitable.")
elif self.exceptionCode == win32.EXCEPTION_FLT_DIVIDE_BY_ZERO:
result = ("Not likely exploitable", "DivideByZero", "This is a floating point divide by zero, and is probably not exploitable.")
elif self.exceptionCode in (win32.EXCEPTION_BREAKPOINT, win32.STATUS_WX86_BREAKPOINT):
result = ("Unknown", "Breakpoint", "While a breakpoint itself is probably not exploitable, it may also be an indication that an attacker is testing a target. In either case breakpoints should not exist in production code.")
# Rule: If the stack contains unknown symbols in user mode, call that out
# Rule: Tainted information used to control the source of a later block move unknown, but called out explicitly
# Rule: Tainted information used as an argument to a function is an unknown risk, but called out explicitly
# Rule: Tainted information used to control branch selection is an unknown risk, but called out explicitly
return result |
def toggle_legend(self, evt=None, show=None):
"toggle legend display"
if show is None:
show = not self.conf.show_legend
self.conf.show_legend = show
self.conf.draw_legend() | toggle legend display | Below is the instruction that describes the task:
### Input:
toggle legend display
### Response:
def toggle_legend(self, evt=None, show=None):
"toggle legend display"
if show is None:
show = not self.conf.show_legend
self.conf.show_legend = show
self.conf.draw_legend() |
def zpad(v, Nv):
"""Zero-pad initial axes of array to specified size. Padding is
applied to the right, top, etc. of the array indices.
Parameters
----------
v : array_like
Array to be padded
Nv : tuple
Sizes to which each of initial indices should be padded
Returns
-------
vp : ndarray
Padded array
"""
vp = np.zeros(Nv + v.shape[len(Nv):], dtype=v.dtype)
axnslc = tuple([slice(0, x) for x in v.shape])
vp[axnslc] = v
return vp | Zero-pad initial axes of array to specified size. Padding is
applied to the right, top, etc. of the array indices.
Parameters
----------
v : array_like
Array to be padded
Nv : tuple
Sizes to which each of initial indices should be padded
Returns
-------
vp : ndarray
Padded array | Below is the instruction that describes the task:
### Input:
Zero-pad initial axes of array to specified size. Padding is
applied to the right, top, etc. of the array indices.
Parameters
----------
v : array_like
Array to be padded
Nv : tuple
Sizes to which each of initial indices should be padded
Returns
-------
vp : ndarray
Padded array
### Response:
def zpad(v, Nv):
"""Zero-pad initial axes of array to specified size. Padding is
applied to the right, top, etc. of the array indices.
Parameters
----------
v : array_like
Array to be padded
Nv : tuple
Sizes to which each of initial indices should be padded
Returns
-------
vp : ndarray
Padded array
"""
vp = np.zeros(Nv + v.shape[len(Nv):], dtype=v.dtype)
axnslc = tuple([slice(0, x) for x in v.shape])
vp[axnslc] = v
return vp |
def apply_and_get_result(self, string):
"""
Perform the substitution represented by this object on string and return
the result.
"""
if self.is_multiline:
compiled_pattern = re.compile(self.pattern, re.MULTILINE)
else:
compiled_pattern = re.compile(self.pattern)
result = re.sub(compiled_pattern, self.repl, string)
return result | Perform the substitution represented by this object on string and return
the result. | Below is the instruction that describes the task:
### Input:
Perform the substitution represented by this object on string and return
the result.
### Response:
def apply_and_get_result(self, string):
"""
Perform the substitution represented by this object on string and return
the result.
"""
if self.is_multiline:
compiled_pattern = re.compile(self.pattern, re.MULTILINE)
else:
compiled_pattern = re.compile(self.pattern)
result = re.sub(compiled_pattern, self.repl, string)
return result |
def download(self,
files=None,
destination=None,
overwrite=False,
callback=None):
"""Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
"""
if files is None:
files = self.files
elif not isinstance(files, list):
files = [files]
if destination is None:
destination = os.path.expanduser('~')
for f in files:
if not isinstance(f, dict):
raise FMBaseError('File must be a <dict> with file data')
self._download(f, destination, overwrite, callback) | Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func`` | Below is the instruction that describes the task:
### Input:
Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
### Response:
def download(self,
files=None,
destination=None,
overwrite=False,
callback=None):
"""Download file or files.
:param files: file or files to download
:param destination: destination path (defaults to users home directory)
:param overwrite: replace existing files?
:param callback: callback function that will receive total file size
and written bytes as arguments
:type files: ``list`` of ``dict`` with file data from filemail
:type destination: ``str`` or ``unicode``
:type overwrite: ``bool``
:type callback: ``func``
"""
if files is None:
files = self.files
elif not isinstance(files, list):
files = [files]
if destination is None:
destination = os.path.expanduser('~')
for f in files:
if not isinstance(f, dict):
raise FMBaseError('File must be a <dict> with file data')
self._download(f, destination, overwrite, callback) |
def list_exchanges_for_vhost(self, vhost):
"""
A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/exchanges/{0}'.format(
urllib.parse.quote_plus(vhost)
)) | A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str | Below is the instruction that describes the task:
### Input:
A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str
### Response:
def list_exchanges_for_vhost(self, vhost):
"""
A list of all exchanges in a given virtual host.
:param vhost: The vhost name
:type vhost: str
"""
return self._api_get('/api/exchanges/{0}'.format(
urllib.parse.quote_plus(vhost)
)) |
def pageSizePicked( self, pageSize ):
"""
Updates when the user picks a page size.
:param pageSize | <str>
"""
try:
pageSize = int(self._pageSizeCombo.currentText())
except ValueError:
pageSize = 0
self.setPageSize(pageSize)
self.pageSizeChanged.emit(pageSize) | Updates when the user picks a page size.
:param pageSize | <str> | Below is the instruction that describes the task:
### Input:
Updates when the user picks a page size.
:param pageSize | <str>
### Response:
def pageSizePicked( self, pageSize ):
"""
Updates when the user picks a page size.
:param pageSize | <str>
"""
try:
pageSize = int(self._pageSizeCombo.currentText())
except ValueError:
pageSize = 0
self.setPageSize(pageSize)
self.pageSizeChanged.emit(pageSize) |
def _http_date(_date_str: str) -> Optional[datetime.datetime]:
"""Process a date string, return a datetime object
"""
if _date_str is not None:
timetuple = parsedate(_date_str)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None | Process a date string, return a datetime object | Below is the instruction that describes the task:
### Input:
Process a date string, return a datetime object
### Response:
def _http_date(_date_str: str) -> Optional[datetime.datetime]:
"""Process a date string, return a datetime object
"""
if _date_str is not None:
timetuple = parsedate(_date_str)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None |
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol) | Plot the poles and zeros of the FIR filter in the z-plane | Below is the instruction that describes the task:
### Input:
Plot the poles and zeros of the FIR filter in the z-plane
### Response:
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol) |
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding) | Parse a string or file-like object into a tree | Below is the instruction that describes the task:
### Input:
Parse a string or file-like object into a tree
### Response:
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding) |
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
        # pylint: disable=too-many-arguments
        """
        Returns the response from the requested endpoint with the requested method
        :param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
        :param endpoint: str. the relative endpoint to access
        :param params: (optional) Dictionary or bytes to be sent in the query string
        for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body
        of the :class:`Request`.
        :param json: (optional) json to send in the body of the :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
        :return: Requests.response
        """
        logger.debug("Parameters for get_response:")
        logger.debug("\t - endpoint: %s", endpoint)
        logger.debug("\t - method: %s", method)
        logger.debug("\t - headers: %s", headers)
        logger.debug("\t - json: %s", json)
        logger.debug("\t - params: %s", params)
        logger.debug("\t - data: %s", data)
        url = self.get_url(endpoint)
        # First stage. Errors are connection errors (timeout, no session, ...)
        try:
            response = self.session.request(method=method, url=url, headers=headers, json=json,
                                            params=params, data=data, proxies=self.proxies,
                                            timeout=self.timeout)
            logger.debug("response headers: %s", response.headers)
            logger.debug("response content: %s", response.content)
        except RequestException as e:
            # NOTE(review): the exception object itself (not str(e)) is stored
            # under the "message" keys and passed to BackendException -- confirm
            # callers expect an exception instance rather than its text.
            response = {"_status": "ERR",
                        "_error": {"message": e, "code": BACKEND_ERROR},
                        "_issues": {"message": e, "code": BACKEND_ERROR}}
            raise BackendException(code=BACKEND_ERROR,
                                   message=e,
                                   response=response)
        else:
            # Transport succeeded (any HTTP status): return the raw response.
            return response
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :return: Requests.response | Below is the instruction that describes the task:
### Input:
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
### Response:
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
# pylint: disable=too-many-arguments
"""
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
"""
logger.debug("Parameters for get_response:")
logger.debug("\t - endpoint: %s", endpoint)
logger.debug("\t - method: %s", method)
logger.debug("\t - headers: %s", headers)
logger.debug("\t - json: %s", json)
logger.debug("\t - params: %s", params)
logger.debug("\t - data: %s", data)
url = self.get_url(endpoint)
# First stage. Errors are connection errors (timeout, no session, ...)
try:
response = self.session.request(method=method, url=url, headers=headers, json=json,
params=params, data=data, proxies=self.proxies,
timeout=self.timeout)
logger.debug("response headers: %s", response.headers)
logger.debug("response content: %s", response.content)
except RequestException as e:
response = {"_status": "ERR",
"_error": {"message": e, "code": BACKEND_ERROR},
"_issues": {"message": e, "code": BACKEND_ERROR}}
raise BackendException(code=BACKEND_ERROR,
message=e,
response=response)
else:
return response |
def noreplynum_get(self, service_staff_id, start_date, end_date, session):
        '''taobao.wangwang.eservice.noreplynum.get 客服未回复人数
        根据操作者ID,返回被查者ID指定日期内每个帐号每日的"未回复情况" 备注:
        - 1、如果是操作者ID=被查者ID,返回被查者ID的"未回复情况"(未回复人数、未回复的ID)。
        - 2、如果操作者是组管理员,他可以查询他的组中的所有子帐号的"未回复情况"。
        - 3、如果操作者是主账户,他可以查询所有子帐号的"未回复情况"。
        - 4、被查者ID可以是多个,用 "," 隔开,id数不能超过30。
        - 5、开始时间与结束时间之间的间隔不能超过7天
        - 6、不能查询90天以前的数据
        - 7、不能查询当天的记录'''
        # (Docstring, in Chinese: per-day "no reply" statistics for the given
        # customer-service account ids between start_date and end_date; at most
        # 30 ids, a window of at most 7 days, within the last 90 days,
        # excluding today.)
        request = TOPRequest('taobao.wangwang.eservice.noreplynum.get')
        request['service_staff_id'] = service_staff_id
        request['start_date'] = start_date
        request['end_date'] = end_date
        # NOTE(review): create() appears to populate non_reply_stat_on_days
        # from the API response made with `session` -- confirm.
        self.create(self.execute(request, session))
        return self.non_reply_stat_on_days
根据操作者ID,返回被查者ID指定日期内每个帐号每日的"未回复情况" 备注:
- 1、如果是操作者ID=被查者ID,返回被查者ID的"未回复情况"(未回复人数、未回复的ID)。
- 2、如果操作者是组管理员,他可以查询他的组中的所有子帐号的"未回复情况"。
- 3、如果操作者是主账户,他可以查询所有子帐号的"未回复情况"。
- 4、被查者ID可以是多个,用 "," 隔开,id数不能超过30。
- 5、开始时间与结束时间之间的间隔不能超过7天
- 6、不能查询90天以前的数据
- 7、不能查询当天的记录 | Below is the the instruction that describes the task:
### Input:
taobao.wangwang.eservice.noreplynum.get 客服未回复人数
根据操作者ID,返回被查者ID指定日期内每个帐号每日的"未回复情况" 备注:
- 1、如果是操作者ID=被查者ID,返回被查者ID的"未回复情况"(未回复人数、未回复的ID)。
- 2、如果操作者是组管理员,他可以查询他的组中的所有子帐号的"未回复情况"。
- 3、如果操作者是主账户,他可以查询所有子帐号的"未回复情况"。
- 4、被查者ID可以是多个,用 "," 隔开,id数不能超过30。
- 5、开始时间与结束时间之间的间隔不能超过7天
- 6、不能查询90天以前的数据
- 7、不能查询当天的记录
### Response:
def noreplynum_get(self, service_staff_id, start_date, end_date, session):
'''taobao.wangwang.eservice.noreplynum.get 客服未回复人数
根据操作者ID,返回被查者ID指定日期内每个帐号每日的"未回复情况" 备注:
- 1、如果是操作者ID=被查者ID,返回被查者ID的"未回复情况"(未回复人数、未回复的ID)。
- 2、如果操作者是组管理员,他可以查询他的组中的所有子帐号的"未回复情况"。
- 3、如果操作者是主账户,他可以查询所有子帐号的"未回复情况"。
- 4、被查者ID可以是多个,用 "," 隔开,id数不能超过30。
- 5、开始时间与结束时间之间的间隔不能超过7天
- 6、不能查询90天以前的数据
- 7、不能查询当天的记录'''
request = TOPRequest('taobao.wangwang.eservice.noreplynum.get')
request['service_staff_id'] = service_staff_id
request['start_date'] = start_date
request['end_date'] = end_date
self.create(self.execute(request, session))
return self.non_reply_stat_on_days |
def parallel_worker(conn, steps):
    """
    All messages follow the form: <message>, <data>
    Valid messages
    --------------
    run, { 'step': <step_name>, 'input': <input_data> }
    finalise, { 'step': <step_name> }
    next, { 'step': <step_name> }
    stop, None
    """
    while True:
        message, data = conn.recv()
        # NOTE(review): per the docstring a 'stop' message carries data=None,
        # so data['step'] below raises TypeError before the 'stop' branch can
        # run -- likely a bug (guard the log line or the subscript).
        sys.stderr.write(' worker recieved "{}" for {}\n'.format(message, data['step']))
        if message == 'run':
            steps[data['step']].send(('run', data['input']))
            conn.send(('running', {'step': data['step']}))
        elif message == 'finalise':
            steps[data['step']].send(('finalise', None))
            conn.send(('finalising', {'step': data['step']}))
        elif message == 'next':
            sys.stderr.write(' worker sending "next" to {}\n'.format(data['step']))
            steps[data['step']].send(('next', None))
            # Relay the step's reply back to the coordinator connection.
            result = steps[data['step']].recv()
            conn.send(result)
            sys.stderr.write(' worker recieved "{}" from {}\n'.format(result[0], data['step']))
        elif message == 'stop':
            conn.send(('stopped', None))
            break
        else:
            raise ValueError('unknown message to worker: {}'.format(message))
Valid messages
--------------
run, { 'step': <step_name>, 'input': <input_data> }
finalise, { 'step': <step_name> }
next, { 'step': <step_name> }
    stop, None | Below is the instruction that describes the task:
### Input:
All messages follow the form: <message>, <data>
Valid messages
--------------
run, { 'step': <step_name>, 'input': <input_data> }
finalise, { 'step': <step_name> }
next, { 'step': <step_name> }
stop, None
### Response:
def parallel_worker(conn, steps):
"""
All messages follow the form: <message>, <data>
Valid messages
--------------
run, { 'step': <step_name>, 'input': <input_data> }
finalise, { 'step': <step_name> }
next, { 'step': <step_name> }
stop, None
"""
while True:
message, data = conn.recv()
sys.stderr.write(' worker recieved "{}" for {}\n'.format(message, data['step']))
if message == 'run':
steps[data['step']].send(('run', data['input']))
conn.send(('running', {'step': data['step']}))
elif message == 'finalise':
steps[data['step']].send(('finalise', None))
conn.send(('finalising', {'step': data['step']}))
elif message == 'next':
sys.stderr.write(' worker sending "next" to {}\n'.format(data['step']))
steps[data['step']].send(('next', None))
result = steps[data['step']].recv()
conn.send(result)
sys.stderr.write(' worker recieved "{}" from {}\n'.format(result[0], data['step']))
elif message == 'stop':
conn.send(('stopped', None))
break
else:
raise ValueError('unknown message to worker: {}'.format(message)) |
def info(self):
        """Return info about the dimension instance.
        Args::
            no argument
        Returns::
            4-element tuple holding:
            - dimension name; 'fakeDimx' is returned if the dimension
              has not been named yet, where 'x' is the dimension
              index number
            - dimension length; 0 is returned if the dimension is unlimited;
              call the SDim.length() or SDS.info() methods to obtain the
              current dimension length
            - scale data type (one of the SDC.xxx constants); 0 is
              returned if no scale has been set on the dimension
            - number of attributes attached to the dimension
        C library equivalent : SDdiminfo
        """
        # Delegate to the C binding; _checkErr raises if status reports failure.
        status, dim_name, dim_size, data_type, n_attrs = \
            _C.SDdiminfo(self._id)
        _checkErr('info', status, 'cannot execute')
        return dim_name, dim_size, data_type, n_attrs
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
    C library equivalent : SDdiminfo | Below is the instruction that describes the task:
### Input:
Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
### Response:
def info(self):
"""Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
"""
status, dim_name, dim_size, data_type, n_attrs = \
_C.SDdiminfo(self._id)
_checkErr('info', status, 'cannot execute')
return dim_name, dim_size, data_type, n_attrs |
def _slice_value(index, context=None):
    """Get the value of the given slice index."""
    if isinstance(index, Const):
        # Literal bound: only ints and None are valid slice components.
        if isinstance(index.value, (int, type(None))):
            return index.value
    elif index is None:
        # Unspecified bound (e.g. the missing parts of x[:n]).
        return None
    else:
        # Try to infer what the index actually is.
        # Since we can't return all the possible values,
        # we'll stop at the first possible value.
        try:
            inferred = next(index.infer(context=context))
        except exceptions.InferenceError:
            pass
        else:
            if isinstance(inferred, Const):
                if isinstance(inferred.value, (int, type(None))):
                    return inferred.value
    # Use a sentinel, because None can be a valid
    # value that this function can return,
    # as it is the case for unspecified bounds.
    return _SLICE_SENTINEL
### Input:
Get the value of the given slice index.
### Response:
def _slice_value(index, context=None):
"""Get the value of the given slice index."""
if isinstance(index, Const):
if isinstance(index.value, (int, type(None))):
return index.value
elif index is None:
return None
else:
# Try to infer what the index actually is.
# Since we can't return all the possible values,
# we'll stop at the first possible value.
try:
inferred = next(index.infer(context=context))
except exceptions.InferenceError:
pass
else:
if isinstance(inferred, Const):
if isinstance(inferred.value, (int, type(None))):
return inferred.value
# Use a sentinel, because None can be a valid
# value that this function can return,
# as it is the case for unspecified bounds.
return _SLICE_SENTINEL |
def load_bookmarks_without_file(filename):
    """Load all bookmarks but those from a specific file."""
    bookmarks = _load_all_bookmarks()
    # Each value apparently starts with the owning filename (v[0]); keep
    # every bookmark whose file differs from `filename`.
    return {k: v for k, v in bookmarks.items() if v[0] != filename} | Load all bookmarks but those from a specific file. | Below is the the instruction that describes the task:
### Input:
Load all bookmarks but those from a specific file.
### Response:
def load_bookmarks_without_file(filename):
"""Load all bookmarks but those from a specific file."""
bookmarks = _load_all_bookmarks()
return {k: v for k, v in bookmarks.items() if v[0] != filename} |
def cpuinfo():
    ''' Return the information in /proc/cpuinfo
    as a dictionary in the following format:
    cpu_info['proc0']={...}
    cpu_info['proc1']={...}
    '''
    cpuinfo=OrderedDict()
    procinfo=OrderedDict()
    nprocs = 0
    # NOTE(review): a processor block is only flushed on a blank line, so the
    # last block is silently dropped if the file does not end with one, and
    # consecutive blank lines create empty 'procN' entries -- confirm/fix.
    with open('/proc/cpuinfo') as f:
        for line in f:
            if not line.strip():
                # end of one processor
                cpuinfo['proc%s' % nprocs] = procinfo
                nprocs=nprocs+1
                # Reset
                procinfo=OrderedDict()
            else:
                if len(line.split(':')) == 2:
                    procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
                else:
                    # NOTE(review): lines whose value itself contains ':'
                    # fall here and lose their value; str.partition would
                    # be safer.
                    procinfo[line.split(':')[0].strip()] = ''
    return cpuinfo | Return the information in /proc/cpuinfo
as a dictionary in the following format:
cpu_info['proc0']={...}
    cpu_info['proc1']={...} | Below is the instruction that describes the task:
### Input:
Return the information in /proc/cpuinfo
as a dictionary in the following format:
cpu_info['proc0']={...}
cpu_info['proc1']={...}
### Response:
def cpuinfo():
''' Return the information in /proc/cpuinfo
as a dictionary in the following format:
cpu_info['proc0']={...}
cpu_info['proc1']={...}
'''
cpuinfo=OrderedDict()
procinfo=OrderedDict()
nprocs = 0
with open('/proc/cpuinfo') as f:
for line in f:
if not line.strip():
# end of one processor
cpuinfo['proc%s' % nprocs] = procinfo
nprocs=nprocs+1
# Reset
procinfo=OrderedDict()
else:
if len(line.split(':')) == 2:
procinfo[line.split(':')[0].strip()] = line.split(':')[1].strip()
else:
procinfo[line.split(':')[0].strip()] = ''
return cpuinfo |
def p_numeric_literal(self, p):
        """numeric_literal : NUMBER"""
        # PLY-style grammar action: the docstring above is the production
        # rule, so its text must not be edited casually.
        p[0] = self.asttypes.Number(p[1])
        # Attach source position information to the new AST node.
        p[0].setpos(p) | numeric_literal : NUMBER | Below is the the instruction that describes the task:
### Input:
numeric_literal : NUMBER
### Response:
def p_numeric_literal(self, p):
"""numeric_literal : NUMBER"""
p[0] = self.asttypes.Number(p[1])
p[0].setpos(p) |
def _get_download_output_manager_cls(self, transfer_future, osutil):
        """Retrieves a class for managing output for a download
        :type transfer_future: s3transfer.futures.TransferFuture
        :param transfer_future: The transfer future for the request
        :type osutil: s3transfer.utils.OSUtils
        :param osutil: The os utility associated to the transfer
        :rtype: class of DownloadOutputManager
        :returns: The appropriate class to use for managing a specific type of
            input for downloads.
        """
        # The first compatible manager wins; presumably the ordering below is
        # deliberate (special filename checked before plain filename, etc.).
        download_manager_resolver_chain = [
            DownloadSpecialFilenameOutputManager,
            DownloadFilenameOutputManager,
            DownloadSeekableOutputManager,
            DownloadNonSeekableOutputManager,
        ]
        fileobj = transfer_future.meta.call_args.fileobj
        for download_manager_cls in download_manager_resolver_chain:
            if download_manager_cls.is_compatible(fileobj, osutil):
                return download_manager_cls
        # No manager accepted the supplied file object.
        raise RuntimeError(
            'Output %s of type: %s is not supported.' % (
                fileobj, type(fileobj)))
:type transfer_future: s3transfer.futures.TransferFuture
:param transfer_future: The transfer future for the request
:type osutil: s3transfer.utils.OSUtils
:param osutil: The os utility associated to the transfer
:rtype: class of DownloadOutputManager
:returns: The appropriate class to use for managing a specific type of
input for downloads. | Below is the the instruction that describes the task:
### Input:
Retrieves a class for managing output for a download
:type transfer_future: s3transfer.futures.TransferFuture
:param transfer_future: The transfer future for the request
:type osutil: s3transfer.utils.OSUtils
:param osutil: The os utility associated to the transfer
:rtype: class of DownloadOutputManager
:returns: The appropriate class to use for managing a specific type of
input for downloads.
### Response:
def _get_download_output_manager_cls(self, transfer_future, osutil):
"""Retrieves a class for managing output for a download
:type transfer_future: s3transfer.futures.TransferFuture
:param transfer_future: The transfer future for the request
:type osutil: s3transfer.utils.OSUtils
:param osutil: The os utility associated to the transfer
:rtype: class of DownloadOutputManager
:returns: The appropriate class to use for managing a specific type of
input for downloads.
"""
download_manager_resolver_chain = [
DownloadSpecialFilenameOutputManager,
DownloadFilenameOutputManager,
DownloadSeekableOutputManager,
DownloadNonSeekableOutputManager,
]
fileobj = transfer_future.meta.call_args.fileobj
for download_manager_cls in download_manager_resolver_chain:
if download_manager_cls.is_compatible(fileobj, osutil):
return download_manager_cls
raise RuntimeError(
'Output %s of type: %s is not supported.' % (
fileobj, type(fileobj))) |
def atomic_copy(src, dst):
    """Copy the file src to dst, overwriting dst atomically."""
    # The temp file is created in dst's directory so the final os.rename
    # stays on one filesystem (rename is only atomic within a filesystem).
    with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
        shutil.copyfile(src, tmp_dst.name)
        # Preserve the source's permission bits on the copy.
        os.chmod(tmp_dst.name, os.stat(src).st_mode)
        # NOTE(review): assumes temporary_file tolerates its file having
        # been renamed away on context exit -- confirm.
        os.rename(tmp_dst.name, dst) | Copy the file src to dst, overwriting dst atomically. | Below is the the instruction that describes the task:
### Input:
Copy the file src to dst, overwriting dst atomically.
### Response:
def atomic_copy(src, dst):
"""Copy the file src to dst, overwriting dst atomically."""
with temporary_file(root_dir=os.path.dirname(dst)) as tmp_dst:
shutil.copyfile(src, tmp_dst.name)
os.chmod(tmp_dst.name, os.stat(src).st_mode)
os.rename(tmp_dst.name, dst) |
def _getContextFactory(path, workbench):
    """Get a context factory.
    If the client already has a credentials at path, use them.
    Otherwise, generate them at path. Notifications are reported to
    the given workbench.
    """
    try:
        # Credentials already readable: wrap via succeed() so both branches
        # return the same asynchronous shape.
        return succeed(getContextFactory(path))
    except IOError:
        # No credentials on disk yet: prompt for an e-mail, create the
        # credentials, then load the freshly written ones.
        d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
        d.addCallback(_makeCredentials, path, workbench)
        d.addCallback(lambda _result: getContextFactory(path))
        return d
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
    the given workbench. | Below is the instruction that describes the task:
### Input:
Get a context factory.
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
### Response:
def _getContextFactory(path, workbench):
"""Get a context factory.
If the client already has a credentials at path, use them.
Otherwise, generate them at path. Notifications are reported to
the given workbench.
"""
try:
return succeed(getContextFactory(path))
except IOError:
d = prompt(workbench, u"E-mail entry", u"Enter e-mail:")
d.addCallback(_makeCredentials, path, workbench)
d.addCallback(lambda _result: getContextFactory(path))
return d |
def page_location_template(self):
        """
        Returns the published URL template for a page type.
        """
        cycle = self.election_day.cycle.name
        model_class = self.model_type.model_class()
        if model_class == ElectionDay:
            return "/{}/".format(cycle)
        if model_class == Office:
            # President
            if self.jurisdiction:
                # NOTE(review): this is the only branch without a trailing
                # slash -- confirm "/{}/president/{{state}}" is intentional.
                if self.division_level.name == DivisionLevel.STATE:
                    return "/{}/president/{{state}}".format(cycle)
                else:
                    return "/{}/president/".format(cycle)
            # Governor
            else:
                return "/{}/{{state}}/governor/".format(cycle)
        elif model_class == Body:
            # Senate
            if self.body.slug == "senate":
                if self.jurisdiction:
                    if self.division_level.name == DivisionLevel.STATE:
                        return "/{}/senate/{{state}}/".format(cycle)
                    else:
                        return "/{}/senate/".format(cycle)
                else:
                    return "/{}/{{state}}/senate/".format(cycle)
            # House
            else:
                if self.jurisdiction:
                    if self.division_level.name == DivisionLevel.STATE:
                        return "/{}/house/{{state}}/".format(cycle)
                    else:
                        return "/{}/house/".format(cycle)
                else:
                    return "/{}/{{state}}/house/".format(cycle)
        elif model_class == Division:
            return "/{}/{{state}}/".format(cycle)
        else:
            # Model type not mapped to any URL scheme.
            return "ORPHAN TYPE" | Returns the published URL template for a page type. | Below is the the instruction that describes the task:
### Input:
Returns the published URL template for a page type.
### Response:
def page_location_template(self):
"""
Returns the published URL template for a page type.
"""
cycle = self.election_day.cycle.name
model_class = self.model_type.model_class()
if model_class == ElectionDay:
return "/{}/".format(cycle)
if model_class == Office:
# President
if self.jurisdiction:
if self.division_level.name == DivisionLevel.STATE:
return "/{}/president/{{state}}".format(cycle)
else:
return "/{}/president/".format(cycle)
# Governor
else:
return "/{}/{{state}}/governor/".format(cycle)
elif model_class == Body:
# Senate
if self.body.slug == "senate":
if self.jurisdiction:
if self.division_level.name == DivisionLevel.STATE:
return "/{}/senate/{{state}}/".format(cycle)
else:
return "/{}/senate/".format(cycle)
else:
return "/{}/{{state}}/senate/".format(cycle)
# House
else:
if self.jurisdiction:
if self.division_level.name == DivisionLevel.STATE:
return "/{}/house/{{state}}/".format(cycle)
else:
return "/{}/house/".format(cycle)
else:
return "/{}/{{state}}/house/".format(cycle)
elif model_class == Division:
return "/{}/{{state}}/".format(cycle)
else:
return "ORPHAN TYPE" |
def apply_T2(word):
    '''There is a syllable boundary within a VV sequence of two nonidentical
    vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].'''
    WORD = word
    # `offset` counts dots inserted so far, mapping match positions from the
    # original word onto the growing syllabified string.
    offset = 0
    for vv in vv_sequences(WORD):
        seq = vv.group(2)
        if not is_diphthong(seq) and not is_long(seq):
            # Split the pair by inserting '.' between the two vowels.
            i = vv.start(2) + 1 + offset
            WORD = WORD[:i] + '.' + WORD[i:]
            offset += 1
    RULE = ' T2' if word != WORD else ''
    return WORD, RULE
    vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa]. | Below is the instruction that describes the task:
### Input:
There is a syllable boundary within a VV sequence of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].
### Response:
def apply_T2(word):
'''There is a syllable boundary within a VV sequence of two nonidentical
vowels that are not a genuine diphthong, e.g., [ta.e], [ko.et.taa].'''
WORD = word
offset = 0
for vv in vv_sequences(WORD):
seq = vv.group(2)
if not is_diphthong(seq) and not is_long(seq):
i = vv.start(2) + 1 + offset
WORD = WORD[:i] + '.' + WORD[i:]
offset += 1
RULE = ' T2' if word != WORD else ''
return WORD, RULE |
def random_vector(size, mean=0, std=1, rng=None):
    '''Create a vector of randomly-initialized values.
    Parameters
    ----------
    size : int
        Length of vecctor to create.
    mean : float, optional
        Mean value for initial vector values. Defaults to 0.
    std : float, optional
        Standard deviation for initial vector values. Defaults to 1.
    rng : :class:`numpy.random.RandomState` or int, optional
        A random number generator, or an integer seed for a random number
        generator. If not provided, the random number generator will be created
        with an automatically chosen seed.
    Returns
    -------
    vector : numpy array
        An array containing random values. This often represents the bias for a
        layer of computation units.
    '''
    # Accept either a ready-made RandomState or a seed (int/None).
    if rng is None or isinstance(rng, int):
        rng = np.random.RandomState(rng)
    # FLOAT is a module-level dtype constant defined elsewhere in this module.
    return (mean + std * rng.randn(size)).astype(FLOAT)
Parameters
----------
size : int
Length of vecctor to create.
mean : float, optional
Mean value for initial vector values. Defaults to 0.
std : float, optional
Standard deviation for initial vector values. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
vector : numpy array
An array containing random values. This often represents the bias for a
    layer of computation units. | Below is the instruction that describes the task:
### Input:
Create a vector of randomly-initialized values.
Parameters
----------
size : int
Length of vecctor to create.
mean : float, optional
Mean value for initial vector values. Defaults to 0.
std : float, optional
Standard deviation for initial vector values. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
vector : numpy array
An array containing random values. This often represents the bias for a
layer of computation units.
### Response:
def random_vector(size, mean=0, std=1, rng=None):
'''Create a vector of randomly-initialized values.
Parameters
----------
size : int
Length of vecctor to create.
mean : float, optional
Mean value for initial vector values. Defaults to 0.
std : float, optional
Standard deviation for initial vector values. Defaults to 1.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
vector : numpy array
An array containing random values. This often represents the bias for a
layer of computation units.
'''
if rng is None or isinstance(rng, int):
rng = np.random.RandomState(rng)
return (mean + std * rng.randn(size)).astype(FLOAT) |
def on_menu_criteria_file(self, event):
        """
        read pmag_criteria.txt file
        and open changecriteria dialog
        """
        if self.data_model == 3:
            default_file = "criteria.txt"
        else:
            default_file = "pmag_criteria.txt"
        read_sucsess = False
        dlg = wx.FileDialog(
            self, message="choose pmag criteria file",
            defaultDir=self.WD,
            defaultFile=default_file,
            style=wx.FD_OPEN | wx.FD_CHANGE_DIR
        )
        if self.show_dlg(dlg) == wx.ID_OK:
            criteria_file = dlg.GetPath()
            print(("-I- Read new criteria file: %s" % criteria_file))
            # check if this is a valid pmag_criteria file
            try:
                mag_meas_data, file_type = pmag.magic_read(criteria_file)
            except:
                # NOTE(review): bare `except:` also swallows KeyboardInterrupt;
                # `dlg` is rebound below, so Destroy() acts on the message
                # dialog and the file dialog is never destroyed on this
                # path -- confirm/fix.
                dlg = wx.MessageDialog(
                    self, caption="Error", message="not a valid pmag_criteria file", style=wx.OK)
                result = self.show_dlg(dlg)
                if result == wx.ID_OK:
                    dlg.Destroy()
                dlg.Destroy()
                return
            # initialize criteria
            self.acceptance_criteria = self.read_criteria_file(criteria_file)
            read_sucsess = True
        dlg.Destroy()
        if read_sucsess:
            self.on_menu_change_criteria(None)
    and open changecriteria dialog | Below is the instruction that describes the task:
### Input:
read pmag_criteria.txt file
and open changecriteria dialog
### Response:
def on_menu_criteria_file(self, event):
"""
read pmag_criteria.txt file
and open changecriteria dialog
"""
if self.data_model == 3:
default_file = "criteria.txt"
else:
default_file = "pmag_criteria.txt"
read_sucsess = False
dlg = wx.FileDialog(
self, message="choose pmag criteria file",
defaultDir=self.WD,
defaultFile=default_file,
style=wx.FD_OPEN | wx.FD_CHANGE_DIR
)
if self.show_dlg(dlg) == wx.ID_OK:
criteria_file = dlg.GetPath()
print(("-I- Read new criteria file: %s" % criteria_file))
# check if this is a valid pmag_criteria file
try:
mag_meas_data, file_type = pmag.magic_read(criteria_file)
except:
dlg = wx.MessageDialog(
self, caption="Error", message="not a valid pmag_criteria file", style=wx.OK)
result = self.show_dlg(dlg)
if result == wx.ID_OK:
dlg.Destroy()
dlg.Destroy()
return
# initialize criteria
self.acceptance_criteria = self.read_criteria_file(criteria_file)
read_sucsess = True
dlg.Destroy()
if read_sucsess:
self.on_menu_change_criteria(None) |
def _get_topologies_with_watch(self, callback, isWatching):
        """
        Helper function to get topologies with
        a callback. The future watch is placed
        only if isWatching is True.
        """
        path = self.get_topologies_path()
        if isWatching:
            LOG.info("Adding children watch for path: " + path)
        # NOTE(review): registering the watch presumably invokes
        # watch_topologies once immediately with the current children
        # (kazoo-style ChildrenWatch) -- confirm against the client library.
        # pylint: disable=unused-variable
        @self.client.ChildrenWatch(path)
        def watch_topologies(topologies):
            """ callback to watch topologies """
            callback(topologies)
            # Returning False will result in no future watches
            # being triggered. If isWatching is True, then
            # the future watches will be triggered.
            return isWatching
a callback. The future watch is placed
    only if isWatching is True. | Below is the instruction that describes the task:
### Input:
Helper function to get topologies with
a callback. The future watch is placed
only if isWatching is True.
### Response:
def _get_topologies_with_watch(self, callback, isWatching):
"""
Helper function to get topologies with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_topologies_path()
if isWatching:
LOG.info("Adding children watch for path: " + path)
# pylint: disable=unused-variable
@self.client.ChildrenWatch(path)
def watch_topologies(topologies):
""" callback to watch topologies """
callback(topologies)
# Returning False will result in no future watches
# being triggered. If isWatching is True, then
# the future watches will be triggered.
return isWatching |
def p_label_list_list(p):
    """ label_list : label_list COMMA ID
                   | label_list COMMA NUMBER
    """
    # PLY-style grammar action; the docstring above is the production rule.
    p[0] = p[1]
    # Validate the label token and append the symbol-table entry in place.
    entry = check_and_make_label(p[3], p.lineno(3))
    p[1].append(entry)
| label_list COMMA NUMBER | Below is the the instruction that describes the task:
### Input:
label_list : label_list COMMA ID
| label_list COMMA NUMBER
### Response:
def p_label_list_list(p):
""" label_list : label_list COMMA ID
| label_list COMMA NUMBER
"""
p[0] = p[1]
entry = check_and_make_label(p[3], p.lineno(3))
p[1].append(entry) |
def save(self, filename):
        '''save settings to a file. Return True/False on success/failure'''
        try:
            f = open(filename, mode='w')
        except Exception:
            return False
        # NOTE(review): only open() failures are caught; an error while
        # writing would propagate and leak the handle -- a `with` block
        # (or a wider try) would honour the documented True/False contract.
        for k in self.list():
            f.write("%s=%s\n" % (k, self.get(k)))
        f.close()
        return True | save settings to a file. Return True/False on success/failure | Below is the the instruction that describes the task:
### Input:
save settings to a file. Return True/False on success/failure
### Response:
def save(self, filename):
'''save settings to a file. Return True/False on success/failure'''
try:
f = open(filename, mode='w')
except Exception:
return False
for k in self.list():
f.write("%s=%s\n" % (k, self.get(k)))
f.close()
return True |
def _construct_request(self):
"""
Utility for constructing the request header and connection
"""
if self.parsed_endpoint.scheme == 'https':
conn = httplib.HTTPSConnection(self.parsed_endpoint.netloc)
else:
conn = httplib.HTTPConnection(self.parsed_endpoint.netloc)
head = {
"Accept": "application/json",
"User-Agent": USER_AGENT,
API_TOKEN_HEADER_NAME: self.api_token,
}
if self.api_version in ['0.1', '0.01a']:
head[API_VERSION_HEADER_NAME] = self.api_version
return conn, head | Utility for constructing the request header and connection | Below is the the instruction that describes the task:
### Input:
Utility for constructing the request header and connection
### Response:
def _construct_request(self):
"""
Utility for constructing the request header and connection
"""
if self.parsed_endpoint.scheme == 'https':
conn = httplib.HTTPSConnection(self.parsed_endpoint.netloc)
else:
conn = httplib.HTTPConnection(self.parsed_endpoint.netloc)
head = {
"Accept": "application/json",
"User-Agent": USER_AGENT,
API_TOKEN_HEADER_NAME: self.api_token,
}
if self.api_version in ['0.1', '0.01a']:
head[API_VERSION_HEADER_NAME] = self.api_version
return conn, head |
def _get_conn(self, host, port):
"""Get or create a connection to a broker using host and port"""
host_key = (host, port)
if host_key not in self.conns:
self.conns[host_key] = KafkaConnection(
host,
port,
timeout=self.timeout
)
return self.conns[host_key] | Get or create a connection to a broker using host and port | Below is the the instruction that describes the task:
### Input:
Get or create a connection to a broker using host and port
### Response:
def _get_conn(self, host, port):
"""Get or create a connection to a broker using host and port"""
host_key = (host, port)
if host_key not in self.conns:
self.conns[host_key] = KafkaConnection(
host,
port,
timeout=self.timeout
)
return self.conns[host_key] |
def handle_alarm(sender, **kwargs):
"""
Handles alarm events from the AlarmDecoder.
"""
zone = kwargs.pop('zone', None)
text = "Alarm: Zone {0}".format(zone)
# Build the email message
msg = MIMEText(text)
msg['Subject'] = SUBJECT
msg['From'] = FROM_ADDRESS
msg['To'] = TO_ADDRESS
s = smtplib.SMTP(SMTP_SERVER)
# Authenticate if needed
if SMTP_USERNAME is not None:
s.login(SMTP_USERNAME, SMTP_PASSWORD)
# Send the email
s.sendmail(FROM_ADDRESS, TO_ADDRESS, msg.as_string())
s.quit()
print('sent alarm email:', text) | Handles alarm events from the AlarmDecoder. | Below is the the instruction that describes the task:
### Input:
Handles alarm events from the AlarmDecoder.
### Response:
def handle_alarm(sender, **kwargs):
"""
Handles alarm events from the AlarmDecoder.
"""
zone = kwargs.pop('zone', None)
text = "Alarm: Zone {0}".format(zone)
# Build the email message
msg = MIMEText(text)
msg['Subject'] = SUBJECT
msg['From'] = FROM_ADDRESS
msg['To'] = TO_ADDRESS
s = smtplib.SMTP(SMTP_SERVER)
# Authenticate if needed
if SMTP_USERNAME is not None:
s.login(SMTP_USERNAME, SMTP_PASSWORD)
# Send the email
s.sendmail(FROM_ADDRESS, TO_ADDRESS, msg.as_string())
s.quit()
print('sent alarm email:', text) |
def combinatorics(self):
"""
Returns mutually exclusive/inclusive clampings
Returns
-------
(dict,dict)
A tuple of 2 dictionaries.
For each literal key, the first dict has as value the set of mutually exclusive clampings while
the second dict has as value the set of mutually inclusive clampings.
"""
df = self.to_dataframe()
literals = set((l for l in it.chain.from_iterable(self)))
exclusive, inclusive = defaultdict(set), defaultdict(set)
for l1, l2 in it.combinations(it.ifilter(lambda l: self.frequency(l) < 1., literals), 2):
a1, a2 = df[l1.variable] == l1.signature, df[l2.variable] == l2.signature
if (a1 != a2).all():
exclusive[l1].add(l2)
exclusive[l2].add(l1)
if (a1 == a2).all():
inclusive[l1].add(l2)
inclusive[l2].add(l1)
return exclusive, inclusive | Returns mutually exclusive/inclusive clampings
Returns
-------
(dict,dict)
A tuple of 2 dictionaries.
For each literal key, the first dict has as value the set of mutually exclusive clampings while
the second dict has as value the set of mutually inclusive clampings. | Below is the the instruction that describes the task:
### Input:
Returns mutually exclusive/inclusive clampings
Returns
-------
(dict,dict)
A tuple of 2 dictionaries.
For each literal key, the first dict has as value the set of mutually exclusive clampings while
the second dict has as value the set of mutually inclusive clampings.
### Response:
def combinatorics(self):
"""
Returns mutually exclusive/inclusive clampings
Returns
-------
(dict,dict)
A tuple of 2 dictionaries.
For each literal key, the first dict has as value the set of mutually exclusive clampings while
the second dict has as value the set of mutually inclusive clampings.
"""
df = self.to_dataframe()
literals = set((l for l in it.chain.from_iterable(self)))
exclusive, inclusive = defaultdict(set), defaultdict(set)
for l1, l2 in it.combinations(it.ifilter(lambda l: self.frequency(l) < 1., literals), 2):
a1, a2 = df[l1.variable] == l1.signature, df[l2.variable] == l2.signature
if (a1 != a2).all():
exclusive[l1].add(l2)
exclusive[l2].add(l1)
if (a1 == a2).all():
inclusive[l1].add(l2)
inclusive[l2].add(l1)
return exclusive, inclusive |
def internal_get_completions(dbg, seq, thread_id, frame_id, act_tok, line=-1, column=-1):
'''
Note that if the column is >= 0, the act_tok is considered text and the actual
activation token/qualifier is computed in this command.
'''
try:
remove_path = None
try:
qualifier = u''
if column >= 0:
token_and_qualifier = extract_token_and_qualifier(act_tok, line, column)
act_tok = token_and_qualifier[0]
if act_tok:
act_tok += u'.'
qualifier = token_and_qualifier[1]
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
if IS_PY2:
if not isinstance(act_tok, bytes):
act_tok = act_tok.encode('utf-8')
if not isinstance(qualifier, bytes):
qualifier = qualifier.encode('utf-8')
completions = _pydev_completer.generate_completions(frame, act_tok)
# Note that qualifier and start are only actually valid for the
# Debug Adapter Protocol (for the line-based protocol, the IDE
# is required to filter the completions returned).
cmd = dbg.cmd_factory.make_get_completions_message(
seq, completions, qualifier, start=column - len(qualifier))
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(seq, "internal_get_completions: Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd) | Note that if the column is >= 0, the act_tok is considered text and the actual
activation token/qualifier is computed in this command. | Below is the the instruction that describes the task:
### Input:
Note that if the column is >= 0, the act_tok is considered text and the actual
activation token/qualifier is computed in this command.
### Response:
def internal_get_completions(dbg, seq, thread_id, frame_id, act_tok, line=-1, column=-1):
'''
Note that if the column is >= 0, the act_tok is considered text and the actual
activation token/qualifier is computed in this command.
'''
try:
remove_path = None
try:
qualifier = u''
if column >= 0:
token_and_qualifier = extract_token_and_qualifier(act_tok, line, column)
act_tok = token_and_qualifier[0]
if act_tok:
act_tok += u'.'
qualifier = token_and_qualifier[1]
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
if IS_PY2:
if not isinstance(act_tok, bytes):
act_tok = act_tok.encode('utf-8')
if not isinstance(qualifier, bytes):
qualifier = qualifier.encode('utf-8')
completions = _pydev_completer.generate_completions(frame, act_tok)
# Note that qualifier and start are only actually valid for the
# Debug Adapter Protocol (for the line-based protocol, the IDE
# is required to filter the completions returned).
cmd = dbg.cmd_factory.make_get_completions_message(
seq, completions, qualifier, start=column - len(qualifier))
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(seq, "internal_get_completions: Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd) |
def mesh_axis_to_cumprod(self, tensor_shape):
"""For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None.
"""
tensor_layout = self.tensor_layout(tensor_shape)
ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
ta2cumprod = tensor_shape.cumprod
return [None if ta is None else ta2cumprod[ta] for ta in ma2ta] | For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None. | Below is the the instruction that describes the task:
### Input:
For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None.
### Response:
def mesh_axis_to_cumprod(self, tensor_shape):
"""For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None.
"""
tensor_layout = self.tensor_layout(tensor_shape)
ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
ta2cumprod = tensor_shape.cumprod
return [None if ta is None else ta2cumprod[ta] for ta in ma2ta] |
def indices(self, index=None, params=None):
"""
The indices command provides a cross-section of each index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'm', 'g'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg health: A health status ("green", "yellow", or "red" to filter only
indices matching the specified health status, default None, valid
choices are: 'green', 'yellow', 'red'
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg pri: Set to true to return stats only for primary shards, default
False
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'indices', index), params=params) | The indices command provides a cross-section of each index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'm', 'g'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg health: A health status ("green", "yellow", or "red" to filter only
indices matching the specified health status, default None, valid
choices are: 'green', 'yellow', 'red'
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg pri: Set to true to return stats only for primary shards, default
False
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False | Below is the the instruction that describes the task:
### Input:
The indices command provides a cross-section of each index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'm', 'g'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg health: A health status ("green", "yellow", or "red" to filter only
indices matching the specified health status, default None, valid
choices are: 'green', 'yellow', 'red'
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg pri: Set to true to return stats only for primary shards, default
False
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
### Response:
def indices(self, index=None, params=None):
"""
The indices command provides a cross-section of each index.
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values, valid choices are:
'b', 'k', 'm', 'g'
:arg format: a short version of the Accept header, e.g. json, yaml
:arg h: Comma-separated list of column names to display
:arg health: A health status ("green", "yellow", or "red" to filter only
indices matching the specified health status, default None, valid
choices are: 'green', 'yellow', 'red'
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg pri: Set to true to return stats only for primary shards, default
False
:arg s: Comma-separated list of column names or column aliases to sort
by
:arg v: Verbose mode. Display column headers, default False
"""
return self.transport.perform_request('GET', _make_path('_cat',
'indices', index), params=params) |
def access_level_up_to(self, access_level):
""" returns all items that have an access level equal or lower than the one specified """
# if access_level is number
if isinstance(access_level, int):
value = access_level
# else if is string get the numeric value
else:
value = ACCESS_LEVELS.get(access_level)
# return queryset
return self.filter(access_level__lte=value) | returns all items that have an access level equal or lower than the one specified | Below is the the instruction that describes the task:
### Input:
returns all items that have an access level equal or lower than the one specified
### Response:
def access_level_up_to(self, access_level):
""" returns all items that have an access level equal or lower than the one specified """
# if access_level is number
if isinstance(access_level, int):
value = access_level
# else if is string get the numeric value
else:
value = ACCESS_LEVELS.get(access_level)
# return queryset
return self.filter(access_level__lte=value) |
def __add_class_views(self):
"""
Include any custom class views on the Sanic JWT Blueprint
"""
config = self.config
if "class_views" in self.kwargs:
class_views = self.kwargs.pop("class_views")
for route, view in class_views:
if issubclass(view, endpoints.BaseEndpoint) and isinstance(
route, str
):
self.bp.add_route(
view.as_view(
self.responses,
config=self.config,
instance=self.instance,
),
route,
strict_slashes=config.strict_slashes(),
)
else:
raise exceptions.InvalidClassViewsFormat() | Include any custom class views on the Sanic JWT Blueprint | Below is the the instruction that describes the task:
### Input:
Include any custom class views on the Sanic JWT Blueprint
### Response:
def __add_class_views(self):
"""
Include any custom class views on the Sanic JWT Blueprint
"""
config = self.config
if "class_views" in self.kwargs:
class_views = self.kwargs.pop("class_views")
for route, view in class_views:
if issubclass(view, endpoints.BaseEndpoint) and isinstance(
route, str
):
self.bp.add_route(
view.as_view(
self.responses,
config=self.config,
instance=self.instance,
),
route,
strict_slashes=config.strict_slashes(),
)
else:
raise exceptions.InvalidClassViewsFormat() |
def pre_attention(self, segment_number, query_antecedent,
memory_antecedent, bias):
"""Called prior to self-attention, to incorporate memory items.
Args:
segment_number: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias)
"""
with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE):
assert memory_antecedent is None, "We only support language modeling"
with tf.control_dependencies([
tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]):
difference = self.batch_size - tf.size(segment_number)
segment_number = tf.pad(segment_number, [[0, difference]])
reset_op = self.reset(tf.reshape(tf.where(
tf.less(segment_number, self.segment_number)), [-1]))
memory_results = {}
with tf.control_dependencies([reset_op]):
with tf.control_dependencies([
self.update_segment_number(segment_number)]):
x = tf.pad(query_antecedent, [
[0, difference], [0, 0], [0, 0]])
access_logits, retrieved_mem = self.read(x)
memory_results["x"] = x
memory_results["access_logits"] = access_logits
memory_results["retrieved_mem"] = retrieved_mem
return memory_results, query_antecedent, memory_antecedent, bias | Called prior to self-attention, to incorporate memory items.
Args:
segment_number: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias) | Below is the the instruction that describes the task:
### Input:
Called prior to self-attention, to incorporate memory items.
Args:
segment_number: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias)
### Response:
def pre_attention(self, segment_number, query_antecedent,
memory_antecedent, bias):
"""Called prior to self-attention, to incorporate memory items.
Args:
segment_number: an integer Tensor with shape [batch]
query_antecedent: a Tensor with shape [batch, length_q, channels]
memory_antecedent: must be None. Attention normally allows this to be a
Tensor with shape [batch, length_m, channels], but we currently only
support memory for decoder-side self-attention.
bias: bias Tensor (see attention_bias())
Returns:
(data, new_query_antecedent, new_memory_antecedent, new_bias)
"""
with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE):
assert memory_antecedent is None, "We only support language modeling"
with tf.control_dependencies([
tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]):
difference = self.batch_size - tf.size(segment_number)
segment_number = tf.pad(segment_number, [[0, difference]])
reset_op = self.reset(tf.reshape(tf.where(
tf.less(segment_number, self.segment_number)), [-1]))
memory_results = {}
with tf.control_dependencies([reset_op]):
with tf.control_dependencies([
self.update_segment_number(segment_number)]):
x = tf.pad(query_antecedent, [
[0, difference], [0, 0], [0, 0]])
access_logits, retrieved_mem = self.read(x)
memory_results["x"] = x
memory_results["access_logits"] = access_logits
memory_results["retrieved_mem"] = retrieved_mem
return memory_results, query_antecedent, memory_antecedent, bias |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.