code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given input text, returns the authors likelihood of being 16 different personality
types in a dict.
Example usage:
.. code-block:: python
>>> text = "I love going out with my friends"
>>> entities = indicoio.personas(text)
{'architect': 0.2191890478134155, 'logician': 0.0158474326133728,
'commander': 0.07654544115066528 ...}
:param text: The text to be analyzed.
:type text: str or unicode
:rtype: The authors 'Extraversion', 'Conscientiousness',
'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
"""
url_params = {"batch": batch, "api_key": api_key, "version": version}
kwargs['persona'] = True
return api_handler(text, cloud=cloud, api="personality", url_params=url_params, **kwargs) | Given input text, returns the authors likelihood of being 16 different personality
types in a dict.
Example usage:
.. code-block:: python
>>> text = "I love going out with my friends"
>>> entities = indicoio.personas(text)
{'architect': 0.2191890478134155, 'logician': 0.0158474326133728,
'commander': 0.07654544115066528 ...}
:param text: The text to be analyzed.
:type text: str or unicode
:rtype: The authors 'Extraversion', 'Conscientiousness',
'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary. | Below is the the instruction that describes the task:
### Input:
Given input text, returns the authors likelihood of being 16 different personality
types in a dict.
Example usage:
.. code-block:: python
>>> text = "I love going out with my friends"
>>> entities = indicoio.personas(text)
{'architect': 0.2191890478134155, 'logician': 0.0158474326133728,
'commander': 0.07654544115066528 ...}
:param text: The text to be analyzed.
:type text: str or unicode
:rtype: The authors 'Extraversion', 'Conscientiousness',
'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
### Response:
def personas(text, cloud=None, batch=False, api_key=None, version=None, **kwargs):
"""
Given input text, returns the authors likelihood of being 16 different personality
types in a dict.
Example usage:
.. code-block:: python
>>> text = "I love going out with my friends"
>>> entities = indicoio.personas(text)
{'architect': 0.2191890478134155, 'logician': 0.0158474326133728,
'commander': 0.07654544115066528 ...}
:param text: The text to be analyzed.
:type text: str or unicode
:rtype: The authors 'Extraversion', 'Conscientiousness',
'Openness', and 'Agreeableness' score (a float between 0 and 1) in a dictionary.
"""
url_params = {"batch": batch, "api_key": api_key, "version": version}
kwargs['persona'] = True
return api_handler(text, cloud=cloud, api="personality", url_params=url_params, **kwargs) |
def load_data(self, table_name, obj, **kwargs):
"""
Parameters
----------
table_name : string
obj: pandas.DataFrame
"""
# kwargs is a catch all for any options required by other backends.
self.dictionary[table_name] = pd.DataFrame(obj) | Parameters
----------
table_name : string
obj: pandas.DataFrame | Below is the the instruction that describes the task:
### Input:
Parameters
----------
table_name : string
obj: pandas.DataFrame
### Response:
def load_data(self, table_name, obj, **kwargs):
"""
Parameters
----------
table_name : string
obj: pandas.DataFrame
"""
# kwargs is a catch all for any options required by other backends.
self.dictionary[table_name] = pd.DataFrame(obj) |
def filter(self, *LayoutClasses, **kwargs):
"""
Returns a LayoutSlice pointing to layout objects of type `LayoutClass`
"""
self._check_layout()
max_level = kwargs.pop('max_level', 0)
greedy = kwargs.pop('greedy', False)
filtered_layout_objects = self.layout.get_layout_objects(LayoutClasses, max_level=max_level, greedy=greedy)
return LayoutSlice(self.layout, filtered_layout_objects) | Returns a LayoutSlice pointing to layout objects of type `LayoutClass` | Below is the the instruction that describes the task:
### Input:
Returns a LayoutSlice pointing to layout objects of type `LayoutClass`
### Response:
def filter(self, *LayoutClasses, **kwargs):
"""
Returns a LayoutSlice pointing to layout objects of type `LayoutClass`
"""
self._check_layout()
max_level = kwargs.pop('max_level', 0)
greedy = kwargs.pop('greedy', False)
filtered_layout_objects = self.layout.get_layout_objects(LayoutClasses, max_level=max_level, greedy=greedy)
return LayoutSlice(self.layout, filtered_layout_objects) |
def get_joystick_axes(joy):
"""
Returns the values of all axes of the specified joystick.
Wrapper for:
const float* glfwGetJoystickAxes(int joy, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickAxes(joy, count)
return result, count_value.value | Returns the values of all axes of the specified joystick.
Wrapper for:
const float* glfwGetJoystickAxes(int joy, int* count); | Below is the the instruction that describes the task:
### Input:
Returns the values of all axes of the specified joystick.
Wrapper for:
const float* glfwGetJoystickAxes(int joy, int* count);
### Response:
def get_joystick_axes(joy):
"""
Returns the values of all axes of the specified joystick.
Wrapper for:
const float* glfwGetJoystickAxes(int joy, int* count);
"""
count_value = ctypes.c_int(0)
count = ctypes.pointer(count_value)
result = _glfw.glfwGetJoystickAxes(joy, count)
return result, count_value.value |
def set_system_date_time(years=None,
months=None,
days=None,
hours=None,
minutes=None,
seconds=None):
'''
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for that
element will be used. For example, if you don't pass the year, the current
system year will be used. (Used by set_system_date and set_system_time)
Args:
years (int): Years digit, ie: 2015
months (int): Months digit: 1 - 12
days (int): Days digit: 1 - 31
hours (int): Hours digit: 0 - 23
minutes (int): Minutes digit: 0 - 59
seconds (int): Seconds digit: 0 - 59
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_ time 2015 5 12 11 37 53
'''
# Get the current date/time
try:
date_time = win32api.GetLocalTime()
except win32api.error as exc:
(number, context, message) = exc.args
log.error('Failed to get local time')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
log.error('msg: %s', message)
return False
# Check for passed values. If not passed, use current values
if years is None:
years = date_time[0]
if months is None:
months = date_time[1]
if days is None:
days = date_time[3]
if hours is None:
hours = date_time[4]
if minutes is None:
minutes = date_time[5]
if seconds is None:
seconds = date_time[6]
try:
class SYSTEMTIME(ctypes.Structure):
_fields_ = [
('wYear', ctypes.c_int16),
('wMonth', ctypes.c_int16),
('wDayOfWeek', ctypes.c_int16),
('wDay', ctypes.c_int16),
('wHour', ctypes.c_int16),
('wMinute', ctypes.c_int16),
('wSecond', ctypes.c_int16),
('wMilliseconds', ctypes.c_int16)]
system_time = SYSTEMTIME()
system_time.wYear = int(years)
system_time.wMonth = int(months)
system_time.wDay = int(days)
system_time.wHour = int(hours)
system_time.wMinute = int(minutes)
system_time.wSecond = int(seconds)
system_time_ptr = ctypes.pointer(system_time)
succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
if succeeded is not 0:
return True
else:
log.error('Failed to set local time')
raise CommandExecutionError(
win32api.FormatMessage(succeeded).rstrip())
except OSError as err:
log.error('Failed to set local time')
raise CommandExecutionError(err) | Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for that
element will be used. For example, if you don't pass the year, the current
system year will be used. (Used by set_system_date and set_system_time)
Args:
years (int): Years digit, ie: 2015
months (int): Months digit: 1 - 12
days (int): Days digit: 1 - 31
hours (int): Hours digit: 0 - 23
minutes (int): Minutes digit: 0 - 59
seconds (int): Seconds digit: 0 - 59
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_ time 2015 5 12 11 37 53 | Below is the the instruction that describes the task:
### Input:
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for that
element will be used. For example, if you don't pass the year, the current
system year will be used. (Used by set_system_date and set_system_time)
Args:
years (int): Years digit, ie: 2015
months (int): Months digit: 1 - 12
days (int): Days digit: 1 - 31
hours (int): Hours digit: 0 - 23
minutes (int): Minutes digit: 0 - 59
seconds (int): Seconds digit: 0 - 59
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_ time 2015 5 12 11 37 53
### Response:
def set_system_date_time(years=None,
months=None,
days=None,
hours=None,
minutes=None,
seconds=None):
'''
Set the system date and time. Each argument is an element of the date, but
not required. If an element is not passed, the current system value for that
element will be used. For example, if you don't pass the year, the current
system year will be used. (Used by set_system_date and set_system_time)
Args:
years (int): Years digit, ie: 2015
months (int): Months digit: 1 - 12
days (int): Days digit: 1 - 31
hours (int): Hours digit: 0 - 23
minutes (int): Minutes digit: 0 - 59
seconds (int): Seconds digit: 0 - 59
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date_ time 2015 5 12 11 37 53
'''
# Get the current date/time
try:
date_time = win32api.GetLocalTime()
except win32api.error as exc:
(number, context, message) = exc.args
log.error('Failed to get local time')
log.error('nbr: %s', number)
log.error('ctx: %s', context)
log.error('msg: %s', message)
return False
# Check for passed values. If not passed, use current values
if years is None:
years = date_time[0]
if months is None:
months = date_time[1]
if days is None:
days = date_time[3]
if hours is None:
hours = date_time[4]
if minutes is None:
minutes = date_time[5]
if seconds is None:
seconds = date_time[6]
try:
class SYSTEMTIME(ctypes.Structure):
_fields_ = [
('wYear', ctypes.c_int16),
('wMonth', ctypes.c_int16),
('wDayOfWeek', ctypes.c_int16),
('wDay', ctypes.c_int16),
('wHour', ctypes.c_int16),
('wMinute', ctypes.c_int16),
('wSecond', ctypes.c_int16),
('wMilliseconds', ctypes.c_int16)]
system_time = SYSTEMTIME()
system_time.wYear = int(years)
system_time.wMonth = int(months)
system_time.wDay = int(days)
system_time.wHour = int(hours)
system_time.wMinute = int(minutes)
system_time.wSecond = int(seconds)
system_time_ptr = ctypes.pointer(system_time)
succeeded = ctypes.windll.kernel32.SetLocalTime(system_time_ptr)
if succeeded is not 0:
return True
else:
log.error('Failed to set local time')
raise CommandExecutionError(
win32api.FormatMessage(succeeded).rstrip())
except OSError as err:
log.error('Failed to set local time')
raise CommandExecutionError(err) |
def get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):
'''get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description
'''
if regexp == None:
regexp = "."
if description is None:
description = "This is a custom filter generated by the user."
custom = {"description":description,
"regexp":regexp}
# Include extra files?
if include_files is not None:
if not isinstance(include_files,set):
include_files = set(include_files)
custom['include_files'] = include_files
# Skip files?
if skip_files is not None:
if not isinstance(skip_files,set):
skip_files = set(skip_files)
custom['skip_files'] = skip_files
return custom | get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description | Below is the the instruction that describes the task:
### Input:
get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description
### Response:
def get_custom_level(regexp=None,description=None,skip_files=None,include_files=None):
'''get_custom_level will generate a custom level for the user,
based on a regular expression. If used outside the context of tarsum, the user
can generate their own named and described filters.
:param regexp: must be defined, the file filter regular expression
:param description: optional description
'''
if regexp == None:
regexp = "."
if description is None:
description = "This is a custom filter generated by the user."
custom = {"description":description,
"regexp":regexp}
# Include extra files?
if include_files is not None:
if not isinstance(include_files,set):
include_files = set(include_files)
custom['include_files'] = include_files
# Skip files?
if skip_files is not None:
if not isinstance(skip_files,set):
skip_files = set(skip_files)
custom['skip_files'] = skip_files
return custom |
def iter_ruptures(self):
"""
Yield ruptures for the current set of indices
"""
assert self.orig, '%s is not fully initialized' % self
for ridx in range(self.start, self.stop):
if self.orig.rate[ridx]: # ruptures may have have zero rate
rup = self.get_ucerf_rupture(ridx, self.src_filter)
if rup:
yield rup | Yield ruptures for the current set of indices | Below is the the instruction that describes the task:
### Input:
Yield ruptures for the current set of indices
### Response:
def iter_ruptures(self):
"""
Yield ruptures for the current set of indices
"""
assert self.orig, '%s is not fully initialized' % self
for ridx in range(self.start, self.stop):
if self.orig.rate[ridx]: # ruptures may have have zero rate
rup = self.get_ucerf_rupture(ridx, self.src_filter)
if rup:
yield rup |
def merge(self):
"""Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occured during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
startegies set for the merger instance.
"""
self.merged_root = self._recursive_merge(self.root, self.head,
self.update)
if self.conflicts:
raise MergeError('Conflicts Occurred in Merge Process',
self.conflicts) | Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occured during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
startegies set for the merger instance. | Below is the the instruction that describes the task:
### Input:
Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occured during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
startegies set for the merger instance.
### Response:
def merge(self):
"""Populates result members.
Performs the merge algorithm using the specified config and fills in
the members that provide stats about the merging procedure.
Attributes:
merged_root: The result of the merge.
aligned_root, aligned_head, aligned_update: Copies of root, head
and update in which all matched list entities have the same
list index for easier diff viewing.
head_stats, update_stats: Stats for each list field present in the
head or update objects. Instance of
:class:`json_merger.stats.ListMatchStats`
conflicts: List of :class:`json_merger.conflict.Conflict` instances
that occured during the merge.
Raises:
:class:`json_merger.errors.MergeError` : If conflicts occur during
the call.
Example:
>>> from json_merger import Merger
>>> # We compare people by their name
>>> from json_merger.comparator import PrimaryKeyComparator
>>> from json_merger.config import DictMergerOps, UnifierOps
>>> from json_merger.errors import MergeError
>>> # Use this only for doctest :)
>>> from pprint import pprint as pp
>>>
>>> root = {'people': [{'name': 'Jimmy', 'age': 30}]}
>>> head = {'people': [{'name': 'Jimmy', 'age': 31},
... {'name': 'George'}]}
>>> update = {'people': [{'name': 'John'},
... {'name': 'Jimmy', 'age': 32}]}
>>>
>>> class NameComparator(PrimaryKeyComparator):
... # Two objects are the same entitity if they have the
... # same name.
... primary_key_fields = ['name']
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST,
... comparators = {'people': NameComparator})
>>> # We do a merge
>>> try:
... m.merge()
... except MergeError as e:
... # Conflicts are the same thing as the exception content.
... assert e.content == m.conflicts
>>> # This is how the lists are aligned:
>>> pp(m.aligned_root['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 30, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> pp(m.aligned_head['people'], width=60)
['#$PLACEHOLDER$#',
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]
>>> pp(m.aligned_update['people'], width=60)
[{'name': 'John'},
{'age': 32, 'name': 'Jimmy'},
'#$PLACEHOLDER$#']
>>> # This is the end result of the merge:
>>> pp(m.merged_root, width=60)
{'people': [{'name': 'John'},
{'age': 31, 'name': 'Jimmy'},
{'name': 'George'}]}
>>> # With some conflicts:
>>> pp(m.conflicts, width=60)
[('SET_FIELD', ('people', 1, 'age'), 32)]
>>> # And some stats:
>>> pp(m.head_stats[('people',)].in_result)
[{'age': 31, 'name': 'Jimmy'}, {'name': 'George'}]
>>> pp(m.update_stats[('people',)].not_in_result)
[]
Note:
Even if conflicts occur, merged_root, aligned_root, aligned_head
and aligned_update are always populated by following the
startegies set for the merger instance.
"""
self.merged_root = self._recursive_merge(self.root, self.head,
self.update)
if self.conflicts:
raise MergeError('Conflicts Occurred in Merge Process',
self.conflicts) |
def dragMoveEvent(self, event):
"""Reimplement Qt method"""
index = self.indexAt(event.pos())
if index:
dst = self.get_filename(index)
if osp.isdir(dst):
event.acceptProposedAction()
else:
event.ignore()
else:
event.ignore() | Reimplement Qt method | Below is the the instruction that describes the task:
### Input:
Reimplement Qt method
### Response:
def dragMoveEvent(self, event):
"""Reimplement Qt method"""
index = self.indexAt(event.pos())
if index:
dst = self.get_filename(index)
if osp.isdir(dst):
event.acceptProposedAction()
else:
event.ignore()
else:
event.ignore() |
def authkeywords(self):
"""List of author-provided keywords of the abstract."""
keywords = self._json['authkeywords']
if keywords is None:
return None
else:
try:
return [d['$'] for d in keywords['author-keyword']]
except TypeError: # Singleton keyword
return [keywords['author-keyword']['$']] | List of author-provided keywords of the abstract. | Below is the the instruction that describes the task:
### Input:
List of author-provided keywords of the abstract.
### Response:
def authkeywords(self):
"""List of author-provided keywords of the abstract."""
keywords = self._json['authkeywords']
if keywords is None:
return None
else:
try:
return [d['$'] for d in keywords['author-keyword']]
except TypeError: # Singleton keyword
return [keywords['author-keyword']['$']] |
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
offerredPath = self.absFilePath
f = None
fName = _path.basename(offerredPath)
urlPath = _U2.unquote(self.path)
if urlPath == '/':
self.send_response(307)
self.send_header("Location", "/" + _U2.quote(fName))
self.end_headers()
return None
ctype = self.guess_type(offerredPath)
try:
f = open(offerredPath, 'rb')
except OSError:
self.send_error(404, "File not found")
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = _os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Content-Disposition", "attachment")
self.send_header("Last-Modified",
self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise | Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do. | Below is the the instruction that describes the task:
### Input:
Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
### Response:
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
offerredPath = self.absFilePath
f = None
fName = _path.basename(offerredPath)
urlPath = _U2.unquote(self.path)
if urlPath == '/':
self.send_response(307)
self.send_header("Location", "/" + _U2.quote(fName))
self.end_headers()
return None
ctype = self.guess_type(offerredPath)
try:
f = open(offerredPath, 'rb')
except OSError:
self.send_error(404, "File not found")
return None
try:
self.send_response(200)
self.send_header("Content-type", ctype)
fs = _os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Content-Disposition", "attachment")
self.send_header("Last-Modified",
self.date_time_string(fs.st_mtime))
self.end_headers()
return f
except:
f.close()
raise |
def get_server(key, server=MAIN_SERVER, servers=LOAD_SERVERS):
""" given a key, get the IP address of the server that has the pvt key that
owns the name/key
"""
namecoind = NamecoindClient(NAMECOIND_SERVER, NAMECOIND_PORT,
NAMECOIND_USER, NAMECOIND_PASSWD)
info = namecoind.name_show(key)
if 'address' in info:
return check_address(info['address'], server, servers)
response = {}
response["registered"] = False
response["server"] = None
response["ismine"] = False
return response | given a key, get the IP address of the server that has the pvt key that
owns the name/key | Below is the the instruction that describes the task:
### Input:
given a key, get the IP address of the server that has the pvt key that
owns the name/key
### Response:
def get_server(key, server=MAIN_SERVER, servers=LOAD_SERVERS):
""" given a key, get the IP address of the server that has the pvt key that
owns the name/key
"""
namecoind = NamecoindClient(NAMECOIND_SERVER, NAMECOIND_PORT,
NAMECOIND_USER, NAMECOIND_PASSWD)
info = namecoind.name_show(key)
if 'address' in info:
return check_address(info['address'], server, servers)
response = {}
response["registered"] = False
response["server"] = None
response["ismine"] = False
return response |
def instance_from_url(url, user=None):
""" Restore instance from URL """
# XXX: This circular dependency will be removed then filter_queryset_for_user
# will be moved to model manager method
from waldur_core.structure.managers import filter_queryset_for_user
url = clear_url(url)
match = resolve(url)
model = get_model_from_resolve_match(match)
queryset = model.objects.all()
if user is not None:
queryset = filter_queryset_for_user(model.objects.all(), user)
return queryset.get(**match.kwargs) | Restore instance from URL | Below is the the instruction that describes the task:
### Input:
Restore instance from URL
### Response:
def instance_from_url(url, user=None):
""" Restore instance from URL """
# XXX: This circular dependency will be removed then filter_queryset_for_user
# will be moved to model manager method
from waldur_core.structure.managers import filter_queryset_for_user
url = clear_url(url)
match = resolve(url)
model = get_model_from_resolve_match(match)
queryset = model.objects.all()
if user is not None:
queryset = filter_queryset_for_user(model.objects.all(), user)
return queryset.get(**match.kwargs) |
def generate_messages_info(msgs):
"""Add proper formated variable names, initializers and type names to use in templates"""
for msg in msgs:
msg.swift_name = camel_case_from_underscores(msg.name)
msg.formatted_description = ""
if msg.description:
msg.description = " ".join(msg.description.split())
msg.formatted_description = "\n/**\n %s\n*/\n" % " ".join(msg.description.split())
msg.message_description = msg.description.replace('"','\\"')
for field in msg.ordered_fields:
field.swift_name = lower_camel_case_from_underscores(field.name)
field.return_type = swift_types[field.type][0]
# configure fields initializers
if field.enum:
# handle enums
field.return_type = camel_case_from_underscores(field.enum)
field.initial_value = "try data.mavEnumeration(offset: %u)" % field.wire_offset
elif field.array_length > 0:
if field.return_type == "String":
# handle strings
field.initial_value = "data." + swift_types[field.type][2] % (field.wire_offset, field.array_length)
else:
# other array types
field.return_type = "[%s]" % field.return_type
field.initial_value = "try data.mavArray(offset: %u, count: %u)" % (field.wire_offset, field.array_length)
else:
# simple type field
field.initial_value = "try data." + swift_types[field.type][2] % field.wire_offset
field.formatted_description = ""
if field.description:
field.description = " ".join(field.description.split())
field.formatted_description = "\n\t/// " + field.description + "\n"
fields_info = map(lambda field: '("%s", %u, "%s", "%s")' % (field.swift_name, field.wire_offset, field.return_type, field.description.replace('"','\\"')), msg.fields)
msg.fields_info = ", ".join(fields_info)
msgs.sort(key = lambda msg : msg.id) | Add proper formated variable names, initializers and type names to use in templates | Below is the the instruction that describes the task:
### Input:
Add proper formated variable names, initializers and type names to use in templates
### Response:
def generate_messages_info(msgs):
"""Add proper formated variable names, initializers and type names to use in templates"""
for msg in msgs:
msg.swift_name = camel_case_from_underscores(msg.name)
msg.formatted_description = ""
if msg.description:
msg.description = " ".join(msg.description.split())
msg.formatted_description = "\n/**\n %s\n*/\n" % " ".join(msg.description.split())
msg.message_description = msg.description.replace('"','\\"')
for field in msg.ordered_fields:
field.swift_name = lower_camel_case_from_underscores(field.name)
field.return_type = swift_types[field.type][0]
# configure fields initializers
if field.enum:
# handle enums
field.return_type = camel_case_from_underscores(field.enum)
field.initial_value = "try data.mavEnumeration(offset: %u)" % field.wire_offset
elif field.array_length > 0:
if field.return_type == "String":
# handle strings
field.initial_value = "data." + swift_types[field.type][2] % (field.wire_offset, field.array_length)
else:
# other array types
field.return_type = "[%s]" % field.return_type
field.initial_value = "try data.mavArray(offset: %u, count: %u)" % (field.wire_offset, field.array_length)
else:
# simple type field
field.initial_value = "try data." + swift_types[field.type][2] % field.wire_offset
field.formatted_description = ""
if field.description:
field.description = " ".join(field.description.split())
field.formatted_description = "\n\t/// " + field.description + "\n"
fields_info = map(lambda field: '("%s", %u, "%s", "%s")' % (field.swift_name, field.wire_offset, field.return_type, field.description.replace('"','\\"')), msg.fields)
msg.fields_info = ", ".join(fields_info)
msgs.sort(key = lambda msg : msg.id) |
def _is_did(did):
"""Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier().
"""
return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists() | Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier(). | Below is the the instruction that describes the task:
### Input:
Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier().
### Response:
def _is_did(did):
"""Return True if ``did`` is recorded in a local context.
``did``=None is supported and returns False.
A DID can be classified with classify_identifier().
"""
return d1_gmn.app.models.IdNamespace.objects.filter(did=did).exists() |
def apply(self, tokens):
""" Applies the named entity recognizer to the given list of tokens,
where each token is a [word, tag] list.
"""
# Note: we could also scan for patterns, e.g.,
# "my|his|her name is|was *" => NNP-PERS.
i = 0
while i < len(tokens):
w = tokens[i][0].lower()
if RE_ENTITY1.match(w) \
or RE_ENTITY2.match(w) \
or RE_ENTITY3.match(w):
tokens[i][1] = self.tag
if w in self:
for e in self[w]:
# Look ahead to see if successive words match the named entity.
e, tag = (e[:-1], "-"+e[-1].upper()) if e[-1] in self._cmd else (e, "")
b = True
for j, e in enumerate(e):
if i + j >= len(tokens) or tokens[i+j][0].lower() != e:
b = False; break
if b:
for token in tokens[i:i+j+1]:
token[1] = token[1] if token[1].startswith(self.tag) else self.tag
token[1] += tag
i += j
break
i += 1
return tokens | Applies the named entity recognizer to the given list of tokens,
where each token is a [word, tag] list. | Below is the the instruction that describes the task:
### Input:
Applies the named entity recognizer to the given list of tokens,
where each token is a [word, tag] list.
### Response:
def apply(self, tokens):
""" Applies the named entity recognizer to the given list of tokens,
where each token is a [word, tag] list.
"""
# Note: we could also scan for patterns, e.g.,
# "my|his|her name is|was *" => NNP-PERS.
i = 0
while i < len(tokens):
w = tokens[i][0].lower()
if RE_ENTITY1.match(w) \
or RE_ENTITY2.match(w) \
or RE_ENTITY3.match(w):
tokens[i][1] = self.tag
if w in self:
for e in self[w]:
# Look ahead to see if successive words match the named entity.
e, tag = (e[:-1], "-"+e[-1].upper()) if e[-1] in self._cmd else (e, "")
b = True
for j, e in enumerate(e):
if i + j >= len(tokens) or tokens[i+j][0].lower() != e:
b = False; break
if b:
for token in tokens[i:i+j+1]:
token[1] = token[1] if token[1].startswith(self.tag) else self.tag
token[1] += tag
i += j
break
i += 1
return tokens |
def getWindow(title, exact=False):
"""Return Window object if 'title' or its part found in visible windows titles, else return None
Return only 1 window found first
Args:
title: unicode string
exact (bool): True if search only exact match
"""
titles = getWindows()
hwnd = titles.get(title, None)
if not hwnd and not exact:
for k, v in titles.items():
if title in k:
hwnd = v
break
if hwnd:
return Window(hwnd)
else:
return None | Return Window object if 'title' or its part found in visible windows titles, else return None
Return only 1 window found first
Args:
title: unicode string
exact (bool): True if search only exact match | Below is the the instruction that describes the task:
### Input:
Return Window object if 'title' or its part found in visible windows titles, else return None
Return only 1 window found first
Args:
title: unicode string
exact (bool): True if search only exact match
### Response:
def getWindow(title, exact=False):
"""Return Window object if 'title' or its part found in visible windows titles, else return None
Return only 1 window found first
Args:
title: unicode string
exact (bool): True if search only exact match
"""
titles = getWindows()
hwnd = titles.get(title, None)
if not hwnd and not exact:
for k, v in titles.items():
if title in k:
hwnd = v
break
if hwnd:
return Window(hwnd)
else:
return None |
def vals2colors(vals,cmap='GnBu_d',res=100):
"""Maps values to colors
Args:
values (list or list of lists) - list of values to map to colors
cmap (str) - color map (default is 'husl')
res (int) - resolution of the color map (default: 100)
Returns:
list of rgb tuples
"""
# flatten if list of lists
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
# get palette from seaborn
palette = np.array(sns.color_palette(cmap, res))
ranks = np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1
return [tuple(i) for i in palette[ranks, :]] | Maps values to colors
Args:
values (list or list of lists) - list of values to map to colors
cmap (str) - color map (default is 'husl')
res (int) - resolution of the color map (default: 100)
Returns:
list of rgb tuples | Below is the the instruction that describes the task:
### Input:
Maps values to colors
Args:
values (list or list of lists) - list of values to map to colors
cmap (str) - color map (default is 'husl')
res (int) - resolution of the color map (default: 100)
Returns:
list of rgb tuples
### Response:
def vals2colors(vals,cmap='GnBu_d',res=100):
"""Maps values to colors
Args:
values (list or list of lists) - list of values to map to colors
cmap (str) - color map (default is 'husl')
res (int) - resolution of the color map (default: 100)
Returns:
list of rgb tuples
"""
# flatten if list of lists
if any(isinstance(el, list) for el in vals):
vals = list(itertools.chain(*vals))
# get palette from seaborn
palette = np.array(sns.color_palette(cmap, res))
ranks = np.digitize(vals, np.linspace(np.min(vals), np.max(vals)+1, res+1)) - 1
return [tuple(i) for i in palette[ranks, :]] |
def compare(jaide, commands):
""" Perform a show | compare with some set commands.
@param jaide: The jaide connection to the device.
@type jaide: jaide.Jaide object
@param commands: The set commands to send to the device to compare with.
@type commands: str or list
@returns: The output from the device.
@rtype str
"""
output = color("show | compare:\n", 'yel')
return output + color_diffs(jaide.compare_config(commands)) | Perform a show | compare with some set commands.
@param jaide: The jaide connection to the device.
@type jaide: jaide.Jaide object
@param commands: The set commands to send to the device to compare with.
@type commands: str or list
@returns: The output from the device.
@rtype str | Below is the the instruction that describes the task:
### Input:
Perform a show | compare with some set commands.
@param jaide: The jaide connection to the device.
@type jaide: jaide.Jaide object
@param commands: The set commands to send to the device to compare with.
@type commands: str or list
@returns: The output from the device.
@rtype str
### Response:
def compare(jaide, commands):
""" Perform a show | compare with some set commands.
@param jaide: The jaide connection to the device.
@type jaide: jaide.Jaide object
@param commands: The set commands to send to the device to compare with.
@type commands: str or list
@returns: The output from the device.
@rtype str
"""
output = color("show | compare:\n", 'yel')
return output + color_diffs(jaide.compare_config(commands)) |
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Create a project in a PyBossa server. | Below is the the instruction that describes the task:
### Input:
Create a project in a PyBossa server.
### Response:
def _create_project(config):
"""Create a project in a PyBossa server."""
try:
response = config.pbclient.create_project(config.project['name'],
config.project['short_name'],
config.project['description'])
check_api_error(response)
return ("Project: %s created!" % config.project['short_name'])
except exceptions.ConnectionError:
return("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise |
def solve_unchecked(self, sense=None):
"""Solve problem and return result.
The user must manually check the status of the result to determine
whether an optimal solution was found. A :class:`SolverError` may still
be raised if the underlying solver raises an exception.
"""
if sense is not None:
self.set_objective_sense(sense)
self._p.optimize()
self._result = Result(self)
return self._result | Solve problem and return result.
The user must manually check the status of the result to determine
whether an optimal solution was found. A :class:`SolverError` may still
be raised if the underlying solver raises an exception. | Below is the the instruction that describes the task:
### Input:
Solve problem and return result.
The user must manually check the status of the result to determine
whether an optimal solution was found. A :class:`SolverError` may still
be raised if the underlying solver raises an exception.
### Response:
def solve_unchecked(self, sense=None):
"""Solve problem and return result.
The user must manually check the status of the result to determine
whether an optimal solution was found. A :class:`SolverError` may still
be raised if the underlying solver raises an exception.
"""
if sense is not None:
self.set_objective_sense(sense)
self._p.optimize()
self._result = Result(self)
return self._result |
def delete_all_objects(self, nms, async_=False):
"""
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
"""
if nms is None:
nms = self.api.list_object_names(self.name, full_listing=True)
return self.api.bulk_delete(self.name, nms, async_=async_) | Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call | Below is the the instruction that describes the task:
### Input:
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
### Response:
def delete_all_objects(self, nms, async_=False):
"""
Deletes all objects from this container.
By default the call will block until all objects have been deleted. By
passing True for the 'async_' parameter, this method will not block, and
instead return an object that can be used to follow the progress of the
deletion. When deletion is complete the bulk deletion object's
'results' attribute will be populated with the information returned
from the API call. In synchronous mode this is the value that is
returned when the call completes. It is a dictionary with the following
keys:
deleted - the number of objects deleted
not_found - the number of objects not found
status - the HTTP return status code. '200 OK' indicates success
errors - a list of any errors returned by the bulk delete call
"""
if nms is None:
nms = self.api.list_object_names(self.name, full_listing=True)
return self.api.bulk_delete(self.name, nms, async_=async_) |
def nation(self, nation_name, password=None, autologin=None):
"""Setup access to the Nation API with the Nation object
:param nation_name: Name of the nation
:param password: (Optional) password for this nation
:param autologin (Optional) autologin for this nation
:type nation_name: str
:type password: str
:type autologin: str
:returns: Nation Object based off nation_name
:rtype: Nation
"""
return Nation(nation_name, self, password=password, autologin=autologin) | Setup access to the Nation API with the Nation object
:param nation_name: Name of the nation
:param password: (Optional) password for this nation
:param autologin (Optional) autologin for this nation
:type nation_name: str
:type password: str
:type autologin: str
:returns: Nation Object based off nation_name
:rtype: Nation | Below is the the instruction that describes the task:
### Input:
Setup access to the Nation API with the Nation object
:param nation_name: Name of the nation
:param password: (Optional) password for this nation
:param autologin (Optional) autologin for this nation
:type nation_name: str
:type password: str
:type autologin: str
:returns: Nation Object based off nation_name
:rtype: Nation
### Response:
def nation(self, nation_name, password=None, autologin=None):
"""Setup access to the Nation API with the Nation object
:param nation_name: Name of the nation
:param password: (Optional) password for this nation
:param autologin (Optional) autologin for this nation
:type nation_name: str
:type password: str
:type autologin: str
:returns: Nation Object based off nation_name
:rtype: Nation
"""
return Nation(nation_name, self, password=password, autologin=autologin) |
def timedelta_to_str(value, with_microseconds=False):
"""
String representation of datetime.timedelta
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: String representation of datetime.timedelta or None if val is None
:rtype: string/None
:raise: TypeError when val is not timedelta
"""
if value is None:
return None
if not isinstance(value, timedelta):
raise TypeError('value must be a datetime.timedelta object')
hours, remainder = divmod(value.seconds, SECONDS_IN_HOUR)
hours += value.days * HOURS_IN_DAY
minutes, seconds = divmod(remainder, SECONDS_IN_MINUTE)
if with_microseconds:
return '%02d:%02d:%02d.%06d' % (hours, minutes, seconds,
value.microseconds)
else:
return '%02d:%02d:%02d' % (hours, minutes, seconds) | String representation of datetime.timedelta
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: String representation of datetime.timedelta or None if val is None
:rtype: string/None
:raise: TypeError when val is not timedelta | Below is the the instruction that describes the task:
### Input:
String representation of datetime.timedelta
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: String representation of datetime.timedelta or None if val is None
:rtype: string/None
:raise: TypeError when val is not timedelta
### Response:
def timedelta_to_str(value, with_microseconds=False):
"""
String representation of datetime.timedelta
:param value: timedelta to convert
:type value: datetime.timedelta
:param with_microseconds:
:type with_microseconds: bool
:return: String representation of datetime.timedelta or None if val is None
:rtype: string/None
:raise: TypeError when val is not timedelta
"""
if value is None:
return None
if not isinstance(value, timedelta):
raise TypeError('value must be a datetime.timedelta object')
hours, remainder = divmod(value.seconds, SECONDS_IN_HOUR)
hours += value.days * HOURS_IN_DAY
minutes, seconds = divmod(remainder, SECONDS_IN_MINUTE)
if with_microseconds:
return '%02d:%02d:%02d.%06d' % (hours, minutes, seconds,
value.microseconds)
else:
return '%02d:%02d:%02d' % (hours, minutes, seconds) |
def qnormal(mu, sigma, q, random_state):
'''
mu: float or array_like of floats
sigma: float or array_like of floats
q: sample step
random_state: an object of numpy.random.RandomState
'''
return np.round(normal(mu, sigma, random_state) / q) * q | mu: float or array_like of floats
sigma: float or array_like of floats
q: sample step
random_state: an object of numpy.random.RandomState | Below is the the instruction that describes the task:
### Input:
mu: float or array_like of floats
sigma: float or array_like of floats
q: sample step
random_state: an object of numpy.random.RandomState
### Response:
def qnormal(mu, sigma, q, random_state):
'''
mu: float or array_like of floats
sigma: float or array_like of floats
q: sample step
random_state: an object of numpy.random.RandomState
'''
return np.round(normal(mu, sigma, random_state) / q) * q |
def split_path(path_):
"""
Split the requested path into (locale, path).
locale will be empty if it isn't found.
"""
path = path_.lstrip('/')
# Use partitition instead of split since it always returns 3 parts
first, _, rest = path.partition('/')
lang = first.lower()
if lang in settings.LANGUAGE_URL_MAP:
return settings.LANGUAGE_URL_MAP[lang], rest
else:
supported = find_supported(first)
if len(supported):
return supported[0], rest
else:
return '', path | Split the requested path into (locale, path).
locale will be empty if it isn't found. | Below is the the instruction that describes the task:
### Input:
Split the requested path into (locale, path).
locale will be empty if it isn't found.
### Response:
def split_path(path_):
"""
Split the requested path into (locale, path).
locale will be empty if it isn't found.
"""
path = path_.lstrip('/')
# Use partitition instead of split since it always returns 3 parts
first, _, rest = path.partition('/')
lang = first.lower()
if lang in settings.LANGUAGE_URL_MAP:
return settings.LANGUAGE_URL_MAP[lang], rest
else:
supported = find_supported(first)
if len(supported):
return supported[0], rest
else:
return '', path |
def parse_names(cls, expression):
"""Return the list of identifiers used in the expression."""
names = set()
try:
ast_node = ast.parse(expression, "ast")
class Visitor(ast.NodeVisitor):
def visit_Name(self, node):
names.add(node.id)
Visitor().visit(ast_node)
except Exception:
pass
return names | Return the list of identifiers used in the expression. | Below is the the instruction that describes the task:
### Input:
Return the list of identifiers used in the expression.
### Response:
def parse_names(cls, expression):
"""Return the list of identifiers used in the expression."""
names = set()
try:
ast_node = ast.parse(expression, "ast")
class Visitor(ast.NodeVisitor):
def visit_Name(self, node):
names.add(node.id)
Visitor().visit(ast_node)
except Exception:
pass
return names |
def get(self, request, slug):
"""Basic functionality for GET request to view.
"""
matching_datasets = self.generate_matching_datasets(slug)
if matching_datasets is None:
raise Http404("Datasets meeting these criteria do not exist.")
base_context = {
'datasets': matching_datasets,
'num_datasets': matching_datasets.count(),
'page_title': self.generate_page_title(slug),
}
additional_context = self.generate_additional_context(
matching_datasets
)
base_context.update(additional_context)
context = base_context
return render(
request,
self.template_path,
context
) | Basic functionality for GET request to view. | Below is the the instruction that describes the task:
### Input:
Basic functionality for GET request to view.
### Response:
def get(self, request, slug):
"""Basic functionality for GET request to view.
"""
matching_datasets = self.generate_matching_datasets(slug)
if matching_datasets is None:
raise Http404("Datasets meeting these criteria do not exist.")
base_context = {
'datasets': matching_datasets,
'num_datasets': matching_datasets.count(),
'page_title': self.generate_page_title(slug),
}
additional_context = self.generate_additional_context(
matching_datasets
)
base_context.update(additional_context)
context = base_context
return render(
request,
self.template_path,
context
) |
def _init_credentials(self, oauth_token, oauth_token_secret):
"Depending on the state passed in, get self._oauth up and running"
if oauth_token and oauth_token_secret:
if self.verified:
# If provided, this is a fully verified set of
# credentials. Store the oauth_token and secret
# and initialize OAuth around those
self._init_oauth(oauth_token, oauth_token_secret)
else:
# If provided, we are reconstructing an initalized
# (but non-verified) set of public credentials.
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
# This is a brand new set of credentials - we need to generate
# an oauth token so it's available for the url property.
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
url = self.base_url + REQUEST_TOKEN_URL
headers = {'User-Agent': self.user_agent}
response = requests.post(url=url, headers=headers, auth=oauth)
self._process_oauth_response(response) | Depending on the state passed in, get self._oauth up and running | Below is the the instruction that describes the task:
### Input:
Depending on the state passed in, get self._oauth up and running
### Response:
def _init_credentials(self, oauth_token, oauth_token_secret):
"Depending on the state passed in, get self._oauth up and running"
if oauth_token and oauth_token_secret:
if self.verified:
# If provided, this is a fully verified set of
# credentials. Store the oauth_token and secret
# and initialize OAuth around those
self._init_oauth(oauth_token, oauth_token_secret)
else:
# If provided, we are reconstructing an initalized
# (but non-verified) set of public credentials.
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
# This is a brand new set of credentials - we need to generate
# an oauth token so it's available for the url property.
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
url = self.base_url + REQUEST_TOKEN_URL
headers = {'User-Agent': self.user_agent}
response = requests.post(url=url, headers=headers, auth=oauth)
self._process_oauth_response(response) |
def create_transform(self, rule):
"""
Create single transform from rule.
Rule can be string, or {key: value} pair
"""
try:
name, value = next(iter(rule.items()))
except AttributeError:
name, value = rule, None
try:
return self.REGISTRY[name](value, conf=self.conf)
except KeyError:
raise RuntimeError(
"Unknown transform: %r" % (name,)
) | Create single transform from rule.
Rule can be string, or {key: value} pair | Below is the the instruction that describes the task:
### Input:
Create single transform from rule.
Rule can be string, or {key: value} pair
### Response:
def create_transform(self, rule):
"""
Create single transform from rule.
Rule can be string, or {key: value} pair
"""
try:
name, value = next(iter(rule.items()))
except AttributeError:
name, value = rule, None
try:
return self.REGISTRY[name](value, conf=self.conf)
except KeyError:
raise RuntimeError(
"Unknown transform: %r" % (name,)
) |
def density_grid(*args, **kwargs):
"""
Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.). Returns a regular (in
lat-long space) grid of density estimates over a hemispherical surface.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are in
numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points within
a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are in
points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher sigmas
will lead to more smoothing of the resulting density distribution. This
parameter only applies to Kamb-based methods. Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single int
is given, it is interpreted as an NxN grid. If a tuple of ints is given
it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The array
will be normalized to sum to 1, so absolute value of the weights do not
affect the result. Defaults to None.
Returns
-------
xi, yi, zi : 2D arrays
The longitude, latitude and density values of the regularly gridded
density estimates. Longitude and latitude are in radians.
See Also
---------
mplstereonet.StereonetAxes.density_contourf
mplstereonet.StereonetAxes.density_contour
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
"""
def do_nothing(x, y):
return x, y
measurement = kwargs.get('measurement', 'poles')
gridsize = kwargs.get('gridsize', 100)
weights = kwargs.get('weights', None)
try:
gridsize = int(gridsize)
gridsize = (gridsize, gridsize)
except TypeError:
pass
func = {'poles':stereonet_math.pole,
'lines':stereonet_math.line,
'rakes':stereonet_math.rake,
'radians':do_nothing}[measurement]
lon, lat = func(*args)
method = kwargs.get('method', 'exponential_kamb')
sigma = kwargs.get('sigma', 3)
func = {'linear_kamb':_linear_inverse_kamb,
'square_kamb':_square_inverse_kamb,
'schmidt':_schmidt_count,
'kamb':_kamb_count,
'exponential_kamb':_exponential_kamb,
}[method]
lon, lat, z = _count_points(lon, lat, func, sigma, gridsize, weights)
return lon, lat, z | Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.). Returns a regular (in
lat-long space) grid of density estimates over a hemispherical surface.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are in
numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points within
a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are in
points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher sigmas
will lead to more smoothing of the resulting density distribution. This
parameter only applies to Kamb-based methods. Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single int
is given, it is interpreted as an NxN grid. If a tuple of ints is given
it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The array
will be normalized to sum to 1, so absolute value of the weights do not
affect the result. Defaults to None.
Returns
-------
xi, yi, zi : 2D arrays
The longitude, latitude and density values of the regularly gridded
density estimates. Longitude and latitude are in radians.
See Also
---------
mplstereonet.StereonetAxes.density_contourf
mplstereonet.StereonetAxes.density_contour
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909. | Below is the instruction that describes the task:
### Input:
Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.). Returns a regular (in
lat-long space) grid of density estimates over a hemispherical surface.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are in
numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points within
a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are in
points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher sigmas
will lead to more smoothing of the resulting density distribution. This
parameter only applies to Kamb-based methods. Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single int
is given, it is interpreted as an NxN grid. If a tuple of ints is given
it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The array
will be normalized to sum to 1, so absolute value of the weights do not
affect the result. Defaults to None.
Returns
-------
xi, yi, zi : 2D arrays
The longitude, latitude and density values of the regularly gridded
density estimates. Longitude and latitude are in radians.
See Also
---------
mplstereonet.StereonetAxes.density_contourf
mplstereonet.StereonetAxes.density_contour
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
### Response:
def density_grid(*args, **kwargs):
"""
Estimates point density of the given linear orientation measurements
(Interpreted as poles, lines, rakes, or "raw" longitudes and latitudes
based on the `measurement` keyword argument.). Returns a regular (in
lat-long space) grid of density estimates over a hemispherical surface.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for contouring.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
method : string, optional
The method of density estimation to use. Defaults to
``"exponential_kamb"``. May be one of the following:
``"exponential_kamb"`` : Kamb with exponential smoothing
A modified Kamb method using exponential smoothing [1]_. Units are
in numbers of standard deviations by which the density estimate
differs from uniform.
``"linear_kamb"`` : Kamb with linear smoothing
A modified Kamb method using linear smoothing [1]_. Units are in
numbers of standard deviations by which the density estimate
differs from uniform.
``"kamb"`` : Kamb with no smoothing
Kamb's method [2]_ with no smoothing. Units are in numbers of
standard deviations by which the density estimate differs from
uniform.
``"schmidt"`` : 1% counts
The traditional "Schmidt" (a.k.a. 1%) method. Counts points within
a counting circle comprising 1% of the total area of the
hemisphere. Does not take into account sample size. Units are in
points per 1% area.
sigma : int or float, optional
The number of standard deviations defining the expected number of
standard deviations by which a random sample from a uniform
distribution of points would be expected to vary from being evenly
distributed across the hemisphere. This controls the size of the
counting circle, and therefore the degree of smoothing. Higher sigmas
will lead to more smoothing of the resulting density distribution. This
parameter only applies to Kamb-based methods. Defaults to 3.
gridsize : int or 2-item tuple of ints, optional
The size of the grid that the density is estimated on. If a single int
is given, it is interpreted as an NxN grid. If a tuple of ints is given
it is interpreted as (nrows, ncols). Defaults to 100.
weights : array-like, optional
The relative weight to be applied to each input measurement. The array
will be normalized to sum to 1, so absolute value of the weights do not
affect the result. Defaults to None.
Returns
-------
xi, yi, zi : 2D arrays
The longitude, latitude and density values of the regularly gridded
density estimates. Longitude and latitude are in radians.
See Also
---------
mplstereonet.StereonetAxes.density_contourf
mplstereonet.StereonetAxes.density_contour
References
----------
.. [1] Vollmer, 1995. C Program for Automatic Contouring of Spherical
Orientation Data Using a Modified Kamb Method. Computers &
Geosciences, Vol. 21, No. 1, pp. 31--49.
.. [2] Kamb, 1959. Ice Petrofabric Observations from Blue Glacier,
Washington, in Relation to Theory and Experiment. Journal of
Geophysical Research, Vol. 64, No. 11, pp. 1891--1909.
"""
def do_nothing(x, y):
return x, y
measurement = kwargs.get('measurement', 'poles')
gridsize = kwargs.get('gridsize', 100)
weights = kwargs.get('weights', None)
try:
gridsize = int(gridsize)
gridsize = (gridsize, gridsize)
except TypeError:
pass
func = {'poles':stereonet_math.pole,
'lines':stereonet_math.line,
'rakes':stereonet_math.rake,
'radians':do_nothing}[measurement]
lon, lat = func(*args)
method = kwargs.get('method', 'exponential_kamb')
sigma = kwargs.get('sigma', 3)
func = {'linear_kamb':_linear_inverse_kamb,
'square_kamb':_square_inverse_kamb,
'schmidt':_schmidt_count,
'kamb':_kamb_count,
'exponential_kamb':_exponential_kamb,
}[method]
lon, lat, z = _count_points(lon, lat, func, sigma, gridsize, weights)
return lon, lat, z |
def filter_any_above_threshold(
self,
multi_key_fn,
value_dict,
threshold,
default_value=0.0):
"""Like filter_above_threshold but `multi_key_fn` returns multiple
keys and the element is kept if any of them have a value above
the given threshold.
Parameters
----------
multi_key_fn : callable
Given an element of this collection, returns multiple keys
into `value_dict`
value_dict : dict
Dict from keys returned by `extract_key_fn` to float values
threshold : float
Only keep elements whose value in `value_dict` is above this
threshold.
default_value : float
Value to use for elements whose key is not in `value_dict`
"""
def filter_fn(x):
for key in multi_key_fn(x):
value = value_dict.get(key, default_value)
if value > threshold:
return True
return False
return self.filter(filter_fn) | Like filter_above_threshold but `multi_key_fn` returns multiple
keys and the element is kept if any of them have a value above
the given threshold.
Parameters
----------
multi_key_fn : callable
Given an element of this collection, returns multiple keys
into `value_dict`
value_dict : dict
Dict from keys returned by `extract_key_fn` to float values
threshold : float
Only keep elements whose value in `value_dict` is above this
threshold.
default_value : float
Value to use for elements whose key is not in `value_dict` | Below is the instruction that describes the task:
### Input:
Like filter_above_threshold but `multi_key_fn` returns multiple
keys and the element is kept if any of them have a value above
the given threshold.
Parameters
----------
multi_key_fn : callable
Given an element of this collection, returns multiple keys
into `value_dict`
value_dict : dict
Dict from keys returned by `extract_key_fn` to float values
threshold : float
Only keep elements whose value in `value_dict` is above this
threshold.
default_value : float
Value to use for elements whose key is not in `value_dict`
### Response:
def filter_any_above_threshold(
self,
multi_key_fn,
value_dict,
threshold,
default_value=0.0):
"""Like filter_above_threshold but `multi_key_fn` returns multiple
keys and the element is kept if any of them have a value above
the given threshold.
Parameters
----------
multi_key_fn : callable
Given an element of this collection, returns multiple keys
into `value_dict`
value_dict : dict
Dict from keys returned by `extract_key_fn` to float values
threshold : float
Only keep elements whose value in `value_dict` is above this
threshold.
default_value : float
Value to use for elements whose key is not in `value_dict`
"""
def filter_fn(x):
for key in multi_key_fn(x):
value = value_dict.get(key, default_value)
if value > threshold:
return True
return False
return self.filter(filter_fn) |
def is_git_repo():
"""Check whether the current folder is a Git repo."""
cmd = "git", "rev-parse", "--git-dir"
try:
subprocess.run(cmd, stdout=subprocess.DEVNULL, check=True)
return True
except subprocess.CalledProcessError:
return False | Check whether the current folder is a Git repo. | Below is the instruction that describes the task:
### Input:
Check whether the current folder is a Git repo.
### Response:
def is_git_repo():
"""Check whether the current folder is a Git repo."""
cmd = "git", "rev-parse", "--git-dir"
try:
subprocess.run(cmd, stdout=subprocess.DEVNULL, check=True)
return True
except subprocess.CalledProcessError:
return False |
def fasta(self):
"""
Convert the subsampled reads to FASTA format using reformat.sh
"""
logging.info('Converting FASTQ files to FASTA format')
# Create the threads for the analysis
for _ in range(self.cpus):
threads = Thread(target=self.fastathreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name as the FASTA file - the same as the FASTQ, but with .fa file extension
sample[self.analysistype].fasta = \
os.path.splitext(sample[self.analysistype].subsampledfastq)[0] + '.fa'
# Set the system call
sample[self.analysistype].reformatcall = 'reformat.sh in={fastq} out={fasta}'\
.format(fastq=sample[self.analysistype].subsampledfastq,
fasta=sample[self.analysistype].fasta)
# Add the sample to the queue
self.fastaqueue.put(sample)
self.fastaqueue.join() | Convert the subsampled reads to FASTA format using reformat.sh | Below is the instruction that describes the task:
### Input:
Convert the subsampled reads to FASTA format using reformat.sh
### Response:
def fasta(self):
"""
Convert the subsampled reads to FASTA format using reformat.sh
"""
logging.info('Converting FASTQ files to FASTA format')
# Create the threads for the analysis
for _ in range(self.cpus):
threads = Thread(target=self.fastathreads, args=())
threads.setDaemon(True)
threads.start()
with progressbar(self.runmetadata.samples) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA':
# Set the name as the FASTA file - the same as the FASTQ, but with .fa file extension
sample[self.analysistype].fasta = \
os.path.splitext(sample[self.analysistype].subsampledfastq)[0] + '.fa'
# Set the system call
sample[self.analysistype].reformatcall = 'reformat.sh in={fastq} out={fasta}'\
.format(fastq=sample[self.analysistype].subsampledfastq,
fasta=sample[self.analysistype].fasta)
# Add the sample to the queue
self.fastaqueue.put(sample)
self.fastaqueue.join() |
def plotpsd(data, dt, ndivide=1, window=hanning, overlap_half=False, ax=None, **kwargs):
"""Plot PSD (Power Spectral Density).
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).
overlap_half (bool): Split data to half-overlapped regions.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot().
"""
if ax is None:
ax = plt.gca()
vk, psddata = psd(data, dt, ndivide, window, overlap_half)
ax.loglog(vk, psddata, **kwargs)
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('PSD')
ax.legend() | Plot PSD (Power Spectral Density).
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).
overlap_half (bool): Split data to half-overlapped regions.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot(). | Below is the instruction that describes the task:
### Input:
Plot PSD (Power Spectral Density).
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).
overlap_half (bool): Split data to half-overlapped regions.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot().
### Response:
def plotpsd(data, dt, ndivide=1, window=hanning, overlap_half=False, ax=None, **kwargs):
"""Plot PSD (Power Spectral Density).
Args:
data (np.ndarray): Input data.
dt (float): Time between each data.
ndivide (int): Do averaging (split data into ndivide, get psd of each, and average them).
overlap_half (bool): Split data to half-overlapped regions.
ax (matplotlib.axes): Axis the figure is plotted on.
kwargs (optional): Plot options passed to ax.plot().
"""
if ax is None:
ax = plt.gca()
vk, psddata = psd(data, dt, ndivide, window, overlap_half)
ax.loglog(vk, psddata, **kwargs)
ax.set_xlabel('Frequency [Hz]')
ax.set_ylabel('PSD')
ax.legend() |
def get_command_from_module(
command_module,
remote_connection: environ.RemoteConnection
):
"""
Returns the execution command to use for the specified module, which may
be different depending upon remote connection
:param command_module:
:param remote_connection:
:return:
"""
use_remote = (
remote_connection.active and
hasattr(command_module, 'execute_remote')
)
return (
command_module.execute_remote
if use_remote else
command_module.execute
) | Returns the execution command to use for the specified module, which may
be different depending upon remote connection
:param command_module:
:param remote_connection:
:return: | Below is the instruction that describes the task:
### Input:
Returns the execution command to use for the specified module, which may
be different depending upon remote connection
:param command_module:
:param remote_connection:
:return:
### Response:
def get_command_from_module(
command_module,
remote_connection: environ.RemoteConnection
):
"""
Returns the execution command to use for the specified module, which may
be different depending upon remote connection
:param command_module:
:param remote_connection:
:return:
"""
use_remote = (
remote_connection.active and
hasattr(command_module, 'execute_remote')
)
return (
command_module.execute_remote
if use_remote else
command_module.execute
) |
def push_plugin(self, name):
"""
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/pull', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True) | Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful | Below is the instruction that describes the task:
### Input:
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
### Response:
def push_plugin(self, name):
"""
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/pull', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True) |
def w_diffuser_inner(sed_inputs=sed_dict):
"""Return the inner width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return ut.ceil_nearest(w_diffuser_inner_min(sed_inputs).magnitude,
(np.arange(1/16,1/4,1/16)*u.inch).magnitude) | Return the inner width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>> | Below is the instruction that describes the task:
### Input:
Return the inner width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
### Response:
def w_diffuser_inner(sed_inputs=sed_dict):
"""Return the inner width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
A dictionary of all of the constant inputs needed for sedimentation tank
calculations can be found in sed.yaml
Returns
-------
float
Inner width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return ut.ceil_nearest(w_diffuser_inner_min(sed_inputs).magnitude,
(np.arange(1/16,1/4,1/16)*u.inch).magnitude) |
def search_elementnames(self, *substrings: str,
name: str = 'elementnames') -> 'Selection':
"""Return a new selection containing all elements of the current
selection with a name containing at least one of the given substrings.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, _ = prepare_full_example_2()
Pass the (sub)strings as positional arguments and, optionally, the
name of the newly created |Selection| object as a keyword argument:
>>> test = pub.selections.complete.copy('test')
>>> from hydpy import prepare_model
>>> test.search_elementnames('dill', 'lahn_1')
Selection("elementnames",
nodes=(),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
Wrong string specifications result in errors like the following:
>>> test.search_elementnames(['dill', 'lahn_1'])
Traceback (most recent call last):
...
TypeError: While trying to determine the elements of selection \
`test` with names containing at least one of the given substrings \
`['dill', 'lahn_1']`, the following error occurred: 'in <string>' \
requires string as left operand, not list
Method |Selection.select_elementnames| restricts the current selection
to the one determined with the method |Selection.search_elementnames|:
>>> test.select_elementnames('dill', 'lahn_1')
Selection("test",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
On the contrary, the method |Selection.deselect_elementnames|
restricts the current selection to all devices not determined
by the method |Selection.search_elementnames|:
>>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1')
Selection("complete",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_lahn_2", "land_lahn_3",
"stream_lahn_2_lahn_3"))
"""
try:
selection = Selection(name)
for element in self.elements:
for substring in substrings:
if substring in element.name:
selection.elements += element
break
return selection
except BaseException:
values = objecttools.enumeration(substrings)
objecttools.augment_excmessage(
f'While trying to determine the elements of selection '
f'`{self.name}` with names containing at least one '
f'of the given substrings `{values}`') | Return a new selection containing all elements of the current
selection with a name containing at least one of the given substrings.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, _ = prepare_full_example_2()
Pass the (sub)strings as positional arguments and, optionally, the
name of the newly created |Selection| object as a keyword argument:
>>> test = pub.selections.complete.copy('test')
>>> from hydpy import prepare_model
>>> test.search_elementnames('dill', 'lahn_1')
Selection("elementnames",
nodes=(),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
Wrong string specifications result in errors like the following:
>>> test.search_elementnames(['dill', 'lahn_1'])
Traceback (most recent call last):
...
TypeError: While trying to determine the elements of selection \
`test` with names containing at least one of the given substrings \
`['dill', 'lahn_1']`, the following error occurred: 'in <string>' \
requires string as left operand, not list
Method |Selection.select_elementnames| restricts the current selection
to the one determined with the method |Selection.search_elementnames|:
>>> test.select_elementnames('dill', 'lahn_1')
Selection("test",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
On the contrary, the method |Selection.deselect_elementnames|
restricts the current selection to all devices not determined
by the method |Selection.search_elementnames|:
>>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1')
Selection("complete",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_lahn_2", "land_lahn_3",
"stream_lahn_2_lahn_3")) | Below is the instruction that describes the task:
### Input:
Return a new selection containing all elements of the current
selection with a name containing at least one of the given substrings.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, _ = prepare_full_example_2()
Pass the (sub)strings as positional arguments and, optionally, the
name of the newly created |Selection| object as a keyword argument:
>>> test = pub.selections.complete.copy('test')
>>> from hydpy import prepare_model
>>> test.search_elementnames('dill', 'lahn_1')
Selection("elementnames",
nodes=(),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
Wrong string specifications result in errors like the following:
>>> test.search_elementnames(['dill', 'lahn_1'])
Traceback (most recent call last):
...
TypeError: While trying to determine the elements of selection \
`test` with names containing at least one of the given substrings \
`['dill', 'lahn_1']`, the following error occurred: 'in <string>' \
requires string as left operand, not list
Method |Selection.select_elementnames| restricts the current selection
to the one determined with the method |Selection.search_elementnames|:
>>> test.select_elementnames('dill', 'lahn_1')
Selection("test",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
On the contrary, the method |Selection.deselect_elementnames|
restricts the current selection to all devices not determined
by the method |Selection.search_elementnames|:
>>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1')
Selection("complete",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_lahn_2", "land_lahn_3",
"stream_lahn_2_lahn_3"))
### Response:
def search_elementnames(self, *substrings: str,
name: str = 'elementnames') -> 'Selection':
"""Return a new selection containing all elements of the current
selection with a name containing at least one of the given substrings.
>>> from hydpy.core.examples import prepare_full_example_2
>>> hp, pub, _ = prepare_full_example_2()
Pass the (sub)strings as positional arguments and, optionally, the
name of the newly created |Selection| object as a keyword argument:
>>> test = pub.selections.complete.copy('test')
>>> from hydpy import prepare_model
>>> test.search_elementnames('dill', 'lahn_1')
Selection("elementnames",
nodes=(),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
Wrong string specifications result in errors like the following:
>>> test.search_elementnames(['dill', 'lahn_1'])
Traceback (most recent call last):
...
TypeError: While trying to determine the elements of selection \
`test` with names containing at least one of the given substrings \
`['dill', 'lahn_1']`, the following error occurred: 'in <string>' \
requires string as left operand, not list
Method |Selection.select_elementnames| restricts the current selection
to the one determined with the method |Selection.search_elementnames|:
>>> test.select_elementnames('dill', 'lahn_1')
Selection("test",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_dill", "land_lahn_1", "stream_dill_lahn_2",
"stream_lahn_1_lahn_2"))
On the contrary, the method |Selection.deselect_elementnames|
restricts the current selection to all devices not determined
by the method |Selection.search_elementnames|:
>>> pub.selections.complete.deselect_elementnames('dill', 'lahn_1')
Selection("complete",
nodes=("dill", "lahn_1", "lahn_2", "lahn_3"),
elements=("land_lahn_2", "land_lahn_3",
"stream_lahn_2_lahn_3"))
"""
try:
selection = Selection(name)
for element in self.elements:
for substring in substrings:
if substring in element.name:
selection.elements += element
break
return selection
except BaseException:
values = objecttools.enumeration(substrings)
objecttools.augment_excmessage(
f'While trying to determine the elements of selection '
f'`{self.name}` with names containing at least one '
f'of the given substrings `{values}`') |
def posix_props(self, prop, in_group=False):
"""
Insert POSIX properties.
Posix style properties are not as forgiving
as Unicode properties. Case does matter,
and whitespace and '-' and '_' will not be tolerated.
"""
try:
if self.is_bytes or not self.unicode:
pattern = _uniprops.get_posix_property(
prop, (_uniprops.POSIX_BYTES if self.is_bytes else _uniprops.POSIX)
)
else:
pattern = _uniprops.get_posix_property(prop, _uniprops.POSIX_UNICODE)
except Exception:
raise ValueError('Invalid POSIX property!')
if not in_group and not pattern: # pragma: no cover
pattern = '^%s' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
return [pattern] | Insert POSIX properties.
Posix style properties are not as forgiving
as Unicode properties. Case does matter,
and whitespace and '-' and '_' will not be tolerated. | Below is the the instruction that describes the task:
### Input:
Insert POSIX properties.
Posix style properties are not as forgiving
as Unicode properties. Case does matter,
and whitespace and '-' and '_' will not be tolerated.
### Response:
def posix_props(self, prop, in_group=False):
"""
Insert POSIX properties.
Posix style properties are not as forgiving
as Unicode properties. Case does matter,
and whitespace and '-' and '_' will not be tolerated.
"""
try:
if self.is_bytes or not self.unicode:
pattern = _uniprops.get_posix_property(
prop, (_uniprops.POSIX_BYTES if self.is_bytes else _uniprops.POSIX)
)
else:
pattern = _uniprops.get_posix_property(prop, _uniprops.POSIX_UNICODE)
except Exception:
raise ValueError('Invalid POSIX property!')
if not in_group and not pattern: # pragma: no cover
pattern = '^%s' % ('\x00-\xff' if self.is_bytes else _uniprops.UNICODE_RANGE)
return [pattern] |
def store(self, result, filename, pretty=True):
"""
Write a result to the given file.
Parameters
----------
result : memote.MemoteResult
The dictionary structure of results.
filename : str or pathlib.Path
Store results directly to the given filename.
pretty : bool, optional
Whether (default) or not to write JSON in a more legible format.
"""
LOGGER.info("Storing result in '%s'.", filename)
if filename.endswith(".gz"):
with gzip.open(filename, "wb") as file_handle:
file_handle.write(
jsonify(result, pretty=pretty).encode("utf-8")
)
else:
with open(filename, "w", encoding="utf-8") as file_handle:
file_handle.write(jsonify(result, pretty=pretty)) | Write a result to the given file.
Parameters
----------
result : memote.MemoteResult
The dictionary structure of results.
filename : str or pathlib.Path
Store results directly to the given filename.
pretty : bool, optional
Whether (default) or not to write JSON in a more legible format. | Below is the the instruction that describes the task:
### Input:
Write a result to the given file.
Parameters
----------
result : memote.MemoteResult
The dictionary structure of results.
filename : str or pathlib.Path
Store results directly to the given filename.
pretty : bool, optional
Whether (default) or not to write JSON in a more legible format.
### Response:
def store(self, result, filename, pretty=True):
"""
Write a result to the given file.
Parameters
----------
result : memote.MemoteResult
The dictionary structure of results.
filename : str or pathlib.Path
Store results directly to the given filename.
pretty : bool, optional
Whether (default) or not to write JSON in a more legible format.
"""
LOGGER.info("Storing result in '%s'.", filename)
if filename.endswith(".gz"):
with gzip.open(filename, "wb") as file_handle:
file_handle.write(
jsonify(result, pretty=pretty).encode("utf-8")
)
else:
with open(filename, "w", encoding="utf-8") as file_handle:
file_handle.write(jsonify(result, pretty=pretty)) |
def lanczos_op(f, s, order=30):
r"""
Perform the lanczos approximation of the signal s.
Parameters
----------
f: Filter
s : ndarray
Signal to approximate.
order : int
Degree of the lanczos approximation. (default = 30)
Returns
-------
L : ndarray
lanczos approximation of s
"""
G = f.G
Nf = len(f.g)
# To have the right shape for the output array depending on the signal dim
try:
Nv = np.shape(s)[1]
is2d = True
c = np.zeros((G.N*Nf, Nv))
except IndexError:
Nv = 1
is2d = False
c = np.zeros((G.N*Nf))
tmpN = np.arange(G.N, dtype=int)
for j in range(Nv):
if is2d:
V, H, _ = lanczos(G.L.toarray(), order, s[:, j])
else:
V, H, _ = lanczos(G.L.toarray(), order, s)
Eh, Uh = np.linalg.eig(H)
Eh[Eh < 0] = 0
fe = f.evaluate(Eh)
V = np.dot(V, Uh)
for i in range(Nf):
if is2d:
c[tmpN + i*G.N, j] = np.dot(V, fe[:][i] * np.dot(V.T, s[:, j]))
else:
c[tmpN + i*G.N] = np.dot(V, fe[:][i] * np.dot(V.T, s))
return c | r"""
Perform the lanczos approximation of the signal s.
Parameters
----------
f: Filter
s : ndarray
Signal to approximate.
order : int
Degree of the lanczos approximation. (default = 30)
Returns
-------
L : ndarray
lanczos approximation of s | Below is the the instruction that describes the task:
### Input:
r"""
Perform the lanczos approximation of the signal s.
Parameters
----------
f: Filter
s : ndarray
Signal to approximate.
order : int
Degree of the lanczos approximation. (default = 30)
Returns
-------
L : ndarray
lanczos approximation of s
### Response:
def lanczos_op(f, s, order=30):
r"""
Perform the lanczos approximation of the signal s.
Parameters
----------
f: Filter
s : ndarray
Signal to approximate.
order : int
Degree of the lanczos approximation. (default = 30)
Returns
-------
L : ndarray
lanczos approximation of s
"""
G = f.G
Nf = len(f.g)
# To have the right shape for the output array depending on the signal dim
try:
Nv = np.shape(s)[1]
is2d = True
c = np.zeros((G.N*Nf, Nv))
except IndexError:
Nv = 1
is2d = False
c = np.zeros((G.N*Nf))
tmpN = np.arange(G.N, dtype=int)
for j in range(Nv):
if is2d:
V, H, _ = lanczos(G.L.toarray(), order, s[:, j])
else:
V, H, _ = lanczos(G.L.toarray(), order, s)
Eh, Uh = np.linalg.eig(H)
Eh[Eh < 0] = 0
fe = f.evaluate(Eh)
V = np.dot(V, Uh)
for i in range(Nf):
if is2d:
c[tmpN + i*G.N, j] = np.dot(V, fe[:][i] * np.dot(V.T, s[:, j]))
else:
c[tmpN + i*G.N] = np.dot(V, fe[:][i] * np.dot(V.T, s))
return c |
def contact_get_common_groups(self, contact_id):
"""
Returns groups common between a user and the contact with given id.
:return: Contact or Error
:rtype: Contact
"""
for group in self.wapi_functions.getCommonGroups(contact_id):
yield factory_chat(group, self) | Returns groups common between a user and the contact with given id.
:return: Contact or Error
:rtype: Contact | Below is the the instruction that describes the task:
### Input:
Returns groups common between a user and the contact with given id.
:return: Contact or Error
:rtype: Contact
### Response:
def contact_get_common_groups(self, contact_id):
"""
Returns groups common between a user and the contact with given id.
:return: Contact or Error
:rtype: Contact
"""
for group in self.wapi_functions.getCommonGroups(contact_id):
yield factory_chat(group, self) |
def conf_int(self, alpha=0.05, **kwargs):
r"""Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method'
"""
return self.arima_res_.conf_int(alpha=alpha, **kwargs) | r"""Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method' | Below is the the instruction that describes the task:
### Input:
r"""Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method'
### Response:
def conf_int(self, alpha=0.05, **kwargs):
r"""Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method'
"""
return self.arima_res_.conf_int(alpha=alpha, **kwargs) |
def do_GET(self):
"""Dispatching logic. There are three paths defined:
/ - Display an empty form asking for an identity URL to
verify
/verify - Handle form submission, initiating OpenID verification
/process - Handle a redirect from an OpenID server
Any other path gets a 404 response. This function also parses
the query parameters.
If an exception occurs in this function, a traceback is
written to the requesting browser.
"""
try:
self.parsed_uri = urlparse.urlparse(self.path)
self.query = {}
for k, v in cgi.parse_qsl(self.parsed_uri[4]):
self.query[k] = v.decode('utf-8')
path = self.parsed_uri[2]
if path == '/':
self.render()
elif path == '/verify':
self.doVerify()
elif path == '/process':
self.doProcess()
elif path == '/affiliate':
self.doAffiliate()
else:
self.notFound()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.send_response(500)
self.send_header('Content-type', 'text/html')
self.setSessionCookie()
self.end_headers()
self.wfile.write(cgitb.html(sys.exc_info(), context=10)) | Dispatching logic. There are three paths defined:
/ - Display an empty form asking for an identity URL to
verify
/verify - Handle form submission, initiating OpenID verification
/process - Handle a redirect from an OpenID server
Any other path gets a 404 response. This function also parses
the query parameters.
If an exception occurs in this function, a traceback is
written to the requesting browser. | Below is the the instruction that describes the task:
### Input:
Dispatching logic. There are three paths defined:
/ - Display an empty form asking for an identity URL to
verify
/verify - Handle form submission, initiating OpenID verification
/process - Handle a redirect from an OpenID server
Any other path gets a 404 response. This function also parses
the query parameters.
If an exception occurs in this function, a traceback is
written to the requesting browser.
### Response:
def do_GET(self):
"""Dispatching logic. There are three paths defined:
/ - Display an empty form asking for an identity URL to
verify
/verify - Handle form submission, initiating OpenID verification
/process - Handle a redirect from an OpenID server
Any other path gets a 404 response. This function also parses
the query parameters.
If an exception occurs in this function, a traceback is
written to the requesting browser.
"""
try:
self.parsed_uri = urlparse.urlparse(self.path)
self.query = {}
for k, v in cgi.parse_qsl(self.parsed_uri[4]):
self.query[k] = v.decode('utf-8')
path = self.parsed_uri[2]
if path == '/':
self.render()
elif path == '/verify':
self.doVerify()
elif path == '/process':
self.doProcess()
elif path == '/affiliate':
self.doAffiliate()
else:
self.notFound()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.send_response(500)
self.send_header('Content-type', 'text/html')
self.setSessionCookie()
self.end_headers()
self.wfile.write(cgitb.html(sys.exc_info(), context=10)) |
def get_display_name(self, room=None):
"""Get this user's display name.
Args:
room (Room): Optional. When specified, return the display name of the user
in this room.
Returns:
The display name. Defaults to the user ID if not set.
"""
if room:
try:
return room.members_displaynames[self.user_id]
except KeyError:
return self.user_id
if not self.displayname:
self.displayname = self.api.get_display_name(self.user_id)
return self.displayname or self.user_id | Get this user's display name.
Args:
room (Room): Optional. When specified, return the display name of the user
in this room.
Returns:
The display name. Defaults to the user ID if not set. | Below is the the instruction that describes the task:
### Input:
Get this user's display name.
Args:
room (Room): Optional. When specified, return the display name of the user
in this room.
Returns:
The display name. Defaults to the user ID if not set.
### Response:
def get_display_name(self, room=None):
"""Get this user's display name.
Args:
room (Room): Optional. When specified, return the display name of the user
in this room.
Returns:
The display name. Defaults to the user ID if not set.
"""
if room:
try:
return room.members_displaynames[self.user_id]
except KeyError:
return self.user_id
if not self.displayname:
self.displayname = self.api.get_display_name(self.user_id)
return self.displayname or self.user_id |
def repay_loan(self, order_id, amount):
"""
归还借贷
:param order_id:
:param amount:
:return:
"""
params = {'order-id': order_id, 'amount': amount}
path = f'/v1/margin/orders/{order_id}/repay'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper | 归还借贷
:param order_id:
:param amount:
:return: | Below is the the instruction that describes the task:
### Input:
归还借贷
:param order_id:
:param amount:
:return:
### Response:
def repay_loan(self, order_id, amount):
"""
归还借贷
:param order_id:
:param amount:
:return:
"""
params = {'order-id': order_id, 'amount': amount}
path = f'/v1/margin/orders/{order_id}/repay'
def _wrapper(_func):
@wraps(_func)
def handle():
_func(api_key_post(params, path))
return handle
return _wrapper |
def plot_mv_storage_integration(self, **kwargs):
"""
Plots storage position in MV grid of integrated storages.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
"""
if self.network.pypsa is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
node_color='storage_integration',
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', ''))
else:
if self.network.pypsa is None:
logging.warning("pypsa representation of MV grid needed to "
"plot storage integration in MV grid.") | Plots storage position in MV grid of integrated storages.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`. | Below is the the instruction that describes the task:
### Input:
Plots storage position in MV grid of integrated storages.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
### Response:
def plot_mv_storage_integration(self, **kwargs):
"""
Plots storage position in MV grid of integrated storages.
For more information see :func:`edisgo.tools.plots.mv_grid_topology`.
"""
if self.network.pypsa is not None:
plots.mv_grid_topology(
self.network.pypsa, self.network.config,
node_color='storage_integration',
filename=kwargs.get('filename', None),
grid_district_geom=kwargs.get('grid_district_geom', True),
background_map=kwargs.get('background_map', True),
xlim=kwargs.get('xlim', None), ylim=kwargs.get('ylim', None),
title=kwargs.get('title', ''))
else:
if self.network.pypsa is None:
logging.warning("pypsa representation of MV grid needed to "
"plot storage integration in MV grid.") |
def getitem(lst, indices):
"""Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists.
"""
if not indices:
return lst
i, indices = indices[0], indices[1:]
item = list.__getitem__(lst, i)
if isinstance(i, int):
return getitem(item, indices)
# Empty slice: check if all subsequent indices are in range for the
# full slice, raise IndexError otherwise. This is NumPy's behavior.
if not item:
if lst:
_ = getitem(lst, (slice(None),) + indices)
elif any(isinstance(k, int) for k in indices):
raise IndexError
return []
return [getitem(x, indices) for x in item] | Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists. | Below is the the instruction that describes the task:
### Input:
Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists.
### Response:
def getitem(lst, indices):
"""Definition for multidimensional slicing and indexing on arbitrarily
shaped nested lists.
"""
if not indices:
return lst
i, indices = indices[0], indices[1:]
item = list.__getitem__(lst, i)
if isinstance(i, int):
return getitem(item, indices)
# Empty slice: check if all subsequent indices are in range for the
# full slice, raise IndexError otherwise. This is NumPy's behavior.
if not item:
if lst:
_ = getitem(lst, (slice(None),) + indices)
elif any(isinstance(k, int) for k in indices):
raise IndexError
return []
return [getitem(x, indices) for x in item] |
def _get_pitcher(self, pitcher):
"""
get pitcher object
:param pitcher: Beautifulsoup object(pitcher element)
:return: pitcher(dict)
"""
values = OrderedDict()
player = self.players.rosters.get(pitcher.get('id'))
values['pos'] = pitcher.get('pos', MlbamConst.UNKNOWN_SHORT)
values['id'] = pitcher.get('id', MlbamConst.UNKNOWN_SHORT)
values['first'] = player.first
values['last'] = player.last
values['box_name'] = player.box_name
values['rl'] = player.rl
values['bats'] = player.bats
values['out'] = pitcher.get('out', MlbamConst.UNKNOWN_SHORT)
values['bf'] = pitcher.get('bf', MlbamConst.UNKNOWN_SHORT)
return values | get pitcher object
:param pitcher: Beautifulsoup object(pitcher element)
:return: pitcher(dict) | Below is the the instruction that describes the task:
### Input:
get pitcher object
:param pitcher: Beautifulsoup object(pitcher element)
:return: pitcher(dict)
### Response:
def _get_pitcher(self, pitcher):
"""
get pitcher object
:param pitcher: Beautifulsoup object(pitcher element)
:return: pitcher(dict)
"""
values = OrderedDict()
player = self.players.rosters.get(pitcher.get('id'))
values['pos'] = pitcher.get('pos', MlbamConst.UNKNOWN_SHORT)
values['id'] = pitcher.get('id', MlbamConst.UNKNOWN_SHORT)
values['first'] = player.first
values['last'] = player.last
values['box_name'] = player.box_name
values['rl'] = player.rl
values['bats'] = player.bats
values['out'] = pitcher.get('out', MlbamConst.UNKNOWN_SHORT)
values['bf'] = pitcher.get('bf', MlbamConst.UNKNOWN_SHORT)
return values |
def y0(x, context=None):
"""
Return the value of the second kind Bessel function of order 0 at x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_y0,
(BigFloat._implicit_convert(x),),
context,
) | Return the value of the second kind Bessel function of order 0 at x. | Below is the the instruction that describes the task:
### Input:
Return the value of the second kind Bessel function of order 0 at x.
### Response:
def y0(x, context=None):
"""
Return the value of the second kind Bessel function of order 0 at x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_y0,
(BigFloat._implicit_convert(x),),
context,
) |
def install(self, version, upgrade=False):
"""
Installs Poetry in $POETRY_HOME.
"""
print("Installing version: " + colorize("info", version))
self.make_lib(version)
self.make_bin()
self.make_env()
self.update_path()
return 0 | Installs Poetry in $POETRY_HOME. | Below is the the instruction that describes the task:
### Input:
Installs Poetry in $POETRY_HOME.
### Response:
def install(self, version, upgrade=False):
"""
Installs Poetry in $POETRY_HOME.
"""
print("Installing version: " + colorize("info", version))
self.make_lib(version)
self.make_bin()
self.make_env()
self.update_path()
return 0 |
def cli(env, identifier, body):
"""Adds an update to an existing ticket."""
mgr = SoftLayer.TicketManager(env.client)
ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
if body is None:
body = click.edit('\n\n' + ticket.TEMPLATE_MSG)
mgr.update_ticket(ticket_id=ticket_id, body=body)
env.fout("Ticket Updated!") | Adds an update to an existing ticket. | Below is the the instruction that describes the task:
### Input:
Adds an update to an existing ticket.
### Response:
def cli(env, identifier, body):
"""Adds an update to an existing ticket."""
mgr = SoftLayer.TicketManager(env.client)
ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
if body is None:
body = click.edit('\n\n' + ticket.TEMPLATE_MSG)
mgr.update_ticket(ticket_id=ticket_id, body=body)
env.fout("Ticket Updated!") |
def remove_file_no_raise(file_name, config):
"""Removes file from disk if exception is raised."""
# The removal can be disabled by the config for debugging purposes.
if config.keep_xml:
return True
try:
if os.path.exists(file_name):
os.remove(file_name)
except IOError as error:
loggers.root.error(
"Error occurred while removing temporary created file('%s'): %s",
file_name, str(error)) | Removes file from disk if exception is raised. | Below is the the instruction that describes the task:
### Input:
Removes file from disk if exception is raised.
### Response:
def remove_file_no_raise(file_name, config):
"""Removes file from disk if exception is raised."""
# The removal can be disabled by the config for debugging purposes.
if config.keep_xml:
return True
try:
if os.path.exists(file_name):
os.remove(file_name)
except IOError as error:
loggers.root.error(
"Error occurred while removing temporary created file('%s'): %s",
file_name, str(error)) |
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
BDEFileEntry: file entry or None.
"""
return bde_file_entry.BDEFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True) | Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
BDEFileEntry: file entry or None. | Below is the the instruction that describes the task:
### Input:
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
BDEFileEntry: file entry or None.
### Response:
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
BDEFileEntry: file entry or None.
"""
return bde_file_entry.BDEFileEntry(
self._resolver_context, self, path_spec, is_root=True, is_virtual=True) |
def features(self):
"""
The set of features supported by this MUC. This may vary depending on
features exported by the MUC service, so be sure to check this for each
individual MUC.
"""
return {
aioxmpp.im.conversation.ConversationFeature.BAN,
aioxmpp.im.conversation.ConversationFeature.BAN_WITH_KICK,
aioxmpp.im.conversation.ConversationFeature.KICK,
aioxmpp.im.conversation.ConversationFeature.SEND_MESSAGE,
aioxmpp.im.conversation.ConversationFeature.SEND_MESSAGE_TRACKED,
aioxmpp.im.conversation.ConversationFeature.SET_TOPIC,
aioxmpp.im.conversation.ConversationFeature.SET_NICK,
aioxmpp.im.conversation.ConversationFeature.INVITE,
aioxmpp.im.conversation.ConversationFeature.INVITE_DIRECT,
} | The set of features supported by this MUC. This may vary depending on
features exported by the MUC service, so be sure to check this for each
individual MUC. | Below is the the instruction that describes the task:
### Input:
The set of features supported by this MUC. This may vary depending on
features exported by the MUC service, so be sure to check this for each
individual MUC.
### Response:
def features(self):
"""
The set of features supported by this MUC. This may vary depending on
features exported by the MUC service, so be sure to check this for each
individual MUC.
"""
return {
aioxmpp.im.conversation.ConversationFeature.BAN,
aioxmpp.im.conversation.ConversationFeature.BAN_WITH_KICK,
aioxmpp.im.conversation.ConversationFeature.KICK,
aioxmpp.im.conversation.ConversationFeature.SEND_MESSAGE,
aioxmpp.im.conversation.ConversationFeature.SEND_MESSAGE_TRACKED,
aioxmpp.im.conversation.ConversationFeature.SET_TOPIC,
aioxmpp.im.conversation.ConversationFeature.SET_NICK,
aioxmpp.im.conversation.ConversationFeature.INVITE,
aioxmpp.im.conversation.ConversationFeature.INVITE_DIRECT,
} |
def match(self, request):
"""
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
"""
# If mock already expired, fail it
if self._times <= 0:
raise PookExpiredMock('Mock expired')
# Trigger mock filters
for test in self.filters:
if not test(request, self):
return False, []
# Trigger mock mappers
for mapper in self.mappers:
request = mapper(request, self)
if not request:
raise ValueError('map function must return a request object')
# Match incoming request against registered mock matchers
matches, errors = self.matchers.match(request)
# If not matched, return False
if not matches:
return False, errors
# Register matched request for further inspecion and reference
self._calls.append(request)
# Increase mock call counter
self._matches += 1
if not self._persist:
self._times -= 1
# Raise simulated error
if self._error:
raise self._error
# Trigger callback when matched
for callback in self.callbacks:
callback(request, self)
return True, [] | Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions. | Below is the the instruction that describes the task:
### Input:
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
### Response:
def match(self, request):
"""
Matches an outgoing HTTP request against the current mock matchers.
This method acts like a delegator to `pook.MatcherEngine`.
Arguments:
request (pook.Request): request instance to match.
Raises:
Exception: if the mock has an exception defined.
Returns:
tuple(bool, list[Exception]): ``True`` if the mock matches
the outgoing HTTP request, otherwise ``False``. Also returns
an optional list of error exceptions.
"""
# If mock already expired, fail it
if self._times <= 0:
raise PookExpiredMock('Mock expired')
# Trigger mock filters
for test in self.filters:
if not test(request, self):
return False, []
# Trigger mock mappers
for mapper in self.mappers:
request = mapper(request, self)
if not request:
raise ValueError('map function must return a request object')
# Match incoming request against registered mock matchers
matches, errors = self.matchers.match(request)
# If not matched, return False
if not matches:
return False, errors
# Register matched request for further inspecion and reference
self._calls.append(request)
# Increase mock call counter
self._matches += 1
if not self._persist:
self._times -= 1
# Raise simulated error
if self._error:
raise self._error
# Trigger callback when matched
for callback in self.callbacks:
callback(request, self)
return True, [] |
def validate_time_inversion(self):
"""
Check time inversion of the time range.
:raises ValueError:
If |attr_start_datetime| is
bigger than |attr_end_datetime|.
:raises TypeError:
Any one of |attr_start_datetime| and |attr_end_datetime|,
or both is inappropriate datetime value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:00:00+0900")
try:
time_range.validate_time_inversion()
except ValueError:
print "time inversion"
:Output:
.. parsed-literal::
time inversion
"""
if not self.is_set():
# for python2/3 compatibility
raise TypeError
if self.start_datetime > self.end_datetime:
raise ValueError(
"time inversion found: {:s} > {:s}".format(
str(self.start_datetime), str(self.end_datetime)
)
) | Check time inversion of the time range.
:raises ValueError:
If |attr_start_datetime| is
bigger than |attr_end_datetime|.
:raises TypeError:
Any one of |attr_start_datetime| and |attr_end_datetime|,
or both is inappropriate datetime value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:00:00+0900")
try:
time_range.validate_time_inversion()
except ValueError:
print "time inversion"
:Output:
.. parsed-literal::
time inversion | Below is the the instruction that describes the task:
### Input:
Check time inversion of the time range.
:raises ValueError:
If |attr_start_datetime| is
bigger than |attr_end_datetime|.
:raises TypeError:
Any one of |attr_start_datetime| and |attr_end_datetime|,
or both is inappropriate datetime value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:00:00+0900")
try:
time_range.validate_time_inversion()
except ValueError:
print "time inversion"
:Output:
.. parsed-literal::
time inversion
### Response:
def validate_time_inversion(self):
"""
Check time inversion of the time range.
:raises ValueError:
If |attr_start_datetime| is
bigger than |attr_end_datetime|.
:raises TypeError:
Any one of |attr_start_datetime| and |attr_end_datetime|,
or both is inappropriate datetime value.
:Sample Code:
.. code:: python
from datetimerange import DateTimeRange
time_range = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:00:00+0900")
try:
time_range.validate_time_inversion()
except ValueError:
print "time inversion"
:Output:
.. parsed-literal::
time inversion
"""
if not self.is_set():
# for python2/3 compatibility
raise TypeError
if self.start_datetime > self.end_datetime:
raise ValueError(
"time inversion found: {:s} > {:s}".format(
str(self.start_datetime), str(self.end_datetime)
)
) |
def compute_batch(self, duplicate_manager=None, context_manager=None):
"""
Computes the elements of the batch sequentially by penalizing the acquisition.
"""
from ...acquisitions import AcquisitionLP
assert isinstance(self.acquisition, AcquisitionLP)
self.acquisition.update_batches(None,None,None)
# --- GET first element in the batch
X_batch = self.acquisition.optimize()[0]
k=1
if self.batch_size >1:
# ---------- Approximate the constants of the the method
L = estimate_L(self.acquisition.model.model,self.acquisition.space.get_bounds())
Min = self.acquisition.model.model.Y.min()
# --- GET the remaining elements
while k<self.batch_size:
self.acquisition.update_batches(X_batch,L,Min)
new_sample = self.acquisition.optimize()[0]
X_batch = np.vstack((X_batch,new_sample))
k +=1
# --- Back to the non-penalized acquisition
self.acquisition.update_batches(None,None,None)
return X_batch | Computes the elements of the batch sequentially by penalizing the acquisition. | Below is the the instruction that describes the task:
### Input:
Computes the elements of the batch sequentially by penalizing the acquisition.
### Response:
def compute_batch(self, duplicate_manager=None, context_manager=None):
"""
Computes the elements of the batch sequentially by penalizing the acquisition.
"""
from ...acquisitions import AcquisitionLP
assert isinstance(self.acquisition, AcquisitionLP)
self.acquisition.update_batches(None,None,None)
# --- GET first element in the batch
X_batch = self.acquisition.optimize()[0]
k=1
if self.batch_size >1:
# ---------- Approximate the constants of the the method
L = estimate_L(self.acquisition.model.model,self.acquisition.space.get_bounds())
Min = self.acquisition.model.model.Y.min()
# --- GET the remaining elements
while k<self.batch_size:
self.acquisition.update_batches(X_batch,L,Min)
new_sample = self.acquisition.optimize()[0]
X_batch = np.vstack((X_batch,new_sample))
k +=1
# --- Back to the non-penalized acquisition
self.acquisition.update_batches(None,None,None)
return X_batch |
def with_active_flag(self):
"""
A query set where each result is annotated with an 'is_active' field that indicates
if it's the most recent entry for that combination of keys.
"""
if self.model.KEY_FIELDS:
return self.get_queryset().annotate(
is_active=models.ExpressionWrapper(
models.Q(pk__in=self._current_ids_subquery()),
output_field=models.IntegerField(),
)
)
return self.get_queryset().annotate(
is_active=models.ExpressionWrapper(
models.Q(pk=self.model.current().pk),
output_field=models.IntegerField(),
)
) | A query set where each result is annotated with an 'is_active' field that indicates
if it's the most recent entry for that combination of keys. | Below is the the instruction that describes the task:
### Input:
A query set where each result is annotated with an 'is_active' field that indicates
if it's the most recent entry for that combination of keys.
### Response:
def with_active_flag(self):
"""
A query set where each result is annotated with an 'is_active' field that indicates
if it's the most recent entry for that combination of keys.
"""
if self.model.KEY_FIELDS:
return self.get_queryset().annotate(
is_active=models.ExpressionWrapper(
models.Q(pk__in=self._current_ids_subquery()),
output_field=models.IntegerField(),
)
)
return self.get_queryset().annotate(
is_active=models.ExpressionWrapper(
models.Q(pk=self.model.current().pk),
output_field=models.IntegerField(),
)
) |
def login(request, next_page=None, required=False, gateway=False):
"""
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
"""
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False)) | Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response | Below is the the instruction that describes the task:
### Input:
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
### Response:
def login(request, next_page=None, required=False, gateway=False):
"""
Forwards to CAS login URL or verifies CAS ticket
:param: request RequestObj
:param: next_page Next page to redirect after login
:param: required
:param: gateway Gatewayed response
"""
if not next_page:
next_page = _redirect_url(request)
try:
# use callable for pre-django 2.0
is_authenticated = request.user.is_authenticated()
except TypeError:
is_authenticated = request.user.is_authenticated
if is_authenticated:
return HttpResponseRedirect(next_page)
ticket = request.GET.get('ticket')
if gateway:
service = _service_url(request, next_page, True)
else:
service = _service_url(request, next_page, False)
if ticket:
user = auth.authenticate(ticket=ticket, service=service)
if user is not None:
auth.login(request, user)
if settings.CAS_PROXY_CALLBACK:
proxy_callback(request)
return HttpResponseRedirect(next_page)
elif settings.CAS_RETRY_LOGIN or required:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False))
else:
logger.warning('User has a valid ticket but not a valid session')
# Has ticket, not session
if gateway:
# Gatewayed responses should nto redirect.
return False
if getattr(settings, 'CAS_CUSTOM_FORBIDDEN'):
return HttpResponseRedirect(reverse(settings.CAS_CUSTOM_FORBIDDEN) + "?" + request.META['QUERY_STRING'])
else:
error = "<h1>Forbidden</h1><p>Login failed.</p>"
return HttpResponseForbidden(error)
else:
if gateway:
return HttpResponseRedirect(_login_url(service, ticket, True))
else:
return HttpResponseRedirect(_login_url(service, ticket, False)) |
def cached_object(self, path, compute_fn):
"""
If `cached_object` has already been called for a value of `path` in this
running Python instance, then it should have a cached value in the
_memory_cache; return that value.
If this function was never called before with a particular value of
`path`, then call compute_fn, and pickle it to `path`.
If `path` already exists, unpickle it and store that value in
_memory_cache.
"""
if path in self._memory_cache:
return self._memory_cache[path]
if exists(path) and not self.is_empty(path):
obj = load_pickle(path)
else:
obj = compute_fn()
dump_pickle(obj, path)
self._memory_cache[path] = obj
return obj | If `cached_object` has already been called for a value of `path` in this
running Python instance, then it should have a cached value in the
_memory_cache; return that value.
If this function was never called before with a particular value of
`path`, then call compute_fn, and pickle it to `path`.
If `path` already exists, unpickle it and store that value in
_memory_cache. | Below is the the instruction that describes the task:
### Input:
If `cached_object` has already been called for a value of `path` in this
running Python instance, then it should have a cached value in the
_memory_cache; return that value.
If this function was never called before with a particular value of
`path`, then call compute_fn, and pickle it to `path`.
If `path` already exists, unpickle it and store that value in
_memory_cache.
### Response:
def cached_object(self, path, compute_fn):
"""
If `cached_object` has already been called for a value of `path` in this
running Python instance, then it should have a cached value in the
_memory_cache; return that value.
If this function was never called before with a particular value of
`path`, then call compute_fn, and pickle it to `path`.
If `path` already exists, unpickle it and store that value in
_memory_cache.
"""
if path in self._memory_cache:
return self._memory_cache[path]
if exists(path) and not self.is_empty(path):
obj = load_pickle(path)
else:
obj = compute_fn()
dump_pickle(obj, path)
self._memory_cache[path] = obj
return obj |
def immediateAssignmentReject():
"""IMMEDIATE ASSIGNMENT REJECT Section 9.1.20"""
a = L2PseudoLength(l2pLength=0x13)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x3a) # 00111010
d = PageModeAndSpareHalfOctets()
f = RequestReference()
g = WaitIndication()
h = RequestReference()
i = WaitIndication()
j = RequestReference()
k = WaitIndication()
l = RequestReference()
m = WaitIndication()
n = IraRestOctets()
packet = a / b / c / d / f / g / h / i / j / k / l / m / n
return packet | IMMEDIATE ASSIGNMENT REJECT Section 9.1.20 | Below is the the instruction that describes the task:
### Input:
IMMEDIATE ASSIGNMENT REJECT Section 9.1.20
### Response:
def immediateAssignmentReject():
"""IMMEDIATE ASSIGNMENT REJECT Section 9.1.20"""
a = L2PseudoLength(l2pLength=0x13)
b = TpPd(pd=0x6)
c = MessageType(mesType=0x3a) # 00111010
d = PageModeAndSpareHalfOctets()
f = RequestReference()
g = WaitIndication()
h = RequestReference()
i = WaitIndication()
j = RequestReference()
k = WaitIndication()
l = RequestReference()
m = WaitIndication()
n = IraRestOctets()
packet = a / b / c / d / f / g / h / i / j / k / l / m / n
return packet |
def load_post(self, wp_post_id):
"""
Refresh local content for a single post from the the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object
"""
path = "sites/{}/posts/{}".format(self.site_id, wp_post_id)
response = self.get(path)
if response.ok and response.text:
api_post = response.json()
self.get_ref_data_map(bulk_mode=False)
self.load_wp_post(api_post, bulk_mode=False)
# the post should exist in the db now, so return it so that callers can work with it
try:
post = Post.objects.get(site_id=self.site_id, wp_id=wp_post_id)
except Exception as ex:
logger.exception("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, ex.message))
else:
return post
else:
logger.warning("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, response.text)) | Refresh local content for a single post from the the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object | Below is the the instruction that describes the task:
### Input:
Refresh local content for a single post from the the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object
### Response:
def load_post(self, wp_post_id):
"""
Refresh local content for a single post from the the WordPress REST API.
This can be called from a webhook on the WordPress side when a post is updated.
:param wp_post_id: the wordpress post ID
:return: the fully loaded local post object
"""
path = "sites/{}/posts/{}".format(self.site_id, wp_post_id)
response = self.get(path)
if response.ok and response.text:
api_post = response.json()
self.get_ref_data_map(bulk_mode=False)
self.load_wp_post(api_post, bulk_mode=False)
# the post should exist in the db now, so return it so that callers can work with it
try:
post = Post.objects.get(site_id=self.site_id, wp_id=wp_post_id)
except Exception as ex:
logger.exception("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, ex.message))
else:
return post
else:
logger.warning("Unable to load post with wp_post_id={}:\n{}".format(wp_post_id, response.text)) |
def format_geonames(self, entry, searchterm=None):
"""
Pull out just the fields we want from a geonames entry
To do:
- switch to model picking
Parameters
-----------
res : dict
ES/geonames result
searchterm : str
(not implemented). Needed for better results picking
Returns
--------
new_res : dict
containing selected fields from selected geonames entry
"""
try:
lat, lon = entry['coordinates'].split(",")
new_res = {"admin1" : self.get_admin1(entry['country_code2'], entry['admin1_code']),
"lat" : lat,
"lon" : lon,
"country_code3" : entry["country_code3"],
"geonameid" : entry["geonameid"],
"place_name" : entry["name"],
"feature_class" : entry["feature_class"],
"feature_code" : entry["feature_code"]}
return new_res
except (IndexError, TypeError):
# two conditions for these errors:
# 1. there are no results for some reason (Index)
# 2. res is set to "" because the country model was below the thresh
new_res = {"admin1" : "",
"lat" : "",
"lon" : "",
"country_code3" : "",
"geonameid" : "",
"place_name" : "",
"feature_class" : "",
"feature_code" : ""}
return new_res | Pull out just the fields we want from a geonames entry
To do:
- switch to model picking
Parameters
-----------
res : dict
ES/geonames result
searchterm : str
(not implemented). Needed for better results picking
Returns
--------
new_res : dict
containing selected fields from selected geonames entry | Below is the the instruction that describes the task:
### Input:
Pull out just the fields we want from a geonames entry
To do:
- switch to model picking
Parameters
-----------
res : dict
ES/geonames result
searchterm : str
(not implemented). Needed for better results picking
Returns
--------
new_res : dict
containing selected fields from selected geonames entry
### Response:
def format_geonames(self, entry, searchterm=None):
"""
Pull out just the fields we want from a geonames entry
To do:
- switch to model picking
Parameters
-----------
res : dict
ES/geonames result
searchterm : str
(not implemented). Needed for better results picking
Returns
--------
new_res : dict
containing selected fields from selected geonames entry
"""
try:
lat, lon = entry['coordinates'].split(",")
new_res = {"admin1" : self.get_admin1(entry['country_code2'], entry['admin1_code']),
"lat" : lat,
"lon" : lon,
"country_code3" : entry["country_code3"],
"geonameid" : entry["geonameid"],
"place_name" : entry["name"],
"feature_class" : entry["feature_class"],
"feature_code" : entry["feature_code"]}
return new_res
except (IndexError, TypeError):
# two conditions for these errors:
# 1. there are no results for some reason (Index)
# 2. res is set to "" because the country model was below the thresh
new_res = {"admin1" : "",
"lat" : "",
"lon" : "",
"country_code3" : "",
"geonameid" : "",
"place_name" : "",
"feature_class" : "",
"feature_code" : ""}
return new_res |
def create_class(self, data, options=None, **kwargs):
"""Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output
"""
obj_map = dict((cls.type, cls) for cls in ALL_CLASSES)
try:
cls = obj_map[data["kind"]]
except (KeyError, TypeError):
LOGGER.warning("Unknown Type: %s" % data)
else:
# Recurse for children
obj = cls(data, jinja_env=self.jinja_env)
if "children" in data:
for child_data in data["children"]:
for child_obj in self.create_class(child_data, options=options):
obj.children.append(child_obj)
yield obj | Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output | Below is the the instruction that describes the task:
### Input:
Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output
### Response:
def create_class(self, data, options=None, **kwargs):
"""Return instance of class based on Javascript data
Data keys handled here:
type
Set the object class
consts, types, vars, funcs
Recurse into :py:meth:`create_class` to create child object
instances
:param data: dictionary data from godocjson output
"""
obj_map = dict((cls.type, cls) for cls in ALL_CLASSES)
try:
cls = obj_map[data["kind"]]
except (KeyError, TypeError):
LOGGER.warning("Unknown Type: %s" % data)
else:
# Recurse for children
obj = cls(data, jinja_env=self.jinja_env)
if "children" in data:
for child_data in data["children"]:
for child_obj in self.create_class(child_data, options=options):
obj.children.append(child_obj)
yield obj |
def delete_subscription(self, project, subscription,
fail_if_not_exists=False):
"""Deletes a Pub/Sub subscription, if it exists.
:param project: the GCP project ID where the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:type fail_if_not_exists: bool
"""
service = self.get_conn()
full_subscription = _format_subscription(project, subscription)
try:
service.projects().subscriptions().delete(
subscription=full_subscription).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 404 indicates that the subscription was not found
if str(e.resp['status']) == '404':
message = 'Subscription does not exist: {}'.format(
full_subscription)
self.log.warning(message)
if fail_if_not_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error deleting subscription {}'.format(full_subscription),
e) | Deletes a Pub/Sub subscription, if it exists.
:param project: the GCP project ID where the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:type fail_if_not_exists: bool | Below is the the instruction that describes the task:
### Input:
Deletes a Pub/Sub subscription, if it exists.
:param project: the GCP project ID where the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:type fail_if_not_exists: bool
### Response:
def delete_subscription(self, project, subscription,
fail_if_not_exists=False):
"""Deletes a Pub/Sub subscription, if it exists.
:param project: the GCP project ID where the subscription exists
:type project: str
:param subscription: the Pub/Sub subscription name to delete; do not
include the ``projects/{project}/subscriptions/`` prefix.
:type subscription: str
:param fail_if_not_exists: if set, raise an exception if the topic
does not exist
:type fail_if_not_exists: bool
"""
service = self.get_conn()
full_subscription = _format_subscription(project, subscription)
try:
service.projects().subscriptions().delete(
subscription=full_subscription).execute(num_retries=self.num_retries)
except HttpError as e:
# Status code 404 indicates that the subscription was not found
if str(e.resp['status']) == '404':
message = 'Subscription does not exist: {}'.format(
full_subscription)
self.log.warning(message)
if fail_if_not_exists:
raise PubSubException(message)
else:
raise PubSubException(
'Error deleting subscription {}'.format(full_subscription),
e) |
def plot(self):
'''
Plot the individual and the gene
'''
pl.plot(self.x, self.y, '.')
pl.plot(self.x_int, self.y_int)
pl.grid(True)
pl.show() | Plot the individual and the gene | Below is the the instruction that describes the task:
### Input:
Plot the individual and the gene
### Response:
def plot(self):
'''
Plot the individual and the gene
'''
pl.plot(self.x, self.y, '.')
pl.plot(self.x_int, self.y_int)
pl.grid(True)
pl.show() |
def add_options(self):
""" Add program options.
"""
super(RtorrentMove, self).add_options()
# basic options
self.add_bool_option("-n", "--dry-run",
help="don't move data, just tell what would happen")
self.add_bool_option("-F", "--force-incomplete",
help="force a move of incomplete data") | Add program options. | Below is the the instruction that describes the task:
### Input:
Add program options.
### Response:
def add_options(self):
""" Add program options.
"""
super(RtorrentMove, self).add_options()
# basic options
self.add_bool_option("-n", "--dry-run",
help="don't move data, just tell what would happen")
self.add_bool_option("-F", "--force-incomplete",
help="force a move of incomplete data") |
def delete_collection_mutating_webhook_configuration(self, **kwargs): # noqa: E501
"""delete_collection_mutating_webhook_configuration # noqa: E501
delete collection of MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
return data | delete_collection_mutating_webhook_configuration # noqa: E501
delete collection of MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
delete_collection_mutating_webhook_configuration # noqa: E501
delete collection of MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_collection_mutating_webhook_configuration(self, **kwargs): # noqa: E501
"""delete_collection_mutating_webhook_configuration # noqa: E501
delete collection of MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_mutating_webhook_configuration(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.delete_collection_mutating_webhook_configuration_with_http_info(**kwargs) # noqa: E501
return data |
def build_definitions(dpath="sqlalchemy_models/_definitions.json"):
"""
Nasty hacky method of ensuring LedgerAmounts are rendered as floats in json schemas, instead of integers.
:param str dpath: The path of the definitions file to create as part of the build process.
"""
command.run(AlsoChildrenWalker, module=todo, outdir="sqlalchemy_models", definition_name="_definitions.json",
relation_decision=RelationDesicion())
pass # skip due to unpatched issue in alchemyjsonschema run()
definitions = ""
for line in open(dpath, 'r'):
definitions += line
definitions = json.loads(definitions)
newdef = open(dpath.replace("_def", "def"), 'w+')
for name in definitions['definitions']:
for mod in [um, em, wm, bm]:
if hasattr(mod, name):
model = getattr(mod, name)
for attr in definitions['definitions'][name]['properties']:
if hasattr(getattr(model, attr).property, 'columns') and \
isinstance(getattr(model, attr).property.columns[0].type, LedgerAmount):
definitions['definitions'][name]['properties'][attr]['type'] = "number"
newdef.write(json.dumps(definitions, indent=2))
newdef.close() | Nasty hacky method of ensuring LedgerAmounts are rendered as floats in json schemas, instead of integers.
:param str dpath: The path of the definitions file to create as part of the build process. | Below is the instruction that describes the task:
### Input:
Nasty hacky method of ensuring LedgerAmounts are rendered as floats in json schemas, instead of integers.
:param str dpath: The path of the definitions file to create as part of the build process.
### Response:
def build_definitions(dpath="sqlalchemy_models/_definitions.json"):
"""
Nasty hacky method of ensuring LedgerAmounts are rendered as floats in json schemas, instead of integers.
:param str dpath: The path of the definitions file to create as part of the build process.
"""
command.run(AlsoChildrenWalker, module=todo, outdir="sqlalchemy_models", definition_name="_definitions.json",
relation_decision=RelationDesicion())
pass # skip due to unpatched issue in alchemyjsonschema run()
definitions = ""
for line in open(dpath, 'r'):
definitions += line
definitions = json.loads(definitions)
newdef = open(dpath.replace("_def", "def"), 'w+')
for name in definitions['definitions']:
for mod in [um, em, wm, bm]:
if hasattr(mod, name):
model = getattr(mod, name)
for attr in definitions['definitions'][name]['properties']:
if hasattr(getattr(model, attr).property, 'columns') and \
isinstance(getattr(model, attr).property.columns[0].type, LedgerAmount):
definitions['definitions'][name]['properties'][attr]['type'] = "number"
newdef.write(json.dumps(definitions, indent=2))
newdef.close() |
def writetb_parts(path, kvs, num_per_file, **kw):
"""Write typedbytes sequence files to HDFS given an iterator of KeyValue pairs
This is useful when a task is CPU bound and wish to break up its inputs
into pieces smaller than the HDFS block size. This causes Hadoop to launch
one Map task per file. As such, you can consider this a way of forcing
a minimum number of map tasks.
:param path: HDFS path (string)
:param kvs: Iterator of (key, value)
:param num_per_file: Max # of kv pairs per file.
:param java_mem_mb: Integer of java heap size in MB (default 256)
:raises: IOError: An error occurred while saving the data.
"""
out = []
part_num = 0
def _flush(out, part_num):
hadoopy.writetb('%s/part-%.5d' % (path, part_num), out, **kw)
return [], part_num + 1
for kv in kvs:
out.append(kv)
if len(out) >= num_per_file:
out, part_num = _flush(out, part_num)
if out:
out, part_num = _flush(out, part_num) | Write typedbytes sequence files to HDFS given an iterator of KeyValue pairs
This is useful when a task is CPU bound and wish to break up its inputs
into pieces smaller than the HDFS block size. This causes Hadoop to launch
one Map task per file. As such, you can consider this a way of forcing
a minimum number of map tasks.
:param path: HDFS path (string)
:param kvs: Iterator of (key, value)
:param num_per_file: Max # of kv pairs per file.
:param java_mem_mb: Integer of java heap size in MB (default 256)
:raises: IOError: An error occurred while saving the data. | Below is the instruction that describes the task:
### Input:
Write typedbytes sequence files to HDFS given an iterator of KeyValue pairs
This is useful when a task is CPU bound and wish to break up its inputs
into pieces smaller than the HDFS block size. This causes Hadoop to launch
one Map task per file. As such, you can consider this a way of forcing
a minimum number of map tasks.
:param path: HDFS path (string)
:param kvs: Iterator of (key, value)
:param num_per_file: Max # of kv pairs per file.
:param java_mem_mb: Integer of java heap size in MB (default 256)
:raises: IOError: An error occurred while saving the data.
### Response:
def writetb_parts(path, kvs, num_per_file, **kw):
"""Write typedbytes sequence files to HDFS given an iterator of KeyValue pairs
This is useful when a task is CPU bound and wish to break up its inputs
into pieces smaller than the HDFS block size. This causes Hadoop to launch
one Map task per file. As such, you can consider this a way of forcing
a minimum number of map tasks.
:param path: HDFS path (string)
:param kvs: Iterator of (key, value)
:param num_per_file: Max # of kv pairs per file.
:param java_mem_mb: Integer of java heap size in MB (default 256)
:raises: IOError: An error occurred while saving the data.
"""
out = []
part_num = 0
def _flush(out, part_num):
hadoopy.writetb('%s/part-%.5d' % (path, part_num), out, **kw)
return [], part_num + 1
for kv in kvs:
out.append(kv)
if len(out) >= num_per_file:
out, part_num = _flush(out, part_num)
if out:
out, part_num = _flush(out, part_num) |
def update_composition(self, composiiton_form):
"""Updates an existing composition.
arg: composiiton_form (osid.repository.CompositionForm): the
form containing the elements to be updated
raise: IllegalState - ``composition_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``composition_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``composition_form`` did not originate
from ``get_composition_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.update_resource_template
collection = JSONClientValidated('repository',
collection='Composition',
runtime=self._runtime)
if not isinstance(composiiton_form, ABCCompositionForm):
raise errors.InvalidArgument('argument type is not an CompositionForm')
if not composiiton_form.is_for_update():
raise errors.InvalidArgument('the CompositionForm is for update only, not create')
try:
if self._forms[composiiton_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('composiiton_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('composiiton_form did not originate from this session')
if not composiiton_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(composiiton_form._my_map)
self._forms[composiiton_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned:
return objects.Composition(
osid_object_map=composiiton_form._my_map,
runtime=self._runtime,
proxy=self._proxy) | Updates an existing composition.
arg: composiiton_form (osid.repository.CompositionForm): the
form containing the elements to be updated
raise: IllegalState - ``composition_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``composition_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``composition_form`` did not originate
from ``get_composition_form_for_update()``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Updates an existing composition.
arg: composiiton_form (osid.repository.CompositionForm): the
form containing the elements to be updated
raise: IllegalState - ``composition_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``composition_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``composition_form`` did not originate
from ``get_composition_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
### Response:
def update_composition(self, composiiton_form):
"""Updates an existing composition.
arg: composiiton_form (osid.repository.CompositionForm): the
form containing the elements to be updated
raise: IllegalState - ``composition_form`` already used in an
update transaction
raise: InvalidArgument - the form contains an invalid value
raise: NullArgument - ``composition_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``composition_form`` did not originate
from ``get_composition_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.update_resource_template
collection = JSONClientValidated('repository',
collection='Composition',
runtime=self._runtime)
if not isinstance(composiiton_form, ABCCompositionForm):
raise errors.InvalidArgument('argument type is not an CompositionForm')
if not composiiton_form.is_for_update():
raise errors.InvalidArgument('the CompositionForm is for update only, not create')
try:
if self._forms[composiiton_form.get_id().get_identifier()] == UPDATED:
raise errors.IllegalState('composiiton_form already used in an update transaction')
except KeyError:
raise errors.Unsupported('composiiton_form did not originate from this session')
if not composiiton_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
collection.save(composiiton_form._my_map)
self._forms[composiiton_form.get_id().get_identifier()] = UPDATED
# Note: this is out of spec. The OSIDs don't require an object to be returned:
return objects.Composition(
osid_object_map=composiiton_form._my_map,
runtime=self._runtime,
proxy=self._proxy) |
def get_best_answer(self, query):
"""Get best answer to a question.
:param query: A question to get an answer
:type query: :class:`str`
:returns: An answer to a question
:rtype: :class:`str`
:raises: :class:`NoAnswerError` when can not found answer to a question
"""
query = to_unicode(query)
session = self.Session()
grams = self._get_grams(session, query)
if not grams:
raise NoAnswerError('Can not found answer')
documents = set([doc for gram in grams for doc in gram.documents])
self._recalc_idfs(session, grams)
idfs = dict((gram.gram, gram.idf) for gram in grams)
docs = dict(
(doc.answer, _cosine_measure(idfs, self._get_tf_idfs(doc)))
for doc in documents)
docs = dict((key, val) for (key, val) in docs.items() if val)
session.commit()
try:
max_ratio = max(docs.values())
answers = [answer for answer in docs.keys()
if docs.get(answer) == max_ratio]
answer = random.choice(answers)
logger.debug('{0} -> {1} ({2})'.format(query, answer, max_ratio))
return (answer, max_ratio)
except ValueError:
raise NoAnswerError('Can not found answer')
finally:
session.commit() | Get best answer to a question.
:param query: A question to get an answer
:type query: :class:`str`
:returns: An answer to a question
:rtype: :class:`str`
:raises: :class:`NoAnswerError` when can not found answer to a question | Below is the instruction that describes the task:
### Input:
Get best answer to a question.
:param query: A question to get an answer
:type query: :class:`str`
:returns: An answer to a question
:rtype: :class:`str`
:raises: :class:`NoAnswerError` when can not found answer to a question
### Response:
def get_best_answer(self, query):
"""Get best answer to a question.
:param query: A question to get an answer
:type query: :class:`str`
:returns: An answer to a question
:rtype: :class:`str`
:raises: :class:`NoAnswerError` when can not found answer to a question
"""
query = to_unicode(query)
session = self.Session()
grams = self._get_grams(session, query)
if not grams:
raise NoAnswerError('Can not found answer')
documents = set([doc for gram in grams for doc in gram.documents])
self._recalc_idfs(session, grams)
idfs = dict((gram.gram, gram.idf) for gram in grams)
docs = dict(
(doc.answer, _cosine_measure(idfs, self._get_tf_idfs(doc)))
for doc in documents)
docs = dict((key, val) for (key, val) in docs.items() if val)
session.commit()
try:
max_ratio = max(docs.values())
answers = [answer for answer in docs.keys()
if docs.get(answer) == max_ratio]
answer = random.choice(answers)
logger.debug('{0} -> {1} ({2})'.format(query, answer, max_ratio))
return (answer, max_ratio)
except ValueError:
raise NoAnswerError('Can not found answer')
finally:
session.commit() |
def lazy_constant(fn):
"""Decorator to make a function that takes no arguments use the LazyConstant class."""
class NewLazyConstant(LazyConstant):
@functools.wraps(fn)
def __call__(self):
return self.get_value()
return NewLazyConstant(fn) | Decorator to make a function that takes no arguments use the LazyConstant class. | Below is the instruction that describes the task:
### Input:
Decorator to make a function that takes no arguments use the LazyConstant class.
### Response:
def lazy_constant(fn):
"""Decorator to make a function that takes no arguments use the LazyConstant class."""
class NewLazyConstant(LazyConstant):
@functools.wraps(fn)
def __call__(self):
return self.get_value()
return NewLazyConstant(fn) |
def _le_butt(self, annot, p1, p2, lr):
"""Make stream commands for butt line end symbol. "lr" denotes left (False) or right point.
"""
m, im, L, R, w, scol, fcol, opacity = self._le_annot_parms(annot, p1, p2)
shift = 3
d = shift * max(1, w)
M = R if lr else L
top = (M + (0, -d/2.)) * im
bot = (M + (0, d/2.)) * im
ap = "\nq\n%s%f %f m\n" % (opacity, top.x, top.y)
ap += "%f %f l\n" % (bot.x, bot.y)
ap += "%g w\n" % w
ap += scol + "s\nQ\n"
return ap | Make stream commands for butt line end symbol. "lr" denotes left (False) or right point. | Below is the instruction that describes the task:
### Input:
Make stream commands for butt line end symbol. "lr" denotes left (False) or right point.
### Response:
def _le_butt(self, annot, p1, p2, lr):
"""Make stream commands for butt line end symbol. "lr" denotes left (False) or right point.
"""
m, im, L, R, w, scol, fcol, opacity = self._le_annot_parms(annot, p1, p2)
shift = 3
d = shift * max(1, w)
M = R if lr else L
top = (M + (0, -d/2.)) * im
bot = (M + (0, d/2.)) * im
ap = "\nq\n%s%f %f m\n" % (opacity, top.x, top.y)
ap += "%f %f l\n" % (bot.x, bot.y)
ap += "%g w\n" % w
ap += scol + "s\nQ\n"
return ap |
def change_event_type(self):
"""Action: change highlighted event's type by cycling through event
type list."""
if self.current_event is None:
return
hl_params = self.highlight.params
self.scene.removeItem(self.highlight)
ev = self.current_event
new_name = self.parent.notes.change_event_type(name=ev['name'],
time=(ev['start'],
ev['end']),
chan=ev['chan'])
msg = "Event from {} to {} changed type from '{}' to '{}'".format(
ev['start'], ev['end'], ev['name'], new_name)
ev['name'] = new_name
self.current_event = ev
self.current_etype = new_name
#self.event_sel = True
self.parent.notes.idx_eventtype.setCurrentText(new_name)
self.parent.statusBar().showMessage(msg)
self.display_annotations()
self.highlight = RectMarker(*hl_params)
self.scene.addItem(self.highlight) | Action: change highlighted event's type by cycling through event
type list. | Below is the instruction that describes the task:
### Input:
Action: change highlighted event's type by cycling through event
type list.
### Response:
def change_event_type(self):
"""Action: change highlighted event's type by cycling through event
type list."""
if self.current_event is None:
return
hl_params = self.highlight.params
self.scene.removeItem(self.highlight)
ev = self.current_event
new_name = self.parent.notes.change_event_type(name=ev['name'],
time=(ev['start'],
ev['end']),
chan=ev['chan'])
msg = "Event from {} to {} changed type from '{}' to '{}'".format(
ev['start'], ev['end'], ev['name'], new_name)
ev['name'] = new_name
self.current_event = ev
self.current_etype = new_name
#self.event_sel = True
self.parent.notes.idx_eventtype.setCurrentText(new_name)
self.parent.statusBar().showMessage(msg)
self.display_annotations()
self.highlight = RectMarker(*hl_params)
self.scene.addItem(self.highlight) |
def _compute_scale(self, instruction_id, svg_dict):
"""Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed
"""
bbox = list(map(float, svg_dict["svg"]["@viewBox"].split()))
scale = self._zoom / (bbox[3] - bbox[1])
self._symbol_id_to_scale[instruction_id] = scale | Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed | Below is the instruction that describes the task:
### Input:
Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed
### Response:
def _compute_scale(self, instruction_id, svg_dict):
"""Compute the scale of an instruction svg.
Compute the scale using the bounding box stored in the
:paramref:`svg_dict`. The scale is saved in a dictionary using
:paramref:`instruction_id` as key.
:param str instruction_id: id identifying a symbol in the defs
:param dict svg_dict: dictionary containing the SVG for the
instruction currently processed
"""
bbox = list(map(float, svg_dict["svg"]["@viewBox"].split()))
scale = self._zoom / (bbox[3] - bbox[1])
self._symbol_id_to_scale[instruction_id] = scale |
def replace_macros(string, spec=None):
"""Replace all macros in given string with corresponding values.
For example: a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
:param string A string containing macros that you want to be replaced
:param spec An optional spec file. If given, definitions in that spec
file will be used to replace macros.
:return A string where all macros in given input are substituted as good as possible.
"""
if spec:
assert isinstance(spec, Spec)
def _is_conditional(macro: str) -> bool:
return macro.startswith("?") or macro.startswith("!")
def _test_conditional(macro: str) -> bool:
if macro[0] == "?":
return True
if macro[0] == "!":
return False
raise Exception("Given string is not a conditional macro")
def _macro_repl(match):
macro_name = match.group(1)
if _is_conditional(macro_name) and spec:
parts = macro_name[1:].split(sep=":", maxsplit=1)
assert parts
if _test_conditional(macro_name): # ?
if hasattr(spec, parts[0]):
if len(parts) == 2:
return parts[1]
return getattr(spec, parts[0], None)
return ""
else: # !
if not hasattr(spec, parts[0]):
if len(parts) == 2:
return parts[1]
return getattr(spec, parts[0], None)
return ""
if spec:
value = getattr(spec, macro_name, None)
if value:
return str(value)
return match.string[match.start() : match.end()]
# Recursively expand macros
# Note: If macros are not defined in the spec file, this won't try to
# expand them.
while True:
ret = re.sub(_macro_pattern, _macro_repl, string)
if ret != string:
string = ret
continue
return ret | Replace all macros in given string with corresponding values.
For example: a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
:param string A string containing macros that you want to be replaced
:param spec An optional spec file. If given, definitions in that spec
file will be used to replace macros.
:return A string where all macros in given input are substituted as good as possible. | Below is the instruction that describes the task:
### Input:
Replace all macros in given string with corresponding values.
For example: a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
:param string A string containing macros that you want to be replaced
:param spec An optional spec file. If given, definitions in that spec
file will be used to replace macros.
:return A string where all macros in given input are substituted as good as possible.
### Response:
def replace_macros(string, spec=None):
"""Replace all macros in given string with corresponding values.
For example: a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
:param string A string containing macros that you want to be replaced
:param spec An optional spec file. If given, definitions in that spec
file will be used to replace macros.
:return A string where all macros in given input are substituted as good as possible.
"""
if spec:
assert isinstance(spec, Spec)
def _is_conditional(macro: str) -> bool:
return macro.startswith("?") or macro.startswith("!")
def _test_conditional(macro: str) -> bool:
if macro[0] == "?":
return True
if macro[0] == "!":
return False
raise Exception("Given string is not a conditional macro")
def _macro_repl(match):
macro_name = match.group(1)
if _is_conditional(macro_name) and spec:
parts = macro_name[1:].split(sep=":", maxsplit=1)
assert parts
if _test_conditional(macro_name): # ?
if hasattr(spec, parts[0]):
if len(parts) == 2:
return parts[1]
return getattr(spec, parts[0], None)
return ""
else: # !
if not hasattr(spec, parts[0]):
if len(parts) == 2:
return parts[1]
return getattr(spec, parts[0], None)
return ""
if spec:
value = getattr(spec, macro_name, None)
if value:
return str(value)
return match.string[match.start() : match.end()]
# Recursively expand macros
# Note: If macros are not defined in the spec file, this won't try to
# expand them.
while True:
ret = re.sub(_macro_pattern, _macro_repl, string)
if ret != string:
string = ret
continue
return ret |
def nodes_map(self):
"""
Build a mapping from node type to a list of nodes.
A typed mapping helps avoid polymorphism at non-persistent layers.
"""
dct = dict()
for node in self.nodes.values():
cls = next(base for base in getmro(node.__class__) if "__tablename__" in base.__dict__)
key = getattr(cls, "__alias__", underscore(cls.__name__))
dct.setdefault(key, []).append(node)
return dct | Build a mapping from node type to a list of nodes.
A typed mapping helps avoid polymorphism at non-persistent layers. | Below is the instruction that describes the task:
### Input:
Build a mapping from node type to a list of nodes.
A typed mapping helps avoid polymorphism at non-persistent layers.
### Response:
def nodes_map(self):
"""
Build a mapping from node type to a list of nodes.
A typed mapping helps avoid polymorphism at non-persistent layers.
"""
dct = dict()
for node in self.nodes.values():
cls = next(base for base in getmro(node.__class__) if "__tablename__" in base.__dict__)
key = getattr(cls, "__alias__", underscore(cls.__name__))
dct.setdefault(key, []).append(node)
return dct |
def get_thresholds(points=100, power=3) -> list:
"""Run a function with a series of thresholds between 0 and 1"""
return [(i / (points + 1)) ** power for i in range(1, points + 1)] | Run a function with a series of thresholds between 0 and 1 | Below is the instruction that describes the task:
### Input:
Run a function with a series of thresholds between 0 and 1
### Response:
def get_thresholds(points=100, power=3) -> list:
"""Run a function with a series of thresholds between 0 and 1"""
return [(i / (points + 1)) ** power for i in range(1, points + 1)] |
def getLipdNames(D=None):
"""
Get a list of all LiPD names in the library
| Example
| names = lipd.getLipdNames(D)
:return list f_list: File list
"""
_names = []
try:
if not D:
print("Error: LiPD data not provided. Pass LiPD data into the function.")
else:
_names = D.keys()
except Exception:
pass
return _names | Get a list of all LiPD names in the library
| Example
| names = lipd.getLipdNames(D)
:return list f_list: File list | Below is the instruction that describes the task:
### Input:
Get a list of all LiPD names in the library
| Example
| names = lipd.getLipdNames(D)
:return list f_list: File list
### Response:
def getLipdNames(D=None):
"""
Get a list of all LiPD names in the library
| Example
| names = lipd.getLipdNames(D)
:return list f_list: File list
"""
_names = []
try:
if not D:
print("Error: LiPD data not provided. Pass LiPD data into the function.")
else:
_names = D.keys()
except Exception:
pass
return _names |
def check_token(self):
"""
Checks if the token exists
:return bool: True if it exists on the store
"""
try:
doc = self.doc_ref.get()
except Exception as e:
log.error('Token (collection: {}, doc_id: {}) '
'could not be retrieved from the backend: {}'
.format(self.collection, self.doc_id, str(e)))
doc = None
return doc and doc.exists | Checks if the token exists
:return bool: True if it exists on the store | Below is the instruction that describes the task:
### Input:
Checks if the token exists
:return bool: True if it exists on the store
### Response:
def check_token(self):
"""
Checks if the token exists
:return bool: True if it exists on the store
"""
try:
doc = self.doc_ref.get()
except Exception as e:
log.error('Token (collection: {}, doc_id: {}) '
'could not be retrieved from the backend: {}'
.format(self.collection, self.doc_id, str(e)))
doc = None
return doc and doc.exists |
def prepare_intercept(callback):
"""
Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept).
"""
_setup_name_tables()
def process_key(event_type, vk, scan_code, is_extended):
global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt
#print(event_type, vk, scan_code, is_extended)
# Pressing alt-gr also generates an extra "right alt" event
if vk == 0xA5 and ignore_next_right_alt:
ignore_next_right_alt = False
return True
modifiers = (
('shift',) * shift_is_pressed +
('alt gr',) * altgr_is_pressed +
('num lock',) * (user32.GetKeyState(0x90) & 1) +
('caps lock',) * (user32.GetKeyState(0x14) & 1) +
('scroll lock',) * (user32.GetKeyState(0x91) & 1)
)
entry = (scan_code, vk, is_extended, modifiers)
if entry not in to_name:
to_name[entry] = list(get_event_names(*entry))
names = to_name[entry]
name = names[0] if names else None
# TODO: inaccurate when holding multiple different shifts.
if vk in shift_vks:
shift_is_pressed = event_type == KEY_DOWN
if scan_code == 541 and vk == 162:
ignore_next_right_alt = True
altgr_is_pressed = event_type == KEY_DOWN
is_keypad = (scan_code, vk, is_extended) in keypad_keys
return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))
def low_level_keyboard_handler(nCode, wParam, lParam):
try:
vk = lParam.contents.vk_code
# Ignore the second `alt` DOWN observed in some cases.
fake_alt = (LLKHF_INJECTED | 0x20)
# Ignore events generated by SendInput with Unicode.
if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:
event_type = keyboard_event_types[wParam]
is_extended = lParam.contents.flags & 1
scan_code = lParam.contents.scan_code
should_continue = process_key(event_type, vk, scan_code, is_extended)
if not should_continue:
return -1
except Exception as e:
print('Error in keyboard hook:')
traceback.print_exc()
return CallNextHookEx(None, nCode, wParam, lParam)
WH_KEYBOARD_LL = c_int(13)
keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)
handle = GetModuleHandleW(None)
thread_id = DWORD(0)
keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)
# Register to remove the hook when the interpreter exits. Unfortunately a
# try/finally block doesn't seem to work here.
atexit.register(UnhookWindowsHookEx, keyboard_callback) | Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept). | Below is the the instruction that describes the task:
### Input:
Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept).
### Response:
def prepare_intercept(callback):
"""
Registers a Windows low level keyboard hook. The provided callback will
be invoked for each high-level keyboard event, and is expected to return
True if the key event should be passed to the next program, or False if
the event is to be blocked.
No event is processed until the Windows messages are pumped (see
start_intercept).
"""
_setup_name_tables()
def process_key(event_type, vk, scan_code, is_extended):
global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt
#print(event_type, vk, scan_code, is_extended)
# Pressing alt-gr also generates an extra "right alt" event
if vk == 0xA5 and ignore_next_right_alt:
ignore_next_right_alt = False
return True
modifiers = (
('shift',) * shift_is_pressed +
('alt gr',) * altgr_is_pressed +
('num lock',) * (user32.GetKeyState(0x90) & 1) +
('caps lock',) * (user32.GetKeyState(0x14) & 1) +
('scroll lock',) * (user32.GetKeyState(0x91) & 1)
)
entry = (scan_code, vk, is_extended, modifiers)
if entry not in to_name:
to_name[entry] = list(get_event_names(*entry))
names = to_name[entry]
name = names[0] if names else None
# TODO: inaccurate when holding multiple different shifts.
if vk in shift_vks:
shift_is_pressed = event_type == KEY_DOWN
if scan_code == 541 and vk == 162:
ignore_next_right_alt = True
altgr_is_pressed = event_type == KEY_DOWN
is_keypad = (scan_code, vk, is_extended) in keypad_keys
return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))
def low_level_keyboard_handler(nCode, wParam, lParam):
try:
vk = lParam.contents.vk_code
# Ignore the second `alt` DOWN observed in some cases.
fake_alt = (LLKHF_INJECTED | 0x20)
# Ignore events generated by SendInput with Unicode.
if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:
event_type = keyboard_event_types[wParam]
is_extended = lParam.contents.flags & 1
scan_code = lParam.contents.scan_code
should_continue = process_key(event_type, vk, scan_code, is_extended)
if not should_continue:
return -1
except Exception as e:
print('Error in keyboard hook:')
traceback.print_exc()
return CallNextHookEx(None, nCode, wParam, lParam)
WH_KEYBOARD_LL = c_int(13)
keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)
handle = GetModuleHandleW(None)
thread_id = DWORD(0)
keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)
# Register to remove the hook when the interpreter exits. Unfortunately a
# try/finally block doesn't seem to work here.
atexit.register(UnhookWindowsHookEx, keyboard_callback) |
def _make_query(self, ID: str, methodname: str, returnable: bool, *args: Any, **kwargs: Any):
"""将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
returnable (bool): - 是否要求返回结果
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式
"""
query = {
"MPRPC": self.VERSION,
"ID": ID,
"METHOD": methodname,
"RETURN": returnable,
"ARGS": args,
"KWARGS": kwargs
}
print(query)
return query | 将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
returnable (bool): - 是否要求返回结果
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式 | Below is the the instruction that describes the task:
### Input:
将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
returnable (bool): - 是否要求返回结果
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式
### Response:
def _make_query(self, ID: str, methodname: str, returnable: bool, *args: Any, **kwargs: Any):
"""将调用请求的ID,方法名,参数包装为请求数据.
Parameters:
ID (str): - 任务ID
methodname (str): - 要调用的方法名
returnable (bool): - 是否要求返回结果
args (Any): - 要调用的方法的位置参数
kwargs (Any): - 要调用的方法的关键字参数
Return:
(Dict[str, Any]) : - 请求的python字典形式
"""
query = {
"MPRPC": self.VERSION,
"ID": ID,
"METHOD": methodname,
"RETURN": returnable,
"ARGS": args,
"KWARGS": kwargs
}
print(query)
return query |
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
api = self.get_api(social_user, owner_id)
return api.post('{}/feed'.format(owner_id or 'me'), params=kwargs)
return _post | message: <str>
image: <file> as object_attachment
owner_id: <str> | Below is the the instruction that describes the task:
### Input:
message: <str>
image: <file> as object_attachment
owner_id: <str>
### Response:
def get_api_publisher(self, social_user):
"""
message: <str>
image: <file> as object_attachment
owner_id: <str>
"""
def _post(owner_id=None, **kwargs):
api = self.get_api(social_user, owner_id)
return api.post('{}/feed'.format(owner_id or 'me'), params=kwargs)
return _post |
def convert_rect(self, rect):
'''
Same as Container().convert_rect(), but adds scrolling.
'''
return Container.convert_rect(self, rect).move(-self.offset) | Same as Container().convert_rect(), but adds scrolling. | Below is the the instruction that describes the task:
### Input:
Same as Container().convert_rect(), but adds scrolling.
### Response:
def convert_rect(self, rect):
'''
Same as Container().convert_rect(), but adds scrolling.
'''
return Container.convert_rect(self, rect).move(-self.offset) |
def tmdb_movies(api_key, id_tmdb, language="en-US", cache=True):
""" Lookup a movie item using The Movie Database
Online docs: developers.themoviedb.org/3/movies
"""
try:
url = "https://api.themoviedb.org/3/movie/%d" % int(id_tmdb)
except ValueError:
raise MapiProviderException("id_tmdb must be numeric")
parameters = {"api_key": api_key, "language": language}
status, content = _request_json(url, parameters, cache=cache)
if status == 401:
raise MapiProviderException("invalid API key")
elif status == 404:
raise MapiNotFoundException
elif status != 200 or not any(content.keys()):
raise MapiNetworkException("TMDb down or unavailable?")
return content | Lookup a movie item using The Movie Database
Online docs: developers.themoviedb.org/3/movies | Below is the the instruction that describes the task:
### Input:
Lookup a movie item using The Movie Database
Online docs: developers.themoviedb.org/3/movies
### Response:
def tmdb_movies(api_key, id_tmdb, language="en-US", cache=True):
""" Lookup a movie item using The Movie Database
Online docs: developers.themoviedb.org/3/movies
"""
try:
url = "https://api.themoviedb.org/3/movie/%d" % int(id_tmdb)
except ValueError:
raise MapiProviderException("id_tmdb must be numeric")
parameters = {"api_key": api_key, "language": language}
status, content = _request_json(url, parameters, cache=cache)
if status == 401:
raise MapiProviderException("invalid API key")
elif status == 404:
raise MapiNotFoundException
elif status != 200 or not any(content.keys()):
raise MapiNetworkException("TMDb down or unavailable?")
return content |
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from this distribution.
In the returned set, mass2 <= mass1.
Parameters
----------
size : int
The number of values to generate; default is 1.
param : str, optional
If provided, will just return values for the given parameter.
Returns
-------
structured array
The random values in a numpy structured array.
"""
arr = super(UniformComponentMasses, self).rvs(size=size)
# enforce m1 > m2
m1 = conversions.primary_mass(arr['mass1'], arr['mass2'])
m2 = conversions.secondary_mass(arr['mass1'], arr['mass2'])
arr['mass1'][:] = m1
arr['mass2'][:] = m2
if param is not None:
arr = arr[param]
return arr | Gives a set of random values drawn from this distribution.
In the returned set, mass2 <= mass1.
Parameters
----------
size : int
The number of values to generate; default is 1.
param : str, optional
If provided, will just return values for the given parameter.
Returns
-------
structured array
The random values in a numpy structured array. | Below is the the instruction that describes the task:
### Input:
Gives a set of random values drawn from this distribution.
In the returned set, mass2 <= mass1.
Parameters
----------
size : int
The number of values to generate; default is 1.
param : str, optional
If provided, will just return values for the given parameter.
Returns
-------
structured array
The random values in a numpy structured array.
### Response:
def rvs(self, size=1, param=None):
"""Gives a set of random values drawn from this distribution.
In the returned set, mass2 <= mass1.
Parameters
----------
size : int
The number of values to generate; default is 1.
param : str, optional
If provided, will just return values for the given parameter.
Returns
-------
structured array
The random values in a numpy structured array.
"""
arr = super(UniformComponentMasses, self).rvs(size=size)
# enforce m1 > m2
m1 = conversions.primary_mass(arr['mass1'], arr['mass2'])
m2 = conversions.secondary_mass(arr['mass1'], arr['mass2'])
arr['mass1'][:] = m1
arr['mass2'][:] = m2
if param is not None:
arr = arr[param]
return arr |
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = kwargs
if 'view' not in context:
context['view'] = self
# Insert the considered forum, topic and post into the context
context['forum'] = self.get_forum()
context['topic'] = self.get_topic()
context['post'] = self.get_post()
# Handles the preview of the attachments
if context['attachment_formset']:
if hasattr(self, 'attachment_preview') and self.attachment_preview:
context['attachment_preview'] = self.attachment_preview
attachments = []
# Computes the list of the attachment file names that should be attached
# to the forum post being created or updated
for form in context['attachment_formset'].forms:
if (
form['DELETE'].value() or
(not form['file'].html_name in self.request._files and not form.instance.pk)
):
continue
attachments.append(
(
form,
self.request._files[form['file'].html_name].name if not form.instance
else form.instance.filename
)
)
context['attachment_file_previews'] = attachments
return context | Returns the context data to provide to the template. | Below is the the instruction that describes the task:
### Input:
Returns the context data to provide to the template.
### Response:
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = kwargs
if 'view' not in context:
context['view'] = self
# Insert the considered forum, topic and post into the context
context['forum'] = self.get_forum()
context['topic'] = self.get_topic()
context['post'] = self.get_post()
# Handles the preview of the attachments
if context['attachment_formset']:
if hasattr(self, 'attachment_preview') and self.attachment_preview:
context['attachment_preview'] = self.attachment_preview
attachments = []
# Computes the list of the attachment file names that should be attached
# to the forum post being created or updated
for form in context['attachment_formset'].forms:
if (
form['DELETE'].value() or
(not form['file'].html_name in self.request._files and not form.instance.pk)
):
continue
attachments.append(
(
form,
self.request._files[form['file'].html_name].name if not form.instance
else form.instance.filename
)
)
context['attachment_file_previews'] = attachments
return context |
def dataReceived(self, data):
""" Called from Twisted whenever data is received. """
self.bytes_in += (len(data))
self.buffer_in = self.buffer_in + data
while self.CheckDataReceived():
pass | Called from Twisted whenever data is received. | Below is the the instruction that describes the task:
### Input:
Called from Twisted whenever data is received.
### Response:
def dataReceived(self, data):
""" Called from Twisted whenever data is received. """
self.bytes_in += (len(data))
self.buffer_in = self.buffer_in + data
while self.CheckDataReceived():
pass |
def get_tx_mapping_options(self, tx_ac):
"""Return all transcript alignment sets for a given transcript
accession (tx_ac); returns empty list if transcript does not
exist. Use this method to discovery possible mapping options
supported in the database
:param tx_ac: transcript accession with version (e.g., 'NM_000051.3')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | AC_000143.1
alt_aln_method | splign
-[ RECORD 2 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | NC_000011.9
alt_aln_method | blat
"""
rows = self._fetchall(self._queries['tx_mapping_options'], [tx_ac])
return rows | Return all transcript alignment sets for a given transcript
accession (tx_ac); returns empty list if transcript does not
exist. Use this method to discovery possible mapping options
supported in the database
:param tx_ac: transcript accession with version (e.g., 'NM_000051.3')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | AC_000143.1
alt_aln_method | splign
-[ RECORD 2 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | NC_000011.9
alt_aln_method | blat | Below is the the instruction that describes the task:
### Input:
Return all transcript alignment sets for a given transcript
accession (tx_ac); returns empty list if transcript does not
exist. Use this method to discovery possible mapping options
supported in the database
:param tx_ac: transcript accession with version (e.g., 'NM_000051.3')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | AC_000143.1
alt_aln_method | splign
-[ RECORD 2 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | NC_000011.9
alt_aln_method | blat
### Response:
def get_tx_mapping_options(self, tx_ac):
"""Return all transcript alignment sets for a given transcript
accession (tx_ac); returns empty list if transcript does not
exist. Use this method to discovery possible mapping options
supported in the database
:param tx_ac: transcript accession with version (e.g., 'NM_000051.3')
:type tx_ac: str
# database output
-[ RECORD 1 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | AC_000143.1
alt_aln_method | splign
-[ RECORD 2 ]--+------------
hgnc | ATM
cds_start_i | 385
cds_end_i | 9556
tx_ac | NM_000051.3
alt_ac | NC_000011.9
alt_aln_method | blat
"""
rows = self._fetchall(self._queries['tx_mapping_options'], [tx_ac])
return rows |
def tempo_account_associate_with_jira_project(self, account_id, project_id,
default_account=False,
link_type='MANUAL'):
"""
The AccountLinkBean for associate Account with project
Adds a link to an Account.
{
scopeType:PROJECT
defaultAccount:boolean
linkType:IMPORTED | MANUAL
name:string
key:string
accountId:number
scope:number
id:number
}
:param project_id:
:param account_id
:param default_account
:param link_type
:return:
"""
data = {}
if account_id:
data['accountId'] = account_id
if default_account:
data['defaultAccount'] = default_account
if link_type:
data['linkType'] = link_type
if project_id:
data['scope'] = project_id
data['scopeType'] = 'PROJECT'
url = 'rest/tempo-accounts/1/link/'
return self.post(url, data=data) | The AccountLinkBean for associate Account with project
Adds a link to an Account.
{
scopeType:PROJECT
defaultAccount:boolean
linkType:IMPORTED | MANUAL
name:string
key:string
accountId:number
scope:number
id:number
}
:param project_id:
:param account_id
:param default_account
:param link_type
:return: | Below is the the instruction that describes the task:
### Input:
The AccountLinkBean for associate Account with project
Adds a link to an Account.
{
scopeType:PROJECT
defaultAccount:boolean
linkType:IMPORTED | MANUAL
name:string
key:string
accountId:number
scope:number
id:number
}
:param project_id:
:param account_id
:param default_account
:param link_type
:return:
### Response:
def tempo_account_associate_with_jira_project(self, account_id, project_id,
default_account=False,
link_type='MANUAL'):
"""
The AccountLinkBean for associate Account with project
Adds a link to an Account.
{
scopeType:PROJECT
defaultAccount:boolean
linkType:IMPORTED | MANUAL
name:string
key:string
accountId:number
scope:number
id:number
}
:param project_id:
:param account_id
:param default_account
:param link_type
:return:
"""
data = {}
if account_id:
data['accountId'] = account_id
if default_account:
data['defaultAccount'] = default_account
if link_type:
data['linkType'] = link_type
if project_id:
data['scope'] = project_id
data['scopeType'] = 'PROJECT'
url = 'rest/tempo-accounts/1/link/'
return self.post(url, data=data) |
def database_dsn(self):
"""Substitute the root dir into the database DSN, for Sqlite"""
if not self._config.library.database:
return 'sqlite:///{root}/library.db'.format(root=self._root)
return self._config.library.database.format(root=self._root) | Substitute the root dir into the database DSN, for Sqlite | Below is the the instruction that describes the task:
### Input:
Substitute the root dir into the database DSN, for Sqlite
### Response:
def database_dsn(self):
"""Substitute the root dir into the database DSN, for Sqlite"""
if not self._config.library.database:
return 'sqlite:///{root}/library.db'.format(root=self._root)
return self._config.library.database.format(root=self._root) |
def _get_runner(classpath, main, jvm_options, args, executor,
cwd, distribution,
create_synthetic_jar, synthetic_jar_dir):
"""Gets the java runner for execute_java and execute_java_async."""
executor = executor or SubprocessExecutor(distribution)
safe_cp = classpath
if create_synthetic_jar:
safe_cp = safe_classpath(classpath, synthetic_jar_dir)
logger.debug('Bundling classpath {} into {}'.format(':'.join(classpath), safe_cp))
return executor.runner(safe_cp, main, args=args, jvm_options=jvm_options, cwd=cwd) | Gets the java runner for execute_java and execute_java_async. | Below is the the instruction that describes the task:
### Input:
Gets the java runner for execute_java and execute_java_async.
### Response:
def _get_runner(classpath, main, jvm_options, args, executor,
cwd, distribution,
create_synthetic_jar, synthetic_jar_dir):
"""Gets the java runner for execute_java and execute_java_async."""
executor = executor or SubprocessExecutor(distribution)
safe_cp = classpath
if create_synthetic_jar:
safe_cp = safe_classpath(classpath, synthetic_jar_dir)
logger.debug('Bundling classpath {} into {}'.format(':'.join(classpath), safe_cp))
return executor.runner(safe_cp, main, args=args, jvm_options=jvm_options, cwd=cwd) |
def convert(values, source_measure_or_unit_abbreviation, target_measure_or_unit_abbreviation):
"""
Convert a value or a list of values from an unit to another one.
The two units must represent the same physical dimension.
"""
source_dimension = get_dimension_by_unit_measure_or_abbreviation(source_measure_or_unit_abbreviation)
target_dimension = get_dimension_by_unit_measure_or_abbreviation(target_measure_or_unit_abbreviation)
if source_dimension == target_dimension:
source=JSONObject({})
target=JSONObject({})
source.unit_abbreviation, source.factor = _parse_unit(source_measure_or_unit_abbreviation)
target.unit_abbreviation, target.factor = _parse_unit(target_measure_or_unit_abbreviation)
source.unit_data = get_unit_by_abbreviation(source.unit_abbreviation)
target.unit_data = get_unit_by_abbreviation(target.unit_abbreviation)
source.conv_factor = JSONObject({'lf': source.unit_data.lf, 'cf': source.unit_data.cf})
target.conv_factor = JSONObject({'lf': target.unit_data.lf, 'cf': target.unit_data.cf})
if isinstance(values, float):
# If values is a float => returns a float
return (source.conv_factor.lf / target.conv_factor.lf * (source.factor * values)
+ (source.conv_factor.cf - target.conv_factor.cf)
/ target.conv_factor.lf) / target.factor
elif isinstance(values, list):
# If values is a list of floats => returns a list of floats
return [(source.conv_factor.lf / target.conv_factor.lf * (source.factor * value)
+ (source.conv_factor.cf - target.conv_factor.cf)
/ target.conv_factor.lf) / target.factor for value in values]
else:
raise HydraError("Unit conversion: dimensions are not consistent.") | Convert a value or a list of values from an unit to another one.
The two units must represent the same physical dimension. | Below is the the instruction that describes the task:
### Input:
Convert a value or a list of values from an unit to another one.
The two units must represent the same physical dimension.
### Response:
def convert(values, source_measure_or_unit_abbreviation, target_measure_or_unit_abbreviation):
"""
Convert a value or a list of values from an unit to another one.
The two units must represent the same physical dimension.
"""
source_dimension = get_dimension_by_unit_measure_or_abbreviation(source_measure_or_unit_abbreviation)
target_dimension = get_dimension_by_unit_measure_or_abbreviation(target_measure_or_unit_abbreviation)
if source_dimension == target_dimension:
source=JSONObject({})
target=JSONObject({})
source.unit_abbreviation, source.factor = _parse_unit(source_measure_or_unit_abbreviation)
target.unit_abbreviation, target.factor = _parse_unit(target_measure_or_unit_abbreviation)
source.unit_data = get_unit_by_abbreviation(source.unit_abbreviation)
target.unit_data = get_unit_by_abbreviation(target.unit_abbreviation)
source.conv_factor = JSONObject({'lf': source.unit_data.lf, 'cf': source.unit_data.cf})
target.conv_factor = JSONObject({'lf': target.unit_data.lf, 'cf': target.unit_data.cf})
if isinstance(values, float):
# If values is a float => returns a float
return (source.conv_factor.lf / target.conv_factor.lf * (source.factor * values)
+ (source.conv_factor.cf - target.conv_factor.cf)
/ target.conv_factor.lf) / target.factor
elif isinstance(values, list):
# If values is a list of floats => returns a list of floats
return [(source.conv_factor.lf / target.conv_factor.lf * (source.factor * value)
+ (source.conv_factor.cf - target.conv_factor.cf)
/ target.conv_factor.lf) / target.factor for value in values]
else:
raise HydraError("Unit conversion: dimensions are not consistent.") |
def inverse_transform(self, X):
"""Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
"""
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.columns_)
elif isinstance(X, da.Array):
# later on we concat(..., axis=1), which requires
# known divisions. Suboptimal, but I think unavoidable.
unknown = np.isnan(X.chunks[0]).any()
if unknown:
lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute()
X = X.copy()
chunks = (tuple(lengths), X.chunks[1])
X._chunks = chunks
X = dd.from_dask_array(X, columns=self.columns_)
big = isinstance(X, dd.DataFrame)
if big:
chunks = np.array(X.divisions)
chunks[-1] = chunks[-1] + 1
chunks = tuple(chunks[1:] - chunks[:-1])
X = X.copy()
for col in self.categorical_columns_:
if _HAS_CTD:
dtype = self.dtypes_[col]
categories, ordered = dtype.categories, dtype.ordered
else:
categories, ordered = self.dtypes_[col]
# use .values to avoid warning from pandas
codes = X[col].values
if big:
# dask
codes._chunks = (chunks,)
# Need a Categorical.from_codes for dask
series = (
dd.from_dask_array(codes, columns=col)
.astype("category")
.cat.set_categories(np.arange(len(categories)), ordered=ordered)
.cat.rename_categories(categories)
)
# Bug in pandas <= 0.20.3 lost name
if series.name is None:
series.name = col
series.divisions = X.divisions
else:
# pandas
series = pd.Series(
pd.Categorical.from_codes(codes, categories, ordered=ordered),
name=col,
)
X[col] = series
return X | Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame | Below is the the instruction that describes the task:
### Input:
Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
### Response:
def inverse_transform(self, X):
"""Inverse ordinal-encode the columns in `X`
Parameters
----------
X : array or dataframe
Either the NumPy, dask, or pandas version
Returns
-------
data : DataFrame
Dask array or dataframe will return a Dask DataFrame.
Numpy array or pandas dataframe will return a pandas DataFrame
"""
if isinstance(X, np.ndarray):
X = pd.DataFrame(X, columns=self.columns_)
elif isinstance(X, da.Array):
# later on we concat(..., axis=1), which requires
# known divisions. Suboptimal, but I think unavoidable.
unknown = np.isnan(X.chunks[0]).any()
if unknown:
lengths = blockwise(len, "i", X[:, 0], "i", dtype="i8").compute()
X = X.copy()
chunks = (tuple(lengths), X.chunks[1])
X._chunks = chunks
X = dd.from_dask_array(X, columns=self.columns_)
big = isinstance(X, dd.DataFrame)
if big:
chunks = np.array(X.divisions)
chunks[-1] = chunks[-1] + 1
chunks = tuple(chunks[1:] - chunks[:-1])
X = X.copy()
for col in self.categorical_columns_:
if _HAS_CTD:
dtype = self.dtypes_[col]
categories, ordered = dtype.categories, dtype.ordered
else:
categories, ordered = self.dtypes_[col]
# use .values to avoid warning from pandas
codes = X[col].values
if big:
# dask
codes._chunks = (chunks,)
# Need a Categorical.from_codes for dask
series = (
dd.from_dask_array(codes, columns=col)
.astype("category")
.cat.set_categories(np.arange(len(categories)), ordered=ordered)
.cat.rename_categories(categories)
)
# Bug in pandas <= 0.20.3 lost name
if series.name is None:
series.name = col
series.divisions = X.divisions
else:
# pandas
series = pd.Series(
pd.Categorical.from_codes(codes, categories, ordered=ordered),
name=col,
)
X[col] = series
return X |
def refinements(self):
"""Refinements for the visualization"""
# Show sticks for all residues interacing with the ligand
cmd.select('AllBSRes', 'byres (Hydrophobic-P or HBondDonor-P or HBondAccept-P or PosCharge-P or NegCharge-P or '
'StackRings-P or PiCatRing-P or HalogenAcc or Metal-P)')
cmd.show('sticks', 'AllBSRes')
# Show spheres for the ring centroids
cmd.hide('everything', 'centroids*')
cmd.show('nb_spheres', 'centroids*')
# Show spheres for centers of charge
if self.object_exists('Chargecenter-P') or self.object_exists('Chargecenter-L'):
cmd.hide('nonbonded', 'chargecenter*')
cmd.show('spheres', 'chargecenter*')
cmd.set('sphere_scale', 0.4, 'chargecenter*')
cmd.color('yellow', 'chargecenter*')
cmd.set('valence', 1) # Show bond valency (e.g. double bonds)
# Optional cartoon representation of the protein
cmd.copy('%sCartoon' % self.protname, self.protname)
cmd.show('cartoon', '%sCartoon' % self.protname)
cmd.show('sticks', '%sCartoon' % self.protname)
cmd.set('stick_transparency', 1, '%sCartoon' % self.protname)
# Resize water molecules. Sometimes they are not heteroatoms HOH, but part of the protein
cmd.set('sphere_scale', 0.2, 'resn HOH or Water') # Needs to be done here because of the copy made
cmd.set('sphere_transparency', 0.4, '!(resn HOH or Water)')
if 'Centroids*' in cmd.get_names("selections"):
cmd.color('grey80', 'Centroids*')
cmd.hide('spheres', '%sCartoon' % self.protname)
cmd.hide('cartoon', '%sCartoon and resn DA+DG+DC+DU+DT+A+G+C+U+T' % self.protname) # Hide DNA/RNA Cartoon
if self.ligname == 'SF4': # Special case for iron-sulfur clusters, can't be visualized with sticks
cmd.show('spheres', '%s' % self.ligname)
cmd.hide('everything', 'resn HOH &!Water') # Hide all non-interacting water molecules
cmd.hide('sticks', '%s and !%s and !AllBSRes' %
(self.protname, self.ligname)) # Hide all non-interacting residues
if self.ligandtype in ['PEPTIDE', 'INTRA']:
self.adapt_for_peptides()
if self.ligandtype == 'INTRA':
self.adapt_for_intra() | Refinements for the visualization | Below is the the instruction that describes the task:
### Input:
Refinements for the visualization
### Response:
def refinements(self):
"""Refinements for the visualization"""
# Show sticks for all residues interacing with the ligand
cmd.select('AllBSRes', 'byres (Hydrophobic-P or HBondDonor-P or HBondAccept-P or PosCharge-P or NegCharge-P or '
'StackRings-P or PiCatRing-P or HalogenAcc or Metal-P)')
cmd.show('sticks', 'AllBSRes')
# Show spheres for the ring centroids
cmd.hide('everything', 'centroids*')
cmd.show('nb_spheres', 'centroids*')
# Show spheres for centers of charge
if self.object_exists('Chargecenter-P') or self.object_exists('Chargecenter-L'):
cmd.hide('nonbonded', 'chargecenter*')
cmd.show('spheres', 'chargecenter*')
cmd.set('sphere_scale', 0.4, 'chargecenter*')
cmd.color('yellow', 'chargecenter*')
cmd.set('valence', 1) # Show bond valency (e.g. double bonds)
# Optional cartoon representation of the protein
cmd.copy('%sCartoon' % self.protname, self.protname)
cmd.show('cartoon', '%sCartoon' % self.protname)
cmd.show('sticks', '%sCartoon' % self.protname)
cmd.set('stick_transparency', 1, '%sCartoon' % self.protname)
# Resize water molecules. Sometimes they are not heteroatoms HOH, but part of the protein
cmd.set('sphere_scale', 0.2, 'resn HOH or Water') # Needs to be done here because of the copy made
cmd.set('sphere_transparency', 0.4, '!(resn HOH or Water)')
if 'Centroids*' in cmd.get_names("selections"):
cmd.color('grey80', 'Centroids*')
cmd.hide('spheres', '%sCartoon' % self.protname)
cmd.hide('cartoon', '%sCartoon and resn DA+DG+DC+DU+DT+A+G+C+U+T' % self.protname) # Hide DNA/RNA Cartoon
if self.ligname == 'SF4': # Special case for iron-sulfur clusters, can't be visualized with sticks
cmd.show('spheres', '%s' % self.ligname)
cmd.hide('everything', 'resn HOH &!Water') # Hide all non-interacting water molecules
cmd.hide('sticks', '%s and !%s and !AllBSRes' %
(self.protname, self.ligname)) # Hide all non-interacting residues
if self.ligandtype in ['PEPTIDE', 'INTRA']:
self.adapt_for_peptides()
if self.ligandtype == 'INTRA':
self.adapt_for_intra() |
def _init_db(self):
        """Connect to the database and get new session number.

        Registers sqlite3 adapters/converters for the custom 'dict' and
        'bufs' column types, opens the database file, finds a table whose
        existing schema matches the expected format (falling back to
        <name>_1, <name>_2, ... otherwise), and creates it if missing.
        """
        # register adapters so dict/bufs columns round-trip transparently
        sqlite3.register_adapter(dict, _adapt_dict)
        sqlite3.register_converter('dict', _convert_dict)
        sqlite3.register_adapter(list, _adapt_bufs)
        sqlite3.register_converter('bufs', _convert_bufs)
        # connect to the db
        dbfile = os.path.join(self.location, self.filename)
        # PARSE_DECLTYPES makes sqlite3 apply the converters registered above
        self._db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES,
            # isolation_level = None)#,
            cached_statements=64)
        # print dir(self._db)
        # Probe table names until _check_table() accepts one: keep the
        # configured name if possible, else append _1, _2, ...
        first_table = previous_table = self.table
        i=0
        while not self._check_table():
            i+=1
            self.table = first_table+'_%i'%i
            # NOTE(review): log.warn is deprecated in favour of log.warning
            # on modern logging APIs -- confirm before changing.
            self.log.warn(
                "Table %s exists and doesn't match db format, trying %s"%
                (previous_table, self.table)
            )
            previous_table = self.table
        # Table names cannot be bound as SQL parameters, hence the %s
        # interpolation; self.table is program-chosen, not user input.
        self._db.execute("""CREATE TABLE IF NOT EXISTS %s
                (msg_id text PRIMARY KEY,
                header dict text,
                content dict text,
                buffers bufs blob,
                submitted timestamp,
                client_uuid text,
                engine_uuid text,
                started timestamp,
                completed timestamp,
                resubmitted text,
                received timestamp,
                result_header dict text,
                result_content dict text,
                result_buffers bufs blob,
                queue text,
                pyin text,
                pyout text,
                pyerr text,
                stdout text,
                stderr text)
                """%self.table)
        self._db.commit() | Connect to the database and get new session number. | Below is the the instruction that describes the task:
### Input:
Connect to the database and get new session number.
### Response:
def _init_db(self):
"""Connect to the database and get new session number."""
# register adapters
sqlite3.register_adapter(dict, _adapt_dict)
sqlite3.register_converter('dict', _convert_dict)
sqlite3.register_adapter(list, _adapt_bufs)
sqlite3.register_converter('bufs', _convert_bufs)
# connect to the db
dbfile = os.path.join(self.location, self.filename)
self._db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES,
# isolation_level = None)#,
cached_statements=64)
# print dir(self._db)
first_table = previous_table = self.table
i=0
while not self._check_table():
i+=1
self.table = first_table+'_%i'%i
self.log.warn(
"Table %s exists and doesn't match db format, trying %s"%
(previous_table, self.table)
)
previous_table = self.table
self._db.execute("""CREATE TABLE IF NOT EXISTS %s
(msg_id text PRIMARY KEY,
header dict text,
content dict text,
buffers bufs blob,
submitted timestamp,
client_uuid text,
engine_uuid text,
started timestamp,
completed timestamp,
resubmitted text,
received timestamp,
result_header dict text,
result_content dict text,
result_buffers bufs blob,
queue text,
pyin text,
pyout text,
pyerr text,
stdout text,
stderr text)
"""%self.table)
self._db.commit() |
def valid_conkey(self, conkey):
"""Check that the conkey is a valid one. Return True if valid. A
condition key is valid if it is one in the _COND_PREFIXES
list. With the prefix removed, the remaining string must be
either a number or the empty string."""
for prefix in _COND_PREFIXES:
trailing = conkey.lstrip(prefix)
if trailing == '' and conkey: # conkey is not empty
return True
try:
int(trailing)
return True
except ValueError:
pass
return False | Check that the conkey is a valid one. Return True if valid. A
condition key is valid if it is one in the _COND_PREFIXES
list. With the prefix removed, the remaining string must be
either a number or the empty string. | Below is the instruction that describes the task:
### Input:
Check that the conkey is a valid one. Return True if valid. A
condition key is valid if it is one in the _COND_PREFIXES
list. With the prefix removed, the remaining string must be
either a number or the empty string.
### Response:
def valid_conkey(self, conkey):
"""Check that the conkey is a valid one. Return True if valid. A
condition key is valid if it is one in the _COND_PREFIXES
list. With the prefix removed, the remaining string must be
either a number or the empty string."""
for prefix in _COND_PREFIXES:
trailing = conkey.lstrip(prefix)
if trailing == '' and conkey: # conkey is not empty
return True
try:
int(trailing)
return True
except ValueError:
pass
return False |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.