code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def active_trail_nodes(self, variables, observed=None):
"""
Returns a dictionary with the given variables as keys and all the nodes reachable
from that respective variable as values.
Parameters
----------
variables: str or array like
variables whose active trails are to be found.
observed : List of nodes (optional)
If given the active trails would be computed assuming these nodes to be observed.
Examples
--------
>>> from pgmpy.base import DAG
>>> student = DAG()
>>> student.add_nodes_from(['diff', 'intel', 'grades'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
>>> student.active_trail_nodes('diff')
{'diff': {'diff', 'grades'}}
>>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
{'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}
References
----------
Details of the algorithm can be found in 'Probabilistic Graphical Model
Principles and Techniques' - Koller and Friedman
Page 75 Algorithm 3.1
"""
if observed:
observed_list = observed if isinstance(observed, (list, tuple)) else [observed]
else:
observed_list = []
ancestors_list = self._get_ancestors_of(observed_list)
# Direction of flow of information
# up -> from parent to child
# down -> from child to parent
active_trails = {}
for start in variables if isinstance(variables, (list, tuple)) else [variables]:
visit_list = set()
visit_list.add((start, 'up'))
traversed_list = set()
active_nodes = set()
while visit_list:
node, direction = visit_list.pop()
if (node, direction) not in traversed_list:
if node not in observed_list:
active_nodes.add(node)
traversed_list.add((node, direction))
if direction == 'up' and node not in observed_list:
for parent in self.predecessors(node):
visit_list.add((parent, 'up'))
for child in self.successors(node):
visit_list.add((child, 'down'))
elif direction == 'down':
if node not in observed_list:
for child in self.successors(node):
visit_list.add((child, 'down'))
if node in ancestors_list:
for parent in self.predecessors(node):
visit_list.add((parent, 'up'))
active_trails[start] = active_nodes
return active_trails | Returns a dictionary with the given variables as keys and all the nodes reachable
from that respective variable as values.
Parameters
----------
variables: str or array like
variables whose active trails are to be found.
observed : List of nodes (optional)
If given the active trails would be computed assuming these nodes to be observed.
Examples
--------
>>> from pgmpy.base import DAG
>>> student = DAG()
>>> student.add_nodes_from(['diff', 'intel', 'grades'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
>>> student.active_trail_nodes('diff')
{'diff': {'diff', 'grades'}}
>>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
{'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}
References
----------
Details of the algorithm can be found in 'Probabilistic Graphical Model
Principles and Techniques' - Koller and Friedman
Page 75 Algorithm 3.1 | Below is the instruction that describes the task:
### Input:
Returns a dictionary with the given variables as keys and all the nodes reachable
from that respective variable as values.
Parameters
----------
variables: str or array like
variables whose active trails are to be found.
observed : List of nodes (optional)
If given the active trails would be computed assuming these nodes to be observed.
Examples
--------
>>> from pgmpy.base import DAG
>>> student = DAG()
>>> student.add_nodes_from(['diff', 'intel', 'grades'])
>>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
>>> student.active_trail_nodes('diff')
{'diff': {'diff', 'grades'}}
>>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
{'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}
References
----------
Details of the algorithm can be found in 'Probabilistic Graphical Model
Principles and Techniques' - Koller and Friedman
Page 75 Algorithm 3.1
### Response:
def active_trail_nodes(self, variables, observed=None):
    """
    Returns a dictionary with the given variables as keys and all the nodes reachable
    from that respective variable as values.

    Parameters
    ----------
    variables: str or array like
        variables whose active trails are to be found.
    observed : List of nodes (optional)
        If given the active trails would be computed assuming these nodes to be observed.

    Examples
    --------
    >>> from pgmpy.base import DAG
    >>> student = DAG()
    >>> student.add_nodes_from(['diff', 'intel', 'grades'])
    >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
    >>> student.active_trail_nodes('diff')
    {'diff': {'diff', 'grades'}}
    >>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
    {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}

    References
    ----------
    Details of the algorithm can be found in 'Probabilistic Graphical Model
    Principles and Techniques' - Koller and Friedman
    Page 75 Algorithm 3.1
    """
    # Normalize `observed` to a list so the membership tests below work
    # uniformly whether a single node or a collection was passed.
    if observed:
        observed_list = observed if isinstance(observed, (list, tuple)) else [observed]
    else:
        observed_list = []
    # Ancestors of observed nodes are needed to detect active v-structures
    # (colliders): X -> Z <- Y is active when Z or a descendant of Z is observed.
    ancestors_list = self._get_ancestors_of(observed_list)
    # Direction of flow of information
    # up -> from parent to child
    # down -> from child to parent
    active_trails = {}
    for start in variables if isinstance(variables, (list, tuple)) else [variables]:
        # Each queue entry is a (node, direction) pair; the direction records
        # how the trail arrived at the node, which constrains where it may go.
        visit_list = set()
        visit_list.add((start, 'up'))
        traversed_list = set()  # (node, direction) pairs already expanded
        active_nodes = set()
        while visit_list:
            node, direction = visit_list.pop()
            if (node, direction) not in traversed_list:
                # Observed nodes block the trail and are never "reachable".
                if node not in observed_list:
                    active_nodes.add(node)
                traversed_list.add((node, direction))
                if direction == 'up' and node not in observed_list:
                    # Arrived from a child: may continue to both parents and children.
                    for parent in self.predecessors(node):
                        visit_list.add((parent, 'up'))
                    for child in self.successors(node):
                        visit_list.add((child, 'down'))
                elif direction == 'down':
                    # Arrived from a parent: children reachable if unobserved ...
                    if node not in observed_list:
                        for child in self.successors(node):
                            visit_list.add((child, 'down'))
                    # ... and parents reachable only through an active v-structure.
                    if node in ancestors_list:
                        for parent in self.predecessors(node):
                            visit_list.add((parent, 'up'))
        active_trails[start] = active_nodes
    return active_trails
def affine_rotation_matrix(angle=(-20, 20)):
"""Create an affine transform matrix for image rotation.
NOTE: In OpenCV, x is width and y is height.
Parameters
-----------
angle : int/float or tuple of two int/float
Degree to rotate, usually -180 ~ 180.
- int/float, a fixed angle.
- tuple of 2 floats/ints, randomly sample a value as the angle between these 2 values.
Returns
-------
numpy.array
An affine transform matrix.
"""
if isinstance(angle, tuple):
theta = np.pi / 180 * np.random.uniform(angle[0], angle[1])
else:
theta = np.pi / 180 * angle
rotation_matrix = np.array([[np.cos(theta), np.sin(theta), 0], \
[-np.sin(theta), np.cos(theta), 0], \
[0, 0, 1]])
return rotation_matrix | Create an affine transform matrix for image rotation.
NOTE: In OpenCV, x is width and y is height.
Parameters
-----------
angle : int/float or tuple of two int/float
Degree to rotate, usually -180 ~ 180.
- int/float, a fixed angle.
- tuple of 2 floats/ints, randomly sample a value as the angle between these 2 values.
Returns
-------
numpy.array
An affine transform matrix. | Below is the instruction that describes the task:
### Input:
Create an affine transform matrix for image rotation.
NOTE: In OpenCV, x is width and y is height.
Parameters
-----------
angle : int/float or tuple of two int/float
Degree to rotate, usually -180 ~ 180.
- int/float, a fixed angle.
- tuple of 2 floats/ints, randomly sample a value as the angle between these 2 values.
Returns
-------
numpy.array
An affine transform matrix.
### Response:
def affine_rotation_matrix(angle=(-20, 20)):
    """Create an affine transform matrix for image rotation.
    NOTE: In OpenCV, x is width and y is height.

    Parameters
    -----------
    angle : int/float or tuple/list of two int/float
        Degree to rotate, usually -180 ~ 180.
            - int/float, a fixed angle.
            - tuple/list of 2 floats/ints, randomly sample a value as the
              angle between these 2 values.

    Returns
    -------
    numpy.array
        A 3x3 affine transform matrix in homogeneous coordinates.
    """
    # Generalization: accept a list as well as a tuple for the random-range
    # form.  Previously a list fell into the fixed-angle branch and produced
    # a malformed (array-valued) matrix.
    if isinstance(angle, (tuple, list)):
        theta = np.pi / 180 * np.random.uniform(angle[0], angle[1])
    else:
        theta = np.pi / 180 * angle
    # 2D rotation in homogeneous form; sign convention matches the original
    # implementation (positive angles rotate clockwise in image coordinates).
    rotation_matrix = np.array(
        [[np.cos(theta), np.sin(theta), 0],
         [-np.sin(theta), np.cos(theta), 0],
         [0, 0, 1]])
    return rotation_matrix
def getchannel(samples: np.ndarray, ch:int) -> np.ndarray:
"""
Returns a view into a channel of samples.
samples : a numpy array representing the audio data
ch : the channel to extract (channels begin with 0)
"""
N = numchannels(samples)
if ch > (N - 1):
raise ValueError("channel %d out of range" % ch)
if N == 1:
return samples
return samples[:, ch] | Returns a view into a channel of samples.
samples : a numpy array representing the audio data
ch : the channel to extract (channels begin with 0) | Below is the instruction that describes the task:
### Input:
Returns a view into a channel of samples.
samples : a numpy array representing the audio data
ch : the channel to extract (channels begin with 0)
### Response:
def getchannel(samples: np.ndarray, ch: int) -> np.ndarray:
    """
    Returns a view into a channel of samples.

    samples : a numpy array representing the audio data; mono data may be
        1-D, multichannel data is indexed as samples[:, ch]
    ch : the channel to extract (channels begin with 0)

    Raises ValueError if `ch` does not address an existing channel.
    """
    N = numchannels(samples)
    # Bug fix: also reject negative indices.  Previously e.g. ch=-1 passed
    # the `ch > N - 1` check and silently wrapped to the last channel.
    if not 0 <= ch < N:
        raise ValueError("channel %d out of range" % ch)
    if N == 1:
        # Mono data has no channel axis; the array itself is the channel.
        return samples
    return samples[:, ch]
def repeat_all(self, count=2):
""" Repeat this entire Control code a number of times.
Returns a new Control with this one's data repeated.
"""
try:
return self.__class__(''.join(str(self) * count))
except TypeError:
raise TypeError(
'`count` must be an integer. Got: {!r}'.format(count)
) | Repeat this entire Control code a number of times.
Returns a new Control with this one's data repeated. | Below is the the instruction that describes the task:
### Input:
Repeat this entire Control code a number of times.
Returns a new Control with this one's data repeated.
### Response:
def repeat_all(self, count=2):
    """ Repeat this entire Control code a number of times.
        Returns a new Control with this one's data repeated.

        Raises TypeError if `count` is not an integer.
    """
    try:
        # `str * int` already yields the repeated string; the original
        # wrapped it in a pointless ''.join(...) over its own characters.
        return self.__class__(str(self) * count)
    except TypeError:
        raise TypeError(
            '`count` must be an integer. Got: {!r}'.format(count)
        )
def change_event_actions(self, handler, actions):
"""
This allows the client to change the actions for an event, in the case that there is a desire for slightly different behavior, such as reasigning keys.
handler - the handler object that the desired changes are made to.
actions - The methods that are called when this handler is varified against the current event.
"""
if not isinstance(handler, Handler):
raise TypeError("given object must be of type Handler.")
if not self.remove_handler(handler):
raise ValueError("You must pass in a valid handler that already exists.")
self.add_handler(handler.type, actions, handler.params)
self.event = handler.event | This allows the client to change the actions for an event, in the case that there is a desire for slightly different behavior, such as reasigning keys.
handler - the handler object that the desired changes are made to.
actions - The methods that are called when this handler is varified against the current event. | Below is the the instruction that describes the task:
### Input:
This allows the client to change the actions for an event, in the case that there is a desire for slightly different behavior, such as reasigning keys.
handler - the handler object that the desired changes are made to.
actions - The methods that are called when this handler is varified against the current event.
### Response:
def change_event_actions(self, handler, actions):
    """
    Replace the actions of an existing handler, for cases where slightly
    different behavior is desired, such as reassigning keys.

    handler - the handler object that the desired changes are made to.
    actions - the methods that are called when this handler is verified
        against the current event.

    Raises TypeError if `handler` is not a Handler, and ValueError if it is
    not currently registered.
    """
    if not isinstance(handler, Handler):
        raise TypeError("given object must be of type Handler.")
    # remove_handler returns falsy when the handler was never registered.
    if not self.remove_handler(handler):
        raise ValueError("You must pass in a valid handler that already exists.")
    # Re-register with the same type/params but the new actions.
    self.add_handler(handler.type, actions, handler.params)
    self.event = handler.event
def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region,
provider_region):
"""Returns the appropriate region to use when uploading functions.
Select the appropriate region for the bucket where lambdas are uploaded in.
Args:
custom_bucket (str, None): The custom bucket name provided by the
`bucket` kwarg of the aws_lambda hook, if provided.
hook_region (str): The contents of the `bucket_region` argument to
the hook.
stacker_bucket_region (str): The contents of the
`stacker_bucket_region` global setting.
provider_region (str): The region being used by the provider.
Returns:
str: The appropriate region string.
"""
region = None
if custom_bucket:
region = hook_region
else:
region = stacker_bucket_region
return region or provider_region | Returns the appropriate region to use when uploading functions.
Select the appropriate region for the bucket where lambdas are uploaded in.
Args:
custom_bucket (str, None): The custom bucket name provided by the
`bucket` kwarg of the aws_lambda hook, if provided.
hook_region (str): The contents of the `bucket_region` argument to
the hook.
stacker_bucket_region (str): The contents of the
`stacker_bucket_region` global setting.
provider_region (str): The region being used by the provider.
Returns:
str: The appropriate region string. | Below is the the instruction that describes the task:
### Input:
Returns the appropriate region to use when uploading functions.
Select the appropriate region for the bucket where lambdas are uploaded in.
Args:
custom_bucket (str, None): The custom bucket name provided by the
`bucket` kwarg of the aws_lambda hook, if provided.
hook_region (str): The contents of the `bucket_region` argument to
the hook.
stacker_bucket_region (str): The contents of the
`stacker_bucket_region` global setting.
provider_region (str): The region being used by the provider.
Returns:
str: The appropriate region string.
### Response:
def select_bucket_region(custom_bucket, hook_region, stacker_bucket_region,
                         provider_region):
    """Returns the appropriate region to use when uploading functions.

    Select the appropriate region for the bucket where lambdas are uploaded in.

    Args:
        custom_bucket (str, None): The custom bucket name provided by the
            `bucket` kwarg of the aws_lambda hook, if provided.
        hook_region (str): The contents of the `bucket_region` argument to
            the hook.
        stacker_bucket_region (str): The contents of the
            `stacker_bucket_region` global setting.
        provider_region (str): The region being used by the provider.

    Returns:
        str: The appropriate region string.
    """
    # A custom bucket takes its region from the hook; otherwise use the
    # global stacker setting.  Either way, fall back to the provider's
    # region when the preferred value is empty.
    preferred = hook_region if custom_bucket else stacker_bucket_region
    return preferred or provider_region
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs) | Create a tuple of all values of *obj*'s *attrs*. | Below is the the instruction that describes the task:
### Input:
Create a tuple of all values of *obj*'s *attrs*.
### Response:
def _attrs_to_tuple(obj, attrs):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a.name) for a in attrs) |
def all_equal(iterable):
"""Checks whether all items in an iterable are equal.
Parameters
----------
iterable
An iterable, e.g. a string og a list.
Returns
-------
boolean
True or False.
Examples
-------
>>> all_equal([2, 2, 2])
True
>>> all_equal([1, 2, 3])
False
"""
if len(iterable) in [0, 1]:
return False
first = iterable[0]
return all([first == i for i in iterable[1:]]) | Checks whether all items in an iterable are equal.
Parameters
----------
iterable
An iterable, e.g. a string og a list.
Returns
-------
boolean
True or False.
Examples
-------
>>> all_equal([2, 2, 2])
True
>>> all_equal([1, 2, 3])
False | Below is the the instruction that describes the task:
### Input:
Checks whether all items in an iterable are equal.
Parameters
----------
iterable
An iterable, e.g. a string og a list.
Returns
-------
boolean
True or False.
Examples
-------
>>> all_equal([2, 2, 2])
True
>>> all_equal([1, 2, 3])
False
### Response:
def all_equal(iterable):
    """Checks whether all items in an iterable are equal.

    Note: sequences with fewer than two items are reported as not equal
    (False), matching the original behavior.

    Parameters
    ----------
    iterable
        A sized, indexable sequence, e.g. a string or a list.

    Returns
    -------
    boolean
        True or False.

    Examples
    -------
    >>> all_equal([2, 2, 2])
    True
    >>> all_equal([1, 2, 3])
    False
    """
    if len(iterable) < 2:
        return False
    reference = iterable[0]
    return all(reference == item for item in iterable[1:])
def modify_event(uid):
'''Modify an event specified by its uid. The modifications for the event
are expected as JSON with the content type correctly set in the request.
Note that this method works for recorded events only. Upcoming events part
of the scheduler cache cannot be modified.
'''
try:
data = request.get_json()['data'][0]
if data['type'] != 'event' or data['id'] != uid:
return make_error_response('Invalid data', 400)
# Check attributes
for key in data['attributes'].keys():
if key not in ('status', 'start', 'end'):
return make_error_response('Invalid data', 400)
# Check new status
new_status = data['attributes'].get('status')
if new_status:
new_status = new_status.upper().replace(' ', '_')
data['attributes']['status'] = int(getattr(Status, new_status))
except Exception:
return make_error_response('Invalid data', 400)
db = get_session()
event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
if not event:
return make_error_response('No event with specified uid', 404)
event.start = data['attributes'].get('start', event.start)
event.end = data['attributes'].get('end', event.end)
event.status = data['attributes'].get('status', event.status)
logger.debug('Updating event %s via api', uid)
db.commit()
return make_data_response(event.serialize()) | Modify an event specified by its uid. The modifications for the event
are expected as JSON with the content type correctly set in the request.
Note that this method works for recorded events only. Upcoming events part
of the scheduler cache cannot be modified. | Below is the the instruction that describes the task:
### Input:
Modify an event specified by its uid. The modifications for the event
are expected as JSON with the content type correctly set in the request.
Note that this method works for recorded events only. Upcoming events part
of the scheduler cache cannot be modified.
### Response:
def modify_event(uid):
    '''Modify an event specified by its uid. The modifications for the event
    are expected as JSON with the content type correctly set in the request.
    Note that this method works for recorded events only. Upcoming events part
    of the scheduler cache cannot be modified.
    '''
    try:
        data = request.get_json()['data'][0]
        # The payload must be an event object whose id matches the URL uid.
        if data['type'] != 'event' or data['id'] != uid:
            return make_error_response('Invalid data', 400)
        # Check attributes: only these fields may be modified.
        for key in data['attributes'].keys():
            if key not in ('status', 'start', 'end'):
                return make_error_response('Invalid data', 400)
        # Check new status: map a human readable name (e.g. "partial recording")
        # onto the numeric Status enum value.
        new_status = data['attributes'].get('status')
        if new_status:
            new_status = new_status.upper().replace(' ', '_')
            data['attributes']['status'] = int(getattr(Status, new_status))
    except Exception:
        # Any malformed payload (missing keys, unknown status name, no JSON
        # body) is reported uniformly as a client error.
        return make_error_response('Invalid data', 400)
    db = get_session()
    event = db.query(RecordedEvent).filter(RecordedEvent.uid == uid).first()
    if not event:
        return make_error_response('No event with specified uid', 404)
    # Apply only the attributes that were provided, keeping current values
    # for the rest.
    event.start = data['attributes'].get('start', event.start)
    event.end = data['attributes'].get('end', event.end)
    event.status = data['attributes'].get('status', event.status)
    logger.debug('Updating event %s via api', uid)
    db.commit()
    return make_data_response(event.serialize())
def query_loci(self, filter_column, filter_value, feature):
"""
Query for loci satisfying a given filter and feature type.
Parameters
----------
filter_column : str
Name of column to filter results by.
filter_value : str
Only return loci which have this value in the their filter_column.
feature : str
Feature names such as 'transcript', 'gene', and 'exon'
Returns list of Locus objects
"""
# list of values containing (contig, start, stop, strand)
result_tuples = self.query(
select_column_names=["seqname", "start", "end", "strand"],
filter_column=filter_column,
filter_value=filter_value,
feature=feature,
distinct=True,
required=True)
return [
Locus(contig, start, end, strand)
for (contig, start, end, strand)
in result_tuples
] | Query for loci satisfying a given filter and feature type.
Parameters
----------
filter_column : str
Name of column to filter results by.
filter_value : str
Only return loci which have this value in the their filter_column.
feature : str
Feature names such as 'transcript', 'gene', and 'exon'
Returns list of Locus objects | Below is the the instruction that describes the task:
### Input:
Query for loci satisfying a given filter and feature type.
Parameters
----------
filter_column : str
Name of column to filter results by.
filter_value : str
Only return loci which have this value in the their filter_column.
feature : str
Feature names such as 'transcript', 'gene', and 'exon'
Returns list of Locus objects
### Response:
def query_loci(self, filter_column, filter_value, feature):
    """
    Query for loci satisfying a given filter and feature type.

    Parameters
    ----------
    filter_column : str
        Name of column to filter results by.
    filter_value : str
        Only return loci which have this value in their filter_column.
    feature : str
        Feature names such as 'transcript', 'gene', and 'exon'

    Returns list of Locus objects
    """
    # Each row is a (contig, start, end, strand) tuple describing one locus.
    rows = self.query(
        select_column_names=["seqname", "start", "end", "strand"],
        filter_column=filter_column,
        filter_value=filter_value,
        feature=feature,
        distinct=True,
        required=True)
    loci = []
    for contig, start, end, strand in rows:
        loci.append(Locus(contig, start, end, strand))
    return loci
def start_app_and_connect(self):
"""Starts snippet apk on the device and connects to it.
This wraps the main logic with safe handling
Raises:
AppStartPreCheckError, when pre-launch checks fail.
"""
try:
self._start_app_and_connect()
except AppStartPreCheckError:
# Precheck errors don't need cleanup, directly raise.
raise
except Exception as e:
# Log the stacktrace of `e` as re-raising doesn't preserve trace.
self._ad.log.exception('Failed to start app and connect.')
# If errors happen, make sure we clean up before raising.
try:
self.stop_app()
except:
self._ad.log.exception(
'Failed to stop app after failure to start and connect.')
# Explicitly raise the original error from starting app.
raise e | Starts snippet apk on the device and connects to it.
This wraps the main logic with safe handling
Raises:
AppStartPreCheckError, when pre-launch checks fail. | Below is the the instruction that describes the task:
### Input:
Starts snippet apk on the device and connects to it.
This wraps the main logic with safe handling
Raises:
AppStartPreCheckError, when pre-launch checks fail.
### Response:
def start_app_and_connect(self):
    """Starts snippet apk on the device and connects to it.

    This wraps the main logic with safe handling.

    Raises:
        AppStartPreCheckError, when pre-launch checks fail.
    """
    try:
        self._start_app_and_connect()
    except AppStartPreCheckError:
        # Precheck errors don't need cleanup, directly raise.
        raise
    except Exception as e:
        # Log the stacktrace of `e` as re-raising doesn't preserve trace.
        self._ad.log.exception('Failed to start app and connect.')
        # If errors happen, make sure we clean up before raising.
        try:
            self.stop_app()
        except Exception:
            # Bug fix: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit raised during cleanup.
            self._ad.log.exception(
                'Failed to stop app after failure to start and connect.')
        # Explicitly raise the original error from starting app.
        raise e
def total_sky_cover(self, value=99.0):
"""Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `total_sky_cover`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `total_sky_cover`')
if value > 10.0:
raise ValueError('value need to be smaller 10.0 '
'for field `total_sky_cover`')
self._total_sky_cover = value | Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | Below is the the instruction that describes the task:
### Input:
Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
### Response:
def total_sky_cover(self, value=99.0):
"""Corresponds to IDD Field `total_sky_cover` This is the value for
total sky cover (tenths of coverage). (i.e. 1 is 1/10 covered. 10 is
total coverage). (Amount of sky dome in tenths covered by clouds or
obscuring phenomena at the hour indicated at the time indicated.)
Args:
value (float): value for IDD Field `total_sky_cover`
value >= 0.0
value <= 10.0
Missing value: 99.0
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `total_sky_cover`'.format(value))
if value < 0.0:
raise ValueError('value need to be greater or equal 0.0 '
'for field `total_sky_cover`')
if value > 10.0:
raise ValueError('value need to be smaller 10.0 '
'for field `total_sky_cover`')
self._total_sky_cover = value |
def get_hash_of_files(root_path, directories=None):
"""Generate md5 hash of files."""
if not directories:
directories = [{'path': './'}]
files_to_hash = []
for i in directories:
ignorer = get_ignorer(os.path.join(root_path, i['path']),
i.get('exclusions'))
with change_dir(root_path):
for root, dirs, files in os.walk(i['path'], topdown=True):
if (root != './') and ignorer.is_ignored(root, True):
dirs[:] = []
files[:] = []
else:
for filename in files:
filepath = os.path.join(root, filename)
if not ignorer.is_ignored(filepath):
files_to_hash.append(
filepath[2:] if filepath.startswith('./') else filepath # noqa
)
return calculate_hash_of_files(files_to_hash, root_path) | Generate md5 hash of files. | Below is the the instruction that describes the task:
### Input:
Generate md5 hash of files.
### Response:
def get_hash_of_files(root_path, directories=None):
    """Generate md5 hash of files.

    root_path: base directory against which the relative directory paths
        are resolved.
    directories: optional list of dicts, each with a 'path' key (relative
        to root_path) and an optional 'exclusions' key; defaults to the
        entire tree under root_path.
    """
    if not directories:
        directories = [{'path': './'}]
    files_to_hash = []
    for i in directories:
        # NOTE(review): presumably gitignore-style exclusion matching --
        # confirm against get_ignorer's implementation.
        ignorer = get_ignorer(os.path.join(root_path, i['path']),
                              i.get('exclusions'))
        with change_dir(root_path):
            for root, dirs, files in os.walk(i['path'], topdown=True):
                if (root != './') and ignorer.is_ignored(root, True):
                    # Prune ignored directories in place: clearing dirs/files
                    # stops os.walk from descending (requires topdown=True).
                    dirs[:] = []
                    files[:] = []
                else:
                    for filename in files:
                        filepath = os.path.join(root, filename)
                        if not ignorer.is_ignored(filepath):
                            # Strip the leading './' so the recorded paths
                            # are stable relative paths.
                            files_to_hash.append(
                                filepath[2:] if filepath.startswith('./') else filepath  # noqa
                            )
    return calculate_hash_of_files(files_to_hash, root_path)
def annotate_and_optimize_ast(
parsed_ast: ast.Module,
source_code: str,
class_types: Optional[ClassTypes] = None,
) -> None:
"""
Performs annotation and optimization on a parsed python AST by doing the
following:
* Annotating all AST nodes with the originating source code of the AST
* Annotating class definition nodes with their original class type
("contract" or "struct")
* Substituting negative values for unary subtractions
:param parsed_ast: The AST to be annotated and optimized.
:param source_code: The originating source code of the AST.
:param class_types: A mapping of class names to original class types.
:return: The annotated and optmized AST.
"""
AnnotatingVisitor(source_code, class_types).visit(parsed_ast)
RewriteUnarySubVisitor().visit(parsed_ast)
EnsureSingleExitChecker().visit(parsed_ast) | Performs annotation and optimization on a parsed python AST by doing the
following:
* Annotating all AST nodes with the originating source code of the AST
* Annotating class definition nodes with their original class type
("contract" or "struct")
* Substituting negative values for unary subtractions
:param parsed_ast: The AST to be annotated and optimized.
:param source_code: The originating source code of the AST.
:param class_types: A mapping of class names to original class types.
:return: The annotated and optmized AST. | Below is the the instruction that describes the task:
### Input:
Performs annotation and optimization on a parsed python AST by doing the
following:
* Annotating all AST nodes with the originating source code of the AST
* Annotating class definition nodes with their original class type
("contract" or "struct")
* Substituting negative values for unary subtractions
:param parsed_ast: The AST to be annotated and optimized.
:param source_code: The originating source code of the AST.
:param class_types: A mapping of class names to original class types.
:return: The annotated and optmized AST.
### Response:
def annotate_and_optimize_ast(
        parsed_ast: ast.Module,
        source_code: str,
        class_types: Optional[ClassTypes] = None,
) -> None:
    """
    Performs annotation and optimization on a parsed python AST, in place, by
    doing the following:

    * Annotating all AST nodes with the originating source code of the AST
    * Annotating class definition nodes with their original class type
      ("contract" or "struct")
    * Substituting negative values for unary subtractions

    :param parsed_ast: The AST to be annotated and optimized (mutated in place).
    :param source_code: The originating source code of the AST.
    :param class_types: A mapping of class names to original class types.
    :return: None -- the passed-in tree itself carries the changes.
    """
    # Order matters: annotations must be attached before the later passes
    # walk the tree.
    AnnotatingVisitor(source_code, class_types).visit(parsed_ast)
    # Folds unary minus into negative constant values.
    RewriteUnarySubVisitor().visit(parsed_ast)
    # NOTE(review): presumably a validation pass (single exit) rather than a
    # transformation -- confirm against EnsureSingleExitChecker.
    EnsureSingleExitChecker().visit(parsed_ast)
def start(self):
""" Begin execution of this task. """
logger.info('Starting task {0}'.format(self.name))
self.reset()
if self.hostname:
host = self.parent_job.parent.get_host(self.hostname)
if host:
self.remote_ssh(host)
else:
self.remote_failure = True
else:
self.process = subprocess.Popen(self.command,
shell=True,
env=os.environ.copy(),
stdout=self.stdout_file,
stderr=self.stderr_file)
self.started_at = datetime.utcnow()
self._start_check_timer() | Begin execution of this task. | Below is the the instruction that describes the task:
### Input:
Begin execution of this task.
### Response:
def start(self):
""" Begin execution of this task. """
logger.info('Starting task {0}'.format(self.name))
self.reset()
if self.hostname:
host = self.parent_job.parent.get_host(self.hostname)
if host:
self.remote_ssh(host)
else:
self.remote_failure = True
else:
self.process = subprocess.Popen(self.command,
shell=True,
env=os.environ.copy(),
stdout=self.stdout_file,
stderr=self.stderr_file)
self.started_at = datetime.utcnow()
self._start_check_timer() |
def normalize_bit(A, B, bit, id2desc):
    """Normalize a bit score against the two ORFs' best scores.

    normalization factor = average max bit score for the two ORFs
    normalized = bit score / normalization factor
    """
    # The last element of each description list is that ORF's max bit score.
    max_scores = [id2desc[A][-1], id2desc[B][-1]]
    return bit / float(numpy.average(max_scores))
normalization factor = average max bit score for the two ORFs
normalized = bit score / normalization factor | Below is the the instruction that describes the task:
### Input:
normalize the bit score:
normalization factor = average max bit score for the two ORFs
normalized = bit score / normalization factor
### Response:
def normalize_bit(A, B, bit, id2desc):
"""
normalize the bit score:
normalization factor = average max bit score for the two ORFs
normalized = bit score / normalization factor
"""
Amax, Bmax = id2desc[A][-1], id2desc[B][-1]
norm_factor = float(numpy.average([Amax, Bmax]))
normalized = bit / norm_factor
return normalized |
def _getCellForNewSegment(self, colIdx):
  """
  Return the index of a cell in this column which is a good candidate
  for adding a new segment.
  When we have fixed size resources in effect, we insure that we pick a
  cell which does not already have the max number of allowed segments. If
  none exists, we choose the least used segment in the column to re-allocate.
  :param colIdx: which column to look at
  :returns: cell index
  """
  # Not fixed size CLA, just choose a cell randomly
  if self.maxSegmentsPerCell < 0:
    if self.cellsPerColumn > 1:
      # Don't ever choose the start cell (cell # 0) in each column
      i = self._random.getUInt32(self.cellsPerColumn-1) + 1
    else:
      i = 0
    return i
  # Fixed size CLA, choose from among the cells that are below the maximum
  # number of segments.
  # NOTE: It is important NOT to always pick the cell with the fewest number
  # of segments. The reason is that if we always do that, we are more likely
  # to run into situations where we choose the same set of cell indices to
  # represent an 'A' in both context 1 and context 2. This is because the
  # cell indices we choose in each column of a pattern will advance in
  # lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
  candidateCellIdxs = []
  if self.cellsPerColumn == 1:
    minIdx = 0
    maxIdx = 0
  else:
    minIdx = 1  # Don't include startCell in the mix
    maxIdx = self.cellsPerColumn-1
  # Collect every cell with spare segment capacity.
  for i in xrange(minIdx, maxIdx+1):
    numSegs = len(self.cells[colIdx][i])
    if numSegs < self.maxSegmentsPerCell:
      candidateCellIdxs.append(i)
  # If we found one, return with it. Note we need to use _random to maintain
  # correspondence with CPP code.
  if len(candidateCellIdxs) > 0:
    #candidateCellIdx = random.choice(candidateCellIdxs)
    candidateCellIdx = (
      candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
    if self.verbosity >= 5:
      print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
        colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
    return candidateCellIdx
  # All cells in the column are full, find a segment to free up
  candidateSegment = None
  candidateSegmentDC = 1.0
  # For each cell in this column
  for i in xrange(minIdx, maxIdx+1):
    # For each segment in this cell, track the one with the lowest duty
    # cycle (least used) seen so far.
    for s in self.cells[colIdx][i]:
      dc = s.dutyCycle()
      if dc < candidateSegmentDC:
        candidateCellIdx = i
        candidateSegmentDC = dc
        candidateSegment = s
  # Free up the least used segment
  if self.verbosity >= 5:
    print ("Deleting segment #%d for cell[%d,%d] to make room for new "
           "segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
    candidateSegment.debugPrint()
  self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
  self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
  return candidateCellIdx
for adding a new segment.
When we have fixed size resources in effect, we insure that we pick a
cell which does not already have the max number of allowed segments. If
none exists, we choose the least used segment in the column to re-allocate.
:param colIdx which column to look at
:returns: cell index | Below is the the instruction that describes the task:
### Input:
Return the index of a cell in this column which is a good candidate
for adding a new segment.
When we have fixed size resources in effect, we insure that we pick a
cell which does not already have the max number of allowed segments. If
none exists, we choose the least used segment in the column to re-allocate.
:param colIdx which column to look at
:returns: cell index
### Response:
def _getCellForNewSegment(self, colIdx):
"""
Return the index of a cell in this column which is a good candidate
for adding a new segment.
When we have fixed size resources in effect, we insure that we pick a
cell which does not already have the max number of allowed segments. If
none exists, we choose the least used segment in the column to re-allocate.
:param colIdx which column to look at
:returns: cell index
"""
# Not fixed size CLA, just choose a cell randomly
if self.maxSegmentsPerCell < 0:
if self.cellsPerColumn > 1:
# Don't ever choose the start cell (cell # 0) in each column
i = self._random.getUInt32(self.cellsPerColumn-1) + 1
else:
i = 0
return i
# Fixed size CLA, choose from among the cells that are below the maximum
# number of segments.
# NOTE: It is important NOT to always pick the cell with the fewest number
# of segments. The reason is that if we always do that, we are more likely
# to run into situations where we choose the same set of cell indices to
# represent an 'A' in both context 1 and context 2. This is because the
# cell indices we choose in each column of a pattern will advance in
# lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
candidateCellIdxs = []
if self.cellsPerColumn == 1:
minIdx = 0
maxIdx = 0
else:
minIdx = 1 # Don't include startCell in the mix
maxIdx = self.cellsPerColumn-1
for i in xrange(minIdx, maxIdx+1):
numSegs = len(self.cells[colIdx][i])
if numSegs < self.maxSegmentsPerCell:
candidateCellIdxs.append(i)
# If we found one, return with it. Note we need to use _random to maintain
# correspondence with CPP code.
if len(candidateCellIdxs) > 0:
#candidateCellIdx = random.choice(candidateCellIdxs)
candidateCellIdx = (
candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
if self.verbosity >= 5:
print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
return candidateCellIdx
# All cells in the column are full, find a segment to free up
candidateSegment = None
candidateSegmentDC = 1.0
# For each cell in this column
for i in xrange(minIdx, maxIdx+1):
# For each segment in this cell
for s in self.cells[colIdx][i]:
dc = s.dutyCycle()
if dc < candidateSegmentDC:
candidateCellIdx = i
candidateSegmentDC = dc
candidateSegment = s
# Free up the least used segment
if self.verbosity >= 5:
print ("Deleting segment #%d for cell[%d,%d] to make room for new "
"segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
candidateSegment.debugPrint()
self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
return candidateCellIdx |
def _plan_distribute(self):
    """
    * **Source/ Dest:** One source to many destinations
    * **Volume:** Single volume or List of volumes is acceptable. This list
      should be same length as destinations
    * **Behavior with transfer options:**
      - New_tip: can be either NEVER or ONCE
        (ALWAYS will fallback to ONCE)
      - Air_gap: if specified, will be performed after every aspirate and
        also in-between dispenses (to keep air gap while moving
        between wells)
      - Blow_out: can be performed at the end of distribute (after mix,
        before touch_tip) at the location specified. If there
        is liquid present in the tip, blow_out will be
        performed at either user-defined location or (default)
        trash. If no liquid is supposed to be present in the
        tip at the end of distribute, blow_out will be
        performed at the last well the liquid was dispensed to
        (if strategy is DEST_IF_EMPTY)
      - Touch_tip: can be performed after each aspirate and/or after
        every dispense
      - Mix: can be performed before aspirate and/or after the last
        dispense if there is no disposal volume (i.e. can be
        performed only when the tip is supposed to be empty)
    Considering all options, the sequence of actions is:
    1. Going from source to dest1:
       *New Tip -> Mix -> Aspirate (with disposal volume) -> Air gap ->
       -> Touch tip -> Dispense air gap -> Dispense -> Mix if empty ->
       -> Blow out -> Touch tip -> Drop tip*
    2. Going from destn to destn+1:
       *.. Dispense air gap -> Dispense -> Touch tip -> Air gap ->
       .. Dispense air gap -> ...*
    """
    # TODO: decide whether default disposal vol for distribute should be
    # pipette min_vol or should we leave it to being 0 by default and
    # recommend users to specify a disposal vol when using distribute.
    # First method keeps distribute consistent with current behavior while
    # the other maintains consistency in default behaviors of all functions
    plan_iter = zip(self._volumes, self._dests)
    done = False
    current_xfer = next(plan_iter)
    # The tip is picked up once, up front -- this is the documented
    # "ALWAYS falls back to ONCE" behavior for distribute.
    if self._strategy.new_tip == types.TransferTipPolicy.ALWAYS:
        yield self._format_dict('pick_up_tip', kwargs=self._tip_opts)
    while not done:
        # Greedily group as many (volume, dest) steps as fit in the pipette,
        # reserving room for the disposal volume and the air gap.
        asp_grouped = []
        try:
            while (sum(a[0] for a in asp_grouped) +
                   self._strategy.disposal_volume +
                   self._strategy.air_gap +
                   current_xfer[0]) < self._instr.max_volume:
                asp_grouped.append(current_xfer)
                current_xfer = next(plan_iter)
        except StopIteration:
            # All (volume, dest) pairs consumed: emit this last group and
            # then exit the outer loop.
            done = True
        # One aspirate covers the whole group plus the disposal volume.
        yield from self._aspirate_actions(sum(a[0] for a in asp_grouped) +
                                          self._strategy.disposal_volume,
                                          self._sources[0])
        for step in asp_grouped:
            # Third argument is True for every dispense except the last one
            # in the group (i.e. whether more dispenses follow).
            yield from self._dispense_actions(step[0], step[1],
                                              step is not asp_grouped[-1])
        yield from self._new_tip_action()
* **Volume:** Single volume or List of volumes is acceptable. This list
should be same length as destinations
* **Behavior with transfer options:**
- New_tip: can be either NEVER or ONCE
(ALWAYS will fallback to ONCE)
- Air_gap: if specified, will be performed after every aspirate and
also in-between dispenses (to keep air gap while moving
between wells)
- Blow_out: can be performed at the end of distribute (after mix,
before touch_tip) at the location specified. If there
is liquid present in the tip, blow_out will be
performed at either user-defined location or (default)
trash. If no liquid is supposed to be present in the
tip at the end of distribute, blow_out will be
performed at the last well the liquid was dispensed to
(if strategy is DEST_IF_EMPTY)
- Touch_tip: can be performed after each aspirate and/or after
every dispense
- Mix: can be performed before aspirate and/or after the last
dispense if there is no disposal volume (i.e. can be
performed only when the tip is supposed to be empty)
Considering all options, the sequence of actions is:
1. Going from source to dest1:
*New Tip -> Mix -> Aspirate (with disposal volume) -> Air gap ->
-> Touch tip -> Dispense air gap -> Dispense -> Mix if empty ->
-> Blow out -> Touch tip -> Drop tip*
2. Going from destn to destn+1:
*.. Dispense air gap -> Dispense -> Touch tip -> Air gap ->
.. Dispense air gap -> ...* | Below is the the instruction that describes the task:
### Input:
* **Source/ Dest:** One source to many destinations
* **Volume:** Single volume or List of volumes is acceptable. This list
should be same length as destinations
* **Behavior with transfer options:**
- New_tip: can be either NEVER or ONCE
(ALWAYS will fallback to ONCE)
- Air_gap: if specified, will be performed after every aspirate and
also in-between dispenses (to keep air gap while moving
between wells)
- Blow_out: can be performed at the end of distribute (after mix,
before touch_tip) at the location specified. If there
is liquid present in the tip, blow_out will be
performed at either user-defined location or (default)
trash. If no liquid is supposed to be present in the
tip at the end of distribute, blow_out will be
performed at the last well the liquid was dispensed to
(if strategy is DEST_IF_EMPTY)
- Touch_tip: can be performed after each aspirate and/or after
every dispense
- Mix: can be performed before aspirate and/or after the last
dispense if there is no disposal volume (i.e. can be
performed only when the tip is supposed to be empty)
Considering all options, the sequence of actions is:
1. Going from source to dest1:
*New Tip -> Mix -> Aspirate (with disposal volume) -> Air gap ->
-> Touch tip -> Dispense air gap -> Dispense -> Mix if empty ->
-> Blow out -> Touch tip -> Drop tip*
2. Going from destn to destn+1:
*.. Dispense air gap -> Dispense -> Touch tip -> Air gap ->
.. Dispense air gap -> ...*
### Response:
def _plan_distribute(self):
"""
* **Source/ Dest:** One source to many destinations
* **Volume:** Single volume or List of volumes is acceptable. This list
should be same length as destinations
* **Behavior with transfer options:**
- New_tip: can be either NEVER or ONCE
(ALWAYS will fallback to ONCE)
- Air_gap: if specified, will be performed after every aspirate and
also in-between dispenses (to keep air gap while moving
between wells)
- Blow_out: can be performed at the end of distribute (after mix,
before touch_tip) at the location specified. If there
is liquid present in the tip, blow_out will be
performed at either user-defined location or (default)
trash. If no liquid is supposed to be present in the
tip at the end of distribute, blow_out will be
performed at the last well the liquid was dispensed to
(if strategy is DEST_IF_EMPTY)
- Touch_tip: can be performed after each aspirate and/or after
every dispense
- Mix: can be performed before aspirate and/or after the last
dispense if there is no disposal volume (i.e. can be
performed only when the tip is supposed to be empty)
Considering all options, the sequence of actions is:
1. Going from source to dest1:
*New Tip -> Mix -> Aspirate (with disposal volume) -> Air gap ->
-> Touch tip -> Dispense air gap -> Dispense -> Mix if empty ->
-> Blow out -> Touch tip -> Drop tip*
2. Going from destn to destn+1:
*.. Dispense air gap -> Dispense -> Touch tip -> Air gap ->
.. Dispense air gap -> ...*
"""
# TODO: decide whether default disposal vol for distribute should be
# pipette min_vol or should we leave it to being 0 by default and
# recommend users to specify a disposal vol when using distribute.
# First method keeps distribute consistent with current behavior while
# the other maintains consistency in default behaviors of all functions
plan_iter = zip(self._volumes, self._dests)
done = False
current_xfer = next(plan_iter)
if self._strategy.new_tip == types.TransferTipPolicy.ALWAYS:
yield self._format_dict('pick_up_tip', kwargs=self._tip_opts)
while not done:
asp_grouped = []
try:
while (sum(a[0] for a in asp_grouped) +
self._strategy.disposal_volume +
self._strategy.air_gap +
current_xfer[0]) < self._instr.max_volume:
asp_grouped.append(current_xfer)
current_xfer = next(plan_iter)
except StopIteration:
done = True
yield from self._aspirate_actions(sum(a[0] for a in asp_grouped) +
self._strategy.disposal_volume,
self._sources[0])
for step in asp_grouped:
yield from self._dispense_actions(step[0], step[1],
step is not asp_grouped[-1])
yield from self._new_tip_action() |
def _client_wrapper(attr, *args, **kwargs):
    '''
    Common functionality for running low-level API calls
    '''
    catch_api_errors = kwargs.pop('catch_api_errors', True)
    func = getattr(__context__['docker.client'], attr, None)
    if func is None or not hasattr(func, '__call__'):
        raise SaltInvocationError("Invalid client action '{0}'".format(attr))
    if attr in ('push', 'pull'):
        try:
            # Refresh auth config from config.json
            __context__['docker.client'].reload_config()
        except AttributeError:
            pass
    err = ''
    try:
        log.debug(
            'Attempting to run docker-py\'s "%s" function '
            'with args=%s and kwargs=%s', attr, args, kwargs
        )
        # Success path: hand the result straight back to the caller.
        return func(*args, **kwargs)
    except docker.errors.APIError as exc:
        if catch_api_errors:
            # Generic handling of Docker API errors
            raise CommandExecutionError(
                'Error {0}: {1}'.format(exc.response.status_code,
                                        exc.explanation)
            )
        # Allow API errors to be caught further up the stack
        raise
    except docker.errors.DockerException as exc:
        # More general docker exception (catches InvalidVersion, etc.)
        raise CommandExecutionError(str(exc))
    except Exception as exc:
        err = str(exc)
    # Only reached when a non-docker exception was swallowed above.
    msg = 'Unable to perform {0}'.format(attr)
    if err:
        msg += ': {0}'.format(err)
    raise CommandExecutionError(msg)
### Input:
Common functionality for running low-level API calls
### Response:
def _client_wrapper(attr, *args, **kwargs):
'''
Common functionality for running low-level API calls
'''
catch_api_errors = kwargs.pop('catch_api_errors', True)
func = getattr(__context__['docker.client'], attr, None)
if func is None or not hasattr(func, '__call__'):
raise SaltInvocationError('Invalid client action \'{0}\''.format(attr))
if attr in ('push', 'pull'):
try:
# Refresh auth config from config.json
__context__['docker.client'].reload_config()
except AttributeError:
pass
err = ''
try:
log.debug(
'Attempting to run docker-py\'s "%s" function '
'with args=%s and kwargs=%s', attr, args, kwargs
)
ret = func(*args, **kwargs)
except docker.errors.APIError as exc:
if catch_api_errors:
# Generic handling of Docker API errors
raise CommandExecutionError(
'Error {0}: {1}'.format(exc.response.status_code,
exc.explanation)
)
else:
# Allow API errors to be caught further up the stack
raise
except docker.errors.DockerException as exc:
# More general docker exception (catches InvalidVersion, etc.)
raise CommandExecutionError(exc.__str__())
except Exception as exc:
err = exc.__str__()
else:
return ret
# If we're here, it's because an exception was caught earlier, and the
# API command failed.
msg = 'Unable to perform {0}'.format(attr)
if err:
msg += ': {0}'.format(err)
raise CommandExecutionError(msg) |
def obfuscate(p, action):
    """Obfuscate the auth details to avoid easy snatching.
    It's best to use a throw away account for these alerts to avoid having
    your authentication put at risk by storing it locally.
    """
    key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH"
    if action == 'store':
        if not PY2:
            # Python 3: plain urlsafe base64, no key mixing.
            return base64.urlsafe_b64encode(p.encode()).decode()
        # Python 2: shift each character by the corresponding key byte.
        shifted = []
        for idx, ch in enumerate(p):
            kc = key[idx % len(key)]
            shifted.append(chr((ord(ch) + ord(kc)) % 256))
        return base64.urlsafe_b64encode("".join(shifted))
    # Retrieval path: reverse the store transformation.
    decoded = base64.urlsafe_b64decode(p)
    if not PY2:
        return decoded.decode()
    restored = []
    for idx, ch in enumerate(decoded):
        kc = key[idx % len(key)]
        restored.append(chr((256 + ord(ch) - ord(kc)) % 256))
    return "".join(restored)
It's best to use a throw away account for these alerts to avoid having
your authentication put at risk by storing it locally. | Below is the the instruction that describes the task:
### Input:
Obfuscate the auth details to avoid easy snatching.
It's best to use a throw away account for these alerts to avoid having
your authentication put at risk by storing it locally.
### Response:
def obfuscate(p, action):
"""Obfuscate the auth details to avoid easy snatching.
It's best to use a throw away account for these alerts to avoid having
your authentication put at risk by storing it locally.
"""
key = "ru7sll3uQrGtDPcIW3okutpFLo6YYtd5bWSpbZJIopYQ0Du0a1WlhvJOaZEH"
s = list()
if action == 'store':
if PY2:
for i in range(len(p)):
kc = key[i % len(key)]
ec = chr((ord(p[i]) + ord(kc)) % 256)
s.append(ec)
return base64.urlsafe_b64encode("".join(s))
else:
return base64.urlsafe_b64encode(p.encode()).decode()
else:
if PY2:
e = base64.urlsafe_b64decode(p)
for i in range(len(e)):
kc = key[i % len(key)]
dc = chr((256 + ord(e[i]) - ord(kc)) % 256)
s.append(dc)
return "".join(s)
else:
e = base64.urlsafe_b64decode(p)
return e.decode() |
def insecure_transport(self):
    """Creates a context to enable the oauthlib environment variable in
    order to debug with insecure transport.
    """
    saved = os.environ.get('OAUTHLIB_INSECURE_TRANSPORT')
    if not (current_app.debug or current_app.testing):
        # Production mode: never enable insecure transport; just warn if the
        # variable is already set in the environment.
        if saved:
            warnings.warn(
                'OAUTHLIB_INSECURE_TRANSPORT has been found in os.environ '
                'but the app is not running in debug mode or testing mode.'
                ' It may put you in danger of the Man-in-the-middle attack'
                ' while using OAuth 2.', RuntimeWarning)
        yield
        return
    # Debug/testing: enable insecure transport for the duration of the
    # context, then restore (or remove) the previous value.
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    try:
        yield
    finally:
        if saved:
            os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = saved
        else:
            os.environ.pop('OAUTHLIB_INSECURE_TRANSPORT', None)
order to debug with insecure transport. | Below is the the instruction that describes the task:
### Input:
Creates a context to enable the oauthlib environment variable in
order to debug with insecure transport.
### Response:
def insecure_transport(self):
"""Creates a context to enable the oauthlib environment variable in
order to debug with insecure transport.
"""
origin = os.environ.get('OAUTHLIB_INSECURE_TRANSPORT')
if current_app.debug or current_app.testing:
try:
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
yield
finally:
if origin:
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = origin
else:
os.environ.pop('OAUTHLIB_INSECURE_TRANSPORT', None)
else:
if origin:
warnings.warn(
'OAUTHLIB_INSECURE_TRANSPORT has been found in os.environ '
'but the app is not running in debug mode or testing mode.'
' It may put you in danger of the Man-in-the-middle attack'
' while using OAuth 2.', RuntimeWarning)
yield |
def reverse(args):
    """Returns a list of file database identifiers given the path stems"""
    from .query import Database
    db = Database()
    if args.selftest:
        # Self-test mode: discard output instead of printing it.
        from bob.db.utils import null
        out = null()
    else:
        out = sys.stdout
    files = db.reverse(args.path)
    for f in files:
        out.write('%d\n' % f.id)
    # Non-zero exit status when nothing matched.
    return 0 if files else 1
### Input:
Returns a list of file database identifiers given the path stems
### Response:
def reverse(args):
"""Returns a list of file database identifiers given the path stems"""
from .query import Database
db = Database()
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
r = db.reverse(args.path)
for f in r: output.write('%d\n' % f.id)
if not r: return 1
return 0 |
def sort(self):
    """Sort variants from most correct to consume, to least.
    Sort rules:
    version_priority:
    - sort by highest versions of packages shared with request;
    - THEN least number of additional packages added to solve;
    - THEN highest versions of additional packages;
    - THEN alphabetical on name of additional packages;
    - THEN variant index.
    intersection_priority:
    - sort by highest number of packages shared with request;
    - THEN sort according to version_priority
    Note:
    In theory 'variant.index' should never factor into the sort unless
    two variants are identical (which shouldn't happen) - this is just
    here as a safety measure so that sorting is guaranteed repeatable
    regardless.
    """
    # Sorting is idempotent; skip the work if already done.
    if self.sorted:
        return

    def key(variant):
        # (-request_position, version_range) for each requested package this
        # variant also requires; negated index preserves request order under
        # the reverse=True sort below.
        requested_key = []
        names = set()
        for i, request in enumerate(self.solver.request_list):
            if not request.conflict:
                req = variant.requires_list.get(request.name)
                if req is not None:
                    requested_key.append((-i, req.range))
                    names.add(req.name)
        # (version_range, name) for every requirement NOT shared with the
        # request -- the "additional packages" of the docstring.
        additional_key = []
        for request in variant.requires_list:
            if not request.conflict and request.name not in names:
                additional_key.append((request.range, request.name))
        if (VariantSelectMode[config.variant_select_mode] ==
                VariantSelectMode.version_priority):
            k = (requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)
        else:  # VariantSelectMode.intersection_priority
            k = (len(requested_key),
                 requested_key,
                 -len(additional_key),
                 additional_key,
                 variant.index)
        return k

    self.variants.sort(key=key, reverse=True)
    self.sorted = True
Sort rules:
version_priority:
- sort by highest versions of packages shared with request;
- THEN least number of additional packages added to solve;
- THEN highest versions of additional packages;
- THEN alphabetical on name of additional packages;
- THEN variant index.
intersection_priority:
- sort by highest number of packages shared with request;
- THEN sort according to version_priority
Note:
In theory 'variant.index' should never factor into the sort unless
two variants are identical (which shouldn't happen) - this is just
here as a safety measure so that sorting is guaranteed repeatable
regardless. | Below is the the instruction that describes the task:
### Input:
Sort variants from most correct to consume, to least.
Sort rules:
version_priority:
- sort by highest versions of packages shared with request;
- THEN least number of additional packages added to solve;
- THEN highest versions of additional packages;
- THEN alphabetical on name of additional packages;
- THEN variant index.
intersection_priority:
- sort by highest number of packages shared with request;
- THEN sort according to version_priority
Note:
In theory 'variant.index' should never factor into the sort unless
two variants are identical (which shouldn't happen) - this is just
here as a safety measure so that sorting is guaranteed repeatable
regardless.
### Response:
def sort(self):
"""Sort variants from most correct to consume, to least.
Sort rules:
version_priority:
- sort by highest versions of packages shared with request;
- THEN least number of additional packages added to solve;
- THEN highest versions of additional packages;
- THEN alphabetical on name of additional packages;
- THEN variant index.
intersection_priority:
- sort by highest number of packages shared with request;
- THEN sort according to version_priority
Note:
In theory 'variant.index' should never factor into the sort unless
two variants are identical (which shouldn't happen) - this is just
here as a safety measure so that sorting is guaranteed repeatable
regardless.
"""
if self.sorted:
return
def key(variant):
requested_key = []
names = set()
for i, request in enumerate(self.solver.request_list):
if not request.conflict:
req = variant.requires_list.get(request.name)
if req is not None:
requested_key.append((-i, req.range))
names.add(req.name)
additional_key = []
for request in variant.requires_list:
if not request.conflict and request.name not in names:
additional_key.append((request.range, request.name))
if (VariantSelectMode[config.variant_select_mode] ==
VariantSelectMode.version_priority):
k = (requested_key,
-len(additional_key),
additional_key,
variant.index)
else: # VariantSelectMode.intersection_priority
k = (len(requested_key),
requested_key,
-len(additional_key),
additional_key,
variant.index)
return k
self.variants.sort(key=key, reverse=True)
self.sorted = True |
def step_impl06(context, count):
    """Execute fuzzer.

    :param count: number of string variants to generate.
    :param context: test context.
    """
    # Fixed mutation rate applied to every generated variant.
    fuzz_factor = 11
    variants = fuzz_string(context.seed, count, fuzz_factor)
    context.fuzzed_string_list = variants
:param count: number of string variants to generate.
:param context: test context. | Below is the the instruction that describes the task:
### Input:
Execute fuzzer.
:param count: number of string variants to generate.
:param context: test context.
### Response:
def step_impl06(context, count):
"""Execute fuzzer.
:param count: number of string variants to generate.
:param context: test context.
"""
fuzz_factor = 11
context.fuzzed_string_list = fuzz_string(context.seed, count, fuzz_factor) |
def _get_additional_options(runtime, debug_options):
"""
Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation.
"""
if not debug_options:
return None
opts = {}
if runtime == Runtime.go1x.value:
# These options are required for delve to function properly inside a docker container on docker < 1.12
# See https://github.com/moby/moby/issues/21051
opts["security_opt"] = ["seccomp:unconfined"]
opts["cap_add"] = ["SYS_PTRACE"]
return opts | Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation. | Below is the the instruction that describes the task:
### Input:
Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation.
### Response:
def _get_additional_options(runtime, debug_options):
"""
Return additional Docker container options. Used by container debug mode to enable certain container
security options.
:param DebugContext debug_options: DebugContext for the runtime of the container.
:return dict: Dictionary containing additional arguments to be passed to container creation.
"""
if not debug_options:
return None
opts = {}
if runtime == Runtime.go1x.value:
# These options are required for delve to function properly inside a docker container on docker < 1.12
# See https://github.com/moby/moby/issues/21051
opts["security_opt"] = ["seccomp:unconfined"]
opts["cap_add"] = ["SYS_PTRACE"]
return opts |
def get_default_config(self):
    """
    Returns default settings for collector
    """
    config = super(IcingaStatsCollector, self).get_default_config()
    # Layer this collector's defaults over the inherited base config.
    defaults = {
        "path": "icinga_stats",
        "status_path": "/var/lib/icinga/status.dat",
    }
    config.update(defaults)
    return config
### Input:
Returns default settings for collector
### Response:
def get_default_config(self):
"""
Returns default settings for collector
"""
config = super(IcingaStatsCollector, self).get_default_config()
config.update({
"path": "icinga_stats",
"status_path": "/var/lib/icinga/status.dat",
})
return config |
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.") | Public method for initiating connectivity with the emby server. | Below is the the instruction that describes the task:
### Input:
Public method for initiating connectivity with the emby server.
### Response:
def start(self):
"""Public method for initiating connectivity with the emby server."""
asyncio.ensure_future(self.register(), loop=self._event_loop)
if self._own_loop:
_LOGGER.info("Starting up our own event loop.")
self._event_loop.run_forever()
self._event_loop.close()
_LOGGER.info("Connection shut down.") |
def str_check(*args, func=None):
    """Check if arguments are str type.

    Raises StringError (naming *func* or the caller) for any argument that
    is not string-like. Note: any Sequence (e.g. a list) also passes.
    """
    caller = func or inspect.stack()[2][3]
    accepted = (str, collections.UserString, collections.abc.Sequence)
    for value in args:
        if isinstance(value, accepted):
            continue
        name = type(value).__name__
        raise StringError(
            f'Function {caller} expected str, {name} got instead.')
### Input:
Check if arguments are str type.
### Response:
def str_check(*args, func=None):
"""Check if arguments are str type."""
func = func or inspect.stack()[2][3]
for var in args:
if not isinstance(var, (str, collections.UserString, collections.abc.Sequence)):
name = type(var).__name__
raise StringError(
f'Function {func} expected str, {name} got instead.') |
def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = var.get_attr('dtype').size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device | Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var. | Below is the the instruction that describes the task:
### Input:
Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
### Response:
def device_function(self, var):
"""Choose a device for the input variable.
Args:
var: an Variable.
Returns:
The device for placing the var.
"""
if var.type not in ('Variable', 'VariableV2', 'VarHandleOp'):
tf.logging.debug('Place {} on last device: {}.'.format(
var.name, self._last_device))
return self._last_device
shape = tf.TensorShape(var.get_attr('shape'))
assert shape.num_elements() is not None
size = var.get_attr('dtype').size
mem, device = heapq.heappop(self._mem_device_heap)
mem += shape.num_elements() * size
heapq.heappush(self._mem_device_heap, (mem, device))
tf.logging.debug('Place variable {} on {} and consumes {} Bytes.'.format(
var.name, device, mem))
self._last_device = device
return device |
def compute(self):
"""
Run an iteration of this anomaly classifier
"""
result = self._constructClassificationRecord()
# Classify this point after waiting the classification delay
if result.ROWID >= self._autoDetectWaitRecords:
self._updateState(result)
# Save new classification record and keep history as moving window
self.saved_states.append(result)
if len(self.saved_states) > self._history_length:
self.saved_states.pop(0)
return result | Run an iteration of this anomaly classifier | Below is the the instruction that describes the task:
### Input:
Run an iteration of this anomaly classifier
### Response:
def compute(self):
"""
Run an iteration of this anomaly classifier
"""
result = self._constructClassificationRecord()
# Classify this point after waiting the classification delay
if result.ROWID >= self._autoDetectWaitRecords:
self._updateState(result)
# Save new classification record and keep history as moving window
self.saved_states.append(result)
if len(self.saved_states) > self._history_length:
self.saved_states.pop(0)
return result |
def runExperiment(args):
"""
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param featureNoise (float) Noise level to add to the features
during inference. Default: None
@param locationNoise (float) Noise level to add to the locations
during inference. Default: None
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param networkType (string)The type of network to use. Options are:
"MultipleL4L2Columns",
"MultipleL4L2ColumnsWithTopology" and
"MultipleL4L2ColumnsWithRandomTopology".
Default: "MultipleL4L2Columns"
@param longDistanceConnections (float) The probability that a column will
connect to a distant column. Only relevant when
using the random topology network type.
If > 1, will instead be taken as desired number
of long-distance connections per column.
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
@param enableFeedback (bool) If True, enable feedback, default is True
@param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
locations will present during inference if this
parameter is set to be a positive number
The method returns the args dict updated with multiple additional keys
representing accuracy metrics.
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
networkType = args.get("networkType", "MultipleL4L2Columns")
longDistanceConnections = args.get("longDistanceConnections", 0)
locationNoise = args.get("locationNoise", 0.0)
featureNoise = args.get("featureNoise", 0.0)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
enableFeedback = args.get("enableFeedback", True)
numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
numInferenceRpts = args.get("numInferenceRpts", 1)
l2Params = args.get("l2Params", None)
l4Params = args.get("l4Params", None)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=150,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
r = objects.objectConfusion()
print "Average common pairs in objects=", r[0],
print ", locations=",r[1],", features=",r[2]
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network
name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
L2Overrides=l2Params,
L4Overrides=l4Params,
networkType = networkType,
longDistanceConnections=longDistanceConnections,
inputSize=150,
externalInputSize=2400,
numInputBits=20,
seed=trialNum,
enableFeedback=enableFeedback,
)
exp.learnObjects(objects.provideObjectsToLearn())
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
numCorrectClassifications=0
classificationPerSensation = numpy.zeros(settlingTime*numPoints)
for objectId in objects:
exp.sendReset()
obj = objects[objectId]
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if numColumns > 1:
# Create sequence of random sensations for this object for all columns At
# any point in time, ensure each column touches a unique loc,feature pair
# on the object. It is ok for a given column to sense a loc,feature pair
# more than once. The total number of sensations is equal to the number of
# points on the object.
for sensationNumber in range(len(obj)):
# Randomly shuffle points for each sensation
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for c in range(numColumns):
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[c].append(objectCopy[c])
else:
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for pair in objectCopy:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"noiseLevel": featureNoise,
"locationNoise": locationNoise,
"includeRandomLocation": includeRandomLocation,
"numAmbiguousLocations": numAmbiguousLocations,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId, reset=False)
classificationPerSensation += numpy.array(
exp.statistics[objectId]["Correct classification"])
if exp.isObjectClassified(objectId, minOverlap=30):
numCorrectClassifications += 1
if plotInferenceStats:
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
experimentID=objectId,
onePlot=False,
)
convergencePoint, accuracy = exp.averageConvergencePoint("L2 Representation",
30, 40, settlingTime)
classificationAccuracy = float(numCorrectClassifications) / numObjects
classificationPerSensation = classificationPerSensation / numObjects
print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format(
numObjects, numFeatures, numLocations, numColumns, trialNum, networkType)
print "Average convergence point=",convergencePoint
print "Classification accuracy=",classificationAccuracy
print
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"classificationAccuracy":classificationAccuracy})
args.update({"classificationPerSensation":classificationPerSensation.tolist()})
# Can't pickle experiment so can't return it for batch multiprocessing runs.
# However this is very useful for debugging when running in a single thread.
if plotInferenceStats:
args.update({"experiment": exp})
return args | Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param featureNoise (float) Noise level to add to the features
during inference. Default: None
@param locationNoise (float) Noise level to add to the locations
during inference. Default: None
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param networkType (string)The type of network to use. Options are:
"MultipleL4L2Columns",
"MultipleL4L2ColumnsWithTopology" and
"MultipleL4L2ColumnsWithRandomTopology".
Default: "MultipleL4L2Columns"
@param longDistanceConnections (float) The probability that a column will
connect to a distant column. Only relevant when
using the random topology network type.
If > 1, will instead be taken as desired number
of long-distance connections per column.
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
@param enableFeedback (bool) If True, enable feedback, default is True
@param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
locations will present during inference if this
parameter is set to be a positive number
The method returns the args dict updated with multiple additional keys
representing accuracy metrics. | Below is the the instruction that describes the task:
### Input:
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param featureNoise (float) Noise level to add to the features
during inference. Default: None
@param locationNoise (float) Noise level to add to the locations
during inference. Default: None
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param networkType (string)The type of network to use. Options are:
"MultipleL4L2Columns",
"MultipleL4L2ColumnsWithTopology" and
"MultipleL4L2ColumnsWithRandomTopology".
Default: "MultipleL4L2Columns"
@param longDistanceConnections (float) The probability that a column will
connect to a distant column. Only relevant when
using the random topology network type.
If > 1, will instead be taken as desired number
of long-distance connections per column.
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
@param enableFeedback (bool) If True, enable feedback, default is True
@param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
locations will present during inference if this
parameter is set to be a positive number
The method returns the args dict updated with multiple additional keys
representing accuracy metrics.
### Response:
def runExperiment(args):
"""
Run experiment. What did you think this does?
args is a dict representing the parameters. We do it this way to support
multiprocessing. args contains one or more of the following keys:
@param featureNoise (float) Noise level to add to the features
during inference. Default: None
@param locationNoise (float) Noise level to add to the locations
during inference. Default: None
@param numObjects (int) The number of objects we will train.
Default: 10
@param numPoints (int) The number of points on each object.
Default: 10
@param numLocations (int) For each point, the number of locations to choose
from. Default: 10
@param numFeatures (int) For each point, the number of features to choose
from. Default: 10
@param numColumns (int) The total number of cortical columns in network.
Default: 2
@param networkType (string)The type of network to use. Options are:
"MultipleL4L2Columns",
"MultipleL4L2ColumnsWithTopology" and
"MultipleL4L2ColumnsWithRandomTopology".
Default: "MultipleL4L2Columns"
@param longDistanceConnections (float) The probability that a column will
connect to a distant column. Only relevant when
using the random topology network type.
If > 1, will instead be taken as desired number
of long-distance connections per column.
@param settlingTime (int) Number of iterations we wait to let columns
stabilize. Important for multicolumn experiments
with lateral connections.
@param includeRandomLocation (bool) If True, a random location SDR will be
generated during inference for each feature.
@param enableFeedback (bool) If True, enable feedback, default is True
@param numAmbiguousLocations (int) number of ambiguous locations. Ambiguous
locations will present during inference if this
parameter is set to be a positive number
The method returns the args dict updated with multiple additional keys
representing accuracy metrics.
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
networkType = args.get("networkType", "MultipleL4L2Columns")
longDistanceConnections = args.get("longDistanceConnections", 0)
locationNoise = args.get("locationNoise", 0.0)
featureNoise = args.get("featureNoise", 0.0)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
enableFeedback = args.get("enableFeedback", True)
numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
numInferenceRpts = args.get("numInferenceRpts", 1)
l2Params = args.get("l2Params", None)
l4Params = args.get("l4Params", None)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=150,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
r = objects.objectConfusion()
print "Average common pairs in objects=", r[0],
print ", locations=",r[1],", features=",r[2]
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# Setup experiment and train the network
name = "convergence_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
L2Overrides=l2Params,
L4Overrides=l4Params,
networkType = networkType,
longDistanceConnections=longDistanceConnections,
inputSize=150,
externalInputSize=2400,
numInputBits=20,
seed=trialNum,
enableFeedback=enableFeedback,
)
exp.learnObjects(objects.provideObjectsToLearn())
# For inference, we will check and plot convergence for each object. For each
# object, we create a sequence of random sensations for each column. We will
# present each sensation for settlingTime time steps to let it settle and
# ensure it converges.
numCorrectClassifications=0
classificationPerSensation = numpy.zeros(settlingTime*numPoints)
for objectId in objects:
exp.sendReset()
obj = objects[objectId]
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = []
if numColumns > 1:
# Create sequence of random sensations for this object for all columns At
# any point in time, ensure each column touches a unique loc,feature pair
# on the object. It is ok for a given column to sense a loc,feature pair
# more than once. The total number of sensations is equal to the number of
# points on the object.
for sensationNumber in range(len(obj)):
# Randomly shuffle points for each sensation
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for c in range(numColumns):
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[c].append(objectCopy[c])
else:
# Create sequence of sensations for this object for one column. The total
# number of sensations is equal to the number of points on the object. No
# point should be visited more than once.
objectCopy = [pair for pair in obj]
random.shuffle(objectCopy)
for pair in objectCopy:
# stay multiple steps on each sensation
for _ in xrange(settlingTime):
objectSensations[0].append(pair)
inferConfig = {
"object": objectId,
"numSteps": len(objectSensations[0]),
"pairs": objectSensations,
"noiseLevel": featureNoise,
"locationNoise": locationNoise,
"includeRandomLocation": includeRandomLocation,
"numAmbiguousLocations": numAmbiguousLocations,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName=objectId, reset=False)
classificationPerSensation += numpy.array(
exp.statistics[objectId]["Correct classification"])
if exp.isObjectClassified(objectId, minOverlap=30):
numCorrectClassifications += 1
if plotInferenceStats:
exp.plotInferenceStats(
fields=["L2 Representation",
"Overlap L2 with object",
"L4 Representation"],
experimentID=objectId,
onePlot=False,
)
convergencePoint, accuracy = exp.averageConvergencePoint("L2 Representation",
30, 40, settlingTime)
classificationAccuracy = float(numCorrectClassifications) / numObjects
classificationPerSensation = classificationPerSensation / numObjects
print "# objects {} # features {} # locations {} # columns {} trial # {} network type {}".format(
numObjects, numFeatures, numLocations, numColumns, trialNum, networkType)
print "Average convergence point=",convergencePoint
print "Classification accuracy=",classificationAccuracy
print
# Return our convergence point as well as all the parameters and objects
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"classificationAccuracy":classificationAccuracy})
args.update({"classificationPerSensation":classificationPerSensation.tolist()})
# Can't pickle experiment so can't return it for batch multiprocessing runs.
# However this is very useful for debugging when running in a single thread.
if plotInferenceStats:
args.update({"experiment": exp})
return args |
def _revint(self, version):
'''
Internal function to convert a version string to an integer.
'''
intrev = 0
vsplit = version.split('.')
for c in range(len(vsplit)):
item = int(vsplit[c]) * (10 ** (((len(vsplit) - c - 1) * 2)))
intrev += item
return intrev | Internal function to convert a version string to an integer. | Below is the the instruction that describes the task:
### Input:
Internal function to convert a version string to an integer.
### Response:
def _revint(self, version):
'''
Internal function to convert a version string to an integer.
'''
intrev = 0
vsplit = version.split('.')
for c in range(len(vsplit)):
item = int(vsplit[c]) * (10 ** (((len(vsplit) - c - 1) * 2)))
intrev += item
return intrev |
def Ctt_(self):
""" Covariance matrix of the time shifted data"""
self._check_estimated()
return self._rc.cov_YY(bessel=self.bessel) | Covariance matrix of the time shifted data | Below is the the instruction that describes the task:
### Input:
Covariance matrix of the time shifted data
### Response:
def Ctt_(self):
""" Covariance matrix of the time shifted data"""
self._check_estimated()
return self._rc.cov_YY(bessel=self.bessel) |
def s_rec2latex(r, empty=""):
"""Export a recarray *r* to a LaTeX table in a string"""
latex = ""
names = r.dtype.names
def translate(x):
if x is None or str(x).lower == "none":
x = empty
return latex_quote(x)
latex += r"\begin{tabular}{%s}" % ("".join(["c"]*len(names)),) + "\n" # simple c columns
latex += r"\hline"+"\n"
latex += " & ".join([latex_quote(x) for x in names])+r"\\"+"\n"
latex += r"\hline"+"\n"
for data in r:
latex += " & ".join([translate(x) for x in data])+r"\\"+"\n"
latex += r"\hline"+"\n"
latex += r"\end{tabular}"+"\n"
return latex | Export a recarray *r* to a LaTeX table in a string | Below is the the instruction that describes the task:
### Input:
Export a recarray *r* to a LaTeX table in a string
### Response:
def s_rec2latex(r, empty=""):
"""Export a recarray *r* to a LaTeX table in a string"""
latex = ""
names = r.dtype.names
def translate(x):
if x is None or str(x).lower == "none":
x = empty
return latex_quote(x)
latex += r"\begin{tabular}{%s}" % ("".join(["c"]*len(names)),) + "\n" # simple c columns
latex += r"\hline"+"\n"
latex += " & ".join([latex_quote(x) for x in names])+r"\\"+"\n"
latex += r"\hline"+"\n"
for data in r:
latex += " & ".join([translate(x) for x in data])+r"\\"+"\n"
latex += r"\hline"+"\n"
latex += r"\end{tabular}"+"\n"
return latex |
def saveProfileAs( self ):
"""
Saves the current profile as a new profile to the manager.
"""
name, ok = QInputDialog.getText(self, 'Create Profile', 'Name:')
if ( not name ):
return
manager = self.parent()
prof = manager.viewWidget().saveProfile()
prof.setName(nativestring(name))
self.parent().addProfile(prof) | Saves the current profile as a new profile to the manager. | Below is the the instruction that describes the task:
### Input:
Saves the current profile as a new profile to the manager.
### Response:
def saveProfileAs( self ):
"""
Saves the current profile as a new profile to the manager.
"""
name, ok = QInputDialog.getText(self, 'Create Profile', 'Name:')
if ( not name ):
return
manager = self.parent()
prof = manager.viewWidget().saveProfile()
prof.setName(nativestring(name))
self.parent().addProfile(prof) |
def add_path(prev: Optional[ResponsePath], key: Union[str, int]) -> ResponsePath:
"""Add a key to a response path.
Given a ResponsePath and a key, return a new ResponsePath containing the new key.
"""
return ResponsePath(prev, key) | Add a key to a response path.
Given a ResponsePath and a key, return a new ResponsePath containing the new key. | Below is the the instruction that describes the task:
### Input:
Add a key to a response path.
Given a ResponsePath and a key, return a new ResponsePath containing the new key.
### Response:
def add_path(prev: Optional[ResponsePath], key: Union[str, int]) -> ResponsePath:
"""Add a key to a response path.
Given a ResponsePath and a key, return a new ResponsePath containing the new key.
"""
return ResponsePath(prev, key) |
def compiler_mimetype(self):
"""Implicit MIME type of the asset by its compilers."""
for compiler in reversed(self.compilers):
if compiler.result_mimetype:
return compiler.result_mimetype
return None | Implicit MIME type of the asset by its compilers. | Below is the the instruction that describes the task:
### Input:
Implicit MIME type of the asset by its compilers.
### Response:
def compiler_mimetype(self):
"""Implicit MIME type of the asset by its compilers."""
for compiler in reversed(self.compilers):
if compiler.result_mimetype:
return compiler.result_mimetype
return None |
def verify_digest(hash_hex, pubkey_hex, sigb64, hashfunc=hashlib.sha256):
"""
Given a digest, public key (as hex), and a base64 signature,
verify that the public key signed the digest.
Return True if so
Return False if not
"""
if not isinstance(hash_hex, (str, unicode)):
raise ValueError("hash hex is not a string")
hash_hex = str(hash_hex)
pubk_uncompressed_hex = keylib.key_formatting.decompress(pubkey_hex)
sig_r, sig_s = decode_signature(sigb64)
pubk = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256K1(), pubk_uncompressed_hex.decode('hex')).public_key(default_backend())
signature = encode_dss_signature(sig_r, sig_s)
try:
pubk.verify(signature, hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256())))
return True
except InvalidSignature:
return False | Given a digest, public key (as hex), and a base64 signature,
verify that the public key signed the digest.
Return True if so
Return False if not | Below is the the instruction that describes the task:
### Input:
Given a digest, public key (as hex), and a base64 signature,
verify that the public key signed the digest.
Return True if so
Return False if not
### Response:
def verify_digest(hash_hex, pubkey_hex, sigb64, hashfunc=hashlib.sha256):
"""
Given a digest, public key (as hex), and a base64 signature,
verify that the public key signed the digest.
Return True if so
Return False if not
"""
if not isinstance(hash_hex, (str, unicode)):
raise ValueError("hash hex is not a string")
hash_hex = str(hash_hex)
pubk_uncompressed_hex = keylib.key_formatting.decompress(pubkey_hex)
sig_r, sig_s = decode_signature(sigb64)
pubk = ec.EllipticCurvePublicNumbers.from_encoded_point(ec.SECP256K1(), pubk_uncompressed_hex.decode('hex')).public_key(default_backend())
signature = encode_dss_signature(sig_r, sig_s)
try:
pubk.verify(signature, hash_hex.decode('hex'), ec.ECDSA(utils.Prehashed(hashes.SHA256())))
return True
except InvalidSignature:
return False |
def _setup_atoms(self):
"""Derive Atom objects from the record."""
# Delete existing atoms
self._atoms = {}
# Create atoms
aids = self.record['atoms']['aid']
elements = self.record['atoms']['element']
if not len(aids) == len(elements):
raise ResponseParseError('Error parsing atom elements')
for aid, element in zip(aids, elements):
self._atoms[aid] = Atom(aid=aid, number=element)
# Add coordinates
if 'coords' in self.record:
coord_ids = self.record['coords'][0]['aid']
xs = self.record['coords'][0]['conformers'][0]['x']
ys = self.record['coords'][0]['conformers'][0]['y']
zs = self.record['coords'][0]['conformers'][0].get('z', [])
if not len(coord_ids) == len(xs) == len(ys) == len(self._atoms) or (zs and not len(zs) == len(coord_ids)):
raise ResponseParseError('Error parsing atom coordinates')
for aid, x, y, z in zip_longest(coord_ids, xs, ys, zs):
self._atoms[aid].set_coordinates(x, y, z)
# Add charges
if 'charge' in self.record['atoms']:
for charge in self.record['atoms']['charge']:
self._atoms[charge['aid']].charge = charge['value'] | Derive Atom objects from the record. | Below is the the instruction that describes the task:
### Input:
Derive Atom objects from the record.
### Response:
def _setup_atoms(self):
"""Derive Atom objects from the record."""
# Delete existing atoms
self._atoms = {}
# Create atoms
aids = self.record['atoms']['aid']
elements = self.record['atoms']['element']
if not len(aids) == len(elements):
raise ResponseParseError('Error parsing atom elements')
for aid, element in zip(aids, elements):
self._atoms[aid] = Atom(aid=aid, number=element)
# Add coordinates
if 'coords' in self.record:
coord_ids = self.record['coords'][0]['aid']
xs = self.record['coords'][0]['conformers'][0]['x']
ys = self.record['coords'][0]['conformers'][0]['y']
zs = self.record['coords'][0]['conformers'][0].get('z', [])
if not len(coord_ids) == len(xs) == len(ys) == len(self._atoms) or (zs and not len(zs) == len(coord_ids)):
raise ResponseParseError('Error parsing atom coordinates')
for aid, x, y, z in zip_longest(coord_ids, xs, ys, zs):
self._atoms[aid].set_coordinates(x, y, z)
# Add charges
if 'charge' in self.record['atoms']:
for charge in self.record['atoms']['charge']:
self._atoms[charge['aid']].charge = charge['value'] |
def update(self, move):
"""
Updates position by applying selected move
:type: move: Move
"""
if move is None:
raise TypeError("Move cannot be type None")
if self.king_loc_dict is not None and isinstance(move.piece, King):
self.king_loc_dict[move.color] = move.end_loc
# Invalidates en-passant
for square in self:
pawn = square
if isinstance(pawn, Pawn):
pawn.just_moved_two_steps = False
# Sets King and Rook has_moved property to True is piece has moved
if type(move.piece) is King or type(move.piece) is Rook:
move.piece.has_moved = True
elif move.status == notation_const.MOVEMENT and \
isinstance(move.piece, Pawn) and \
fabs(move.end_loc.rank - move.start_loc.rank) == 2:
move.piece.just_moved_two_steps = True
if move.status == notation_const.KING_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
elif move.status == notation_const.QUEEN_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
elif move.status == notation_const.EN_PASSANT:
self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
elif move.status == notation_const.PROMOTE or \
move.status == notation_const.CAPTURE_AND_PROMOTE:
try:
self.remove_piece_at_square(move.start_loc)
self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc)
except TypeError as e:
raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
return
self.move_piece(move.piece.location, move.end_loc) | Updates position by applying selected move
:type: move: Move | Below is the the instruction that describes the task:
### Input:
Updates position by applying selected move
:type: move: Move
### Response:
def update(self, move):
"""
Updates position by applying selected move
:type: move: Move
"""
if move is None:
raise TypeError("Move cannot be type None")
if self.king_loc_dict is not None and isinstance(move.piece, King):
self.king_loc_dict[move.color] = move.end_loc
# Invalidates en-passant
for square in self:
pawn = square
if isinstance(pawn, Pawn):
pawn.just_moved_two_steps = False
# Sets King and Rook has_moved property to True is piece has moved
if type(move.piece) is King or type(move.piece) is Rook:
move.piece.has_moved = True
elif move.status == notation_const.MOVEMENT and \
isinstance(move.piece, Pawn) and \
fabs(move.end_loc.rank - move.start_loc.rank) == 2:
move.piece.just_moved_two_steps = True
if move.status == notation_const.KING_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 7), Location(move.end_loc.rank, 5))
self.piece_at_square(Location(move.end_loc.rank, 5)).has_moved = True
elif move.status == notation_const.QUEEN_SIDE_CASTLE:
self.move_piece(Location(move.end_loc.rank, 0), Location(move.end_loc.rank, 3))
self.piece_at_square(Location(move.end_loc.rank, 3)).has_moved = True
elif move.status == notation_const.EN_PASSANT:
self.remove_piece_at_square(Location(move.start_loc.rank, move.end_loc.file))
elif move.status == notation_const.PROMOTE or \
move.status == notation_const.CAPTURE_AND_PROMOTE:
try:
self.remove_piece_at_square(move.start_loc)
self.place_piece_at_square(move.promoted_to_piece(move.color, move.end_loc), move.end_loc)
except TypeError as e:
raise ValueError("Promoted to piece cannot be None in Move {}\n{}".format(repr(move), e))
return
self.move_piece(move.piece.location, move.end_loc) |
def modelsGetFieldsForCheckpointed(self, jobID, fields):
"""
Gets fields from all models in a job that have been checkpointed. This is
used to figure out whether or not a new model should be checkpointed.
Parameters:
-----------------------------------------------------------------------
jobID: The jobID for the models to be searched
fields: A list of fields to return
Returns: a (possibly-empty) list of tuples as follows
[
(model_id1, [field1, ..., fieldn]),
(model_id2, [field1, ..., fieldn]),
(model_id3, [field1, ..., fieldn])
...
]
"""
assert len(fields) >= 1, "fields is empty"
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
dbFields = [self._models.pubToDBNameDict[f] for f in fields]
dbFieldStr = ", ".join(dbFields)
query = 'SELECT model_id, {fields} from {models}' \
' WHERE job_id=%s AND model_checkpoint_id IS NOT NULL'.format(
fields=dbFieldStr, models=self.modelsTableName)
conn.cursor.execute(query, [jobID])
rows = conn.cursor.fetchall()
return [(r[0], list(r[1:])) for r in rows] | Gets fields from all models in a job that have been checkpointed. This is
used to figure out whether or not a new model should be checkpointed.
Parameters:
-----------------------------------------------------------------------
jobID: The jobID for the models to be searched
fields: A list of fields to return
Returns: a (possibly-empty) list of tuples as follows
[
(model_id1, [field1, ..., fieldn]),
(model_id2, [field1, ..., fieldn]),
(model_id3, [field1, ..., fieldn])
...
] | Below is the the instruction that describes the task:
### Input:
Gets fields from all models in a job that have been checkpointed. This is
used to figure out whether or not a new model should be checkpointed.
Parameters:
-----------------------------------------------------------------------
jobID: The jobID for the models to be searched
fields: A list of fields to return
Returns: a (possibly-empty) list of tuples as follows
[
(model_id1, [field1, ..., fieldn]),
(model_id2, [field1, ..., fieldn]),
(model_id3, [field1, ..., fieldn])
...
]
### Response:
def modelsGetFieldsForCheckpointed(self, jobID, fields):
"""
Gets fields from all models in a job that have been checkpointed. This is
used to figure out whether or not a new model should be checkpointed.
Parameters:
-----------------------------------------------------------------------
jobID: The jobID for the models to be searched
fields: A list of fields to return
Returns: a (possibly-empty) list of tuples as follows
[
(model_id1, [field1, ..., fieldn]),
(model_id2, [field1, ..., fieldn]),
(model_id3, [field1, ..., fieldn])
...
]
"""
assert len(fields) >= 1, "fields is empty"
# Get a database connection and cursor
with ConnectionFactory.get() as conn:
dbFields = [self._models.pubToDBNameDict[f] for f in fields]
dbFieldStr = ", ".join(dbFields)
query = 'SELECT model_id, {fields} from {models}' \
' WHERE job_id=%s AND model_checkpoint_id IS NOT NULL'.format(
fields=dbFieldStr, models=self.modelsTableName)
conn.cursor.execute(query, [jobID])
rows = conn.cursor.fetchall()
return [(r[0], list(r[1:])) for r in rows] |
def set_write_bit(fn):
# type: (str) -> None
"""
Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None
"""
fn = fs_encode(fn)
if not os.path.exists(fn):
return
file_stat = os.stat(fn).st_mode
os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if os.name == "nt":
from ._winconsole import get_current_user
user_sid = get_current_user()
icacls_exe = _find_icacls_exe() or "icacls"
from .misc import run
if user_sid:
_, err = run([icacls_exe, "/grant", "{0}:WD".format(user_sid), "''{0}''".format(fn), "/T", "/C", "/Q"])
if not err:
return
if not os.path.isdir(fn):
for path in [fn, os.path.dirname(fn)]:
try:
os.chflags(path, 0)
except AttributeError:
pass
return None
for root, dirs, files in os.walk(fn, topdown=False):
for dir_ in [os.path.join(root, d) for d in dirs]:
set_write_bit(dir_)
for file_ in [os.path.join(root, f) for f in files]:
set_write_bit(file_) | Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None | Below is the the instruction that describes the task:
### Input:
Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None
### Response:
def set_write_bit(fn):
# type: (str) -> None
"""
Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None
"""
fn = fs_encode(fn)
if not os.path.exists(fn):
return
file_stat = os.stat(fn).st_mode
os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if os.name == "nt":
from ._winconsole import get_current_user
user_sid = get_current_user()
icacls_exe = _find_icacls_exe() or "icacls"
from .misc import run
if user_sid:
_, err = run([icacls_exe, "/grant", "{0}:WD".format(user_sid), "''{0}''".format(fn), "/T", "/C", "/Q"])
if not err:
return
if not os.path.isdir(fn):
for path in [fn, os.path.dirname(fn)]:
try:
os.chflags(path, 0)
except AttributeError:
pass
return None
for root, dirs, files in os.walk(fn, topdown=False):
for dir_ in [os.path.join(root, d) for d in dirs]:
set_write_bit(dir_)
for file_ in [os.path.join(root, f) for f in files]:
set_write_bit(file_) |
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
http_headers = event_values.get('http_headers', None)
if http_headers:
event_values['http_headers'] = http_headers.replace('\r\n', ' - ')
if event_values.get('recovered', None):
event_values['recovered_string'] = '[Recovered Entry]'
cached_file_path = event_values.get('cached_filename', None)
if cached_file_path:
cache_directory_name = event_values.get('cache_directory_name', None)
if cache_directory_name:
cached_file_path = '\\'.join([cache_directory_name, cached_file_path])
event_values['cached_file_path'] = cached_file_path
return self._ConditionalFormatMessages(event_values) | Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter. | Below is the the instruction that describes the task:
### Input:
Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
### Response:
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
http_headers = event_values.get('http_headers', None)
if http_headers:
event_values['http_headers'] = http_headers.replace('\r\n', ' - ')
if event_values.get('recovered', None):
event_values['recovered_string'] = '[Recovered Entry]'
cached_file_path = event_values.get('cached_filename', None)
if cached_file_path:
cache_directory_name = event_values.get('cache_directory_name', None)
if cache_directory_name:
cached_file_path = '\\'.join([cache_directory_name, cached_file_path])
event_values['cached_file_path'] = cached_file_path
return self._ConditionalFormatMessages(event_values) |
def window_open_config(self, temperature, duration):
"""Configures the window open behavior. The duration is specified in
5 minute increments."""
_LOGGER.debug("Window open config, temperature: %s duration: %s", temperature, duration)
self._verify_temperature(temperature)
if duration.seconds < 0 and duration.seconds > 3600:
raise ValueError
value = struct.pack('BBB', PROP_WINDOW_OPEN_CONFIG,
int(temperature * 2), int(duration.seconds / 300))
self._conn.make_request(PROP_WRITE_HANDLE, value) | Configures the window open behavior. The duration is specified in
5 minute increments. | Below is the the instruction that describes the task:
### Input:
Configures the window open behavior. The duration is specified in
5 minute increments.
### Response:
def window_open_config(self, temperature, duration):
"""Configures the window open behavior. The duration is specified in
5 minute increments."""
_LOGGER.debug("Window open config, temperature: %s duration: %s", temperature, duration)
self._verify_temperature(temperature)
if duration.seconds < 0 and duration.seconds > 3600:
raise ValueError
value = struct.pack('BBB', PROP_WINDOW_OPEN_CONFIG,
int(temperature * 2), int(duration.seconds / 300))
self._conn.make_request(PROP_WRITE_HANDLE, value) |
def get_field_value(self, name):
"""
Get the field value from the modified data or the original one
"""
name = self.get_real_name(name)
if not name or name in self.__deleted_fields__:
return None
modified = self.__modified_data__.get(name)
if modified is not None:
return modified
return self.__original_data__.get(name) | Get the field value from the modified data or the original one | Below is the the instruction that describes the task:
### Input:
Get the field value from the modified data or the original one
### Response:
def get_field_value(self, name):
"""
Get the field value from the modified data or the original one
"""
name = self.get_real_name(name)
if not name or name in self.__deleted_fields__:
return None
modified = self.__modified_data__.get(name)
if modified is not None:
return modified
return self.__original_data__.get(name) |
def write_temp_file(self, content, filename=None, mode='w'):
"""Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file.
"""
if filename is None:
filename = str(uuid.uuid4())
fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
with open(fqpn, mode) as fh:
fh.write(content)
return fqpn | Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file. | Below is the the instruction that describes the task:
### Input:
Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file.
### Response:
def write_temp_file(self, content, filename=None, mode='w'):
"""Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file.
"""
if filename is None:
filename = str(uuid.uuid4())
fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
with open(fqpn, mode) as fh:
fh.write(content)
return fqpn |
def get_real_ip(request, right_most_proxy=False):
"""
Returns client's best-matched `real` `externally-routable` ip-address, or None
@deprecated - Do not edit
"""
warnings.warn('get_real_ip is deprecated and will be removed in 3.0.', DeprecationWarning)
return get_ip(request, real_ip_only=True, right_most_proxy=right_most_proxy) | Returns client's best-matched `real` `externally-routable` ip-address, or None
@deprecated - Do not edit | Below is the the instruction that describes the task:
### Input:
Returns client's best-matched `real` `externally-routable` ip-address, or None
@deprecated - Do not edit
### Response:
def get_real_ip(request, right_most_proxy=False):
"""
Returns client's best-matched `real` `externally-routable` ip-address, or None
@deprecated - Do not edit
"""
warnings.warn('get_real_ip is deprecated and will be removed in 3.0.', DeprecationWarning)
return get_ip(request, real_ip_only=True, right_most_proxy=right_most_proxy) |
def _read_file(self, filename):
"""
Read contents of given file.
"""
try:
with open(filename, "r") as fhandle:
stats = float(fhandle.readline().rstrip("\n"))
except Exception:
stats = None
return stats | Read contents of given file. | Below is the the instruction that describes the task:
### Input:
Read contents of given file.
### Response:
def _read_file(self, filename):
"""
Read contents of given file.
"""
try:
with open(filename, "r") as fhandle:
stats = float(fhandle.readline().rstrip("\n"))
except Exception:
stats = None
return stats |
def getVals(self):
"""Returns value list for Munin Graph
@return: List of name-value pairs.
"""
return [(name, self._fieldValDict.get(name))
for name in self._fieldNameList] | Returns value list for Munin Graph
@return: List of name-value pairs. | Below is the the instruction that describes the task:
### Input:
Returns value list for Munin Graph
@return: List of name-value pairs.
### Response:
def getVals(self):
"""Returns value list for Munin Graph
@return: List of name-value pairs.
"""
return [(name, self._fieldValDict.get(name))
for name in self._fieldNameList] |
def set_cache(new_path=None):
"""Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str
"""
global CACHE_DIR, API_CACHE, SPRITE_CACHE
if new_path is None:
new_path = get_default_cache()
CACHE_DIR = safe_make_dirs(os.path.abspath(new_path))
API_CACHE = os.path.join(CACHE_DIR, 'api.cache')
SPRITE_CACHE = safe_make_dirs(os.path.join(CACHE_DIR, 'sprite'))
return CACHE_DIR, API_CACHE, SPRITE_CACHE | Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str | Below is the the instruction that describes the task:
### Input:
Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str
### Response:
def set_cache(new_path=None):
"""Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str
"""
global CACHE_DIR, API_CACHE, SPRITE_CACHE
if new_path is None:
new_path = get_default_cache()
CACHE_DIR = safe_make_dirs(os.path.abspath(new_path))
API_CACHE = os.path.join(CACHE_DIR, 'api.cache')
SPRITE_CACHE = safe_make_dirs(os.path.join(CACHE_DIR, 'sprite'))
return CACHE_DIR, API_CACHE, SPRITE_CACHE |
def add_plugin(self, phase, name, args, reason=None):
"""
if config has plugin, override it, else add it
"""
plugin_modified = False
for plugin in self.template[phase]:
if plugin['name'] == name:
plugin['args'] = args
plugin_modified = True
if not plugin_modified:
self.template[phase].append({"name": name, "args": args})
if reason:
logger.info('{}:{} with args {}, {}'.format(phase, name, args, reason)) | if config has plugin, override it, else add it | Below is the the instruction that describes the task:
### Input:
if config has plugin, override it, else add it
### Response:
def add_plugin(self, phase, name, args, reason=None):
"""
if config has plugin, override it, else add it
"""
plugin_modified = False
for plugin in self.template[phase]:
if plugin['name'] == name:
plugin['args'] = args
plugin_modified = True
if not plugin_modified:
self.template[phase].append({"name": name, "args": args})
if reason:
logger.info('{}:{} with args {}, {}'.format(phase, name, args, reason)) |
def s1p(self):
"""Return 1 proton separation energy"""
M_P = 7.28897050 # proton mass excess in MeV
f = lambda parent, daugther: -parent + daugther + M_P
return self.derived('s1p', (-1, 0), f) | Return 1 proton separation energy | Below is the the instruction that describes the task:
### Input:
Return 1 proton separation energy
### Response:
def s1p(self):
"""Return 1 proton separation energy"""
M_P = 7.28897050 # proton mass excess in MeV
f = lambda parent, daugther: -parent + daugther + M_P
return self.derived('s1p', (-1, 0), f) |
def init(runlevel):
'''
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
'''
cmd = ['init', '{0}'.format(runlevel)]
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret | Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3 | Below is the the instruction that describes the task:
### Input:
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
### Response:
def init(runlevel):
'''
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
'''
cmd = ['init', '{0}'.format(runlevel)]
ret = __salt__['cmd.run'](cmd, python_shell=False)
return ret |
async def subscriptions(self, request):
"""
Handles requests for new subscription websockets.
Args:
request (aiohttp.Request): the incoming request
Returns:
aiohttp.web.WebSocketResponse: the websocket response, when the
resulting websocket is closed
"""
if not self._accepting:
return web.Response(status=503)
web_sock = web.WebSocketResponse()
await web_sock.prepare(request)
async for msg in web_sock:
if msg.type == aiohttp.WSMsgType.TEXT:
await self._handle_message(web_sock, msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
LOGGER.warning(
'Web socket connection closed with exception %s',
web_sock.exception())
await web_sock.close()
await self._handle_unsubscribe(web_sock)
return web_sock | Handles requests for new subscription websockets.
Args:
request (aiohttp.Request): the incoming request
Returns:
aiohttp.web.WebSocketResponse: the websocket response, when the
resulting websocket is closed | Below is the the instruction that describes the task:
### Input:
Handles requests for new subscription websockets.
Args:
request (aiohttp.Request): the incoming request
Returns:
aiohttp.web.WebSocketResponse: the websocket response, when the
resulting websocket is closed
### Response:
async def subscriptions(self, request):
"""
Handles requests for new subscription websockets.
Args:
request (aiohttp.Request): the incoming request
Returns:
aiohttp.web.WebSocketResponse: the websocket response, when the
resulting websocket is closed
"""
if not self._accepting:
return web.Response(status=503)
web_sock = web.WebSocketResponse()
await web_sock.prepare(request)
async for msg in web_sock:
if msg.type == aiohttp.WSMsgType.TEXT:
await self._handle_message(web_sock, msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
LOGGER.warning(
'Web socket connection closed with exception %s',
web_sock.exception())
await web_sock.close()
await self._handle_unsubscribe(web_sock)
return web_sock |
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
color_scheme_n = 'color_scheme_name'
color_scheme_o = self.get_color_scheme()
connect_n = 'connect_to_oi'
wrap_n = 'wrap'
wrap_o = self.get_option(wrap_n)
self.wrap_action.setChecked(wrap_o)
math_n = 'math'
math_o = self.get_option(math_n)
if color_scheme_n in options:
self.set_plain_text_color_scheme(color_scheme_o)
if wrap_n in options:
self.toggle_wrap_mode(wrap_o)
if math_n in options:
self.toggle_math_mode(math_o)
# To make auto-connection changes take place instantly
self.main.editor.apply_plugin_settings(options=[connect_n])
self.main.ipyconsole.apply_plugin_settings(options=[connect_n]) | Apply configuration file's plugin settings | Below is the the instruction that describes the task:
### Input:
Apply configuration file's plugin settings
### Response:
def apply_plugin_settings(self, options):
"""Apply configuration file's plugin settings"""
color_scheme_n = 'color_scheme_name'
color_scheme_o = self.get_color_scheme()
connect_n = 'connect_to_oi'
wrap_n = 'wrap'
wrap_o = self.get_option(wrap_n)
self.wrap_action.setChecked(wrap_o)
math_n = 'math'
math_o = self.get_option(math_n)
if color_scheme_n in options:
self.set_plain_text_color_scheme(color_scheme_o)
if wrap_n in options:
self.toggle_wrap_mode(wrap_o)
if math_n in options:
self.toggle_math_mode(math_o)
# To make auto-connection changes take place instantly
self.main.editor.apply_plugin_settings(options=[connect_n])
self.main.ipyconsole.apply_plugin_settings(options=[connect_n]) |
def first_solar_spectral_correction(pw, airmass_absolute, module_type=None,
coefficients=None):
r"""
Spectral mismatch modifier based on precipitable water and absolute
(pressure corrected) airmass.
Estimates a spectral mismatch modifier M representing the effect on
module short circuit current of variation in the spectral
irradiance. M is estimated from absolute (pressure currected) air
mass, AMa, and precipitable water, Pwat, using the following
function:
.. math::
M = c_1 + c_2*AMa + c_3*Pwat + c_4*AMa^.5
+ c_5*Pwat^.5 + c_6*AMa/Pwat^.5
Default coefficients are determined for several cell types with
known quantum efficiency curves, by using the Simple Model of the
Atmospheric Radiative Transfer of Sunshine (SMARTS) [1]_. Using
SMARTS, spectrums are simulated with all combinations of AMa and
Pwat where:
* 0.5 cm <= Pwat <= 5 cm
* 1.0 <= AMa <= 5.0
* Spectral range is limited to that of CMP11 (280 nm to 2800 nm)
* spectrum simulated on a plane normal to the sun
* All other parameters fixed at G173 standard
From these simulated spectra, M is calculated using the known
quantum efficiency curves. Multiple linear regression is then
applied to fit Eq. 1 to determine the coefficients for each module.
Based on the PVLIB Matlab function ``pvl_FSspeccorr`` by Mitchell
Lee and Alex Panchula, at First Solar, 2016 [2]_.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
module_type : None or string, default None
a string specifying a cell type. Can be lower or upper case
letters. Admits values of 'cdte', 'monosi', 'xsi', 'multisi',
'polysi'. If provided, this input selects coefficients for the
following default modules:
* 'cdte' - First Solar Series 4-2 CdTe modules.
* 'monosi', 'xsi' - First Solar TetraSun modules.
* 'multisi', 'polysi' - multi-crystalline silicon modules.
* 'cigs' - anonymous copper indium gallium selenide PV module
* 'asi' - anonymous amorphous silicon PV module
The module used to calculate the spectral correction
coefficients corresponds to the Mult-crystalline silicon
Manufacturer 2 Model C from [3]_. Spectral Response (SR) of CIGS
and a-Si modules used to derive coefficients can be found in [4]_
coefficients : None or array-like, default None
allows for entry of user defined spectral correction
coefficients. Coefficients must be of length 6. Derivation of
coefficients requires use of SMARTS and PV module quantum
efficiency curve. Useful for modeling PV module types which are
not included as defaults, or to fine tune the spectral
correction to a particular mono-Si, multi-Si, or CdTe PV module.
Note that the parameters for modules with very similar QE should
be similar, in most cases limiting the need for module specific
coefficients.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which is can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
References
----------
.. [1] Gueymard, Christian. SMARTS2: a simple model of the atmospheric
radiative transfer of sunshine: algorithms and performance
assessment. Cocoa, FL: Florida Solar Energy Center, 1995.
.. [2] Lee, Mitchell, and Panchula, Alex. "Spectral Correction for
Photovoltaic Module Performance Based on Air Mass and Precipitable
Water." IEEE Photovoltaic Specialists Conference, Portland, 2016
.. [3] Marion, William F., et al. User's Manual for Data for Validating
Models for PV Module Performance. National Renewable Energy
Laboratory, 2014. http://www.nrel.gov/docs/fy14osti/61610.pdf
.. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects
on Energy Yield of Different PV Modules: Comparison of Pwat and
MMF Approach, TUV Rheinland Energy GmbH report 21237296.003,
January 2017
"""
# --- Screen Input Data ---
# *** Pwat ***
# Replace Pwat Values below 0.1 cm with 0.1 cm to prevent model from
# diverging"
if np.min(pw) < 0.1:
pw = np.maximum(pw, 0.1)
warn('Exceptionally low Pwat values replaced with 0.1 cm to prevent' +
' model divergence')
# Warn user about Pwat data that is exceptionally high
if np.max(pw) > 8:
warn('Exceptionally high Pwat values. Check input data:' +
' model may diverge in this range')
# *** AMa ***
# Replace Extremely High AM with AM 10 to prevent model divergence
# AM > 10 will only occur very close to sunset
if np.max(airmass_absolute) > 10:
airmass_absolute = np.minimum(airmass_absolute, 10)
# Warn user about AMa data that is exceptionally low
if np.min(airmass_absolute) < 0.58:
warn('Exceptionally low air mass: ' +
'model not intended for extra-terrestrial use')
# pvl_absoluteairmass(1,pvl_alt2pres(4340)) = 0.58 Elevation of
# Mina Pirquita, Argentian = 4340 m. Highest elevation city with
# population over 50,000.
_coefficients = {}
_coefficients['cdte'] = (
0.86273, -0.038948, -0.012506, 0.098871, 0.084658, -0.0042948)
_coefficients['monosi'] = (
0.85914, -0.020880, -0.0058853, 0.12029, 0.026814, -0.0017810)
_coefficients['xsi'] = _coefficients['monosi']
_coefficients['polysi'] = (
0.84090, -0.027539, -0.0079224, 0.13570, 0.038024, -0.0021218)
_coefficients['multisi'] = _coefficients['polysi']
_coefficients['cigs'] = (
0.85252, -0.022314, -0.0047216, 0.13666, 0.013342, -0.0008945)
_coefficients['asi'] = (
1.12094, -0.047620, -0.0083627, -0.10443, 0.098382, -0.0033818)
if module_type is not None and coefficients is None:
coefficients = _coefficients[module_type.lower()]
elif module_type is None and coefficients is not None:
pass
elif module_type is None and coefficients is None:
raise TypeError('No valid input provided, both module_type and ' +
'coefficients are None')
else:
raise TypeError('Cannot resolve input, must supply only one of ' +
'module_type and coefficients')
# Evaluate Spectral Shift
coeff = coefficients
ama = airmass_absolute
modifier = (
coeff[0] + coeff[1]*ama + coeff[2]*pw + coeff[3]*np.sqrt(ama) +
coeff[4]*np.sqrt(pw) + coeff[5]*ama/np.sqrt(pw))
return modifier | r"""
Spectral mismatch modifier based on precipitable water and absolute
(pressure corrected) airmass.
Estimates a spectral mismatch modifier M representing the effect on
module short circuit current of variation in the spectral
irradiance. M is estimated from absolute (pressure corrected) air
mass, AMa, and precipitable water, Pwat, using the following
function:
.. math::
M = c_1 + c_2*AMa + c_3*Pwat + c_4*AMa^.5
+ c_5*Pwat^.5 + c_6*AMa/Pwat^.5
Default coefficients are determined for several cell types with
known quantum efficiency curves, by using the Simple Model of the
Atmospheric Radiative Transfer of Sunshine (SMARTS) [1]_. Using
SMARTS, spectrums are simulated with all combinations of AMa and
Pwat where:
* 0.5 cm <= Pwat <= 5 cm
* 1.0 <= AMa <= 5.0
* Spectral range is limited to that of CMP11 (280 nm to 2800 nm)
* spectrum simulated on a plane normal to the sun
* All other parameters fixed at G173 standard
From these simulated spectra, M is calculated using the known
quantum efficiency curves. Multiple linear regression is then
applied to fit Eq. 1 to determine the coefficients for each module.
Based on the PVLIB Matlab function ``pvl_FSspeccorr`` by Mitchell
Lee and Alex Panchula, at First Solar, 2016 [2]_.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
module_type : None or string, default None
a string specifying a cell type. Can be lower or upper case
letters. Admits values of 'cdte', 'monosi', 'xsi', 'multisi',
'polysi'. If provided, this input selects coefficients for the
following default modules:
* 'cdte' - First Solar Series 4-2 CdTe modules.
* 'monosi', 'xsi' - First Solar TetraSun modules.
* 'multisi', 'polysi' - multi-crystalline silicon modules.
* 'cigs' - anonymous copper indium gallium selenide PV module
* 'asi' - anonymous amorphous silicon PV module
The module used to calculate the spectral correction
coefficients corresponds to the Mult-crystalline silicon
Manufacturer 2 Model C from [3]_. Spectral Response (SR) of CIGS
and a-Si modules used to derive coefficients can be found in [4]_
coefficients : None or array-like, default None
allows for entry of user defined spectral correction
coefficients. Coefficients must be of length 6. Derivation of
coefficients requires use of SMARTS and PV module quantum
efficiency curve. Useful for modeling PV module types which are
not included as defaults, or to fine tune the spectral
correction to a particular mono-Si, multi-Si, or CdTe PV module.
Note that the parameters for modules with very similar QE should
be similar, in most cases limiting the need for module specific
coefficients.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which is can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
References
----------
.. [1] Gueymard, Christian. SMARTS2: a simple model of the atmospheric
radiative transfer of sunshine: algorithms and performance
assessment. Cocoa, FL: Florida Solar Energy Center, 1995.
.. [2] Lee, Mitchell, and Panchula, Alex. "Spectral Correction for
Photovoltaic Module Performance Based on Air Mass and Precipitable
Water." IEEE Photovoltaic Specialists Conference, Portland, 2016
.. [3] Marion, William F., et al. User's Manual for Data for Validating
Models for PV Module Performance. National Renewable Energy
Laboratory, 2014. http://www.nrel.gov/docs/fy14osti/61610.pdf
.. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects
on Energy Yield of Different PV Modules: Comparison of Pwat and
MMF Approach, TUV Rheinland Energy GmbH report 21237296.003,
January 2017 | Below is the instruction that describes the task:
### Input:
r"""
Spectral mismatch modifier based on precipitable water and absolute
(pressure corrected) airmass.
Estimates a spectral mismatch modifier M representing the effect on
module short circuit current of variation in the spectral
irradiance. M is estimated from absolute (pressure corrected) air
mass, AMa, and precipitable water, Pwat, using the following
function:
.. math::
M = c_1 + c_2*AMa + c_3*Pwat + c_4*AMa^.5
+ c_5*Pwat^.5 + c_6*AMa/Pwat^.5
Default coefficients are determined for several cell types with
known quantum efficiency curves, by using the Simple Model of the
Atmospheric Radiative Transfer of Sunshine (SMARTS) [1]_. Using
SMARTS, spectrums are simulated with all combinations of AMa and
Pwat where:
* 0.5 cm <= Pwat <= 5 cm
* 1.0 <= AMa <= 5.0
* Spectral range is limited to that of CMP11 (280 nm to 2800 nm)
* spectrum simulated on a plane normal to the sun
* All other parameters fixed at G173 standard
From these simulated spectra, M is calculated using the known
quantum efficiency curves. Multiple linear regression is then
applied to fit Eq. 1 to determine the coefficients for each module.
Based on the PVLIB Matlab function ``pvl_FSspeccorr`` by Mitchell
Lee and Alex Panchula, at First Solar, 2016 [2]_.
Parameters
----------
pw : array-like
atmospheric precipitable water (cm).
airmass_absolute : array-like
absolute (pressure corrected) airmass.
module_type : None or string, default None
a string specifying a cell type. Can be lower or upper case
letters. Admits values of 'cdte', 'monosi', 'xsi', 'multisi',
'polysi'. If provided, this input selects coefficients for the
following default modules:
* 'cdte' - First Solar Series 4-2 CdTe modules.
* 'monosi', 'xsi' - First Solar TetraSun modules.
* 'multisi', 'polysi' - multi-crystalline silicon modules.
* 'cigs' - anonymous copper indium gallium selenide PV module
* 'asi' - anonymous amorphous silicon PV module
The module used to calculate the spectral correction
coefficients corresponds to the Mult-crystalline silicon
Manufacturer 2 Model C from [3]_. Spectral Response (SR) of CIGS
and a-Si modules used to derive coefficients can be found in [4]_
coefficients : None or array-like, default None
allows for entry of user defined spectral correction
coefficients. Coefficients must be of length 6. Derivation of
coefficients requires use of SMARTS and PV module quantum
efficiency curve. Useful for modeling PV module types which are
not included as defaults, or to fine tune the spectral
correction to a particular mono-Si, multi-Si, or CdTe PV module.
Note that the parameters for modules with very similar QE should
be similar, in most cases limiting the need for module specific
coefficients.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which is can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
References
----------
.. [1] Gueymard, Christian. SMARTS2: a simple model of the atmospheric
radiative transfer of sunshine: algorithms and performance
assessment. Cocoa, FL: Florida Solar Energy Center, 1995.
.. [2] Lee, Mitchell, and Panchula, Alex. "Spectral Correction for
Photovoltaic Module Performance Based on Air Mass and Precipitable
Water." IEEE Photovoltaic Specialists Conference, Portland, 2016
.. [3] Marion, William F., et al. User's Manual for Data for Validating
Models for PV Module Performance. National Renewable Energy
Laboratory, 2014. http://www.nrel.gov/docs/fy14osti/61610.pdf
.. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects
on Energy Yield of Different PV Modules: Comparison of Pwat and
MMF Approach, TUV Rheinland Energy GmbH report 21237296.003,
January 2017
### Response:
def first_solar_spectral_correction(pw, airmass_absolute, module_type=None,
                                    coefficients=None):
    r"""
    Spectral mismatch modifier based on precipitable water and absolute
    (pressure corrected) airmass.

    Estimates a spectral mismatch modifier M representing the effect of
    spectral-irradiance variation on module short circuit current. M is
    computed from absolute (pressure corrected) air mass, AMa, and
    precipitable water, Pwat, as:

    .. math::

        M = c_1 + c_2 AMa + c_3 Pwat + c_4 AMa^{0.5}
            + c_5 Pwat^{0.5} + c_6 AMa / Pwat^{0.5}

    Default coefficients were fit for several cell technologies with
    known quantum efficiency curves, from spectra simulated with SMARTS
    [1]_ over all combinations of 0.5 cm <= Pwat <= 5 cm and
    1.0 <= AMa <= 5.0, restricted to the CMP11 spectral range
    (280-2800 nm), on a plane normal to the sun, with all other
    parameters at the G173 standard. Based on the PVLIB Matlab function
    ``pvl_FSspeccorr`` by Mitchell Lee and Alex Panchula, at First
    Solar, 2016 [2]_.

    Parameters
    ----------
    pw : array-like
        atmospheric precipitable water (cm).
    airmass_absolute : array-like
        absolute (pressure corrected) airmass.
    module_type : None or string, default None
        cell-type name, case insensitive, selecting built-in
        coefficients:

        * 'cdte' - First Solar Series 4-2 CdTe modules.
        * 'monosi', 'xsi' - First Solar TetraSun modules.
        * 'multisi', 'polysi' - multi-crystalline silicon modules
          (Manufacturer 2 Model C from [3]_).
        * 'cigs' - anonymous copper indium gallium selenide PV module.
        * 'asi' - anonymous amorphous silicon PV module.

        Spectral response of the CIGS and a-Si modules used to derive
        coefficients can be found in [4]_.
    coefficients : None or array-like, default None
        user-supplied spectral correction coefficients of length 6, an
        alternative to ``module_type``. Deriving coefficients requires
        SMARTS and the PV module quantum efficiency curve. Exactly one
        of ``module_type`` and ``coefficients`` must be provided.

    Returns
    -------
    modifier : array-like
        spectral mismatch factor (unitless) which can be multiplied
        with broadband irradiance reaching a module's cells to estimate
        effective irradiance, i.e., the irradiance that is converted to
        electrical current.

    Raises
    ------
    TypeError
        if both, or neither, of ``module_type`` and ``coefficients``
        are given.

    References
    ----------
    .. [1] Gueymard, Christian. SMARTS2: a simple model of the
       atmospheric radiative transfer of sunshine: algorithms and
       performance assessment. Cocoa, FL: Florida Solar Energy Center,
       1995.
    .. [2] Lee, Mitchell, and Panchula, Alex. "Spectral Correction for
       Photovoltaic Module Performance Based on Air Mass and
       Precipitable Water." IEEE Photovoltaic Specialists Conference,
       Portland, 2016
    .. [3] Marion, William F., et al. User's Manual for Data for
       Validating Models for PV Module Performance. National Renewable
       Energy Laboratory, 2014.
       http://www.nrel.gov/docs/fy14osti/61610.pdf
    .. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects
       on Energy Yield of Different PV Modules: Comparison of Pwat and
       MMF Approach, TUV Rheinland Energy GmbH report 21237296.003,
       January 2017
    """
    # --- Screen input data ---
    # Pwat below 0.1 cm makes the AMa/sqrt(Pwat) term blow up; clamp it.
    if np.min(pw) < 0.1:
        pw = np.maximum(pw, 0.1)
        warn('Exceptionally low Pwat values replaced with 0.1 cm to prevent' +
             ' model divergence')
    # Very humid conditions lie outside the fitted Pwat range.
    if np.max(pw) > 8:
        warn('Exceptionally high Pwat values. Check input data:' +
             ' model may diverge in this range')
    # AMa > 10 only occurs very close to sunset; clamp to keep the model
    # from diverging there.
    if np.max(airmass_absolute) > 10:
        airmass_absolute = np.minimum(airmass_absolute, 10)
    # pvl_absoluteairmass(1, pvl_alt2pres(4340)) = 0.58; 4340 m is the
    # elevation of Mina Pirquita, Argentina, the highest city with a
    # population over 50,000 -- lower AMa implies extra-terrestrial use.
    if np.min(airmass_absolute) < 0.58:
        warn('Exceptionally low air mass: ' +
             'model not intended for extra-terrestrial use')

    _coefficients = {
        'cdte': (
            0.86273, -0.038948, -0.012506, 0.098871, 0.084658, -0.0042948),
        'monosi': (
            0.85914, -0.020880, -0.0058853, 0.12029, 0.026814, -0.0017810),
        'polysi': (
            0.84090, -0.027539, -0.0079224, 0.13570, 0.038024, -0.0021218),
        'cigs': (
            0.85252, -0.022314, -0.0047216, 0.13666, 0.013342, -0.0008945),
        'asi': (
            1.12094, -0.047620, -0.0083627, -0.10443, 0.098382, -0.0033818),
    }
    # Aliases that share the same fitted coefficients.
    _coefficients['xsi'] = _coefficients['monosi']
    _coefficients['multisi'] = _coefficients['polysi']

    # Exactly one of module_type / coefficients must be supplied.
    if module_type is None and coefficients is None:
        raise TypeError('No valid input provided, both module_type and ' +
                        'coefficients are None')
    if module_type is not None and coefficients is not None:
        raise TypeError('Cannot resolve input, must supply only one of ' +
                        'module_type and coefficients')
    if coefficients is None:
        coefficients = _coefficients[module_type.lower()]

    # --- Evaluate spectral shift ---
    coeff = coefficients
    ama = airmass_absolute
    modifier = (
        coeff[0] + coeff[1]*ama + coeff[2]*pw + coeff[3]*np.sqrt(ama) +
        coeff[4]*np.sqrt(pw) + coeff[5]*ama/np.sqrt(pw))
    return modifier
def __get_handle_record_if_necessary(self, handle, handlerecord_json):
'''
Returns the handle record if it is None or if its handle is not the
same as the specified handle.
'''
if handlerecord_json is None:
handlerecord_json = self.retrieve_handle_record_json(handle)
else:
if handle != handlerecord_json['handle']:
handlerecord_json = self.retrieve_handle_record_json(handle)
return handlerecord_json | Returns the handle record if it is None or if its handle is not the
same as the specified handle. | Below is the instruction that describes the task:
### Input:
Returns the handle record if it is None or if its handle is not the
same as the specified handle.
### Response:
def __get_handle_record_if_necessary(self, handle, handlerecord_json):
    '''
    Return a handle record for ``handle``, re-fetching it when needed.

    The record passed in is reused only if it exists and actually
    belongs to the requested handle; if it is None or belongs to a
    different handle, a fresh record is retrieved.

    :param handle: The handle whose record is needed.
    :param handlerecord_json: A possibly stale or unrelated handle
        record (a dict with a 'handle' key), or None.
    :return: A handle record for ``handle``.
    '''
    # Re-fetch when there is no record yet or the supplied one belongs
    # to another handle.
    if handlerecord_json is None or handle != handlerecord_json['handle']:
        handlerecord_json = self.retrieve_handle_record_json(handle)
    return handlerecord_json
def report_status(self, status):
"""Hook for reporting the task status towards completion"""
status = Instance('', TaskStatus).validate(None, status)
print(r'{taskname} | {percent:>3}% | {message}'.format(
taskname=self.__class__.__name__,
percent=int(round(100*status.progress)),
message=status.message if status.message else '',
)) | Hook for reporting the task status towards completion | Below is the instruction that describes the task:
### Input:
Hook for reporting the task status towards completion
### Response:
def report_status(self, status):
    """Hook for reporting the task status towards completion"""
    # Validate/coerce into a TaskStatus instance before reading fields.
    status = Instance('', TaskStatus).validate(None, status)
    percent = int(round(100*status.progress))
    message = status.message or ''
    print(r'{taskname} | {percent:>3}% | {message}'.format(
        taskname=type(self).__name__,
        percent=percent,
        message=message,
    ))
def _complete_path(path=None):
"""Perform completion of filesystem path.
https://stackoverflow.com/questions/5637124/tab-completion-in-pythons-raw-input
"""
if not path:
return _listdir('.')
dirname, rest = os.path.split(path)
tmp = dirname if dirname else '.'
res = [p for p in _listdir(tmp) if p.startswith(rest)]
# more than one match, or single match which does not exist (typo)
if len(res) > 1 or not os.path.exists(path):
return res
# resolved to a single directory, so return list of files below it
if os.path.isdir(path):
return [p for p in _listdir(path)]
# exact file match terminates this completion
return [path + ' '] | Perform completion of filesystem path.
https://stackoverflow.com/questions/5637124/tab-completion-in-pythons-raw-input | Below is the instruction that describes the task:
### Input:
Perform completion of filesystem path.
https://stackoverflow.com/questions/5637124/tab-completion-in-pythons-raw-input
### Response:
def _complete_path(path=None):
    """Perform filesystem path completion.
    https://stackoverflow.com/questions/5637124/tab-completion-in-pythons-raw-input
    """
    if not path:
        return _listdir('.')
    dirname, rest = os.path.split(path)
    search_dir = dirname if dirname else '.'
    matches = [entry for entry in _listdir(search_dir)
               if entry.startswith(rest)]
    # Several candidates, or a lone partial match that is not an
    # existing path (typo): offer the candidate list.
    if len(matches) > 1 or not os.path.exists(path):
        return matches
    # A single match that resolved to a directory: list its contents.
    if os.path.isdir(path):
        return list(_listdir(path))
    # An exact file match terminates this completion.
    return [path + ' ']
def setCheckable(self, state):
"""
Sets whether or not the actions within this button should be checkable.
:param state | <bool>
"""
self._checkable = state
for act in self._actionGroup.actions():
act.setCheckable(state) | Sets whether or not the actions within this button should be checkable.
:param state | <bool> | Below is the instruction that describes the task:
### Input:
Sets whether or not the actions within this button should be checkable.
:param state | <bool>
### Response:
def setCheckable(self, state):
    """
    Enables or disables checkability for every action within this
    button's action group.
    :param state | <bool>
    """
    self._checkable = state
    for action in self._actionGroup.actions():
        action.setCheckable(state)
def main():
""" Main function, mostly for passing arguments """
_init_logging(config.get('config', 'anteater_log'))
check_dir()
arguments = docopt(__doc__, version=__version__)
if arguments['<patchset>']:
prepare_patchset(arguments['<project>'], arguments['<patchset>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls'])
elif arguments['<project_path>']:
prepare_project(arguments['<project>'], arguments['<project_path>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls']) | Main function, mostly for passing arguments | Below is the instruction that describes the task:
### Input:
Main function, mostly for passing arguments
### Response:
def main():
    """Entry point: set up logging, parse CLI arguments and dispatch."""
    _init_logging(config.get('config', 'anteater_log'))
    check_dir()
    args = docopt(__doc__, version=__version__)
    # The three scan selectors are forwarded unchanged to the handlers.
    scan_flags = (args['--binaries'], args['--ips'], args['--urls'])
    if args['<patchset>']:
        prepare_patchset(args['<project>'], args['<patchset>'], *scan_flags)
    elif args['<project_path>']:
        prepare_project(args['<project>'], args['<project_path>'], *scan_flags)
def force_bytes(s, encoding='utf-8', errors='strict'):
"""A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == 'utf-8':
return s
else:
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s.encode(encoding, errors) | A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes | Below is the instruction that describes the task:
### Input:
A function turns "s" into bytes object, similar to django.utils.encoding.force_bytes
### Response:
def force_bytes(s, encoding='utf-8', errors='strict'):
    """Turn *s* into a bytes object in the given encoding, similar to
    django.utils.encoding.force_bytes.
    """
    # Non-bytes input (typically str) is simply encoded.
    if not isinstance(s, bytes):
        return s.encode(encoding, errors)
    # Already bytes: the common utf-8 case needs no work at all.
    if encoding == 'utf-8':
        return s
    # Re-code bytes (assumed utf-8) into the requested encoding.
    return s.decode('utf-8', errors).encode(encoding, errors)
def FlagDictToArgs(flag_map):
"""Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
* Everything else is converted to string an passed as such.
Yields:
sequence of string suitable for a subprocess execution.
"""
for key, value in six.iteritems(flag_map):
if value is None:
yield '--%s' % key
elif isinstance(value, bool):
if value:
yield '--%s' % key
else:
yield '--no%s' % key
elif isinstance(value, (bytes, type(u''))):
# We don't want strings to be handled like python collections.
yield '--%s=%s' % (key, value)
else:
# Now we attempt to deal with collections.
try:
yield '--%s=%s' % (key, ','.join(str(item) for item in value))
except TypeError:
# Default case.
yield '--%s=%s' % (key, value) | Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
* Everything else is converted to string an passed as such.
Yields:
sequence of string suitable for a subprocess execution. | Below is the instruction that describes the task:
### Input:
Convert a dict of values into process call parameters.
This method is used to convert a dictionary into a sequence of parameters
for a binary that parses arguments using this module.
Args:
flag_map: a mapping where the keys are flag names (strings).
values are treated according to their type:
* If value is None, then only the name is emitted.
* If value is True, then only the name is emitted.
* If value is False, then only the name prepended with 'no' is emitted.
* If value is a string then --name=value is emitted.
* If value is a collection, this will emit --name=value1,value2,value3.
* Everything else is converted to string an passed as such.
Yields:
sequence of string suitable for a subprocess execution.
### Response:
def FlagDictToArgs(flag_map):
  """Translate a mapping of flag values into command-line parameters.

  Used to turn a dictionary into the argument sequence for a binary
  that parses its arguments using this module.

  Args:
    flag_map: a mapping whose keys are flag names (strings). Values
        are rendered according to their type:
        * None or True emits only '--name'.
        * False emits '--noname'.
        * a string emits '--name=value'.
        * any other iterable emits '--name=item1,item2,item3'.
        * everything else is stringified into '--name=value'.

  Yields:
    strings suitable for a subprocess execution.
  """
  for name, value in six.iteritems(flag_map):
    if value is None:
      yield '--%s' % name
      continue
    if isinstance(value, bool):
      yield ('--%s' % name) if value else ('--no%s' % name)
      continue
    if isinstance(value, (bytes, type(u''))):
      # Strings must not be treated like python collections.
      yield '--%s=%s' % (name, value)
      continue
    # Attempt to deal with collections; fall back to plain stringification.
    try:
      joined = ','.join(str(item) for item in value)
    except TypeError:
      yield '--%s=%s' % (name, value)
    else:
      yield '--%s=%s' % (name, joined)
def get_iso3_country_code(cls, country, use_live=True, exception=None):
# type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
"""Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
"""
countriesdata = cls.countriesdata(use_live=use_live)
countryupper = country.upper()
len_countryupper = len(countryupper)
if len_countryupper == 3:
if countryupper in countriesdata['countries']:
return countryupper
elif len_countryupper == 2:
iso3 = countriesdata['iso2iso3'].get(countryupper)
if iso3 is not None:
return iso3
iso3 = countriesdata['countrynames2iso3'].get(countryupper)
if iso3 is not None:
return iso3
for candidate in cls.expand_countryname_abbrevs(countryupper):
iso3 = countriesdata['countrynames2iso3'].get(candidate)
if iso3 is not None:
return iso3
if exception is not None:
raise exception
return None | Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None | Below is the instruction that describes the task:
### Input:
Get ISO3 code for cls. Only exact matches or None are returned.
Args:
country (str): Country for which to get ISO3 code
use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.
Returns:
Optional[str]: ISO3 country code or None
### Response:
def get_iso3_country_code(cls, country, use_live=True, exception=None):
    # type: (str, bool, Optional[ExceptionUpperBound]) -> Optional[str]
    """Look up the ISO3 code for a country. Only exact matches (or
    None) are returned.

    Args:
        country (str): Country for which to get ISO3 code
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[str]: ISO3 country code or None
    """
    data = cls.countriesdata(use_live=use_live)
    upper = country.upper()
    # A three-letter input may already be a valid ISO3 code.
    if len(upper) == 3 and upper in data['countries']:
        return upper
    # A two-letter input may be an ISO2 code.
    if len(upper) == 2:
        iso3 = data['iso2iso3'].get(upper)
        if iso3 is not None:
            return iso3
    # Otherwise try the exact country name, then expanded abbreviations.
    iso3 = data['countrynames2iso3'].get(upper)
    if iso3 is not None:
        return iso3
    for candidate in cls.expand_countryname_abbrevs(upper):
        iso3 = data['countrynames2iso3'].get(candidate)
        if iso3 is not None:
            return iso3
    if exception is not None:
        raise exception
    return None
def alignment(self, d=5):
""" Boids match velocity with other boids.
"""
vx = vy = vz = 0
for b in self.boids:
if b != self:
vx, vy, vz = vx+b.vx, vy+b.vy, vz+b.vz
n = len(self.boids)-1
vx, vy, vz = vx/n, vy/n, vz/n
return (vx-self.vx)/d, (vy-self.vy)/d, (vz-self.vz)/d | Boids match velocity with other boids. | Below is the instruction that describes the task:
### Input:
Boids match velocity with other boids.
### Response:
def alignment(self, d=5):
    """Steer toward the mean velocity of the other boids.

    Computes the difference between the average velocity of all other
    boids in the flock and this boid's own velocity, damped by 1/d.

    Parameters
    ----------
    d : int or float, default 5
        Damping divisor; larger values give a gentler correction.

    Returns
    -------
    tuple of float
        (dvx, dvy, dvz) velocity correction. The zero vector when the
        boid has no flockmates (the original divided by zero there).
    """
    vx = vy = vz = 0
    neighbours = 0
    for other in self.boids:
        if other != self:
            vx, vy, vz = vx + other.vx, vy + other.vy, vz + other.vz
            neighbours += 1
    if neighbours == 0:
        # Lone boid: nothing to align with, so apply no correction
        # instead of raising ZeroDivisionError.
        return 0.0, 0.0, 0.0
    # Count only the boids actually summed, so the average stays correct
    # even if self is not a member of self.boids.
    vx, vy, vz = vx/neighbours, vy/neighbours, vz/neighbours
    return (vx-self.vx)/d, (vy-self.vy)/d, (vz-self.vz)/d
def _parse_forward(mapping):
'''
Parses a port forwarding statement in the form used by this state:
from_port:to_port:protocol[:destination]
and returns a ForwardingMapping object
'''
if len(mapping.split(':')) > 3:
(srcport, destport, protocol, destaddr) = mapping.split(':')
else:
(srcport, destport, protocol) = mapping.split(':')
destaddr = ''
return ForwardingMapping(srcport, destport, protocol, destaddr) | Parses a port forwarding statement in the form used by this state:
from_port:to_port:protocol[:destination]
and returns a ForwardingMapping object | Below is the instruction that describes the task:
### Input:
Parses a port forwarding statement in the form used by this state:
from_port:to_port:protocol[:destination]
and returns a ForwardingMapping object
### Response:
def _parse_forward(mapping):
    '''
    Parse a port forwarding statement in the form used by this state:
        from_port:to_port:protocol[:destination]
    and return a ForwardingMapping object.
    '''
    fields = mapping.split(':')
    if len(fields) > 3:
        # Four fields: an explicit destination address is present.
        srcport, destport, protocol, destaddr = fields
    else:
        srcport, destport, protocol = fields
        destaddr = ''
    return ForwardingMapping(srcport, destport, protocol, destaddr)
def duplicate(self, name=None, location=None):
"""
Duplicate a project
It's the save as feature of the 1.X. It's implemented on top of the
export / import features. It will generate a gns3p and reimport it.
It's a little slower but we have only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
:param location: Parent directory of the new project
"""
# If the project was not open we open it temporary
previous_status = self._status
if self._status == "closed":
yield from self.open()
self.dump()
try:
with tempfile.TemporaryDirectory() as tmpdir:
zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
with open(os.path.join(tmpdir, "project.gns3p"), "wb") as f:
for data in zipstream:
f.write(data)
with open(os.path.join(tmpdir, "project.gns3p"), "rb") as f:
project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
except (OSError, UnicodeEncodeError) as e:
raise aiohttp.web.HTTPConflict(text="Can not duplicate project: {}".format(str(e)))
if previous_status == "closed":
yield from self.close()
return project | Duplicate a project
It's the save as feature of the 1.X. It's implemented on top of the
export / import features. It will generate a gns3p and reimport it.
It's a little slower but we have only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
:param location: Parent directory of the new project | Below is the instruction that describes the task:
### Input:
Duplicate a project
It's the save as feature of the 1.X. It's implemented on top of the
export / import features. It will generate a gns3p and reimport it.
It's a little slower but we have only one implementation to maintain.
:param name: Name of the new project. A new one will be generated in case of conflicts
:param location: Parent directory of the new project
### Response:
def duplicate(self, name=None, location=None):
    """
    Duplicate a project
    It's the save as feature of the 1.X. It's implemented on top of the
    export / import features. It will generate a gns3p and reimport it.
    It's a little slower but we have only one implementation to maintain.
    :param name: Name of the new project. A new one will be generated in case of conflicts
    :param location: Parent directory of the new project
    """
    # If the project was not open we open it temporary
    previous_status = self._status
    if self._status == "closed":
        yield from self.open()
    # Flush the project state to disk before exporting it.
    self.dump()
    try:
        with tempfile.TemporaryDirectory() as tmpdir:
            # Export the whole project into a .gns3p archive inside a
            # scratch directory, then re-import that archive as a brand
            # new project (fresh UUID).
            zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
            with open(os.path.join(tmpdir, "project.gns3p"), "wb") as f:
                for data in zipstream:
                    f.write(data)
            with open(os.path.join(tmpdir, "project.gns3p"), "rb") as f:
                project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
    except (OSError, UnicodeEncodeError) as e:
        # Surface filesystem/encoding failures as an HTTP 409 for the API layer.
        raise aiohttp.web.HTTPConflict(text="Can not duplicate project: {}".format(str(e)))
    if previous_status == "closed":
        # Restore the project's original closed state.
        yield from self.close()
    return project |
def mod_init(low):
'''
Set a flag to tell the install functions to refresh the package database.
This ensures that the package database is refreshed only once during
a state run significantly improving the speed of package management
during a state run.
It sets a flag for a number of reasons, primarily due to timeline logic.
When originally setting up the mod_init for pkg a number of corner cases
arose with different package managers and how they refresh package data.
It also runs the "ex_mod_init" from the package manager module that is
currently loaded. The "ex_mod_init" is expected to work as a normal
"mod_init" function.
.. seealso::
:py:func:`salt.modules.ebuild.ex_mod_init`
'''
ret = True
if 'pkg.ex_mod_init' in __salt__:
ret = __salt__['pkg.ex_mod_init'](low)
if low['fun'] == 'installed' or low['fun'] == 'latest':
salt.utils.pkg.write_rtag(__opts__)
return ret
return False | Set a flag to tell the install functions to refresh the package database.
This ensures that the package database is refreshed only once during
a state run significantly improving the speed of package management
during a state run.
It sets a flag for a number of reasons, primarily due to timeline logic.
When originally setting up the mod_init for pkg a number of corner cases
arose with different package managers and how they refresh package data.
It also runs the "ex_mod_init" from the package manager module that is
currently loaded. The "ex_mod_init" is expected to work as a normal
"mod_init" function.
.. seealso::
:py:func:`salt.modules.ebuild.ex_mod_init` | Below is the instruction that describes the task:
### Input:
Set a flag to tell the install functions to refresh the package database.
This ensures that the package database is refreshed only once during
a state run significantly improving the speed of package management
during a state run.
It sets a flag for a number of reasons, primarily due to timeline logic.
When originally setting up the mod_init for pkg a number of corner cases
arose with different package managers and how they refresh package data.
It also runs the "ex_mod_init" from the package manager module that is
currently loaded. The "ex_mod_init" is expected to work as a normal
"mod_init" function.
.. seealso::
:py:func:`salt.modules.ebuild.ex_mod_init`
### Response:
def mod_init(low):
'''
Set a flag to tell the install functions to refresh the package database.
This ensures that the package database is refreshed only once during
a state run significantly improving the speed of package management
during a state run.
It sets a flag for a number of reasons, primarily due to timeline logic.
When originally setting up the mod_init for pkg a number of corner cases
arose with different package managers and how they refresh package data.
It also runs the "ex_mod_init" from the package manager module that is
currently loaded. The "ex_mod_init" is expected to work as a normal
"mod_init" function.
.. seealso::
:py:func:`salt.modules.ebuild.ex_mod_init`
'''
ret = True
if 'pkg.ex_mod_init' in __salt__:
ret = __salt__['pkg.ex_mod_init'](low)
if low['fun'] == 'installed' or low['fun'] == 'latest':
salt.utils.pkg.write_rtag(__opts__)
return ret
return False |
def render_markdown(post):
"""
Renders the post as Markdown using the template specified in :attr:`markdown_template_path`.
"""
from engineer.conf import settings
# A hack to guarantee the YAML output is in a sensible order.
# The order, assuming all metadata should be written, should be:
# title
# status
# timestamp
# link
# via
# via-link
# slug
# tags
# updated
# template
# content-template
# url
d = [
('status', post.status.name),
('link', post.link),
('via', post.via),
('via-link', post.via_link),
('tags', post.tags),
('updated', post.updated_local.strftime(settings.TIME_FORMAT) if post.updated is not None else None),
('template', post.template if post.template != 'theme/post_detail.html' else None),
('content-template',
post.content_template if post.content_template != 'theme/_content_default.html' else None),
]
# The complete set of metadata that should be written is the union of the FINALIZE_METADATA.config setting and
# the set of metadata that was in the file originally.
finalization_config = FinalizationPlugin.get_settings()['config']
metadata_to_finalize = set([m for m, s in finalization_config.iteritems() if post.status in s])
metadata_to_finalize.update(post.metadata_original)
if 'title' in metadata_to_finalize:
# insert at the top of the list
d.insert(0, ('title', post.title))
if 'slug' in metadata_to_finalize:
# insert right before tags
d.insert(d.index(('tags', post.tags)), ('slug', post.slug))
if 'timestamp' in metadata_to_finalize:
# insert right after status
d.insert(d.index(('status', post.status.name)), ('timestamp',
post.timestamp_local.strftime(settings.TIME_FORMAT)))
if 'url' in metadata_to_finalize:
# insert at end of list
d.append(('url', post.url))
metadata = ''
for k, v in d:
if v is not None and len(v) > 0:
metadata += yaml.safe_dump(dict([(k, v)]), default_flow_style=False)
# handle custom metadata
if len(post.custom_properties):
metadata += '\n'
metadata += yaml.safe_dump(dict(post.custom_properties), default_flow_style=False)
return settings.JINJA_ENV.get_template(post.markdown_template_path).render(metadata=metadata,
content=post.content_finalized,
                                                                               post=post) | Renders the post as Markdown using the template specified in :attr:`markdown_template_path`. | Below is the instruction that describes the task:
### Input:
Renders the post as Markdown using the template specified in :attr:`markdown_template_path`.
### Response:
def render_markdown(post):
"""
Renders the post as Markdown using the template specified in :attr:`markdown_template_path`.
"""
from engineer.conf import settings
# A hack to guarantee the YAML output is in a sensible order.
# The order, assuming all metadata should be written, should be:
# title
# status
# timestamp
# link
# via
# via-link
# slug
# tags
# updated
# template
# content-template
# url
d = [
('status', post.status.name),
('link', post.link),
('via', post.via),
('via-link', post.via_link),
('tags', post.tags),
('updated', post.updated_local.strftime(settings.TIME_FORMAT) if post.updated is not None else None),
('template', post.template if post.template != 'theme/post_detail.html' else None),
('content-template',
post.content_template if post.content_template != 'theme/_content_default.html' else None),
]
# The complete set of metadata that should be written is the union of the FINALIZE_METADATA.config setting and
# the set of metadata that was in the file originally.
finalization_config = FinalizationPlugin.get_settings()['config']
metadata_to_finalize = set([m for m, s in finalization_config.iteritems() if post.status in s])
metadata_to_finalize.update(post.metadata_original)
if 'title' in metadata_to_finalize:
# insert at the top of the list
d.insert(0, ('title', post.title))
if 'slug' in metadata_to_finalize:
# insert right before tags
d.insert(d.index(('tags', post.tags)), ('slug', post.slug))
if 'timestamp' in metadata_to_finalize:
# insert right after status
d.insert(d.index(('status', post.status.name)), ('timestamp',
post.timestamp_local.strftime(settings.TIME_FORMAT)))
if 'url' in metadata_to_finalize:
# insert at end of list
d.append(('url', post.url))
metadata = ''
for k, v in d:
if v is not None and len(v) > 0:
metadata += yaml.safe_dump(dict([(k, v)]), default_flow_style=False)
# handle custom metadata
if len(post.custom_properties):
metadata += '\n'
metadata += yaml.safe_dump(dict(post.custom_properties), default_flow_style=False)
return settings.JINJA_ENV.get_template(post.markdown_template_path).render(metadata=metadata,
content=post.content_finalized,
post=post) |
def rolling_beta(returns, factor_returns,
rolling_window=APPROX_BDAYS_PER_MONTH * 6):
"""
Determines the rolling beta of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series or pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- If DataFrame is passed, computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The size of the rolling window, in days, over which to compute
beta (default 6 months).
Returns
-------
pd.Series
Rolling beta.
Note
-----
See https://en.wikipedia.org/wiki/Beta_(finance) for more details.
"""
if factor_returns.ndim > 1:
# Apply column-wise
return factor_returns.apply(partial(rolling_beta, returns),
rolling_window=rolling_window)
else:
out = pd.Series(index=returns.index)
for beg, end in zip(returns.index[0:-rolling_window],
returns.index[rolling_window:]):
out.loc[end] = ep.beta(
returns.loc[beg:end],
factor_returns.loc[beg:end])
return out | Determines the rolling beta of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series or pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- If DataFrame is passed, computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The size of the rolling window, in days, over which to compute
beta (default 6 months).
Returns
-------
pd.Series
Rolling beta.
Note
-----
See https://en.wikipedia.org/wiki/Beta_(finance) for more details. | Below is the instruction that describes the task:
### Input:
Determines the rolling beta of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series or pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- If DataFrame is passed, computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The size of the rolling window, in days, over which to compute
beta (default 6 months).
Returns
-------
pd.Series
Rolling beta.
Note
-----
See https://en.wikipedia.org/wiki/Beta_(finance) for more details.
### Response:
def rolling_beta(returns, factor_returns,
rolling_window=APPROX_BDAYS_PER_MONTH * 6):
"""
Determines the rolling beta of a strategy.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in tears.create_full_tear_sheet.
factor_returns : pd.Series or pd.DataFrame
Daily noncumulative returns of the benchmark factor to which betas are
computed. Usually a benchmark such as market returns.
- If DataFrame is passed, computes rolling beta for each column.
- This is in the same style as returns.
rolling_window : int, optional
The size of the rolling window, in days, over which to compute
beta (default 6 months).
Returns
-------
pd.Series
Rolling beta.
Note
-----
See https://en.wikipedia.org/wiki/Beta_(finance) for more details.
"""
if factor_returns.ndim > 1:
# Apply column-wise
return factor_returns.apply(partial(rolling_beta, returns),
rolling_window=rolling_window)
else:
out = pd.Series(index=returns.index)
for beg, end in zip(returns.index[0:-rolling_window],
returns.index[rolling_window:]):
out.loc[end] = ep.beta(
returns.loc[beg:end],
factor_returns.loc[beg:end])
return out |
def check_args(args):
"""Checks the arguments and options."""
# Checking that only VCF can have a - (stdout) as output
if args.output_format not in _streamable_format and args.output == "-":
logger.error("{} format cannot be streamed to standard output"
"".format(args.output_format))
sys.exit(1)
# Checking the file extensions
if args.output_format == "vcf" and args.output != "-":
if not args.output.endswith(".vcf"):
args.output += ".vcf"
elif args.output_format == "plink":
if args.output.endswith(".bed"):
            args.output = args.output[:-4] | Checks the arguments and options. | Below is the instruction that describes the task:
### Input:
Checks the arguments and options.
### Response:
def check_args(args):
"""Checks the arguments and options."""
# Checking that only VCF can have a - (stdout) as output
if args.output_format not in _streamable_format and args.output == "-":
logger.error("{} format cannot be streamed to standard output"
"".format(args.output_format))
sys.exit(1)
# Checking the file extensions
if args.output_format == "vcf" and args.output != "-":
if not args.output.endswith(".vcf"):
args.output += ".vcf"
elif args.output_format == "plink":
if args.output.endswith(".bed"):
args.output = args.output[:-4] |
def get(self, request, customer_uuid):
"""
Handle GET request - render linked learners list and "Link learner" form.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse
"""
context = self._build_context(request, customer_uuid)
manage_learners_form = ManageLearnersForm(
user=request.user,
enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER]
)
context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})
return render(request, self.template, context) | Handle GET request - render linked learners list and "Link learner" form.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse | Below is the instruction that describes the task:
### Input:
Handle GET request - render linked learners list and "Link learner" form.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse
### Response:
def get(self, request, customer_uuid):
"""
Handle GET request - render linked learners list and "Link learner" form.
Arguments:
request (django.http.request.HttpRequest): Request instance
customer_uuid (str): Enterprise Customer UUID
Returns:
django.http.response.HttpResponse: HttpResponse
"""
context = self._build_context(request, customer_uuid)
manage_learners_form = ManageLearnersForm(
user=request.user,
enterprise_customer=context[self.ContextParameters.ENTERPRISE_CUSTOMER]
)
context.update({self.ContextParameters.MANAGE_LEARNERS_FORM: manage_learners_form})
return render(request, self.template, context) |
def sha512(message, encoder=nacl.encoding.HexEncoder):
"""
Hashes ``message`` with SHA512.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
"""
return encoder.encode(nacl.bindings.crypto_hash_sha512(message)) | Hashes ``message`` with SHA512.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes | Below is the instruction that describes the task:
### Input:
Hashes ``message`` with SHA512.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
### Response:
def sha512(message, encoder=nacl.encoding.HexEncoder):
"""
Hashes ``message`` with SHA512.
:param message: The message to hash.
:type message: bytes
:param encoder: A class that is able to encode the hashed message.
:returns: The hashed message.
:rtype: bytes
"""
return encoder.encode(nacl.bindings.crypto_hash_sha512(message)) |
def get_plugin(self, name):
"""
Get a plugin by its name from the plugins loaded for the current namespace
:param name:
:return:
"""
for p in self._plugins:
if p.name == name:
return p
return None | Get a plugin by its name from the plugins loaded for the current namespace
:param name:
:return: | Below is the instruction that describes the task:
### Input:
Get a plugin by its name from the plugins loaded for the current namespace
:param name:
:return:
### Response:
def get_plugin(self, name):
"""
Get a plugin by its name from the plugins loaded for the current namespace
:param name:
:return:
"""
for p in self._plugins:
if p.name == name:
return p
return None |
def seek(self, position):
"""Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
"""
self._position = position
range_string = make_range_string(self._position)
logger.debug('content_length: %r range_string: %r', self._content_length, range_string)
#
# Close old body explicitly.
# When first seek(), self._body is not exist. Catch the exception and do nothing.
#
try:
self._body.close()
except AttributeError:
pass
if position == self._content_length == 0 or position == self._content_length:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
self._body = io.BytesIO()
else:
self._body = self._object.get(Range=range_string)['Body'] | Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key. | Below is the instruction that describes the task:
### Input:
Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
### Response:
def seek(self, position):
"""Seek to the specified position (byte offset) in the S3 key.
:param int position: The byte offset from the beginning of the key.
"""
self._position = position
range_string = make_range_string(self._position)
logger.debug('content_length: %r range_string: %r', self._content_length, range_string)
#
# Close old body explicitly.
# When first seek(), self._body is not exist. Catch the exception and do nothing.
#
try:
self._body.close()
except AttributeError:
pass
if position == self._content_length == 0 or position == self._content_length:
#
# When reading, we can't seek to the first byte of an empty file.
# Similarly, we can't seek past the last byte. Do nothing here.
#
self._body = io.BytesIO()
else:
self._body = self._object.get(Range=range_string)['Body'] |
def get_scanner_params_xml(self):
""" Returns the OSP Daemon's scanner params in xml format. """
scanner_params = Element('scanner_params')
for param_id, param in self.scanner_params.items():
param_xml = SubElement(scanner_params, 'scanner_param')
for name, value in [('id', param_id),
('type', param['type'])]:
param_xml.set(name, value)
for name, value in [('name', param['name']),
('description', param['description']),
('default', param['default']),
('mandatory', param['mandatory'])]:
elem = SubElement(param_xml, name)
elem.text = str(value)
        return scanner_params | Returns the OSP Daemon's scanner params in xml format. | Below is the instruction that describes the task:
### Input:
Returns the OSP Daemon's scanner params in xml format.
### Response:
def get_scanner_params_xml(self):
""" Returns the OSP Daemon's scanner params in xml format. """
scanner_params = Element('scanner_params')
for param_id, param in self.scanner_params.items():
param_xml = SubElement(scanner_params, 'scanner_param')
for name, value in [('id', param_id),
('type', param['type'])]:
param_xml.set(name, value)
for name, value in [('name', param['name']),
('description', param['description']),
('default', param['default']),
('mandatory', param['mandatory'])]:
elem = SubElement(param_xml, name)
elem.text = str(value)
return scanner_params |
def include(code, persist=True, compilerflags=None):
"""This function replaces the *calling module* with a dynamic
module that generates code on demand. The code is generated from
type descriptions that are created by gccxml compiling the C code
'code'.
If <persist> is True, generated code is appended to the module's
source code, otherwise the generated code is executed and then
thrown away.
The calling module must load all the shared libraries that it uses
*BEFORE* this function is called.
NOTE:
- the calling module MUST contain 'from ctypes import *',
and, on windows, also 'from ctypes.wintypes import *'.
"""
compilerflags = compilerflags or ["-c"]
# create a hash for the code, and use that as basename for the
# files we have to create
fullcode = "/* compilerflags: %r */\n%s" % (compilerflags, code)
hashval = md5(fullcode).hexdigest()
fnm = os.path.abspath(os.path.join(gen_dir, hashval))
h_file = fnm + ".h"
xml_file = fnm + ".xml"
tdesc_file = fnm + ".typedesc.bz2"
if not os.path.exists(h_file):
open(h_file, "w").write(fullcode)
if is_newer(h_file, tdesc_file):
if is_newer(h_file, xml_file):
print("# Compiling into...", xml_file, file=sys.stderr)
from ctypeslib import h2xml
h2xml.compile_to_xml(["h2xml",
"-I", os.path.dirname(fnm), "-q",
h_file,
"-o", xml_file] + list(compilerflags))
if is_newer(xml_file, tdesc_file):
print("# Parsing XML file and compressing type descriptions...", file=sys.stderr)
decls = gccxmlparser.parse(xml_file)
ofi = bz2.BZ2File(tdesc_file, "w")
data = cPickle.dump(decls, ofi, -1)
os.remove(xml_file) # not needed any longer.
frame = sys._getframe(1)
glob = frame.f_globals
name = glob["__name__"]
mod = sys.modules[name]
sys.modules[name] = DynamicModule(mod, tdesc_file, persist=persist) | This function replaces the *calling module* with a dynamic
module that generates code on demand. The code is generated from
type descriptions that are created by gccxml compiling the C code
'code'.
If <persist> is True, generated code is appended to the module's
source code, otherwise the generated code is executed and then
thrown away.
The calling module must load all the shared libraries that it uses
*BEFORE* this function is called.
NOTE:
- the calling module MUST contain 'from ctypes import *',
and, on windows, also 'from ctypes.wintypes import *'. | Below is the instruction that describes the task:
### Input:
This function replaces the *calling module* with a dynamic
module that generates code on demand. The code is generated from
type descriptions that are created by gccxml compiling the C code
'code'.
If <persist> is True, generated code is appended to the module's
source code, otherwise the generated code is executed and then
thrown away.
The calling module must load all the shared libraries that it uses
*BEFORE* this function is called.
NOTE:
- the calling module MUST contain 'from ctypes import *',
and, on windows, also 'from ctypes.wintypes import *'.
### Response:
def include(code, persist=True, compilerflags=None):
"""This function replaces the *calling module* with a dynamic
module that generates code on demand. The code is generated from
type descriptions that are created by gccxml compiling the C code
'code'.
If <persist> is True, generated code is appended to the module's
source code, otherwise the generated code is executed and then
thrown away.
The calling module must load all the shared libraries that it uses
*BEFORE* this function is called.
NOTE:
- the calling module MUST contain 'from ctypes import *',
and, on windows, also 'from ctypes.wintypes import *'.
"""
compilerflags = compilerflags or ["-c"]
# create a hash for the code, and use that as basename for the
# files we have to create
fullcode = "/* compilerflags: %r */\n%s" % (compilerflags, code)
hashval = md5(fullcode).hexdigest()
fnm = os.path.abspath(os.path.join(gen_dir, hashval))
h_file = fnm + ".h"
xml_file = fnm + ".xml"
tdesc_file = fnm + ".typedesc.bz2"
if not os.path.exists(h_file):
open(h_file, "w").write(fullcode)
if is_newer(h_file, tdesc_file):
if is_newer(h_file, xml_file):
print("# Compiling into...", xml_file, file=sys.stderr)
from ctypeslib import h2xml
h2xml.compile_to_xml(["h2xml",
"-I", os.path.dirname(fnm), "-q",
h_file,
"-o", xml_file] + list(compilerflags))
if is_newer(xml_file, tdesc_file):
print("# Parsing XML file and compressing type descriptions...", file=sys.stderr)
decls = gccxmlparser.parse(xml_file)
ofi = bz2.BZ2File(tdesc_file, "w")
data = cPickle.dump(decls, ofi, -1)
os.remove(xml_file) # not needed any longer.
frame = sys._getframe(1)
glob = frame.f_globals
name = glob["__name__"]
mod = sys.modules[name]
sys.modules[name] = DynamicModule(mod, tdesc_file, persist=persist) |
def make_consensus_matrix(com_membership, th=0.5):
r"""
Makes the consensus matrix
.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisey edges
Returns
-------
D : array
consensus matrix
"""
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx | r"""
Makes the consensus matrix
.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisey edges
Returns
-------
D : array
consensus matrix | Below is the instruction that describes the task:
### Input:
r"""
Makes the consensus matrix
.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisey edges
Returns
-------
D : array
consensus matrix
### Response:
def make_consensus_matrix(com_membership, th=0.5):
r"""
Makes the consensus matrix
.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisey edges
Returns
-------
D : array
consensus matrix
"""
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx |
def get_object_from_string(object_path):
"""
Return python object from string
:param object_path: e.g os.path.join
:return: python object
"""
# split like crosspm.template.GUS => crosspm.template, GUS
try:
module_name, object_name = object_path.rsplit('.', maxsplit=1)
module_ = __import__(module_name, globals(), locals(), ['App'], 0)
variable_ = getattr(module_, object_name)
except Exception:
variable_ = None
return variable_ | Return python object from string
:param object_path: e.g os.path.join
:return: python object | Below is the instruction that describes the task:
### Input:
Return python object from string
:param object_path: e.g os.path.join
:return: python object
### Response:
def get_object_from_string(object_path):
"""
Return python object from string
:param object_path: e.g os.path.join
:return: python object
"""
# split like crosspm.template.GUS => crosspm.template, GUS
try:
module_name, object_name = object_path.rsplit('.', maxsplit=1)
module_ = __import__(module_name, globals(), locals(), ['App'], 0)
variable_ = getattr(module_, object_name)
except Exception:
variable_ = None
return variable_ |
def url_for(self, action='view', **kwargs):
"""
Return public URL to this instance for a given action (default 'view')
"""
app = current_app._get_current_object() if current_app else None
if app is not None and action in self.url_for_endpoints.get(app, {}):
endpoint, paramattrs, _external = self.url_for_endpoints[app][action]
else:
try:
endpoint, paramattrs, _external = self.url_for_endpoints[None][action]
except KeyError:
raise BuildError(action, kwargs, 'GET')
params = {}
for param, attr in list(paramattrs.items()):
if isinstance(attr, tuple):
# attr is a tuple containing:
# 1. ('parent', 'name') --> self.parent.name
# 2. ('**entity', 'name') --> kwargs['entity'].name
if attr[0].startswith('**'):
item = kwargs.pop(attr[0][2:])
attr = attr[1:]
else:
item = self
for subattr in attr:
item = getattr(item, subattr)
params[param] = item
elif callable(attr):
params[param] = attr(self)
else:
params[param] = getattr(self, attr)
if _external is not None:
params['_external'] = _external
params.update(kwargs) # Let kwargs override params
# url_for from flask
    return url_for(endpoint, **params) | Return public URL to this instance for a given action (default 'view') | Below is the instruction that describes the task:
### Input:
Return public URL to this instance for a given action (default 'view')
### Response:
def url_for(self, action='view', **kwargs):
"""
Return public URL to this instance for a given action (default 'view')
"""
app = current_app._get_current_object() if current_app else None
if app is not None and action in self.url_for_endpoints.get(app, {}):
endpoint, paramattrs, _external = self.url_for_endpoints[app][action]
else:
try:
endpoint, paramattrs, _external = self.url_for_endpoints[None][action]
except KeyError:
raise BuildError(action, kwargs, 'GET')
params = {}
for param, attr in list(paramattrs.items()):
if isinstance(attr, tuple):
# attr is a tuple containing:
# 1. ('parent', 'name') --> self.parent.name
# 2. ('**entity', 'name') --> kwargs['entity'].name
if attr[0].startswith('**'):
item = kwargs.pop(attr[0][2:])
attr = attr[1:]
else:
item = self
for subattr in attr:
item = getattr(item, subattr)
params[param] = item
elif callable(attr):
params[param] = attr(self)
else:
params[param] = getattr(self, attr)
if _external is not None:
params['_external'] = _external
params.update(kwargs) # Let kwargs override params
# url_for from flask
return url_for(endpoint, **params) |
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
            return False | Use OS facilities to determine if a process is running | Below is the instruction that describes the task:
### Input:
Use OS facilities to determine if a process is running
### Response:
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False |
def __nt_relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path: raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" \
% (path, start))
else: raise ValueError("path is on drive %s, start on drive %s" \
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else: i += 1
pass
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list: return os.curdir
    return os.path.join(*rel_list) | Return a relative version of a path | Below is the instruction that describes the task:
### Input:
Return a relative version of a path
### Response:
def __nt_relpath(path, start=os.curdir):
"""Return a relative version of a path"""
if not path: raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.sep)
path_list = os.path.abspath(path).split(os.sep)
if start_list[0].lower() != path_list[0].lower():
unc_path, rest = os.path.splitunc(path)
unc_start, rest = os.path.splitunc(start)
if bool(unc_path) ^ bool(unc_start):
raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" \
% (path, start))
else: raise ValueError("path is on drive %s, start on drive %s" \
% (path_list[0], start_list[0]))
# Work out how much of the filepath is shared by start and path.
for i in range(min(len(start_list), len(path_list))):
if start_list[i].lower() != path_list[i].lower():
break
else: i += 1
pass
rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list: return os.curdir
return os.path.join(*rel_list) |
def cancel_ride(self, ride_id, cancel_confirmation_token=None):
"""Cancel an ongoing ride on behalf of a user.
Params
ride_id (str)
The unique ID of the Ride Request.
cancel_confirmation_token (str)
Optional string containing the cancellation confirmation token.
Returns
(Response)
A Response object with successful status_code
if ride was canceled.
"""
args = {
"cancel_confirmation_token": cancel_confirmation_token
}
endpoint = 'v1/rides/{}/cancel'.format(ride_id)
return self._api_call('POST', endpoint, args=args) | Cancel an ongoing ride on behalf of a user.
Params
ride_id (str)
The unique ID of the Ride Request.
cancel_confirmation_token (str)
Optional string containing the cancellation confirmation token.
Returns
(Response)
A Response object with successful status_code
if ride was canceled. | Below is the the instruction that describes the task:
### Input:
Cancel an ongoing ride on behalf of a user.
Params
ride_id (str)
The unique ID of the Ride Request.
cancel_confirmation_token (str)
Optional string containing the cancellation confirmation token.
Returns
(Response)
A Response object with successful status_code
if ride was canceled.
### Response:
def cancel_ride(self, ride_id, cancel_confirmation_token=None):
"""Cancel an ongoing ride on behalf of a user.
Params
ride_id (str)
The unique ID of the Ride Request.
cancel_confirmation_token (str)
Optional string containing the cancellation confirmation token.
Returns
(Response)
A Response object with successful status_code
if ride was canceled.
"""
args = {
"cancel_confirmation_token": cancel_confirmation_token
}
endpoint = 'v1/rides/{}/cancel'.format(ride_id)
return self._api_call('POST', endpoint, args=args) |
def quote_attrval (s):
"""
Quote a HTML attribute to be able to wrap it in double quotes.
@param s: the attribute string to quote
@type s: string
@return: the quoted HTML attribute
@rtype: string
"""
res = []
for c in s:
if ord(c) <= 127:
# ASCII
if c == u'&':
res.append(u"&")
elif c == u'"':
res.append(u""")
else:
res.append(c)
else:
res.append(u"&#%d;" % ord(c))
return u"".join(res) | Quote a HTML attribute to be able to wrap it in double quotes.
@param s: the attribute string to quote
@type s: string
@return: the quoted HTML attribute
@rtype: string | Below is the the instruction that describes the task:
### Input:
Quote a HTML attribute to be able to wrap it in double quotes.
@param s: the attribute string to quote
@type s: string
@return: the quoted HTML attribute
@rtype: string
### Response:
def quote_attrval (s):
"""
Quote a HTML attribute to be able to wrap it in double quotes.
@param s: the attribute string to quote
@type s: string
@return: the quoted HTML attribute
@rtype: string
"""
res = []
for c in s:
if ord(c) <= 127:
# ASCII
if c == u'&':
res.append(u"&")
elif c == u'"':
res.append(u""")
else:
res.append(c)
else:
res.append(u"&#%d;" % ord(c))
return u"".join(res) |
def include_callback_query_chat_id(fn=pair, types='all'):
"""
:return:
a pair producer that enables static callback query capturing
across seeder and delegator.
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
"""
@_ensure_seeders_list
def p(seeders, delegator_factory, *args, **kwargs):
return fn(seeders + [per_callback_query_chat_id(types=types)],
delegator_factory, *args, include_callback_query=True, **kwargs)
return p | :return:
a pair producer that enables static callback query capturing
across seeder and delegator.
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``) | Below is the the instruction that describes the task:
### Input:
:return:
a pair producer that enables static callback query capturing
across seeder and delegator.
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
### Response:
def include_callback_query_chat_id(fn=pair, types='all'):
"""
:return:
a pair producer that enables static callback query capturing
across seeder and delegator.
:param types:
``all`` or a list of chat types (``private``, ``group``, ``channel``)
"""
@_ensure_seeders_list
def p(seeders, delegator_factory, *args, **kwargs):
return fn(seeders + [per_callback_query_chat_id(types=types)],
delegator_factory, *args, include_callback_query=True, **kwargs)
return p |
def overall_accuracy(self, factor=0.5):
"""Overall accuracy metrics (sensitivity, specificity, accuracy, and balanced_accuracy)
Parameters
----------
factor : float [0-1]
Balance factor.
Default value 0.5
Returns
-------
dict
results in a dictionary format
"""
sensitivity = metric.sensitivity(
Ntp=self.overall['Ntp'],
Nfn=self.overall['Nfn']
)
specificity = metric.specificity(
Ntn=self.overall['Ntn'],
Nfp=self.overall['Nfp']
)
balanced_accuracy = metric.balanced_accuracy(
sensitivity=sensitivity,
specificity=specificity,
factor=factor
)
accuracy = metric.accuracy(
Ntp=self.overall['Ntp'],
Ntn=self.overall['Ntn'],
Nfp=self.overall['Nfp'],
Nfn=self.overall['Nfn']
)
return {
'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity
} | Overall accuracy metrics (sensitivity, specificity, accuracy, and balanced_accuracy)
Parameters
----------
factor : float [0-1]
Balance factor.
Default value 0.5
Returns
-------
dict
results in a dictionary format | Below is the the instruction that describes the task:
### Input:
Overall accuracy metrics (sensitivity, specificity, accuracy, and balanced_accuracy)
Parameters
----------
factor : float [0-1]
Balance factor.
Default value 0.5
Returns
-------
dict
results in a dictionary format
### Response:
def overall_accuracy(self, factor=0.5):
"""Overall accuracy metrics (sensitivity, specificity, accuracy, and balanced_accuracy)
Parameters
----------
factor : float [0-1]
Balance factor.
Default value 0.5
Returns
-------
dict
results in a dictionary format
"""
sensitivity = metric.sensitivity(
Ntp=self.overall['Ntp'],
Nfn=self.overall['Nfn']
)
specificity = metric.specificity(
Ntn=self.overall['Ntn'],
Nfp=self.overall['Nfp']
)
balanced_accuracy = metric.balanced_accuracy(
sensitivity=sensitivity,
specificity=specificity,
factor=factor
)
accuracy = metric.accuracy(
Ntp=self.overall['Ntp'],
Ntn=self.overall['Ntn'],
Nfp=self.overall['Nfp'],
Nfn=self.overall['Nfn']
)
return {
'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity
} |
def get_build_configuration_set(id=None, name=None):
"""
Get a specific BuildConfigurationSet by name or ID
"""
content = get_build_configuration_set_raw(id, name)
if content:
return utils.format_json(content) | Get a specific BuildConfigurationSet by name or ID | Below is the the instruction that describes the task:
### Input:
Get a specific BuildConfigurationSet by name or ID
### Response:
def get_build_configuration_set(id=None, name=None):
"""
Get a specific BuildConfigurationSet by name or ID
"""
content = get_build_configuration_set_raw(id, name)
if content:
return utils.format_json(content) |
def _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
'''
This is the EPD function to fit using a smoothed mag-series.
'''
return (coeffs[0]*fsv*fsv +
coeffs[1]*fsv +
coeffs[2]*fdv*fdv +
coeffs[3]*fdv +
coeffs[4]*fkv*fkv +
coeffs[5]*fkv +
coeffs[6] +
coeffs[7]*fsv*fdv +
coeffs[8]*fsv*fkv +
coeffs[9]*fdv*fkv +
coeffs[10]*np.sin(2*pi_value*xcc) +
coeffs[11]*np.cos(2*pi_value*xcc) +
coeffs[12]*np.sin(2*pi_value*ycc) +
coeffs[13]*np.cos(2*pi_value*ycc) +
coeffs[14]*np.sin(4*pi_value*xcc) +
coeffs[15]*np.cos(4*pi_value*xcc) +
coeffs[16]*np.sin(4*pi_value*ycc) +
coeffs[17]*np.cos(4*pi_value*ycc) +
coeffs[18]*bgv +
coeffs[19]*bge +
coeffs[20]*iha +
coeffs[21]*izd) | This is the EPD function to fit using a smoothed mag-series. | Below is the the instruction that describes the task:
### Input:
This is the EPD function to fit using a smoothed mag-series.
### Response:
def _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
'''
This is the EPD function to fit using a smoothed mag-series.
'''
return (coeffs[0]*fsv*fsv +
coeffs[1]*fsv +
coeffs[2]*fdv*fdv +
coeffs[3]*fdv +
coeffs[4]*fkv*fkv +
coeffs[5]*fkv +
coeffs[6] +
coeffs[7]*fsv*fdv +
coeffs[8]*fsv*fkv +
coeffs[9]*fdv*fkv +
coeffs[10]*np.sin(2*pi_value*xcc) +
coeffs[11]*np.cos(2*pi_value*xcc) +
coeffs[12]*np.sin(2*pi_value*ycc) +
coeffs[13]*np.cos(2*pi_value*ycc) +
coeffs[14]*np.sin(4*pi_value*xcc) +
coeffs[15]*np.cos(4*pi_value*xcc) +
coeffs[16]*np.sin(4*pi_value*ycc) +
coeffs[17]*np.cos(4*pi_value*ycc) +
coeffs[18]*bgv +
coeffs[19]*bge +
coeffs[20]*iha +
coeffs[21]*izd) |
def drawZigzag(self, p1, p2, breadth = 2):
"""Draw a zig-zagged line from p1 to p2.
"""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis
i_mat = ~matrix # get original position
points = [] # stores edges
for i in range (1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -1) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, 1) * mb
else: # ignore others
continue
points.append(p * i_mat)
self.drawPolyline([p1] + points + [p2]) # add start and end points
return p2 | Draw a zig-zagged line from p1 to p2. | Below is the the instruction that describes the task:
### Input:
Draw a zig-zagged line from p1 to p2.
### Response:
def drawZigzag(self, p1, p2, breadth = 2):
"""Draw a zig-zagged line from p1 to p2.
"""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis
i_mat = ~matrix # get original position
points = [] # stores edges
for i in range (1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -1) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, 1) * mb
else: # ignore others
continue
points.append(p * i_mat)
self.drawPolyline([p1] + points + [p2]) # add start and end points
return p2 |
def get_include_fields(request):
"""Retrieve include_fields values from the request
"""
include_fields = []
rif = request.get("include_fields", "")
if "include_fields" in request:
include_fields = [x.strip()
for x in rif.split(",")
if x.strip()]
if "include_fields[]" in request:
include_fields = request['include_fields[]']
return include_fields | Retrieve include_fields values from the request | Below is the the instruction that describes the task:
### Input:
Retrieve include_fields values from the request
### Response:
def get_include_fields(request):
"""Retrieve include_fields values from the request
"""
include_fields = []
rif = request.get("include_fields", "")
if "include_fields" in request:
include_fields = [x.strip()
for x in rif.split(",")
if x.strip()]
if "include_fields[]" in request:
include_fields = request['include_fields[]']
return include_fields |
def proximal_l2(space, lam=1, g=None):
r"""Proximal operator factory of the l2-norm/distance.
Function for the proximal operator of the functional ``F`` where ``F``
is the l2-norm (or distance to g, if given)::
``F(x) = lam ||x - g||_2``
Parameters
----------
space : `LinearSpace`
Domain of F(x). Needs to be a Hilbert space.
That is, have an inner product (`LinearSpace.inner`).
lam : positive float, optional
Scaling factor or regularization parameter.
g : ``space`` element, optional
An element in ``space``. Default: ``space.zero``.
Returns
-------
prox_factory : callable
Factory for the proximal operator to be initialized.
Notes
-----
Most problems are forumlated for the squared norm/distance, in that case
use `proximal_l2_squared` instead.
The :math:`L_2`-norm/distance :math:`F` is given by
.. math::
F(x) = \lambda \|x - g\|_2
For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F`
is given by
.. math::
\mathrm{prox}_{\sigma F}(y) = \begin{cases}
\frac{1 - c}{\|y-g\|} \cdot y + c \cdot g
& \text{if } c < g, \\
g & \text{else},
\end{cases}
where :math:`c = \sigma \frac{\lambda}{\|y - g\|_2}`.
See Also
--------
proximal_l2_squared : proximal for squared norm/distance
proximal_convex_conj_l2 : proximal for convex conjugate
"""
lam = float(lam)
if g is not None and g not in space:
raise TypeError('{!r} is not an element of {!r}'.format(g, space))
class ProximalL2(Operator):
"""Proximal operator of the l2-norm/distance."""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter
"""
super(ProximalL2, self).__init__(
domain=space, range=space, linear=False)
self.sigma = float(sigma)
def _call(self, x, out):
"""Apply the operator to ``x`` and stores the result in ``out``."""
dtype = getattr(self.domain, 'dtype', float)
eps = np.finfo(dtype).resolution * 10
if g is None:
x_norm = x.norm() * (1 + eps)
if x_norm > 0:
step = self.sigma * lam / x_norm
else:
step = np.infty
if step < 1.0:
out.lincomb(1.0 - step, x)
else:
out.set_zero()
else:
x_norm = (x - g).norm() * (1 + eps)
if x_norm > 0:
step = self.sigma * lam / x_norm
else:
step = np.infty
if step < 1.0:
out.lincomb(1.0 - step, x, step, g)
else:
out.assign(g)
return ProximalL2 | r"""Proximal operator factory of the l2-norm/distance.
Function for the proximal operator of the functional ``F`` where ``F``
is the l2-norm (or distance to g, if given)::
``F(x) = lam ||x - g||_2``
Parameters
----------
space : `LinearSpace`
Domain of F(x). Needs to be a Hilbert space.
That is, have an inner product (`LinearSpace.inner`).
lam : positive float, optional
Scaling factor or regularization parameter.
g : ``space`` element, optional
An element in ``space``. Default: ``space.zero``.
Returns
-------
prox_factory : callable
Factory for the proximal operator to be initialized.
Notes
-----
Most problems are forumlated for the squared norm/distance, in that case
use `proximal_l2_squared` instead.
The :math:`L_2`-norm/distance :math:`F` is given by
.. math::
F(x) = \lambda \|x - g\|_2
For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F`
is given by
.. math::
\mathrm{prox}_{\sigma F}(y) = \begin{cases}
\frac{1 - c}{\|y-g\|} \cdot y + c \cdot g
& \text{if } c < g, \\
g & \text{else},
\end{cases}
where :math:`c = \sigma \frac{\lambda}{\|y - g\|_2}`.
See Also
--------
proximal_l2_squared : proximal for squared norm/distance
proximal_convex_conj_l2 : proximal for convex conjugate | Below is the the instruction that describes the task:
### Input:
r"""Proximal operator factory of the l2-norm/distance.
Function for the proximal operator of the functional ``F`` where ``F``
is the l2-norm (or distance to g, if given)::
``F(x) = lam ||x - g||_2``
Parameters
----------
space : `LinearSpace`
Domain of F(x). Needs to be a Hilbert space.
That is, have an inner product (`LinearSpace.inner`).
lam : positive float, optional
Scaling factor or regularization parameter.
g : ``space`` element, optional
An element in ``space``. Default: ``space.zero``.
Returns
-------
prox_factory : callable
Factory for the proximal operator to be initialized.
Notes
-----
Most problems are forumlated for the squared norm/distance, in that case
use `proximal_l2_squared` instead.
The :math:`L_2`-norm/distance :math:`F` is given by
.. math::
F(x) = \lambda \|x - g\|_2
For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F`
is given by
.. math::
\mathrm{prox}_{\sigma F}(y) = \begin{cases}
\frac{1 - c}{\|y-g\|} \cdot y + c \cdot g
& \text{if } c < g, \\
g & \text{else},
\end{cases}
where :math:`c = \sigma \frac{\lambda}{\|y - g\|_2}`.
See Also
--------
proximal_l2_squared : proximal for squared norm/distance
proximal_convex_conj_l2 : proximal for convex conjugate
### Response:
def proximal_l2(space, lam=1, g=None):
r"""Proximal operator factory of the l2-norm/distance.
Function for the proximal operator of the functional ``F`` where ``F``
is the l2-norm (or distance to g, if given)::
``F(x) = lam ||x - g||_2``
Parameters
----------
space : `LinearSpace`
Domain of F(x). Needs to be a Hilbert space.
That is, have an inner product (`LinearSpace.inner`).
lam : positive float, optional
Scaling factor or regularization parameter.
g : ``space`` element, optional
An element in ``space``. Default: ``space.zero``.
Returns
-------
prox_factory : callable
Factory for the proximal operator to be initialized.
Notes
-----
Most problems are forumlated for the squared norm/distance, in that case
use `proximal_l2_squared` instead.
The :math:`L_2`-norm/distance :math:`F` is given by
.. math::
F(x) = \lambda \|x - g\|_2
For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F`
is given by
.. math::
\mathrm{prox}_{\sigma F}(y) = \begin{cases}
\frac{1 - c}{\|y-g\|} \cdot y + c \cdot g
& \text{if } c < g, \\
g & \text{else},
\end{cases}
where :math:`c = \sigma \frac{\lambda}{\|y - g\|_2}`.
See Also
--------
proximal_l2_squared : proximal for squared norm/distance
proximal_convex_conj_l2 : proximal for convex conjugate
"""
lam = float(lam)
if g is not None and g not in space:
raise TypeError('{!r} is not an element of {!r}'.format(g, space))
class ProximalL2(Operator):
"""Proximal operator of the l2-norm/distance."""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter
"""
super(ProximalL2, self).__init__(
domain=space, range=space, linear=False)
self.sigma = float(sigma)
def _call(self, x, out):
"""Apply the operator to ``x`` and stores the result in ``out``."""
dtype = getattr(self.domain, 'dtype', float)
eps = np.finfo(dtype).resolution * 10
if g is None:
x_norm = x.norm() * (1 + eps)
if x_norm > 0:
step = self.sigma * lam / x_norm
else:
step = np.infty
if step < 1.0:
out.lincomb(1.0 - step, x)
else:
out.set_zero()
else:
x_norm = (x - g).norm() * (1 + eps)
if x_norm > 0:
step = self.sigma * lam / x_norm
else:
step = np.infty
if step < 1.0:
out.lincomb(1.0 - step, x, step, g)
else:
out.assign(g)
return ProximalL2 |
def multi_occurrence(self):
"""
Get whether the node has multiple occurrences, i.e. is a I{collection}.
@return: True if it has, False if it has at most 1 occurrence.
@rtype: boolean
"""
max = self.max
if max is None:
return False
if max.isdigit():
return int(max) > 1
return max == "unbounded" | Get whether the node has multiple occurrences, i.e. is a I{collection}.
@return: True if it has, False if it has at most 1 occurrence.
@rtype: boolean | Below is the the instruction that describes the task:
### Input:
Get whether the node has multiple occurrences, i.e. is a I{collection}.
@return: True if it has, False if it has at most 1 occurrence.
@rtype: boolean
### Response:
def multi_occurrence(self):
"""
Get whether the node has multiple occurrences, i.e. is a I{collection}.
@return: True if it has, False if it has at most 1 occurrence.
@rtype: boolean
"""
max = self.max
if max is None:
return False
if max.isdigit():
return int(max) > 1
return max == "unbounded" |
def to_json(self):
"""Convert the Humidity Condition to a dictionary."""
return {
'hum_type': self.hum_type,
'hum_value': self.hum_value,
'barometric_pressure': self.barometric_pressure,
'schedule': self.schedule,
'wet_bulb_range': self.wet_bulb_range,
} | Convert the Humidity Condition to a dictionary. | Below is the the instruction that describes the task:
### Input:
Convert the Humidity Condition to a dictionary.
### Response:
def to_json(self):
"""Convert the Humidity Condition to a dictionary."""
return {
'hum_type': self.hum_type,
'hum_value': self.hum_value,
'barometric_pressure': self.barometric_pressure,
'schedule': self.schedule,
'wet_bulb_range': self.wet_bulb_range,
} |
def ConsultarRetencionesTabacaleras(self, sep="||"):
"Retorna un listado de retenciones tabacaleras con código y descripción"
ret = self.client.consultarRetencionesTabacaleras(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['respuesta']
self.__analizar_errores(ret)
array = ret.get('retencion', [])
if sep is None:
return dict([(it['codigo'], it['descripcion']) for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigo'], it['descripcion']) for it in array] | Retorna un listado de retenciones tabacaleras con código y descripción | Below is the the instruction that describes the task:
### Input:
Retorna un listado de retenciones tabacaleras con código y descripción
### Response:
def ConsultarRetencionesTabacaleras(self, sep="||"):
"Retorna un listado de retenciones tabacaleras con código y descripción"
ret = self.client.consultarRetencionesTabacaleras(
auth={
'token': self.Token, 'sign': self.Sign,
'cuit': self.Cuit, },
)['respuesta']
self.__analizar_errores(ret)
array = ret.get('retencion', [])
if sep is None:
return dict([(it['codigo'], it['descripcion']) for it in array])
else:
return [("%s %%s %s %%s %s" % (sep, sep, sep)) %
(it['codigo'], it['descripcion']) for it in array] |
def running_global_pool_1d(inputs, pooling_type="MAX"):
"""Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'.
"""
del pooling_type
with tf.name_scope("running_global_pool", values=[inputs]):
scan_fct = tf.maximum
# Permute inputs so seq_length is first.
elems = tf.transpose(inputs, [1, 0, 2])
# Perform scan.
cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
# Permute output to get back to original order.
output = tf.transpose(cumulatives, [1, 0, 2])
return output | Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'. | Below is the the instruction that describes the task:
### Input:
Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'.
### Response:
def running_global_pool_1d(inputs, pooling_type="MAX"):
"""Same global pool, but only for the elements up to the current element.
Useful for outputs where the state of future elements is not known.
Takes no mask as all elements up to the current element are assumed to exist.
Currently only supports maximum. Equivalent to using a lower triangle bias.
Args:
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
pooling_type: Pooling type to use. Currently only supports 'MAX'.
Returns:
A tensor of shape [batch_size, sequence_length, input_dims] containing the
running 'totals'.
"""
del pooling_type
with tf.name_scope("running_global_pool", values=[inputs]):
scan_fct = tf.maximum
# Permute inputs so seq_length is first.
elems = tf.transpose(inputs, [1, 0, 2])
# Perform scan.
cumulatives = tf.scan(scan_fct, elems, swap_memory=True)
# Permute output to get back to original order.
output = tf.transpose(cumulatives, [1, 0, 2])
return output |
def list_traces(
self,
project_id=None,
view=None,
page_size=None,
start_time=None,
end_time=None,
filter_=None,
order_by=None,
page_token=None,
):
"""
Returns of a list of traces that match the filter conditions.
Args:
project_id (Optional[str]): ID of the Cloud project where the trace
data is stored.
view (Optional[~google.cloud.trace_v1.gapic.enums.
ListTracesRequest.ViewType]): Type of data returned for traces
in the list. Default is ``MINIMAL``.
page_size (Optional[int]): Maximum number of traces to return. If
not specified or <= 0, the implementation selects a reasonable
value. The implementation may return fewer traces than the
requested page size.
start_time (Optional[~datetime.datetime]): Start of the time
interval (inclusive) during which the trace data was collected
from the application.
end_time (Optional[~datetime.datetime]): End of the time interval
(inclusive) during which the trace data was collected from the
application.
filter_ (Optional[str]): An optional filter for the request.
order_by (Optional[str]): Field used to sort the returned traces.
page_token (Optional[str]): opaque marker for the next "page" of
entries. If not passed, the API will return the first page of
entries.
Returns:
A :class:`~google.api_core.page_iterator.Iterator` of traces that
match the specified filter conditions.
"""
if project_id is None:
project_id = self.project
if start_time is not None:
start_time = _datetime_to_pb_timestamp(start_time)
if end_time is not None:
end_time = _datetime_to_pb_timestamp(end_time)
return self.trace_api.list_traces(
project_id=project_id,
view=view,
page_size=page_size,
start_time=start_time,
end_time=end_time,
filter_=filter_,
order_by=order_by,
page_token=page_token,
) | Returns of a list of traces that match the filter conditions.
Args:
project_id (Optional[str]): ID of the Cloud project where the trace
data is stored.
view (Optional[~google.cloud.trace_v1.gapic.enums.
ListTracesRequest.ViewType]): Type of data returned for traces
in the list. Default is ``MINIMAL``.
page_size (Optional[int]): Maximum number of traces to return. If
not specified or <= 0, the implementation selects a reasonable
value. The implementation may return fewer traces than the
requested page size.
start_time (Optional[~datetime.datetime]): Start of the time
interval (inclusive) during which the trace data was collected
from the application.
end_time (Optional[~datetime.datetime]): End of the time interval
(inclusive) during which the trace data was collected from the
application.
filter_ (Optional[str]): An optional filter for the request.
order_by (Optional[str]): Field used to sort the returned traces.
page_token (Optional[str]): opaque marker for the next "page" of
entries. If not passed, the API will return the first page of
entries.
Returns:
A :class:`~google.api_core.page_iterator.Iterator` of traces that
match the specified filter conditions. | Below is the the instruction that describes the task:
### Input:
Returns of a list of traces that match the filter conditions.
Args:
project_id (Optional[str]): ID of the Cloud project where the trace
data is stored.
view (Optional[~google.cloud.trace_v1.gapic.enums.
ListTracesRequest.ViewType]): Type of data returned for traces
in the list. Default is ``MINIMAL``.
page_size (Optional[int]): Maximum number of traces to return. If
not specified or <= 0, the implementation selects a reasonable
value. The implementation may return fewer traces than the
requested page size.
start_time (Optional[~datetime.datetime]): Start of the time
interval (inclusive) during which the trace data was collected
from the application.
end_time (Optional[~datetime.datetime]): End of the time interval
(inclusive) during which the trace data was collected from the
application.
filter_ (Optional[str]): An optional filter for the request.
order_by (Optional[str]): Field used to sort the returned traces.
page_token (Optional[str]): opaque marker for the next "page" of
entries. If not passed, the API will return the first page of
entries.
Returns:
A :class:`~google.api_core.page_iterator.Iterator` of traces that
match the specified filter conditions.
### Response:
def list_traces(
self,
project_id=None,
view=None,
page_size=None,
start_time=None,
end_time=None,
filter_=None,
order_by=None,
page_token=None,
):
"""
Returns of a list of traces that match the filter conditions.
Args:
project_id (Optional[str]): ID of the Cloud project where the trace
data is stored.
view (Optional[~google.cloud.trace_v1.gapic.enums.
ListTracesRequest.ViewType]): Type of data returned for traces
in the list. Default is ``MINIMAL``.
page_size (Optional[int]): Maximum number of traces to return. If
not specified or <= 0, the implementation selects a reasonable
value. The implementation may return fewer traces than the
requested page size.
start_time (Optional[~datetime.datetime]): Start of the time
interval (inclusive) during which the trace data was collected
from the application.
end_time (Optional[~datetime.datetime]): End of the time interval
(inclusive) during which the trace data was collected from the
application.
filter_ (Optional[str]): An optional filter for the request.
order_by (Optional[str]): Field used to sort the returned traces.
page_token (Optional[str]): opaque marker for the next "page" of
entries. If not passed, the API will return the first page of
entries.
Returns:
A :class:`~google.api_core.page_iterator.Iterator` of traces that
match the specified filter conditions.
"""
if project_id is None:
project_id = self.project
if start_time is not None:
start_time = _datetime_to_pb_timestamp(start_time)
if end_time is not None:
end_time = _datetime_to_pb_timestamp(end_time)
return self.trace_api.list_traces(
project_id=project_id,
view=view,
page_size=page_size,
start_time=start_time,
end_time=end_time,
filter_=filter_,
order_by=order_by,
page_token=page_token,
) |
def my_resolve_lookup(self, context):
    """
    Performs resolution of a real variable (i.e. not a literal) against the
    given context.

    Each element of ``self.lookups`` is tried, in order, as a dictionary
    key, then an attribute name, then an integer list index. Callables are
    invoked (unless flagged otherwise) and Model instances are recorded for
    ultracache invalidation.

    As indicated by the method's name, this method is an implementation
    detail and shouldn't be called by external code. Use Variable.resolve()
    instead.
    """
    current = context
    # Defined up-front so the catch-all handler below can log it safely even
    # when the exception fires before the loop's first assignment (e.g. while
    # evaluating/iterating self.lookups); previously this risked a NameError
    # that masked the original error.
    bit = None
    try:  # catch-all for silent variable failures
        for bit in self.lookups:
            try:  # dictionary lookup
                current = current[bit]
            # ValueError/IndexError are for numpy.array lookup on
            # numpy < 1.9 and 1.9+ respectively
            except (TypeError, AttributeError, KeyError, ValueError, IndexError):
                try:  # attribute lookup
                    # Don't return class attributes if the class is the context:
                    if isinstance(current, BaseContext) and getattr(type(current), bit):
                        raise AttributeError
                    current = getattr(current, bit)
                except (TypeError, AttributeError) as e:
                    # Reraise an AttributeError raised by a @property
                    if (isinstance(e, AttributeError) and
                            not isinstance(current, BaseContext) and bit in dir(current)):
                        raise
                    try:  # list-index lookup
                        current = current[int(bit)]
                    except (IndexError,  # list index out of range
                            ValueError,  # invalid literal for int()
                            KeyError,  # current is a dict without `int(bit)` key
                            TypeError):  # unsubscriptable object
                        raise VariableDoesNotExist("Failed lookup for key "
                                                   "[%s] in %r",
                                                   (bit, current))  # missing attribute
            if callable(current):
                if getattr(current, "do_not_call_in_templates", False):
                    # Explicitly flagged: leave the callable uncalled.
                    pass
                elif getattr(current, "alters_data", False):
                    # Never invoke data-mutating callables from a template;
                    # substitute the engine's invalid-variable string.
                    try:
                        current = context.template.engine.string_if_invalid
                    except AttributeError:
                        current = settings.TEMPLATE_STRING_IF_INVALID
                else:
                    try:  # method call (assuming no args required)
                        current = current()
                    except TypeError:
                        try:
                            inspect.getcallargs(current)
                        except TypeError:  # arguments *were* required
                            current = context.template.engine.string_if_invalid  # invalid method call
                        else:
                            # TypeError originated inside the call; re-raise.
                            raise
            elif isinstance(current, Model):
                # Record (content type id, pk) pairs on the request so
                # ultracache can invalidate cached fragments for this object.
                if ("request" in context) and hasattr(context["request"], "_ultracache"):
                    # get_for_model itself is cached
                    ct = ContentType.objects.get_for_model(current.__class__)
                    context["request"]._ultracache.append((ct.id, current.pk))
    except Exception as e:
        template_name = getattr(context, "template_name", None) or "unknown"
        if logger is not None:
            logger.debug(
                "Exception while resolving variable \"%s\" in template \"%s\".",
                bit,
                template_name,
                exc_info=True,
            )
        if getattr(e, "silent_variable_failure", False):
            current = context.template.engine.string_if_invalid
        else:
            raise
    return current
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead. | Below is the instruction that describes the task:
### Input:
Performs resolution of a real variable (i.e. not a literal) against the
given context.
As indicated by the method's name, this method is an implementation
detail and shouldn't be called by external code. Use Variable.resolve()
instead.
### Response:
def my_resolve_lookup(self, context):
    """
    Resolve a non-literal variable against ``context``.

    Implementation detail; external code should go through
    Variable.resolve() instead of calling this directly.
    """
    value = context
    try:  # outer guard enabling silent variable failures
        for key in self.lookups:
            try:
                # First preference: mapping-style subscription.
                value = value[key]
            # ValueError/IndexError cover numpy.array subscription on
            # numpy < 1.9 and >= 1.9 respectively.
            except (TypeError, AttributeError, KeyError, ValueError, IndexError):
                try:
                    # Second preference: attribute access. Class attributes
                    # are deliberately hidden when the object is the context.
                    if isinstance(value, BaseContext) and getattr(type(value), key):
                        raise AttributeError
                    value = getattr(value, key)
                except (TypeError, AttributeError) as exc:
                    # An AttributeError raised by a @property must propagate
                    # rather than fall through to the index lookup.
                    if (isinstance(exc, AttributeError)
                            and not isinstance(value, BaseContext)
                            and key in dir(value)):
                        raise
                    try:
                        # Last resort: numeric list-index subscription.
                        value = value[int(key)]
                    except (IndexError,    # index out of range
                            ValueError,    # key is not an integer literal
                            KeyError,      # dict lacks the int(key) entry
                            TypeError):    # object is not subscriptable
                        raise VariableDoesNotExist(
                            "Failed lookup for key [%s] in %r", (key, value))
            if callable(value):
                if getattr(value, "do_not_call_in_templates", False):
                    # Explicitly marked: leave the callable uncalled.
                    pass
                elif getattr(value, "alters_data", False):
                    # Refuse to invoke data-mutating callables from a
                    # template; substitute the invalid-variable string.
                    try:
                        value = context.template.engine.string_if_invalid
                    except AttributeError:
                        value = settings.TEMPLATE_STRING_IF_INVALID
                else:
                    try:
                        # Plain call, assuming no arguments are required.
                        value = value()
                    except TypeError:
                        try:
                            inspect.getcallargs(value)
                        except TypeError:
                            # Arguments *were* required: invalid method call.
                            value = context.template.engine.string_if_invalid
                        else:
                            # TypeError came from inside the call; re-raise.
                            raise
            elif isinstance(value, Model):
                # Track (content-type id, pk) pairs on the request for
                # ultracache invalidation bookkeeping.
                if ("request" in context) and hasattr(context["request"], "_ultracache"):
                    # get_for_model itself is cached
                    ct = ContentType.objects.get_for_model(value.__class__)
                    context["request"]._ultracache.append((ct.id, value.pk))
    except Exception as exc:
        template_name = getattr(context, "template_name", None) or "unknown"
        if logger is not None:
            logger.debug(
                'Exception while resolving variable "%s" in template "%s".',
                key,
                template_name,
                exc_info=True,
            )
        if getattr(exc, "silent_variable_failure", False):
            value = context.template.engine.string_if_invalid
        else:
            raise
    return value
def _plot_figure(self, idx, fig_format='json'):
"""
Returns the figure in html format on the
first call and
"""
self.plot.update(idx)
if self.embed:
return self.renderer.diff(self.plot) | Returns the figure in html format on the
first call and | Below is the instruction that describes the task:
### Input:
Returns the figure in html format on the
first call and
### Response:
def _plot_figure(self, idx, fig_format='json'):
"""
Returns the figure in html format on the
first call and
"""
self.plot.update(idx)
if self.embed:
return self.renderer.diff(self.plot) |
def save(self):
    """Called by the parent settings dialog when the user clicks on the Save button.
    Stores the current settings in the ConfigManager."""
    logger.debug("User requested to save settings. New settings: " + self._settings_str())
    # Collect the new (key, value) pairs in the order they are applied.
    new_values = (
        (cm.PROMPT_TO_SAVE, not self.autosave_checkbox.isChecked()),
        (cm.SHOW_TRAY_ICON, self.show_tray_checkbox.isChecked()),
        # (cm.MENU_TAKES_FOCUS, self.allow_kb_nav_checkbox.isChecked()),
        (cm.SORT_BY_USAGE_COUNT, self.sort_by_usage_checkbox.isChecked()),
        (cm.UNDO_USING_BACKSPACE, self.enable_undo_checkbox.isChecked()),
        (cm.NOTIFICATION_ICON, self.system_tray_icon_theme_combobox.currentData(Qt.UserRole)),
    )
    for setting_key, setting_value in new_values:
        cm.ConfigManager.SETTINGS[setting_key] = setting_value
    # TODO: After saving the notification icon, apply it to the currently running instance.
    self._save_autostart_settings()
Stores the current settings in the ConfigManager. | Below is the instruction that describes the task:
### Input:
Called by the parent settings dialog when the user clicks on the Save button.
Stores the current settings in the ConfigManager.
### Response:
def save(self):
    """Persist the dialog's current choices into the ConfigManager.

    Invoked by the parent settings dialog when the user clicks Save.
    """
    logger.debug("User requested to save settings. New settings: " + self._settings_str())
    settings_store = cm.ConfigManager.SETTINGS
    settings_store[cm.PROMPT_TO_SAVE] = not self.autosave_checkbox.isChecked()
    settings_store[cm.SHOW_TRAY_ICON] = self.show_tray_checkbox.isChecked()
    # settings_store[cm.MENU_TAKES_FOCUS] = self.allow_kb_nav_checkbox.isChecked()
    settings_store[cm.SORT_BY_USAGE_COUNT] = self.sort_by_usage_checkbox.isChecked()
    settings_store[cm.UNDO_USING_BACKSPACE] = self.enable_undo_checkbox.isChecked()
    settings_store[cm.NOTIFICATION_ICON] = self.system_tray_icon_theme_combobox.currentData(Qt.UserRole)
    # TODO: After saving the notification icon, apply it to the currently running instance.
    self._save_autostart_settings()
def get_form_kwargs(self):
    """Return the keyword arguments used to instantiate the associated form."""
    form_kwargs = super(ModelFormMixin, self).get_form_kwargs()
    # The form needs a reference to the poll currently being edited.
    form_kwargs['poll'] = self.object
    return form_kwargs
### Input:
Returns the keyword arguments to provide to the associated form.
### Response:
def get_form_kwargs(self):
    """Build the associated form's constructor kwargs, injecting the current poll."""
    base_kwargs = super(ModelFormMixin, self).get_form_kwargs()
    base_kwargs.update(poll=self.object)
    return base_kwargs
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.