code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def serialize_to_xml(root, block):
"""
Serialize the Peer Instruction XBlock's content to XML.
Args:
block (PeerInstructionXBlock): The peer instruction block to serialize.
root (etree.Element): The XML root node to update.
Returns:
etree.Element
"""
root.tag = 'ubcpi'
if block.rationale_size is not None:
if block.rationale_size.get('min'):
root.set('rationale_size_min', unicode(block.rationale_size.get('min')))
if block.rationale_size.get('max'):
root.set('rationale_size_max', unicode(block.rationale_size['max']))
if block.algo:
if block.algo.get('name'):
root.set('algorithm', block.algo.get('name'))
if block.algo.get('num_responses'):
root.set('num_responses', unicode(block.algo.get('num_responses')))
display_name = etree.SubElement(root, 'display_name')
display_name.text = block.display_name
question = etree.SubElement(root, 'question')
question_text = etree.SubElement(question, 'text')
question_text.text = block.question_text['text']
serialize_image(block.question_text, question)
options = etree.SubElement(root, 'options')
serialize_options(options, block)
seeds = etree.SubElement(root, 'seeds')
serialize_seeds(seeds, block) | Serialize the Peer Instruction XBlock's content to XML.
Args:
block (PeerInstructionXBlock): The peer instruction block to serialize.
root (etree.Element): The XML root node to update.
Returns:
etree.Element | Below is the the instruction that describes the task:
### Input:
Serialize the Peer Instruction XBlock's content to XML.
Args:
block (PeerInstructionXBlock): The peer instruction block to serialize.
root (etree.Element): The XML root node to update.
Returns:
etree.Element
### Response:
def serialize_to_xml(root, block):
    """
    Serialize the Peer Instruction XBlock's content to XML.
    Args:
        block (PeerInstructionXBlock): The peer instruction block to serialize.
        root (etree.Element): The XML root node to update.
    Returns:
        etree.Element
    """
    root.tag = 'ubcpi'
    # Optional size limits on the rationale text become root attributes.
    rationale_size = block.rationale_size
    if rationale_size is not None:
        min_size = rationale_size.get('min')
        if min_size:
            root.set('rationale_size_min', unicode(min_size))
        max_size = rationale_size.get('max')
        if max_size:
            root.set('rationale_size_max', unicode(max_size))
    # Optional answer-pool algorithm settings.
    algo = block.algo
    if algo:
        algo_name = algo.get('name')
        if algo_name:
            root.set('algorithm', algo_name)
        num_responses = algo.get('num_responses')
        if num_responses:
            root.set('num_responses', unicode(num_responses))
    display_name = etree.SubElement(root, 'display_name')
    display_name.text = block.display_name
    # Question text (and any attached image) live under <question>.
    question = etree.SubElement(root, 'question')
    question_text = etree.SubElement(question, 'text')
    question_text.text = block.question_text['text']
    serialize_image(block.question_text, question)
    serialize_options(etree.SubElement(root, 'options'), block)
    serialize_seeds(etree.SubElement(root, 'seeds'), block)
def request(self,
method,
url,
params=None,
data=None,
files=None,
json=None,
timeout=5,
headers=None,
skip_auth=False):
"""
Execute the request using requests library
"""
request_url = self.base_url + url
floyd_logger.debug("Starting request to url: %s with params: %s, data: %s", request_url, params, data)
request_headers = {'x-floydhub-cli-version': get_cli_version()}
# Auth headers if present
if self.auth_header:
request_headers["Authorization"] = self.auth_header
# Add any additional headers
if headers:
request_headers.update(headers)
try:
response = requests.request(method,
request_url,
params=params,
data=data,
json=json,
headers=request_headers,
files=files,
timeout=timeout)
except requests.exceptions.ConnectionError as exception:
floyd_logger.debug("Exception: %s", exception, exc_info=True)
sys.exit("Cannot connect to the Floyd server. Check your internet connection.")
except requests.exceptions.Timeout as exception:
floyd_logger.debug("Exception: %s", exception, exc_info=True)
sys.exit("Connection to FloydHub server timed out. Please retry or check your internet connection.")
floyd_logger.debug("Response Content: %s, Headers: %s" % (response.content, response.headers))
self.check_response_status(response)
return response | Execute the request using requests library | Below is the the instruction that describes the task:
### Input:
Execute the request using requests library
### Response:
def request(self,
            method,
            url,
            params=None,
            data=None,
            files=None,
            json=None,
            timeout=5,
            headers=None,
            skip_auth=False):
    """
    Execute an HTTP request against the Floyd server using the requests
    library.

    :param method: HTTP verb ('GET', 'POST', ...).
    :param url: Path appended to ``self.base_url``.
    :param params: Query-string parameters, passed through to requests.
    :param data: Form/body data, passed through to requests.
    :param files: File uploads, passed through to requests.
    :param json: JSON body, passed through to requests.
    :param timeout: Socket timeout in seconds (default 5).
    :param headers: Optional extra headers merged over the defaults.
    :param skip_auth: When true, do not attach the Authorization header.
    :return: requests.Response on success; exits the process with a
        user-facing message on connection errors or timeouts.
    """
    request_url = self.base_url + url
    floyd_logger.debug("Starting request to url: %s with params: %s, data: %s",
                       request_url, params, data)
    request_headers = {'x-floydhub-cli-version': get_cli_version()}
    # Auth header if present. Fix: honor skip_auth, which was previously
    # accepted but silently ignored.
    if self.auth_header and not skip_auth:
        request_headers["Authorization"] = self.auth_header
    # Add any additional headers
    if headers:
        request_headers.update(headers)
    try:
        response = requests.request(method,
                                    request_url,
                                    params=params,
                                    data=data,
                                    json=json,
                                    headers=request_headers,
                                    files=files,
                                    timeout=timeout)
    except requests.exceptions.ConnectionError as exception:
        floyd_logger.debug("Exception: %s", exception, exc_info=True)
        sys.exit("Cannot connect to the Floyd server. Check your internet connection.")
    except requests.exceptions.Timeout as exception:
        floyd_logger.debug("Exception: %s", exception, exc_info=True)
        sys.exit("Connection to FloydHub server timed out. Please retry or check your internet connection.")
    # Use lazy %-style logging args, consistent with the other debug calls,
    # instead of eagerly formatting the response body with '%'.
    floyd_logger.debug("Response Content: %s, Headers: %s",
                       response.content, response.headers)
    self.check_response_status(response)
    return response
def make_2d_block_raster_mask(query_shape, memory_flange):
"""Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
A tensor of shape query_size, memory_size
"""
# mask inside the query block
query_triangle = common_layers.ones_matrix_band_part(
np.prod(query_shape), np.prod(query_shape), -1, 0)
split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
# adding mask for left and right
mask_pieces = [
tf.concat( # pylint: disable=g-complex-comprehension
[tf.ones([np.prod(query_shape), memory_flange[1]]),
split_query_masks[i],
tf.zeros([np.prod(query_shape), memory_flange[1]])],
axis=1) for i in range(query_shape[0])
]
# adding mask for top
final_mask = tf.concat(
[
tf.ones([
np.prod(query_shape),
(query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]
]),
tf.concat(mask_pieces, axis=1)
],
axis=1)
# 0.0 is visible location, 1.0 is masked.
return 1. - final_mask | Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
A tensor of shape query_size, memory_size | Below is the the instruction that describes the task:
### Input:
Creates a mask for 2d block raster scan.
The query mask can look to the left, top left, top, and top right, but
not to the right. Inside the query, we have the standard raster scan
masking.
Args:
query_shape: A tuple of ints (query_height, query_width)
memory_flange: A tuple of ints
(memory_flange_height, memory_flange_width)
Returns:
A tensor of shape query_size, memory_size
### Response:
def make_2d_block_raster_mask(query_shape, memory_flange):
  """Creates a mask for 2d block raster scan.
  The query mask can look to the left, top left, top, and top right, but
  not to the right. Inside the query, we have the standard raster scan
  masking.
  Args:
    query_shape: A tuple of ints (query_height, query_width)
    memory_flange: A tuple of ints
      (memory_flange_height, memory_flange_width)
  Returns:
    A tensor of shape query_size, memory_size
  """
  # mask inside the query block: lower-triangular ones give each query
  # position visibility of itself and everything earlier in raster order
  query_triangle = common_layers.ones_matrix_band_part(
      np.prod(query_shape), np.prod(query_shape), -1, 0)
  # one column-slice of the triangle per query row
  split_query_masks = tf.split(query_triangle, query_shape[0], axis=1)
  # adding mask for left and right flanges of each query row:
  # left flange is fully visible (ones), right flange is hidden (zeros)
  mask_pieces = [
      tf.concat(  # pylint: disable=g-complex-comprehension
          [tf.ones([np.prod(query_shape), memory_flange[1]]),
           split_query_masks[i],
           tf.zeros([np.prod(query_shape), memory_flange[1]])],
          axis=1) for i in range(query_shape[0])
  ]
  # adding mask for top: the rows above the query block are fully visible,
  # so prepend ones covering memory_flange[0] rows of padded width
  final_mask = tf.concat(
      [
          tf.ones([
              np.prod(query_shape),
              (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0]
          ]),
          tf.concat(mask_pieces, axis=1)
      ],
      axis=1)
  # Invert: in final_mask 1.0 meant visible; callers expect
  # 0.0 is visible location, 1.0 is masked.
  return 1. - final_mask
def dumps_xml(props, comment=None, sort_keys=False):
"""
Convert a series ``props`` of key-value pairs to a text string containing
an XML properties document. The document will include a doctype
declaration but not an XML declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:rtype: text string
"""
return ''.join(s + '\n' for s in _stream_xml(props, comment, sort_keys)) | Convert a series ``props`` of key-value pairs to a text string containing
an XML properties document. The document will include a doctype
declaration but not an XML declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:rtype: text string | Below is the the instruction that describes the task:
### Input:
Convert a series ``props`` of key-value pairs to a text string containing
an XML properties document. The document will include a doctype
declaration but not an XML declaration.
:param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
All keys and values in ``props`` must be text strings. If
``sort_keys`` is `False`, the entries are output in iteration order.
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param bool sort_keys: if true, the elements of ``props`` are sorted
lexicographically by key in the output
:rtype: text string
### Response:
def dumps_xml(props, comment=None, sort_keys=False):
    """
    Serialize the key-value pairs in ``props`` to a text string holding an
    XML properties document. The output includes a doctype declaration but
    no XML declaration.
    :param props: A mapping or iterable of ``(key, value)`` pairs to serialize.
        All keys and values in ``props`` must be text strings. If
        ``sort_keys`` is `False`, the entries are output in iteration order.
    :param comment: if non-`None`, ``comment`` will be output as a
        ``<comment>`` element before the ``<entry>`` elements
    :type comment: text string or `None`
    :param bool sort_keys: if true, the elements of ``props`` are sorted
        lexicographically by key in the output
    :rtype: text string
    """
    # Collect each emitted XML fragment followed by a newline, then join.
    pieces = []
    for fragment in _stream_xml(props, comment, sort_keys):
        pieces.append(fragment)
        pieces.append('\n')
    return ''.join(pieces)
def get_state_by_id(cls, state_id, **kwargs):
"""Find State
Return single instance of State by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_state_by_id(state_id, async=True)
>>> result = thread.get()
:param async bool
:param str state_id: ID of state to return (required)
:return: State
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_state_by_id_with_http_info(state_id, **kwargs)
else:
(data) = cls._get_state_by_id_with_http_info(state_id, **kwargs)
return data | Find State
Return single instance of State by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_state_by_id(state_id, async=True)
>>> result = thread.get()
:param async bool
:param str state_id: ID of state to return (required)
:return: State
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Find State
Return single instance of State by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_state_by_id(state_id, async=True)
>>> result = thread.get()
:param async bool
:param str state_id: ID of state to return (required)
:return: State
If the method is called asynchronously,
returns the request thread.
### Response:
def get_state_by_id(cls, state_id, **kwargs):
    """Find State
    Return single instance of State by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_state_by_id(state_id, async=True)
    >>> result = thread.get()
    :param async bool
    :param str state_id: ID of state to return (required)
    :return: State
    If the method is called asynchronously,
    returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('async'):
        # Synchronous path: the helper returns the data directly.
        data = cls._get_state_by_id_with_http_info(state_id, **kwargs)
        return data
    # Asynchronous path: the helper returns the request thread.
    return cls._get_state_by_id_with_http_info(state_id, **kwargs)
def register(self, new_calc, *args, **kwargs):
"""
Register calculations and meta data.
* ``dependencies`` - list of prerequisite calculations
* ``always_calc`` - ``True`` if calculation ignores thresholds
* ``frequency`` - frequency of calculation in intervals or units of time
:param new_calc: register new calculation
"""
kwargs.update(zip(self.meta_names, args))
# dependencies should be a list of other calculations
if isinstance(kwargs['dependencies'], basestring):
kwargs['dependencies'] = [kwargs['dependencies']]
# call super method, now meta can be passed as args or kwargs.
super(CalcRegistry, self).register(new_calc, **kwargs) | Register calculations and meta data.
* ``dependencies`` - list of prerequisite calculations
* ``always_calc`` - ``True`` if calculation ignores thresholds
* ``frequency`` - frequency of calculation in intervals or units of time
:param new_calc: register new calculation | Below is the the instruction that describes the task:
### Input:
Register calculations and meta data.
* ``dependencies`` - list of prerequisite calculations
* ``always_calc`` - ``True`` if calculation ignores thresholds
* ``frequency`` - frequency of calculation in intervals or units of time
:param new_calc: register new calculation
### Response:
def register(self, new_calc, *args, **kwargs):
    """
    Register calculations and meta data.
    * ``dependencies`` - list of prerequisite calculations
    * ``always_calc`` - ``True`` if calculation ignores thresholds
    * ``frequency`` - frequency of calculation in intervals or units of time
    :param new_calc: register new calculation
    """
    # Positional meta values are matched up with their names so the parent
    # register() only ever sees keyword meta.
    kwargs.update(zip(self.meta_names, args))
    # A bare string means a single dependency; normalize it to a list.
    deps = kwargs['dependencies']
    if isinstance(deps, basestring):
        kwargs['dependencies'] = [deps]
    super(CalcRegistry, self).register(new_calc, **kwargs)
def unpack(self, key, value):
"""Unpack and return value only if it is fresh."""
value, freshness = value
if not self.is_fresh(freshness):
raise KeyError('{} (stale)'.format(key))
return value | Unpack and return value only if it is fresh. | Below is the the instruction that describes the task:
### Input:
Unpack and return value only if it is fresh.
### Response:
def unpack(self, key, value):
    """Unpack and return value only if it is fresh."""
    payload, freshness = value
    if self.is_fresh(freshness):
        return payload
    # Stale entries behave like missing keys.
    raise KeyError('{} (stale)'.format(key))
def generate_hash(filepath):
"""Public function that reads a local file and generates a SHA256 hash digest for it"""
fr = FileReader(filepath)
data = fr.read_bin()
return _calculate_sha256(data) | Public function that reads a local file and generates a SHA256 hash digest for it | Below is the the instruction that describes the task:
### Input:
Public function that reads a local file and generates a SHA256 hash digest for it
### Response:
def generate_hash(filepath):
    """Public function that reads a local file and generates a SHA256 hash
    digest for it."""
    contents = FileReader(filepath).read_bin()
    return _calculate_sha256(contents)
def _from_dict(cls, _dict):
"""Initialize a SpeechRecognitionResult object from a json dictionary."""
args = {}
if 'final' in _dict or 'final_results' in _dict:
args['final_results'] = _dict.get('final') or _dict.get(
'final_results')
else:
raise ValueError(
'Required property \'final\' not present in SpeechRecognitionResult JSON'
)
if 'alternatives' in _dict:
args['alternatives'] = [
SpeechRecognitionAlternative._from_dict(x)
for x in (_dict.get('alternatives'))
]
else:
raise ValueError(
'Required property \'alternatives\' not present in SpeechRecognitionResult JSON'
)
if 'keywords_result' in _dict:
args['keywords_result'] = _dict.get('keywords_result')
if 'word_alternatives' in _dict:
args['word_alternatives'] = [
WordAlternativeResults._from_dict(x)
for x in (_dict.get('word_alternatives'))
]
return cls(**args) | Initialize a SpeechRecognitionResult object from a json dictionary. | Below is the the instruction that describes the task:
### Input:
Initialize a SpeechRecognitionResult object from a json dictionary.
### Response:
def _from_dict(cls, _dict):
    """Initialize a SpeechRecognitionResult object from a json dictionary."""
    # Guard clauses for the two required properties, then collect kwargs.
    if 'final' not in _dict and 'final_results' not in _dict:
        raise ValueError(
            "Required property 'final' not present in SpeechRecognitionResult JSON"
        )
    if 'alternatives' not in _dict:
        raise ValueError(
            "Required property 'alternatives' not present in SpeechRecognitionResult JSON"
        )
    args = {}
    args['final_results'] = _dict.get('final') or _dict.get('final_results')
    args['alternatives'] = [
        SpeechRecognitionAlternative._from_dict(alt)
        for alt in _dict.get('alternatives')
    ]
    if 'keywords_result' in _dict:
        args['keywords_result'] = _dict.get('keywords_result')
    if 'word_alternatives' in _dict:
        args['word_alternatives'] = [
            WordAlternativeResults._from_dict(wa)
            for wa in _dict.get('word_alternatives')
        ]
    return cls(**args)
def to_json_str(self):
"""Convert data to json string representation.
Returns:
json representation as string.
"""
_json = self.to_json()
try:
return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
except:
logging.exception("Could not serialize JSON: %r", _json)
raise | Convert data to json string representation.
Returns:
json representation as string. | Below is the the instruction that describes the task:
### Input:
Convert data to json string representation.
Returns:
json representation as string.
### Response:
def to_json_str(self):
    """Convert data to json string representation.

    Returns:
        json representation as string.

    Raises:
        Whatever json.dumps raises for unserializable data, after logging
        the offending object.
    """
    _json = self.to_json()
    try:
        return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
    except Exception:
        # Fix: a bare ``except:`` would also trap SystemExit and
        # KeyboardInterrupt; catch only real errors, log, and re-raise.
        logging.exception("Could not serialize JSON: %r", _json)
        raise
def neuron(layer_name, channel_n, x=None, y=None, batch=None):
"""Visualize a single neuron of a single channel.
Defaults to the center neuron. When width and height are even numbers, we
choose the neuron in the bottom right of the center 2x2 neurons.
Odd width & height: Even width & height:
+---+---+---+ +---+---+---+---+
| | | | | | | | |
+---+---+---+ +---+---+---+---+
| | X | | | | | | |
+---+---+---+ +---+---+---+---+
| | | | | | | X | |
+---+---+---+ +---+---+---+---+
| | | | |
+---+---+---+---+
"""
def inner(T):
layer = T(layer_name)
shape = tf.shape(layer)
x_ = shape[1] // 2 if x is None else x
y_ = shape[2] // 2 if y is None else y
if batch is None:
return layer[:, x_, y_, channel_n]
else:
return layer[batch, x_, y_, channel_n]
return inner | Visualize a single neuron of a single channel.
Defaults to the center neuron. When width and height are even numbers, we
choose the neuron in the bottom right of the center 2x2 neurons.
Odd width & height: Even width & height:
+---+---+---+ +---+---+---+---+
| | | | | | | | |
+---+---+---+ +---+---+---+---+
| | X | | | | | | |
+---+---+---+ +---+---+---+---+
| | | | | | | X | |
+---+---+---+ +---+---+---+---+
| | | | |
+---+---+---+---+ | Below is the the instruction that describes the task:
### Input:
Visualize a single neuron of a single channel.
Defaults to the center neuron. When width and height are even numbers, we
choose the neuron in the bottom right of the center 2x2 neurons.
Odd width & height: Even width & height:
+---+---+---+ +---+---+---+---+
| | | | | | | | |
+---+---+---+ +---+---+---+---+
| | X | | | | | | |
+---+---+---+ +---+---+---+---+
| | | | | | | X | |
+---+---+---+ +---+---+---+---+
| | | | |
+---+---+---+---+
### Response:
def neuron(layer_name, channel_n, x=None, y=None, batch=None):
    """Visualize a single neuron of a single channel.
    Defaults to the center neuron. When width and height are even numbers, we
    choose the neuron in the bottom right of the center 2x2 neurons.
    Odd width & height: Even width & height:
    +---+---+---+ +---+---+---+---+
    | | | | | | | | |
    +---+---+---+ +---+---+---+---+
    | | X | | | | | | |
    +---+---+---+ +---+---+---+---+
    | | | | | | | X | |
    +---+---+---+ +---+---+---+---+
    | | | | |
    +---+---+---+---+
    """
    def inner(T):
        layer = T(layer_name)
        shape = tf.shape(layer)
        # Fall back to the spatial center when a coordinate is not given.
        row = x if x is not None else shape[1] // 2
        col = y if y is not None else shape[2] // 2
        if batch is None:
            # Select the neuron across the whole batch.
            return layer[:, row, col, channel_n]
        return layer[batch, row, col, channel_n]
    return inner
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','') | A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False | Below is the the instruction that describes the task:
### Input:
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
### Response:
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','') |
def server_version(self):
"""
Special method for getting server version.
Because of different behaviour on different versions of
server, we have to pass different headers to the endpoints.
This method requests the version from server and caches it
in internal variable, so other resources could use it.
:return: server version parsed from `about` page.
"""
if self.__server_version is None:
from yagocd.resources.info import InfoManager
self.__server_version = InfoManager(self).version
return self.__server_version | Special method for getting server version.
Because of different behaviour on different versions of
server, we have to pass different headers to the endpoints.
This method requests the version from server and caches it
in internal variable, so other resources could use it.
:return: server version parsed from `about` page. | Below is the the instruction that describes the task:
### Input:
Special method for getting server version.
Because of different behaviour on different versions of
server, we have to pass different headers to the endpoints.
This method requests the version from server and caches it
in internal variable, so other resources could use it.
:return: server version parsed from `about` page.
### Response:
def server_version(self):
    """
    Special method for getting server version.
    Because of different behaviour on different versions of
    server, we have to pass different headers to the endpoints.
    This method requests the version from server and caches it
    in internal variable, so other resources could use it.
    :return: server version parsed from `about` page.
    """
    cached = self.__server_version
    if cached is not None:
        return cached
    # Imported lazily; presumably avoids a circular import at module
    # load time -- TODO confirm against the package layout.
    from yagocd.resources.info import InfoManager
    self.__server_version = InfoManager(self).version
    return self.__server_version
def cluster(self, matrix=None, level=None, sequence=None):
"""
Perform hierarchical clustering.
:param matrix: The 2D list that is currently under processing. The
matrix contains the distances of each item with each other
:param level: The current level of clustering
:param sequence: The sequence number of the clustering
"""
logger.info("Performing cluster()")
if matrix is None:
# create level 0, first iteration (sequence)
level = 0
sequence = 0
matrix = []
# if the matrix only has two rows left, we are done
linkage = partial(self.linkage, distance_function=self.distance)
initial_element_count = len(self._data)
while len(matrix) > 2 or matrix == []:
item_item_matrix = Matrix(self._data,
linkage,
True,
0)
item_item_matrix.genmatrix(self.num_processes)
matrix = item_item_matrix.matrix
smallestpair = None
mindistance = None
rowindex = 0 # keep track of where we are in the matrix
# find the minimum distance
for row in matrix:
cellindex = 0 # keep track of where we are in the matrix
for cell in row:
# if we are not on the diagonal (which is always 0)
# and if this cell represents a new minimum...
cell_lt_mdist = cell < mindistance if mindistance else False
if ((rowindex != cellindex) and
(cell_lt_mdist or smallestpair is None)):
smallestpair = (rowindex, cellindex)
mindistance = cell
cellindex += 1
rowindex += 1
sequence += 1
level = matrix[smallestpair[1]][smallestpair[0]]
cluster = Cluster(level, self._data[smallestpair[0]],
self._data[smallestpair[1]])
# maintain the data, by combining the the two most similar items
# in the list we use the min and max functions to ensure the
# integrity of the data. imagine: if we first remove the item
# with the smaller index, all the rest of the items shift down by
# one. So the next index will be wrong. We could simply adjust the
# value of the second "remove" call, but we don't know the order
# in which they come. The max and min approach clarifies that
self._data.remove(self._data[max(smallestpair[0],
smallestpair[1])]) # remove item 1
self._data.remove(self._data[min(smallestpair[0],
smallestpair[1])]) # remove item 2
self._data.append(cluster) # append item 1 and 2 combined
self.publish_progress(initial_element_count, len(self._data))
# all the data is in one single cluster. We return that and stop
self.__cluster_created = True
logger.info("Call to cluster() is complete")
return | Perform hierarchical clustering.
:param matrix: The 2D list that is currently under processing. The
matrix contains the distances of each item with each other
:param level: The current level of clustering
:param sequence: The sequence number of the clustering | Below is the the instruction that describes the task:
### Input:
Perform hierarchical clustering.
:param matrix: The 2D list that is currently under processing. The
matrix contains the distances of each item with each other
:param level: The current level of clustering
:param sequence: The sequence number of the clustering
### Response:
def cluster(self, matrix=None, level=None, sequence=None):
    """
    Perform hierarchical clustering.
    :param matrix: The 2D list that is currently under processing. The
        matrix contains the distances of each item with each other
    :param level: The current level of clustering
    :param sequence: The sequence number of the clustering
    """
    logger.info("Performing cluster()")
    if matrix is None:
        # create level 0, first iteration (sequence)
        level = 0
        sequence = 0
        matrix = []
    # if the matrix only has two rows left, we are done
    linkage = partial(self.linkage, distance_function=self.distance)
    initial_element_count = len(self._data)
    while len(matrix) > 2 or matrix == []:
        # Rebuild the pairwise distance matrix for the current contents of
        # self._data (items shrink by one per merge iteration).
        item_item_matrix = Matrix(self._data,
                                  linkage,
                                  True,
                                  0)
        item_item_matrix.genmatrix(self.num_processes)
        matrix = item_item_matrix.matrix
        smallestpair = None
        mindistance = None
        rowindex = 0  # keep track of where we are in the matrix
        # find the minimum off-diagonal distance (the closest pair)
        for row in matrix:
            cellindex = 0  # keep track of where we are in the matrix
            for cell in row:
                # if we are not on the diagonal (which is always 0)
                # and if this cell represents a new minimum...
                cell_lt_mdist = cell < mindistance if mindistance else False
                if ((rowindex != cellindex) and
                        (cell_lt_mdist or smallestpair is None)):
                    smallestpair = (rowindex, cellindex)
                    mindistance = cell
                cellindex += 1
            rowindex += 1
        sequence += 1
        level = matrix[smallestpair[1]][smallestpair[0]]
        cluster = Cluster(level, self._data[smallestpair[0]],
                          self._data[smallestpair[1]])
        # Maintain the data by combining the two most similar items.
        # We remove by max index first: if we removed the smaller index
        # first, everything after it would shift down by one and the
        # second index would point at the wrong element. Removing max
        # then min sidesteps having to know the order the indices come in.
        self._data.remove(self._data[max(smallestpair[0],
                                         smallestpair[1])])  # remove item 1
        self._data.remove(self._data[min(smallestpair[0],
                                         smallestpair[1])])  # remove item 2
        self._data.append(cluster)  # append item 1 and 2 combined
        self.publish_progress(initial_element_count, len(self._data))
    # all the data is in one single cluster. We return that and stop
    self.__cluster_created = True
    logger.info("Call to cluster() is complete")
    return
def tenant_delete(tenant_id=None, name=None, profile=None, **connection_args):
'''
Delete a tenant (keystone tenant-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete name=demo
'''
kstone = auth(profile, **connection_args)
if name:
for tenant in getattr(kstone, _TENANTS, None).list():
if tenant.name == name:
tenant_id = tenant.id
break
if not tenant_id:
return {'Error': 'Unable to resolve tenant id'}
getattr(kstone, _TENANTS, None).delete(tenant_id)
ret = 'Tenant ID {0} deleted'.format(tenant_id)
if name:
ret += ' ({0})'.format(name)
return ret | Delete a tenant (keystone tenant-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete name=demo | Below is the the instruction that describes the task:
### Input:
Delete a tenant (keystone tenant-delete)
CLI Examples:
.. code-block:: bash
salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
salt '*' keystone.tenant_delete name=demo
### Response:
def tenant_delete(tenant_id=None, name=None, profile=None, **connection_args):
    '''
    Delete a tenant (keystone tenant-delete).

    CLI Examples:

    .. code-block:: bash

        salt '*' keystone.tenant_delete c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.tenant_delete tenant_id=c965f79c4f864eaaa9c3b41904e67082
        salt '*' keystone.tenant_delete name=demo
    '''
    kstone = auth(profile, **connection_args)
    # When a name is supplied, resolve it to a tenant id first.
    if name:
        matching_ids = [tenant.id
                        for tenant in getattr(kstone, _TENANTS, None).list()
                        if tenant.name == name]
        if matching_ids:
            tenant_id = matching_ids[0]
    if not tenant_id:
        return {'Error': 'Unable to resolve tenant id'}
    getattr(kstone, _TENANTS, None).delete(tenant_id)
    message = 'Tenant ID {0} deleted'.format(tenant_id)
    if name:
        message += ' ({0})'.format(name)
    return message
def _parse_weights(weight_args, default_weight=0.6):
"""Parse list of weight assignments."""
weights_dict = {}
r_group_weight = default_weight
for weight_arg in weight_args:
for weight_assignment in weight_arg.split(','):
if '=' not in weight_assignment:
raise ValueError(
'Invalid weight assignment: {}'.format(weight_assignment))
key, value = weight_assignment.split('=', 1)
value = float(value)
if key == 'R':
r_group_weight = value
elif key == '*':
default_weight = value
elif hasattr(Atom, key):
weights_dict[Atom(key)] = value
else:
raise ValueError('Invalid element: {}'.format(key))
return weights_dict, r_group_weight, default_weight | Parse list of weight assignments. | Below is the instruction that describes the task:
### Input:
Parse list of weight assignments.
### Response:
def _parse_weights(weight_args, default_weight=0.6):
"""Parse list of weight assignments."""
weights_dict = {}
r_group_weight = default_weight
for weight_arg in weight_args:
for weight_assignment in weight_arg.split(','):
if '=' not in weight_assignment:
raise ValueError(
'Invalid weight assignment: {}'.format(weight_assignment))
key, value = weight_assignment.split('=', 1)
value = float(value)
if key == 'R':
r_group_weight = value
elif key == '*':
default_weight = value
elif hasattr(Atom, key):
weights_dict[Atom(key)] = value
else:
raise ValueError('Invalid element: {}'.format(key))
return weights_dict, r_group_weight, default_weight |
def stripIEConditionals(contents, addHtmlIfMissing=True):
'''
stripIEConditionals - Strips Internet Explorer conditional statements.
@param contents <str> - Contents String
@param addHtmlIfMissing <bool> - Since these normally encompass the "html" element, optionally add it back if missing.
'''
allMatches = IE_CONDITIONAL_PATTERN.findall(contents)
if not allMatches:
return contents
for match in allMatches:
contents = contents.replace(match, '')
if END_HTML.match(contents) and not START_HTML.match(contents):
contents = addStartTag(contents, '<html>')
return contents | stripIEConditionals - Strips Internet Explorer conditional statements.
@param contents <str> - Contents String
@param addHtmlIfMissing <bool> - Since these normally encompass the "html" element, optionally add it back if missing. | Below is the the instruction that describes the task:
### Input:
stripIEConditionals - Strips Internet Explorer conditional statements.
@param contents <str> - Contents String
@param addHtmlIfMissing <bool> - Since these normally encompass the "html" element, optionally add it back if missing.
### Response:
def stripIEConditionals(contents, addHtmlIfMissing=True):
    '''
    stripIEConditionals - Strips Internet Explorer conditional statements.

    @param contents <str> - Contents String
    @param addHtmlIfMissing <bool> - Since these normally encompass the "html" element, optionally add it back if missing.
    '''
    fragments = IE_CONDITIONAL_PATTERN.findall(contents)
    if fragments:
        # Drop every matched conditional block from the document.
        for fragment in fragments:
            contents = contents.replace(fragment, '')
        # The conditionals often wrap the opening <html> tag; restore it
        # when the closing tag survives but the opening one is gone.
        if END_HTML.match(contents) and not START_HTML.match(contents):
            contents = addStartTag(contents, '<html>')
    return contents
def filter_instance(self, inst, plist):
"""Remove properties from an instance that aren't in the PropertyList
inst -- The pywbem.CIMInstance
plist -- The property List, or None. The list items must be all
lowercase.
"""
if plist is not None:
for pname in inst.properties.keys():
if pname.lower() not in plist and pname:
if inst.path is not None and pname in inst.path.keybindings:
continue
del inst.properties[pname] | Remove properties from an instance that aren't in the PropertyList
inst -- The pywbem.CIMInstance
plist -- The property List, or None. The list items must be all
lowercase. | Below is the the instruction that describes the task:
### Input:
Remove properties from an instance that aren't in the PropertyList
inst -- The pywbem.CIMInstance
plist -- The property List, or None. The list items must be all
lowercase.
### Response:
def filter_instance(self, inst, plist):
    """Remove properties from an instance that aren't in the PropertyList.

    inst -- The pywbem.CIMInstance
    plist -- The property List, or None. The list items must be all
    lowercase.
    """
    if plist is not None:
        # Iterate over a snapshot of the keys: deleting entries while
        # iterating the live key view raises RuntimeError on Python 3.
        for pname in list(inst.properties.keys()):
            if pname.lower() not in plist and pname:
                # Keep properties that are also path keybindings, so the
                # instance stays addressable.
                if inst.path is not None and pname in inst.path.keybindings:
                    continue
                del inst.properties[pname]
def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:
"""Prepare a citation data dictionary from a graph.
:return: A dictionary of dictionaries {citation type: {(source, target): citation reference}
"""
results = defaultdict(lambda: defaultdict(set))
for u, v, data in graph.edges(data=True):
if CITATION not in data:
continue
results[data[CITATION][CITATION_TYPE]][u, v].add(data[CITATION][CITATION_REFERENCE].strip())
return dict(results) | Prepare a citation data dictionary from a graph.
:return: A dictionary of dictionaries {citation type: {(source, target): citation reference} | Below is the the instruction that describes the task:
### Input:
Prepare a citation data dictionary from a graph.
:return: A dictionary of dictionaries {citation type: {(source, target): citation reference}
### Response:
def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], str]]:
    """Prepare a citation data dictionary from a graph.

    :return: A dictionary of dictionaries {citation type: {(source, target): citation reference}
    """
    results = defaultdict(lambda: defaultdict(set))
    for source, target, data in graph.edges(data=True):
        if CITATION not in data:
            continue
        citation = data[CITATION]
        reference = citation[CITATION_REFERENCE].strip()
        results[citation[CITATION_TYPE]][source, target].add(reference)
    return dict(results)
async def run_node(menv, log_folder):
"""Run :class:`~creamas.mp.MultiEnvironment` until its manager's
:meth:`~aiomas.subproc.Manager.stop` is called.
:param menv: :class:`~creamas.mp.MultiEnvironment` to wait for.
:param str log_folder:
Logging folder to be passed down to
:meth:`~creamas.mp.MultiEnvironment.destroy` after :meth:`stop` is
called.
This method will block the current thread until the manager's
:meth:`~creamas.mp.MultiEnvManager.stop` is called. After the stop-message
is received, multi-environment is destroyed.
The method is intended to be
used in :class:`~creamas.ds.DistributedEnvironment` scripts which spawn
multi-environments on different nodes. That is, using this function in the
script will block the script's further execution until the simulation has
run its course and the nodes need to be destroyed.
Calling :meth:`~creamas.ds.DistributedEnvironment.destroy` will
automatically call each node manager's :meth:`stop` and therefore release
the script.
"""
try:
await menv.manager.stop_received
except KeyboardInterrupt:
pass
finally:
ret = await menv.destroy(log_folder, as_coro=True)
return ret | Run :class:`~creamas.mp.MultiEnvironment` until its manager's
:meth:`~aiomas.subproc.Manager.stop` is called.
:param menv: :class:`~creamas.mp.MultiEnvironment` to wait for.
:param str log_folder:
Logging folder to be passed down to
:meth:`~creamas.mp.MultiEnvironment.destroy` after :meth:`stop` is
called.
This method will block the current thread until the manager's
:meth:`~creamas.mp.MultiEnvManager.stop` is called. After the stop-message
is received, multi-environment is destroyed.
The method is intended to be
used in :class:`~creamas.ds.DistributedEnvironment` scripts which spawn
multi-environments on different nodes. That is, using this function in the
script will block the script's further execution until the simulation has
run its course and the nodes need to be destroyed.
Calling :meth:`~creamas.ds.DistributedEnvironment.destroy` will
automatically call each node manager's :meth:`stop` and therefore release
the script. | Below is the the instruction that describes the task:
### Input:
Run :class:`~creamas.mp.MultiEnvironment` until its manager's
:meth:`~aiomas.subproc.Manager.stop` is called.
:param menv: :class:`~creamas.mp.MultiEnvironment` to wait for.
:param str log_folder:
Logging folder to be passed down to
:meth:`~creamas.mp.MultiEnvironment.destroy` after :meth:`stop` is
called.
This method will block the current thread until the manager's
:meth:`~creamas.mp.MultiEnvManager.stop` is called. After the stop-message
is received, multi-environment is destroyed.
The method is intended to be
used in :class:`~creamas.ds.DistributedEnvironment` scripts which spawn
multi-environments on different nodes. That is, using this function in the
script will block the script's further execution until the simulation has
run its course and the nodes need to be destroyed.
Calling :meth:`~creamas.ds.DistributedEnvironment.destroy` will
automatically call each node manager's :meth:`stop` and therefore release
the script.
### Response:
async def run_node(menv, log_folder):
    """Run :class:`~creamas.mp.MultiEnvironment` until its manager's
    :meth:`~aiomas.subproc.Manager.stop` is called.

    :param menv: :class:`~creamas.mp.MultiEnvironment` to wait for.
    :param str log_folder:
        Logging folder to be passed down to
        :meth:`~creamas.mp.MultiEnvironment.destroy` after :meth:`stop` is
        called.

    This method will block the current thread until the manager's
    :meth:`~creamas.mp.MultiEnvManager.stop` is called. After the
    stop-message is received, the multi-environment is destroyed.

    The method is intended to be used in
    :class:`~creamas.ds.DistributedEnvironment` scripts which spawn
    multi-environments on different nodes. That is, using this function in
    the script will block the script's further execution until the
    simulation has run its course and the nodes need to be destroyed.
    Calling :meth:`~creamas.ds.DistributedEnvironment.destroy` will
    automatically call each node manager's :meth:`stop` and therefore
    release the script.
    """
    try:
        # Block until the manager receives its stop-message (or the user
        # interrupts the process with Ctrl-C).
        await menv.manager.stop_received
    except KeyboardInterrupt:
        pass
    finally:
        # Always tear the multi-environment down, whatever ended the wait.
        ret = await menv.destroy(log_folder, as_coro=True)
    # The return statement used to live inside ``finally``, which silently
    # swallowed any unexpected exception raised while waiting. Returning
    # here preserves the normal result while letting such exceptions
    # propagate after cleanup.
    return ret
def matches(self, *args, **kwargs):
"""Test if a request matches a :ref:`message spec <message spec>`.
Returns ``True`` or ``False``.
"""
request = make_prototype_request(*args, **kwargs)
if self._prototype.opcode not in (None, request.opcode):
return False
if self._prototype.is_command not in (None, request.is_command):
return False
for name in dir(self._prototype):
if name.startswith('_') or name in request._non_matched_attrs:
# Ignore privates, and handle documents specially.
continue
prototype_value = getattr(self._prototype, name, None)
if inspect.ismethod(prototype_value):
continue
actual_value = getattr(request, name, None)
if prototype_value not in (None, actual_value):
return False
if len(self._prototype.docs) not in (0, len(request.docs)):
return False
return self._prototype._matches_docs(self._prototype.docs, request.docs) | Test if a request matches a :ref:`message spec <message spec>`.
Returns ``True`` or ``False``. | Below is the instruction that describes the task:
### Input:
Test if a request matches a :ref:`message spec <message spec>`.
Returns ``True`` or ``False``.
### Response:
def matches(self, *args, **kwargs):
    """Test if a request matches a :ref:`message spec <message spec>`.

    Returns ``True`` or ``False``.
    """
    # Build a prototype request from the caller-supplied spec so it can be
    # compared field-by-field against the stored prototype.
    request = make_prototype_request(*args, **kwargs)
    # A ``None`` field on the prototype acts as a wildcard: each check
    # passes when the prototype left the field unset or it matches.
    if self._prototype.opcode not in (None, request.opcode):
        return False
    if self._prototype.is_command not in (None, request.is_command):
        return False
    # Compare every remaining public data attribute of the prototype.
    for name in dir(self._prototype):
        if name.startswith('_') or name in request._non_matched_attrs:
            # Ignore privates, and handle documents specially.
            continue
        prototype_value = getattr(self._prototype, name, None)
        if inspect.ismethod(prototype_value):
            # Bound methods take no part in the match.
            continue
        actual_value = getattr(request, name, None)
        if prototype_value not in (None, actual_value):
            return False
    # An empty prototype document list matches any number of request docs;
    # otherwise the counts must agree before comparing contents.
    if len(self._prototype.docs) not in (0, len(request.docs)):
        return False
    return self._prototype._matches_docs(self._prototype.docs, request.docs)
def _setup_segments(self):
"""
Parses the database file to determine what kind of database is
being used and setup segment sizes and start points that will
be used by the seek*() methods later.
"""
self._databaseType = const.COUNTRY_EDITION
self._recordLength = const.STANDARD_RECORD_LENGTH
self._databaseSegments = const.COUNTRY_BEGIN
filepos = self._fp.tell()
self._fp.seek(-3, os.SEEK_END)
for i in range(const.STRUCTURE_INFO_MAX_SIZE):
chars = chr(255) * 3
delim = self._fp.read(3)
if PY3 and type(delim) is bytes:
delim = delim.decode(ENCODING)
if PY2:
chars = chars.decode(ENCODING)
if type(delim) is str:
delim = delim.decode(ENCODING)
if delim == chars:
byte = self._fp.read(1)
self._databaseType = ord(byte)
# Compatibility with databases from April 2003 and earlier
if self._databaseType >= 106:
self._databaseType -= 105
if self._databaseType == const.REGION_EDITION_REV0:
self._databaseSegments = const.STATE_BEGIN_REV0
elif self._databaseType == const.REGION_EDITION_REV1:
self._databaseSegments = const.STATE_BEGIN_REV1
elif self._databaseType in (const.CITY_EDITION_REV0,
const.CITY_EDITION_REV1,
const.CITY_EDITION_REV1_V6,
const.ORG_EDITION,
const.ISP_EDITION,
const.NETSPEED_EDITION_REV1,
const.NETSPEED_EDITION_REV1_V6,
const.ASNUM_EDITION,
const.ASNUM_EDITION_V6):
self._databaseSegments = 0
buf = self._fp.read(const.SEGMENT_RECORD_LENGTH)
if PY3 and type(buf) is bytes:
buf = buf.decode(ENCODING)
for j in range(const.SEGMENT_RECORD_LENGTH):
self._databaseSegments += (ord(buf[j]) << (j * 8))
LONG_RECORDS = (const.ORG_EDITION, const.ISP_EDITION)
if self._databaseType in LONG_RECORDS:
self._recordLength = const.ORG_RECORD_LENGTH
break
else:
self._fp.seek(-4, os.SEEK_CUR)
self._fp.seek(filepos, os.SEEK_SET) | Parses the database file to determine what kind of database is
being used and setup segment sizes and start points that will
be used by the seek*() methods later. | Below is the the instruction that describes the task:
### Input:
Parses the database file to determine what kind of database is
being used and setup segment sizes and start points that will
be used by the seek*() methods later.
### Response:
def _setup_segments(self):
    """
    Parses the database file to determine what kind of database is
    being used and setup segment sizes and start points that will
    be used by the seek*() methods later.
    """
    # Defaults: assume a plain country edition until proven otherwise.
    self._databaseType = const.COUNTRY_EDITION
    self._recordLength = const.STANDARD_RECORD_LENGTH
    self._databaseSegments = const.COUNTRY_BEGIN
    # Remember the current position so it can be restored at the end.
    filepos = self._fp.tell()
    # The structure-info block sits near the end of the file, preceded by
    # a delimiter of three 0xFF bytes; scan backwards looking for it.
    self._fp.seek(-3, os.SEEK_END)
    for i in range(const.STRUCTURE_INFO_MAX_SIZE):
        chars = chr(255) * 3
        delim = self._fp.read(3)
        # Normalize bytes/str differences between Python 2 and 3 before
        # comparing against the delimiter.
        if PY3 and type(delim) is bytes:
            delim = delim.decode(ENCODING)
        if PY2:
            chars = chars.decode(ENCODING)
            if type(delim) is str:
                delim = delim.decode(ENCODING)
        if delim == chars:
            # Found the delimiter: the next byte encodes the edition.
            byte = self._fp.read(1)
            self._databaseType = ord(byte)
            # Compatibility with databases from April 2003 and earlier
            if self._databaseType >= 106:
                self._databaseType -= 105
            if self._databaseType == const.REGION_EDITION_REV0:
                self._databaseSegments = const.STATE_BEGIN_REV0
            elif self._databaseType == const.REGION_EDITION_REV1:
                self._databaseSegments = const.STATE_BEGIN_REV1
            elif self._databaseType in (const.CITY_EDITION_REV0,
                                        const.CITY_EDITION_REV1,
                                        const.CITY_EDITION_REV1_V6,
                                        const.ORG_EDITION,
                                        const.ISP_EDITION,
                                        const.NETSPEED_EDITION_REV1,
                                        const.NETSPEED_EDITION_REV1_V6,
                                        const.ASNUM_EDITION,
                                        const.ASNUM_EDITION_V6):
                # These editions store the segment count right after the
                # edition byte as a little-endian integer.
                self._databaseSegments = 0
                buf = self._fp.read(const.SEGMENT_RECORD_LENGTH)
                if PY3 and type(buf) is bytes:
                    buf = buf.decode(ENCODING)
                for j in range(const.SEGMENT_RECORD_LENGTH):
                    self._databaseSegments += (ord(buf[j]) << (j * 8))
                # Org/ISP editions use wider records.
                LONG_RECORDS = (const.ORG_EDITION, const.ISP_EDITION)
                if self._databaseType in LONG_RECORDS:
                    self._recordLength = const.ORG_RECORD_LENGTH
            break
        else:
            # Not the delimiter: step back over the 3 bytes just read plus
            # one more, and try again.
            self._fp.seek(-4, os.SEEK_CUR)
    # Restore the caller's file position.
    self._fp.seek(filepos, os.SEEK_SET)
def generate_harvester_config(self, catalogs=None, harvest='valid',
report=None, export_path=None):
"""Genera un archivo de configuración del harvester a partir de un
reporte, o de un conjunto de catálogos y un criterio de cosecha
(`harvest`).
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets incluir en el
archivo de configuración generado ('all', 'none',
'valid', 'report' o 'good').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`,
en cuyo caso `catalogs` se ignora.
export_path (str): Path donde exportar el reporte generado (en
formato XLSX o CSV). Si se especifica, el método no devolverá
nada.
Returns:
list of dicts: Un diccionario con variables de configuración
por cada dataset a cosechar.
"""
# Si se pasa un único catálogo, genero una lista que lo contenga
if isinstance(catalogs, string_types + (dict,)):
catalogs = [catalogs]
if harvest == 'report':
if not report:
raise ValueError("""
Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para
el argumento 'report'. Por favor, intentelo nuevamente.""")
datasets_report = readers.read_table(report)
elif harvest in ['valid', 'none', 'all']:
# catalogs no puede faltar para estos criterios
assert isinstance(catalogs, string_types + (dict, list))
datasets_report = self.generate_datasets_report(catalogs, harvest)
else:
raise ValueError("""
{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o
'report'.""".format(harvest))
# define los campos del reporte que mantiene para el config file
config_keys = [
"catalog_federation_id", "catalog_federation_org",
"dataset_identifier"
]
# cambia algunos nombres de estos campos para el config file
config_translator = {
"catalog_federation_id": "catalog_id",
"catalog_federation_org": "dataset_organization"
}
translated_keys = [config_translator.get(k, k) for k in config_keys]
harvester_config = [
OrderedDict(
# Retengo únicamente los campos que necesita el harvester
[(config_translator.get(k, k), v)
for (k, v) in dataset.items() if k in config_keys]
)
# Para aquellos datasets marcados con 'harvest'==1
for dataset in datasets_report if bool(int(dataset["harvest"]))
]
# chequea que el archivo de configuración tiene todos los campos
required_keys = set(translated_keys)
for row in harvester_config:
row_keys = set(row.keys())
msg = "Hay una fila con claves {} y debe tener claves {}".format(
row_keys, required_keys)
assert row_keys == required_keys, msg
if export_path:
writers.write_table(harvester_config, export_path)
else:
return harvester_config | Genera un archivo de configuración del harvester a partir de un
reporte, o de un conjunto de catálogos y un criterio de cosecha
(`harvest`).
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets incluir en el
archivo de configuración generado ('all', 'none',
'valid', 'report' o 'good').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`,
en cuyo caso `catalogs` se ignora.
export_path (str): Path donde exportar el reporte generado (en
formato XLSX o CSV). Si se especifica, el método no devolverá
nada.
Returns:
list of dicts: Un diccionario con variables de configuración
por cada dataset a cosechar. | Below is the the instruction that describes the task:
### Input:
Genera un archivo de configuración del harvester a partir de un
reporte, o de un conjunto de catálogos y un criterio de cosecha
(`harvest`).
Args:
catalogs (str, dict o list): Uno (str o dict) o varios (list de
strs y/o dicts) catálogos.
harvest (str): Criterio para determinar qué datasets incluir en el
archivo de configuración generado ('all', 'none',
'valid', 'report' o 'good').
report (list o str): Tabla de reporte generada por
generate_datasets_report() como lista de diccionarios o archivo
en formato XLSX o CSV. Sólo se usa cuando `harvest=='report'`,
en cuyo caso `catalogs` se ignora.
export_path (str): Path donde exportar el reporte generado (en
formato XLSX o CSV). Si se especifica, el método no devolverá
nada.
Returns:
list of dicts: Un diccionario con variables de configuración
por cada dataset a cosechar.
### Response:
def generate_harvester_config(self, catalogs=None, harvest='valid',
                              report=None, export_path=None):
    """Generate a harvester configuration file from a report, or from a
    set of catalogs and a harvest criterion (`harvest`).

    Args:
        catalogs (str, dict or list): One (str or dict) or several (list
            of strs and/or dicts) catalogs.
        harvest (str): Criterion used to decide which datasets to include
            in the generated configuration file ('all', 'none', 'valid',
            'report' or 'good').
        report (list or str): Report table generated by
            generate_datasets_report() as a list of dicts, or a file in
            XLSX or CSV format. Only used when `harvest=='report'`, in
            which case `catalogs` is ignored.
        export_path (str): Path where the generated report is exported
            (in XLSX or CSV format). If given, the method returns nothing.

    Returns:
        list of dicts: A dict of configuration variables for each dataset
        to harvest.
    """
    # If a single catalog was passed, wrap it in a list.
    if isinstance(catalogs, string_types + (dict,)):
        catalogs = [catalogs]
    if harvest == 'report':
        if not report:
            raise ValueError("""
Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para
el argumento 'report'. Por favor, intentelo nuevamente.""")
        datasets_report = readers.read_table(report)
    elif harvest in ['valid', 'none', 'all']:
        # `catalogs` is mandatory for these criteria.
        assert isinstance(catalogs, string_types + (dict, list))
        datasets_report = self.generate_datasets_report(catalogs, harvest)
    else:
        raise ValueError("""
{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o
'report'.""".format(harvest))
    # Report fields that are kept for the config file.
    config_keys = [
        "catalog_federation_id", "catalog_federation_org",
        "dataset_identifier"
    ]
    # Some of those fields are renamed in the config file.
    config_translator = {
        "catalog_federation_id": "catalog_id",
        "catalog_federation_org": "dataset_organization"
    }
    translated_keys = [config_translator.get(k, k) for k in config_keys]
    harvester_config = [
        OrderedDict(
            # Keep only the fields the harvester needs.
            [(config_translator.get(k, k), v)
             for (k, v) in dataset.items() if k in config_keys]
        )
        # Only for datasets flagged with 'harvest'==1.
        for dataset in datasets_report if bool(int(dataset["harvest"]))
    ]
    # Check that every configuration row carries all required fields.
    required_keys = set(translated_keys)
    for row in harvester_config:
        row_keys = set(row.keys())
        msg = "Hay una fila con claves {} y debe tener claves {}".format(
            row_keys, required_keys)
        assert row_keys == required_keys, msg
    if export_path:
        writers.write_table(harvester_config, export_path)
    else:
        return harvester_config
def bed_generator(bed_path):
"""Iterates through a BED file yielding parsed BED lines.
Parameters
----------
bed_path : str
path to BED file
Yields
------
BedLine(line) : BedLine
A BedLine object which has parsed the individual line in
a BED file.
"""
with open(bed_path) as handle:
bed_reader = csv.reader(handle, delimiter='\t')
for line in bed_reader:
yield BedLine(line) | Iterates through a BED file yielding parsed BED lines.
Parameters
----------
bed_path : str
path to BED file
Yields
------
BedLine(line) : BedLine
A BedLine object which has parsed the individual line in
a BED file. | Below is the the instruction that describes the task:
### Input:
Iterates through a BED file yielding parsed BED lines.
Parameters
----------
bed_path : str
path to BED file
Yields
------
BedLine(line) : BedLine
A BedLine object which has parsed the individual line in
a BED file.
### Response:
def bed_generator(bed_path):
    """Iterates through a BED file yielding parsed BED lines.

    Parameters
    ----------
    bed_path : str
        path to BED file

    Yields
    ------
    BedLine
        A BedLine object which has parsed one line of the BED file.
    """
    # BED files are tab-separated; let the csv module do the splitting.
    with open(bed_path) as bed_file:
        tsv_rows = csv.reader(bed_file, delimiter='\t')
        for row in tsv_rows:
            yield BedLine(row)
def config(self):
""" Returns a dictionary for the loaded configuration """
return {
key: self.__dict__[key]
for key in dir(self)
if key.isupper()
} | Returns a dictionary for the loaded configuration | Below is the the instruction that describes the task:
### Input:
Returns a dictionary for the loaded configuration
### Response:
def config(self):
    """Return the loaded configuration as a dictionary.

    Collects every attribute whose name is all upper-case, the convention
    used for configuration keys.

    Returns:
        dict: mapping of upper-case attribute names to their values.
    """
    # Use getattr() rather than self.__dict__[key]: dir() also reports
    # class-level attributes (the usual place for configuration defaults),
    # which are absent from the instance __dict__ and previously raised
    # KeyError.
    return {
        key: getattr(self, key)
        for key in dir(self)
        if key.isupper()
    }
def get_current_ontology_date():
"""Get the release date of the current Gene Ontolgo release."""
with closing(requests.get(
'http://geneontology.org/ontology/go-basic.obo',
stream=True)) as r:
for i, l in enumerate(r.iter_lines(decode_unicode=True)):
if i == 1:
assert l.split(':')[0] == 'data-version'
date = l.split('/')[-1]
break
return date | Get the release date of the current Gene Ontolgo release. | Below is the the instruction that describes the task:
### Input:
Get the release date of the current Gene Ontolgo release.
### Response:
def get_current_ontology_date():
    """Get the release date of the current Gene Ontology release."""
    # Stream the OBO file and close the connection as soon as the header
    # has been read -- the full ontology is large.
    with closing(requests.get(
            'http://geneontology.org/ontology/go-basic.obo',
            stream=True)) as r:
        for i, l in enumerate(r.iter_lines(decode_unicode=True)):
            if i == 1:
                # The second header line is expected to look like
                # "data-version: releases/<date>"; take the last path part.
                assert l.split(':')[0] == 'data-version'
                date = l.split('/')[-1]
                break
    return date
def keys(self):
""" Returns all row keys
:raise NotImplementedError: if all rows aren't keyed
:return: all row keys
"""
if len(self._row_name_list) != len(self._rows):
raise NotImplementedError("You can't get row keys for a FVM that doesn't have all rows keyed")
return self.row_names() | Returns all row keys
:raise NotImplementedError: if all rows aren't keyed
:return: all row keys | Below is the the instruction that describes the task:
### Input:
Returns all row keys
:raise NotImplementedError: if all rows aren't keyed
:return: all row keys
### Response:
def keys(self):
    """ Returns all row keys

    :raise NotImplementedError: if all rows aren't keyed
    :return: all row keys
    """
    named_rows = len(self._row_name_list)
    total_rows = len(self._rows)
    # Keys only make sense when every row carries a name.
    if named_rows != total_rows:
        raise NotImplementedError("You can't get row keys for a FVM that doesn't have all rows keyed")
    return self.row_names()
def convert(self, text):
"""
convert Finglish(or whatever you'd like to call) to Persian.
gets and returns string.
"""
url = self.API_URL
encoding = self.ENCODING
headers = self.HEADERS
data = urlencode({
'farsi': str(text)
}).encode(encoding)
request = Request(url=url,data=data,headers=headers)
response = urlopen(request)
result = response.read()
response_encoding = response.headers['Content-Type']
response_encoding = response_encoding[response_encoding.find('=')+1:]
result = result.decode(response_encoding)
# a simple fix
result = result.replace('\ufeff','')[:-1]
return result | convert Finglish(or whatever you'd like to call) to Persian.
gets and returns string. | Below is the the instruction that describes the task:
### Input:
convert Finglish(or whatever you'd like to call) to Persian.
gets and returns string.
### Response:
def convert(self, text):
    """
    convert Finglish(or whatever you'd like to call) to Persian.
    gets and returns string.
    """
    url = self.API_URL
    encoding = self.ENCODING
    headers = self.HEADERS
    # POST the text as the 'farsi' form field, byte-encoded for the API.
    data = urlencode({
        'farsi': str(text)
    }).encode(encoding)
    request = Request(url=url,data=data,headers=headers)
    response = urlopen(request)
    result = response.read()
    # Take the response charset from the Content-Type header, e.g.
    # "text/html; charset=utf-8" -> everything after '='.
    response_encoding = response.headers['Content-Type']
    response_encoding = response_encoding[response_encoding.find('=')+1:]
    result = result.decode(response_encoding)
    # a simple fix: drop the BOM and the final character of the reply
    # (presumably a trailing newline from the API -- TODO confirm).
    result = result.replace('\ufeff','')[:-1]
    return result
def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None):
"""
Construct a `GKKPWork` from a `PhononWfkqWork` object.
The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands
"""
# Get list of qpoints from the the phonon tasks in this work
qpoints = []
qpoints_deps = []
for task in phononwfkq_work:
if isinstance(task,PhononTask):
# Store qpoints
qpt = task.input.get("qpt", [0,0,0])
qpoints.append(qpt)
# Store dependencies
qpoints_deps.append(task.deps)
# Create file nodes
ddb_path = phononwfkq_work.outdir.has_abiext("DDB")
dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB")
ddb_file = FileNode(ddb_path)
dvdb_file = FileNode(dvdb_path)
# Get scf_task from first q-point
for dep in qpoints_deps[0]:
if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK':
scf_task = dep.node
# Create new work
new = cls(manager=manager)
new.remove_wfkq = remove_wfkq
new.wfkq_tasks = []
new.wfk_task = []
# Add one eph task per qpoint
for qpt,qpoint_deps in zip(qpoints,qpoints_deps):
# Create eph task
eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2,
ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt)
deps = {ddb_file: "DDB", dvdb_file: "DVDB" }
for dep in qpoint_deps:
deps[dep.node] = dep.exts[0]
# If no WFQ in deps link the WFK with WFQ extension
if 'WFQ' not in deps.values():
inv_deps = dict((v, k) for k, v in deps.items())
wfk_task = inv_deps['WFK']
wfk_path = wfk_task.outdir.has_abiext("WFK")
# Check if netcdf
filename, extension = os.path.splitext(wfk_path)
infile = 'out_WFQ' + extension
wfq_path = os.path.join(os.path.dirname(wfk_path), infile)
if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path)
deps[FileNode(wfq_path)] = 'WFQ'
new.register_eph_task(eph_input, deps=deps)
return new | Construct a `GKKPWork` from a `PhononWfkqWork` object.
The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands | Below is the the instruction that describes the task:
### Input:
Construct a `GKKPWork` from a `PhononWfkqWork` object.
The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands
### Response:
def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None):
    """
    Construct a `GKKPWork` from a `PhononWfkqWork` object.
    The WFQ are the ones used for PhononWfkqWork so in principle have only valence bands
    """
    # NOTE(review): the mutable default ``nscf_vars={}`` is never mutated
    # here (``nscf_vars`` and ``with_ddk`` appear unused) -- confirm before
    # changing the signature.
    # Get list of qpoints from the phonon tasks in this work
    qpoints = []
    qpoints_deps = []
    for task in phononwfkq_work:
        if isinstance(task,PhononTask):
            # Store qpoints
            qpt = task.input.get("qpt", [0,0,0])
            qpoints.append(qpt)
            # Store dependencies
            qpoints_deps.append(task.deps)
    # Create file nodes
    ddb_path = phononwfkq_work.outdir.has_abiext("DDB")
    dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB")
    ddb_file = FileNode(ddb_path)
    dvdb_file = FileNode(dvdb_path)
    # Get scf_task from first q-point
    for dep in qpoints_deps[0]:
        if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK':
            scf_task = dep.node
    # Create new work
    new = cls(manager=manager)
    new.remove_wfkq = remove_wfkq
    new.wfkq_tasks = []
    new.wfk_task = []
    # Add one eph task per qpoint
    for qpt,qpoint_deps in zip(qpoints,qpoints_deps):
        # Create eph task
        eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2,
                                                 ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt)
        deps = {ddb_file: "DDB", dvdb_file: "DVDB" }
        for dep in qpoint_deps:
            deps[dep.node] = dep.exts[0]
        # If no WFQ in deps link the WFK with WFQ extension
        if 'WFQ' not in deps.values():
            inv_deps = dict((v, k) for k, v in deps.items())
            wfk_task = inv_deps['WFK']
            wfk_path = wfk_task.outdir.has_abiext("WFK")
            # Check if netcdf
            filename, extension = os.path.splitext(wfk_path)
            infile = 'out_WFQ' + extension
            wfq_path = os.path.join(os.path.dirname(wfk_path), infile)
            if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path)
            deps[FileNode(wfq_path)] = 'WFQ'
        new.register_eph_task(eph_input, deps=deps)
    return new
def add_qreg(self, qreg):
"""Add all wires in a quantum register."""
if not isinstance(qreg, QuantumRegister):
raise DAGCircuitError("not a QuantumRegister instance.")
if qreg.name in self.qregs:
raise DAGCircuitError("duplicate register %s" % qreg.name)
self.qregs[qreg.name] = qreg
for j in range(qreg.size):
        self._add_wire((qreg, j)) | Add all wires in a quantum register. | Below is the instruction that describes the task:
### Input:
Add all wires in a quantum register.
### Response:
def add_qreg(self, qreg):
"""Add all wires in a quantum register."""
if not isinstance(qreg, QuantumRegister):
raise DAGCircuitError("not a QuantumRegister instance.")
if qreg.name in self.qregs:
raise DAGCircuitError("duplicate register %s" % qreg.name)
self.qregs[qreg.name] = qreg
for j in range(qreg.size):
self._add_wire((qreg, j)) |
def forward(self, actions, batch_info):
""" Return model step after applying noise """
while len(self.processes) < actions.shape[0]:
len_action_space = self.action_space.shape[-1]
self.processes.append(
OrnsteinUhlenbeckNoiseProcess(
np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space)
)
)
noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device)
    return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor) | Return model step after applying noise | Below is the instruction that describes the task:
### Input:
Return model step after applying noise
### Response:
def forward(self, actions, batch_info):
""" Return model step after applying noise """
while len(self.processes) < actions.shape[0]:
len_action_space = self.action_space.shape[-1]
self.processes.append(
OrnsteinUhlenbeckNoiseProcess(
np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space)
)
)
noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device)
return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor) |
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization | Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization. | Below is the the instruction that describes the task:
### Input:
Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
### Response:
def _parse_normalizations(normalizations):
"""Parse and yield normalizations.
Parse normalizations parameter that yield all normalizations and
arguments found on it.
Args:
normalizations: List of normalizations.
Yields:
A tuple with a parsed normalization. The first item will
contain the normalization name and the second will be a dict
with the arguments to be used for the normalization.
"""
str_type = str if sys.version_info[0] > 2 else (str, unicode)
for normalization in normalizations:
yield (normalization, {}) if isinstance(normalization, str_type) else normalization |
def parse(self, packet):
"""
There are two formats for headers
old style
---------
Old style headers can be 1, 2, 3, or 6 octets long and are composed of a Tag and a Length.
If the header length is 1 octet (length_type == 3), then there is no Length field.
new style
---------
New style headers can be 2, 3, or 6 octets long and are also composed of a Tag and a Length.
Packet Tag
----------
The packet tag is the first byte, comprising the following fields:
+-------------+----------+---------------+---+---+---+---+----------+----------+
| byte | 1 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| old-style | always 1 | packet format | packet tag | length type |
| description | | 0 = old-style | | 0 = 1 octet |
| | | 1 = new-style | | 1 = 2 octets |
| | | | | 2 = 5 octets |
| | | | | 3 = no length field |
+-------------+ + +---------------+---------------------+
| new-style | | | packet tag |
| description | | | |
+-------------+----------+---------------+-------------------------------------+
:param packet: raw packet bytes
"""
self._lenfmt = ((packet[0] & 0x40) >> 6)
self.tag = packet[0]
if self._lenfmt == 0:
self.llen = (packet[0] & 0x03)
del packet[0]
if (self._lenfmt == 0 and self.llen > 0) or self._lenfmt == 1:
self.length = packet
else:
# indeterminate packet length
self.length = len(packet) | There are two formats for headers
old style
---------
Old style headers can be 1, 2, 3, or 6 octets long and are composed of a Tag and a Length.
If the header length is 1 octet (length_type == 3), then there is no Length field.
new style
---------
New style headers can be 2, 3, or 6 octets long and are also composed of a Tag and a Length.
Packet Tag
----------
The packet tag is the first byte, comprising the following fields:
+-------------+----------+---------------+---+---+---+---+----------+----------+
| byte | 1 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| old-style | always 1 | packet format | packet tag | length type |
| description | | 0 = old-style | | 0 = 1 octet |
| | | 1 = new-style | | 1 = 2 octets |
| | | | | 2 = 5 octets |
| | | | | 3 = no length field |
+-------------+ + +---------------+---------------------+
| new-style | | | packet tag |
| description | | | |
+-------------+----------+---------------+-------------------------------------+
:param packet: raw packet bytes | Below is the the instruction that describes the task:
### Input:
There are two formats for headers
old style
---------
Old style headers can be 1, 2, 3, or 6 octets long and are composed of a Tag and a Length.
If the header length is 1 octet (length_type == 3), then there is no Length field.
new style
---------
New style headers can be 2, 3, or 6 octets long and are also composed of a Tag and a Length.
Packet Tag
----------
The packet tag is the first byte, comprising the following fields:
+-------------+----------+---------------+---+---+---+---+----------+----------+
| byte | 1 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| old-style | always 1 | packet format | packet tag | length type |
| description | | 0 = old-style | | 0 = 1 octet |
| | | 1 = new-style | | 1 = 2 octets |
| | | | | 2 = 5 octets |
| | | | | 3 = no length field |
+-------------+ + +---------------+---------------------+
| new-style | | | packet tag |
| description | | | |
+-------------+----------+---------------+-------------------------------------+
:param packet: raw packet bytes
### Response:
def parse(self, packet):
"""
There are two formats for headers
old style
---------
Old style headers can be 1, 2, 3, or 6 octets long and are composed of a Tag and a Length.
If the header length is 1 octet (length_type == 3), then there is no Length field.
new style
---------
New style headers can be 2, 3, or 6 octets long and are also composed of a Tag and a Length.
Packet Tag
----------
The packet tag is the first byte, comprising the following fields:
+-------------+----------+---------------+---+---+---+---+----------+----------+
| byte | 1 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| bit | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
+-------------+----------+---------------+---+---+---+---+----------+----------+
| old-style | always 1 | packet format | packet tag | length type |
| description | | 0 = old-style | | 0 = 1 octet |
| | | 1 = new-style | | 1 = 2 octets |
| | | | | 2 = 5 octets |
| | | | | 3 = no length field |
+-------------+ + +---------------+---------------------+
| new-style | | | packet tag |
| description | | | |
+-------------+----------+---------------+-------------------------------------+
:param packet: raw packet bytes
"""
self._lenfmt = ((packet[0] & 0x40) >> 6)
self.tag = packet[0]
if self._lenfmt == 0:
self.llen = (packet[0] & 0x03)
del packet[0]
if (self._lenfmt == 0 and self.llen > 0) or self._lenfmt == 1:
self.length = packet
else:
# indeterminate packet length
self.length = len(packet) |
async def add_reaction(self, emoji):
"""|coro|
Add a reaction to the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You must have the :attr:`~Permissions.read_message_history` permission
to use this. If nobody else has reacted to the message using this
emoji, the :attr:`~Permissions.add_reactions` permission is required.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
"""
emoji = self._emoji_reaction(emoji)
await self._state.http.add_reaction(self.channel.id, self.id, emoji) | |coro|
Add a reaction to the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You must have the :attr:`~Permissions.read_message_history` permission
to use this. If nobody else has reacted to the message using this
emoji, the :attr:`~Permissions.add_reactions` permission is required.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid. | Below is the the instruction that describes the task:
### Input:
|coro|
Add a reaction to the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You must have the :attr:`~Permissions.read_message_history` permission
to use this. If nobody else has reacted to the message using this
emoji, the :attr:`~Permissions.add_reactions` permission is required.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
### Response:
async def add_reaction(self, emoji):
"""|coro|
Add a reaction to the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You must have the :attr:`~Permissions.read_message_history` permission
to use this. If nobody else has reacted to the message using this
emoji, the :attr:`~Permissions.add_reactions` permission is required.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
"""
emoji = self._emoji_reaction(emoji)
await self._state.http.add_reaction(self.channel.id, self.id, emoji) |
def add_error_handlers(app):
"""Add custom error handlers for PyMacaronCoreExceptions to the app"""
def handle_validation_error(error):
response = jsonify({'message': str(error)})
response.status_code = error.status_code
return response
app.errorhandler(ValidationError)(handle_validation_error) | Add custom error handlers for PyMacaronCoreExceptions to the app | Below is the the instruction that describes the task:
### Input:
Add custom error handlers for PyMacaronCoreExceptions to the app
### Response:
def add_error_handlers(app):
"""Add custom error handlers for PyMacaronCoreExceptions to the app"""
def handle_validation_error(error):
response = jsonify({'message': str(error)})
response.status_code = error.status_code
return response
app.errorhandler(ValidationError)(handle_validation_error) |
def _dct_from_mro(cls: type, attr_name: str) -> dict:
""""Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest)."""
d = {}
for c in reversed(cls.mro()):
d.update(getattr(c, attr_name, {}))
return d | Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest). | Below is the the instruction that describes the task:
### Input:
Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest).
### Response:
def _dct_from_mro(cls: type, attr_name: str) -> dict:
""""Get a merged dictionary from `cls` bases attribute `attr_name`. MRO defines importance (closest = strongest)."""
d = {}
for c in reversed(cls.mro()):
d.update(getattr(c, attr_name, {}))
return d |
def _get_title(self):
"""According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI"""
#TODO: unicode support
strbuffer = self.ctypes.create_string_buffer(1024)
size = self.ctypes.c_short(1024)
#unicode versions are (Get|Set)ConsolTitleW
self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size)
return strbuffer.value | According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI | Below is the the instruction that describes the task:
### Input:
According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI
### Response:
def _get_title(self):
"""According to http://support.microsoft.com/kb/124103 the buffer
size is 1024
Does not support unicode, only ANSI"""
#TODO: unicode support
strbuffer = self.ctypes.create_string_buffer(1024)
size = self.ctypes.c_short(1024)
#unicode versions are (Get|Set)ConsolTitleW
self.ctypes.windll.kernel32.GetConsoleTitleA(strbuffer, size)
return strbuffer.value |
def QA_indicator_DMI(DataFrame, M1=14, M2=6):
"""
趋向指标 DMI
"""
HIGH = DataFrame.high
LOW = DataFrame.low
CLOSE = DataFrame.close
OPEN = DataFrame.open
TR = SUM(MAX(MAX(HIGH-LOW, ABS(HIGH-REF(CLOSE, 1))),
ABS(LOW-REF(CLOSE, 1))), M1)
HD = HIGH-REF(HIGH, 1)
LD = REF(LOW, 1)-LOW
DMP = SUM(IFAND(HD>0,HD>LD,HD,0), M1)
DMM = SUM(IFAND(LD>0,LD>HD,LD,0), M1)
DI1 = DMP*100/TR
DI2 = DMM*100/TR
ADX = MA(ABS(DI2-DI1)/(DI1+DI2)*100, M2)
ADXR = (ADX+REF(ADX, M2))/2
return pd.DataFrame({
'DI1': DI1, 'DI2': DI2,
'ADX': ADX, 'ADXR': ADXR
    }) | 趋向指标 DMI | Below is the instruction that describes the task:
### Input:
趋向指标 DMI
### Response:
def QA_indicator_DMI(DataFrame, M1=14, M2=6):
"""
趋向指标 DMI
"""
HIGH = DataFrame.high
LOW = DataFrame.low
CLOSE = DataFrame.close
OPEN = DataFrame.open
TR = SUM(MAX(MAX(HIGH-LOW, ABS(HIGH-REF(CLOSE, 1))),
ABS(LOW-REF(CLOSE, 1))), M1)
HD = HIGH-REF(HIGH, 1)
LD = REF(LOW, 1)-LOW
DMP = SUM(IFAND(HD>0,HD>LD,HD,0), M1)
DMM = SUM(IFAND(LD>0,LD>HD,LD,0), M1)
DI1 = DMP*100/TR
DI2 = DMM*100/TR
ADX = MA(ABS(DI2-DI1)/(DI1+DI2)*100, M2)
ADXR = (ADX+REF(ADX, M2))/2
return pd.DataFrame({
'DI1': DI1, 'DI2': DI2,
'ADX': ADX, 'ADXR': ADXR
}) |
def apps():
'''
Return a list of the currently installed app ids.
CLI Example:
.. code-block:: bash
salt marathon-minion-id marathon.apps
'''
response = salt.utils.http.query(
"{0}/v2/apps".format(_base_url()),
decode_type='json',
decode=True,
)
return {'apps': [app['id'] for app in response['dict']['apps']]} | Return a list of the currently installed app ids.
CLI Example:
.. code-block:: bash
salt marathon-minion-id marathon.apps | Below is the the instruction that describes the task:
### Input:
Return a list of the currently installed app ids.
CLI Example:
.. code-block:: bash
salt marathon-minion-id marathon.apps
### Response:
def apps():
'''
Return a list of the currently installed app ids.
CLI Example:
.. code-block:: bash
salt marathon-minion-id marathon.apps
'''
response = salt.utils.http.query(
"{0}/v2/apps".format(_base_url()),
decode_type='json',
decode=True,
)
return {'apps': [app['id'] for app in response['dict']['apps']]} |
def append_tz_timestamp(self, tag, timestamp=None, precision=3,
header=False):
"""Append a field with a TZTimestamp value, derived from local time.
:param tag: Integer or string FIX tag number.
:param timestamp: Time value, see below.
:param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
:param header: Append to FIX header if True; default to body.
The `timestamp` value should be a local datetime, such as created
by datetime.datetime.now(); a float, being the number of seconds
since midnight 1 Jan 1970 UTC, such as returned by time.time();
or, None, in which case datetime.datetime.now() is used to get
the current local time.
Precision values other than zero (seconds), 3 (milliseconds),
or 6 (microseconds) will raise an exception. Note that prior
to FIX 5.0, only values of 0 or 3 comply with the standard."""
# Get float offset from Unix epoch.
if timestamp is None:
now = time.time()
elif type(timestamp) is float:
now = timestamp
else:
now = time.mktime(timestamp.timetuple()) + \
(timestamp.microsecond * 1e-6)
# Get offset of local timezone east of UTC.
utc = datetime.datetime.utcfromtimestamp(now)
local = datetime.datetime.fromtimestamp(now)
td = local - utc
offset = int(((td.days * 86400) + td.seconds) / 60)
s = local.strftime("%Y%m%d-%H:%M:%S")
if precision == 3:
s += ".%03u" % (local.microsecond / 1000)
elif precision == 6:
s += ".%06u" % local.microsecond
elif precision != 0:
raise ValueError("Precision (%u) should be one of "
"0, 3 or 6 digits" % precision)
s += self._tz_offset_string(offset)
return self.append_pair(tag, s, header=header) | Append a field with a TZTimestamp value, derived from local time.
:param tag: Integer or string FIX tag number.
:param timestamp: Time value, see below.
:param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
:param header: Append to FIX header if True; default to body.
The `timestamp` value should be a local datetime, such as created
by datetime.datetime.now(); a float, being the number of seconds
since midnight 1 Jan 1970 UTC, such as returned by time.time();
or, None, in which case datetime.datetime.now() is used to get
the current local time.
Precision values other than zero (seconds), 3 (milliseconds),
or 6 (microseconds) will raise an exception. Note that prior
to FIX 5.0, only values of 0 or 3 comply with the standard. | Below is the the instruction that describes the task:
### Input:
Append a field with a TZTimestamp value, derived from local time.
:param tag: Integer or string FIX tag number.
:param timestamp: Time value, see below.
:param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
:param header: Append to FIX header if True; default to body.
The `timestamp` value should be a local datetime, such as created
by datetime.datetime.now(); a float, being the number of seconds
since midnight 1 Jan 1970 UTC, such as returned by time.time();
or, None, in which case datetime.datetime.now() is used to get
the current local time.
Precision values other than zero (seconds), 3 (milliseconds),
or 6 (microseconds) will raise an exception. Note that prior
to FIX 5.0, only values of 0 or 3 comply with the standard.
### Response:
def append_tz_timestamp(self, tag, timestamp=None, precision=3,
header=False):
"""Append a field with a TZTimestamp value, derived from local time.
:param tag: Integer or string FIX tag number.
:param timestamp: Time value, see below.
:param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
:param header: Append to FIX header if True; default to body.
The `timestamp` value should be a local datetime, such as created
by datetime.datetime.now(); a float, being the number of seconds
since midnight 1 Jan 1970 UTC, such as returned by time.time();
or, None, in which case datetime.datetime.now() is used to get
the current local time.
Precision values other than zero (seconds), 3 (milliseconds),
or 6 (microseconds) will raise an exception. Note that prior
to FIX 5.0, only values of 0 or 3 comply with the standard."""
# Get float offset from Unix epoch.
if timestamp is None:
now = time.time()
elif type(timestamp) is float:
now = timestamp
else:
now = time.mktime(timestamp.timetuple()) + \
(timestamp.microsecond * 1e-6)
# Get offset of local timezone east of UTC.
utc = datetime.datetime.utcfromtimestamp(now)
local = datetime.datetime.fromtimestamp(now)
td = local - utc
offset = int(((td.days * 86400) + td.seconds) / 60)
s = local.strftime("%Y%m%d-%H:%M:%S")
if precision == 3:
s += ".%03u" % (local.microsecond / 1000)
elif precision == 6:
s += ".%06u" % local.microsecond
elif precision != 0:
raise ValueError("Precision (%u) should be one of "
"0, 3 or 6 digits" % precision)
s += self._tz_offset_string(offset)
return self.append_pair(tag, s, header=header) |
def clean_for_doc(nb):
"""
Cleans the notebook to be suitable for inclusion in the docs.
"""
new_cells = []
for cell in nb.worksheets[0].cells:
# Remove the pylab inline line.
if "input" in cell and cell["input"].strip() == "%pylab inline":
continue
# Remove output resulting from the stream/trace method chaining.
if "outputs" in cell:
outputs = [_i for _i in cell["outputs"] if "text" not in _i or
not _i["text"].startswith("<obspy.core")]
cell["outputs"] = outputs
new_cells.append(cell)
nb.worksheets[0].cells = new_cells
return nb | Cleans the notebook to be suitable for inclusion in the docs. | Below is the the instruction that describes the task:
### Input:
Cleans the notebook to be suitable for inclusion in the docs.
### Response:
def clean_for_doc(nb):
"""
Cleans the notebook to be suitable for inclusion in the docs.
"""
new_cells = []
for cell in nb.worksheets[0].cells:
# Remove the pylab inline line.
if "input" in cell and cell["input"].strip() == "%pylab inline":
continue
# Remove output resulting from the stream/trace method chaining.
if "outputs" in cell:
outputs = [_i for _i in cell["outputs"] if "text" not in _i or
not _i["text"].startswith("<obspy.core")]
cell["outputs"] = outputs
new_cells.append(cell)
nb.worksheets[0].cells = new_cells
return nb |
def subslice(inner,outer,section):
'helper for rediff\
outer is a slice (2-tuple, not an official python slice) in global coordinates\
inner is a slice (2-tuple) on that slice\
returns the result of sub-slicing outer by inner'
# todo: think about constraints here. inner and outer ordered, inner[1] less than outer[1]-outer[0]
# todo: this would make more sense as a member of a Slice class
if section=='head': return outer[0],outer[0]+inner[0]
elif section=='tail': return outer[0]+inner[1],outer[1]
elif section=='middle': return outer[0]+inner[0],outer[0]+inner[1]
else: raise ValueError('section val %s not one of (head,middle,tail)'%section) | helper for rediff\
outer is a slice (2-tuple, not an official python slice) in global coordinates\
inner is a slice (2-tuple) on that slice\
returns the result of sub-slicing outer by inner | Below is the the instruction that describes the task:
### Input:
helper for rediff\
outer is a slice (2-tuple, not an official python slice) in global coordinates\
inner is a slice (2-tuple) on that slice\
returns the result of sub-slicing outer by inner
### Response:
def subslice(inner,outer,section):
'helper for rediff\
outer is a slice (2-tuple, not an official python slice) in global coordinates\
inner is a slice (2-tuple) on that slice\
returns the result of sub-slicing outer by inner'
# todo: think about constraints here. inner and outer ordered, inner[1] less than outer[1]-outer[0]
# todo: this would make more sense as a member of a Slice class
if section=='head': return outer[0],outer[0]+inner[0]
elif section=='tail': return outer[0]+inner[1],outer[1]
elif section=='middle': return outer[0]+inner[0],outer[0]+inner[1]
else: raise ValueError('section val %s not one of (head,middle,tail)'%section) |
def _unjsonify(x, isattributes=False):
"""Convert JSON string to an ordered defaultdict."""
if isattributes:
obj = json.loads(x)
return dict_class(obj)
return json.loads(x) | Convert JSON string to an ordered defaultdict. | Below is the the instruction that describes the task:
### Input:
Convert JSON string to an ordered defaultdict.
### Response:
def _unjsonify(x, isattributes=False):
"""Convert JSON string to an ordered defaultdict."""
if isattributes:
obj = json.loads(x)
return dict_class(obj)
return json.loads(x) |
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s | Return the current date and time formatted for a message header. | Below is the the instruction that describes the task:
### Input:
Return the current date and time formatted for a message header.
### Response:
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s |
def make_service_hooks_update_groups_action(service):
"""
Make a admin action for the given service
:param service: services.hooks.ServicesHook
:return: fn to update services groups for the selected users
"""
def update_service_groups(modeladmin, request, queryset):
for user in queryset: # queryset filtering doesn't work here?
service.update_groups(user)
update_service_groups.__name__ = str('update_{}_groups'.format(slugify(service.name)))
update_service_groups.short_description = "Sync groups for selected {} accounts".format(service.title)
return update_service_groups | Make a admin action for the given service
:param service: services.hooks.ServicesHook
:return: fn to update services groups for the selected users | Below is the the instruction that describes the task:
### Input:
Make a admin action for the given service
:param service: services.hooks.ServicesHook
:return: fn to update services groups for the selected users
### Response:
def make_service_hooks_update_groups_action(service):
"""
Make a admin action for the given service
:param service: services.hooks.ServicesHook
:return: fn to update services groups for the selected users
"""
def update_service_groups(modeladmin, request, queryset):
for user in queryset: # queryset filtering doesn't work here?
service.update_groups(user)
update_service_groups.__name__ = str('update_{}_groups'.format(slugify(service.name)))
update_service_groups.short_description = "Sync groups for selected {} accounts".format(service.title)
return update_service_groups |
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha) | Return the hash of a downloaded file. | Below is the the instruction that describes the task:
### Input:
Return the hash of a downloaded file.
### Response:
def hash_of_file(path):
"""Return the hash of a downloaded file."""
with open(path, 'rb') as archive:
sha = sha256()
while True:
data = archive.read(2 ** 20)
if not data:
break
sha.update(data)
return encoded_hash(sha) |
def list_all(prefix=None, app=None, owner=None, description_contains=None,
name_not_contains=None, profile="splunk"):
'''
Get all splunk search details. Produces results that can be used to create
an sls file.
if app or owner are specified, results will be limited to matching saved
searches.
if description_contains is specified, results will be limited to those
where "description_contains in description" is true if name_not_contains is
specified, results will be limited to those where "name_not_contains not in
name" is true.
If prefix parameter is given, alarm names in the output will be prepended
with the prefix; alarms that have the prefix will be skipped. This can be
used to convert existing alarms to be managed by salt, as follows:
CLI example:
1. Make a "backup" of all existing searches
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls
2. Get all searches with new prefixed names
$ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls
3. Insert the managed searches into splunk
$ salt-call state.sls managed_searches.sls
4. Manually verify that the new searches look right
5. Delete the original searches
$ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
$ salt-call state.sls remove_legacy_searches.sls
6. Get all searches again, verify no changes
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
$ diff final_searches.sls managed_searches.sls
'''
client = _get_splunk(profile)
# splunklib doesn't provide the default settings for saved searches.
# so, in order to get the defaults, we create a search with no
# configuration, get that search, and then delete it. We use its contents
# as the default settings
name = "splunk_search.list_all get defaults"
try:
client.saved_searches.delete(name)
except Exception:
pass
search = client.saved_searches.create(name, search="nothing")
defaults = dict(search.content)
client.saved_searches.delete(name)
# stuff that splunk returns but that you should not attempt to set.
# cf http://dev.splunk.com/view/python-sdk/SP-CAAAEK2
readonly_keys = ("triggered_alert_count",
"action.email",
"action.populate_lookup",
"action.rss",
"action.script",
"action.summary_index",
"qualifiedSearch",
"next_scheduled_time")
results = OrderedDict()
# sort the splunk searches by name, so we get consistent output
searches = sorted([(s.name, s) for s in client.saved_searches])
for name, search in searches:
if app and search.access.app != app:
continue
if owner and search.access.owner != owner:
continue
if name_not_contains and name_not_contains in name:
continue
if prefix:
if name.startswith(prefix):
continue
name = prefix + name
# put name in the OrderedDict first
d = [{"name": name}]
# add the rest of the splunk settings, ignoring any defaults
description = ''
for (k, v) in sorted(search.content.items()):
if k in readonly_keys:
continue
if k.startswith("display."):
continue
if not v:
continue
if k in defaults and defaults[k] == v:
continue
d.append({k: v})
if k == 'description':
description = v
if description_contains and description_contains not in description:
continue
results["manage splunk search " + name] = {"splunk_search.present": d}
return salt.utils.yaml.safe_dump(results, default_flow_style=False, width=120) | Get all splunk search details. Produces results that can be used to create
an sls file.
if app or owner are specified, results will be limited to matching saved
searches.
if description_contains is specified, results will be limited to those
where "description_contains in description" is true if name_not_contains is
specified, results will be limited to those where "name_not_contains not in
name" is true.
If prefix parameter is given, alarm names in the output will be prepended
with the prefix; alarms that have the prefix will be skipped. This can be
used to convert existing alarms to be managed by salt, as follows:
CLI example:
1. Make a "backup" of all existing searches
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls
2. Get all searches with new prefixed names
$ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls
3. Insert the managed searches into splunk
$ salt-call state.sls managed_searches.sls
4. Manually verify that the new searches look right
5. Delete the original searches
$ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
$ salt-call state.sls remove_legacy_searches.sls
6. Get all searches again, verify no changes
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
$ diff final_searches.sls managed_searches.sls | Below is the the instruction that describes the task:
### Input:
Get all splunk search details. Produces results that can be used to create
an sls file.
if app or owner are specified, results will be limited to matching saved
searches.
if description_contains is specified, results will be limited to those
where "description_contains in description" is true if name_not_contains is
specified, results will be limited to those where "name_not_contains not in
name" is true.
If prefix parameter is given, alarm names in the output will be prepended
with the prefix; alarms that have the prefix will be skipped. This can be
used to convert existing alarms to be managed by salt, as follows:
CLI example:
1. Make a "backup" of all existing searches
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls
2. Get all searches with new prefixed names
$ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls
3. Insert the managed searches into splunk
$ salt-call state.sls managed_searches.sls
4. Manually verify that the new searches look right
5. Delete the original searches
$ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
$ salt-call state.sls remove_legacy_searches.sls
6. Get all searches again, verify no changes
$ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
$ diff final_searches.sls managed_searches.sls
### Response:
def list_all(prefix=None, app=None, owner=None, description_contains=None,
             name_not_contains=None, profile="splunk"):
    '''
    Get all splunk search details. Produces results that can be used to create
    an sls file.

    if app or owner are specified, results will be limited to matching saved
    searches.

    if description_contains is specified, results will be limited to those
    where "description_contains in description" is true if name_not_contains is
    specified, results will be limited to those where "name_not_contains not in
    name" is true.

    If prefix parameter is given, alarm names in the output will be prepended
    with the prefix; alarms that have the prefix will be skipped. This can be
    used to convert existing alarms to be managed by salt, as follows:

    CLI example:

    1. Make a "backup" of all existing searches
        $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > legacy_searches.sls
    2. Get all searches with new prefixed names
        $ salt-call splunk_search.list_all "prefix=**MANAGED BY SALT** " --out=txt | sed "s/local: //" > managed_searches.sls
    3. Insert the managed searches into splunk
        $ salt-call state.sls managed_searches.sls
    4. Manually verify that the new searches look right
    5. Delete the original searches
        $ sed s/present/absent/ legacy_searches.sls > remove_legacy_searches.sls
        $ salt-call state.sls remove_legacy_searches.sls
    6. Get all searches again, verify no changes
        $ salt-call splunk_search.list_all --out=txt | sed "s/local: //" > final_searches.sls
        $ diff final_searches.sls managed_searches.sls
    '''
    client = _get_splunk(profile)
    # splunklib doesn't provide the default settings for saved searches.
    # so, in order to get the defaults, we create a search with no
    # configuration, get that search, and then delete it. We use its contents
    # as the default settings
    name = "splunk_search.list_all get defaults"
    try:
        client.saved_searches.delete(name)
    except Exception:
        pass
    search = client.saved_searches.create(name, search="nothing")
    defaults = dict(search.content)
    client.saved_searches.delete(name)
    # stuff that splunk returns but that you should not attempt to set.
    # cf http://dev.splunk.com/view/python-sdk/SP-CAAAEK2
    readonly_keys = ("triggered_alert_count",
                     "action.email",
                     "action.populate_lookup",
                     "action.rss",
                     "action.script",
                     "action.summary_index",
                     "qualifiedSearch",
                     "next_scheduled_time")
    results = OrderedDict()
    # sort the splunk searches by name, so we get consistent output
    searches = sorted([(s.name, s) for s in client.saved_searches])
    for name, search in searches:
        # apply the cheap app/owner/name filters before inspecting content
        if app and search.access.app != app:
            continue
        if owner and search.access.owner != owner:
            continue
        if name_not_contains and name_not_contains in name:
            continue
        if prefix:
            # searches that already carry the prefix are assumed to be
            # salt-managed already and are skipped
            if name.startswith(prefix):
                continue
            name = prefix + name
        # put name in the OrderedDict first
        d = [{"name": name}]
        # add the rest of the splunk settings, ignoring any defaults
        description = ''
        for (k, v) in sorted(search.content.items()):
            if k in readonly_keys:
                continue
            if k.startswith("display."):
                continue
            if not v:
                continue
            if k in defaults and defaults[k] == v:
                continue
            d.append({k: v})
            if k == 'description':
                description = v
        # the description filter can only run after the settings scan above
        # has captured the search's description value
        if description_contains and description_contains not in description:
            continue
        results["manage splunk search " + name] = {"splunk_search.present": d}
    return salt.utils.yaml.safe_dump(results, default_flow_style=False, width=120)
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
    return EncodeField | Returns an encoder for a string field. | Below is the instruction that describes the task:
### Input:
Returns an encoder for a string field.
### Response:
def StringEncoder(field_number, is_repeated, is_packed):
    """Returns an encoder for a string field.

    The returned closure writes each value as: field tag, varint-encoded
    UTF-8 byte length, then the UTF-8 bytes themselves.
    """
    # Precompute the wire-format tag once; strings are length-delimited.
    tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    # Bind hot callables to locals so the generated closures avoid
    # global/builtin lookups on every call.
    local_EncodeVarint = _EncodeVarint
    local_len = len
    # Length-delimited fields can never use packed encoding.
    assert not is_packed
    if is_repeated:
        def EncodeRepeatedField(write, value):
            # Emit tag + length + bytes for every element in the list.
            for element in value:
                encoded = element.encode('utf-8')
                write(tag)
                local_EncodeVarint(write, local_len(encoded))
                write(encoded)
        return EncodeRepeatedField
    else:
        def EncodeField(write, value):
            encoded = value.encode('utf-8')
            write(tag)
            local_EncodeVarint(write, local_len(encoded))
            return write(encoded)
    return EncodeField
def can_view(self, user):
"""Return whether or not `user` can view the submission."""
    return user in self.group.users or self.project.can_view(user) | Return whether or not `user` can view the submission. | Below is the instruction that describes the task:
### Input:
Return whether or not `user` can view the submission.
### Response:
def can_view(self, user):
    """Return whether or not `user` can view the submission."""
    is_group_member = user in self.group.users
    return is_group_member or self.project.can_view(user)
def clear_last_check(self):
"""Clear the checksum of the file."""
with db.session.begin_nested():
self.last_check = None
self.last_check_at = datetime.utcnow()
        return self | Clear the checksum of the file. | Below is the instruction that describes the task:
### Input:
Clear the checksum of the file.
### Response:
def clear_last_check(self):
        """Clear the checksum of the file.

        :returns: self, so the call can be chained
        """
        # Use a savepoint so the reset commits/rolls back together with
        # the caller's surrounding transaction.
        with db.session.begin_nested():
            self.last_check = None
            # Record when the check state was last touched.
            self.last_check_at = datetime.utcnow()
        return self
def unique_id(self):
"""Creates a unique ID for the `Atom` based on its parents.
Returns
-------
unique_id : (str, str, str)
(polymer.id, residue.id, atom.id)
"""
chain = self.ampal_parent.ampal_parent.id
residue = self.ampal_parent.id
return chain, residue, self.id | Creates a unique ID for the `Atom` based on its parents.
Returns
-------
unique_id : (str, str, str)
(polymer.id, residue.id, atom.id) | Below is the the instruction that describes the task:
### Input:
Creates a unique ID for the `Atom` based on its parents.
Returns
-------
unique_id : (str, str, str)
(polymer.id, residue.id, atom.id)
### Response:
def unique_id(self):
    """Creates a unique ID for the `Atom` based on its parents.

    Returns
    -------
    unique_id : (str, str, str)
        (polymer.id, residue.id, atom.id)
    """
    residue_parent = self.ampal_parent
    polymer_parent = residue_parent.ampal_parent
    return polymer_parent.id, residue_parent.id, self.id
def create_pvsm_file(vtk_files, pvsm_filename, relative_paths=True):
"""
Create paraview status file (.pvsm) based on input vtk files.
:param vtk_files:
:param pvsm_filename:
:param relative_paths:
:return:
"""
from xml.etree.ElementTree import Element, SubElement, Comment
import os.path as op
top = Element('ParaView')
comment = Comment('Generated for PyMOTW')
top.append(comment)
numberi = 4923
# vtk_file = "C:\Users\miros\lisa_data\83779720_2_liver.vtk"
sms = SubElement(top, "ServerManagerState", version="5.4.1")
file_list = SubElement(sms, "ProxyCollection", name="sources")
for vtk_file_orig in vtk_files:
numberi +=1
dir, vtk_file_head = op.split(vtk_file_orig)
if relative_paths:
vtk_file = vtk_file_head
else:
vtk_file = vtk_file_orig
number = str(numberi)
proxy1 = SubElement(sms, "Proxy", group="sources", type="LegacyVTKFileReader", id=number, servers="1")
property = SubElement(proxy1, "Property", name="FileNameInfo", id=number + ".FileNameInfo", number_of_elements="1")
element = SubElement(property, "Element", index="0", value=vtk_file)
property2 = SubElement(proxy1, "Property", name="FileNames", id=number + ".FileNames", number_of_elements="1")
pr2s1 = SubElement(property2, "Element", index="0", value=vtk_file)
pr2s2 = SubElement(property2, "Domain", name="files", id=number + ".FileNames.files")
# < Property
# name = "Opacity"
# id = "8109.Opacity"
# number_of_elements = "1" >
# < Element
# index = "0"
# value = "0.28" / >
# < Domain
# name = "range"
# id = "8109.Opacity.range" / >
# < / Property >
fn1 = SubElement(file_list, "Item", id=number, name=vtk_file_head)
xml_str = prettify(top)
# logger.debug(xml_str)
with open(op.expanduser(pvsm_filename), "w") as file:
file.write(xml_str) | Create paraview status file (.pvsm) based on input vtk files.
:param vtk_files:
:param pvsm_filename:
:param relative_paths:
:return: | Below is the the instruction that describes the task:
### Input:
Create paraview status file (.pvsm) based on input vtk files.
:param vtk_files:
:param pvsm_filename:
:param relative_paths:
:return:
### Response:
def create_pvsm_file(vtk_files, pvsm_filename, relative_paths=True):
    """
    Create a ParaView state file (.pvsm) referencing the given VTK files.

    :param vtk_files: iterable of paths to legacy .vtk files
    :param pvsm_filename: output path for the .pvsm state file
        (``~`` is expanded)
    :param relative_paths: if True, only the file names (not full paths)
        are written into the state file
    :return: None; the state file is written to ``pvsm_filename``
    """
    from xml.etree.ElementTree import Element, SubElement, Comment
    import os.path as op
    top = Element('ParaView')
    top.append(Comment('Generated for PyMOTW'))
    # Proxy ids only need to be unique within the state file; start from an
    # arbitrary offset (first id used is 4924, as before).
    proxy_id = 4923
    sms = SubElement(top, "ServerManagerState", version="5.4.1")
    file_list = SubElement(sms, "ProxyCollection", name="sources")
    for vtk_file_orig in vtk_files:
        proxy_id += 1
        # NOTE: renamed locals so builtins `dir`, `property` and `file`
        # are no longer shadowed.
        _, vtk_file_head = op.split(vtk_file_orig)
        vtk_file = vtk_file_head if relative_paths else vtk_file_orig
        number = str(proxy_id)
        proxy = SubElement(sms, "Proxy", group="sources",
                           type="LegacyVTKFileReader", id=number, servers="1")
        prop_info = SubElement(proxy, "Property", name="FileNameInfo",
                               id=number + ".FileNameInfo",
                               number_of_elements="1")
        SubElement(prop_info, "Element", index="0", value=vtk_file)
        prop_names = SubElement(proxy, "Property", name="FileNames",
                                id=number + ".FileNames",
                                number_of_elements="1")
        SubElement(prop_names, "Element", index="0", value=vtk_file)
        SubElement(prop_names, "Domain", name="files",
                   id=number + ".FileNames.files")
        SubElement(file_list, "Item", id=number, name=vtk_file_head)
    xml_str = prettify(top)
    with open(op.expanduser(pvsm_filename), "w") as out_file:
        out_file.write(xml_str)
def get_signal_level(cell):
""" Gets the signal level of a network / cell.
@param string cell
A network / cell from iwlist scan.
@return string
The signal level of the network.
"""
signal = matching_line(cell, "Signal level=")
if signal is None:
return ""
signal = signal.split("=")[1].split("/")
if len(signal) == 2:
return str(int(round(float(signal[0]) / float(signal[1]) * 100)))
elif len(signal) == 1:
return signal[0].split(' ')[0]
else:
return "" | Gets the signal level of a network / cell.
@param string cell
A network / cell from iwlist scan.
@return string
The signal level of the network. | Below is the the instruction that describes the task:
### Input:
Gets the signal level of a network / cell.
@param string cell
A network / cell from iwlist scan.
@return string
The signal level of the network.
### Response:
def get_signal_level(cell):
    """ Gets the signal level of a network / cell.
    @param string cell
        A network / cell from iwlist scan.
    @return string
        The signal level of the network ('' if none found).
    """
    line = matching_line(cell, "Signal level=")
    if line is None:
        return ""
    parts = line.split("=")[1].split("/")
    if len(parts) == 2:
        # "x/y" form: convert to a rounded percentage string.
        percent = float(parts[0]) / float(parts[1]) * 100
        return str(int(round(percent)))
    if len(parts) == 1:
        # Plain dBm form: keep only the leading token.
        return parts[0].split(' ')[0]
    return ""
def regen_keys():
'''
Used to regenerate the minion keys.
CLI Example:
.. code-block:: bash
salt '*' saltutil.regen_keys
'''
for fn_ in os.listdir(__opts__['pki_dir']):
path = os.path.join(__opts__['pki_dir'], fn_)
try:
os.remove(path)
except os.error:
pass
# TODO: move this into a channel function? Or auth?
# create a channel again, this will force the key regen
channel = salt.transport.client.ReqChannel.factory(__opts__)
channel.close() | Used to regenerate the minion keys.
CLI Example:
.. code-block:: bash
    salt '*' saltutil.regen_keys | Below is the instruction that describes the task:
### Input:
Used to regenerate the minion keys.
CLI Example:
.. code-block:: bash
salt '*' saltutil.regen_keys
### Response:
def regen_keys():
    '''
    Used to regenerate the minion keys.
    CLI Example:
    .. code-block:: bash
    salt '*' saltutil.regen_keys
    '''
    # Remove every existing key file from the minion's pki directory.
    for fn_ in os.listdir(__opts__['pki_dir']):
        path = os.path.join(__opts__['pki_dir'], fn_)
        try:
            os.remove(path)
        except os.error:
            # Best effort: ignore files that cannot be removed.
            pass
    # TODO: move this into a channel function? Or auth?
    # create a channel again, this will force the key regen
    channel = salt.transport.client.ReqChannel.factory(__opts__)
    channel.close()
async def _get_response(self, msg):
"""Perform the request, get the response."""
try:
protocol = await self._get_protocol()
pr = protocol.request(msg)
r = await pr.response
return pr, r
except ConstructionRenderableError as e:
raise ClientError("There was an error with the request.", e)
except RequestTimedOut as e:
await self._reset_protocol(e)
raise RequestTimeout('Request timed out.', e)
except (OSError, socket.gaierror, Error) as e:
# aiocoap sometimes raises an OSError/socket.gaierror too.
# aiocoap issue #124
await self._reset_protocol(e)
raise ServerError("There was an error with the request.", e)
except asyncio.CancelledError as e:
await self._reset_protocol(e)
raise e | Perform the request, get the response. | Below is the the instruction that describes the task:
### Input:
Perform the request, get the response.
### Response:
async def _get_response(self, msg):
        """Perform the request, get the response.

        :param msg: the CoAP message to send
        :returns: tuple of (pending request, response)
        :raises ClientError: if the request could not be rendered
        :raises RequestTimeout: if the request timed out
        :raises ServerError: on transport-level errors
        """
        try:
            protocol = await self._get_protocol()
            pr = protocol.request(msg)
            r = await pr.response
            return pr, r
        except ConstructionRenderableError as e:
            raise ClientError("There was an error with the request.", e)
        except RequestTimedOut as e:
            # Drop the protocol so the next call starts from a clean state.
            await self._reset_protocol(e)
            raise RequestTimeout('Request timed out.', e)
        except (OSError, socket.gaierror, Error) as e:
            # aiocoap sometimes raises an OSError/socket.gaierror too.
            # aiocoap issue #124
            await self._reset_protocol(e)
            raise ServerError("There was an error with the request.", e)
        except asyncio.CancelledError as e:
            # Reset before propagating cancellation to the caller.
            await self._reset_protocol(e)
            raise e
def applet_describe(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /applet-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Fdescribe
"""
return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs) | Invokes the /applet-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Fdescribe | Below is the the instruction that describes the task:
### Input:
Invokes the /applet-xxxx/describe API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Fdescribe
### Response:
def applet_describe(object_id, input_params=None, always_retry=True, **kwargs):
    """
    Invokes the /applet-xxxx/describe API method.
    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2Fdescribe

    :param object_id: the "applet-xxxx" ID to describe
    :param input_params: optional dict of API input parameters
        (defaults to an empty dict)
    :param always_retry: whether the request may safely be retried
    :return: the result of the DXHTTPRequest describe call
    """
    # Default to None instead of a shared mutable {} so downstream
    # mutation of the dict cannot leak state between calls.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/describe' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
def previous_page_url(self):
"""
:return str: Returns a link to the previous_page_url or None if doesn't exist.
"""
if 'meta' in self._payload and 'previous_page_url' in self._payload['meta']:
return self._payload['meta']['previous_page_url']
elif 'previous_page_uri' in self._payload and self._payload['previous_page_uri']:
return self._version.domain.absolute_url(self._payload['previous_page_uri'])
return None | :return str: Returns a link to the previous_page_url or None if doesn't exist. | Below is the the instruction that describes the task:
### Input:
:return str: Returns a link to the previous_page_url or None if doesn't exist.
### Response:
def previous_page_url(self):
    """
    :return str: Returns a link to the previous_page_url or None if doesn't exist.
    """
    meta = self._payload.get('meta', {})
    if 'previous_page_url' in meta:
        return meta['previous_page_url']
    prev_uri = self._payload.get('previous_page_uri')
    if prev_uri:
        return self._version.domain.absolute_url(prev_uri)
    return None
def _make_content_item(node, mime_type=None, alternate_data=None):
"""Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed
"""
raw = node.data
if getattr(node, 'encoding', None) == 'zlib':
try:
raw = zlib.decompress(node.data)
except Exception, exc:
if alternate_data is not None:
try:
raw = zlib.decompress(alternate_data)
except Exception:
raise exc # the original exception
else:
raise
if mime_type is None:
mime_type = node.mime_type
raw = raw.decode('utf8').encode('utf8')
return streamcorpus.ContentItem(raw=raw, media_type=mime_type) | Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed | Below is the the instruction that describes the task:
### Input:
Create a ContentItem from a node in the spinn3r data tree.
The ContentItem is created with raw data set to ``node.data``,
decompressed if the node's encoding is 'zlib', and UTF-8
normalized, with a MIME type from ``node.mime_type``.
``node``
the actual node from the spinn3r protobuf data
``mime_type``
string MIME type to use (defaults to ``node.mime_type``)
``alternate_data``
alternate (compressed) data to use, if ``node.data`` is missing
or can't be decompressed
### Response:
def _make_content_item(node, mime_type=None, alternate_data=None):
    """Create a ContentItem from a node in the spinn3r data tree.
    The ContentItem is created with raw data set to ``node.data``,
    decompressed if the node's encoding is 'zlib', and UTF-8
    normalized, with a MIME type from ``node.mime_type``.
    ``node``
        the actual node from the spinn3r protobuf data
    ``mime_type``
        string MIME type to use (defaults to ``node.mime_type``)
    ``alternate_data``
        alternate (compressed) data to use, if ``node.data`` is missing
        or can't be decompressed
    """
    raw = node.data
    if getattr(node, 'encoding', None) == 'zlib':
        try:
            raw = zlib.decompress(node.data)
        except Exception, exc:  # NOTE: Python 2 'except' syntax
            # Primary payload failed; fall back to the alternate payload
            # if one was supplied, otherwise re-raise.
            if alternate_data is not None:
                try:
                    raw = zlib.decompress(alternate_data)
                except Exception:
                    raise exc  # the original exception
            else:
                raise
    if mime_type is None:
        mime_type = node.mime_type
    # Round-trip through unicode so the stored bytes are valid UTF-8.
    raw = raw.decode('utf8').encode('utf8')
    return streamcorpus.ContentItem(raw=raw, media_type=mime_type)
def get_jobs_url(self, job_id):
# type: (Text) -> Text
"""
Returns the URL to check job status.
:param job_id:
The ID of the job to check.
"""
return compat.urllib_parse.urlunsplit((
self.uri.scheme,
self.uri.netloc,
self.uri.path.rstrip('/') + '/jobs/' + job_id,
self.uri.query,
self.uri.fragment,
)) | Returns the URL to check job status.
:param job_id:
The ID of the job to check. | Below is the the instruction that describes the task:
### Input:
Returns the URL to check job status.
:param job_id:
The ID of the job to check.
### Response:
def get_jobs_url(self, job_id):
    # type: (Text) -> Text
    """
    Returns the URL to check job status.
    :param job_id:
        The ID of the job to check.
    """
    base = self.uri
    # Append "/jobs/<job_id>" to the base path, avoiding a double slash.
    job_path = base.path.rstrip('/') + '/jobs/' + job_id
    parts = (base.scheme, base.netloc, job_path, base.query, base.fragment)
    return compat.urllib_parse.urlunsplit(parts)
def phenotypes_actions(institute_id, case_name):
"""Perform actions on multiple phenotypes."""
institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
case_url = url_for('.case', institute_id=institute_id, case_name=case_name)
action = request.form['action']
hpo_ids = request.form.getlist('hpo_id')
user_obj = store.user(current_user.email)
if action == 'DELETE':
for hpo_id in hpo_ids:
# DELETE a phenotype from the list
store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id)
elif action == 'PHENOMIZER':
if len(hpo_ids) == 0:
hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
username = current_app.config['PHENOMIZER_USERNAME']
password = current_app.config['PHENOMIZER_PASSWORD']
diseases = controllers.hpo_diseases(username, password, hpo_ids)
return render_template('cases/diseases.html', diseases=diseases,
institute=institute_obj, case=case_obj)
elif action == 'GENES':
hgnc_symbols = set()
for raw_symbols in request.form.getlist('genes'):
# avoid empty lists
if raw_symbols:
hgnc_symbols.update(raw_symbol.split(' ', 1)[0] for raw_symbol in
raw_symbols.split('|'))
store.update_dynamic_gene_list(case_obj, hgnc_symbols=hgnc_symbols)
elif action == 'GENERATE':
if len(hpo_ids) == 0:
hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
results = store.generate_hpo_gene_list(*hpo_ids)
# determine how many HPO terms each gene must match
hpo_count = int(request.form.get('min_match') or 1)
hgnc_ids = [result[0] for result in results if result[1] >= hpo_count]
store.update_dynamic_gene_list(case_obj, hgnc_ids=hgnc_ids, phenotype_ids=hpo_ids)
    return redirect(case_url) | Perform actions on multiple phenotypes. | Below is the instruction that describes the task:
### Input:
Perform actions on multiple phenotypes.
### Response:
def phenotypes_actions(institute_id, case_name):
    """Perform actions on multiple phenotypes.

    The form field ``action`` selects the branch below:
    DELETE removes the selected HPO terms; PHENOMIZER queries matching
    diseases; GENES updates the dynamic gene list from submitted symbols;
    GENERATE builds the dynamic gene list from HPO-matched genes.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    case_url = url_for('.case', institute_id=institute_id, case_name=case_name)
    action = request.form['action']
    hpo_ids = request.form.getlist('hpo_id')
    user_obj = store.user(current_user.email)
    if action == 'DELETE':
        for hpo_id in hpo_ids:
            # DELETE a phenotype from the list
            store.remove_phenotype(institute_obj, case_obj, user_obj, case_url, hpo_id)
    elif action == 'PHENOMIZER':
        # No explicit selection means "use every phenotype on the case".
        if len(hpo_ids) == 0:
            hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
        username = current_app.config['PHENOMIZER_USERNAME']
        password = current_app.config['PHENOMIZER_PASSWORD']
        diseases = controllers.hpo_diseases(username, password, hpo_ids)
        return render_template('cases/diseases.html', diseases=diseases,
                               institute=institute_obj, case=case_obj)
    elif action == 'GENES':
        hgnc_symbols = set()
        for raw_symbols in request.form.getlist('genes'):
            # avoid empty lists
            if raw_symbols:
                # keep only the leading symbol of each "|"-separated entry
                hgnc_symbols.update(raw_symbol.split(' ', 1)[0] for raw_symbol in
                                    raw_symbols.split('|'))
        store.update_dynamic_gene_list(case_obj, hgnc_symbols=hgnc_symbols)
    elif action == 'GENERATE':
        if len(hpo_ids) == 0:
            hpo_ids = [term['phenotype_id'] for term in case_obj.get('phenotype_terms', [])]
        results = store.generate_hpo_gene_list(*hpo_ids)
        # determine how many HPO terms each gene must match
        hpo_count = int(request.form.get('min_match') or 1)
        hgnc_ids = [result[0] for result in results if result[1] >= hpo_count]
        store.update_dynamic_gene_list(case_obj, hgnc_ids=hgnc_ids, phenotype_ids=hpo_ids)
    return redirect(case_url)
def markdown_media_css():
""" Add css requirements to HTML.
:returns: Editor template context.
"""
return dict(
CSS_SET=posixpath.join(
settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css'
),
CSS_SKIN=posixpath.join(
'django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN,
'style.css'
)
) | Add css requirements to HTML.
:returns: Editor template context. | Below is the the instruction that describes the task:
### Input:
Add css requirements to HTML.
:returns: Editor template context.
### Response:
def markdown_media_css():
    """ Add css requirements to HTML.
    :returns: Editor template context.
    """
    css_set = posixpath.join(
        settings.MARKDOWN_SET_PATH, settings.MARKDOWN_SET_NAME, 'style.css')
    css_skin = posixpath.join(
        'django_markdown', 'skins', settings.MARKDOWN_EDITOR_SKIN,
        'style.css')
    return dict(CSS_SET=css_set, CSS_SKIN=css_skin)
def _init_client(self, from_archive=False):
"""Init client"""
        return ConfluenceClient(self.url, archive=self.archive, from_archive=from_archive) | Init client | Below is the instruction that describes the task:
### Input:
Init client
### Response:
def _init_client(self, from_archive=False):
        """Init client.

        :param from_archive: if True, the client fetches data from the
            archive instead of the live Confluence instance
        :returns: a ``ConfluenceClient`` bound to this backend's url/archive
        """
        return ConfluenceClient(self.url, archive=self.archive, from_archive=from_archive)
def value(self):
"""
Return the value of this SpinBox.
"""
if self.opts['int']:
return int(self.val)
else:
        return float(self.val) | Return the value of this SpinBox. | Below is the instruction that describes the task:
### Input:
Return the value of this SpinBox.
### Response:
def value(self):
    """
    Return the value of this SpinBox (as int or float depending on the
    'int' option).
    """
    cast = int if self.opts['int'] else float
    return cast(self.val)
def labels(self):
"""
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
"""
labelings = OrderedDict()
for tree in self:
for label, line in tree.to_labeled_lines():
labelings[line] = label
return labelings | Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs. | Below is the the instruction that describes the task:
### Input:
Construct a dictionary of string -> labels
Returns:
--------
OrderedDict<str, int> : string label pairs.
### Response:
def labels(self):
    """
    Construct a dictionary of string -> labels

    Returns:
    --------
    OrderedDict<str, int> : string label pairs.
    """
    labelings = OrderedDict(
        (line, label)
        for tree in self
        for label, line in tree.to_labeled_lines()
    )
    return labelings
def moon_phase(self, date=None, rtype=int):
"""Calculates the moon phase for a specific date.
:param date: The date to calculate the phase for.
If ommitted the current date is used.
:type date: :class:`datetime.date`
:returns:
A number designating the phase
| 0 = New moon
| 7 = First quarter
| 14 = Full moon
| 21 = Last quarter
"""
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
return self.astral.moon_phase(date, rtype) | Calculates the moon phase for a specific date.
:param date: The date to calculate the phase for.
If ommitted the current date is used.
:type date: :class:`datetime.date`
:returns:
A number designating the phase
| 0 = New moon
| 7 = First quarter
| 14 = Full moon
| 21 = Last quarter | Below is the the instruction that describes the task:
### Input:
Calculates the moon phase for a specific date.
:param date: The date to calculate the phase for.
If ommitted the current date is used.
:type date: :class:`datetime.date`
:returns:
A number designating the phase
| 0 = New moon
| 7 = First quarter
| 14 = Full moon
| 21 = Last quarter
### Response:
def moon_phase(self, date=None, rtype=int):
        """Calculates the moon phase for a specific date.

        :param date: The date to calculate the phase for.
            If omitted the current date is used.
        :type date: :class:`datetime.date`
        :param rtype: type used for the returned phase number
            (forwarded to ``Astral.moon_phase``)
        :returns:
            A number designating the phase
            | 0 = New moon
            | 7 = First quarter
            | 14 = Full moon
            | 21 = Last quarter
        """
        # Lazily create the shared Astral calculator on first use.
        if self.astral is None:
            self.astral = Astral()
        if date is None:
            date = datetime.date.today()
        return self.astral.moon_phase(date, rtype)
def _lmowfv1(password):
"""
[MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as LMOWFv1 in document to create a one way hash of the
password. Only used in NTLMv1 auth without session security
:param password: The password or hash of the user we are trying to
authenticate with
:return res: A Lan Manager hash of the password supplied
"""
# if the password is a hash, return the LM hash
if re.match(r'^[a-fA-F\d]{32}:[a-fA-F\d]{32}$', password):
lm_hash = binascii.unhexlify(password.split(':')[0])
return lm_hash
# fix the password to upper case and length to 14 bytes
password = password.upper()
lm_pw = password.encode('utf-8')
padding_size = 0 if len(lm_pw) >= 14 else (14 - len(lm_pw))
lm_pw += b"\x00" * padding_size
# do hash
magic_str = b"KGS!@#$%" # page 56 in [MS-NLMP v28.0]
res = b""
dobj = DES(DES.key56_to_key64(lm_pw[0:7]))
res += dobj.encrypt(magic_str)
dobj = DES(DES.key56_to_key64(lm_pw[7:14]))
res += dobj.encrypt(magic_str)
return res | [MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as LMOWFv1 in document to create a one way hash of the
password. Only used in NTLMv1 auth without session security
:param password: The password or hash of the user we are trying to
authenticate with
:return res: A Lan Manager hash of the password supplied | Below is the the instruction that describes the task:
### Input:
[MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as LMOWFv1 in document to create a one way hash of the
password. Only used in NTLMv1 auth without session security
:param password: The password or hash of the user we are trying to
authenticate with
:return res: A Lan Manager hash of the password supplied
### Response:
def _lmowfv1(password):
"""
[MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as LMOWFv1 in document to create a one way hash of the
password. Only used in NTLMv1 auth without session security
:param password: The password or hash of the user we are trying to
authenticate with
:return res: A Lan Manager hash of the password supplied
"""
# if the password is a hash, return the LM hash
if re.match(r'^[a-fA-F\d]{32}:[a-fA-F\d]{32}$', password):
lm_hash = binascii.unhexlify(password.split(':')[0])
return lm_hash
# fix the password to upper case and length to 14 bytes
password = password.upper()
lm_pw = password.encode('utf-8')
padding_size = 0 if len(lm_pw) >= 14 else (14 - len(lm_pw))
lm_pw += b"\x00" * padding_size
# do hash
magic_str = b"KGS!@#$%" # page 56 in [MS-NLMP v28.0]
res = b""
dobj = DES(DES.key56_to_key64(lm_pw[0:7]))
res += dobj.encrypt(magic_str)
dobj = DES(DES.key56_to_key64(lm_pw[7:14]))
res += dobj.encrypt(magic_str)
return res |
def normalize_init_values(cls, release, species, server):
"""
Normalizes the arguments which uniquely specify an EnsemblRelease
genome.
"""
release = check_release_number(release)
species = check_species_object(species)
return (release, species, server) | Normalizes the arguments which uniquely specify an EnsemblRelease
genome. | Below is the the instruction that describes the task:
### Input:
Normalizes the arguments which uniquely specify an EnsemblRelease
genome.
### Response:
def normalize_init_values(cls, release, species, server):
        """
        Normalizes the arguments which uniquely specify an EnsemblRelease
        genome.

        :param release: release identifier, validated via
            ``check_release_number``
        :param species: species identifier, validated via
            ``check_species_object``
        :param server: server URL, passed through unchanged
        :returns: tuple of (release, species, server)
        """
        release = check_release_number(release)
        species = check_species_object(species)
        return (release, species, server)
def copy_sqla_object(obj: object,
omit_fk: bool = True,
omit_pk: bool = True,
omit_attrs: List[str] = None,
debug: bool = False) -> object:
"""
Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT
MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and copies
across all attributes, omitting PKs (by default), FKs (by default), and
relationship attributes (always omitted).
Args:
obj: the object to copy
omit_fk: omit foreign keys (FKs)?
omit_pk: omit primary keys (PKs)?
omit_attrs: attributes (by name) not to copy
debug: be verbose
Returns:
a new copy of the object
"""
omit_attrs = omit_attrs or [] # type: List[str]
cls = type(obj)
mapper = class_mapper(cls)
newobj = cls() # not: cls.__new__(cls)
rel_keys = set([c.key for c in mapper.relationships])
prohibited = rel_keys
if omit_pk:
pk_keys = set([c.key for c in mapper.primary_key])
prohibited |= pk_keys
if omit_fk:
fk_keys = set([c.key for c in mapper.columns if c.foreign_keys])
prohibited |= fk_keys
prohibited |= set(omit_attrs)
if debug:
log.debug("copy_sqla_object: skipping: {}", prohibited)
for k in [p.key for p in mapper.iterate_properties
if p.key not in prohibited]:
try:
value = getattr(obj, k)
if debug:
log.debug("copy_sqla_object: processing attribute {} = {}",
k, value)
setattr(newobj, k, value)
except AttributeError:
if debug:
log.debug("copy_sqla_object: failed attribute {}", k)
pass
return newobj | Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT
MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and copies
across all attributes, omitting PKs (by default), FKs (by default), and
relationship attributes (always omitted).
Args:
obj: the object to copy
omit_fk: omit foreign keys (FKs)?
omit_pk: omit primary keys (PKs)?
omit_attrs: attributes (by name) not to copy
debug: be verbose
Returns:
a new copy of the object | Below is the the instruction that describes the task:
### Input:
Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT
MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and copies
across all attributes, omitting PKs (by default), FKs (by default), and
relationship attributes (always omitted).
Args:
obj: the object to copy
omit_fk: omit foreign keys (FKs)?
omit_pk: omit primary keys (PKs)?
omit_attrs: attributes (by name) not to copy
debug: be verbose
Returns:
a new copy of the object
### Response:
def copy_sqla_object(obj: object,
omit_fk: bool = True,
omit_pk: bool = True,
omit_attrs: List[str] = None,
debug: bool = False) -> object:
"""
Given an SQLAlchemy object, creates a new object (FOR WHICH THE OBJECT
MUST SUPPORT CREATION USING ``__init__()`` WITH NO PARAMETERS), and copies
across all attributes, omitting PKs (by default), FKs (by default), and
relationship attributes (always omitted).
Args:
obj: the object to copy
omit_fk: omit foreign keys (FKs)?
omit_pk: omit primary keys (PKs)?
omit_attrs: attributes (by name) not to copy
debug: be verbose
Returns:
a new copy of the object
"""
omit_attrs = omit_attrs or [] # type: List[str]
cls = type(obj)
mapper = class_mapper(cls)
newobj = cls() # not: cls.__new__(cls)
rel_keys = set([c.key for c in mapper.relationships])
prohibited = rel_keys
if omit_pk:
pk_keys = set([c.key for c in mapper.primary_key])
prohibited |= pk_keys
if omit_fk:
fk_keys = set([c.key for c in mapper.columns if c.foreign_keys])
prohibited |= fk_keys
prohibited |= set(omit_attrs)
if debug:
log.debug("copy_sqla_object: skipping: {}", prohibited)
for k in [p.key for p in mapper.iterate_properties
if p.key not in prohibited]:
try:
value = getattr(obj, k)
if debug:
log.debug("copy_sqla_object: processing attribute {} = {}",
k, value)
setattr(newobj, k, value)
except AttributeError:
if debug:
log.debug("copy_sqla_object: failed attribute {}", k)
pass
return newobj |
def filters(self, *filters):
"""
Add a list of Filter ingredients to the query. These can either be
Filter objects or strings representing filters on the service's shelf.
``.filters()`` are additive, calling .filters() more than once will add
to the list of filters being used by the recipe.
The Filter expression will be added to the query's where clause
:param filters: Filters to add to the recipe. Filters can
either be keys on the ``shelf`` or
Filter objects
:type filters: list
"""
def filter_constructor(f, shelf=None):
if isinstance(f, BinaryExpression):
return Filter(f)
else:
return f
for f in filters:
self._cauldron.use(
self._shelf.find(
f, (Filter, Having), constructor=filter_constructor
)
)
self.dirty = True
return self | Add a list of Filter ingredients to the query. These can either be
Filter objects or strings representing filters on the service's shelf.
``.filters()`` are additive, calling .filters() more than once will add
to the list of filters being used by the recipe.
The Filter expression will be added to the query's where clause
:param filters: Filters to add to the recipe. Filters can
either be keys on the ``shelf`` or
Filter objects
:type filters: list | Below is the the instruction that describes the task:
### Input:
Add a list of Filter ingredients to the query. These can either be
Filter objects or strings representing filters on the service's shelf.
``.filters()`` are additive, calling .filters() more than once will add
to the list of filters being used by the recipe.
The Filter expression will be added to the query's where clause
:param filters: Filters to add to the recipe. Filters can
either be keys on the ``shelf`` or
Filter objects
:type filters: list
### Response:
def filters(self, *filters):
"""
Add a list of Filter ingredients to the query. These can either be
Filter objects or strings representing filters on the service's shelf.
``.filters()`` are additive, calling .filters() more than once will add
to the list of filters being used by the recipe.
The Filter expression will be added to the query's where clause
:param filters: Filters to add to the recipe. Filters can
either be keys on the ``shelf`` or
Filter objects
:type filters: list
"""
def filter_constructor(f, shelf=None):
if isinstance(f, BinaryExpression):
return Filter(f)
else:
return f
for f in filters:
self._cauldron.use(
self._shelf.find(
f, (Filter, Having), constructor=filter_constructor
)
)
self.dirty = True
return self |
def results(self, query_name):
"""
Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set.
"""
url = "{0}/{1}/result".format(self.saved_query_url, query_name)
response = self._get_json(HTTPMethods.GET, url, self._get_read_key())
return response | Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set. | Below is the the instruction that describes the task:
### Input:
Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set.
### Response:
def results(self, query_name):
"""
Gets a single saved query with a 'result' object for a project from the
Keen IO API given a query name.
Read or Master key must be set.
"""
url = "{0}/{1}/result".format(self.saved_query_url, query_name)
response = self._get_json(HTTPMethods.GET, url, self._get_read_key())
return response |
def _HashFile(self, fd):
"""Look for the required hashes in the file."""
hashes = data_store_utils.GetFileHashEntry(fd)
if hashes:
found_all = True
for fingerprint_type, hash_types in iteritems(self.HASH_TYPES):
for hash_type in hash_types:
if fingerprint_type == "pecoff":
hash_type = "pecoff_%s" % hash_type
if not hashes.HasField(hash_type):
found_all = False
break
if not found_all:
break
if found_all:
return hashes
fingerprinter = fingerprint.Fingerprinter(fd)
if "generic" in self.HASH_TYPES:
hashers = self._GetHashers(self.HASH_TYPES["generic"])
fingerprinter.EvalGeneric(hashers=hashers)
if "pecoff" in self.HASH_TYPES:
hashers = self._GetHashers(self.HASH_TYPES["pecoff"])
if hashers:
fingerprinter.EvalPecoff(hashers=hashers)
if not hashes:
hashes = fd.Schema.HASH()
for result in fingerprinter.HashIt():
fingerprint_type = result["name"]
for hash_type in self.HASH_TYPES[fingerprint_type]:
if hash_type not in result:
continue
if hash_type == "SignedData":
# There can be several certs in the same file.
for signed_data in result[hash_type]:
hashes.signed_data.Append(
revision=signed_data[0],
cert_type=signed_data[1],
certificate=signed_data[2])
continue
# Set the hashes in the original object
if fingerprint_type == "generic":
hashes.Set(hash_type, result[hash_type])
elif fingerprint_type == "pecoff":
hashes.Set("pecoff_%s" % hash_type, result[hash_type])
else:
logging.error("Unknown fingerprint_type %s.", fingerprint_type)
return hashes | Look for the required hashes in the file. | Below is the the instruction that describes the task:
### Input:
Look for the required hashes in the file.
### Response:
def _HashFile(self, fd):
"""Look for the required hashes in the file."""
hashes = data_store_utils.GetFileHashEntry(fd)
if hashes:
found_all = True
for fingerprint_type, hash_types in iteritems(self.HASH_TYPES):
for hash_type in hash_types:
if fingerprint_type == "pecoff":
hash_type = "pecoff_%s" % hash_type
if not hashes.HasField(hash_type):
found_all = False
break
if not found_all:
break
if found_all:
return hashes
fingerprinter = fingerprint.Fingerprinter(fd)
if "generic" in self.HASH_TYPES:
hashers = self._GetHashers(self.HASH_TYPES["generic"])
fingerprinter.EvalGeneric(hashers=hashers)
if "pecoff" in self.HASH_TYPES:
hashers = self._GetHashers(self.HASH_TYPES["pecoff"])
if hashers:
fingerprinter.EvalPecoff(hashers=hashers)
if not hashes:
hashes = fd.Schema.HASH()
for result in fingerprinter.HashIt():
fingerprint_type = result["name"]
for hash_type in self.HASH_TYPES[fingerprint_type]:
if hash_type not in result:
continue
if hash_type == "SignedData":
# There can be several certs in the same file.
for signed_data in result[hash_type]:
hashes.signed_data.Append(
revision=signed_data[0],
cert_type=signed_data[1],
certificate=signed_data[2])
continue
# Set the hashes in the original object
if fingerprint_type == "generic":
hashes.Set(hash_type, result[hash_type])
elif fingerprint_type == "pecoff":
hashes.Set("pecoff_%s" % hash_type, result[hash_type])
else:
logging.error("Unknown fingerprint_type %s.", fingerprint_type)
return hashes |
def on(self, event: str) -> Callable:
""" Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
"""
def outer(func):
self.add_event(func, event)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return outer | Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable | Below is the the instruction that describes the task:
### Input:
Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
### Response:
def on(self, event: str) -> Callable:
""" Decorator for subscribing a function to a specific event.
:param event: Name of the event to subscribe to.
:type event: str
:return: The outer function.
:rtype: Callable
"""
def outer(func):
self.add_event(func, event)
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
return outer |
def table_is_subset_of(entries_a, entries_b):
"""Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered of lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`.
"""
# Determine which bits we don't need to explicitly test for
common_xs = get_common_xs(entries_b)
# For every entry in the first table
for entry in expand_entries(entries_a, ignore_xs=common_xs):
# Look at every entry in the second table
for other_entry in entries_b:
# If the first entry matches the second
if other_entry.mask & entry.key == other_entry.key:
if other_entry.route == entry.route:
# If the route is the same then we move on to the next
# entry in the first table.
break
else:
# Otherwise we return false as the tables are different
return False
else:
# If we didn't break out of the loop then the entry from the first
# table never matched an entry in the second table. If the entry
# from the first table could not be default routed we return False
# as the tables cannot be equivalent.
default_routed = False
if len(entry.route) == 1 and len(entry.sources) == 1:
source = next(iter(entry.sources))
sink = next(iter(entry.route))
if (source is not None and
sink.is_link and
source is sink.opposite):
default_routed = True
if not default_routed:
return False
return True | Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered of lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`. | Below is the the instruction that describes the task:
### Input:
Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered of lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`.
### Response:
def table_is_subset_of(entries_a, entries_b):
"""Check that every key matched by every entry in one table results in the
same route when checked against the other table.
For example, the table::
>>> from rig.routing_table import Routes
>>> table = [
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x0, 0xf),
... RoutingTableEntry({Routes.east}, 0x1, 0xf),
... RoutingTableEntry({Routes.south_west}, 0x5, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0x8, 0xf),
... RoutingTableEntry({Routes.east}, 0x9, 0xf),
... RoutingTableEntry({Routes.south_west}, 0xe, 0xf),
... RoutingTableEntry({Routes.north, Routes.north_east}, 0xc, 0xf),
... RoutingTableEntry({Routes.south, Routes.south_west}, 0x0, 0xb),
... ]
is a functional subset of a minimised version of itself::
>>> from rig.routing_table.ordered_covering import minimise
>>> other_table = minimise(table, target_length=None)
>>> other_table == table
False
>>> table_is_subset_of(table, other_table)
True
But not vice-versa::
>>> table_is_subset_of(other_table, table)
False
Default routes are taken into account, such that the table::
>>> table = [
... RoutingTableEntry({Routes.north}, 0x0, 0xf, {Routes.south}),
... ]
is a subset of the empty table::
>>> table_is_subset_of(table, list())
True
Parameters
----------
entries_a : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
entries_b : [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
Ordered of lists of routing table entries to compare.
Returns
-------
bool
True if every key matched in `entries_a` would result in an equivalent
route for the packet when matched in `entries_b`.
"""
# Determine which bits we don't need to explicitly test for
common_xs = get_common_xs(entries_b)
# For every entry in the first table
for entry in expand_entries(entries_a, ignore_xs=common_xs):
# Look at every entry in the second table
for other_entry in entries_b:
# If the first entry matches the second
if other_entry.mask & entry.key == other_entry.key:
if other_entry.route == entry.route:
# If the route is the same then we move on to the next
# entry in the first table.
break
else:
# Otherwise we return false as the tables are different
return False
else:
# If we didn't break out of the loop then the entry from the first
# table never matched an entry in the second table. If the entry
# from the first table could not be default routed we return False
# as the tables cannot be equivalent.
default_routed = False
if len(entry.route) == 1 and len(entry.sources) == 1:
source = next(iter(entry.sources))
sink = next(iter(entry.route))
if (source is not None and
sink.is_link and
source is sink.opposite):
default_routed = True
if not default_routed:
return False
return True |
def _default_ctx(self):
""" Return the package config or context and normalize some of the
values
"""
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx | Return the package config or context and normalize some of the
values | Below is the the instruction that describes the task:
### Input:
Return the package config or context and normalize some of the
values
### Response:
def _default_ctx(self):
""" Return the package config or context and normalize some of the
values
"""
if not self.in_app_directory:
print("Warning: {} does not exist. Using the default.".format(
self.package))
ctx = {}
else:
with open(self.package) as f:
ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader))
if self.in_app_directory:
# Update the env for each platform
excluded = list(ctx.get('excluded', []))
for env in [ctx['ios'], ctx['android']]:
if 'python_build_dir' not in env:
env['python_build_dir'] = expanduser(abspath('build/python'))
if 'conda_prefix' not in env:
env['conda_prefix'] = os.environ.get(
'CONDA_PREFIX', expanduser(abspath('venv')))
# Join the shared and local exclusions
env['excluded'] = list(env.get('excluded', [])) + excluded
return ctx |
def cast_in(self, element):
"""encode the element into the internal tag list."""
if _debug: SequenceOfAny._debug("cast_in %r", element)
# make sure it is a list
if not isinstance(element, List):
raise EncodingError("%r is not a list" % (element,))
t = TagList()
element.encode(t)
self.tagList.extend(t.tagList) | encode the element into the internal tag list. | Below is the the instruction that describes the task:
### Input:
encode the element into the internal tag list.
### Response:
def cast_in(self, element):
"""encode the element into the internal tag list."""
if _debug: SequenceOfAny._debug("cast_in %r", element)
# make sure it is a list
if not isinstance(element, List):
raise EncodingError("%r is not a list" % (element,))
t = TagList()
element.encode(t)
self.tagList.extend(t.tagList) |
async def _body_callback(self, h11_connection):
'''
A callback func to be supplied if the user wants to do something
directly with the response body's stream.
'''
# pylint: disable=not-callable
while True:
next_event = await self._recv_event(h11_connection)
if isinstance(next_event, h11.Data):
await self.callback(next_event.data)
else:
return next_event | A callback func to be supplied if the user wants to do something
directly with the response body's stream. | Below is the the instruction that describes the task:
### Input:
A callback func to be supplied if the user wants to do something
directly with the response body's stream.
### Response:
async def _body_callback(self, h11_connection):
'''
A callback func to be supplied if the user wants to do something
directly with the response body's stream.
'''
# pylint: disable=not-callable
while True:
next_event = await self._recv_event(h11_connection)
if isinstance(next_event, h11.Data):
await self.callback(next_event.data)
else:
return next_event |
def create(p_class, p_todo, p_id_width=4):
"""
Creates a TodoWidget instance for the given todo. Widgets are
cached, the same object is returned for the same todo item.
"""
def parent_progress_may_have_changed(p_todo):
"""
Returns True when a todo's progress should be updated because it is
dependent on the parent's progress.
"""
return p_todo.has_tag('p') and not p_todo.has_tag('due')
source = p_todo.source()
if source in p_class.cache:
widget = p_class.cache[source]
if p_todo is not widget.todo:
# same source text but different todo instance (could happen
# after an edit where a new Todo instance is created with the
# same text as before)
# simply fix the reference in the stored widget.
widget.todo = p_todo
if parent_progress_may_have_changed(p_todo):
widget.update_progress()
else:
widget = p_class(p_todo, p_id_width)
p_class.cache[source] = widget
return widget | Creates a TodoWidget instance for the given todo. Widgets are
cached, the same object is returned for the same todo item. | Below is the the instruction that describes the task:
### Input:
Creates a TodoWidget instance for the given todo. Widgets are
cached, the same object is returned for the same todo item.
### Response:
def create(p_class, p_todo, p_id_width=4):
"""
Creates a TodoWidget instance for the given todo. Widgets are
cached, the same object is returned for the same todo item.
"""
def parent_progress_may_have_changed(p_todo):
"""
Returns True when a todo's progress should be updated because it is
dependent on the parent's progress.
"""
return p_todo.has_tag('p') and not p_todo.has_tag('due')
source = p_todo.source()
if source in p_class.cache:
widget = p_class.cache[source]
if p_todo is not widget.todo:
# same source text but different todo instance (could happen
# after an edit where a new Todo instance is created with the
# same text as before)
# simply fix the reference in the stored widget.
widget.todo = p_todo
if parent_progress_may_have_changed(p_todo):
widget.update_progress()
else:
widget = p_class(p_todo, p_id_width)
p_class.cache[source] = widget
return widget |
def set_printed_time(self, sample):
"""Updates the printed time of the last results report from the sample
"""
if api.get_workflow_status_of(sample) != "published":
return False
reports = sample.objectValues("ARReport")
reports = sorted(reports, key=lambda report: report.getDatePublished())
last_report = reports[-1]
if not last_report.getDatePrinted():
last_report.setDatePrinted(DateTime())
sample.reindexObject(idxs=["getPrinted"])
return True | Updates the printed time of the last results report from the sample | Below is the the instruction that describes the task:
### Input:
Updates the printed time of the last results report from the sample
### Response:
def set_printed_time(self, sample):
"""Updates the printed time of the last results report from the sample
"""
if api.get_workflow_status_of(sample) != "published":
return False
reports = sample.objectValues("ARReport")
reports = sorted(reports, key=lambda report: report.getDatePublished())
last_report = reports[-1]
if not last_report.getDatePrinted():
last_report.setDatePrinted(DateTime())
sample.reindexObject(idxs=["getPrinted"])
return True |
def get_field_mapping(self):
"""Obtain metadata from current state of the widget.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
"""
fields = {}
values = {}
for tab in self.tabs:
parameter_values = tab.get_parameter_value()
fields.update(parameter_values['fields'])
values.update(parameter_values['values'])
return {
'fields': fields,
'values': values
} | Obtain metadata from current state of the widget.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Obtain metadata from current state of the widget.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
### Response:
def get_field_mapping(self):
"""Obtain metadata from current state of the widget.
:returns: Dictionary of values by type in this format:
{'fields': {}, 'values': {}}.
:rtype: dict
"""
fields = {}
values = {}
for tab in self.tabs:
parameter_values = tab.get_parameter_value()
fields.update(parameter_values['fields'])
values.update(parameter_values['values'])
return {
'fields': fields,
'values': values
} |
def example_lab_to_xyz():
"""
This function shows a simple conversion of an Lab color to an XYZ color.
"""
print("=== Simple Example: Lab->XYZ ===")
# Instantiate an Lab color object with the given values.
lab = LabColor(0.903, 16.296, -2.22)
# Show a string representation.
print(lab)
# Convert to XYZ.
xyz = convert_color(lab, XYZColor)
print(xyz)
print("=== End Example ===\n") | This function shows a simple conversion of an Lab color to an XYZ color. | Below is the the instruction that describes the task:
### Input:
This function shows a simple conversion of an Lab color to an XYZ color.
### Response:
def example_lab_to_xyz():
"""
This function shows a simple conversion of an Lab color to an XYZ color.
"""
print("=== Simple Example: Lab->XYZ ===")
# Instantiate an Lab color object with the given values.
lab = LabColor(0.903, 16.296, -2.22)
# Show a string representation.
print(lab)
# Convert to XYZ.
xyz = convert_color(lab, XYZColor)
print(xyz)
print("=== End Example ===\n") |
def __get_cfg_pkgs_rpm(self):
'''
Get packages with configuration files on RPM systems.
'''
out, err = self._syscall('rpm', None, None, '-qa', '--configfiles',
'--queryformat', '%{name}-%{version}-%{release}\\n')
data = dict()
pkg_name = None
pkg_configs = []
out = salt.utils.stringutils.to_str(out)
for line in out.split(os.linesep):
line = line.strip()
if not line:
continue
if not line.startswith("/"):
if pkg_name and pkg_configs:
data[pkg_name] = pkg_configs
pkg_name = line
pkg_configs = []
else:
pkg_configs.append(line)
if pkg_name and pkg_configs:
data[pkg_name] = pkg_configs
return data | Get packages with configuration files on RPM systems. | Below is the the instruction that describes the task:
### Input:
Get packages with configuration files on RPM systems.
### Response:
def __get_cfg_pkgs_rpm(self):
'''
Get packages with configuration files on RPM systems.
'''
out, err = self._syscall('rpm', None, None, '-qa', '--configfiles',
'--queryformat', '%{name}-%{version}-%{release}\\n')
data = dict()
pkg_name = None
pkg_configs = []
out = salt.utils.stringutils.to_str(out)
for line in out.split(os.linesep):
line = line.strip()
if not line:
continue
if not line.startswith("/"):
if pkg_name and pkg_configs:
data[pkg_name] = pkg_configs
pkg_name = line
pkg_configs = []
else:
pkg_configs.append(line)
if pkg_name and pkg_configs:
data[pkg_name] = pkg_configs
return data |
def fcs(args):
"""
%prog fcs fcsfile
Process the results from Genbank contaminant screen. An example of the file
looks like:
contig name, length, span(s), apparent source
contig0746 11760 1..141 vector
contig0751 14226 13476..14226 vector
contig0800 124133 30512..30559 primer/adapter
"""
p = OptionParser(fcs.__doc__)
p.add_option("--cutoff", default=200,
help="Skip small components less than [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fcsfile, = args
cutoff = opts.cutoff
fp = open(fcsfile)
for row in fp:
if row[0] == "#":
continue
sep = "\t" if "\t" in row else None
atoms = row.rstrip().split(sep, 3)
contig, length = atoms[:2]
length = int(length)
label = atoms[-1]
label = label.replace(" ", "_")
if len(atoms) == 3:
ranges = "{0}..{1}".format(1, length)
else:
assert len(atoms) == 4
ranges = atoms[2]
for ab in ranges.split(","):
a, b = ab.split("..")
a, b = int(a), int(b)
assert a <= b
ahang = a - 1
bhang = length - b
if ahang < cutoff:
a = 1
if bhang < cutoff:
b = length
print("\t".join(str(x) for x in (contig, a - 1, b, label))) | %prog fcs fcsfile
Process the results from Genbank contaminant screen. An example of the file
looks like:
contig name, length, span(s), apparent source
contig0746 11760 1..141 vector
contig0751 14226 13476..14226 vector
contig0800 124133 30512..30559 primer/adapter | Below is the the instruction that describes the task:
### Input:
%prog fcs fcsfile
Process the results from Genbank contaminant screen. An example of the file
looks like:
contig name, length, span(s), apparent source
contig0746 11760 1..141 vector
contig0751 14226 13476..14226 vector
contig0800 124133 30512..30559 primer/adapter
### Response:
def fcs(args):
"""
%prog fcs fcsfile
Process the results from Genbank contaminant screen. An example of the file
looks like:
contig name, length, span(s), apparent source
contig0746 11760 1..141 vector
contig0751 14226 13476..14226 vector
contig0800 124133 30512..30559 primer/adapter
"""
p = OptionParser(fcs.__doc__)
p.add_option("--cutoff", default=200,
help="Skip small components less than [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fcsfile, = args
cutoff = opts.cutoff
fp = open(fcsfile)
for row in fp:
if row[0] == "#":
continue
sep = "\t" if "\t" in row else None
atoms = row.rstrip().split(sep, 3)
contig, length = atoms[:2]
length = int(length)
label = atoms[-1]
label = label.replace(" ", "_")
if len(atoms) == 3:
ranges = "{0}..{1}".format(1, length)
else:
assert len(atoms) == 4
ranges = atoms[2]
for ab in ranges.split(","):
a, b = ab.split("..")
a, b = int(a), int(b)
assert a <= b
ahang = a - 1
bhang = length - b
if ahang < cutoff:
a = 1
if bhang < cutoff:
b = length
print("\t".join(str(x) for x in (contig, a - 1, b, label))) |
def _record_revisit(self, payload_offset: int):
'''Record the revisit if possible.'''
fields = self._response_record.fields
ref_record_id = self._url_table.get_revisit_id(
fields['WARC-Target-URI'],
fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '')
)
if ref_record_id:
try:
self._response_record.block_file.truncate(payload_offset)
except TypeError:
self._response_record.block_file.seek(0)
data = self._response_record.block_file.read(payload_offset)
self._response_record.block_file.truncate()
self._response_record.block_file.seek(0)
self._response_record.block_file.write(data)
self._recorder.set_length_and_maybe_checksums(
self._response_record
)
fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT
fields['WARC-Refers-To'] = ref_record_id
fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI
            fields['WARC-Truncated'] = 'length' | Record the revisit if possible. | Below is the instruction that describes the task:
### Input:
Record the revisit if possible.
### Response:
def _record_revisit(self, payload_offset: int):
'''Record the revisit if possible.'''
fields = self._response_record.fields
ref_record_id = self._url_table.get_revisit_id(
fields['WARC-Target-URI'],
fields.get('WARC-Payload-Digest', '').upper().replace('SHA1:', '')
)
if ref_record_id:
try:
self._response_record.block_file.truncate(payload_offset)
except TypeError:
self._response_record.block_file.seek(0)
data = self._response_record.block_file.read(payload_offset)
self._response_record.block_file.truncate()
self._response_record.block_file.seek(0)
self._response_record.block_file.write(data)
self._recorder.set_length_and_maybe_checksums(
self._response_record
)
fields[WARCRecord.WARC_TYPE] = WARCRecord.REVISIT
fields['WARC-Refers-To'] = ref_record_id
fields['WARC-Profile'] = WARCRecord.SAME_PAYLOAD_DIGEST_URI
fields['WARC-Truncated'] = 'length' |
def pack_paths(paths, sheet_size=None):
"""
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
"""
from .util import concatenate
if sheet_size is not None:
sheet_size = np.sort(sheet_size)[::-1]
quantity = []
for path in paths:
if 'quantity' in path.metadata:
quantity.append(path.metadata['quantity'])
else:
quantity.append(1)
# pack using exterior polygon (will OBB)
polygons = [i.polygons_closed[i.root[0]] for i in paths]
# pack the polygons using rectangular bin packing
inserted, transforms = multipack(polygons=polygons,
quantity=quantity,
sheet_size=sheet_size)
multi = []
for i, T in zip(inserted, transforms):
multi.append(paths[i].copy())
multi[-1].apply_transform(T)
# append all packed paths into a single Path object
packed = concatenate(multi)
return packed, inserted | Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
      Indexes of paths inserted into result | Below is the instruction that describes the task:
### Input:
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
### Response:
def pack_paths(paths, sheet_size=None):
"""
Pack a list of Path2D objects into a rectangle.
Parameters
------------
paths: (n,) Path2D
Geometry to be packed
Returns
------------
packed : trimesh.path.Path2D
Object containing input geometry
inserted : (m,) int
Indexes of paths inserted into result
"""
from .util import concatenate
if sheet_size is not None:
sheet_size = np.sort(sheet_size)[::-1]
quantity = []
for path in paths:
if 'quantity' in path.metadata:
quantity.append(path.metadata['quantity'])
else:
quantity.append(1)
# pack using exterior polygon (will OBB)
polygons = [i.polygons_closed[i.root[0]] for i in paths]
# pack the polygons using rectangular bin packing
inserted, transforms = multipack(polygons=polygons,
quantity=quantity,
sheet_size=sheet_size)
multi = []
for i, T in zip(inserted, transforms):
multi.append(paths[i].copy())
multi[-1].apply_transform(T)
# append all packed paths into a single Path object
packed = concatenate(multi)
return packed, inserted |
def get_context(namespace, context_id):
"""Get stored context object."""
context_obj = get_state(context_id, namespace=namespace)
if not context_obj:
raise ContextError("Context '{}' not found in namespace '{}'".format(
context_id, namespace))
    return context_obj | Get stored context object. | Below is the instruction that describes the task:
### Input:
Get stored context object.
### Response:
def get_context(namespace, context_id):
"""Get stored context object."""
context_obj = get_state(context_id, namespace=namespace)
if not context_obj:
raise ContextError("Context '{}' not found in namespace '{}'".format(
context_id, namespace))
return context_obj |
def generate_slides(self, infile):
""" Process a file of rest and yield dictionaries """
state = 0
# each slide is a dict
slide = {}
last_heading = 0
for item in self.generate_lines(infile):
line = item['line']
heading = item['heading']
indent = item['indent']
# Any heading is the heading for a new slide
if heading:
if slide and last_heading <= 1:
yield slide
last_heading = heading
rows = []
slide = {}
if heading < 2:
slide.update(dict(
heading = dict(text=line.strip('#')),
rows = rows))
continue
# Any block with more than one hash is a comment
if last_heading > 1:
continue
if indent == 0 and line:
# at a potential image
rows.append(self.build_row(line))
else:
# Just add the line of text
items = [dict(text=(' ' * indent) + line)]
rows.append(dict(items=items))
if slide:
            yield slide | Process a file of rest and yield dictionaries | Below is the instruction that describes the task:
### Input:
Process a file of rest and yield dictionaries
### Response:
def generate_slides(self, infile):
""" Process a file of rest and yield dictionaries """
state = 0
# each slide is a dict
slide = {}
last_heading = 0
for item in self.generate_lines(infile):
line = item['line']
heading = item['heading']
indent = item['indent']
# Any heading is the heading for a new slide
if heading:
if slide and last_heading <= 1:
yield slide
last_heading = heading
rows = []
slide = {}
if heading < 2:
slide.update(dict(
heading = dict(text=line.strip('#')),
rows = rows))
continue
# Any block with more than one hash is a comment
if last_heading > 1:
continue
if indent == 0 and line:
# at a potential image
rows.append(self.build_row(line))
else:
# Just add the line of text
items = [dict(text=(' ' * indent) + line)]
rows.append(dict(items=items))
if slide:
yield slide |
def _get_cache_key(self, obj):
"""Derive cache key for given object."""
if obj is not None:
# Make sure that key is REALLY unique.
return '{}-{}'.format(id(self), obj.pk)
        return "{}-None".format(id(self)) | Derive cache key for given object. | Below is the instruction that describes the task:
### Input:
Derive cache key for given object.
### Response:
def _get_cache_key(self, obj):
"""Derive cache key for given object."""
if obj is not None:
# Make sure that key is REALLY unique.
return '{}-{}'.format(id(self), obj.pk)
return "{}-None".format(id(self)) |
def projectEmitter(target, source, env):
"""Sets up the DSP dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSPROJECTSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSPROJECTSUFFIX')
target[0] = base + suff
if not source:
source = 'prj_inputs:'
source = source + env.subst('$MSVSSCONSCOM', 1)
source = source + env.subst('$MSVSENCODING', 1)
# Project file depends on CPPDEFINES and CPPPATH
preprocdefs = xmlify(';'.join(processDefines(env.get('CPPDEFINES', []))))
includepath_Dirs = processIncludes(env.get('CPPPATH', []), env, None, None)
includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
source = source + "; ppdefs:%s incpath:%s"%(preprocdefs, includepath)
if 'buildtarget' in env and env['buildtarget'] != None:
if SCons.Util.is_String(env['buildtarget']):
source = source + ' "%s"' % env['buildtarget']
elif SCons.Util.is_List(env['buildtarget']):
for bt in env['buildtarget']:
if SCons.Util.is_String(bt):
source = source + ' "%s"' % bt
else:
try: source = source + ' "%s"' % bt.get_abspath()
except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
else:
try: source = source + ' "%s"' % env['buildtarget'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
if 'outdir' in env and env['outdir'] != None:
if SCons.Util.is_String(env['outdir']):
source = source + ' "%s"' % env['outdir']
elif SCons.Util.is_List(env['outdir']):
for s in env['outdir']:
if SCons.Util.is_String(s):
source = source + ' "%s"' % s
else:
try: source = source + ' "%s"' % s.get_abspath()
except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
else:
try: source = source + ' "%s"' % env['outdir'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
if 'name' in env:
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError("name must be a string")
if 'variant' in env:
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError("name must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be specified")
for s in _DSPGenerator.srcargs:
if s in env:
if SCons.Util.is_String(env[s]):
source = source + ' "%s' % env[s]
elif SCons.Util.is_List(env[s]):
for t in env[s]:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
else:
raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
else:
raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
targetlist = [target[0]]
sourcelist = source
if env.get('auto_build_solution', 1):
env['projects'] = [env.File(t).srcnode() for t in targetlist]
t, s = solutionEmitter(target, target, env)
targetlist = targetlist + t
# Beginning with Visual Studio 2010 for each project file (.vcxproj) we have additional file (.vcxproj.filters)
version_num = 6.0
if 'MSVS_VERSION' in env:
version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
if version_num >= 10.0:
targetlist.append(targetlist[0] + '.filters')
    return (targetlist, sourcelist) | Sets up the DSP dependencies. | Below is the instruction that describes the task:
### Input:
Sets up the DSP dependencies.
### Response:
def projectEmitter(target, source, env):
"""Sets up the DSP dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSPROJECTSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSPROJECTSUFFIX')
target[0] = base + suff
if not source:
source = 'prj_inputs:'
source = source + env.subst('$MSVSSCONSCOM', 1)
source = source + env.subst('$MSVSENCODING', 1)
# Project file depends on CPPDEFINES and CPPPATH
preprocdefs = xmlify(';'.join(processDefines(env.get('CPPDEFINES', []))))
includepath_Dirs = processIncludes(env.get('CPPPATH', []), env, None, None)
includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
source = source + "; ppdefs:%s incpath:%s"%(preprocdefs, includepath)
if 'buildtarget' in env and env['buildtarget'] != None:
if SCons.Util.is_String(env['buildtarget']):
source = source + ' "%s"' % env['buildtarget']
elif SCons.Util.is_List(env['buildtarget']):
for bt in env['buildtarget']:
if SCons.Util.is_String(bt):
source = source + ' "%s"' % bt
else:
try: source = source + ' "%s"' % bt.get_abspath()
except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
else:
try: source = source + ' "%s"' % env['buildtarget'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
if 'outdir' in env and env['outdir'] != None:
if SCons.Util.is_String(env['outdir']):
source = source + ' "%s"' % env['outdir']
elif SCons.Util.is_List(env['outdir']):
for s in env['outdir']:
if SCons.Util.is_String(s):
source = source + ' "%s"' % s
else:
try: source = source + ' "%s"' % s.get_abspath()
except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
else:
try: source = source + ' "%s"' % env['outdir'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
if 'name' in env:
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError("name must be a string")
if 'variant' in env:
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError("name must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be specified")
for s in _DSPGenerator.srcargs:
if s in env:
if SCons.Util.is_String(env[s]):
source = source + ' "%s' % env[s]
elif SCons.Util.is_List(env[s]):
for t in env[s]:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
else:
raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
else:
raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
targetlist = [target[0]]
sourcelist = source
if env.get('auto_build_solution', 1):
env['projects'] = [env.File(t).srcnode() for t in targetlist]
t, s = solutionEmitter(target, target, env)
targetlist = targetlist + t
# Beginning with Visual Studio 2010 for each project file (.vcxproj) we have additional file (.vcxproj.filters)
version_num = 6.0
if 'MSVS_VERSION' in env:
version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
if version_num >= 10.0:
targetlist.append(targetlist[0] + '.filters')
return (targetlist, sourcelist) |
def wait(self, dwMilliseconds = None):
"""
Wait for the Win32 object to be signaled.
@type dwMilliseconds: int
@param dwMilliseconds: (Optional) Timeout value in milliseconds.
Use C{INFINITE} or C{None} for no timeout.
"""
if self.value is None:
raise ValueError("Handle is already closed!")
if dwMilliseconds is None:
dwMilliseconds = INFINITE
r = WaitForSingleObject(self.value, dwMilliseconds)
if r != WAIT_OBJECT_0:
raise ctypes.WinError(r) | Wait for the Win32 object to be signaled.
@type dwMilliseconds: int
@param dwMilliseconds: (Optional) Timeout value in milliseconds.
            Use C{INFINITE} or C{None} for no timeout. | Below is the instruction that describes the task:
### Input:
Wait for the Win32 object to be signaled.
@type dwMilliseconds: int
@param dwMilliseconds: (Optional) Timeout value in milliseconds.
Use C{INFINITE} or C{None} for no timeout.
### Response:
def wait(self, dwMilliseconds = None):
"""
Wait for the Win32 object to be signaled.
@type dwMilliseconds: int
@param dwMilliseconds: (Optional) Timeout value in milliseconds.
Use C{INFINITE} or C{None} for no timeout.
"""
if self.value is None:
raise ValueError("Handle is already closed!")
if dwMilliseconds is None:
dwMilliseconds = INFINITE
r = WaitForSingleObject(self.value, dwMilliseconds)
if r != WAIT_OBJECT_0:
raise ctypes.WinError(r) |
def add_fields(input_array, arrays, names=None, assubarray=False):
"""Adds the given array(s) as new field(s) to the given input array.
Returns a new instance of the input_array with the new fields added.
Parameters
----------
input_array : instance of a numpy.ndarray or numpy recarray
The array to to add the fields to.
arrays : (list of) numpy array(s)
The arrays to add. If adding multiple arrays, must be a list;
if adding a single array, can just be that array.
names : (list of) strings
Optional, the name(s) of the new fields in the output array. If
adding multiple fields, must be a list of strings with the same
length as the list of arrays. If None provided, names used will
be the same as the name of the datatype in the given arrays.
If the datatype has no name, the new field will be ``'fi'`` where
i is the index of the array in arrays.
assubarray : bool
Add the list of arrays as a single subarray field. If True, and names
provided, names should be a string or a length-1 sequence. Default is
False, in which case each array will be added as a separate field.
Returns
-------
new_array : new instance of `input_array`
A copy of the `input_array` with the desired fields added.
"""
if not isinstance(arrays, list):
arrays = [arrays]
# ensure that all arrays in arrays are arrays
arrays = _ensure_array_list(arrays)
# set the names
if names is not None:
if isinstance(names, string_types):
names = [names]
# check if any names are subarray names; if so, we have to add them
# separately
subarray_names = [name for name in names if len(name.split('.')) > 1]
else:
subarray_names = []
if any(subarray_names):
subarrays = [arrays[ii] for ii,name in enumerate(names) \
if name in subarray_names]
# group together by subarray
groups = {}
for name,arr in zip(subarray_names, subarrays):
key = name.split('.')[0]
subkey = '.'.join(name.split('.')[1:])
try:
groups[key].append((subkey, arr))
except KeyError:
groups[key] = [(subkey, arr)]
# now cycle over the groups, adding all of the fields in each group
# as a subarray
for group_name in groups:
# we'll create a dictionary out of the subarray field names ->
# subarrays
thisdict = dict(groups[group_name])
# check if the input array has this field; if so, remove it, then
# add it back with the other new arrays
if group_name in input_array.fieldnames:
# get the data
new_subarray = input_array[group_name]
# add the new fields to the subarray
new_subarray = add_fields(new_subarray, thisdict.values(),
thisdict.keys())
# remove the original from the input array
input_array = input_array.without_fields(group_name)
else:
new_subarray = thisdict.values()
# add the new subarray to input_array as a subarray
input_array = add_fields(input_array, new_subarray,
names=group_name, assubarray=True)
# set the subarray names
input_array[group_name].dtype.names = thisdict.keys()
# remove the subarray names from names
keep_idx = [ii for ii,name in enumerate(names) \
if name not in subarray_names]
names = [names[ii] for ii in keep_idx]
# if there's nothing left, just return
if names == []:
return input_array
# also remove the subarray arrays
arrays = [arrays[ii] for ii in keep_idx]
if assubarray:
# merge all of the arrays into a single array
if len(arrays) > 1:
arrays = [merge_arrays(arrays, flatten=True)]
# now merge all the fields as a single subarray
merged_arr = numpy.empty(len(arrays[0]),
dtype=[('f0', arrays[0].dtype.descr)])
merged_arr['f0'] = arrays[0]
arrays = [merged_arr]
merge_list = [input_array] + arrays
if names is not None:
names = list(input_array.dtype.names) + names
# merge into a single array
return merge_arrays(merge_list, names=names, flatten=True,
outtype=type(input_array)) | Adds the given array(s) as new field(s) to the given input array.
Returns a new instance of the input_array with the new fields added.
Parameters
----------
input_array : instance of a numpy.ndarray or numpy recarray
The array to to add the fields to.
arrays : (list of) numpy array(s)
The arrays to add. If adding multiple arrays, must be a list;
if adding a single array, can just be that array.
names : (list of) strings
Optional, the name(s) of the new fields in the output array. If
adding multiple fields, must be a list of strings with the same
length as the list of arrays. If None provided, names used will
be the same as the name of the datatype in the given arrays.
If the datatype has no name, the new field will be ``'fi'`` where
i is the index of the array in arrays.
assubarray : bool
Add the list of arrays as a single subarray field. If True, and names
provided, names should be a string or a length-1 sequence. Default is
False, in which case each array will be added as a separate field.
Returns
-------
new_array : new instance of `input_array`
        A copy of the `input_array` with the desired fields added. | Below is the instruction that describes the task:
### Input:
Adds the given array(s) as new field(s) to the given input array.
Returns a new instance of the input_array with the new fields added.
Parameters
----------
input_array : instance of a numpy.ndarray or numpy recarray
The array to to add the fields to.
arrays : (list of) numpy array(s)
The arrays to add. If adding multiple arrays, must be a list;
if adding a single array, can just be that array.
names : (list of) strings
Optional, the name(s) of the new fields in the output array. If
adding multiple fields, must be a list of strings with the same
length as the list of arrays. If None provided, names used will
be the same as the name of the datatype in the given arrays.
If the datatype has no name, the new field will be ``'fi'`` where
i is the index of the array in arrays.
assubarray : bool
Add the list of arrays as a single subarray field. If True, and names
provided, names should be a string or a length-1 sequence. Default is
False, in which case each array will be added as a separate field.
Returns
-------
new_array : new instance of `input_array`
A copy of the `input_array` with the desired fields added.
### Response:
def add_fields(input_array, arrays, names=None, assubarray=False):
"""Adds the given array(s) as new field(s) to the given input array.
Returns a new instance of the input_array with the new fields added.
Parameters
----------
input_array : instance of a numpy.ndarray or numpy recarray
The array to to add the fields to.
arrays : (list of) numpy array(s)
The arrays to add. If adding multiple arrays, must be a list;
if adding a single array, can just be that array.
names : (list of) strings
Optional, the name(s) of the new fields in the output array. If
adding multiple fields, must be a list of strings with the same
length as the list of arrays. If None provided, names used will
be the same as the name of the datatype in the given arrays.
If the datatype has no name, the new field will be ``'fi'`` where
i is the index of the array in arrays.
assubarray : bool
Add the list of arrays as a single subarray field. If True, and names
provided, names should be a string or a length-1 sequence. Default is
False, in which case each array will be added as a separate field.
Returns
-------
new_array : new instance of `input_array`
A copy of the `input_array` with the desired fields added.
"""
if not isinstance(arrays, list):
arrays = [arrays]
# ensure that all arrays in arrays are arrays
arrays = _ensure_array_list(arrays)
# set the names
if names is not None:
if isinstance(names, string_types):
names = [names]
# check if any names are subarray names; if so, we have to add them
# separately
subarray_names = [name for name in names if len(name.split('.')) > 1]
else:
subarray_names = []
if any(subarray_names):
subarrays = [arrays[ii] for ii,name in enumerate(names) \
if name in subarray_names]
# group together by subarray
groups = {}
for name,arr in zip(subarray_names, subarrays):
key = name.split('.')[0]
subkey = '.'.join(name.split('.')[1:])
try:
groups[key].append((subkey, arr))
except KeyError:
groups[key] = [(subkey, arr)]
# now cycle over the groups, adding all of the fields in each group
# as a subarray
for group_name in groups:
# we'll create a dictionary out of the subarray field names ->
# subarrays
thisdict = dict(groups[group_name])
# check if the input array has this field; if so, remove it, then
# add it back with the other new arrays
if group_name in input_array.fieldnames:
# get the data
new_subarray = input_array[group_name]
# add the new fields to the subarray
new_subarray = add_fields(new_subarray, thisdict.values(),
thisdict.keys())
# remove the original from the input array
input_array = input_array.without_fields(group_name)
else:
new_subarray = thisdict.values()
# add the new subarray to input_array as a subarray
input_array = add_fields(input_array, new_subarray,
names=group_name, assubarray=True)
# set the subarray names
input_array[group_name].dtype.names = thisdict.keys()
# remove the subarray names from names
keep_idx = [ii for ii,name in enumerate(names) \
if name not in subarray_names]
names = [names[ii] for ii in keep_idx]
# if there's nothing left, just return
if names == []:
return input_array
# also remove the subarray arrays
arrays = [arrays[ii] for ii in keep_idx]
if assubarray:
# merge all of the arrays into a single array
if len(arrays) > 1:
arrays = [merge_arrays(arrays, flatten=True)]
# now merge all the fields as a single subarray
merged_arr = numpy.empty(len(arrays[0]),
dtype=[('f0', arrays[0].dtype.descr)])
merged_arr['f0'] = arrays[0]
arrays = [merged_arr]
merge_list = [input_array] + arrays
if names is not None:
names = list(input_array.dtype.names) + names
# merge into a single array
return merge_arrays(merge_list, names=names, flatten=True,
outtype=type(input_array)) |
def gpio_set(self, pins, states):
"""Sets the state for one or more user-controllable GPIOs.
For each of the given pins, sets the the corresponding state based on
the index.
Args:
self (JLink): the ``JLink`` instance
pins (list): list of GPIO indices
states (list): list of states to set
Returns:
A list of updated states.
Raises:
JLinkException: on error.
ValueError: if ``len(pins) != len(states)``
"""
if len(pins) != len(states):
raise ValueError('Length mismatch between pins and states.')
size = len(pins)
indices = (ctypes.c_uint8 * size)(*pins)
states = (ctypes.c_uint8 * size)(*states)
result_states = (ctypes.c_uint8 * size)()
result = self._dll.JLINK_EMU_GPIO_SetState(ctypes.byref(indices),
ctypes.byref(states),
ctypes.byref(result_states),
size)
if result < 0:
raise errors.JLinkException(result)
return list(result_states) | Sets the state for one or more user-controllable GPIOs.
For each of the given pins, sets the the corresponding state based on
the index.
Args:
self (JLink): the ``JLink`` instance
pins (list): list of GPIO indices
states (list): list of states to set
Returns:
A list of updated states.
Raises:
JLinkException: on error.
          ValueError: if ``len(pins) != len(states)`` | Below is the instruction that describes the task:
### Input:
Sets the state for one or more user-controllable GPIOs.
For each of the given pins, sets the the corresponding state based on
the index.
Args:
self (JLink): the ``JLink`` instance
pins (list): list of GPIO indices
states (list): list of states to set
Returns:
A list of updated states.
Raises:
JLinkException: on error.
ValueError: if ``len(pins) != len(states)``
### Response:
def gpio_set(self, pins, states):
"""Sets the state for one or more user-controllable GPIOs.
For each of the given pins, sets the the corresponding state based on
the index.
Args:
self (JLink): the ``JLink`` instance
pins (list): list of GPIO indices
states (list): list of states to set
Returns:
A list of updated states.
Raises:
JLinkException: on error.
ValueError: if ``len(pins) != len(states)``
"""
if len(pins) != len(states):
raise ValueError('Length mismatch between pins and states.')
size = len(pins)
indices = (ctypes.c_uint8 * size)(*pins)
states = (ctypes.c_uint8 * size)(*states)
result_states = (ctypes.c_uint8 * size)()
result = self._dll.JLINK_EMU_GPIO_SetState(ctypes.byref(indices),
ctypes.byref(states),
ctypes.byref(result_states),
size)
if result < 0:
raise errors.JLinkException(result)
return list(result_states) |
def createMultipleL4L2Columns(network, networkConfig):
"""
Create a network consisting of multiple columns. Each column contains one L4
and one L2, is identical in structure to the network created by
createL4L2Column. In addition all the L2 columns are fully connected to each
other through their lateral inputs.
Region names have a column number appended as in externalInput_0,
externalInput_1, etc.
networkConfig must be of the following format (see createL4L2Column for
further documentation):
{
"networkType": "MultipleL4L2Columns",
"numCorticalColumns": 3,
"externalInputSize": 1024,
"sensorInputSize": 1024,
"L4Params": {
<constructor parameters for ApicalTMPairRegion
},
"L2Params": {
<constructor parameters for ColumnPoolerRegion>
},
"lateralSPParams": {
<constructor parameters for optional SPRegion>
},
"feedForwardSPParams": {
<constructor parameters for optional SPRegion>
}
}
"""
# Create each column
numCorticalColumns = networkConfig["numCorticalColumns"]
for i in xrange(numCorticalColumns):
networkConfigCopy = copy.deepcopy(networkConfig)
layerConfig = networkConfigCopy["L2Params"]
layerConfig["seed"] = layerConfig.get("seed", 42) + i
layerConfig["numOtherCorticalColumns"] = numCorticalColumns - 1
suffix = "_" + str(i)
network = createL4L2Column(network, networkConfigCopy, suffix)
# Now connect the L2 columns laterally
for i in range(networkConfig["numCorticalColumns"]):
suffixSrc = "_" + str(i)
for j in range(networkConfig["numCorticalColumns"]):
if i != j:
suffixDest = "_" + str(j)
network.link(
"L2Column" + suffixSrc, "L2Column" + suffixDest,
"UniformLink", "",
srcOutput="feedForwardOutput", destInput="lateralInput",
propagationDelay=1)
enableProfiling(network)
return network | Create a network consisting of multiple columns. Each column contains one L4
and one L2, is identical in structure to the network created by
createL4L2Column. In addition all the L2 columns are fully connected to each
other through their lateral inputs.
Region names have a column number appended as in externalInput_0,
externalInput_1, etc.
networkConfig must be of the following format (see createL4L2Column for
further documentation):
{
"networkType": "MultipleL4L2Columns",
"numCorticalColumns": 3,
"externalInputSize": 1024,
"sensorInputSize": 1024,
"L4Params": {
<constructor parameters for ApicalTMPairRegion
},
"L2Params": {
<constructor parameters for ColumnPoolerRegion>
},
"lateralSPParams": {
<constructor parameters for optional SPRegion>
},
"feedForwardSPParams": {
<constructor parameters for optional SPRegion>
}
  } | Below is the instruction that describes the task:
### Input:
Create a network consisting of multiple columns. Each column contains one L4
and one L2, is identical in structure to the network created by
createL4L2Column. In addition all the L2 columns are fully connected to each
other through their lateral inputs.
Region names have a column number appended as in externalInput_0,
externalInput_1, etc.
networkConfig must be of the following format (see createL4L2Column for
further documentation):
{
"networkType": "MultipleL4L2Columns",
"numCorticalColumns": 3,
"externalInputSize": 1024,
"sensorInputSize": 1024,
"L4Params": {
<constructor parameters for ApicalTMPairRegion
},
"L2Params": {
<constructor parameters for ColumnPoolerRegion>
},
"lateralSPParams": {
<constructor parameters for optional SPRegion>
},
"feedForwardSPParams": {
<constructor parameters for optional SPRegion>
}
}
### Response:
def createMultipleL4L2Columns(network, networkConfig):
    """
    Create a network consisting of multiple columns. Each column contains one
    L4 and one L2, and is identical in structure to the network created by
    createL4L2Column. In addition all the L2 columns are fully connected to
    each other through their lateral inputs.

    Region names have a column number appended as in externalInput_0,
    externalInput_1, etc.

    networkConfig must be of the following format (see createL4L2Column for
    further documentation)::

        {
          "networkType": "MultipleL4L2Columns",
          "numCorticalColumns": 3,
          "externalInputSize": 1024,
          "sensorInputSize": 1024,
          "L4Params": {
            <constructor parameters for ApicalTMPairRegion>
          },
          "L2Params": {
            <constructor parameters for ColumnPoolerRegion>
          },
          "lateralSPParams": {
            <constructor parameters for optional SPRegion>
          },
          "feedForwardSPParams": {
            <constructor parameters for optional SPRegion>
          }
        }

    :param network: the nupic Network instance to populate
    :param networkConfig: dict as described above
    :returns: the populated network
    """
    # Create each column from a deep copy of the config so per-column
    # mutations (seed, lateral count) don't leak between columns.
    numCorticalColumns = networkConfig["numCorticalColumns"]
    for i in xrange(numCorticalColumns):
        networkConfigCopy = copy.deepcopy(networkConfig)
        layerConfig = networkConfigCopy["L2Params"]
        # Give every column a distinct seed derived from the base seed.
        layerConfig["seed"] = layerConfig.get("seed", 42) + i
        layerConfig["numOtherCorticalColumns"] = numCorticalColumns - 1
        suffix = "_" + str(i)
        network = createL4L2Column(network, networkConfigCopy, suffix)

    # Now connect the L2 columns laterally (full mesh, excluding self-links).
    for i in range(networkConfig["numCorticalColumns"]):
        suffixSrc = "_" + str(i)
        for j in range(networkConfig["numCorticalColumns"]):
            if i != j:
                suffixDest = "_" + str(j)
                network.link(
                    "L2Column" + suffixSrc, "L2Column" + suffixDest,
                    "UniformLink", "",
                    srcOutput="feedForwardOutput", destInput="lateralInput",
                    propagationDelay=1)

    enableProfiling(network)
    return network
def get_line(self, position):
    """Return the 1-based line number that the given string position is on.

    Assumes ``self.data`` is a list of lines (without trailing newlines)
    that were originally separated by single newline characters, so every
    line after the first contributes ``len(line) + 1`` characters to the
    running offset. Positions past the end of the text clamp to the last
    line rather than raising.
    """
    datalen = len(self.data)
    # Running count of characters covered through the current line.
    count = len(self.data[0])
    line = 1
    while count < position:
        if line >= datalen:
            # Position is beyond the last line; clamp to the final line.
            break
        count += len(self.data[line]) + 1  # +1 accounts for the newline
        line += 1
    return line
### Input:
Returns the line number that the given string position is found on
### Response:
def get_line(self, position):
'Returns the line number that the given string position is found on'
datalen = len(self.data)
count = len(self.data[0])
line = 1
while count < position:
if line >= datalen:
break
count += len(self.data[line]) + 1
line += 1
return line |
def get_collection(self, **kwargs):
    """Get an iterator of Python ``Resource`` objects that represent URIs.

    The returned objects are Pythonic `Resource`s that map to the most
    recently `refreshed` state of uris-resources published by the device.
    In order to instantiate the correct types, the concrete subclass must
    populate its registry with acceptable types, based on the `kind` field
    returned by the REST server.

    .. note::
        This method implies a single REST transaction with the
        Collection subclass URI.

    :param kwargs: passed through to :meth:`refresh`
    :raises: UnregisteredKind
    :returns: list of reference dicts and Python ``Resource`` objects
    """
    list_of_contents = []
    self.refresh(**kwargs)
    # 'items' is only present after a successful refresh that returned
    # collection members; an empty collection yields an empty list.
    if 'items' in self.__dict__:
        for item in self.items:
            # It's possible to have non-"kind" JSON returned. We just
            # append the corresponding dict. PostProcessing is the caller's
            # responsibility.
            if 'kind' not in item:
                list_of_contents.append(item)
                continue
            kind = item['kind']
            if kind in self._meta_data['attribute_registry']:
                # If it has a kind, it must be registered.
                instance = self._meta_data['attribute_registry'][kind](self)
                instance._local_update(item)
                instance._activate_URI(instance.selfLink)
                list_of_contents.append(instance)
            else:
                error_message = '%r is not registered!' % kind
                raise UnregisteredKind(error_message)
    return list_of_contents
The returned objects are Pythonic `Resource`s that map to the most
recently `refreshed` state of uris-resources published by the device.
In order to instantiate the correct types, the concrete subclass must
populate its registry with acceptable types, based on the `kind` field
returned by the REST server.
.. note::
This method implies a single REST transaction with the
Collection subclass URI.
:raises: UnregisteredKind
:returns: list of reference dicts and Python ``Resource`` objects | Below is the the instruction that describes the task:
### Input:
Get an iterator of Python ``Resource`` objects that represent URIs.
The returned objects are Pythonic `Resource`s that map to the most
recently `refreshed` state of uris-resources published by the device.
In order to instantiate the correct types, the concrete subclass must
populate its registry with acceptable types, based on the `kind` field
returned by the REST server.
.. note::
This method implies a single REST transaction with the
Collection subclass URI.
:raises: UnregisteredKind
:returns: list of reference dicts and Python ``Resource`` objects
### Response:
def get_collection(self, **kwargs):
"""Get an iterator of Python ``Resource`` objects that represent URIs.
The returned objects are Pythonic `Resource`s that map to the most
recently `refreshed` state of uris-resources published by the device.
In order to instantiate the correct types, the concrete subclass must
populate its registry with acceptable types, based on the `kind` field
returned by the REST server.
.. note::
This method implies a single REST transaction with the
Collection subclass URI.
:raises: UnregisteredKind
:returns: list of reference dicts and Python ``Resource`` objects
"""
list_of_contents = []
self.refresh(**kwargs)
if 'items' in self.__dict__:
for item in self.items:
# It's possible to have non-"kind" JSON returned. We just
# append the corresponding dict. PostProcessing is the caller's
# responsibility.
if 'kind' not in item:
list_of_contents.append(item)
continue
kind = item['kind']
if kind in self._meta_data['attribute_registry']:
# If it has a kind, it must be registered.
instance = self._meta_data['attribute_registry'][kind](self)
instance._local_update(item)
instance._activate_URI(instance.selfLink)
list_of_contents.append(instance)
else:
error_message = '%r is not registered!' % kind
raise UnregisteredKind(error_message)
return list_of_contents |
def schedule_job(self, j):
    """
    Add the job given by j to the job queue.

    Assigns the job a fresh hex UUID, persists it as an ORMJob row in this
    scheduler's app/namespace, and returns the new job id.

    Note: Does not actually run the job.

    :param j: job object; its ``job_id`` attribute is set as a side effect
    :returns: the generated job id (hex string)
    """
    job_id = uuid.uuid4().hex
    j.job_id = job_id
    session = self.sessionmaker()
    orm_job = ORMJob(
        id=job_id,
        state=j.state,
        app=self.app,
        namespace=self.namespace,
        obj=j)
    session.add(orm_job)
    try:
        session.commit()
    except Exception as e:
        # NOTE(review): commit failures are only logged; the job id is
        # still returned even though the job was not persisted. Presumably
        # intentional best-effort behavior — confirm with callers.
        logging.error(
            "Got an error running session.commit(): {}".format(e))
    return job_id
Note: Does not actually run the job. | Below is the the instruction that describes the task:
### Input:
Add the job given by j to the job queue.
Note: Does not actually run the job.
### Response:
def schedule_job(self, j):
"""
Add the job given by j to the job queue.
Note: Does not actually run the job.
"""
job_id = uuid.uuid4().hex
j.job_id = job_id
session = self.sessionmaker()
orm_job = ORMJob(
id=job_id,
state=j.state,
app=self.app,
namespace=self.namespace,
obj=j)
session.add(orm_job)
try:
session.commit()
except Exception as e:
logging.error(
"Got an error running session.commit(): {}".format(e))
return job_id |
def _check_timeindex(self, timeseries):
"""
Raises an error if time index of storage time series does not
comply with the time index of load and feed-in time series.
Parameters
-----------
timeseries : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
try:
timeseries.loc[self.edisgo.network.timeseries.timeindex]
except:
message = 'Time index of storage time series does not match ' \
'with load and feed-in time series.'
logging.error(message)
raise KeyError(message) | Raises an error if time index of storage time series does not
comply with the time index of load and feed-in time series.
Parameters
-----------
timeseries : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with in kW in column 'p' and
reactive power in kVA in column 'q'. | Below is the the instruction that describes the task:
### Input:
Raises an error if time index of storage time series does not
comply with the time index of load and feed-in time series.
Parameters
-----------
timeseries : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with in kW in column 'p' and
reactive power in kVA in column 'q'.
### Response:
def _check_timeindex(self, timeseries):
"""
Raises an error if time index of storage time series does not
comply with the time index of load and feed-in time series.
Parameters
-----------
timeseries : :pandas:`pandas.DataFrame<dataframe>`
DataFrame containing active power the storage is charged (negative)
and discharged (positive) with in kW in column 'p' and
reactive power in kVA in column 'q'.
"""
try:
timeseries.loc[self.edisgo.network.timeseries.timeindex]
except:
message = 'Time index of storage time series does not match ' \
'with load and feed-in time series.'
logging.error(message)
raise KeyError(message) |
def with_query(self, *args, **kwargs):
    """Return a new URL with query part replaced.

    Accepts any Mapping (e.g. dict, multidict.MultiDict instances)
    or str, autoencode the argument if needed.

    A sequence of (key, value) pairs is supported as well.

    It also can take an arbitrary number of keyword arguments.

    Clear query if None is passed.
    """
    # N.B. doesn't cleanup query/fragment
    new_query = self._get_str_query(*args, **kwargs)
    # Rebuild from the already-encoded split result; encoded=True skips
    # re-encoding work.
    return URL(
        self._val._replace(path=self._val.path, query=new_query), encoded=True
    )
Accepts any Mapping (e.g. dict, multidict.MultiDict instances)
or str, autoencode the argument if needed.
A sequence of (key, value) pairs is supported as well.
It also can take an arbitrary number of keyword arguments.
Clear query if None is passed. | Below is the the instruction that describes the task:
### Input:
Return a new URL with query part replaced.
Accepts any Mapping (e.g. dict, multidict.MultiDict instances)
or str, autoencode the argument if needed.
A sequence of (key, value) pairs is supported as well.
It also can take an arbitrary number of keyword arguments.
Clear query if None is passed.
### Response:
def with_query(self, *args, **kwargs):
"""Return a new URL with query part replaced.
Accepts any Mapping (e.g. dict, multidict.MultiDict instances)
or str, autoencode the argument if needed.
A sequence of (key, value) pairs is supported as well.
It also can take an arbitrary number of keyword arguments.
Clear query if None is passed.
"""
# N.B. doesn't cleanup query/fragment
new_query = self._get_str_query(*args, **kwargs)
return URL(
self._val._replace(path=self._val.path, query=new_query), encoded=True
) |
def raw_datastream_old(request, pid, dsid, type=None, repo=None,
                       headers=None, accept_range_request=False,
                       as_of_date=None, streaming=False):
    '''
    .. NOTE::

        This version of :meth:`raw_datastream` is deprecated, and you
        should update to the new :meth:`raw_datastream`. This version
        is still available if you are using a version of Fedora
        prior to 3.7 and need the additional functionality.

    View to display a raw datastream that belongs to a Fedora Object.
    Returns an :class:`~django.http.HttpResponse` with the response content
    populated with the content of the datastream. The following HTTP headers
    may be included in all the responses:

    - Content-Type: mimetype of the datastream in Fedora
    - ETag: datastream checksum, as long as the checksum type is not 'DISABLED'

    The following HTTP headers may be set if the appropriate content is
    included in the datastream metadata:

    - Content-MD5: MD5 checksum of the datastream in Fedora, if available
    - Content-Length: size of the datastream in Fedora

    If either the datastream or object are not found, raises an
    :class:`~django.http.Http404` . For any other errors (e.g., permission
    denied by Fedora), the exception is re-raised and should be handled
    elsewhere.

    :param request: HttpRequest
    :param pid: Fedora object PID
    :param dsid: datastream ID to be returned
    :param type: custom object type (should extend
        :class:`~eulcore.fedora.models.DigitalObject`) (optional)
    :param repo: :class:`~eulcore.django.fedora.server.Repository` instance
        to use, in case your application requires custom repository
        initialization (optional)
    :param headers: dictionary of additional headers to include in the response
    :param accept_range_request: enable HTTP Range requests (disabled by default)
    :param as_of_date: access a historical version of the datastream
    :param streaming: if True, response will be returned as an instance of
        :class:`django.http.StreamingHttpResponse` instead of
        :class:`django.http.HttpResponse`; intended for use with large
        datastreams, defaults to False.
    '''
    if repo is None:
        repo = Repository()
    if headers is None:
        headers = {}
    get_obj_opts = {}
    if type is not None:
        get_obj_opts['type'] = type
    obj = repo.get_object(pid, **get_obj_opts)
    range_request = False
    partial_request = False
    try:
        # NOTE: we could test that pid is actually the requested
        # obj.has_requisite_content_models but that would mean
        # an extra API call for every datastream but RELS-EXT
        # Leaving out for now, for efficiency
        ds = obj.getDatastreamObject(dsid, as_of_date=as_of_date)

        if ds and ds.exists:
            # because retrieving the content is expensive and checking
            # headers can be useful, explicitly support HEAD requests
            if request.method == 'HEAD':
                content = ''
            elif accept_range_request and request.META.get('HTTP_RANGE', None) is not None:
                rng = request.META['HTTP_RANGE']
                logger.debug('HTTP Range request: %s', rng)
                range_request = True
                kind, numbers = rng.split('=')
                if kind != 'bytes':
                    return HttpResponseRangeNotSatisfiable()

                try:
                    start, end = numbers.split('-')
                    # NOTE: could potentially be complicated stuff like
                    # this: 0-999,1002-9999,1-9999
                    # for now, only support the simple case of a single range
                except ValueError:
                    return HttpResponseRangeNotSatisfiable()

                start = int(start)
                if not end:
                    # open-ended range ("bytes=N-") runs to the last byte
                    end = ds.info.size - 1
                else:
                    end = int(end)

                # ignore requests where end is before start
                if end < start:
                    return HttpResponseRangeNotSatisfiable()

                if start == end:  # safari sends this (weird?); don't 500
                    partial_length = 0
                    partial_request = True
                    content = ''

                # special case for bytes=0-
                elif start == 0 and end == (ds.info.size - 1):
                    # set chunksize and end so range headers can be set on response
                    # partial_length= ds.info.size
                    partial_length = end - start
                    content = ds.get_chunked_content()

                # range with *NOT* full content requested
                elif start != 0 or end != (ds.info.size - 1):
                    partial_request = True
                    partial_length = end - start
                    # chunksize = min(end - start, 4096)
                    # sample chunk 370726-3005759
                    content = get_range_content(ds, start, end)

            else:
                # get the datastream content in chunks, to handle larger datastreams
                content = ds.get_chunked_content()
                # not using serialize(pretty=True) for XML/RDF datastreams, since
                # we actually want the raw datastream content.

            http_response_class = HttpResponse
            if streaming:
                http_response_class = StreamingHttpResponse
            response = http_response_class(content, content_type=ds.mimetype)
            # NOTE: might want to use StreamingHttpResponse here, at least
            # over some size threshold or for range requests

            # if we have a checksum, use it as an ETag
            # (but checksum not valid when sending partial content)
            if ds.checksum_type != 'DISABLED' and not partial_request:
                response['ETag'] = ds.checksum
            # ds.created is the creation date of this *version* of the datastream,
            # so it is effectively our last-modified date
            response['Last-Modified'] = ds.created

            # Where available, set content length & MD5 checksum in response headers.
            # (but checksum not valid when sending partial content)
            if ds.checksum_type == 'MD5' and not partial_request:
                response['Content-MD5'] = ds.checksum
            if ds.info.size and not range_request:
                response['Content-Length'] = ds.info.size
            if ds.info.size and accept_range_request:
                response['Accept-Ranges'] = 'bytes'
                # response['Content-Range'] = '0,%d/%d' % (ds.info.size, ds.info.size)

            # if partial request, status should be 206 (even for whole file?)
            if range_request:
                response.status_code = 206
                if partial_request:
                    response['Content-Length'] = partial_length
                else:
                    response['Content-Length'] = ds.info.size
                cont_range = 'bytes %d-%d/%d' % (start, end, ds.info.size)
                response['Content-Range'] = cont_range
                logger.debug('Content-Length=%s Content-Range=%s',
                             partial_length, cont_range)

            # set any user-specified headers that were passed in
            for header, val in six.iteritems(headers):
                response[header] = val

            # Fix for old Fedora data bug where the `Content-Length`
            # was -1. IF it is -1 we're just going to get rid of it.
            # Setting the value to an arbitrary value led to issues.
            # Guard the lookup: Content-Length is not set on every code
            # path (e.g. size-less datastream without a range request).
            if 'Content-Length' in response and \
                    int(response['Content-Length']) < 0:
                del response['Content-Length']

            return response
        else:
            raise Http404

    except RequestFailed as rf:
        # if object is not the speficied type or if either the object
        # or the requested datastream doesn't exist, 404
        if rf.code == 404 or \
                (type is not None and not obj.has_requisite_content_models) or \
                not getattr(obj, dsid).exists or not obj.exists:
            raise Http404
        # for anything else, re-raise & let Django's default 500 logic handle it
        raise
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that belongs to a Fedora Object.
Returns an :class:`~django.http.HttpResponse` with the response content
populated with the content of the datastream. The following HTTP headers
may be included in all the responses:
- Content-Type: mimetype of the datastream in Fedora
- ETag: datastream checksum, as long as the checksum type is not 'DISABLED'
The following HTTP headers may be set if the appropriate content is included
in the datastream metadata:
- Content-MD5: MD5 checksum of the datastream in Fedora, if available
- Content-Length: size of the datastream in Fedora
If either the datastream or object are not found, raises an
:class:`~django.http.Http404` . For any other errors (e.g., permission
denied by Fedora), the exception is re-raised and should be handled elsewhere.
:param request: HttpRequest
:param pid: Fedora object PID
:param dsid: datastream ID to be returned
:param type: custom object type (should extend
:class:`~eulcore.fedora.models.DigitalObject`) (optional)
:param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use,
in case your application requires custom repository initialization (optional)
:param headers: dictionary of additional headers to include in the response
:param accept_range_request: enable HTTP Range requests (disabled by default)
:param as_of_date: access a historical version of the datastream
:param streaming: if True, response will be returned as an instance of
:class:`django.http.StreamingHttpResponse` instead of
:class:`django.http.HttpResponse`; intended for use with large
datastreams, defaults to False. | Below is the the instruction that describes the task:
### Input:
.. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that belongs to a Fedora Object.
Returns an :class:`~django.http.HttpResponse` with the response content
populated with the content of the datastream. The following HTTP headers
may be included in all the responses:
- Content-Type: mimetype of the datastream in Fedora
- ETag: datastream checksum, as long as the checksum type is not 'DISABLED'
The following HTTP headers may be set if the appropriate content is included
in the datastream metadata:
- Content-MD5: MD5 checksum of the datastream in Fedora, if available
- Content-Length: size of the datastream in Fedora
If either the datastream or object are not found, raises an
:class:`~django.http.Http404` . For any other errors (e.g., permission
denied by Fedora), the exception is re-raised and should be handled elsewhere.
:param request: HttpRequest
:param pid: Fedora object PID
:param dsid: datastream ID to be returned
:param type: custom object type (should extend
:class:`~eulcore.fedora.models.DigitalObject`) (optional)
:param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use,
in case your application requires custom repository initialization (optional)
:param headers: dictionary of additional headers to include in the response
:param accept_range_request: enable HTTP Range requests (disabled by default)
:param as_of_date: access a historical version of the datastream
:param streaming: if True, response will be returned as an instance of
:class:`django.http.StreamingHttpResponse` instead of
:class:`django.http.HttpResponse`; intended for use with large
datastreams, defaults to False.
### Response:
def raw_datastream_old(request, pid, dsid, type=None, repo=None,
headers=None, accept_range_request=False,
as_of_date=None, streaming=False):
'''
.. NOTE::
This version of :meth:`raw_datastream` is deprecated, and you
should update to the new :meth:`raw_datastream`. This version
is still available if you are using a version of Fedora
prior to 3.7 and need the additional functionality.
View to display a raw datastream that belongs to a Fedora Object.
Returns an :class:`~django.http.HttpResponse` with the response content
populated with the content of the datastream. The following HTTP headers
may be included in all the responses:
- Content-Type: mimetype of the datastream in Fedora
- ETag: datastream checksum, as long as the checksum type is not 'DISABLED'
The following HTTP headers may be set if the appropriate content is included
in the datastream metadata:
- Content-MD5: MD5 checksum of the datastream in Fedora, if available
- Content-Length: size of the datastream in Fedora
If either the datastream or object are not found, raises an
:class:`~django.http.Http404` . For any other errors (e.g., permission
denied by Fedora), the exception is re-raised and should be handled elsewhere.
:param request: HttpRequest
:param pid: Fedora object PID
:param dsid: datastream ID to be returned
:param type: custom object type (should extend
:class:`~eulcore.fedora.models.DigitalObject`) (optional)
:param repo: :class:`~eulcore.django.fedora.server.Repository` instance to use,
in case your application requires custom repository initialization (optional)
:param headers: dictionary of additional headers to include in the response
:param accept_range_request: enable HTTP Range requests (disabled by default)
:param as_of_date: access a historical version of the datastream
:param streaming: if True, response will be returned as an instance of
:class:`django.http.StreamingHttpResponse` instead of
:class:`django.http.HttpResponse`; intended for use with large
datastreams, defaults to False.
'''
if repo is None:
repo = Repository()
if headers is None:
headers = {}
get_obj_opts = {}
if type is not None:
get_obj_opts['type'] = type
obj = repo.get_object(pid, **get_obj_opts)
range_request = False
partial_request = False
try:
# NOTE: we could test that pid is actually the requested
# obj.has_requisite_content_models but that would mean
# an extra API call for every datastream but RELS-EXT
# Leaving out for now, for efficiency
ds = obj.getDatastreamObject(dsid, as_of_date=as_of_date)
if ds and ds.exists:
# because retrieving the content is expensive and checking
# headers can be useful, explicitly support HEAD requests
if request.method == 'HEAD':
content = ''
elif accept_range_request and request.META.get('HTTP_RANGE', None) is not None:
rng = request.META['HTTP_RANGE']
logger.debug('HTTP Range request: %s', rng)
range_request = True
kind, numbers = rng.split('=')
if kind != 'bytes':
return HttpResponseRangeNotSatisfiable()
try:
start, end = numbers.split('-')
# NOTE: could potentially be complicated stuff like
# this: 0-999,1002-9999,1-9999
# for now, only support the simple case of a single range
except ValueError:
return HttpResponseRangeNotSatisfiable()
start = int(start)
if not end:
end = ds.info.size - 1
else:
end = int(end)
# ignore requests where end is before start
if end < start:
return HttpResponseRangeNotSatisfiable()
if start == end: # safari sends this (weird?); don't 500
partial_length = 0
partial_request = True
content = ''
# special case for bytes=0-
elif start == 0 and end == (ds.info.size - 1):
# set chunksize and end so range headers can be set on response
# partial_length= ds.info.size
partial_length = end - start
content = ds.get_chunked_content()
# range with *NOT* full content requested
elif start != 0 or end != (ds.info.size - 1):
partial_request = True
partial_length = end - start
# chunksize = min(end - start, 4096)
# sample chunk 370726-3005759
content = get_range_content(ds, start, end)
else:
# get the datastream content in chunks, to handle larger datastreams
content = ds.get_chunked_content()
# not using serialize(pretty=True) for XML/RDF datastreams, since
# we actually want the raw datastream content.
http_response_class = HttpResponse
if streaming:
http_response_class = StreamingHttpResponse
response = http_response_class(content, content_type=ds.mimetype)
# NOTE: might want to use StreamingHttpResponse here, at least
# over some size threshold or for range requests
# if we have a checksum, use it as an ETag
# (but checksum not valid when sending partial content)
if ds.checksum_type != 'DISABLED' and not partial_request:
response['ETag'] = ds.checksum
# ds.created is the creation date of this *version* of the datastream,
# so it is effectively our last-modified date
response['Last-Modified'] = ds.created
# Where available, set content length & MD5 checksum in response headers.
# (but checksum not valid when sending partial content)
if ds.checksum_type == 'MD5' and not partial_request:
response['Content-MD5'] = ds.checksum
if ds.info.size and not range_request:
response['Content-Length'] = ds.info.size
if ds.info.size and accept_range_request:
response['Accept-Ranges'] = 'bytes'
# response['Content-Range'] = '0,%d/%d' % (ds.info.size, ds.info.size)
# if partial request, status should be 206 (even for whole file?)
if range_request:
response.status_code = 206
if partial_request:
response['Content-Length'] = partial_length
else:
response['Content-Length'] = ds.info.size
cont_range = 'bytes %d-%d/%d' % (start, end, ds.info.size)
response['Content-Range'] = cont_range
logger.debug('Content-Length=%s Content-Range=%s',
partial_length, cont_range)
# set any user-specified headers that were passed in
for header, val in six.iteritems(headers):
response[header] = val
# Fix for old Fedora data bug where the `Content-Length`
# was -1. IF it is -1 we're just going to get rid of it.
# Setting the value to an arbitrary value led to issues.
if int(response['Content-Length']) < 0:
del response['Content-Length']
return response
else:
raise Http404
except RequestFailed as rf:
# if object is not the speficied type or if either the object
# or the requested datastream doesn't exist, 404
if rf.code == 404 or \
(type is not None and not obj.has_requisite_content_models) or \
not getattr(obj, dsid).exists or not obj.exists:
raise Http404
# for anything else, re-raise & let Django's default 500 logic handle it
raise |
def eval_string_value(self, element, value):
    """Evaluate parsed string.

    Walks the parsed CSS value tokens, concatenating literal text and
    function results into string segments. Returns a list of current and
    delayed values: plain strings, plus ``TargetVal`` placeholders for
    ``target-*`` functions that must be resolved later.
    """
    strval = ''
    vals = []
    for term in value:
        if type(term) is ast.WhitespaceToken:
            pass
        elif type(term) is ast.StringToken:
            strval += term.value
        elif type(term) is ast.IdentToken:
            log(DEBUG, u"IdentToken as string: {}".format(
                term.value).encode('utf-8'))
            strval += term.value
        elif type(term) is ast.LiteralToken:
            log(DEBUG, u"LiteralToken as string: {}".format(
                term.value).encode('utf-8'))
            strval += term.value
        elif type(term) is ast.FunctionBlock:
            if term.name == 'string':
                # string(name [, fallback]) -- look up a stored string,
                # optionally falling back to a second argument.
                str_args = split(term.arguments, ',')
                str_name = self.eval_string_value(element,
                                                  str_args[0])[0]
                val = self.lookup('strings', str_name)
                if val == '':
                    if len(str_args) > 1:
                        val = self.eval_string_value(element,
                                                     str_args[1])[0]
                    else:
                        log(WARN, u"{} blank string"
                            .format(str_name).encode('utf-8'))
                strval += val
            elif term.name == u'attr':
                # attr(name [, default]) -- read an attribute off the
                # current element; 'ns|name' uses a declared namespace.
                att_args = split(term.arguments, ',')
                att_name = self.eval_string_value(element,
                                                  att_args[0])[0]
                att_def = ''
                if len(att_args) > 1:
                    att_def = self.eval_string_value(element,
                                                     att_args[1])[0]
                if '|' in att_name:
                    ns, att = att_name.split('|')
                    try:
                        ns = self.css_namespaces[ns]
                    except KeyError:
                        log(WARN, u"Undefined namespace prefix {}"
                            .format(ns).encode('utf-8'))
                        continue
                    att_name = etree.QName(ns, att)
                strval += element.etree_element.get(att_name, att_def)
            elif term.name == u'uuid':
                strval += self.generate_id()
            elif term.name == u'content':
                # text content of the current element (tail excluded)
                strval += etree.tostring(element.etree_element,
                                         encoding='unicode',
                                         method='text',
                                         with_tail=False)
            elif term.name.startswith('target-'):
                # target-* is a delayed lookup: flush any accumulated
                # text, then append a TargetVal placeholder.
                if strval:
                    vals.append(strval)
                    strval = ''
                target_args = split(term.arguments, ',')
                vref = self.eval_string_value(element,
                                              target_args[0])[0]
                vname = self.eval_string_value(element,
                                               target_args[1])[0]
                vtype = term.name[7:]+'s'
                vals.append(TargetVal(self, vref[1:], vname, vtype))
            elif term.name == u'first-letter':
                tmpstr = self.eval_string_value(element, term.arguments)
                if tmpstr:
                    if isinstance(tmpstr[0], basestring):
                        strval += tmpstr[0][0]
                    else:
                        log(WARN, u"Bad string value:"
                            u" nested target-* not allowed. "
                            u"{}".format(
                                serialize(value)).encode(
                                    'utf-8'))
                # FIXME can we do delayed first-letter
            elif term.name == 'counter':
                counterargs = [serialize(t).strip(" \'")
                               for t in split(term.arguments, ',')]
                count = self.lookup('counters', counterargs)
                strval += str(count)
            elif term.name == u'pending':
                log(WARN, u"Bad string value: pending() not allowed. "
                    u"{}".format(serialize(value)).encode(
                        'utf-8'))
            else:
                log(WARN, u"Bad string value: unknown function: {}. "
                    u"{}".format(term.name, serialize(value)).encode(
                        'utf-8'))
    if strval:
        vals.append(strval)
    return vals
Returns a list of current and delayed values. | Below is the the instruction that describes the task:
### Input:
Evaluate parsed string.
Returns a list of current and delayed values.
### Response:
def eval_string_value(self, element, value):
"""Evaluate parsed string.
Returns a list of current and delayed values.
"""
strval = ''
vals = []
for term in value:
if type(term) is ast.WhitespaceToken:
pass
elif type(term) is ast.StringToken:
strval += term.value
elif type(term) is ast.IdentToken:
log(DEBUG, u"IdentToken as string: {}".format(
term.value).encode('utf-8'))
strval += term.value
elif type(term) is ast.LiteralToken:
log(DEBUG, u"LiteralToken as string: {}".format(
term.value).encode('utf-8'))
strval += term.value
elif type(term) is ast.FunctionBlock:
if term.name == 'string':
str_args = split(term.arguments, ',')
str_name = self.eval_string_value(element,
str_args[0])[0]
val = self.lookup('strings', str_name)
if val == '':
if len(str_args) > 1:
val = self.eval_string_value(element,
str_args[1])[0]
else:
log(WARN, u"{} blank string"
.format(str_name).encode('utf-8'))
strval += val
elif term.name == u'attr':
att_args = split(term.arguments, ',')
att_name = self.eval_string_value(element,
att_args[0])[0]
att_def = ''
if len(att_args) > 1:
att_def = self.eval_string_value(element,
att_args[1])[0]
if '|' in att_name:
ns, att = att_name.split('|')
try:
ns = self.css_namespaces[ns]
except KeyError:
log(WARN, u"Undefined namespace prefix {}"
.format(ns).encode('utf-8'))
continue
att_name = etree.QName(ns, att)
strval += element.etree_element.get(att_name, att_def)
elif term.name == u'uuid':
strval += self.generate_id()
elif term.name == u'content':
strval += etree.tostring(element.etree_element,
encoding='unicode',
method='text',
with_tail=False)
elif term.name.startswith('target-'):
if strval:
vals.append(strval)
strval = ''
target_args = split(term.arguments, ',')
vref = self.eval_string_value(element,
target_args[0])[0]
vname = self.eval_string_value(element,
target_args[1])[0]
vtype = term.name[7:]+'s'
vals.append(TargetVal(self, vref[1:], vname, vtype))
elif term.name == u'first-letter':
tmpstr = self.eval_string_value(element, term.arguments)
if tmpstr:
if isinstance(tmpstr[0], basestring):
strval += tmpstr[0][0]
else:
log(WARN, u"Bad string value:"
u" nested target-* not allowed. "
u"{}".format(
serialize(value)).encode(
'utf-8'))
# FIXME can we do delayed first-letter
elif term.name == 'counter':
counterargs = [serialize(t).strip(" \'")
for t in split(term.arguments, ',')]
count = self.lookup('counters', counterargs)
strval += str(count)
elif term.name == u'pending':
log(WARN, u"Bad string value: pending() not allowed. "
u"{}".format(serialize(value)).encode(
'utf-8'))
else:
log(WARN, u"Bad string value: unknown function: {}. "
u"{}".format(term.name, serialize(value)).encode(
'utf-8'))
if strval:
vals.append(strval)
return vals |
def match_pattern(regex):
    """
    Return a value check function which raises a ValueError if the value does
    not match the supplied regular expression, see also `re.match`.

    The pattern is compiled once up front and anchored at the start of the
    value (``re.match`` semantics); the returned checker returns ``None``
    on success and raises ``ValueError(v)`` on failure.
    """
    prog = re.compile(regex)
    def checker(v):
        result = prog.match(v)
        if result is None:
            raise ValueError(v)
    return checker
not match the supplied regular expression, see also `re.match`. | Below is the the instruction that describes the task:
### Input:
Return a value check function which raises a ValueError if the value does
not match the supplied regular expression, see also `re.match`.
### Response:
def match_pattern(regex):
"""
Return a value check function which raises a ValueError if the value does
not match the supplied regular expression, see also `re.match`.
"""
prog = re.compile(regex)
def checker(v):
result = prog.match(v)
if result is None:
raise ValueError(v)
return checker |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.