code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def remove_zero_normals(self):
"""Removes normal vectors with a zero magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0]
self._data = self._data[:, points_of_interest] | Removes normal vectors with a zero magnitude.
Note
----
    This returns nothing and updates the NormalCloud in-place. | Below is the instruction that describes the task:
### Input:
Removes normal vectors with a zero magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
### Response:
def remove_zero_normals(self):
"""Removes normal vectors with a zero magnitude.
Note
----
This returns nothing and updates the NormalCloud in-place.
"""
points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0]
self._data = self._data[:, points_of_interest] |
def get_index_range(working_dir):
"""
Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about.
"""
bitcoind_session = get_bitcoind(new=True)
assert bitcoind_session is not None
first_block = None
last_block = None
wait = 1.0
while last_block is None and is_running():
first_block, last_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
if first_block is None or last_block is None:
# try to reconnnect
log.error("Reconnect to bitcoind in {} seconds".format(wait))
time.sleep(wait)
wait = min(wait * 2.0 + random.random() * wait, 60)
bitcoind_session = get_bitcoind( new=True )
continue
else:
return first_block, last_block - NUM_CONFIRMATIONS
return None, None | Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
    cryptocurrency node knows about. | Below is the instruction that describes the task:
### Input:
Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about.
### Response:
def get_index_range(working_dir):
"""
Get the bitcoin block index range.
Mask connection failures with timeouts.
Always try to reconnect.
The last block will be the last block to search for names.
This will be NUM_CONFIRMATIONS behind the actual last-block the
cryptocurrency node knows about.
"""
bitcoind_session = get_bitcoind(new=True)
assert bitcoind_session is not None
first_block = None
last_block = None
wait = 1.0
while last_block is None and is_running():
first_block, last_block = virtualchain.get_index_range('bitcoin', bitcoind_session, virtualchain_hooks, working_dir)
if first_block is None or last_block is None:
# try to reconnnect
log.error("Reconnect to bitcoind in {} seconds".format(wait))
time.sleep(wait)
wait = min(wait * 2.0 + random.random() * wait, 60)
bitcoind_session = get_bitcoind( new=True )
continue
else:
return first_block, last_block - NUM_CONFIRMATIONS
return None, None |
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
        node._async = False | Visit a function node. | Below is the instruction that describes the task:
### Input:
Visit a function node.
### Response:
def visit_FunctionDef(self, node):
"""Visit a function node."""
node = self.get_function_node(node)
if node is not None:
node._async = False |
def to_string(self):
"""
Return the current NDEF as a string (always 64 bytes).
"""
data = self.ndef_str
if self.ndef_type == _NDEF_URI_TYPE:
data = self._encode_ndef_uri_type(data)
elif self.ndef_type == _NDEF_TEXT_TYPE:
data = self._encode_ndef_text_params(data)
if len(data) > _NDEF_DATA_SIZE:
raise YubiKeyNEO_USBHIDError("NDEF payload too long")
# typedef struct {
# unsigned char len; // Payload length
# unsigned char type; // NDEF type specifier
# unsigned char data[NDEF_DATA_SIZE]; // Payload size
# unsigned char curAccCode[ACC_CODE_SIZE]; // Access code
# } YKNDEF;
#
fmt = '< B B %ss %ss' % (_NDEF_DATA_SIZE, _ACC_CODE_SIZE)
first = struct.pack(fmt,
len(data),
self.ndef_type,
data.ljust(_NDEF_DATA_SIZE, b'\0'),
self.access_code,
)
#crc = 0xffff - yubico_util.crc16(first)
#second = first + struct.pack('<H', crc) + self.unlock_code
    return first | Return the current NDEF as a string (always 64 bytes). | Below is the instruction that describes the task:
### Input:
Return the current NDEF as a string (always 64 bytes).
### Response:
def to_string(self):
"""
Return the current NDEF as a string (always 64 bytes).
"""
data = self.ndef_str
if self.ndef_type == _NDEF_URI_TYPE:
data = self._encode_ndef_uri_type(data)
elif self.ndef_type == _NDEF_TEXT_TYPE:
data = self._encode_ndef_text_params(data)
if len(data) > _NDEF_DATA_SIZE:
raise YubiKeyNEO_USBHIDError("NDEF payload too long")
# typedef struct {
# unsigned char len; // Payload length
# unsigned char type; // NDEF type specifier
# unsigned char data[NDEF_DATA_SIZE]; // Payload size
# unsigned char curAccCode[ACC_CODE_SIZE]; // Access code
# } YKNDEF;
#
fmt = '< B B %ss %ss' % (_NDEF_DATA_SIZE, _ACC_CODE_SIZE)
first = struct.pack(fmt,
len(data),
self.ndef_type,
data.ljust(_NDEF_DATA_SIZE, b'\0'),
self.access_code,
)
#crc = 0xffff - yubico_util.crc16(first)
#second = first + struct.pack('<H', crc) + self.unlock_code
return first |
def read_tf_records(batch_size, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None, interleave=True,
filter_amount=1.0):
"""
Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
num_repeats: how many times the data should be read (default: One)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
interleave: iwhether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
a tf dataset of batched tensors
"""
if shuffle_examples and not shuffle_buffer_size:
raise ValueError("Must set shuffle buffer size if shuffling examples")
tf_records = list(tf_records)
if shuffle_records:
random.shuffle(tf_records)
record_list = tf.data.Dataset.from_tensor_slices(tf_records)
# compression_type here must agree with write_tf_examples
map_func = functools.partial(
tf.data.TFRecordDataset,
buffer_size=8 * 1024 * 1024,
compression_type='ZLIB')
if interleave:
# cycle_length = how many tfrecord files are read in parallel
# The idea is to shuffle both the order of the files being read,
# and the examples being read from the files.
dataset = record_list.apply(tf.contrib.data.parallel_interleave(
map_func, cycle_length=64, sloppy=True))
else:
dataset = record_list.flat_map(map_func)
if filter_amount < 1.0:
dataset = dataset.filter(
lambda _: tf.random_uniform([]) < filter_amount)
dataset = dataset.repeat(num_repeats)
if shuffle_examples:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.batch(batch_size)
return dataset | Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
num_repeats: how many times the data should be read (default: One)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
interleave: iwhether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
        a tf dataset of batched tensors | Below is the instruction that describes the task:
### Input:
Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
num_repeats: how many times the data should be read (default: One)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
interleave: iwhether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
a tf dataset of batched tensors
### Response:
def read_tf_records(batch_size, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None, interleave=True,
filter_amount=1.0):
"""
Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
num_repeats: how many times the data should be read (default: One)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
interleave: iwhether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
a tf dataset of batched tensors
"""
if shuffle_examples and not shuffle_buffer_size:
raise ValueError("Must set shuffle buffer size if shuffling examples")
tf_records = list(tf_records)
if shuffle_records:
random.shuffle(tf_records)
record_list = tf.data.Dataset.from_tensor_slices(tf_records)
# compression_type here must agree with write_tf_examples
map_func = functools.partial(
tf.data.TFRecordDataset,
buffer_size=8 * 1024 * 1024,
compression_type='ZLIB')
if interleave:
# cycle_length = how many tfrecord files are read in parallel
# The idea is to shuffle both the order of the files being read,
# and the examples being read from the files.
dataset = record_list.apply(tf.contrib.data.parallel_interleave(
map_func, cycle_length=64, sloppy=True))
else:
dataset = record_list.flat_map(map_func)
if filter_amount < 1.0:
dataset = dataset.filter(
lambda _: tf.random_uniform([]) < filter_amount)
dataset = dataset.repeat(num_repeats)
if shuffle_examples:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.batch(batch_size)
return dataset |
def _set_tm_voq(self, v, load=False):
"""
Setter method for tm_voq, mapped from YANG variable /telemetry/profile/tm_voq (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tm_voq is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tm_voq() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",tm_voq.tm_voq, yang_name="tm-voq", rest_name="tm-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}), is_container='list', yang_name="tm-voq", rest_name="tm-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tm_voq must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",tm_voq.tm_voq, yang_name="tm-voq", rest_name="tm-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}), is_container='list', yang_name="tm-voq", rest_name="tm-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
})
self.__tm_voq = t
if hasattr(self, '_set'):
self._set() | Setter method for tm_voq, mapped from YANG variable /telemetry/profile/tm_voq (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tm_voq is considered as a private
method. Backends looking to populate this variable should
    do so via calling thisObj._set_tm_voq() directly. | Below is the instruction that describes the task:
### Input:
Setter method for tm_voq, mapped from YANG variable /telemetry/profile/tm_voq (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tm_voq is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tm_voq() directly.
### Response:
def _set_tm_voq(self, v, load=False):
"""
Setter method for tm_voq, mapped from YANG variable /telemetry/profile/tm_voq (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_tm_voq is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tm_voq() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",tm_voq.tm_voq, yang_name="tm-voq", rest_name="tm-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}), is_container='list', yang_name="tm-voq", rest_name="tm-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tm_voq must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",tm_voq.tm_voq, yang_name="tm-voq", rest_name="tm-voq", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}), is_container='list', yang_name="tm-voq", rest_name="tm-voq", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'TmVoqProfile', u'info': u'TM VOQ'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
})
self.__tm_voq = t
if hasattr(self, '_set'):
self._set() |
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
    return data | Read PLY data from a readable file-like object or filename. | Below is the instruction that describes the task:
### Input:
Read PLY data from a readable file-like object or filename.
### Response:
def read(stream):
'''
Read PLY data from a readable file-like object or filename.
'''
(must_close, stream) = _open_stream(stream, 'read')
try:
data = PlyData._parse_header(stream)
for elt in data:
elt._read(stream, data.text, data.byte_order)
finally:
if must_close:
stream.close()
return data |
def to_internal_value(self, data):
"""
Updates _validated_data with dynamic data, i.e. data,
not listed in fields.
"""
ret = super(DynamicDocumentSerializer, self).to_internal_value(data)
dynamic_data = self._get_dynamic_data(ret)
ret.update(dynamic_data)
return ret | Updates _validated_data with dynamic data, i.e. data,
        not listed in fields. | Below is the instruction that describes the task:
### Input:
Updates _validated_data with dynamic data, i.e. data,
not listed in fields.
### Response:
def to_internal_value(self, data):
"""
Updates _validated_data with dynamic data, i.e. data,
not listed in fields.
"""
ret = super(DynamicDocumentSerializer, self).to_internal_value(data)
dynamic_data = self._get_dynamic_data(ret)
ret.update(dynamic_data)
return ret |
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
first = self.__class__('%s/%s' %
(self.network_address,
self._prefixlen + prefixlen_diff))
yield first
current = first
while True:
broadcast = current.broadcast_address
if broadcast == self.broadcast_address:
return
new_addr = self._address_class(int(broadcast) + 1)
current = self.__class__('%s/%s' % (new_addr,
new_prefixlen))
yield current | The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
        number means a larger network) | Below is the instruction that describes the task:
### Input:
The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
### Response:
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
first = self.__class__('%s/%s' %
(self.network_address,
self._prefixlen + prefixlen_diff))
yield first
current = first
while True:
broadcast = current.broadcast_address
if broadcast == self.broadcast_address:
return
new_addr = self._address_class(int(broadcast) + 1)
current = self.__class__('%s/%s' % (new_addr,
new_prefixlen))
yield current |
def _parse_configs(self, config):
"""Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
Exception('No configuration provided'): no configuration provided.
"""
for config_dict in config:
label = config_dict.keys()[0]
cfg = config_dict[label]
# Transform dbpath to something digestable by regexp.
dbpath = cfg['dbpath']
pattern = self._parse_dbpath(dbpath)
read_preference = cfg.get('read_preference', 'primary').upper()
read_preference = self._get_read_preference(read_preference)
# Put all parameters that could be passed to pymongo.MongoClient
# in a separate dict, to ease MongoClient creation.
cluster_config = {
'params': {
'host': cfg['host'],
'port': cfg['port'],
'read_preference': read_preference,
'replicaSet': cfg.get('replicaSet')
},
'pattern': pattern,
'label': label
}
self._clusters.append(cluster_config) | Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
        Exception('No configuration provided'): no configuration provided. | Below is the instruction that describes the task:
### Input:
Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
Exception('No configuration provided'): no configuration provided.
### Response:
def _parse_configs(self, config):
"""Builds a dict with information to connect to Clusters.
Parses the list of configuration dictionaries passed by the user and
builds an internal dict (_clusters) that holds information for creating
Clients connecting to Clusters and matching database names.
Args:
config: A list of dictionaries containing connecting and
identification information about Clusters.
A dict has the following structure:
{label: {host, port, read_preference, dbpath}}.
Raises:
Exception('No configuration provided'): no configuration provided.
"""
for config_dict in config:
label = config_dict.keys()[0]
cfg = config_dict[label]
# Transform dbpath to something digestable by regexp.
dbpath = cfg['dbpath']
pattern = self._parse_dbpath(dbpath)
read_preference = cfg.get('read_preference', 'primary').upper()
read_preference = self._get_read_preference(read_preference)
# Put all parameters that could be passed to pymongo.MongoClient
# in a separate dict, to ease MongoClient creation.
cluster_config = {
'params': {
'host': cfg['host'],
'port': cfg['port'],
'read_preference': read_preference,
'replicaSet': cfg.get('replicaSet')
},
'pattern': pattern,
'label': label
}
self._clusters.append(cluster_config) |
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
if self._key_ranges is None:
key_ranges_json = None
else:
key_ranges_json = []
for k in self._key_ranges:
if k:
key_ranges_json.append(k.to_json())
else:
key_ranges_json.append(None)
if self._ns_range is None:
namespace_range_json = None
else:
namespace_range_json = self._ns_range.to_json_object()
if self._current_key_range is None:
current_key_range_json = None
else:
current_key_range_json = self._current_key_range.to_json()
json_dict = {self.KEY_RANGE_PARAM: key_ranges_json,
self.NAMESPACE_RANGE_PARAM: namespace_range_json,
self.CURRENT_KEY_RANGE_PARAM: current_key_range_json,
self.ENTITY_KIND_PARAM: self._entity_kind,
self.BATCH_SIZE_PARAM: self._batch_size,
self.FILTERS_PARAM: self._filters}
return json_dict | Serializes all the data in this query range into json form.
Returns:
      all the data in json-compatible map. | Below is the instruction that describes the task:
### Input:
Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
### Response:
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
if self._key_ranges is None:
key_ranges_json = None
else:
key_ranges_json = []
for k in self._key_ranges:
if k:
key_ranges_json.append(k.to_json())
else:
key_ranges_json.append(None)
if self._ns_range is None:
namespace_range_json = None
else:
namespace_range_json = self._ns_range.to_json_object()
if self._current_key_range is None:
current_key_range_json = None
else:
current_key_range_json = self._current_key_range.to_json()
json_dict = {self.KEY_RANGE_PARAM: key_ranges_json,
self.NAMESPACE_RANGE_PARAM: namespace_range_json,
self.CURRENT_KEY_RANGE_PARAM: current_key_range_json,
self.ENTITY_KIND_PARAM: self._entity_kind,
self.BATCH_SIZE_PARAM: self._batch_size,
self.FILTERS_PARAM: self._filters}
return json_dict |
def rebuild(mode=''):
"""Rebuild the site with a nice UI."""
scan_site() # for good measure
if not current_user.can_rebuild_site:
return error('You are not permitted to rebuild the site.</p>'
'<p class="lead">Contact an administartor for '
'more information.', 401)
if db is not None:
db.set('site:needs_rebuild', '-1')
if not q.fetch_job('build') and not q.fetch_job('orphans'):
b = q.enqueue_call(func=coil.tasks.build,
args=(app.config['REDIS_URL'],
app.config['NIKOLA_ROOT'], mode),
job_id='build')
q.enqueue_call(func=coil.tasks.orphans,
args=(app.config['REDIS_URL'],
app.config['NIKOLA_ROOT']), job_id='orphans',
depends_on=b)
return render('coil_rebuild.tmpl', {'title': 'Rebuild'})
else:
status, outputb = coil.tasks.build_single(mode)
_, outputo = coil.tasks.orphans_single()
site.coil_needs_rebuild = '0'
return render('coil_rebuild_single.tmpl',
{'title': 'Rebuild', 'status': '1' if status else '0',
                       'outputb': outputb, 'outputo': outputo}) | Rebuild the site with a nice UI. | Below is the instruction that describes the task:
### Input:
Rebuild the site with a nice UI.
### Response:
def rebuild(mode=''):
"""Rebuild the site with a nice UI."""
scan_site() # for good measure
if not current_user.can_rebuild_site:
return error('You are not permitted to rebuild the site.</p>'
'<p class="lead">Contact an administartor for '
'more information.', 401)
if db is not None:
db.set('site:needs_rebuild', '-1')
if not q.fetch_job('build') and not q.fetch_job('orphans'):
b = q.enqueue_call(func=coil.tasks.build,
args=(app.config['REDIS_URL'],
app.config['NIKOLA_ROOT'], mode),
job_id='build')
q.enqueue_call(func=coil.tasks.orphans,
args=(app.config['REDIS_URL'],
app.config['NIKOLA_ROOT']), job_id='orphans',
depends_on=b)
return render('coil_rebuild.tmpl', {'title': 'Rebuild'})
else:
status, outputb = coil.tasks.build_single(mode)
_, outputo = coil.tasks.orphans_single()
site.coil_needs_rebuild = '0'
return render('coil_rebuild_single.tmpl',
{'title': 'Rebuild', 'status': '1' if status else '0',
'outputb': outputb, 'outputo': outputo}) |
def skip_status(*skipped):
"""Decorator to skip this call if we're in one of the skipped states."""
def decorator(func):
@functools.wraps(func)
def _skip_status(self, *args, **kwargs):
if self.status not in skipped:
return func(self, *args, **kwargs)
return _skip_status
    return decorator | Decorator to skip this call if we're in one of the skipped states. | Below is the instruction that describes the task:
### Input:
Decorator to skip this call if we're in one of the skipped states.
### Response:
def skip_status(*skipped):
"""Decorator to skip this call if we're in one of the skipped states."""
def decorator(func):
@functools.wraps(func)
def _skip_status(self, *args, **kwargs):
if self.status not in skipped:
return func(self, *args, **kwargs)
return _skip_status
return decorator |
def _addIndex(catalog, index, indextype):
"""
This function indexes the index element into the catalog if it isn't yet.
:catalog: a catalog object
:index: an index id as string
:indextype: the type of the index as string
:returns: a boolean as True if the element has been indexed and it returns
False otherwise.
"""
if index not in catalog.indexes():
try:
if indextype == 'ZCTextIndex':
addZCTextIndex(catalog, index)
else:
catalog.addIndex(index, indextype)
logger.info('Catalog index %s added to %s.' % (index, catalog.id))
return True
except:
logger.error(
'Catalog index %s error while adding to %s.'
% (index, catalog.id))
return False | This function indexes the index element into the catalog if it isn't yet.
:catalog: a catalog object
:index: an index id as string
:indextype: the type of the index as string
:returns: a boolean as True if the element has been indexed and it returns
    False otherwise. | Below is the instruction that describes the task:
### Input:
This function indexes the index element into the catalog if it isn't yet.
:catalog: a catalog object
:index: an index id as string
:indextype: the type of the index as string
:returns: a boolean as True if the element has been indexed and it returns
False otherwise.
### Response:
def _addIndex(catalog, index, indextype):
"""
This function indexes the index element into the catalog if it isn't yet.
:catalog: a catalog object
:index: an index id as string
:indextype: the type of the index as string
:returns: a boolean as True if the element has been indexed and it returns
False otherwise.
"""
if index not in catalog.indexes():
try:
if indextype == 'ZCTextIndex':
addZCTextIndex(catalog, index)
else:
catalog.addIndex(index, indextype)
logger.info('Catalog index %s added to %s.' % (index, catalog.id))
return True
except:
logger.error(
'Catalog index %s error while adding to %s.'
% (index, catalog.id))
return False |
def float_format(self, value):
"""Validate and set the upper case flag."""
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.') | Validate and set the upper case flag. | Below is the the instruction that describes the task:
### Input:
Validate and set the upper case flag.
### Response:
def float_format(self, value):
"""Validate and set the upper case flag."""
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.') |
def update_user(self, name=None, email=None, blog=None,
company=None, location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters. All parameters are
optional.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company: company name
:param str location: where you are located
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
user = self.user()
return user.update(name, email, blog, company, location, hireable,
bio) | If authenticated as this user, update the information with
the information provided in the parameters. All parameters are
optional.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company: company name
:param str location: where you are located
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool | Below is the the instruction that describes the task:
### Input:
If authenticated as this user, update the information with
the information provided in the parameters. All parameters are
optional.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company: company name
:param str location: where you are located
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
### Response:
def update_user(self, name=None, email=None, blog=None,
company=None, location=None, hireable=False, bio=None):
"""If authenticated as this user, update the information with
the information provided in the parameters. All parameters are
optional.
:param str name: e.g., 'John Smith', not login name
:param str email: e.g., 'john.smith@example.com'
:param str blog: e.g., 'http://www.example.com/jsmith/blog'
:param str company: company name
:param str location: where you are located
:param bool hireable: defaults to False
:param str bio: GitHub flavored markdown
:returns: bool
"""
user = self.user()
return user.update(name, email, blog, company, location, hireable,
bio) |
def __get_post_review(email_cnt, idx):
'''
Review for posts.
'''
for key in router_post:
recent_posts = MPost.query_recent_edited(tools.timestamp() - TIME_LIMIT, kind=key)
for recent_post in recent_posts:
hist_rec = MPostHist.get_last(recent_post.uid)
if hist_rec:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_chg">Edit</td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], router_post[key],
recent_post.uid))
email_cnt = email_cnt + foo_str
else:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_add">New </td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], router_post[key],
recent_post.uid))
email_cnt = email_cnt + foo_str
idx = idx + 1
return email_cnt, idx | Review for posts. | Below is the the instruction that describes the task:
### Input:
Review for posts.
### Response:
def __get_post_review(email_cnt, idx):
'''
Review for posts.
'''
for key in router_post:
recent_posts = MPost.query_recent_edited(tools.timestamp() - TIME_LIMIT, kind=key)
for recent_post in recent_posts:
hist_rec = MPostHist.get_last(recent_post.uid)
if hist_rec:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_chg">Edit</td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], router_post[key],
recent_post.uid))
email_cnt = email_cnt + foo_str
else:
foo_str = '''
<tr><td>{0}</td><td>{1}</td><td class="diff_add">New </td><td>{2}</td>
<td><a href="{3}">{3}</a></td></tr>
'''.format(idx, recent_post.user_name, recent_post.title,
os.path.join(SITE_CFG['site_url'], router_post[key],
recent_post.uid))
email_cnt = email_cnt + foo_str
idx = idx + 1
return email_cnt, idx |
def connection_from_promised_list(data_promise, args=None, **kwargs):
'''
A version of `connectionFromArray` that takes a promised array, and returns a
promised connection.
'''
return data_promise.then(lambda data: connection_from_list(data, args, **kwargs)) | A version of `connectionFromArray` that takes a promised array, and returns a
promised connection. | Below is the the instruction that describes the task:
### Input:
A version of `connectionFromArray` that takes a promised array, and returns a
promised connection.
### Response:
def connection_from_promised_list(data_promise, args=None, **kwargs):
'''
A version of `connectionFromArray` that takes a promised array, and returns a
promised connection.
'''
return data_promise.then(lambda data: connection_from_list(data, args, **kwargs)) |
def _parse_file(self, sar_parts):
"""
Parses splitted file to get proper information from split parts.
:param sar_parts: Array of SAR file parts
:return: ``Dictionary``-style info (but still non-parsed) \
from SAR file, split into sections we want to check
"""
usage = {}
output = {}
# If sar_parts is a list
if type(sar_parts) is list:
restart_pattern = re.compile(PATTERN_RESTART)
""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """
""" ********** ATTENTION ******* """
""" THERE CAN BE MORE THAN ONE SAME SECTION IN ONE FILE """
""" IF SYSTEM WAS REBOOTED DURING THE DAY """
""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """
for PATTERNSNAME in ALL_PATTERNS:
patterns = ALL_PATTERNS[PATTERNSNAME]
rgxpattern = re.compile(patterns['PATTERN'])
for part in sar_parts:
if rgxpattern.search(part):
if PATTERNSNAME in usage:
usage[PATTERNSNAME] += '\n' + part
else:
usage[PATTERNSNAME] = part
try:
first_line = part.split('\n')[0]
except IndexError:
first_line = part
self.__fields[PATTERNSNAME] = self.__find_column(patterns['FIELDS'], first_line)
# Try to match restart time
if restart_pattern.search(part):
pieces = part.split()
self.__restart_times.append(pieces[0])
del pieces
del sar_parts
# Now we have parts pulled out and combined, do further
# processing.
for PATTERNSNAME in ALL_PATTERNS:
patterns = ALL_PATTERNS[PATTERNSNAME]
output[PATTERNSNAME] = self.__split_info(usage[PATTERNSNAME], PATTERNSNAME, patterns)
del usage
return output
return output | Parses splitted file to get proper information from split parts.
:param sar_parts: Array of SAR file parts
:return: ``Dictionary``-style info (but still non-parsed) \
from SAR file, split into sections we want to check | Below is the the instruction that describes the task:
### Input:
Parses splitted file to get proper information from split parts.
:param sar_parts: Array of SAR file parts
:return: ``Dictionary``-style info (but still non-parsed) \
from SAR file, split into sections we want to check
### Response:
def _parse_file(self, sar_parts):
"""
Parses splitted file to get proper information from split parts.
:param sar_parts: Array of SAR file parts
:return: ``Dictionary``-style info (but still non-parsed) \
from SAR file, split into sections we want to check
"""
usage = {}
output = {}
# If sar_parts is a list
if type(sar_parts) is list:
restart_pattern = re.compile(PATTERN_RESTART)
""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """
""" ********** ATTENTION ******* """
""" THERE CAN BE MORE THAN ONE SAME SECTION IN ONE FILE """
""" IF SYSTEM WAS REBOOTED DURING THE DAY """
""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! """
for PATTERNSNAME in ALL_PATTERNS:
patterns = ALL_PATTERNS[PATTERNSNAME]
rgxpattern = re.compile(patterns['PATTERN'])
for part in sar_parts:
if rgxpattern.search(part):
if PATTERNSNAME in usage:
usage[PATTERNSNAME] += '\n' + part
else:
usage[PATTERNSNAME] = part
try:
first_line = part.split('\n')[0]
except IndexError:
first_line = part
self.__fields[PATTERNSNAME] = self.__find_column(patterns['FIELDS'], first_line)
# Try to match restart time
if restart_pattern.search(part):
pieces = part.split()
self.__restart_times.append(pieces[0])
del pieces
del sar_parts
# Now we have parts pulled out and combined, do further
# processing.
for PATTERNSNAME in ALL_PATTERNS:
patterns = ALL_PATTERNS[PATTERNSNAME]
output[PATTERNSNAME] = self.__split_info(usage[PATTERNSNAME], PATTERNSNAME, patterns)
del usage
return output
return output |
def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
"""Compute an optimal scale for logarithmic"""
if max_ <= 0 or min_ <= 0:
return []
min_order = int(floor(log10(min_)))
max_order = int(ceil(log10(max_)))
positions = []
amplitude = max_order - min_order
if amplitude <= 1:
return []
detail = 10.
while amplitude * detail < min_scale * 5:
detail *= 2
while amplitude * detail > max_scale * 3:
detail /= 2
for order in range(min_order, max_order + 1):
for i in range(int(detail)):
tick = (10 * i / detail or 1) * 10**order
tick = round_to_scale(tick, tick)
if min_ <= tick <= max_ and tick not in positions:
positions.append(tick)
return positions | Compute an optimal scale for logarithmic | Below is the the instruction that describes the task:
### Input:
Compute an optimal scale for logarithmic
### Response:
def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
"""Compute an optimal scale for logarithmic"""
if max_ <= 0 or min_ <= 0:
return []
min_order = int(floor(log10(min_)))
max_order = int(ceil(log10(max_)))
positions = []
amplitude = max_order - min_order
if amplitude <= 1:
return []
detail = 10.
while amplitude * detail < min_scale * 5:
detail *= 2
while amplitude * detail > max_scale * 3:
detail /= 2
for order in range(min_order, max_order + 1):
for i in range(int(detail)):
tick = (10 * i / detail or 1) * 10**order
tick = round_to_scale(tick, tick)
if min_ <= tick <= max_ and tick not in positions:
positions.append(tick)
return positions |
def version(bin_env=None):
'''
.. versionadded:: 0.17.0
Returns the version of pip. Use ``bin_env`` to specify the path to a
virtualenv and get the version of pip in that virtualenv.
If unable to detect the pip version, returns ``None``.
CLI Example:
.. code-block:: bash
salt '*' pip.version
'''
contextkey = 'pip.version'
if bin_env is not None:
contextkey = '{0}.{1}'.format(contextkey, bin_env)
if contextkey in __context__:
return __context__[contextkey]
cmd = _get_pip_bin(bin_env)[:]
cmd.append('--version')
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode']:
raise CommandNotFoundError('Could not find a `pip` binary')
try:
pip_version = re.match(r'^pip (\S+)', ret['stdout']).group(1)
except AttributeError:
pip_version = None
__context__[contextkey] = pip_version
return pip_version | .. versionadded:: 0.17.0
Returns the version of pip. Use ``bin_env`` to specify the path to a
virtualenv and get the version of pip in that virtualenv.
If unable to detect the pip version, returns ``None``.
CLI Example:
.. code-block:: bash
salt '*' pip.version | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 0.17.0
Returns the version of pip. Use ``bin_env`` to specify the path to a
virtualenv and get the version of pip in that virtualenv.
If unable to detect the pip version, returns ``None``.
CLI Example:
.. code-block:: bash
salt '*' pip.version
### Response:
def version(bin_env=None):
'''
.. versionadded:: 0.17.0
Returns the version of pip. Use ``bin_env`` to specify the path to a
virtualenv and get the version of pip in that virtualenv.
If unable to detect the pip version, returns ``None``.
CLI Example:
.. code-block:: bash
salt '*' pip.version
'''
contextkey = 'pip.version'
if bin_env is not None:
contextkey = '{0}.{1}'.format(contextkey, bin_env)
if contextkey in __context__:
return __context__[contextkey]
cmd = _get_pip_bin(bin_env)[:]
cmd.append('--version')
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
if ret['retcode']:
raise CommandNotFoundError('Could not find a `pip` binary')
try:
pip_version = re.match(r'^pip (\S+)', ret['stdout']).group(1)
except AttributeError:
pip_version = None
__context__[contextkey] = pip_version
return pip_version |
def filename(self):
"""
Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename
"""
if self._filename is None:
self._filename = storage.get_file(self.basename,
self.ccd,
ext=self.extension,
version=self.type,
prefix=self.prefix)
return self._filename | Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename | Below is the the instruction that describes the task:
### Input:
Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename
### Response:
def filename(self):
"""
Name if the MOP formatted file to parse.
@rtype: basestring
@return: filename
"""
if self._filename is None:
self._filename = storage.get_file(self.basename,
self.ccd,
ext=self.extension,
version=self.type,
prefix=self.prefix)
return self._filename |
def _create_tar(self, name):
"""Create TAR file."""
fileobj = utils.create_spooled_temporary_file()
mode = 'w:gz' if self.compress else 'w'
tar_file = tarfile.open(name=name, fileobj=fileobj, mode=mode)
for media_filename in self._explore_storage():
tarinfo = tarfile.TarInfo(media_filename)
media_file = self.media_storage.open(media_filename)
tarinfo.size = len(media_file)
tar_file.addfile(tarinfo, media_file)
# Close the TAR for writing
tar_file.close()
return fileobj | Create TAR file. | Below is the the instruction that describes the task:
### Input:
Create TAR file.
### Response:
def _create_tar(self, name):
"""Create TAR file."""
fileobj = utils.create_spooled_temporary_file()
mode = 'w:gz' if self.compress else 'w'
tar_file = tarfile.open(name=name, fileobj=fileobj, mode=mode)
for media_filename in self._explore_storage():
tarinfo = tarfile.TarInfo(media_filename)
media_file = self.media_storage.open(media_filename)
tarinfo.size = len(media_file)
tar_file.addfile(tarinfo, media_file)
# Close the TAR for writing
tar_file.close()
return fileobj |
def add(self, source_id, profile_data, training_metadata=[], profile_reference=None, timestamp_reception=None):
"""Use the api to add a new profile using profile_data."""
data = {
"source_id": _validate_source_id(source_id),
"profile_json": _validate_dict(profile_data, "profile_data"),
"training_metadata": _validate_training_metadata(training_metadata),
"profile_reference": profile_reference
}
# some enrichement for profile_json
if timestamp_reception is not None:
data['timestamp_reception'] = _validate_timestamp(timestamp_reception, 'timestamp_reception')
response = self.client.post("profile/json", data=data)
return response.json() | Use the api to add a new profile using profile_data. | Below is the the instruction that describes the task:
### Input:
Use the api to add a new profile using profile_data.
### Response:
def add(self, source_id, profile_data, training_metadata=[], profile_reference=None, timestamp_reception=None):
"""Use the api to add a new profile using profile_data."""
data = {
"source_id": _validate_source_id(source_id),
"profile_json": _validate_dict(profile_data, "profile_data"),
"training_metadata": _validate_training_metadata(training_metadata),
"profile_reference": profile_reference
}
# some enrichement for profile_json
if timestamp_reception is not None:
data['timestamp_reception'] = _validate_timestamp(timestamp_reception, 'timestamp_reception')
response = self.client.post("profile/json", data=data)
return response.json() |
def parse_manifest(template_lines):
"""List of file names included by the MANIFEST.in template lines."""
manifest_files = distutils.filelist.FileList()
for line in template_lines:
if line.strip():
manifest_files.process_template_line(line)
return manifest_files.files | List of file names included by the MANIFEST.in template lines. | Below is the the instruction that describes the task:
### Input:
List of file names included by the MANIFEST.in template lines.
### Response:
def parse_manifest(template_lines):
"""List of file names included by the MANIFEST.in template lines."""
manifest_files = distutils.filelist.FileList()
for line in template_lines:
if line.strip():
manifest_files.process_template_line(line)
return manifest_files.files |
def get_sync_info(self, name, key=None):
"""Get mtime/size when this target's current dir was last synchronized with remote."""
peer_target = self.peer
if self.is_local():
info = self.cur_dir_meta.dir["peer_sync"].get(peer_target.get_id())
else:
info = peer_target.cur_dir_meta.dir["peer_sync"].get(self.get_id())
if name is not None:
info = info.get(name) if info else None
if info and key:
info = info.get(key)
return info | Get mtime/size when this target's current dir was last synchronized with remote. | Below is the the instruction that describes the task:
### Input:
Get mtime/size when this target's current dir was last synchronized with remote.
### Response:
def get_sync_info(self, name, key=None):
"""Get mtime/size when this target's current dir was last synchronized with remote."""
peer_target = self.peer
if self.is_local():
info = self.cur_dir_meta.dir["peer_sync"].get(peer_target.get_id())
else:
info = peer_target.cur_dir_meta.dir["peer_sync"].get(self.get_id())
if name is not None:
info = info.get(name) if info else None
if info and key:
info = info.get(key)
return info |
def follow_file(self, f, seen, trim):
"""Whether to recurse into a file's dependencies."""
return (f not in self.graph.nodes and
f not in seen and
(not trim or
not isinstance(self.provenance[f],
(resolve.Builtin, resolve.System)))) | Whether to recurse into a file's dependencies. | Below is the the instruction that describes the task:
### Input:
Whether to recurse into a file's dependencies.
### Response:
def follow_file(self, f, seen, trim):
"""Whether to recurse into a file's dependencies."""
return (f not in self.graph.nodes and
f not in seen and
(not trim or
not isinstance(self.provenance[f],
(resolve.Builtin, resolve.System)))) |
def add_record(self, orcid_id, token, request_type, data,
content_type='application/orcid+json'):
"""Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param data: dict | lxml.etree._Element
The record in Python-friendly format, as either JSON-compatible
dictionary (content_type == 'application/orcid+json') or
XML (content_type == 'application/orcid+xml')
:param content_type: string
MIME type of the passed record.
Returns
-------
:returns: string
Put-code of the new work.
"""
return self._update_activities(orcid_id, token, requests.post,
request_type, data,
content_type=content_type) | Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param data: dict | lxml.etree._Element
The record in Python-friendly format, as either JSON-compatible
dictionary (content_type == 'application/orcid+json') or
XML (content_type == 'application/orcid+xml')
:param content_type: string
MIME type of the passed record.
Returns
-------
:returns: string
Put-code of the new work. | Below is the the instruction that describes the task:
### Input:
Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param data: dict | lxml.etree._Element
The record in Python-friendly format, as either JSON-compatible
dictionary (content_type == 'application/orcid+json') or
XML (content_type == 'application/orcid+xml')
:param content_type: string
MIME type of the passed record.
Returns
-------
:returns: string
Put-code of the new work.
### Response:
def add_record(self, orcid_id, token, request_type, data,
content_type='application/orcid+json'):
"""Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param data: dict | lxml.etree._Element
The record in Python-friendly format, as either JSON-compatible
dictionary (content_type == 'application/orcid+json') or
XML (content_type == 'application/orcid+xml')
:param content_type: string
MIME type of the passed record.
Returns
-------
:returns: string
Put-code of the new work.
"""
return self._update_activities(orcid_id, token, requests.post,
request_type, data,
content_type=content_type) |
def render(self, flags: Flags) -> List[Text]:
"""
Returns a list of randomly chosen outcomes for each sentence of the
list.
"""
return [x.render(flags) for x in self.sentences] | Returns a list of randomly chosen outcomes for each sentence of the
list. | Below is the the instruction that describes the task:
### Input:
Returns a list of randomly chosen outcomes for each sentence of the
list.
### Response:
def render(self, flags: Flags) -> List[Text]:
"""
Returns a list of randomly chosen outcomes for each sentence of the
list.
"""
return [x.render(flags) for x in self.sentences] |
def elapsed_time(self) -> float:
"""
The number of seconds that has elapsed since the step started running
if the step is still running. Or, if the step has already finished
running, the amount of time that elapsed during the last execution of
the step.
"""
current_time = datetime.utcnow()
start = self.start_time or current_time
end = self.end_time or current_time
return (end - start).total_seconds() | The number of seconds that has elapsed since the step started running
if the step is still running. Or, if the step has already finished
running, the amount of time that elapsed during the last execution of
the step. | Below is the the instruction that describes the task:
### Input:
The number of seconds that has elapsed since the step started running
if the step is still running. Or, if the step has already finished
running, the amount of time that elapsed during the last execution of
the step.
### Response:
def elapsed_time(self) -> float:
"""
The number of seconds that has elapsed since the step started running
if the step is still running. Or, if the step has already finished
running, the amount of time that elapsed during the last execution of
the step.
"""
current_time = datetime.utcnow()
start = self.start_time or current_time
end = self.end_time or current_time
return (end - start).total_seconds() |
def request_token():
"""
通过帐号,密码请求token,返回一个dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
}
"""
while True:
email, password, captcha_solution, captcha_id = win_login()
options = {
'source': 'radio',
'alias': email,
'form_password': password,
'captcha_solution': captcha_solution,
'captcha_id': captcha_id,
'task': 'sync_channel_list'
}
r = requests.post('https://douban.fm/j/login', data=options, headers=HEADERS)
req_json = json.loads(r.text, object_hook=decode_dict)
# req_json = json.loads(r.text)
if req_json['r'] == 0:
post_data = {
# will not save
'liked': req_json['user_info']['play_record']['liked'],
'banned': req_json['user_info']['play_record']['banned'],
'played': req_json['user_info']['play_record']['played'],
'is_pro': req_json['user_info']['is_pro'],
'user_name': req_json['user_info']['name'],
# to save
'cookies': r.cookies,
'valume': 50,
'channel': 0,
'theme_id': 0
}
return post_data
print(req_json['err_msg'])
print(ERROR + req_json['err_msg']) | 通过帐号,密码请求token,返回一个dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
} | Below is the the instruction that describes the task:
### Input:
通过帐号,密码请求token,返回一个dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
}
### Response:
def request_token():
"""
通过帐号,密码请求token,返回一个dict
{
"user_info": {
"ck": "-VQY",
"play_record": {
"fav_chls_count": 4,
"liked": 802,
"banned": 162,
"played": 28368
},
"is_new_user": 0,
"uid": "taizilongxu",
"third_party_info": null,
"url": "http://www.douban.com/people/taizilongxu/",
"is_dj": false,
"id": "2053207",
"is_pro": false,
"name": "刘小备"
},
"r": 0
}
"""
while True:
email, password, captcha_solution, captcha_id = win_login()
options = {
'source': 'radio',
'alias': email,
'form_password': password,
'captcha_solution': captcha_solution,
'captcha_id': captcha_id,
'task': 'sync_channel_list'
}
r = requests.post('https://douban.fm/j/login', data=options, headers=HEADERS)
req_json = json.loads(r.text, object_hook=decode_dict)
# req_json = json.loads(r.text)
if req_json['r'] == 0:
post_data = {
# will not save
'liked': req_json['user_info']['play_record']['liked'],
'banned': req_json['user_info']['play_record']['banned'],
'played': req_json['user_info']['play_record']['played'],
'is_pro': req_json['user_info']['is_pro'],
'user_name': req_json['user_info']['name'],
# to save
'cookies': r.cookies,
'valume': 50,
'channel': 0,
'theme_id': 0
}
return post_data
print(req_json['err_msg'])
print(ERROR + req_json['err_msg']) |
def download_to_directory(self, directory, url, basename=None, overwrite=False, subdir=None):
"""
Download a file to the workspace.
Early Shortcut: If url is a file://-URL and that file is already in the directory, keep it there.
If basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename.
If basename is not given and no subdir is given, use the alnum characters in the URL as the basename.
Args:
directory (string): Directory to download files to
basename (string, None): basename part of the filename on disk.
url (string): URL to download from
overwrite (boolean): Whether to overwrite existing files with that name
subdir (string, None): Subdirectory to create within the directory. Think fileGrp.
Returns:
Local filename
"""
log = getLogger('ocrd.resolver.download_to_directory') # pylint: disable=redefined-outer-name
log.debug("directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|", directory, url, basename, overwrite, subdir)
if url is None:
raise Exception("'url' must be a string")
if directory is None:
raise Exception("'directory' must be a string")
if basename is None:
if (subdir is not None) or \
(directory and url.startswith('file://%s' % directory)): # in case downloading a url 'file:///tmp/foo/bar' to directory '/tmp/foo'
basename = url.rsplit('/', 1)[-1]
else:
basename = safe_filename(url)
if subdir is not None:
basename = join(subdir, basename)
outfilename = join(directory, basename)
if exists(outfilename) and not overwrite:
log.debug("File already exists and overwrite=False: %s", outfilename)
return outfilename
outfiledir = outfilename.rsplit('/', 1)[0]
# print(outfiledir)
if not isdir(outfiledir):
makedirs(outfiledir)
log.debug("Downloading <%s> to '%s'", url, outfilename)
# de-scheme file:// URL
if url.startswith('file://'):
url = url[len('file://'):]
# Copy files or download remote assets
if '://' not in url:
copyfile(url, outfilename)
else:
response = requests.get(url)
if response.status_code != 200:
raise Exception("Not found: %s (HTTP %d)" % (url, response.status_code))
with open(outfilename, 'wb') as outfile:
outfile.write(response.content)
return outfilename | Download a file to the workspace.
Early Shortcut: If url is a file://-URL and that file is already in the directory, keep it there.
If basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename.
If basename is not given and no subdir is given, use the alnum characters in the URL as the basename.
Args:
directory (string): Directory to download files to
basename (string, None): basename part of the filename on disk.
url (string): URL to download from
overwrite (boolean): Whether to overwrite existing files with that name
subdir (string, None): Subdirectory to create within the directory. Think fileGrp.
Returns:
Local filename | Below is the the instruction that describes the task:
### Input:
Download a file to the workspace.
Early Shortcut: If url is a file://-URL and that file is already in the directory, keep it there.
If basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename.
If basename is not given and no subdir is given, use the alnum characters in the URL as the basename.
Args:
directory (string): Directory to download files to
basename (string, None): basename part of the filename on disk.
url (string): URL to download from
overwrite (boolean): Whether to overwrite existing files with that name
subdir (string, None): Subdirectory to create within the directory. Think fileGrp.
Returns:
Local filename
### Response:
def download_to_directory(self, directory, url, basename=None, overwrite=False, subdir=None):
"""
Download a file to the workspace.
Early Shortcut: If url is a file://-URL and that file is already in the directory, keep it there.
If basename is not given but subdir is, assume user knows what she's doing and use last URL segment as the basename.
If basename is not given and no subdir is given, use the alnum characters in the URL as the basename.
Args:
directory (string): Directory to download files to
basename (string, None): basename part of the filename on disk.
url (string): URL to download from
overwrite (boolean): Whether to overwrite existing files with that name
subdir (string, None): Subdirectory to create within the directory. Think fileGrp.
Returns:
Local filename
"""
log = getLogger('ocrd.resolver.download_to_directory') # pylint: disable=redefined-outer-name
log.debug("directory=|%s| url=|%s| basename=|%s| overwrite=|%s| subdir=|%s|", directory, url, basename, overwrite, subdir)
if url is None:
raise Exception("'url' must be a string")
if directory is None:
raise Exception("'directory' must be a string")
if basename is None:
if (subdir is not None) or \
(directory and url.startswith('file://%s' % directory)): # in case downloading a url 'file:///tmp/foo/bar' to directory '/tmp/foo'
basename = url.rsplit('/', 1)[-1]
else:
basename = safe_filename(url)
if subdir is not None:
basename = join(subdir, basename)
outfilename = join(directory, basename)
if exists(outfilename) and not overwrite:
log.debug("File already exists and overwrite=False: %s", outfilename)
return outfilename
outfiledir = outfilename.rsplit('/', 1)[0]
# print(outfiledir)
if not isdir(outfiledir):
makedirs(outfiledir)
log.debug("Downloading <%s> to '%s'", url, outfilename)
# de-scheme file:// URL
if url.startswith('file://'):
url = url[len('file://'):]
# Copy files or download remote assets
if '://' not in url:
copyfile(url, outfilename)
else:
response = requests.get(url)
if response.status_code != 200:
raise Exception("Not found: %s (HTTP %d)" % (url, response.status_code))
with open(outfilename, 'wb') as outfile:
outfile.write(response.content)
return outfilename |
def Inference(probability=None, relation=None, name=None, concept=None):
"""Represents a probable cause / relation between this event and some prior.
Args:
probability(float): Value 0.0 to 1.0.
relation(str): e.g. 'associated' or 'identified' (see Voevent spec)
name(str): e.g. name of identified progenitor.
concept(str): One of a 'formal UCD-like vocabulary of astronomical
concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see
VOEvent spec.
"""
atts = {}
if probability is not None:
atts['probability'] = str(probability)
if relation is not None:
atts['relation'] = relation
inf = objectify.Element('Inference', attrib=atts)
if name is not None:
inf.Name = name
if concept is not None:
inf.Concept = concept
return inf | Represents a probable cause / relation between this event and some prior.
Args:
probability(float): Value 0.0 to 1.0.
relation(str): e.g. 'associated' or 'identified' (see Voevent spec)
name(str): e.g. name of identified progenitor.
concept(str): One of a 'formal UCD-like vocabulary of astronomical
concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see
VOEvent spec. | Below is the instruction that describes the task:
### Input:
Represents a probable cause / relation between this event and some prior.
Args:
probability(float): Value 0.0 to 1.0.
relation(str): e.g. 'associated' or 'identified' (see Voevent spec)
name(str): e.g. name of identified progenitor.
concept(str): One of a 'formal UCD-like vocabulary of astronomical
concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see
VOEvent spec.
### Response:
def Inference(probability=None, relation=None, name=None, concept=None):
"""Represents a probable cause / relation between this event and some prior.
Args:
probability(float): Value 0.0 to 1.0.
relation(str): e.g. 'associated' or 'identified' (see Voevent spec)
name(str): e.g. name of identified progenitor.
concept(str): One of a 'formal UCD-like vocabulary of astronomical
concepts', e.g. http://ivoat.ivoa.net/stars.supernova.Ia - see
VOEvent spec.
"""
atts = {}
if probability is not None:
atts['probability'] = str(probability)
if relation is not None:
atts['relation'] = relation
inf = objectify.Element('Inference', attrib=atts)
if name is not None:
inf.Name = name
if concept is not None:
inf.Concept = concept
return inf |
def _expand_pattern(self, pattern):
"""
From the pattern decomposition, finds the absolute paths
matching the pattern.
"""
(globpattern, regexp, fields, types) = self._decompose_pattern(pattern)
filelist = glob.glob(globpattern)
expansion = []
for fname in filelist:
if fields == []:
expansion.append((fname, {}))
continue
match = re.match(regexp, fname)
if match is None: continue
match_items = match.groupdict().items()
tags = dict((k,types.get(k, str)(v)) for (k,v) in match_items)
expansion.append((fname, tags))
return expansion | From the pattern decomposition, finds the absolute paths
matching the pattern. | Below is the instruction that describes the task:
### Input:
From the pattern decomposition, finds the absolute paths
matching the pattern.
### Response:
def _expand_pattern(self, pattern):
"""
From the pattern decomposition, finds the absolute paths
matching the pattern.
"""
(globpattern, regexp, fields, types) = self._decompose_pattern(pattern)
filelist = glob.glob(globpattern)
expansion = []
for fname in filelist:
if fields == []:
expansion.append((fname, {}))
continue
match = re.match(regexp, fname)
if match is None: continue
match_items = match.groupdict().items()
tags = dict((k,types.get(k, str)(v)) for (k,v) in match_items)
expansion.append((fname, tags))
return expansion |
def _validate_callback(callback):
"""Checks if the specified callback is callable and accepts a kwargs param.
Args:
callback (obj): Any object or a list of objects that can be called.
e.g. <function say_hello at 0x101234567>
Raises:
SlackClientError: The specified callback is not callable.
SlackClientError: The callback must accept keyword arguments (**kwargs).
"""
cb_name = callback.__name__ if hasattr(callback, "__name__") else callback
if not callable(callback):
msg = "The specified callback '{}' is not callable.".format(cb_name)
raise client_err.SlackClientError(msg)
callback_params = inspect.signature(callback).parameters.values()
if not any(
param for param in callback_params if param.kind == param.VAR_KEYWORD
):
msg = "The callback '{}' must accept keyword arguments (**kwargs).".format(
cb_name
)
raise client_err.SlackClientError(msg) | Checks if the specified callback is callable and accepts a kwargs param.
Args:
callback (obj): Any object or a list of objects that can be called.
e.g. <function say_hello at 0x101234567>
Raises:
SlackClientError: The specified callback is not callable.
SlackClientError: The callback must accept keyword arguments (**kwargs). | Below is the instruction that describes the task:
### Input:
Checks if the specified callback is callable and accepts a kwargs param.
Args:
callback (obj): Any object or a list of objects that can be called.
e.g. <function say_hello at 0x101234567>
Raises:
SlackClientError: The specified callback is not callable.
SlackClientError: The callback must accept keyword arguments (**kwargs).
### Response:
def _validate_callback(callback):
"""Checks if the specified callback is callable and accepts a kwargs param.
Args:
callback (obj): Any object or a list of objects that can be called.
e.g. <function say_hello at 0x101234567>
Raises:
SlackClientError: The specified callback is not callable.
SlackClientError: The callback must accept keyword arguments (**kwargs).
"""
cb_name = callback.__name__ if hasattr(callback, "__name__") else callback
if not callable(callback):
msg = "The specified callback '{}' is not callable.".format(cb_name)
raise client_err.SlackClientError(msg)
callback_params = inspect.signature(callback).parameters.values()
if not any(
param for param in callback_params if param.kind == param.VAR_KEYWORD
):
msg = "The callback '{}' must accept keyword arguments (**kwargs).".format(
cb_name
)
raise client_err.SlackClientError(msg) |
def deliver_hook(instance, target, payload_override=None):
"""
Deliver the payload to the target URL.
By default it serializes to JSON and POSTs.
"""
payload = payload_override or serialize_hook(instance)
if hasattr(settings, 'HOOK_DELIVERER'):
deliverer = get_module(settings.HOOK_DELIVERER)
deliverer(target, payload, instance=instance)
else:
client.post(
url=target,
data=json.dumps(payload, cls=serializers.json.DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
return None | Deliver the payload to the target URL.
By default it serializes to JSON and POSTs. | Below is the instruction that describes the task:
### Input:
Deliver the payload to the target URL.
By default it serializes to JSON and POSTs.
### Response:
def deliver_hook(instance, target, payload_override=None):
"""
Deliver the payload to the target URL.
By default it serializes to JSON and POSTs.
"""
payload = payload_override or serialize_hook(instance)
if hasattr(settings, 'HOOK_DELIVERER'):
deliverer = get_module(settings.HOOK_DELIVERER)
deliverer(target, payload, instance=instance)
else:
client.post(
url=target,
data=json.dumps(payload, cls=serializers.json.DjangoJSONEncoder),
headers={'Content-Type': 'application/json'}
)
return None |
def download_file(url, dst_path):
"""Download a file from a url"""
request = requests.get(url, stream=True)
with open(dst_path, 'wb') as downloaded_file:
request.raw.decode_content = True
        shutil.copyfileobj(request.raw, downloaded_file) | Download a file from a url | Below is the instruction that describes the task:
### Input:
Download a file from a url
### Response:
def download_file(url, dst_path):
"""Download a file from a url"""
request = requests.get(url, stream=True)
with open(dst_path, 'wb') as downloaded_file:
request.raw.decode_content = True
shutil.copyfileobj(request.raw, downloaded_file) |
def is_bifurcating(self, include_root=True):
"""
Returns False if there is a polytomy in the tree, including if the tree
is unrooted (basal polytomy), unless you use the include_root=False
argument.
"""
ctn1 = -1 + (2 * len(self))
ctn2 = -2 + (2 * len(self))
if self.is_rooted():
return bool(ctn1 == sum(1 for i in self.treenode.traverse()))
if include_root:
return bool(ctn2 == -1 + sum(1 for i in self.treenode.traverse()))
return bool(ctn2 == sum(1 for i in self.treenode.traverse())) | Returns False if there is a polytomy in the tree, including if the tree
is unrooted (basal polytomy), unless you use the include_root=False
argument. | Below is the instruction that describes the task:
### Input:
Returns False if there is a polytomy in the tree, including if the tree
is unrooted (basal polytomy), unless you use the include_root=False
argument.
### Response:
def is_bifurcating(self, include_root=True):
"""
Returns False if there is a polytomy in the tree, including if the tree
is unrooted (basal polytomy), unless you use the include_root=False
argument.
"""
ctn1 = -1 + (2 * len(self))
ctn2 = -2 + (2 * len(self))
if self.is_rooted():
return bool(ctn1 == sum(1 for i in self.treenode.traverse()))
if include_root:
return bool(ctn2 == -1 + sum(1 for i in self.treenode.traverse()))
return bool(ctn2 == sum(1 for i in self.treenode.traverse())) |
def _grouper(iterable, n_args, fillvalue=None):
"""
Banana banana
"""
args = [iter(iterable)] * n_args
    return zip_longest(*args, fillvalue=fillvalue) | Banana banana | Below is the instruction that describes the task:
### Input:
Banana banana
### Response:
def _grouper(iterable, n_args, fillvalue=None):
"""
Banana banana
"""
args = [iter(iterable)] * n_args
return zip_longest(*args, fillvalue=fillvalue) |
def get_data(self, request=None):
"""Get data from the dataset."""
if request is None:
raise ValueError
data = [[] for _ in self.sources]
for i in range(request):
try:
for source_data, example in zip(
data, next(self.child_epoch_iterator)):
source_data.append(example)
except StopIteration:
# If some data has been extracted and `strict` is not set,
# we should spit out this data before stopping iteration.
if not self.strictness and data[0]:
break
elif self.strictness > 1 and data[0]:
raise ValueError
raise
    return tuple(numpy.asarray(source_data) for source_data in data) | Get data from the dataset. | Below is the instruction that describes the task:
### Input:
Get data from the dataset.
### Response:
def get_data(self, request=None):
"""Get data from the dataset."""
if request is None:
raise ValueError
data = [[] for _ in self.sources]
for i in range(request):
try:
for source_data, example in zip(
data, next(self.child_epoch_iterator)):
source_data.append(example)
except StopIteration:
# If some data has been extracted and `strict` is not set,
# we should spit out this data before stopping iteration.
if not self.strictness and data[0]:
break
elif self.strictness > 1 and data[0]:
raise ValueError
raise
return tuple(numpy.asarray(source_data) for source_data in data) |
def atualizar_software_sat(self):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
"""
retorno = super(ClienteSATLocal, self).atualizar_software_sat()
return RespostaSAT.atualizar_software_sat(retorno) | Sobrepõe :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT | Below is the instruction that describes the task:
### Input:
Sobrepõe :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
### Response:
def atualizar_software_sat(self):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`.
:return: Uma resposta SAT padrão.
:rtype: satcfe.resposta.padrao.RespostaSAT
"""
retorno = super(ClienteSATLocal, self).atualizar_software_sat()
return RespostaSAT.atualizar_software_sat(retorno) |
def centroid(self):
"""
The point in space which is the average of the triangle centroids
weighted by the area of each triangle.
This will be valid even for non- watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3,) float
The average vertex weighted by face area
"""
# use the centroid of each triangle weighted by
# the area of the triangle to find the overall centroid
centroid = np.average(self.triangles_center,
axis=0,
weights=self.area_faces)
centroid.flags.writeable = False
return centroid | The point in space which is the average of the triangle centroids
weighted by the area of each triangle.
This will be valid even for non- watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3,) float
The average vertex weighted by face area | Below is the instruction that describes the task:
### Input:
The point in space which is the average of the triangle centroids
weighted by the area of each triangle.
This will be valid even for non- watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3,) float
The average vertex weighted by face area
### Response:
def centroid(self):
"""
The point in space which is the average of the triangle centroids
weighted by the area of each triangle.
This will be valid even for non- watertight meshes,
unlike self.center_mass
Returns
----------
centroid : (3,) float
The average vertex weighted by face area
"""
# use the centroid of each triangle weighted by
# the area of the triangle to find the overall centroid
centroid = np.average(self.triangles_center,
axis=0,
weights=self.area_faces)
centroid.flags.writeable = False
return centroid |
def from_iso_long_date(self, date_str: str) -> datetime:
""" Parse ISO date string (YYYY-MM-DDTHH:mm:ss) """
assert isinstance(date_str, str)
assert len(date_str) == 19
self.value = datetime.strptime(date_str, ISO_LONG_FORMAT)
    return self.value | Parse ISO date string (YYYY-MM-DDTHH:mm:ss) | Below is the instruction that describes the task:
### Input:
Parse ISO date string (YYYY-MM-DDTHH:mm:ss)
### Response:
def from_iso_long_date(self, date_str: str) -> datetime:
""" Parse ISO date string (YYYY-MM-DDTHH:mm:ss) """
assert isinstance(date_str, str)
assert len(date_str) == 19
self.value = datetime.strptime(date_str, ISO_LONG_FORMAT)
return self.value |
def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
"""
A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as a interface for executing guest code like a
python function.
:rtype: angr.callable.Callable
"""
return Callable(self.project,
addr=addr,
concrete_only=concrete_only,
perform_merge=perform_merge,
base_state=base_state,
toc=toc,
cc=cc) | A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as a interface for executing guest code like a
python function.
:rtype: angr.callable.Callable | Below is the instruction that describes the task:
### Input:
A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as a interface for executing guest code like a
python function.
:rtype: angr.callable.Callable
### Response:
def callable(self, addr, concrete_only=False, perform_merge=True, base_state=None, toc=None, cc=None):
"""
A Callable is a representation of a function in the binary that can be interacted with like a native python
function.
:param addr: The address of the function to use
:param concrete_only: Throw an exception if the execution splits into multiple states
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
:returns: A Callable object that can be used as a interface for executing guest code like a
python function.
:rtype: angr.callable.Callable
"""
return Callable(self.project,
addr=addr,
concrete_only=concrete_only,
perform_merge=perform_merge,
base_state=base_state,
toc=toc,
cc=cc) |
def op_list_venvs(self):
"""Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
"""
self.logger.info('Listing known virtual environments ...')
venvs = self.get_venvs()
for venv in venvs:
self.logger.info('Found `%s`' % venv)
else:
self.logger.info('No virtual environments found in `%s` directory.' % VENVS_DIRNAME)
return venvs | Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments | Below is the instruction that describes the task:
### Input:
Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
### Response:
def op_list_venvs(self):
"""Prints out and returns a list of known virtual environments.
:rtype: list
:return: list of virtual environments
"""
self.logger.info('Listing known virtual environments ...')
venvs = self.get_venvs()
for venv in venvs:
self.logger.info('Found `%s`' % venv)
else:
self.logger.info('No virtual environments found in `%s` directory.' % VENVS_DIRNAME)
return venvs |
def atomic_strain(atoms_now, atoms_old, cutoff=None, i_now=None, j_now=None):
"""
Calculate deformation gradient tensor and D^2_min measure for non-affine
displacements.
See: Falk, Langer, Phys. Rev. B 57, 7192 (1998)
Parameters:
-----------
atoms_now Current atomic configuration
atoms_old Reference atomic configuration
cutoff Neighbor list cutoff.
i_now, j_now Neighbor list. Automatically computed if not provided.
Returns:
--------
delta_plus_epsilon Strain gradient tensor
d_sq D^2_min norm
"""
if i_now is None or j_now is None:
if cutoff is None:
raise ValueError('Please provide either neighbor list or neighbor '
'list cutoff.')
# Create a particles object and set number of atoms and cell
p = native.from_atoms(a_now)
# create a neighbor list object and set it's cutoff
nl = native.Neighbors(avgn)
nl.request_interaction_range(cutoff)
# get neighbours and distance
i_now, j_now, abs_dr_now = nl.get_neighbors(p)
elif cutoff is not None:
raise ValueError('Please provide either neighbor list or neighbor '
'list cutoff, not both.')
### get the D square values
delta_plus_epsilon, d_sq = get_D_square_min(atoms_now, atoms_old, i_now,
j_now)
return delta_plus_epsilon, d_sq | Calculate deformation gradient tensor and D^2_min measure for non-affine
displacements.
See: Falk, Langer, Phys. Rev. B 57, 7192 (1998)
Parameters:
-----------
atoms_now Current atomic configuration
atoms_old Reference atomic configuration
cutoff Neighbor list cutoff.
i_now, j_now Neighbor list. Automatically computed if not provided.
Returns:
--------
delta_plus_epsilon Strain gradient tensor
d_sq               D^2_min norm | Below is the instruction that describes the task:
### Input:
Calculate deformation gradient tensor and D^2_min measure for non-affine
displacements.
See: Falk, Langer, Phys. Rev. B 57, 7192 (1998)
Parameters:
-----------
atoms_now Current atomic configuration
atoms_old Reference atomic configuration
cutoff Neighbor list cutoff.
i_now, j_now Neighbor list. Automatically computed if not provided.
Returns:
--------
delta_plus_epsilon Strain gradient tensor
d_sq D^2_min norm
### Response:
def atomic_strain(atoms_now, atoms_old, cutoff=None, i_now=None, j_now=None):
"""
Calculate deformation gradient tensor and D^2_min measure for non-affine
displacements.
See: Falk, Langer, Phys. Rev. B 57, 7192 (1998)
Parameters:
-----------
atoms_now Current atomic configuration
atoms_old Reference atomic configuration
cutoff Neighbor list cutoff.
i_now, j_now Neighbor list. Automatically computed if not provided.
Returns:
--------
delta_plus_epsilon Strain gradient tensor
d_sq D^2_min norm
"""
if i_now is None or j_now is None:
if cutoff is None:
raise ValueError('Please provide either neighbor list or neighbor '
'list cutoff.')
# Create a particles object and set number of atoms and cell
p = native.from_atoms(a_now)
# create a neighbor list object and set it's cutoff
nl = native.Neighbors(avgn)
nl.request_interaction_range(cutoff)
# get neighbours and distance
i_now, j_now, abs_dr_now = nl.get_neighbors(p)
elif cutoff is not None:
raise ValueError('Please provide either neighbor list or neighbor '
'list cutoff, not both.')
### get the D square values
delta_plus_epsilon, d_sq = get_D_square_min(atoms_now, atoms_old, i_now,
j_now)
return delta_plus_epsilon, d_sq |
def _TextJustify(self, text, col_size):
"""Formats text within column with white space padding.
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
is split into words and returned as a list of string, each string
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text.
"""
result = []
if '\n' in text:
for paragraph in text.split('\n'):
result.extend(self._TextJustify(paragraph, col_size))
return result
wrapper = textwrap.TextWrapper(width=col_size-2, break_long_words=False,
expand_tabs=False)
try:
text_list = wrapper.wrap(text)
except ValueError:
raise TableError('Field too small (minimum width: 3)')
if not text_list:
return [' '*col_size]
for current_line in text_list:
stripped_len = len(terminal.StripAnsiText(current_line))
ansi_color_adds = len(current_line) - stripped_len
# +2 for white space on either side.
if stripped_len + 2 > col_size:
raise TableError('String contains words that do not fit in column.')
result.append(' %-*s' % (col_size - 1 + ansi_color_adds, current_line))
return result | Formats text within column with white space padding.
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
is split into words and returned as a list of string, each string
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text. | Below is the instruction that describes the task:
### Input:
Formats text within column with white space padding.
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
is split into words and returned as a list of string, each string
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text.
### Response:
def _TextJustify(self, text, col_size):
"""Formats text within column with white space padding.
A single space is prefixed, and a number of spaces are added as a
suffix such that the length of the resultant string equals the col_size.
If the length of the text exceeds the column width available then it
is split into words and returned as a list of string, each string
contains one or more words padded to the column size.
Args:
text: String of text to format.
col_size: integer size of column to pad out the text to.
Returns:
List of strings col_size in length.
Raises:
TableError: If col_size is too small to fit the words in the text.
"""
result = []
if '\n' in text:
for paragraph in text.split('\n'):
result.extend(self._TextJustify(paragraph, col_size))
return result
wrapper = textwrap.TextWrapper(width=col_size-2, break_long_words=False,
expand_tabs=False)
try:
text_list = wrapper.wrap(text)
except ValueError:
raise TableError('Field too small (minimum width: 3)')
if not text_list:
return [' '*col_size]
for current_line in text_list:
stripped_len = len(terminal.StripAnsiText(current_line))
ansi_color_adds = len(current_line) - stripped_len
# +2 for white space on either side.
if stripped_len + 2 > col_size:
raise TableError('String contains words that do not fit in column.')
result.append(' %-*s' % (col_size - 1 + ansi_color_adds, current_line))
return result |
def simplify(self, options=None):
"""
returns a dict describing a simple snapshot of this change, and
its children if any.
"""
simple = {
"class": type(self).__name__,
"is_change": self.is_change(),
"description": self.get_description(),
"label": self.label,
}
if options:
simple["is_ignored"] = self.is_ignored(options)
if isinstance(self, Addition):
simple["is_addition"] = True
if isinstance(self, Removal):
simple["is_removal"] = True
if self.entry:
simple["entry"] = self.entry
return simple | returns a dict describing a simple snapshot of this change, and
its children if any. | Below is the instruction that describes the task:
### Input:
returns a dict describing a simple snapshot of this change, and
its children if any.
### Response:
def simplify(self, options=None):
"""
returns a dict describing a simple snapshot of this change, and
its children if any.
"""
simple = {
"class": type(self).__name__,
"is_change": self.is_change(),
"description": self.get_description(),
"label": self.label,
}
if options:
simple["is_ignored"] = self.is_ignored(options)
if isinstance(self, Addition):
simple["is_addition"] = True
if isinstance(self, Removal):
simple["is_removal"] = True
if self.entry:
simple["entry"] = self.entry
return simple |
def _log(self, level, msg, args, kwargs):
"""Throttled log output."""
with self._tb_lock:
if self._tb is None:
throttled = 0
should_log = True
else:
throttled = self._tb.throttle_count
should_log = self._tb.check_and_consume()
if should_log:
if throttled > 0:
self._logger.log(level, "")
self._logger.log(
level,
"(... throttled %d messages ...)",
throttled,
)
self._logger.log(level, "")
if msg is not None:
self._logger.log(level, msg, *args, **kwargs)
return FancyLogContext(self._logger,
level,
self._verbosity,
self._structured_detail,
self._with_prefix)
else:
            return NoopLogContext() | Throttled log output. | Below is the instruction that describes the task:
### Input:
Throttled log output.
### Response:
def _log(self, level, msg, args, kwargs):
"""Throttled log output."""
with self._tb_lock:
if self._tb is None:
throttled = 0
should_log = True
else:
throttled = self._tb.throttle_count
should_log = self._tb.check_and_consume()
if should_log:
if throttled > 0:
self._logger.log(level, "")
self._logger.log(
level,
"(... throttled %d messages ...)",
throttled,
)
self._logger.log(level, "")
if msg is not None:
self._logger.log(level, msg, *args, **kwargs)
return FancyLogContext(self._logger,
level,
self._verbosity,
self._structured_detail,
self._with_prefix)
else:
return NoopLogContext() |
def _validate_plan(plan):
"""Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set
"""
# Validate format of plan
if not _validate_format(plan):
return False
# Verify no duplicate partitions
partition_names = [
(p_data['topic'], p_data['partition'])
for p_data in plan['partitions']
]
duplicate_partitions = [
partition for partition, count in six.iteritems(Counter(partition_names))
if count > 1
]
if duplicate_partitions:
_log.error(
'Duplicate partitions in plan {p_list}'
.format(p_list=duplicate_partitions),
)
return False
# Verify no duplicate brokers in partition-replicas
dup_replica_brokers = []
for p_data in plan['partitions']:
dup_replica_brokers = [
broker
for broker, count in Counter(p_data['replicas']).items()
if count > 1
]
if dup_replica_brokers:
_log.error(
'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}'
.format(
topic=p_data['topic'],
p_id=p_data['partition'],
replicas=p_data['replicas'],
)
)
return False
# Verify same replication-factor for every topic
topic_replication_factor = {}
for partition_info in plan['partitions']:
topic = partition_info['topic']
replication_factor = len(partition_info['replicas'])
if topic in list(topic_replication_factor.keys()):
if topic_replication_factor[topic] != replication_factor:
_log.error(
'Mismatch in replication-factor of partitions for topic '
'{topic}'.format(topic=topic),
)
return False
else:
topic_replication_factor[topic] = replication_factor
return True | Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set | Below is the instruction that describes the task:
### Input:
Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set
### Response:
def _validate_plan(plan):
"""Validate if given plan is valid based on kafka-cluster-assignment protocols.
Validate following parameters:
- Correct format of plan
- Partition-list should be unique
- Every partition of a topic should have same replication-factor
- Replicas of a partition should have unique broker-set
"""
# Validate format of plan
if not _validate_format(plan):
return False
# Verify no duplicate partitions
partition_names = [
(p_data['topic'], p_data['partition'])
for p_data in plan['partitions']
]
duplicate_partitions = [
partition for partition, count in six.iteritems(Counter(partition_names))
if count > 1
]
if duplicate_partitions:
_log.error(
'Duplicate partitions in plan {p_list}'
.format(p_list=duplicate_partitions),
)
return False
# Verify no duplicate brokers in partition-replicas
dup_replica_brokers = []
for p_data in plan['partitions']:
dup_replica_brokers = [
broker
for broker, count in Counter(p_data['replicas']).items()
if count > 1
]
if dup_replica_brokers:
_log.error(
'Duplicate brokers: ({topic}, {p_id}) in replicas {replicas}'
.format(
topic=p_data['topic'],
p_id=p_data['partition'],
replicas=p_data['replicas'],
)
)
return False
# Verify same replication-factor for every topic
topic_replication_factor = {}
for partition_info in plan['partitions']:
topic = partition_info['topic']
replication_factor = len(partition_info['replicas'])
if topic in list(topic_replication_factor.keys()):
if topic_replication_factor[topic] != replication_factor:
_log.error(
'Mismatch in replication-factor of partitions for topic '
'{topic}'.format(topic=topic),
)
return False
else:
topic_replication_factor[topic] = replication_factor
return True |
def quit(self):
"""Restore previous stdout/stderr and destroy the window."""
sys.stdout = self._oldstdout
sys.stderr = self._oldstderr
        self.destroy() | Restore previous stdout/stderr and destroy the window. | Below is the instruction that describes the task:
### Input:
Restore previous stdout/stderr and destroy the window.
### Response:
def quit(self):
"""Restore previous stdout/stderr and destroy the window."""
sys.stdout = self._oldstdout
sys.stderr = self._oldstderr
self.destroy() |
def text(self, x, y, text, attr=None):
u'''Write text at the given position.'''
self.pos(x, y)
        self.write_color(text, attr) | u'''Write text at the given position. | Below is the instruction that describes the task:
### Input:
u'''Write text at the given position.
### Response:
def text(self, x, y, text, attr=None):
u'''Write text at the given position.'''
self.pos(x, y)
self.write_color(text, attr) |
def get_dataframe_from_variable(nc, data_var):
""" Returns a Pandas DataFrame of the data.
This always returns positive down depths
"""
time_var = nc.get_variables_by_attributes(standard_name='time')[0]
depth_vars = nc.get_variables_by_attributes(axis=lambda v: v is not None and v.lower() == 'z')
depth_vars += nc.get_variables_by_attributes(standard_name=lambda v: v in ['height', 'depth' 'surface_altitude'], positive=lambda x: x is not None)
# Find the correct depth variable
depth_var = None
for d in depth_vars:
try:
if d._name in data_var.coordinates.split(" ") or d._name in data_var.dimensions:
depth_var = d
break
except AttributeError:
continue
times = netCDF4.num2date(time_var[:], units=time_var.units, calendar=getattr(time_var, 'calendar', 'standard'))
original_times_size = times.size
if depth_var is None and hasattr(data_var, 'sensor_depth'):
depth_type = get_type(data_var.sensor_depth)
depths = np.asarray([data_var.sensor_depth] * len(times)).flatten()
values = data_var[:].flatten()
elif depth_var is None:
depths = np.asarray([np.nan] * len(times)).flatten()
depth_type = get_type(depths)
values = data_var[:].flatten()
else:
depths = depth_var[:]
depth_type = get_type(depths)
if len(data_var.shape) > 1:
times = np.repeat(times, depths.size)
depths = np.tile(depths, original_times_size)
values = data_var[:, :].flatten()
else:
values = data_var[:].flatten()
if getattr(depth_var, 'positive', 'down').lower() == 'up':
logger.warning("Converting depths to positive down before returning the DataFrame")
depths = depths * -1
# https://github.com/numpy/numpy/issues/4595
# We can't call astype on a MaskedConstant
if (
isinstance(depths, np.ma.core.MaskedConstant) or
(hasattr(depths, 'mask') and depths.mask.all())
):
depths = np.asarray([np.nan] * len(times)).flatten()
df = pd.DataFrame({ 'time': times,
'value': values.astype(data_var.dtype),
'unit': data_var.units if hasattr(data_var, 'units') else np.nan,
'depth': depths.astype(depth_type) })
df.set_index([pd.DatetimeIndex(df['time']), pd.Float64Index(df['depth'])], inplace=True)
return df | Returns a Pandas DataFrame of the data.
    This always returns positive down depths | Below is the instruction that describes the task:
### Input:
Returns a Pandas DataFrame of the data.
This always returns positive down depths
### Response:
def get_dataframe_from_variable(nc, data_var):
""" Returns a Pandas DataFrame of the data.
This always returns positive down depths
"""
time_var = nc.get_variables_by_attributes(standard_name='time')[0]
depth_vars = nc.get_variables_by_attributes(axis=lambda v: v is not None and v.lower() == 'z')
depth_vars += nc.get_variables_by_attributes(standard_name=lambda v: v in ['height', 'depth' 'surface_altitude'], positive=lambda x: x is not None)
# Find the correct depth variable
depth_var = None
for d in depth_vars:
try:
if d._name in data_var.coordinates.split(" ") or d._name in data_var.dimensions:
depth_var = d
break
except AttributeError:
continue
times = netCDF4.num2date(time_var[:], units=time_var.units, calendar=getattr(time_var, 'calendar', 'standard'))
original_times_size = times.size
if depth_var is None and hasattr(data_var, 'sensor_depth'):
depth_type = get_type(data_var.sensor_depth)
depths = np.asarray([data_var.sensor_depth] * len(times)).flatten()
values = data_var[:].flatten()
elif depth_var is None:
depths = np.asarray([np.nan] * len(times)).flatten()
depth_type = get_type(depths)
values = data_var[:].flatten()
else:
depths = depth_var[:]
depth_type = get_type(depths)
if len(data_var.shape) > 1:
times = np.repeat(times, depths.size)
depths = np.tile(depths, original_times_size)
values = data_var[:, :].flatten()
else:
values = data_var[:].flatten()
if getattr(depth_var, 'positive', 'down').lower() == 'up':
logger.warning("Converting depths to positive down before returning the DataFrame")
depths = depths * -1
# https://github.com/numpy/numpy/issues/4595
# We can't call astype on a MaskedConstant
if (
isinstance(depths, np.ma.core.MaskedConstant) or
(hasattr(depths, 'mask') and depths.mask.all())
):
depths = np.asarray([np.nan] * len(times)).flatten()
df = pd.DataFrame({ 'time': times,
'value': values.astype(data_var.dtype),
'unit': data_var.units if hasattr(data_var, 'units') else np.nan,
'depth': depths.astype(depth_type) })
df.set_index([pd.DatetimeIndex(df['time']), pd.Float64Index(df['depth'])], inplace=True)
return df |
def make_arrow(self, portal):
"""Make an :class:`Arrow` to represent a :class:`Portal`, store it,
and return it.
"""
if (
portal["origin"] not in self.spot or
portal["destination"] not in self.spot
):
raise ValueError(
"An :class:`Arrow` should only be made after "
"the :class:`Spot`s it connects"
)
if (
portal["origin"] in self.arrow and
portal["destination"] in self.arrow[portal["origin"]]
):
raise KeyError("Already have an Arrow for this Portal")
return self._core_make_arrow(portal, self.spot[portal['origin']], self.spot[portal['destination']], self.arrow) | Make an :class:`Arrow` to represent a :class:`Portal`, store it,
        and return it. | Below is the instruction that describes the task:
### Input:
Make an :class:`Arrow` to represent a :class:`Portal`, store it,
and return it.
### Response:
def make_arrow(self, portal):
"""Make an :class:`Arrow` to represent a :class:`Portal`, store it,
and return it.
"""
if (
portal["origin"] not in self.spot or
portal["destination"] not in self.spot
):
raise ValueError(
"An :class:`Arrow` should only be made after "
"the :class:`Spot`s it connects"
)
if (
portal["origin"] in self.arrow and
portal["destination"] in self.arrow[portal["origin"]]
):
raise KeyError("Already have an Arrow for this Portal")
return self._core_make_arrow(portal, self.spot[portal['origin']], self.spot[portal['destination']], self.arrow) |
def _tryConnect(src, unit, intfName):
"""
Try connect src to interface of specified name on unit.
Ignore if interface is not present or if it already has driver.
"""
try:
dst = getattr(unit, intfName)
except AttributeError:
return
if not dst._sig.drivers:
connect(src, dst) | Try connect src to interface of specified name on unit.
    Ignore if interface is not present or if it already has driver. | Below is the instruction that describes the task:
### Input:
Try connect src to interface of specified name on unit.
Ignore if interface is not present or if it already has driver.
### Response:
def _tryConnect(src, unit, intfName):
"""
Try connect src to interface of specified name on unit.
Ignore if interface is not present or if it already has driver.
"""
try:
dst = getattr(unit, intfName)
except AttributeError:
return
if not dst._sig.drivers:
connect(src, dst) |
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names) | :param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
    set may be smaller than this due to the roll up. | Below is the instruction that describes the task:
### Input:
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
### Response:
def get(
table,
session,
version_id=None,
t1=None,
t2=None,
fields=None,
conds=None,
include_deleted=True,
page=1,
page_size=100,
):
"""
:param table: the model class which inherits from
:class:`~savage.models.user_table.SavageModelMixin` and specifies the model of
the user table from which we are querying
:param session: a sqlalchemy session with connections to the database
:param version_id: if specified, the value of t1 and t2 will be ignored. If specified, this will
return all records after the specified version_id.
:param t1: lower bound time for this query; if None or unspecified,
defaults to the unix epoch. If this is specified and t2 is not, this query
will simply return the time slice of data at t1. This must either be a valid
sql time string or a datetime.datetime object.
:param t2: upper bound time for this query; if both t1 and t2 are none or unspecified,
this will return the latest data (i.e. time slice of data now). This must either be a
valid sql time string or a datetime.datetime object.
:param fields: a list of strings which corresponds to columns in the table; If
None or unspecified, returns all fields in the table.
:param conds: a list of dictionary of key value pairs where keys are columns in the table
and values are values the column should take on. If specified, this query will
only return rows where the columns meet all the conditions. The columns specified
in this dictionary must be exactly the unique columns that versioning pivots around.
:param include_deleted: if ``True``, the response will include deleted changes. Else it will
only include changes where ``deleted = 0`` i.e. the data was in the user table.
:param page: the offset of the result set (1-indexed); i.e. if page_size is 100 and page is 2,
the result set will contain results 100 - 199
:param page_size: upper bound on number of results to display. Note the actual returned result
set may be smaller than this due to the roll up.
"""
limit, offset = _get_limit_and_offset(page, page_size)
version_col_names = table.version_columns
if fields is None:
fields = [name for name in utils.get_column_names(table) if name != 'version_id']
if version_id is not None:
return _format_response(utils.result_to_dict(session.execute(
sa.select([table.ArchiveTable])
.where(table.ArchiveTable.version_id > version_id)
.order_by(*_get_order_clause(table.ArchiveTable))
.limit(page_size)
.offset(offset)
)), fields, version_col_names)
if t1 is None and t2 is None:
rows = _get_latest_time_slice(table, session, conds, include_deleted, limit, offset)
return _format_response(rows, fields, version_col_names)
if t2 is None: # return a historical time slice
rows = _get_historical_time_slice(
table, session, t1, conds, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names)
if t1 is None:
t1 = datetime.utcfromtimestamp(0)
rows = _get_historical_changes(
table, session, conds, t1, t2, include_deleted, limit, offset
)
return _format_response(rows, fields, version_col_names) |
def create_api_key(self, body, **kwargs): # noqa: E501
"""Create a new API key. # noqa: E501
An endpoint for creating a new API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys -d '{\"name\": \"MyKey1\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_api_key(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param ApiKeyInfoReq body: The details of the API key to be created. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.create_api_key_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_api_key_with_http_info(body, **kwargs) # noqa: E501
return data | Create a new API key. # noqa: E501
An endpoint for creating a new API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys -d '{\"name\": \"MyKey1\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_api_key(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param ApiKeyInfoReq body: The details of the API key to be created. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
Create a new API key. # noqa: E501
An endpoint for creating a new API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys -d '{\"name\": \"MyKey1\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_api_key(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param ApiKeyInfoReq body: The details of the API key to be created. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread.
### Response:
def create_api_key(self, body, **kwargs): # noqa: E501
"""Create a new API key. # noqa: E501
An endpoint for creating a new API key. **Example usage:** `curl -X POST https://api.us-east-1.mbedcloud.com/v3/api-keys -d '{\"name\": \"MyKey1\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.create_api_key(body, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param ApiKeyInfoReq body: The details of the API key to be created. (required)
:return: ApiKeyInfoResp
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.create_api_key_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_api_key_with_http_info(body, **kwargs) # noqa: E501
return data |
def group_permissions(permissions):
"""
Groups a permissions list
Returns a dictionary, with permission types as keys and sets of entities
with access to the resource as values, e.g.:
{
'organisation_id': {
'org1': set(['rw', 'r', 'w']),
'org2': set(['-']),
'org3': set(['r', 'w']),
},
'all': set(['r'])
}
'org1' has 'rw' access to the resource, 'org2' is denied access and 'org3'
has 'r' & 'w' access (the same as 'rw'). Note that 'rw' will always result
in 'rw', 'r' & 'w' in the set to make checks easier.
If present in the resource's permissions, the 'all' permission type is an
exception in that it's value is just a set instead of a dictionary.
:param permissions: a list of permissions
:returns: defaultdict
"""
groups = defaultdict(lambda: defaultdict(set))
for p in sorted(permissions, key=itemgetter('type')):
permission_set = groups[p['type']][p.get('value')]
permission_set.add(p['permission'])
if p['permission'] == 'rw':
permission_set.update({'r', 'w'})
# the 'all' permission type always has None as the value
groups['all'] = groups['all'][None]
return groups | Groups a permissions list
Returns a dictionary, with permission types as keys and sets of entities
with access to the resource as values, e.g.:
{
'organisation_id': {
'org1': set(['rw', 'r', 'w']),
'org2': set(['-']),
'org3': set(['r', 'w']),
},
'all': set(['r'])
}
'org1' has 'rw' access to the resource, 'org2' is denied access and 'org3'
has 'r' & 'w' access (the same as 'rw'). Note that 'rw' will always result
in 'rw', 'r' & 'w' in the set to make checks easier.
If present in the resource's permissions, the 'all' permission type is an
exception in that it's value is just a set instead of a dictionary.
:param permissions: a list of permissions
    :returns: defaultdict | Below is the instruction that describes the task:
### Input:
Groups a permissions list
Returns a dictionary, with permission types as keys and sets of entities
with access to the resource as values, e.g.:
{
'organisation_id': {
'org1': set(['rw', 'r', 'w']),
'org2': set(['-']),
'org3': set(['r', 'w']),
},
'all': set(['r'])
}
'org1' has 'rw' access to the resource, 'org2' is denied access and 'org3'
has 'r' & 'w' access (the same as 'rw'). Note that 'rw' will always result
in 'rw', 'r' & 'w' in the set to make checks easier.
If present in the resource's permissions, the 'all' permission type is an
exception in that it's value is just a set instead of a dictionary.
:param permissions: a list of permissions
:returns: defaultdict
### Response:
def group_permissions(permissions):
"""
Groups a permissions list
Returns a dictionary, with permission types as keys and sets of entities
with access to the resource as values, e.g.:
{
'organisation_id': {
'org1': set(['rw', 'r', 'w']),
'org2': set(['-']),
'org3': set(['r', 'w']),
},
'all': set(['r'])
}
'org1' has 'rw' access to the resource, 'org2' is denied access and 'org3'
has 'r' & 'w' access (the same as 'rw'). Note that 'rw' will always result
in 'rw', 'r' & 'w' in the set to make checks easier.
If present in the resource's permissions, the 'all' permission type is an
exception in that it's value is just a set instead of a dictionary.
:param permissions: a list of permissions
:returns: defaultdict
"""
groups = defaultdict(lambda: defaultdict(set))
for p in sorted(permissions, key=itemgetter('type')):
permission_set = groups[p['type']][p.get('value')]
permission_set.add(p['permission'])
if p['permission'] == 'rw':
permission_set.update({'r', 'w'})
# the 'all' permission type always has None as the value
groups['all'] = groups['all'][None]
return groups |
def from_events(self, instance, ev_args, ctx):
"""
Detect the object to instanciate from the arguments `ev_args` of the
``"start"`` event. The new object is stored at the corresponding
descriptor attribute on `instance`.
This method is suspendable.
"""
obj = yield from self._process(instance, ev_args, ctx)
self.__set__(instance, obj)
return obj | Detect the object to instanciate from the arguments `ev_args` of the
``"start"`` event. The new object is stored at the corresponding
descriptor attribute on `instance`.
        This method is suspendable. | Below is the instruction that describes the task:
### Input:
Detect the object to instanciate from the arguments `ev_args` of the
``"start"`` event. The new object is stored at the corresponding
descriptor attribute on `instance`.
This method is suspendable.
### Response:
def from_events(self, instance, ev_args, ctx):
"""
Detect the object to instanciate from the arguments `ev_args` of the
``"start"`` event. The new object is stored at the corresponding
descriptor attribute on `instance`.
This method is suspendable.
"""
obj = yield from self._process(instance, ev_args, ctx)
self.__set__(instance, obj)
return obj |
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False | Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
        bool | Below is the instruction that describes the task:
### Input:
Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
### Response:
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False |
def find_repo_type(self):
"""Check for git or hg repository"""
is_git = self.call(['git', 'rev-parse', '--is-inside-work-tree'],
devnull=True)
if is_git != 0:
if self.debug:
click.echo('not git')
is_hg = self.call(['hg', '-q', 'stat'], devnull=True)
if is_hg != 0:
if self.debug:
click.echo('not hg')
exit(1)
else:
                self.vc_name = 'hg' | Check for git or hg repository | Below is the instruction that describes the task:
### Input:
Check for git or hg repository
### Response:
def find_repo_type(self):
"""Check for git or hg repository"""
is_git = self.call(['git', 'rev-parse', '--is-inside-work-tree'],
devnull=True)
if is_git != 0:
if self.debug:
click.echo('not git')
is_hg = self.call(['hg', '-q', 'stat'], devnull=True)
if is_hg != 0:
if self.debug:
click.echo('not hg')
exit(1)
else:
self.vc_name = 'hg' |
def set_window_size(self, width, height):
"""Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
The height of the window in pixels.
"""
self._imgwin_wd = int(width)
self._imgwin_ht = int(height)
self._ctr_x = width // 2
self._ctr_y = height // 2
self.logger.debug("widget resized to %dx%d" % (width, height))
self.make_callback('configure', width, height)
self.redraw(whence=0) | Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
        The height of the window in pixels. | Below is the instruction that describes the task:
### Input:
Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
The height of the window in pixels.
### Response:
def set_window_size(self, width, height):
"""Report the size of the window to display the image.
**Callbacks**
Will call any callbacks registered for the ``'configure'`` event.
Callbacks should have a method signature of::
(viewer, width, height, ...)
.. note::
This is called by the subclass with ``width`` and ``height``
as soon as the actual dimensions of the allocated window are known.
Parameters
----------
width : int
The width of the window in pixels.
height : int
The height of the window in pixels.
"""
self._imgwin_wd = int(width)
self._imgwin_ht = int(height)
self._ctr_x = width // 2
self._ctr_y = height // 2
self.logger.debug("widget resized to %dx%d" % (width, height))
self.make_callback('configure', width, height)
self.redraw(whence=0) |
def math(data, operator=None, operator_name=None, axis=None):
    """Apply mathematical operation to each trial and channel individually.

    Parameters
    ----------
    data : instance of DataTime, DataFreq, or DataTimeFreq
    operator : function or tuple of functions, optional
        function(s) to run on the data.
    operator_name : str or tuple of str, optional
        name of the function(s) to run on the data.
    axis : str, optional
        for functions that accept it, which axis you should run it on.

    Returns
    -------
    instance of Data
        data where the trials underwent operator.

    Raises
    ------
    TypeError
        If you pass both operator and operator_name.
    ValueError
        When you try to operate on an axis that has already been removed.

    Notes
    -----
    operator and operator_name are mutually exclusive. operator_name is given
    as shortcut for most common operations.
    If a function accepts an 'axis' argument, you need to pass 'axis' to the
    constructor. In this way, it'll apply the function to the correct
    dimension.
    The possible point-wise operator_name are:
    'absolute', 'angle', 'dB' (=10 * log10), 'exp', 'log', 'sqrt', 'square',
    'unwrap'
    The operator_name's that need an axis, but do not remove it:
    'hilbert', 'diff', 'detrend'
    The operator_name's that need an axis and remove it:
    'mean', 'median', 'mode', 'std'

    Examples
    --------
    You can pass a single value or a tuple. The order starts from left to
    right, so abs of the hilbert transform, should be:

    >>> rms = math(data, operator_name=('hilbert', 'abs'), axis='time')

    If you want to pass the power of three, use lambda (or partial):

    >>> p3 = lambda x: power(x, 3)
    >>> data_p3 = math(data, operator=p3)

    Note that lambdas are fine with point-wise operation, but if you want them
    to operate on axis, you need to pass ''axis'' as well, so that:

    >>> std_ddof = lambda x, axis: std(x, axis, ddof=1)
    >>> data_std = math(data, operator=std_ddof)

    If you don't pass 'axis' in lambda, it'll never know on which axis the
    function should be applied and you'll get unpredictable results.
    If you want to pass a function that operates on an axis and removes it (for
    example, if you want the max value over time), you need to add an argument
    in your function called ''keepdims'' (the values won't be used):

    >>> def func(x, axis, keepdims=None):
    >>>     return nanmax(x, axis=axis)
    """
    if operator is not None and operator_name is not None:
        raise TypeError('Parameters "operator" and "operator_name" are '
                        'mutually exclusive')

    # turn input into a tuple of functions in operators
    if operator_name is not None:
        if isinstance(operator_name, str):
            operator_name = (operator_name, )

        operators = []
        for one_operator_name in operator_name:
            # NOTE(review): names are resolved with eval() against the module
            # namespace; fine for the fixed whitelist documented above, but do
            # not pass untrusted strings here.
            operators.append(eval(one_operator_name))
        operator = tuple(operators)

    # make it an iterable, so a single callable and a tuple are handled alike
    if callable(operator):
        operator = (operator, )

    # inspect each function once and record how it must be called
    operations = []
    for one_operator in operator:
        on_axis = False
        keepdims = True

        try:
            args = getfullargspec(one_operator).args
        except TypeError:
            # builtins / C functions cannot be introspected; treat them as
            # plain point-wise operators
            lg.debug('func ' + str(one_operator) + ' is not a Python '
                     'function')
        else:
            if 'axis' in args:
                on_axis = True

                if axis is None:
                    raise TypeError('You need to specify an axis if you '
                                    'use ' + one_operator.__name__ +
                                    ' (which applies to an axis)')

            # a 'keepdims' parameter (or membership in NOKEEPDIM) marks
            # functions that collapse the axis they operate on
            if 'keepdims' in args or one_operator in NOKEEPDIM:
                keepdims = False

        operations.append({'name': one_operator.__name__,
                           'func': one_operator,
                           'on_axis': on_axis,
                           'keepdims': keepdims,
                           })

    output = data._copy()
    if axis is not None:
        idx_axis = data.index_of(axis)

    first_op = True
    for op in operations:
        #lg.info('running operator: ' + op['name'])
        func = op['func']
        if func == mode:
            # mode returns a (values, counts) pair; keep only the values
            # (presumably scipy.stats.mode -- confirm against imports)
            func = lambda x, axis: mode(x, axis=axis)[0]

        for i in range(output.number_of('trial')):
            # don't copy original data, but use data if it's the first operation
            if first_op:
                x = data(trial=i)
            else:
                x = output(trial=i)

            if op['on_axis']:
                lg.debug('running ' + op['name'] + ' on ' + str(idx_axis))

                try:
                    if func == diff:
                        # pad so diff's output keeps the original axis length
                        lg.debug('Diff has one-point of zero padding')
                        x = _pad_one_axis_one_value(x, idx_axis)
                    output.data[i] = func(x, axis=idx_axis)

                except IndexError:
                    raise ValueError('The axis ' + axis + ' does not '
                                     'exist in [' +
                                     ', '.join(list(data.axis.keys())) + ']')

            else:
                lg.debug('running ' + op['name'] + ' on each datapoint')
                output.data[i] = func(x)

        first_op = False

        # axis-collapsing operators remove the dimension from the metadata too
        if op['on_axis'] and not op['keepdims']:
            del output.axis[axis]

    return output
Parameters
----------
data : instance of DataTime, DataFreq, or DataTimeFreq
operator : function or tuple of functions, optional
function(s) to run on the data.
operator_name : str or tuple of str, optional
name of the function(s) to run on the data.
axis : str, optional
for functions that accept it, which axis you should run it on.
Returns
-------
instance of Data
data where the trials underwent operator.
Raises
------
TypeError
If you pass both operator and operator_name.
ValueError
When you try to operate on an axis that has already been removed.
Notes
-----
operator and operator_name are mutually exclusive. operator_name is given
as shortcut for most common operations.
If a function accepts an 'axis' argument, you need to pass 'axis' to the
constructor. In this way, it'll apply the function to the correct
dimension.
The possible point-wise operator_name are:
'absolute', 'angle', 'dB' (=10 * log10), 'exp', 'log', 'sqrt', 'square',
'unwrap'
The operator_name's that need an axis, but do not remove it:
'hilbert', 'diff', 'detrend'
The operator_name's that need an axis and remove it:
'mean', 'median', 'mode', 'std'
Examples
--------
You can pass a single value or a tuple. The order starts from left to
right, so abs of the hilbert transform, should be:
>>> rms = math(data, operator_name=('hilbert', 'abs'), axis='time')
If you want to pass the power of three, use lambda (or partial):
>>> p3 = lambda x: power(x, 3)
>>> data_p3 = math(data, operator=p3)
Note that lambdas are fine with point-wise operation, but if you want them
to operate on axis, you need to pass ''axis'' as well, so that:
>>> std_ddof = lambda x, axis: std(x, axis, ddof=1)
>>> data_std = math(data, operator=std_ddof)
If you don't pass 'axis' in lambda, it'll never know on which axis the
function should be applied and you'll get unpredictable results.
If you want to pass a function that operates on an axis and removes it (for
example, if you want the max value over time), you need to add an argument
in your function called ''keepdims'' (the values won't be used):
>>> def func(x, axis, keepdims=None):
>>> return nanmax(x, axis=axis) | Below is the instruction that describes the task:
### Input:
Apply mathematical operation to each trial and channel individually.
Parameters
----------
data : instance of DataTime, DataFreq, or DataTimeFreq
operator : function or tuple of functions, optional
function(s) to run on the data.
operator_name : str or tuple of str, optional
name of the function(s) to run on the data.
axis : str, optional
for functions that accept it, which axis you should run it on.
Returns
-------
instance of Data
data where the trials underwent operator.
Raises
------
TypeError
If you pass both operator and operator_name.
ValueError
When you try to operate on an axis that has already been removed.
Notes
-----
operator and operator_name are mutually exclusive. operator_name is given
as shortcut for most common operations.
If a function accepts an 'axis' argument, you need to pass 'axis' to the
constructor. In this way, it'll apply the function to the correct
dimension.
The possible point-wise operator_name are:
'absolute', 'angle', 'dB' (=10 * log10), 'exp', 'log', 'sqrt', 'square',
'unwrap'
The operator_name's that need an axis, but do not remove it:
'hilbert', 'diff', 'detrend'
The operator_name's that need an axis and remove it:
'mean', 'median', 'mode', 'std'
Examples
--------
You can pass a single value or a tuple. The order starts from left to
right, so abs of the hilbert transform, should be:
>>> rms = math(data, operator_name=('hilbert', 'abs'), axis='time')
If you want to pass the power of three, use lambda (or partial):
>>> p3 = lambda x: power(x, 3)
>>> data_p3 = math(data, operator=p3)
Note that lambdas are fine with point-wise operation, but if you want them
to operate on axis, you need to pass ''axis'' as well, so that:
>>> std_ddof = lambda x, axis: std(x, axis, ddof=1)
>>> data_std = math(data, operator=std_ddof)
If you don't pass 'axis' in lambda, it'll never know on which axis the
function should be applied and you'll get unpredictable results.
If you want to pass a function that operates on an axis and removes it (for
example, if you want the max value over time), you need to add an argument
in your function called ''keepdims'' (the values won't be used):
>>> def func(x, axis, keepdims=None):
>>> return nanmax(x, axis=axis)
### Response:
def math(data, operator=None, operator_name=None, axis=None):
"""Apply mathematical operation to each trial and channel individually.
Parameters
----------
data : instance of DataTime, DataFreq, or DataTimeFreq
operator : function or tuple of functions, optional
function(s) to run on the data.
operator_name : str or tuple of str, optional
name of the function(s) to run on the data.
axis : str, optional
for functions that accept it, which axis you should run it on.
Returns
-------
instance of Data
data where the trials underwent operator.
Raises
------
TypeError
If you pass both operator and operator_name.
ValueError
When you try to operate on an axis that has already been removed.
Notes
-----
operator and operator_name are mutually exclusive. operator_name is given
as shortcut for most common operations.
If a function accepts an 'axis' argument, you need to pass 'axis' to the
constructor. In this way, it'll apply the function to the correct
dimension.
The possible point-wise operator_name are:
'absolute', 'angle', 'dB' (=10 * log10), 'exp', 'log', 'sqrt', 'square',
'unwrap'
The operator_name's that need an axis, but do not remove it:
'hilbert', 'diff', 'detrend'
The operator_name's that need an axis and remove it:
'mean', 'median', 'mode', 'std'
Examples
--------
You can pass a single value or a tuple. The order starts from left to
right, so abs of the hilbert transform, should be:
>>> rms = math(data, operator_name=('hilbert', 'abs'), axis='time')
If you want to pass the power of three, use lambda (or partial):
>>> p3 = lambda x: power(x, 3)
>>> data_p3 = math(data, operator=p3)
Note that lambdas are fine with point-wise operation, but if you want them
to operate on axis, you need to pass ''axis'' as well, so that:
>>> std_ddof = lambda x, axis: std(x, axis, ddof=1)
>>> data_std = math(data, operator=std_ddof)
If you don't pass 'axis' in lambda, it'll never know on which axis the
function should be applied and you'll get unpredictable results.
If you want to pass a function that operates on an axis and removes it (for
example, if you want the max value over time), you need to add an argument
in your function called ''keepdims'' (the values won't be used):
>>> def func(x, axis, keepdims=None):
>>> return nanmax(x, axis=axis)
"""
if operator is not None and operator_name is not None:
raise TypeError('Parameters "operator" and "operator_name" are '
'mutually exclusive')
# turn input into a tuple of functions in operators
if operator_name is not None:
if isinstance(operator_name, str):
operator_name = (operator_name, )
operators = []
for one_operator_name in operator_name:
operators.append(eval(one_operator_name))
operator = tuple(operators)
# make it an iterable
if callable(operator):
operator = (operator, )
operations = []
for one_operator in operator:
on_axis = False
keepdims = True
try:
args = getfullargspec(one_operator).args
except TypeError:
lg.debug('func ' + str(one_operator) + ' is not a Python '
'function')
else:
if 'axis' in args:
on_axis = True
if axis is None:
raise TypeError('You need to specify an axis if you '
'use ' + one_operator.__name__ +
' (which applies to an axis)')
if 'keepdims' in args or one_operator in NOKEEPDIM:
keepdims = False
operations.append({'name': one_operator.__name__,
'func': one_operator,
'on_axis': on_axis,
'keepdims': keepdims,
})
output = data._copy()
if axis is not None:
idx_axis = data.index_of(axis)
first_op = True
for op in operations:
#lg.info('running operator: ' + op['name'])
func = op['func']
if func == mode:
func = lambda x, axis: mode(x, axis=axis)[0]
for i in range(output.number_of('trial')):
# don't copy original data, but use data if it's the first operation
if first_op:
x = data(trial=i)
else:
x = output(trial=i)
if op['on_axis']:
lg.debug('running ' + op['name'] + ' on ' + str(idx_axis))
try:
if func == diff:
lg.debug('Diff has one-point of zero padding')
x = _pad_one_axis_one_value(x, idx_axis)
output.data[i] = func(x, axis=idx_axis)
except IndexError:
raise ValueError('The axis ' + axis + ' does not '
'exist in [' +
', '.join(list(data.axis.keys())) + ']')
else:
lg.debug('running ' + op['name'] + ' on each datapoint')
output.data[i] = func(x)
first_op = False
if op['on_axis'] and not op['keepdims']:
del output.axis[axis]
return output |
def get_segment_label(segment, segment_type, segment_text, ns, tokenized):
    """
    Build a human-readable node label for a segment (used when rendering
    the graph with dot).
    """
    prefix = segment_type[0] if segment_type else '_'
    seg_id = segment.attrib['id']
    if tokenized:
        return u'[{0}]:{1}:segment:{2}'.format(prefix, ns, seg_id)
    # untokenized graphs embed (the beginning of) the segment text instead
    return u'[{0}]:{1}: {2}...'.format(prefix, seg_id, segment_text[:20])
visualization. | Below is the instruction that describes the task:
### Input:
generates an appropriate node label for a segment (useful for dot
visualization).
### Response:
def get_segment_label(segment, segment_type, segment_text, ns, tokenized):
"""
generates an appropriate node label for a segment (useful for dot
visualization).
"""
segment_prefix = segment_type[0] if segment_type else '_'
if tokenized:
segment_label = u'[{0}]:{1}:segment:{2}'.format(
segment_prefix, ns, segment.attrib['id'])
else:
# if the graph is not tokenized, put (the beginning of) the
# segment's text into its label
segment_label = u'[{0}]:{1}: {2}...'.format(
segment_prefix, segment.attrib['id'], segment_text[:20])
return segment_label |
def do_examine(self, arg):
    """Opens a unit test case's .out.compare file to examine the verbose comparison
    report across values.

    :param arg: space-separated "<testcase> <output>" naming the case and the
      output file whose .compare report should be opened.
    """
    #We use their default editor (if it has been set); otherwise we can't do much of
    #anything and issue a warning.
    from os import getenv, path, system
    testcase, output = arg.split()
    target = path.join(self.tests[self.active].stagedir, "tests", testcase,
                       "{}.compare".format(output))
    editor = getenv("EDITOR")
    if editor is not None:
        # BUGFIX: the command used to be wrapped in shell backticks
        # ("`$EDITOR {}`"), which made the shell execute the *output* of the
        # editor as a command; invoke the editor directly on the quoted path.
        system('{0} "{1}"'.format(editor, target))
    else:
        msg.warn("$EDITOR not set in environment. Can't open {}".format(target))
report across values. | Below is the instruction that describes the task:
### Input:
Opens a unit test case's .out.compare file to examine the verbose comparison
report across values.
### Response:
def do_examine(self, arg):
"""Opens a unit test case's .out.compare file to examine the verbose comparison
report across values.
"""
#We use their default editor (if it has been set); otherwise we can't do much of
#anything and issue a warning.
from os import getenv, path, system
testcase, output = arg.split()
target = path.join(self.tests[self.active].stagedir, "tests", testcase,
"{}.compare".format(output))
if getenv("EDITOR") is not None:
system("`$EDITOR {}`".format(target))
else:
msg.warn("$EDITOR not set in environment. Can't open {}".format(target)) |
def get_irregular_vertex(bgedge):
    """
    Return the irregular endpoint of *bgedge*.

    Raises an ``Exception`` when called on a regular edge, since such an
    edge has no irregular endpoint to return.
    """
    if not bgedge.is_irregular_edge:
        raise Exception("trying to retrieve an irregular vertex from regular edge")
    if bgedge.vertex1.is_irregular_vertex:
        return bgedge.vertex1
    return bgedge.vertex2
### Input:
This method is called only in irregular edges in current implementation, thus at least one edge will be irregular
### Response:
def get_irregular_vertex(bgedge):
"""
This method is called only in irregular edges in current implementation, thus at least one edge will be irregular
"""
if not bgedge.is_irregular_edge:
raise Exception("trying to retrieve an irregular vertex from regular edge")
return bgedge.vertex1 if bgedge.vertex1.is_irregular_vertex else bgedge.vertex2 |
def sort(args):
    """
    %prog sort <blastfile|coordsfile>

    Sort lines so that same query grouped together with scores descending. The
    sort is 'in-place'.
    """
    p = OptionParser(sort.__doc__)
    p.add_option("--query", default=False, action="store_true",
                 help="Sort by query position [default: %default]")
    p.add_option("--ref", default=False, action="store_true",
                 help="Sort by reference position [default: %default]")
    p.add_option("--refscore", default=False, action="store_true",
                 help="Sort by reference name, then score descending [default: %default]")
    p.add_option("--coords", default=False, action="store_true",
                 help="File is .coords generated by NUCMER [default: %default]")
    p.set_tmpdir()
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    blastfile, = args
    if opts.coords:
        # .coords columns differ from BLAST tabular, hence different sort keys
        if opts.query:
            key = "-k13,13 -k3,3n"
        elif opts.ref:
            key = "-k12,12 -k1,1n"
        else:
            # BUGFIX: `key` used to be left unbound on this path, crashing
            # below with a NameError; fail with an explicit message instead.
            sys.exit("--coords requires either --query or --ref")
    else:
        if opts.query:
            key = "-k1,1 -k7,7n"
        elif opts.ref:
            key = "-k2,2 -k9,9n"
        elif opts.refscore:
            key = "-k2,2 -k12,12gr"
        else:
            # default: group by query name, scores descending
            key = "-k1,1 -k12,12gr"
    cmd = "sort"
    if opts.tmpdir:
        cmd += " -T {0}".format(opts.tmpdir)
    # GNU sort -o allows writing back to the input file ("in-place")
    cmd += " {0} {1} -o {1}".format(key, blastfile)
    sh(cmd)
Sort lines so that same query grouped together with scores descending. The
sort is 'in-place'. | Below is the the instruction that describes the task:
### Input:
%prog sort <blastfile|coordsfile>
Sort lines so that same query grouped together with scores descending. The
sort is 'in-place'.
### Response:
def sort(args):
"""
%prog sort <blastfile|coordsfile>
Sort lines so that same query grouped together with scores descending. The
sort is 'in-place'.
"""
p = OptionParser(sort.__doc__)
p.add_option("--query", default=False, action="store_true",
help="Sort by query position [default: %default]")
p.add_option("--ref", default=False, action="store_true",
help="Sort by reference position [default: %default]")
p.add_option("--refscore", default=False, action="store_true",
help="Sort by reference name, then score descending [default: %default]")
p.add_option("--coords", default=False, action="store_true",
help="File is .coords generated by NUCMER [default: %default]")
p.set_tmpdir()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
if opts.coords:
if opts.query:
key = "-k13,13 -k3,3n"
elif opts.ref:
key = "-k12,12 -k1,1n"
else:
if opts.query:
key = "-k1,1 -k7,7n"
elif opts.ref:
key = "-k2,2 -k9,9n"
elif opts.refscore:
key = "-k2,2 -k12,12gr"
else:
key = "-k1,1 -k12,12gr"
cmd = "sort"
if opts.tmpdir:
cmd += " -T {0}".format(opts.tmpdir)
cmd += " {0} {1} -o {1}".format(key, blastfile)
sh(cmd) |
def _proxy(self):
    """
    Lazily build (and memoize) the context object that proxies all instance
    actions for this ParticipantInstance.

    :returns: ParticipantContext for this ParticipantInstance
    :rtype: twilio.rest.proxy.v1.service.session.participant.ParticipantContext
    """
    if self._context is None:
        solution = self._solution
        self._context = ParticipantContext(
            self._version,
            service_sid=solution['service_sid'],
            session_sid=solution['session_sid'],
            sid=solution['sid'],
        )
    return self._context
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.ParticipantContext | Below is the the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.ParticipantContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.proxy.v1.service.session.participant.ParticipantContext
"""
if self._context is None:
self._context = ParticipantContext(
self._version,
service_sid=self._solution['service_sid'],
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
return self._context |
def _iterate_subsequences(self, tokens):
"""
Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
"""
for start_idx in xrange(len(tokens)):
for end_idx in xrange(start_idx + 1, len(tokens) + 1):
yield ' '.join(tokens[start_idx:end_idx]), start_idx | Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ? | Below is the the instruction that describes the task:
### Input:
Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
### Response:
def _iterate_subsequences(self, tokens):
"""
Using regex invokes this function, which significantly impacts performance of adapt. it is an N! operation.
Args:
tokens(list): list of tokens for Yield results.
Yields:
str: ?
"""
for start_idx in xrange(len(tokens)):
for end_idx in xrange(start_idx + 1, len(tokens) + 1):
yield ' '.join(tokens[start_idx:end_idx]), start_idx |
def intersubject_scores(fm, category, predicting_filenumbers,
                        predicting_subjects, predicted_filenumbers,
                        predicted_subjects, controls = True, scale_factor = 1):
    """
    Predict one group's fixations from another group's fixations.

    A fixation density map is computed from the fixations that
    predicting_subjects made on predicting_filenumbers (restricted to the
    given category); its predictive quality is then assessed against the
    fixations that predicted_subjects made on predicted_filenumbers via
    measures.prediction_scores.

    Parameters
    ----------
    fm : fixmat instance
    category : int
        Category from which the fixations are taken.
    predicting_filenumbers : list
        Images whose fixations are used to build the predicting density map.
    predicting_subjects : list
        Subjects whose fixations build the predicting density map.
    predicted_filenumbers : list
        Images from which the to-be-predicted fixations are taken.
    predicted_subjects : list
        Subjects whose fixations are used for evaluation.
    controls : bool, optional
        If True (default), control fixations are taken from the predicted
        subjects on *other* images of the same category; otherwise the
        measure generates its own controls.
    scale_factor : int, optional
        Scaling of the density map. Default is 1.

    Returns
    -------
    auc : area under the roc curve for sets of actuals and controls
    true_pos_rate : ndarray
        Rate of true positives for every given threshold value (lower sum
        interpolation over the values appearing in actuals).
    false_pos_rate : ndarray
        See true_pos_rate but for false positives.
    """
    in_category = fm.category == category
    predicting_fm = fm[
        (ismember(fm.SUBJECTINDEX, predicting_subjects)) &
        (ismember(fm.filenumber, predicting_filenumbers)) &
        in_category]
    predicted_fm = fm[
        (ismember(fm.SUBJECTINDEX, predicted_subjects)) &
        (ismember(fm.filenumber, predicted_filenumbers)) &
        in_category]

    try:
        predicting_fdm = compute_fdm(predicting_fm, scale_factor=scale_factor)
    except RuntimeError:
        # not enough fixations to build a density map
        predicting_fdm = None

    if controls == True:
        # controls: same subjects, same category, but *different* images
        fm_controls = fm[
            (ismember(fm.SUBJECTINDEX, predicted_subjects)) &
            ((ismember(fm.filenumber, predicted_filenumbers)) != True) &
            in_category]
        return measures.prediction_scores(
            predicting_fdm, predicted_fm,
            controls=(fm_controls.y, fm_controls.x))
    return measures.prediction_scores(predicting_fdm, predicted_fm, controls=None)
images can be predicted with the fixations from another set of subjects
on another set of images.
The prediction is carried out by computing a fixation density map from
fixations of predicting_subjects subjects on predicting_images images.
Prediction accuracy is assessed by measures.prediction_scores.
Parameters
fm : fixmat instance
category : int
Category from which the fixations are taken.
predicting_filenumbers : list
List of filenumbers used for prediction, i.e. images where fixations
for the prediction are taken from.
predicting_subjects : list
List of subjects whose fixations on images in predicting_filenumbers
are used for the prediction.
predicted_filenumnbers : list
List of images from which the to be predicted fixations are taken.
predicted_subjects : list
List of subjects used for evaluation, i.e subjects whose fixations
on images in predicted_filenumbers are taken for evaluation.
controls : bool, optional
If True (default), n_predict subjects are chosen from the fixmat.
If False, 1000 fixations are randomly generated and used for
testing.
scale_factor : int, optional
specifies the scaling of the fdm. Default is 1.
Returns
auc : area under the roc curve for sets of actuals and controls
true_pos_rate : ndarray
Rate of true positives for every given threshold value.
All values appearing in actuals are taken as thresholds. Uses lower
sum interpolation.
false_pos_rate : ndarray
See true_pos_rate but for false positives. | Below is the the instruction that describes the task:
### Input:
Calculates how well the fixations from a set of subjects on a set of
images can be predicted with the fixations from another set of subjects
on another set of images.
The prediction is carried out by computing a fixation density map from
fixations of predicting_subjects subjects on predicting_images images.
Prediction accuracy is assessed by measures.prediction_scores.
Parameters
fm : fixmat instance
category : int
Category from which the fixations are taken.
predicting_filenumbers : list
List of filenumbers used for prediction, i.e. images where fixations
for the prediction are taken from.
predicting_subjects : list
List of subjects whose fixations on images in predicting_filenumbers
are used for the prediction.
predicted_filenumnbers : list
List of images from which the to be predicted fixations are taken.
predicted_subjects : list
List of subjects used for evaluation, i.e subjects whose fixations
on images in predicted_filenumbers are taken for evaluation.
controls : bool, optional
If True (default), n_predict subjects are chosen from the fixmat.
If False, 1000 fixations are randomly generated and used for
testing.
scale_factor : int, optional
specifies the scaling of the fdm. Default is 1.
Returns
auc : area under the roc curve for sets of actuals and controls
true_pos_rate : ndarray
Rate of true positives for every given threshold value.
All values appearing in actuals are taken as thresholds. Uses lower
sum interpolation.
false_pos_rate : ndarray
See true_pos_rate but for false positives.
### Response:
def intersubject_scores(fm, category, predicting_filenumbers,
predicting_subjects, predicted_filenumbers,
predicted_subjects, controls = True, scale_factor = 1):
"""
Calculates how well the fixations from a set of subjects on a set of
images can be predicted with the fixations from another set of subjects
on another set of images.
The prediction is carried out by computing a fixation density map from
fixations of predicting_subjects subjects on predicting_images images.
Prediction accuracy is assessed by measures.prediction_scores.
Parameters
fm : fixmat instance
category : int
Category from which the fixations are taken.
predicting_filenumbers : list
List of filenumbers used for prediction, i.e. images where fixations
for the prediction are taken from.
predicting_subjects : list
List of subjects whose fixations on images in predicting_filenumbers
are used for the prediction.
predicted_filenumnbers : list
List of images from which the to be predicted fixations are taken.
predicted_subjects : list
List of subjects used for evaluation, i.e subjects whose fixations
on images in predicted_filenumbers are taken for evaluation.
controls : bool, optional
If True (default), n_predict subjects are chosen from the fixmat.
If False, 1000 fixations are randomly generated and used for
testing.
scale_factor : int, optional
specifies the scaling of the fdm. Default is 1.
Returns
auc : area under the roc curve for sets of actuals and controls
true_pos_rate : ndarray
Rate of true positives for every given threshold value.
All values appearing in actuals are taken as thresholds. Uses lower
sum interpolation.
false_pos_rate : ndarray
See true_pos_rate but for false positives.
"""
predicting_fm = fm[
(ismember(fm.SUBJECTINDEX, predicting_subjects)) &
(ismember(fm.filenumber, predicting_filenumbers)) &
(fm.category == category)]
predicted_fm = fm[
(ismember(fm.SUBJECTINDEX,predicted_subjects)) &
(ismember(fm.filenumber,predicted_filenumbers))&
(fm.category == category)]
try:
predicting_fdm = compute_fdm(predicting_fm, scale_factor = scale_factor)
except RuntimeError:
predicting_fdm = None
if controls == True:
fm_controls = fm[
(ismember(fm.SUBJECTINDEX, predicted_subjects)) &
((ismember(fm.filenumber, predicted_filenumbers)) != True) &
(fm.category == category)]
return measures.prediction_scores(predicting_fdm, predicted_fm,
controls = (fm_controls.y, fm_controls.x))
return measures.prediction_scores(predicting_fdm, predicted_fm, controls = None) |
def _sortObjects(orderby='created', **kwargs):
    """Combine the given object collections into one de-duplicated list,
    sorted by creation or modification time according to *orderby*."""
    combined = set()
    for collection in kwargs.values():
        combined.update(iter(collection))
    result = list(combined)
    cmp_func = _sortByCreated if orderby == 'created' else _sortByModified
    if six.PY2:
        # py2 list.sort accepts a comparison function directly
        result.sort(cmp_func)
    else:
        result.sort(key=functools.cmp_to_key(cmp_func))
    return result
### Input:
Sorts lists of objects and combines them into a single list
### Response:
def _sortObjects(orderby='created', **kwargs):
"""Sorts lists of objects and combines them into a single list"""
o = []
for m in kwargs.values():
for l in iter(m):
o.append(l)
o = list(set(o))
sortfunc = _sortByCreated if orderby == 'created' else _sortByModified
if six.PY2:
o.sort(sortfunc)
else:
o.sort(key=functools.cmp_to_key(sortfunc))
return o |
def toXml(self, xparent=None):
    """
    Serialize this profile to an XML element (optionally attached to
    *xparent*).

    :param xparent: parent element to attach the new node to, or None
    :return <xml.etree.ElementTree.Element>
    """
    if xparent is None:
        xprofile = ElementTree.Element('profile')
    else:
        xprofile = ElementTree.SubElement(xparent, 'profile')

    xprofile.set('version', '2')
    xprofile.set('name', self.name())
    xprofile.set('profile_version', '{0:0.1f}'.format(self.version()))

    icon = self.icon()
    if not icon.isNull():
        # store the icon as encoded pixmap data
        xico = ElementTree.SubElement(xprofile, 'icon')
        xico.text = projexui.storePixmap(icon.pixmap(48, 48))

    xdata = ElementTree.SubElement(xprofile, 'data')
    self._customData.toXml(xdata)

    xdesc = ElementTree.SubElement(xprofile, 'desc')
    xdesc.text = self.description()

    if self._xmlElement is not None:
        # deep-copy so the stored element is not mutated by the re-tag
        xlayout = copy.deepcopy(self._xmlElement)
        xlayout.tag = 'layout'
        xprofile.append(xlayout)

    return xprofile
:return <xml.etree.ElementTree.Element> | Below is the the instruction that describes the task:
### Input:
Converts the data for this profile into an XML blob.
:return <xml.etree.ElementTree.Element>
### Response:
def toXml(self, xparent=None):
"""
Converts the data for this profile into an XML blob.
:return <xml.etree.ElementTree.Element>
"""
if xparent is not None:
xprofile = ElementTree.SubElement(xparent, 'profile')
else:
xprofile = ElementTree.Element('profile')
xprofile.set('version', '2')
xprofile.set('name', self.name())
xprofile.set('profile_version', '{0:0.1f}'.format(self.version()))
icon = self.icon()
if not icon.isNull():
data = projexui.storePixmap(self.icon().pixmap(48, 48))
xico = ElementTree.SubElement(xprofile, 'icon')
xico.text = data
xdata = ElementTree.SubElement(xprofile, 'data')
self._customData.toXml(xdata)
xdesc = ElementTree.SubElement(xprofile, 'desc')
xdesc.text = self.description()
if self._xmlElement is not None:
xlayout = copy.deepcopy(self._xmlElement)
xlayout.tag = 'layout'
xprofile.append(xlayout)
return xprofile |
def create(self, resource_class, content_type):
    """
    Create a representer for the given resource class / content type pair.

    Factories registered for a base class of *resource_class* are also
    considered; if none is found, a representer with default configuration
    is registered on the fly and the lookup is retried.
    """
    factory = self.__find_representer_factory(resource_class, content_type)
    if factory is None:
        self.register(resource_class, content_type)
        factory = self.__find_representer_factory(resource_class, content_type)
    return factory(resource_class)
content type. This will also find representer factories that were
registered for a base class of the given resource. | Below is the the instruction that describes the task:
### Input:
Creates a representer for the given combination of resource and
content type. This will also find representer factories that were
registered for a base class of the given resource.
### Response:
def create(self, resource_class, content_type):
"""
Creates a representer for the given combination of resource and
content type. This will also find representer factories that were
registered for a base class of the given resource.
"""
rpr_fac = self.__find_representer_factory(resource_class,
content_type)
if rpr_fac is None:
# Register a representer with default configuration on the fly
# and look again.
self.register(resource_class, content_type)
rpr_fac = self.__find_representer_factory(resource_class,
content_type)
return rpr_fac(resource_class) |
def clustal_align_protein(recs, work_dir, outfmt="fasta"):
"""
Align given proteins with clustalw.
recs are iterable of Biopython SeqIO objects
"""
fasta_file = op.join(work_dir, "prot-start.fasta")
align_file = op.join(work_dir, "prot.aln")
SeqIO.write(recs, file(fasta_file, "w"), "fasta")
clustal_cl = ClustalwCommandline(cmd=CLUSTALW_BIN("clustalw2"),
infile=fasta_file, outfile=align_file, outorder="INPUT",
type="PROTEIN")
stdout, stderr = clustal_cl()
aln_file = file(clustal_cl.outfile)
alignment = AlignIO.read(aln_file, "clustal")
print("\tDoing clustalw alignment: %s" % clustal_cl, file=sys.stderr)
if outfmt == "fasta":
return alignment.format("fasta")
if outfmt == "clustal":
return alignment | Align given proteins with clustalw.
recs are iterable of Biopython SeqIO objects | Below is the the instruction that describes the task:
### Input:
Align given proteins with clustalw.
recs are iterable of Biopython SeqIO objects
### Response:
def clustal_align_protein(recs, work_dir, outfmt="fasta"):
"""
Align given proteins with clustalw.
recs are iterable of Biopython SeqIO objects
"""
fasta_file = op.join(work_dir, "prot-start.fasta")
align_file = op.join(work_dir, "prot.aln")
SeqIO.write(recs, file(fasta_file, "w"), "fasta")
clustal_cl = ClustalwCommandline(cmd=CLUSTALW_BIN("clustalw2"),
infile=fasta_file, outfile=align_file, outorder="INPUT",
type="PROTEIN")
stdout, stderr = clustal_cl()
aln_file = file(clustal_cl.outfile)
alignment = AlignIO.read(aln_file, "clustal")
print("\tDoing clustalw alignment: %s" % clustal_cl, file=sys.stderr)
if outfmt == "fasta":
return alignment.format("fasta")
if outfmt == "clustal":
return alignment |
def _fetch_file(self, remote, local):
"""fetch a single file"""
full_remote = "%s:%s" % (self.location, remote)
self.log.info("fetching %s from %s", local, full_remote)
for i in range(10):
# wait up to 10s for remote file to exist
check = check_output(self.ssh_cmd + self.ssh_args + \
[self.location, 'test -e', remote, "&& echo 'yes' || echo 'no'"])
check = check.strip()
if check == 'no':
time.sleep(1)
elif check == 'yes':
break
check_output(self.scp_cmd + [full_remote, local]) | fetch a single file | Below is the the instruction that describes the task:
### Input:
fetch a single file
### Response:
def _fetch_file(self, remote, local):
"""fetch a single file"""
full_remote = "%s:%s" % (self.location, remote)
self.log.info("fetching %s from %s", local, full_remote)
for i in range(10):
# wait up to 10s for remote file to exist
check = check_output(self.ssh_cmd + self.ssh_args + \
[self.location, 'test -e', remote, "&& echo 'yes' || echo 'no'"])
check = check.strip()
if check == 'no':
time.sleep(1)
elif check == 'yes':
break
check_output(self.scp_cmd + [full_remote, local]) |
def create_uniform(low=0., high=1., mu_err=0., sigma_err=1.,
seed=None, **kwargs):
"""Generate a data with magnitudes that follows a uniform
distribution; the error instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma_err : float (default=1)
Standar deviation of the gaussian distribution of magnitude errorrs
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with a random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.uniform, magf_params={"low": low, "high": high},
errf=random.normal, errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs) | Generate a data with magnitudes that follows a uniform
distribution; the error instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma_err : float (default=1)
Standar deviation of the gaussian distribution of magnitude errorrs
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with a random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ]) | Below is the the instruction that describes the task:
### Input:
Generate a data with magnitudes that follows a uniform
distribution; the error instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma_err : float (default=1)
Standar deviation of the gaussian distribution of magnitude errorrs
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with a random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ])
### Response:
def create_uniform(low=0., high=1., mu_err=0., sigma_err=1.,
seed=None, **kwargs):
"""Generate a data with magnitudes that follows a uniform
distribution; the error instead are gaussian.
Parameters
----------
low : float, optional
Lower boundary of the output interval. All values generated will be
greater than or equal to low. The default value is 0.
high : float, optional
Upper boundary of the output interval. All values generated will be
less than high. The default value is 1.0.
mu_err : float (default=0)
Mean of the gaussian distribution of magnitudes
sigma_err : float (default=1)
Standar deviation of the gaussian distribution of magnitude errorrs
seed : {None, int, array_like}, optional
Random seed used to initialize the pseudo-random number generator.
Can be any integer between 0 and 2**32 - 1 inclusive, an
array (or other sequence) of such integers, or None (the default).
If seed is None, then RandomState will try to read data from
/dev/urandom (or the Windows analogue) if available or seed from
the clock otherwise.
kwargs : optional
extra arguments for create_random.
Returns
-------
data
A Data object with a random lightcurves.
Examples
--------
.. code-block:: pycon
>>> ds = synthetic.create_uniform(1, 2, 0, .0008, 42)
>>> ds
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
>>> ds.data.B.magnitude
array([ 1.37454012, 1.95071431, 1.73199394, ..., 1.94670792,
1.39748799, 1.2171404 ])
"""
random = np.random.RandomState(seed)
return create_random(
magf=random.uniform, magf_params={"low": low, "high": high},
errf=random.normal, errf_params={"loc": mu_err, "scale": sigma_err},
**kwargs) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'used_bytes') and self.used_bytes is not None:
_dict['used_bytes'] = self.used_bytes
return _dict | Return a json dictionary representing this model. | Below is the the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'used_bytes') and self.used_bytes is not None:
_dict['used_bytes'] = self.used_bytes
return _dict |
def build_specfiles(source, target, env):
""" Filter the targets for the needed files and use the variables in env
to create the specfile.
"""
#
# At first we care for the CONTROL/control file, which is the main file for ipk.
#
# For this we need to open multiple files in random order, so we store into
# a dict so they can be easily accessed.
#
#
opened_files={}
def open_file(needle, haystack):
try:
return opened_files[needle]
except KeyError:
file=filter(lambda x: x.get_path().rfind(needle)!=-1, haystack)[0]
opened_files[needle]=open(file.get_abspath(), 'w')
return opened_files[needle]
control_file=open_file('control', target)
if 'X_IPK_DESCRIPTION' not in env:
env['X_IPK_DESCRIPTION']="%s\n %s"%(env['SUMMARY'],
env['DESCRIPTION'].replace('\n', '\n '))
content = """
Package: $NAME
Version: $VERSION
Priority: $X_IPK_PRIORITY
Section: $X_IPK_SECTION
Source: $SOURCE_URL
Architecture: $ARCHITECTURE
Maintainer: $X_IPK_MAINTAINER
Depends: $X_IPK_DEPENDS
Description: $X_IPK_DESCRIPTION
"""
control_file.write(env.subst(content))
#
# now handle the various other files, which purpose it is to set post-,
# pre-scripts and mark files as config files.
#
# We do so by filtering the source files for files which are marked with
# the "config" tag and afterwards we do the same for x_ipk_postrm,
# x_ipk_prerm, x_ipk_postinst and x_ipk_preinst tags.
#
# The first one will write the name of the file into the file
# CONTROL/configfiles, the latter add the content of the x_ipk_* variable
# into the same named file.
#
for f in [x for x in source if 'PACKAGING_CONFIG' in dir(x)]:
config=open_file('conffiles')
config.write(f.PACKAGING_INSTALL_LOCATION)
config.write('\n')
for str in 'POSTRM PRERM POSTINST PREINST'.split():
name="PACKAGING_X_IPK_%s"%str
for f in [x for x in source if name in dir(x)]:
file=open_file(name)
file.write(env[str])
#
# close all opened files
for f in list(opened_files.values()):
f.close()
# call a user specified function
if 'CHANGE_SPECFILE' in env:
content += env['CHANGE_SPECFILE'](target)
return 0 | Filter the targets for the needed files and use the variables in env
to create the specfile. | Below is the the instruction that describes the task:
### Input:
Filter the targets for the needed files and use the variables in env
to create the specfile.
### Response:
def build_specfiles(source, target, env):
""" Filter the targets for the needed files and use the variables in env
to create the specfile.
"""
#
# At first we care for the CONTROL/control file, which is the main file for ipk.
#
# For this we need to open multiple files in random order, so we store into
# a dict so they can be easily accessed.
#
#
opened_files={}
def open_file(needle, haystack):
try:
return opened_files[needle]
except KeyError:
file=filter(lambda x: x.get_path().rfind(needle)!=-1, haystack)[0]
opened_files[needle]=open(file.get_abspath(), 'w')
return opened_files[needle]
control_file=open_file('control', target)
if 'X_IPK_DESCRIPTION' not in env:
env['X_IPK_DESCRIPTION']="%s\n %s"%(env['SUMMARY'],
env['DESCRIPTION'].replace('\n', '\n '))
content = """
Package: $NAME
Version: $VERSION
Priority: $X_IPK_PRIORITY
Section: $X_IPK_SECTION
Source: $SOURCE_URL
Architecture: $ARCHITECTURE
Maintainer: $X_IPK_MAINTAINER
Depends: $X_IPK_DEPENDS
Description: $X_IPK_DESCRIPTION
"""
control_file.write(env.subst(content))
#
# now handle the various other files, which purpose it is to set post-,
# pre-scripts and mark files as config files.
#
# We do so by filtering the source files for files which are marked with
# the "config" tag and afterwards we do the same for x_ipk_postrm,
# x_ipk_prerm, x_ipk_postinst and x_ipk_preinst tags.
#
# The first one will write the name of the file into the file
# CONTROL/configfiles, the latter add the content of the x_ipk_* variable
# into the same named file.
#
for f in [x for x in source if 'PACKAGING_CONFIG' in dir(x)]:
config=open_file('conffiles')
config.write(f.PACKAGING_INSTALL_LOCATION)
config.write('\n')
for str in 'POSTRM PRERM POSTINST PREINST'.split():
name="PACKAGING_X_IPK_%s"%str
for f in [x for x in source if name in dir(x)]:
file=open_file(name)
file.write(env[str])
#
# close all opened files
for f in list(opened_files.values()):
f.close()
# call a user specified function
if 'CHANGE_SPECFILE' in env:
content += env['CHANGE_SPECFILE'](target)
return 0 |
def encode(self):
'''
Encode and store an UNSUBACK control packet
'''
header = bytearray(1)
varHeader = encode16Int(self.msgId)
header[0] = 0xB0
header.extend(encodeLength(len(varHeader)))
header.extend(varHeader)
self.encoded = header
return str(header) if PY2 else bytes(header) | Encode and store an UNSUBACK control packet | Below is the the instruction that describes the task:
### Input:
Encode and store an UNSUBACK control packet
### Response:
def encode(self):
'''
Encode and store an UNSUBACK control packet
'''
header = bytearray(1)
varHeader = encode16Int(self.msgId)
header[0] = 0xB0
header.extend(encodeLength(len(varHeader)))
header.extend(varHeader)
self.encoded = header
return str(header) if PY2 else bytes(header) |
def SNTV_winners(self, profile, K):
"""
Returns a list that associates all the winners of a profile under Single non-transferable vote rule.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates. Ties are
# allowed however.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "csv":
print("ERROR: unsupported election type")
exit()
m = profile.numCands
candScoresMap = MechanismPlurality().getCandScoresMap(profile)
if K >= m:
return list(candScoresMap.keys())
# print(candScoresMap)
sorted_items = sorted(candScoresMap.items(), key=lambda x: x[1], reverse=True)
sorted_dict = {key: value for key, value in sorted_items}
winners = list(sorted_dict.keys())[0:K]
return winners | Returns a list that associates all the winners of a profile under Single non-transferable vote rule.
:ivar Profile profile: A Profile object that represents an election profile. | Below is the the instruction that describes the task:
### Input:
Returns a list that associates all the winners of a profile under Single non-transferable vote rule.
:ivar Profile profile: A Profile object that represents an election profile.
### Response:
def SNTV_winners(self, profile, K):
"""
Returns a list that associates all the winners of a profile under Single non-transferable vote rule.
:ivar Profile profile: A Profile object that represents an election profile.
"""
# Currently, we expect the profile to contain complete ordering over candidates. Ties are
# allowed however.
elecType = profile.getElecType()
if elecType != "soc" and elecType != "toc" and elecType != "csv":
print("ERROR: unsupported election type")
exit()
m = profile.numCands
candScoresMap = MechanismPlurality().getCandScoresMap(profile)
if K >= m:
return list(candScoresMap.keys())
# print(candScoresMap)
sorted_items = sorted(candScoresMap.items(), key=lambda x: x[1], reverse=True)
sorted_dict = {key: value for key, value in sorted_items}
winners = list(sorted_dict.keys())[0:K]
return winners |
def location(self):
"""
The location for this engine. May be None if no specific
location has been assigned.
:param value: location to assign engine. Can be name, str href,
or Location element. If name, it will be automatically created
if a Location with the same name doesn't exist.
:raises UpdateElementFailed: failure to update element
:return: Location element or None
"""
location = Element.from_href(self.location_ref)
if location and location.name == 'Default':
return None
return location | The location for this engine. May be None if no specific
location has been assigned.
:param value: location to assign engine. Can be name, str href,
or Location element. If name, it will be automatically created
if a Location with the same name doesn't exist.
:raises UpdateElementFailed: failure to update element
:return: Location element or None | Below is the the instruction that describes the task:
### Input:
The location for this engine. May be None if no specific
location has been assigned.
:param value: location to assign engine. Can be name, str href,
or Location element. If name, it will be automatically created
if a Location with the same name doesn't exist.
:raises UpdateElementFailed: failure to update element
:return: Location element or None
### Response:
def location(self):
"""
The location for this engine. May be None if no specific
location has been assigned.
:param value: location to assign engine. Can be name, str href,
or Location element. If name, it will be automatically created
if a Location with the same name doesn't exist.
:raises UpdateElementFailed: failure to update element
:return: Location element or None
"""
location = Element.from_href(self.location_ref)
if location and location.name == 'Default':
return None
return location |
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases | Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`: | Below is the the instruction that describes the task:
### Input:
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
### Response:
def read_paraphrase_file(filename):
'''
Reads in a GermaNet wiktionary paraphrase file and returns its
contents as a list of dictionary structures.
Arguments:
- `filename`:
'''
with open(filename, 'rb') as input_file:
doc = etree.parse(input_file)
assert doc.getroot().tag == 'wiktionaryParaphrases'
paraphrases = []
for child in doc.getroot():
if child.tag == 'wiktionaryParaphrase':
paraphrase = child
warn_attribs('', paraphrase, PARAPHRASE_ATTRIBS)
if 0 < len(paraphrase):
print('unrecognised child of <wiktionaryParaphrase>',
list(paraphrase))
paraphrase_dict = dict(paraphrase.items())
if paraphrase_dict['edited'] not in MAP_YESNO_TO_BOOL:
print('<paraphrase> attribute "edited" has unexpected value',
paraphrase_dict['edited'])
else:
paraphrase_dict['edited'] = MAP_YESNO_TO_BOOL[
paraphrase_dict['edited']]
if not paraphrase_dict['wiktionarySenseId'].isdigit():
print('<paraphrase> attribute "wiktionarySenseId" has '
'non-integer value', paraphrase_dict['edited'])
else:
paraphrase_dict['wiktionarySenseId'] = \
int(paraphrase_dict['wiktionarySenseId'], 10)
paraphrases.append(paraphrase_dict)
else:
print('unknown child of <wiktionaryParaphrases>', child)
return paraphrases |
def stream_download(cookie, tokens, path):
'''下载流媒体文件.
path - 流文件的绝对路径.
'''
url = ''.join([
const.PCS_URL_D,
'file?method=download',
'&path=', encoder.encode_uri_component(path),
'&app_id=250528',
])
req = net.urlopen_without_redirect(url, headers=
{'Cookie': cookie.header_output()})
if req:
return req
else:
return None | 下载流媒体文件.
path - 流文件的绝对路径. | Below is the the instruction that describes the task:
### Input:
下载流媒体文件.
path - 流文件的绝对路径.
### Response:
def stream_download(cookie, tokens, path):
'''下载流媒体文件.
path - 流文件的绝对路径.
'''
url = ''.join([
const.PCS_URL_D,
'file?method=download',
'&path=', encoder.encode_uri_component(path),
'&app_id=250528',
])
req = net.urlopen_without_redirect(url, headers=
{'Cookie': cookie.header_output()})
if req:
return req
else:
return None |
def get_contracts_deployment_info(
chain_id: int,
version: Optional[str] = None,
module: DeploymentModule = DeploymentModule.ALL,
) -> Optional[DeployedContracts]:
"""Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version.
"""
if module not in DeploymentModule:
raise ValueError(f'Unknown module {module} given to get_contracts_deployment_info()')
def module_chosen(to_be_added: DeploymentModule):
return module == to_be_added or module == DeploymentModule.ALL
files: List[Path] = []
if module_chosen(DeploymentModule.RAIDEN):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=False,
))
if module == DeploymentModule.SERVICES and not version_provides_services(version):
raise ValueError(
f'SERVICES module queried for version {version}, but {version} '
'does not provide service contracts.',
)
if module_chosen(DeploymentModule.SERVICES) and version_provides_services(version):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=True,
))
deployment_data: DeployedContracts = {} # type: ignore
for f in files:
deployment_data = merge_deployment_data(
deployment_data,
_load_json_from_path(f),
)
if not deployment_data:
deployment_data = None
return deployment_data | Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version. | Below is the the instruction that describes the task:
### Input:
Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version.
### Response:
def get_contracts_deployment_info(
chain_id: int,
version: Optional[str] = None,
module: DeploymentModule = DeploymentModule.ALL,
) -> Optional[DeployedContracts]:
"""Reads the deployment data. Returns None if the file is not found.
Parameter:
module The name of the module. ALL means deployed contracts from all modules that are
available for the version.
"""
if module not in DeploymentModule:
raise ValueError(f'Unknown module {module} given to get_contracts_deployment_info()')
def module_chosen(to_be_added: DeploymentModule):
return module == to_be_added or module == DeploymentModule.ALL
files: List[Path] = []
if module_chosen(DeploymentModule.RAIDEN):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=False,
))
if module == DeploymentModule.SERVICES and not version_provides_services(version):
raise ValueError(
f'SERVICES module queried for version {version}, but {version} '
'does not provide service contracts.',
)
if module_chosen(DeploymentModule.SERVICES) and version_provides_services(version):
files.append(contracts_deployed_path(
chain_id=chain_id,
version=version,
services=True,
))
deployment_data: DeployedContracts = {} # type: ignore
for f in files:
deployment_data = merge_deployment_data(
deployment_data,
_load_json_from_path(f),
)
if not deployment_data:
deployment_data = None
return deployment_data |
def issue_command(self, cmd, *args):
""" Sends and receives a message to/from the server """
self._writeline(cmd)
self._writeline(str(len(args)))
for arg in args:
arg = str(arg)
self._writeline(str(len(arg)))
self._sock.sendall(arg.encode("utf-8"))
return self._read_response() | Sends and receives a message to/from the server | Below is the the instruction that describes the task:
### Input:
Sends and receives a message to/from the server
### Response:
def issue_command(self, cmd, *args):
""" Sends and receives a message to/from the server """
self._writeline(cmd)
self._writeline(str(len(args)))
for arg in args:
arg = str(arg)
self._writeline(str(len(arg)))
self._sock.sendall(arg.encode("utf-8"))
return self._read_response() |
def rmlinematch(oldstr, infile, dryrun=False):
"""
Sed-like line deletion function based on given string..
Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)
Example: pysed.rmlinematch('xyz', '/path/to/file.txt')
Example:
'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)
This will dump the output to STDOUT instead of changing the input file.
"""
linelist = []
with open(infile) as reader:
for item in reader:
rmitem = re.match(r'.*{}'.format(oldstr), item)
# if isinstance(rmitem) == isinstance(None): Not quite sure the intent here
if rmitem is None:
linelist.append(item)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit("""Unknown option specified to 'dryrun' argument,
Usage: dryrun=<True|False>.""") | Sed-like line deletion function based on given string..
Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)
Example: pysed.rmlinematch('xyz', '/path/to/file.txt')
Example:
'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)
This will dump the output to STDOUT instead of changing the input file. | Below is the the instruction that describes the task:
### Input:
Sed-like line deletion function based on given string..
Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)
Example: pysed.rmlinematch('xyz', '/path/to/file.txt')
Example:
'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)
This will dump the output to STDOUT instead of changing the input file.
### Response:
def rmlinematch(oldstr, infile, dryrun=False):
"""
Sed-like line deletion function based on given string..
Usage: pysed.rmlinematch(<Unwanted string>, <Text File>)
Example: pysed.rmlinematch('xyz', '/path/to/file.txt')
Example:
'DRYRUN': pysed.rmlinematch('xyz', '/path/to/file.txt', dryrun=True)
This will dump the output to STDOUT instead of changing the input file.
"""
linelist = []
with open(infile) as reader:
for item in reader:
rmitem = re.match(r'.*{}'.format(oldstr), item)
# if isinstance(rmitem) == isinstance(None): Not quite sure the intent here
if rmitem is None:
linelist.append(item)
if dryrun is False:
with open(infile, "w") as writer:
writer.truncate()
for line in linelist:
writer.writelines(line)
elif dryrun is True:
for line in linelist:
print(line, end='')
else:
exit("""Unknown option specified to 'dryrun' argument,
Usage: dryrun=<True|False>.""") |
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_name = ET.SubElement(fwdl_entries, "blade-name")
blade_name.text = kwargs.pop('blade_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
config = logical_chassis_fwdl_status
output = ET.SubElement(logical_chassis_fwdl_status, "output")
cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
blade_name = ET.SubElement(fwdl_entries, "blade-name")
blade_name.text = kwargs.pop('blade_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False,
ask_to_resolve=True):
"""
:param path: Path to the object that required resolution; propagated from
command-line
:type path: string
:param entity_name: Name of the object
:type entity_name: string
:param results: Result of resolution; non-empty list of object
specifications (each specification is a dictionary with
keys "project" and "id")
:type results: list of dictionaries
:param allow_mult: If True, it is okay to choose from multiple results
of a single resolved object, or return all results
found; if False, raise an error if multiple results
are found
:type allow_mult: boolean
:param all_mult: If True, return all results if multiple results are
found for a single resolved object; if False, user needs
to choose a single result if multiple are found; the value
of all_mult only has an effect if allow_mult is True)
:type all_mult: boolean
:param ask_to_resolve: Whether picking may be necessary (if True, a
list is returned; if False, only one result
is returned); if specified as True, then all
results will be returned, regardless of the
values of allow_mult and all_mult
:type ask_to_resolve: boolean
:returns: The results of resolving entity_name, expected to be of the
following form:
<resolved_object> # If only one result is present or the user
# is able to select from multiple
OR
[<resolved_object>, ...] # If multiple results are present and
# it is allowed
where <resolved_object> is of the following form:
{"project": <project_id>, "id": <object_id>}
:rtype: dict or list of dicts
:raises: ValueError if results is empty
:raises: ResolutionError if too many results are found and the user is
not in interactive mode and cannot select one
Precondition: results must be a nonempty list
Validates length of results.
If there are multiple results found and the user is in interactive mode,
then the user will be prompted to select a single result to be returned.
"""
if len(results) == 0:
raise ValueError("'results' must be nonempty.")
# Caller wants ALL results, so return the entire results list
# At this point, do not care about the values of allow_mult or all_mult
if not ask_to_resolve:
return results
if len(results) > 1:
# The other way the caller can specify it wants all results is by setting
# allow_mult to be True and allowing all_mult to be True (or if the object name is a glob pattern)
if allow_mult and (all_mult or is_glob_pattern(entity_name)):
return results
if INTERACTIVE_CLI:
print('The given path "' + path + '" resolves to the following data objects:')
if any(['describe' not in result for result in results]):
# findDataObject API call must be made to get 'describe' mappings
project, folderpath, entity_name = resolve_path(path, expected='entity')
results = _resolve_global_entity(project, folderpath, entity_name)
choice = pick([get_ls_l_desc(result['describe']) for result in results],
allow_mult=allow_mult)
if allow_mult and choice == '*':
return results
else:
return [results[choice]] if allow_mult else results[choice]
else:
raise ResolutionError('The given path "' + path + '" resolves to ' +
str(len(results)) + ' data objects')
else:
return [results[0]] if allow_mult else results[0] | :param path: Path to the object that required resolution; propagated from
command-line
:type path: string
:param entity_name: Name of the object
:type entity_name: string
:param results: Result of resolution; non-empty list of object
specifications (each specification is a dictionary with
keys "project" and "id")
:type results: list of dictionaries
:param allow_mult: If True, it is okay to choose from multiple results
of a single resolved object, or return all results
found; if False, raise an error if multiple results
are found
:type allow_mult: boolean
:param all_mult: If True, return all results if multiple results are
found for a single resolved object; if False, user needs
to choose a single result if multiple are found; the value
of all_mult only has an effect if allow_mult is True)
:type all_mult: boolean
:param ask_to_resolve: Whether picking may be necessary (if True, a
list is returned; if False, only one result
is returned); if specified as True, then all
results will be returned, regardless of the
values of allow_mult and all_mult
:type ask_to_resolve: boolean
:returns: The results of resolving entity_name, expected to be of the
following form:
<resolved_object> # If only one result is present or the user
# is able to select from multiple
OR
[<resolved_object>, ...] # If multiple results are present and
# it is allowed
where <resolved_object> is of the following form:
{"project": <project_id>, "id": <object_id>}
:rtype: dict or list of dicts
:raises: ValueError if results is empty
:raises: ResolutionError if too many results are found and the user is
not in interactive mode and cannot select one
Precondition: results must be a nonempty list
Validates length of results.
If there are multiple results found and the user is in interactive mode,
then the user will be prompted to select a single result to be returned. | Below is the the instruction that describes the task:
### Input:
:param path: Path to the object that required resolution; propagated from
command-line
:type path: string
:param entity_name: Name of the object
:type entity_name: string
:param results: Result of resolution; non-empty list of object
specifications (each specification is a dictionary with
keys "project" and "id")
:type results: list of dictionaries
:param allow_mult: If True, it is okay to choose from multiple results
of a single resolved object, or return all results
found; if False, raise an error if multiple results
are found
:type allow_mult: boolean
:param all_mult: If True, return all results if multiple results are
found for a single resolved object; if False, user needs
to choose a single result if multiple are found; the value
of all_mult only has an effect if allow_mult is True)
:type all_mult: boolean
:param ask_to_resolve: Whether picking may be necessary (if True, a
list is returned; if False, only one result
is returned); if specified as True, then all
results will be returned, regardless of the
values of allow_mult and all_mult
:type ask_to_resolve: boolean
:returns: The results of resolving entity_name, expected to be of the
following form:
<resolved_object> # If only one result is present or the user
# is able to select from multiple
OR
[<resolved_object>, ...] # If multiple results are present and
# it is allowed
where <resolved_object> is of the following form:
{"project": <project_id>, "id": <object_id>}
:rtype: dict or list of dicts
:raises: ValueError if results is empty
:raises: ResolutionError if too many results are found and the user is
not in interactive mode and cannot select one
Precondition: results must be a nonempty list
Validates length of results.
If there are multiple results found and the user is in interactive mode,
then the user will be prompted to select a single result to be returned.
### Response:
def _validate_resolution_output_length(path, entity_name, results, allow_mult=False, all_mult=False,
ask_to_resolve=True):
"""
:param path: Path to the object that required resolution; propagated from
command-line
:type path: string
:param entity_name: Name of the object
:type entity_name: string
:param results: Result of resolution; non-empty list of object
specifications (each specification is a dictionary with
keys "project" and "id")
:type results: list of dictionaries
:param allow_mult: If True, it is okay to choose from multiple results
of a single resolved object, or return all results
found; if False, raise an error if multiple results
are found
:type allow_mult: boolean
:param all_mult: If True, return all results if multiple results are
found for a single resolved object; if False, user needs
to choose a single result if multiple are found; the value
of all_mult only has an effect if allow_mult is True)
:type all_mult: boolean
:param ask_to_resolve: Whether picking may be necessary (if True, a
list is returned; if False, only one result
is returned); if specified as True, then all
results will be returned, regardless of the
values of allow_mult and all_mult
:type ask_to_resolve: boolean
:returns: The results of resolving entity_name, expected to be of the
following form:
<resolved_object> # If only one result is present or the user
# is able to select from multiple
OR
[<resolved_object>, ...] # If multiple results are present and
# it is allowed
where <resolved_object> is of the following form:
{"project": <project_id>, "id": <object_id>}
:rtype: dict or list of dicts
:raises: ValueError if results is empty
:raises: ResolutionError if too many results are found and the user is
not in interactive mode and cannot select one
Precondition: results must be a nonempty list
Validates length of results.
If there are multiple results found and the user is in interactive mode,
then the user will be prompted to select a single result to be returned.
"""
if len(results) == 0:
raise ValueError("'results' must be nonempty.")
# Caller wants ALL results, so return the entire results list
# At this point, do not care about the values of allow_mult or all_mult
if not ask_to_resolve:
return results
if len(results) > 1:
# The other way the caller can specify it wants all results is by setting
# allow_mult to be True and allowing all_mult to be True (or if the object name is a glob pattern)
if allow_mult and (all_mult or is_glob_pattern(entity_name)):
return results
if INTERACTIVE_CLI:
print('The given path "' + path + '" resolves to the following data objects:')
if any(['describe' not in result for result in results]):
# findDataObject API call must be made to get 'describe' mappings
project, folderpath, entity_name = resolve_path(path, expected='entity')
results = _resolve_global_entity(project, folderpath, entity_name)
choice = pick([get_ls_l_desc(result['describe']) for result in results],
allow_mult=allow_mult)
if allow_mult and choice == '*':
return results
else:
return [results[choice]] if allow_mult else results[choice]
else:
raise ResolutionError('The given path "' + path + '" resolves to ' +
str(len(results)) + ' data objects')
else:
return [results[0]] if allow_mult else results[0] |
def print_inplace(msg):
"""Clears out the previous line and prints a new one."""
term_width = get_terminal_size().columns
spacing = term_width - terminal_width(msg)
# On windows we need one less space or we overflow the line for some reason.
if is_win32:
spacing -= 1
sys.stderr.write("\r{0}".format(msg))
sys.stderr.write(" " * max(0, spacing))
sys.stderr.flush() | Clears out the previous line and prints a new one. | Below is the the instruction that describes the task:
### Input:
Clears out the previous line and prints a new one.
### Response:
def print_inplace(msg):
    """Overwrite the current terminal line on stderr with ``msg``."""
    columns = get_terminal_size().columns
    pad = columns - terminal_width(msg)
    # Windows consoles wrap one column early, so pad one space less there.
    if is_win32:
        pad -= 1
    sys.stderr.write("\r" + msg + " " * max(0, pad))
    sys.stderr.flush()
def xoscmounts(host_mount):
"""
Cross OS compatible mount dirs
"""
callback_lower_drive_letter = lambda pat: pat.group(1).lower()
host_mount = re.sub(r"^([a-zA-Z])\:", callback_lower_drive_letter, host_mount)
host_mount = re.sub(r"^([a-z])", "//\\1", host_mount)
host_mount = re.sub(r"\\", "/", host_mount)
return host_mount | Cross OS compatible mount dirs | Below is the the instruction that describes the task:
### Input:
Cross OS compatible mount dirs
### Response:
def xoscmounts(host_mount):
    """Normalize a host mount path for cross-OS use.

    Lowercases a leading Windows drive letter, turns ``c:`` into ``//c``,
    and converts backslashes to forward slashes.
    """
    def lower_drive(match):
        # "C:" -> "c" (colon dropped, letter lowercased)
        return match.group(1).lower()

    host_mount = re.sub(r"^([a-zA-Z])\:", lower_drive, host_mount)
    host_mount = re.sub(r"^([a-z])", "//\\1", host_mount)
    return re.sub(r"\\", "/", host_mount)
def set_train_summary(self, summary):
"""
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
"""
callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value,
summary)
return self | Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object | Below is the the instruction that describes the task:
### Input:
Set train summary. A TrainSummary object contains information
necessary for the optimizer to know how often the logs are recorded,
where to store the logs and how to retrieve them, etc. For details,
refer to the docs of TrainSummary.
:param summary: a TrainSummary object
### Response:
def set_train_summary(self, summary):
        """Attach a TrainSummary to this optimizer.

        The TrainSummary tells the optimizer how often training logs are
        recorded, where they are stored and how to retrieve them; see the
        TrainSummary docs for details.

        :param summary: a TrainSummary object
        :return: self, to allow call chaining
        """
        callBigDlFunc(self.bigdl_type, "setTrainSummary", self.value, summary)
        return self
def _set_upload_url(self):
"""Generate the full URL for a POST."""
# pylint: disable=protected-access
self._upload_url = "/".join(
[self.jss._url, self._url, self.resource_type, self.id_type,
str(self._id)]) | Generate the full URL for a POST. | Below is the the instruction that describes the task:
### Input:
Generate the full URL for a POST.
### Response:
def _set_upload_url(self):
"""Generate the full URL for a POST."""
# pylint: disable=protected-access
self._upload_url = "/".join(
[self.jss._url, self._url, self.resource_type, self.id_type,
str(self._id)]) |
def mach60(msg):
"""Aircraft MACH number
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: MACH number
"""
d = hex2bin(data(msg))
if d[23] == '0':
return None
mach = bin2int(d[24:34]) * 2.048 / 512.0
return round(mach, 3) | Aircraft MACH number
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: MACH number | Below is the the instruction that describes the task:
### Input:
Aircraft MACH number
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: MACH number
### Response:
def mach60(msg):
    """Decode the aircraft MACH number from a BDS 6,0 message.

    Args:
        msg (String): 28 bytes hexadecimal message (BDS60) string

    Returns:
        float: MACH number, or None if the field's status bit is clear
    """
    bits = hex2bin(data(msg))

    # Status bit: '0' means the MACH field is not populated.
    if bits[23] == '0':
        return None

    # 10-bit field, LSB = 2.048/512 MACH.
    return round(bin2int(bits[24:34]) * 2.048 / 512.0, 3)
def upload_asset(self, content_type, name, asset):
"""Upload an asset to this release.
All parameters are required.
:param str content_type: The content type of the asset. Wikipedia has
a list of common media types
:param str name: The name of the file
:param asset: The file or bytes object to upload.
:returns: :class:`Asset <Asset>`
"""
headers = Release.CUSTOM_HEADERS.copy()
headers.update({'Content-Type': content_type})
url = self.upload_urlt.expand({'name': name})
r = self._post(url, data=asset, json=False, headers=headers,
verify=False)
if r.status_code in (201, 202):
return Asset(r.json(), self)
raise GitHubError(r) | Upload an asset to this release.
All parameters are required.
:param str content_type: The content type of the asset. Wikipedia has
a list of common media types
:param str name: The name of the file
:param asset: The file or bytes object to upload.
:returns: :class:`Asset <Asset>` | Below is the the instruction that describes the task:
### Input:
Upload an asset to this release.
All parameters are required.
:param str content_type: The content type of the asset. Wikipedia has
a list of common media types
:param str name: The name of the file
:param asset: The file or bytes object to upload.
:returns: :class:`Asset <Asset>`
### Response:
def upload_asset(self, content_type, name, asset):
        """Upload ``asset`` as an asset of this release.

        All parameters are required.

        :param str content_type: media type of the asset (see Wikipedia's
            list of common media types)
        :param str name: file name to attach
        :param asset: file-like object or bytes payload to upload
        :returns: :class:`Asset <Asset>`
        :raises: GitHubError on any response other than 201/202
        """
        headers = dict(Release.CUSTOM_HEADERS)
        headers['Content-Type'] = content_type
        url = self.upload_urlt.expand({'name': name})
        # NOTE(review): verify=False disables TLS certificate validation on
        # the upload request -- confirm this is intentional.
        response = self._post(url, data=asset, json=False, headers=headers,
                              verify=False)
        if response.status_code not in (201, 202):
            raise GitHubError(response)
        return Asset(response.json(), self)
def send_os_command(self, os_command_text, is_priority=False):
"""
Send a command to the operating system running in this partition.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'is-priority': is_priority,
'operating-system-command-text': os_command_text}
self.manager.session.post(
self.uri + '/operations/send-os-cmd', body) | Send a command to the operating system running in this partition.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | Below is the the instruction that describes the task:
### Input:
Send a command to the operating system running in this partition.
Parameters:
os_command_text (string): The text of the operating system command.
is_priority (bool):
Boolean controlling whether this is a priority operating system
command, as follows:
* If `True`, this message is treated as a priority operating
system command.
* If `False`, this message is not treated as a priority
operating system command. The default.
Returns:
None
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
def send_os_command(self, os_command_text, is_priority=False):
        """Send a command to the operating system running in this partition.

        Parameters:

          os_command_text (string): The text of the operating system command.

          is_priority (bool): When True the message is treated as a priority
            operating system command; when False (the default) it is not.

        Returns:

          None

        Raises:

          :exc:`~zhmcclient.HTTPError`
          :exc:`~zhmcclient.ParseError`
          :exc:`~zhmcclient.AuthError`
          :exc:`~zhmcclient.ConnectionError`
        """
        payload = {
            'is-priority': is_priority,
            'operating-system-command-text': os_command_text,
        }
        uri = self.uri + '/operations/send-os-cmd'
        self.manager.session.post(uri, payload)
def generate_gaussian_profile(seeing_fwhm):
"""Generate a normalized Gaussian profile from its FWHM"""
FWHM_G = 2 * math.sqrt(2 * math.log(2))
sigma = seeing_fwhm / FWHM_G
amplitude = 1.0 / (2 * math.pi * sigma * sigma)
seeing_model = Gaussian2D(amplitude=amplitude,
x_mean=0.0,
y_mean=0.0,
x_stddev=sigma,
y_stddev=sigma)
return seeing_model | Generate a normalized Gaussian profile from its FWHM | Below is the the instruction that describes the task:
### Input:
Generate a normalized Gaussian profile from its FWHM
### Response:
def generate_gaussian_profile(seeing_fwhm):
    """Build a normalized 2-D Gaussian seeing model from its FWHM."""
    # For a Gaussian, FWHM = 2 * sqrt(2 * ln 2) * sigma.
    sigma = seeing_fwhm / (2 * math.sqrt(2 * math.log(2)))
    # Peak amplitude chosen so the profile integrates to unity.
    peak = 1.0 / (2 * math.pi * sigma * sigma)
    return Gaussian2D(amplitude=peak,
                      x_mean=0.0,
                      y_mean=0.0,
                      x_stddev=sigma,
                      y_stddev=sigma)
def import_staging(self, filename, source, rater_name, rec_start,
                   staging_start=None, epoch_length=None,
                   poor=None, as_qual=False):
        """Import staging from an external staging text file.

        Parameters
        ----------
        filename : str
            Staging file name.
        source : str
            Name of program where staging was made. One of 'domino', 'alice',
            'compumedics', 'sandman', 'remlogic', 'deltamed', 'prana'
        rater_name : str
            Rater name for imported staging.
        rec_start : datetime
            Date and time (year, month, day, hour, minute, second) of recording
            start. Year is ignored (New Year's Eve celebratory recordings
            unsupported.)
        staging_start : datetime (default: None)
            Date and time of staging start. For use when not provided in
            staging file.
        epoch_length : int
            duration in s of a scoring epoch
        poor : list of str
            epochs with stage names in this list will be marked as Poor
            quality (default: ['Artefact'])
        as_qual : bool
            if True, the staging will only be used to mark quality, as per
            poor
        """
        # Avoid a mutable default argument; None stands in for ['Artefact'].
        if poor is None:
            poor = ['Artefact']

        if as_qual and rater_name not in self.raters:
            self.parent.statusBar.showMessage('Rater not found.')
            return

        clue = None  # used in some instances to pick out epochs from other evts
        idx_clue = None

        if source in ['remlogic', 'sandman']:
            encoding = 'ISO-8859-1'
        else:
            encoding = 'utf-8'

        with open(filename, 'r', encoding=encoding) as f:
            lines = f.readlines()

        if source == 'domino':
            # First line that starts with a digit is the first staging row.
            for i, line in enumerate(lines):
                if line[0].isdigit():
                    idx_first_line = i
                    break
            # Column layout differs between Physip- and Domino-style exports.
            if lines[idx_first_line].index(';') > 15:
                idx_time = (11, 19)
                idx_stage = slice(25, 26)
                stage_key = PHYSIP_STAGE_KEY
            else:
                idx_time = (0, 8)
                idx_stage = slice(14, 16)
                stage_key = DOMINO_STAGE_KEY
            stage_start = datetime.strptime(
                lines[idx_first_line][idx_time[0]:idx_time[1]], '%H:%M:%S')
            stage_day = int(lines[1][12:14])
            stage_month = int(lines[1][15:17])
            # Use a fixed dummy year on both sides so only month/day/time
            # contribute to the offset.
            stage_start_for_delta = stage_start.replace(year=1999,
                                                        month=stage_month,
                                                        day=stage_day)
            rec_start_for_delta = rec_start.replace(year=1999)
            first_second = int((stage_start_for_delta -
                                rec_start_for_delta).total_seconds())
            if epoch_length is None:
                epoch_length = int(lines[5][6:8])

        elif source == 'remlogic':
            clue = 'SLEEP-'  # signifies an epoch (as opposed to an event)
            idx_clue = slice(-18, -6)
            idx_head = lines.index(
                next(l for l in lines if 'Time [hh:mm:ss]' in l))
            first_line = next(l for l in lines[idx_head:] if clue in l)
            idx_first_line = lines.index(first_line)
            stage_start_date = _try_parse_datetime(
                lines[3][16:lines[3].index('\n')],
                ('%Y/%m/%d', '%d/%m/%Y'))
            stage_start_time = None
            try:
                stage_start_time = datetime.strptime(
                    first_line[:19], '%Y-%m-%dT%H:%M:%S')
            except ValueError:
                # Fall back to scanning tab-separated cells for a time field.
                cells = first_line.split('\t')
                for cell in cells:
                    try:
                        stage_start_time = datetime.strptime(cell[-8:],
                                                             '%I:%M:%S')
                        if cell[1] == 'U':
                            stage_start_time = stage_start_time + timedelta(
                                hours=12)
                    except ValueError:
                        continue
            # Fix: identity comparison with None (was ``== None``).
            if stage_start_time is None:
                raise ValueError('No valid start time found.')
            stage_start = datetime.combine(stage_start_date.date(),
                                           stage_start_time.time())
            first_second = int((stage_start - rec_start).total_seconds())
            stage_key = {k[-2:]: v for k, v in REMLOGIC_STAGE_KEY.items()}
            idx_stage = slice(-6, -4)
            if epoch_length is None:
                epoch_length = int(first_line[-3:-1])

        elif source == 'alice':
            stage_start = datetime.strptime(lines[1][2:13], '%I:%M:%S %p')

            # Best guess for the date in its absence: shift one day when the
            # staging clock and recording clock straddle midnight/noon.
            dt = rec_start
            if lines[1][11:13] == 'pm' and rec_start.hour < 12:
                dt = rec_start - timedelta(days=1)
            elif lines[1][11:13] == 'am' and rec_start.hour > 12:
                # BUG FIX: the original assigned the bare ``timedelta`` class
                # here, which crashed below when reading ``dt.year``.
                dt = rec_start + timedelta(days=1)
            stage_start = stage_start.replace(year=dt.year,
                                              month=dt.month,
                                              day=dt.day)
            first_second = int((stage_start - rec_start).total_seconds())

            idx_first_line = 1

            lines[-1] += '_'  # to fill newline position

            stage_key = ALICE_STAGE_KEY
            idx_stage = slice(-3, -1)

            if epoch_length is None:
                epoch_length = 30

        elif source == 'sandman':
            stage_start = datetime.strptime(lines[4][12:33],
                                            '%d/%m/%Y %I:%M:%S %p')
            first_second = int((stage_start - rec_start).total_seconds())

            idx_first_line = 14

            stage_key = SANDMAN_STAGE_KEY
            idx_stage = slice(-14, -12)

            if epoch_length is None:
                epoch_length = 30

        elif source == 'compumedics':
            if staging_start is None:
                first_second = 0
            else:
                first_second = int((
                    staging_start - rec_start).total_seconds())

            idx_first_line = 0

            stage_key = COMPUMEDICS_STAGE_KEY
            idx_stage = slice(0, 1)

            if epoch_length is None:
                epoch_length = 30

        elif source == 'deltamed':
            if staging_start is None:
                first_second = 0
            else:
                first_second = int((
                    staging_start - rec_start).total_seconds())

            idx_first_line = 0

            stage_key = DELTAMED_STAGE_KEY
            idx_stage = slice(-2, -1)

            if epoch_length is None:
                # Epoch length is the first tab-separated field of line 0.
                epoch_length = int(lines[0][:lines[0].index('\t')])

        elif source == 'prana':
            stage_start = datetime.strptime(lines[5][:11], '%d %H:%M:%S')

            # Best guess for the date in its absence (see alice branch).
            dt = rec_start
            if stage_start.hour > 12 and rec_start.hour < 12:
                dt = rec_start - timedelta(days=1)
            elif stage_start.hour < 12 and rec_start.hour > 12:
                dt = rec_start + timedelta(days=1)
            stage_start = stage_start.replace(year=dt.year,
                                              month=dt.month,
                                              day=dt.day)
            first_second = int((stage_start - rec_start).total_seconds())

            idx_first_line = 5

            stage_key = PRANA_STAGE_KEY
            # The stage letter is the first non-blank character after col 30.
            spacer = next(i for i, j in enumerate(lines[5][30:])
                          if j.strip())
            idx_stage = slice(30 + spacer, 30 + spacer + 1)

            if epoch_length is None:
                # Parse the first digit-run of line 3 as the epoch length;
                # default to 30 s if no digits are present.
                idx_epoch_length = None
                for i, j in enumerate(lines[3]):
                    if j.isdigit():
                        idx_epoch_length = i, i + lines[3][i:].index(' ')
                        epoch_length = int(lines[3][slice(*idx_epoch_length)])
                        break
                if idx_epoch_length is None:
                    epoch_length = 30

        else:
            raise ValueError('Unknown source program for staging file')

        # Staging may not start on an epoch boundary relative to rec_start.
        offset = first_second % epoch_length
        lg.info('Time offset: ' + str(offset) + ' sec')

        if rater_name not in self.raters:
            self.add_rater(rater_name)

        self.get_rater(rater_name)
        stages = self.rater.find('stages')

        if as_qual:
            # Only mark quality on existing epochs; leave stages untouched.
            for i, one_line in enumerate(lines[idx_first_line:]):
                if one_line[idx_stage] in poor:
                    epoch_beg = first_second + (i * epoch_length)
                    try:
                        self.set_stage_for_epoch(epoch_beg, 'Poor',
                                                 attr='quality',
                                                 save=False)
                    except KeyError:
                        return 1

        else:
            # list is necessary so that it does not remove in place
            for s in list(stages):
                stages.remove(s)

            # Fill the gap between recording start and staging start with
            # 'Unknown' epochs.
            for i in arange(offset, first_second - epoch_length, epoch_length):
                epoch = SubElement(stages, 'epoch')
                start_time = SubElement(epoch, 'epoch_start')
                epoch_beg = i
                start_time.text = str(epoch_beg)
                end_time = SubElement(epoch, 'epoch_end')
                end_time.text = str(epoch_beg + epoch_length)
                epoch_stage = SubElement(epoch, 'stage')
                epoch_stage.text = 'Unknown'
                quality = SubElement(epoch, 'quality')
                quality.text = 'Good'

            idx_epoch = 0
            for i, one_line in enumerate(lines[idx_first_line:]):
                if clue is not None:
                    # Skip rows that are events rather than epochs.
                    if clue not in one_line[idx_clue]:
                        continue
                epoch = SubElement(stages, 'epoch')
                start_time = SubElement(epoch, 'epoch_start')
                epoch_beg = first_second + (idx_epoch * epoch_length)
                start_time.text = str(epoch_beg)
                end_time = SubElement(epoch, 'epoch_end')
                end_time.text = str(epoch_beg + epoch_length)
                epoch_stage = SubElement(epoch, 'stage')

                try:
                    key = one_line[idx_stage]
                    one_stage = stage_key[key]
                except KeyError:
                    one_stage = 'Unknown'
                    lg.info('Stage not recognized: ' + key)

                epoch_stage.text = one_stage
                quality = SubElement(epoch, 'quality')
                if one_stage in poor:
                    quality.text = 'Poor'
                else:
                    quality.text = 'Good'
                idx_epoch += 1
self.save() | Import staging from an external staging text file.
Parameters
----------
filename : str
Staging file name.
source : str
Name of program where staging was made. One of 'domino', 'alice',
'compumedics', 'sandman', 'remlogic'
rater_name : str
Rater name for imported staging.
rec_start : datetime
Date and time (year, month, day, hour, minute, second) of recording
start. Year is ignored (New Year's Eve celebratory recordings
unsupported.)
staging_start : datetime (default: None)
Date and time of staging start. For use when not provided in
staging file.
epoch_length : int
duration in s of a scoring epoch
poor : list of str
epochs with stage names in this list will be marked as Poor quality
as_qual : bool
if True, the staging only be used to mark quality, as per poor | Below is the the instruction that describes the task:
### Input:
Import staging from an external staging text file.
Parameters
----------
filename : str
Staging file name.
source : str
Name of program where staging was made. One of 'domino', 'alice',
'compumedics', 'sandman', 'remlogic'
rater_name : str
Rater name for imported staging.
rec_start : datetime
Date and time (year, month, day, hour, minute, second) of recording
start. Year is ignored (New Year's Eve celebratory recordings
unsupported.)
staging_start : datetime (default: None)
Date and time of staging start. For use when not provided in
staging file.
epoch_length : int
duration in s of a scoring epoch
poor : list of str
epochs with stage names in this list will be marked as Poor quality
as_qual : bool
if True, the staging only be used to mark quality, as per poor
### Response:
def import_staging(self, filename, source, rater_name, rec_start,
staging_start=None, epoch_length=None,
poor=['Artefact'], as_qual=False):
"""Import staging from an external staging text file.
Parameters
----------
filename : str
Staging file name.
source : str
Name of program where staging was made. One of 'domino', 'alice',
'compumedics', 'sandman', 'remlogic'
rater_name : str
Rater name for imported staging.
rec_start : datetime
Date and time (year, month, day, hour, minute, second) of recording
start. Year is ignored (New Year's Eve celebratory recordings
unsupported.)
staging_start : datetime (default: None)
Date and time of staging start. For use when not provided in
staging file.
epoch_length : int
duration in s of a scoring epoch
poor : list of str
epochs with stage names in this list will be marked as Poor quality
as_qual : bool
if True, the staging only be used to mark quality, as per poor
"""
if as_qual and rater_name not in self.raters:
self.parent.statusBar.showMessage('Rater not found.')
return
clue = None # used in some instances to pick out epochs from other evts
idx_clue = None
if source in ['remlogic', 'sandman']:
encoding = 'ISO-8859-1'
else:
encoding = 'utf-8'
with open(filename, 'r', encoding=encoding) as f:
lines = f.readlines()
if source == 'domino':
for i, line in enumerate(lines):
if line[0].isdigit():
idx_first_line = i
break
if lines[idx_first_line].index(';') > 15:
idx_time = (11, 19)
idx_stage = slice(25, 26)
stage_key = PHYSIP_STAGE_KEY
else:
idx_time = (0, 8)
idx_stage = slice(14, 16)
stage_key = DOMINO_STAGE_KEY
stage_start = datetime.strptime(
lines[idx_first_line][idx_time[0]:idx_time[1]], '%H:%M:%S')
stage_day = int(lines[1][12:14])
stage_month = int(lines[1][15:17])
stage_start_for_delta = stage_start.replace(year=1999,
month=stage_month,
day=stage_day)
rec_start_for_delta = rec_start.replace(year=1999)
first_second = int((stage_start_for_delta -
rec_start_for_delta).total_seconds())
if epoch_length is None:
epoch_length = int(lines[5][6:8])
elif source == 'remlogic':
clue = 'SLEEP-' # signifies an epoch (as opposed to an event)
idx_clue = slice(-18, -6)
idx_head = lines.index(
next(l for l in lines if 'Time [hh:mm:ss]' in l))
first_line = next(l for l in lines[idx_head:] if clue in l)
idx_first_line = lines.index(first_line)
stage_start_date = _try_parse_datetime(
lines[3][16:lines[3].index('\n')],
('%Y/%m/%d', '%d/%m/%Y'))
stage_start_time = None
try:
stage_start_time = datetime.strptime(
first_line[:19], '%Y-%m-%dT%H:%M:%S')
except ValueError:
cells = first_line.split('\t')
for cell in cells:
try:
stage_start_time = datetime.strptime(cell[-8:],
'%I:%M:%S')
if cell[1] == 'U':
stage_start_time = stage_start_time + timedelta(
hours=12)
except ValueError:
continue
if stage_start_time == None:
raise ValueError('No valid start time found.')
stage_start = datetime.combine(stage_start_date.date(),
stage_start_time.time())
first_second = int((stage_start - rec_start).total_seconds())
stage_key = {k[-2:]: v for k, v in REMLOGIC_STAGE_KEY.items()}
idx_stage = slice(-6, -4)
if epoch_length is None:
epoch_length = int(first_line[-3:-1])
elif source == 'alice':
stage_start = datetime.strptime(lines[1][2:13], '%I:%M:%S %p')
dt = rec_start
# best guess in absence of date
if lines[1][11:13] == 'pm' and rec_start.hour < 12:
dt = rec_start - timedelta(days=1)
elif lines[1][11:13] == 'am' and rec_start.hour > 12:
dt = rec_start + timedelta
stage_start = stage_start.replace(year=dt.year,
month=dt.month,
day=dt.day)
first_second = int((stage_start - rec_start).total_seconds())
idx_first_line = 1
lines[-1] += '_' # to fill newline position
stage_key = ALICE_STAGE_KEY
idx_stage = slice(-3, -1)
if epoch_length is None:
epoch_length = 30
elif source == 'sandman':
stage_start = datetime.strptime(lines[4][12:33],
'%d/%m/%Y %I:%M:%S %p')
first_second = int((stage_start - rec_start).total_seconds())
idx_first_line = 14
stage_key = SANDMAN_STAGE_KEY
idx_stage = slice(-14, -12)
if epoch_length is None:
epoch_length = 30
elif source == 'compumedics':
if staging_start is None:
first_second = 0
else:
first_second = int((
staging_start - rec_start).total_seconds())
idx_first_line = 0
stage_key = COMPUMEDICS_STAGE_KEY
idx_stage = slice(0, 1)
if epoch_length is None:
epoch_length = 30
elif source == 'deltamed':
if staging_start is None:
first_second = 0
else:
first_second = int((
staging_start - rec_start).total_seconds())
idx_first_line = 0
stage_key = DELTAMED_STAGE_KEY
idx_stage = slice(-2, -1)
if epoch_length is None:
epoch_length = int(lines[0][:lines[0].index('\t')])
elif source == 'prana':
stage_start = datetime.strptime(lines[5][:11], '%d %H:%M:%S')
# best guess in absence of date
dt = rec_start
if stage_start.hour > 12 and rec_start.hour < 12:
dt = rec_start - timedelta(days=1)
elif stage_start.hour < 12 and rec_start.hour > 12:
dt = rec_start + timedelta(days=1)
stage_start = stage_start.replace(year=dt.year,
month=dt.month,
day=dt.day)
first_second = int((stage_start - rec_start).total_seconds())
idx_first_line = 5
stage_key = PRANA_STAGE_KEY
spacer = next(i for i, j in enumerate(lines[5][30:]) \
if j.strip())
idx_stage = slice(30 + spacer, 30 + spacer + 1)
if epoch_length is None:
idx_epoch_length = None
for i,j in enumerate(lines[3]):
if j.isdigit():
idx_epoch_length = i, i + lines[3][i:].index(' ')
epoch_length = int(lines[3][slice(*idx_epoch_length)])
break
if idx_epoch_length is None:
epoch_length = 30
else:
raise ValueError('Unknown source program for staging file')
offset = first_second % epoch_length
lg.info('Time offset: ' + str(offset) + ' sec')
if rater_name not in self.raters:
self.add_rater(rater_name)
self.get_rater(rater_name)
stages = self.rater.find('stages')
if as_qual:
for i, one_line in enumerate(lines[idx_first_line:]):
if one_line[idx_stage] in poor:
epoch_beg = first_second + (i * epoch_length)
try:
self.set_stage_for_epoch(epoch_beg, 'Poor',
attr='quality',
save=False)
except KeyError:
return 1
else:
# list is necessary so that it does not remove in place
for s in list(stages):
stages.remove(s)
for i in arange(offset, first_second - epoch_length, epoch_length):
epoch = SubElement(stages, 'epoch')
start_time = SubElement(epoch, 'epoch_start')
epoch_beg = i
start_time.text = str(epoch_beg)
end_time = SubElement(epoch, 'epoch_end')
end_time.text = str(epoch_beg + epoch_length)
epoch_stage = SubElement(epoch, 'stage')
epoch_stage.text = 'Unknown'
quality = SubElement(epoch, 'quality')
quality.text = 'Good'
idx_epoch = 0
for i, one_line in enumerate(lines[idx_first_line:]):
if clue is not None:
if clue not in one_line[idx_clue]:
continue
epoch = SubElement(stages, 'epoch')
start_time = SubElement(epoch, 'epoch_start')
epoch_beg = first_second + (idx_epoch * epoch_length)
start_time.text = str(epoch_beg)
end_time = SubElement(epoch, 'epoch_end')
end_time.text = str(epoch_beg + epoch_length)
epoch_stage = SubElement(epoch, 'stage')
try:
key = one_line[idx_stage]
one_stage = stage_key[key]
except KeyError:
one_stage = 'Unknown'
lg.info('Stage not recognized: ' + key)
epoch_stage.text = one_stage
quality = SubElement(epoch, 'quality')
if one_stage in poor:
quality.text = 'Poor'
else:
quality.text = 'Good'
idx_epoch += 1
self.save() |
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB') | Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.). | Below is the the instruction that describes the task:
### Input:
Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
### Response:
def mem(data):
"""Total memory used by data
Parameters
----------
data : dict of pandas.DataFrames or pandas.DataFrame
Returns
-------
str : str
Human readable amount of memory used with unit (like KB, MB, GB etc.).
"""
if type(data) == dict:
num = sum([data[k].memory_usage(index=True).sum() for k in data])
else:
num = data.memory_usage(index=True).sum()
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
return "%3.1f %s" % (num, 'PB') |
def updatepLvlNextFunc(self):
'''
A method that creates the pLvlNextFunc attribute as a sequence of
linear functions, indicating constant expected permanent income growth
across permanent income levels. Draws on the attribute PermGroFac, and
installs a special retirement function when it exists.
Parameters
----------
None
Returns
-------
None
'''
orig_time = self.time_flow
self.timeFwd()
pLvlNextFunc = []
for t in range(self.T_cycle):
pLvlNextFunc.append(LinearInterp(np.array([0.,1.]),np.array([0.,self.PermGroFac[t]])))
self.pLvlNextFunc = pLvlNextFunc
self.addToTimeVary('pLvlNextFunc')
if not orig_time:
self.timeRev() | A method that creates the pLvlNextFunc attribute as a sequence of
linear functions, indicating constant expected permanent income growth
across permanent income levels. Draws on the attribute PermGroFac, and
installs a special retirement function when it exists.
Parameters
----------
None
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
A method that creates the pLvlNextFunc attribute as a sequence of
linear functions, indicating constant expected permanent income growth
across permanent income levels. Draws on the attribute PermGroFac, and
installs a special retirement function when it exists.
Parameters
----------
None
Returns
-------
None
### Response:
def updatepLvlNextFunc(self):
'''
A method that creates the pLvlNextFunc attribute as a sequence of
linear functions, indicating constant expected permanent income growth
across permanent income levels. Draws on the attribute PermGroFac, and
installs a special retirement function when it exists.
Parameters
----------
None
Returns
-------
None
'''
orig_time = self.time_flow
self.timeFwd()
pLvlNextFunc = []
for t in range(self.T_cycle):
pLvlNextFunc.append(LinearInterp(np.array([0.,1.]),np.array([0.,self.PermGroFac[t]])))
self.pLvlNextFunc = pLvlNextFunc
self.addToTimeVary('pLvlNextFunc')
if not orig_time:
self.timeRev() |
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.') | Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Below is the the instruction that describes the task:
### Input:
Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
### Response:
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces when they are delimiting blocks, classes, namespaces etc.
# And since you should never have braces at the beginning of a line,
# this is an easy test. Except that braces used for initialization don't
# follow the same rule; we often don't want spaces before those.
match = Match(r'^(.*[^ ({>]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
leading_text = match.group(1)
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
# We also suppress warnings for `uint64_t{expression}` etc., as the style
# guide recommends brace initialization for integral types to avoid
# overflow/truncation.
if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
and not _IsType(clean_lines, nesting_state, leading_text)):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.