code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def list_by_x(self, item=None):
"""
:param item: Grouping key, one of "cid", "type" and "extensions"
:return:
A list of :class:`Processor` or its children classes grouped by
given 'item', [(cid, [:class:`Processor`)]] by default
"""
prs = self._processors
if item is None or item == "cid": # Default.
res = [(cid, [prs[cid]]) for cid in sorted(prs.keys())]
elif item in ("type", "extensions"):
res = list_by_x(prs.values(), item)
else:
raise ValueError("keyword argument 'item' must be one of "
"None, 'cid', 'type' and 'extensions' "
"but it was '%s'" % item)
return res | :param item: Grouping key, one of "cid", "type" and "extensions"
:return:
A list of :class:`Processor` or its children classes grouped by
            given 'item', [(cid, [:class:`Processor`)]] by default | Below is the instruction that describes the task:
### Input:
:param item: Grouping key, one of "cid", "type" and "extensions"
:return:
A list of :class:`Processor` or its children classes grouped by
given 'item', [(cid, [:class:`Processor`)]] by default
### Response:
def list_by_x(self, item=None):
    """
    Group the registered processors by the given key.

    :param item: Grouping key, one of "cid", "type" and "extensions"
    :return:
        A list of :class:`Processor` or its children classes grouped by
        given 'item', [(cid, [:class:`Processor`)]] by default
    :raises ValueError: if 'item' is not one of the supported keys
    """
    processors = self._processors
    if item in (None, "cid"):  # The default grouping.
        return [(cid, [processors[cid]]) for cid in sorted(processors)]
    if item in ("type", "extensions"):
        # Delegate to the module-level helper of the same name.
        return list_by_x(processors.values(), item)
    raise ValueError("keyword argument 'item' must be one of "
                     "None, 'cid', 'type' and 'extensions' "
                     "but it was '%s'" % item)
def grow(self, path):
"""
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
"""
if path is None:
return
path = path.rstrip("/")
log.info("Walking through directory {0}".format(
os.path.abspath(path)))
dirpath, dirnames, filenames = next(os.walk(path))
# Investigate main.fmf as the first file (for correct inheritance)
filenames = sorted(
[filename for filename in filenames if filename.endswith(SUFFIX)])
try:
filenames.insert(0, filenames.pop(filenames.index(MAIN)))
except ValueError:
pass
# Check every metadata file and load data (ignore hidden)
for filename in filenames:
if filename.startswith("."):
continue
fullpath = os.path.abspath(os.path.join(dirpath, filename))
log.info("Checking file {0}".format(fullpath))
try:
with open(fullpath) as datafile:
data = yaml.load(datafile, Loader=FullLoader)
except yaml.scanner.ScannerError as error:
raise(utils.FileError("Failed to parse '{0}'\n{1}".format(
fullpath, error)))
log.data(pretty(data))
# Handle main.fmf as data for self
if filename == MAIN:
self.sources.append(fullpath)
self.update(data)
# Handle other *.fmf files as children
else:
self.child(os.path.splitext(filename)[0], data, fullpath)
# Explore every child directory (ignore hidden dirs and subtrees)
for dirname in sorted(dirnames):
if dirname.startswith("."):
continue
# Ignore metadata subtrees
if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
log.debug("Ignoring metadata tree '{0}'.".format(dirname))
continue
self.child(dirname, os.path.join(path, dirname))
# Remove empty children (ignore directories without metadata)
for name in list(self.children.keys()):
child = self.children[name]
if not child.data and not child.children:
del(self.children[name])
log.debug("Empty tree '{0}' removed.".format(child.name))
# Apply inheritance when all scattered data are gathered.
# This is done only once, from the top parent object.
if self.parent is None:
self.inherit() | Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
        sign leads to adding the value more than once! | Below is the instruction that describes the task:
### Input:
Grow the metadata tree for the given directory path
Note: For each path, grow() should be run only once. Growing the tree
from the same path multiple times with attribute adding using the "+"
sign leads to adding the value more than once!
### Response:
def grow(self, path):
    """
    Grow the metadata tree for the given directory path

    Note: For each path, grow() should be run only once. Growing the tree
    from the same path multiple times with attribute adding using the "+"
    sign leads to adding the value more than once!
    """
    if path is None:
        return
    path = path.rstrip("/")
    log.info("Walking through directory {0}".format(
        os.path.abspath(path)))
    # Consume only the top level of the walk; subdirectories recurse
    # through self.child() below.
    dirpath, dirnames, filenames = next(os.walk(path))
    # Sort the metadata files, then move main.fmf to the front so that
    # inheritance is applied in the correct order.
    filenames = sorted(
        fname for fname in filenames if fname.endswith(SUFFIX))
    try:
        filenames.insert(0, filenames.pop(filenames.index(MAIN)))
    except ValueError:
        pass
    # Load data from every metadata file (hidden files are skipped).
    for fname in filenames:
        if fname.startswith("."):
            continue
        fullpath = os.path.abspath(os.path.join(dirpath, fname))
        log.info("Checking file {0}".format(fullpath))
        try:
            with open(fullpath) as datafile:
                data = yaml.load(datafile, Loader=FullLoader)
        except yaml.scanner.ScannerError as error:
            raise utils.FileError("Failed to parse '{0}'\n{1}".format(
                fullpath, error))
        log.data(pretty(data))
        if fname == MAIN:
            # main.fmf carries the data for this very node.
            self.sources.append(fullpath)
            self.update(data)
        else:
            # Any other *.fmf file becomes a child node.
            self.child(os.path.splitext(fname)[0], data, fullpath)
    # Recurse into child directories (skip hidden dirs and fmf subtrees).
    for dirname in sorted(dirnames):
        if dirname.startswith("."):
            continue
        if os.path.isdir(os.path.join(path, dirname, SUFFIX)):
            log.debug("Ignoring metadata tree '{0}'.".format(dirname))
            continue
        self.child(dirname, os.path.join(path, dirname))
    # Drop children that carry neither data nor grandchildren.
    for name in list(self.children.keys()):
        child = self.children[name]
        if not (child.data or child.children):
            del self.children[name]
            log.debug("Empty tree '{0}' removed.".format(child.name))
    # Apply inheritance once all scattered data are gathered.
    # This is done only once, from the top parent object.
    if self.parent is None:
        self.inherit()
def adjust_to_360(val, key):
"""
Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model
"""
CheckDec = ['_dec', '_lon', '_azimuth', 'dip_direction']
adjust = False
for dec_key in CheckDec:
if dec_key in key:
if key.endswith(dec_key) or key.endswith('_'):
adjust = True
if not val:
return ''
elif not adjust:
return val
elif adjust:
new_val = float(val) % 360
if new_val != float(val):
print('-I- adjusted {} {} to 0=>360.: {}'.format(key, val, new_val))
return new_val | Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model | Below is the the instruction that describes the task:
### Input:
Take in a value and a key. If the key is of the type:
declination/longitude/azimuth/direction, adjust it to be within
the range 0-360 as required by the MagIC data model
### Response:
def adjust_to_360(val, key):
    """
    Take in a value and a key. If the key is of the type:
    declination/longitude/azimuth/direction, adjust it to be within
    the range 0-360 as required by the MagIC data model
    """
    directional_suffixes = ['_dec', '_lon', '_azimuth', 'dip_direction']
    # A key needs adjusting when it ends with one of the directional
    # suffixes (or with a bare underscore) and contains that suffix.
    needs_adjustment = any(
        suffix in key and (key.endswith(suffix) or key.endswith('_'))
        for suffix in directional_suffixes)
    if not val:
        # Empty / falsy values are normalized to an empty string.
        return ''
    if not needs_adjustment:
        return val
    wrapped = float(val) % 360
    if wrapped != float(val):
        print('-I- adjusted {} {} to 0=>360.: {}'.format(key, val, wrapped))
    return wrapped
def colorize(text='', opts=(), **kwargs):
"""
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red'
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.iteritems():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = text + '\x1b[%sm' % RESET
return ('\x1b[%sm' % ';'.join(code_list)) + text | Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red' | Below is the the instruction that describes the task:
### Input:
Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red'
### Response:
def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.
    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.
    Returns the RESET code if no parameters are given.
    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code
    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    # Special case: colorize('', opts=('reset',)) yields a bare RESET code.
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # BUG FIX: kwargs.iteritems() is Python 2 only and raises
    # AttributeError under Python 3; items() works on both.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    # Auto-terminate with RESET unless explicitly suppressed.
    if 'noreset' not in opts:
        text = text + '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(code_list)) + text
def switch_toggle(self, device):
"""Toggles the current state of the given device"""
state = self.get_state(device)
if(state == '1'):
return self.switch_off(device)
elif(state == '0'):
return self.switch_on(device)
else:
return state | Toggles the current state of the given device | Below is the the instruction that describes the task:
### Input:
Toggles the current state of the given device
### Response:
def switch_toggle(self, device):
    """Toggles the current state of the given device"""
    current = self.get_state(device)
    if current == '1':
        return self.switch_off(device)
    if current == '0':
        return self.switch_on(device)
    # Anything else (e.g. an error string) is passed straight back.
    return current
def read_iso19115_metadata(layer_uri, keyword=None, version_35=False):
"""Retrieve keywords from a metadata object
:param layer_uri: Uri to layer.
:type layer_uri: basestring
:param keyword: The key of keyword that want to be read. If None, return
all keywords in dictionary.
:type keyword: basestring
:returns: Dictionary of keywords or value of key as string.
:rtype: dict, basestring
"""
xml_uri = os.path.splitext(layer_uri)[0] + '.xml'
# Remove the prefix for local file. For example csv.
file_prefix = 'file:'
if xml_uri.startswith(file_prefix):
xml_uri = xml_uri[len(file_prefix):]
if not os.path.exists(xml_uri):
xml_uri = None
if not xml_uri and os.path.exists(layer_uri):
message = 'Layer based file but no xml file.\n'
message += 'Layer path: %s.' % layer_uri
raise NoKeywordsFoundError(message)
if version_35:
metadata = GenericLayerMetadata35(layer_uri, xml_uri)
else:
metadata = GenericLayerMetadata(layer_uri, xml_uri)
active_metadata_classes = METADATA_CLASSES
if version_35:
active_metadata_classes = METADATA_CLASSES35
if metadata.layer_purpose in active_metadata_classes:
metadata = active_metadata_classes[
metadata.layer_purpose](layer_uri, xml_uri)
# dictionary comprehension
keywords = {
x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items())
if x[1]['value'] is not None}
if 'keyword_version' not in list(keywords.keys()) and xml_uri:
message = 'No keyword version found. Metadata xml file is invalid.\n'
message += 'Layer uri: %s\n' % layer_uri
message += 'Keywords file: %s\n' % os.path.exists(
os.path.splitext(layer_uri)[0] + '.xml')
message += 'keywords:\n'
for k, v in list(keywords.items()):
message += '%s: %s\n' % (k, v)
raise MetadataReadError(message)
# Get dictionary keywords that has value != None
keywords = {
x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items())
if x[1]['value'] is not None}
if keyword:
try:
return keywords[keyword]
except KeyError:
message = 'Keyword with key %s is not found. ' % keyword
message += 'Layer path: %s' % layer_uri
raise KeywordNotFoundError(message)
return keywords | Retrieve keywords from a metadata object
:param layer_uri: Uri to layer.
:type layer_uri: basestring
:param keyword: The key of keyword that want to be read. If None, return
all keywords in dictionary.
:type keyword: basestring
:returns: Dictionary of keywords or value of key as string.
:rtype: dict, basestring | Below is the the instruction that describes the task:
### Input:
Retrieve keywords from a metadata object
:param layer_uri: Uri to layer.
:type layer_uri: basestring
:param keyword: The key of keyword that want to be read. If None, return
all keywords in dictionary.
:type keyword: basestring
:returns: Dictionary of keywords or value of key as string.
:rtype: dict, basestring
### Response:
def read_iso19115_metadata(layer_uri, keyword=None, version_35=False):
    """Retrieve keywords from a metadata object

    :param layer_uri: Uri to layer.
    :type layer_uri: basestring

    :param keyword: The key of keyword that want to be read. If None, return
        all keywords in dictionary.
    :type keyword: basestring

    :param version_35: Use the 3.5 metadata classes when True.
    :type version_35: bool

    :returns: Dictionary of keywords or value of key as string.
    :rtype: dict, basestring

    :raises NoKeywordsFoundError: layer file exists but no sidecar xml.
    :raises MetadataReadError: xml present but missing keyword_version.
    :raises KeywordNotFoundError: requested key absent from keywords.
    """
    xml_uri = os.path.splitext(layer_uri)[0] + '.xml'
    # Remove the prefix for local file. For example csv.
    file_prefix = 'file:'
    if xml_uri.startswith(file_prefix):
        xml_uri = xml_uri[len(file_prefix):]
    if not os.path.exists(xml_uri):
        xml_uri = None
    if not xml_uri and os.path.exists(layer_uri):
        message = 'Layer based file but no xml file.\n'
        message += 'Layer path: %s.' % layer_uri
        raise NoKeywordsFoundError(message)
    # Pick the generic reader matching the requested metadata version.
    if version_35:
        metadata = GenericLayerMetadata35(layer_uri, xml_uri)
    else:
        metadata = GenericLayerMetadata(layer_uri, xml_uri)
    active_metadata_classes = METADATA_CLASSES
    if version_35:
        active_metadata_classes = METADATA_CLASSES35
    # Re-read with the purpose-specific metadata class when one exists.
    if metadata.layer_purpose in active_metadata_classes:
        metadata = active_metadata_classes[
            metadata.layer_purpose](layer_uri, xml_uri)
    # Keep only the properties that actually carry a value.
    # BUG FIX: this identical dict comprehension was previously computed
    # twice in a row; the redundant second pass has been removed.
    keywords = {
        x[0]: x[1]['value'] for x in list(metadata.dict['properties'].items())
        if x[1]['value'] is not None}
    if 'keyword_version' not in keywords and xml_uri:
        message = 'No keyword version found. Metadata xml file is invalid.\n'
        message += 'Layer uri: %s\n' % layer_uri
        message += 'Keywords file: %s\n' % os.path.exists(
            os.path.splitext(layer_uri)[0] + '.xml')
        message += 'keywords:\n'
        for k, v in list(keywords.items()):
            message += '%s: %s\n' % (k, v)
        raise MetadataReadError(message)
    if keyword:
        try:
            return keywords[keyword]
        except KeyError:
            message = 'Keyword with key %s is not found. ' % keyword
            message += 'Layer path: %s' % layer_uri
            raise KeywordNotFoundError(message)
    return keywords
def status(self):
"""Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, exprimed in Sievert per house (uSv/h);
uSvhError -- the incertitude for the radiation dose."""
minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
cpm = self.count / minutes if minutes > 0 else 0
return dict(
duration=round(self.duration / 1000.0, 2),
cpm=round(cpm, 2),
uSvh=round(cpm / K_ALPHA, 3),
uSvhError=round(math.sqrt(self.count) / minutes / K_ALPHA, 3)
if minutes > 0
else 0,
) | Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, exprimed in Sievert per house (uSv/h);
uSvhError -- the incertitude for the radiation dose. | Below is the the instruction that describes the task:
### Input:
Return current readings, as a dictionary with:
duration -- the duration of the measurements, in seconds;
cpm -- the radiation count by minute;
uSvh -- the radiation dose, exprimed in Sievert per house (uSv/h);
uSvhError -- the incertitude for the radiation dose.
### Response:
def status(self):
    """Return current readings, as a dictionary with:

    duration -- the duration of the measurements, in seconds;
    cpm -- the radiation count by minute;
    uSvh -- the radiation dose, expressed in Sievert per hour (uSv/h);
    uSvhError -- the uncertainty for the radiation dose."""
    # self.duration is in milliseconds; cap it at MAX_CPM_TIME before
    # converting to minutes.
    elapsed_minutes = min(self.duration, MAX_CPM_TIME) / 1000 / 60.0
    if elapsed_minutes > 0:
        counts_per_minute = self.count / elapsed_minutes
        dose_error = round(
            math.sqrt(self.count) / elapsed_minutes / K_ALPHA, 3)
    else:
        counts_per_minute = 0
        dose_error = 0
    return {
        'duration': round(self.duration / 1000.0, 2),
        'cpm': round(counts_per_minute, 2),
        'uSvh': round(counts_per_minute / K_ALPHA, 3),
        'uSvhError': dose_error,
    }
def session_expired(self):
"""
Returns True if login_time not set or seconds since
login time is greater than 200 mins.
"""
if not self._login_time or (datetime.datetime.now()-self._login_time).total_seconds() > 12000:
return True | Returns True if login_time not set or seconds since
        login time is greater than 200 mins. | Below is the instruction that describes the task:
### Input:
Returns True if login_time not set or seconds since
login time is greater than 200 mins.
### Response:
def session_expired(self):
    """
    Returns True if login_time not set or seconds since
    login time is greater than 200 mins, False otherwise.
    """
    # BUG FIX: previously fell off the end and implicitly returned None
    # for a live session; now returns an explicit boolean (None and False
    # are both falsy, so callers are unaffected).
    if not self._login_time:
        return True
    elapsed = (datetime.datetime.now() - self._login_time).total_seconds()
    # 200 minutes == 12000 seconds.
    return elapsed > 12000
def _setup_mqs(self):
"""
**Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
one.
"""
try:
self._prof.prof('init mqs setup', uid=self._uid)
self._logger.debug('Setting up mq connection and channel')
mq_connection = pika.BlockingConnection(pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
mq_channel = mq_connection.channel()
self._logger.debug('Connection and channel setup successful')
self._logger.debug('Setting up all exchanges and queues')
qs = [
'%s-tmgr-to-sync' % self._sid,
'%s-cb-to-sync' % self._sid,
'%s-enq-to-sync' % self._sid,
'%s-deq-to-sync' % self._sid,
'%s-sync-to-tmgr' % self._sid,
'%s-sync-to-cb' % self._sid,
'%s-sync-to-enq' % self._sid,
'%s-sync-to-deq' % self._sid
]
for i in range(1, self._num_pending_qs + 1):
queue_name = '%s-pendingq-%s' % (self._sid, i)
self._pending_queue.append(queue_name)
qs.append(queue_name)
for i in range(1, self._num_completed_qs + 1):
queue_name = '%s-completedq-%s' % (self._sid, i)
self._completed_queue.append(queue_name)
qs.append(queue_name)
f = open('.%s.txt' % self._sid, 'w')
for q in qs:
# Durable Qs will not be lost if rabbitmq server crashes
mq_channel.queue_declare(queue=q)
f.write(q + '\n')
f.close()
self._logger.debug('All exchanges and queues are setup')
self._prof.prof('mqs setup done', uid=self._uid)
return True
except Exception, ex:
self._logger.exception('Error setting RabbitMQ system: %s' % ex)
raise | **Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
one. | Below is the the instruction that describes the task:
### Input:
**Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
one.
### Response:
def _setup_mqs(self):
    """
    **Purpose**: Setup RabbitMQ system on the client side. We instantiate queue(s) 'pendingq-*' for communication
    between the enqueuer thread and the task manager process. We instantiate queue(s) 'completedq-*' for
    communication between the task manager and dequeuer thread. We instantiate queue 'sync-to-master' for
    communication from enqueuer/dequeuer/task_manager to the synchronizer thread. We instantiate queue
    'sync-ack' for communication from synchronizer thread to enqueuer/dequeuer/task_manager.
    Details: All queues are durable: Even if the RabbitMQ server goes down, the queues are saved to disk and can
    be retrieved. This also means that after an erroneous run the queues might still have unacknowledged messages
    and will contain messages from that run. Hence, in every new run, we first delete the queue and create a new
    one.

    :returns: True on success; re-raises any exception after logging it.
    """
    try:
        self._prof.prof('init mqs setup', uid=self._uid)
        self._logger.debug('Setting up mq connection and channel')
        mq_connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=self._mq_hostname, port=self._port))
        mq_channel = mq_connection.channel()
        self._logger.debug('Connection and channel setup successful')
        self._logger.debug('Setting up all exchanges and queues')
        qs = [
            '%s-tmgr-to-sync' % self._sid,
            '%s-cb-to-sync' % self._sid,
            '%s-enq-to-sync' % self._sid,
            '%s-deq-to-sync' % self._sid,
            '%s-sync-to-tmgr' % self._sid,
            '%s-sync-to-cb' % self._sid,
            '%s-sync-to-enq' % self._sid,
            '%s-sync-to-deq' % self._sid
        ]
        for i in range(1, self._num_pending_qs + 1):
            queue_name = '%s-pendingq-%s' % (self._sid, i)
            self._pending_queue.append(queue_name)
            qs.append(queue_name)
        for i in range(1, self._num_completed_qs + 1):
            queue_name = '%s-completedq-%s' % (self._sid, i)
            self._completed_queue.append(queue_name)
            qs.append(queue_name)
        # BUG FIX: use a context manager so the bookkeeping file is closed
        # even when queue_declare() raises (the old code leaked the handle).
        with open('.%s.txt' % self._sid, 'w') as f:
            for q in qs:
                # NOTE(review): the docstring says queues are durable, but
                # queue_declare() is called without durable=True -- confirm.
                mq_channel.queue_declare(queue=q)
                f.write(q + '\n')
        self._logger.debug('All exchanges and queues are setup')
        self._prof.prof('mqs setup done', uid=self._uid)
        return True
    except Exception as ex:
        # BUG FIX: 'except Exception, ex' is Python 2-only syntax and a
        # SyntaxError under Python 3.
        self._logger.exception('Error setting RabbitMQ system: %s' % ex)
        raise
def save(self, filename, binary=True):
"""
Writes a surface mesh to disk.
Written file may be an ASCII or binary ply, stl, or vtk mesh file.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
ftype. Can be one of the following types (.ply, .stl,
.vtk)
binary : bool, optional
Writes the file as binary when True and ASCII when False.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size.
"""
filename = os.path.abspath(os.path.expanduser(filename))
file_mode = True
# Check filetype
ftype = filename[-3:]
if ftype == 'ply':
writer = vtk.vtkPLYWriter()
elif ftype == 'vtp':
writer = vtk.vtkXMLPolyDataWriter()
file_mode = False
if binary:
writer.SetDataModeToBinary()
else:
writer.SetDataModeToAscii()
elif ftype == 'stl':
writer = vtk.vtkSTLWriter()
elif ftype == 'vtk':
writer = vtk.vtkPolyDataWriter()
else:
raise Exception('Filetype must be either "ply", "stl", or "vtk"')
writer.SetFileName(filename)
writer.SetInputData(self)
if binary and file_mode:
writer.SetFileTypeToBinary()
elif file_mode:
writer.SetFileTypeToASCII()
writer.Write() | Writes a surface mesh to disk.
Written file may be an ASCII or binary ply, stl, or vtk mesh file.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
ftype. Can be one of the following types (.ply, .stl,
.vtk)
binary : bool, optional
Writes the file as binary when True and ASCII when False.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size. | Below is the the instruction that describes the task:
### Input:
Writes a surface mesh to disk.
Written file may be an ASCII or binary ply, stl, or vtk mesh file.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
ftype. Can be one of the following types (.ply, .stl,
.vtk)
binary : bool, optional
Writes the file as binary when True and ASCII when False.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size.
### Response:
def save(self, filename, binary=True):
    """
    Writes a surface mesh to disk.
    Written file may be an ASCII or binary ply, vtp, stl, or vtk mesh file.

    Parameters
    ----------
    filename : str
        Filename of mesh to be written. File type is inferred from
        the extension of the filename. Can be one of the following
        types (.ply, .vtp, .stl, .vtk); matching is case-insensitive.
    binary : bool, optional
        Writes the file as binary when True and ASCII when False.

    Notes
    -----
    Binary files write much faster than ASCII and have a smaller
    file size.
    """
    filename = os.path.abspath(os.path.expanduser(filename))
    file_mode = True
    # Check filetype; BUG FIX: compare case-insensitively so '.PLY' etc.
    # are accepted.
    ftype = filename[-3:].lower()
    if ftype == 'ply':
        writer = vtk.vtkPLYWriter()
    elif ftype == 'vtp':
        writer = vtk.vtkXMLPolyDataWriter()
        # The XML writer uses data modes instead of file modes.
        file_mode = False
        if binary:
            writer.SetDataModeToBinary()
        else:
            writer.SetDataModeToAscii()
    elif ftype == 'stl':
        writer = vtk.vtkSTLWriter()
    elif ftype == 'vtk':
        writer = vtk.vtkPolyDataWriter()
    else:
        # BUG FIX: the old message omitted the supported "vtp" type.
        raise Exception(
            'Filetype must be one of "ply", "vtp", "stl", or "vtk"')
    writer.SetFileName(filename)
    writer.SetInputData(self)
    if binary and file_mode:
        writer.SetFileTypeToBinary()
    elif file_mode:
        writer.SetFileTypeToASCII()
    writer.Write()
def _extract_header_value(line):
"""
Extracts a key / value pair from a header line in an ODF file
"""
# Skip blank lines, returning None
if not line:
return None
# Attempt to split by equals sign
halves = line.split('=')
if len(halves) > 1:
key = halves[0].strip()
value = halves[1].strip()
return {key: value}
# Otherwise, attempt to split by colon
else:
halves = line.split(':')
key = halves[0].strip()
value = halves[1].strip()
return {key: value} | Extracts a key / value pair from a header line in an ODF file | Below is the the instruction that describes the task:
### Input:
Extracts a key / value pair from a header line in an ODF file
### Response:
def _extract_header_value(line):
"""
Extracts a key / value pair from a header line in an ODF file
"""
# Skip blank lines, returning None
if not line:
return None
# Attempt to split by equals sign
halves = line.split('=')
if len(halves) > 1:
key = halves[0].strip()
value = halves[1].strip()
return {key: value}
# Otherwise, attempt to split by colon
else:
halves = line.split(':')
key = halves[0].strip()
value = halves[1].strip()
return {key: value} |
def register_target(repo_cmd, repo_service):
"""Decorator to register a class with an repo_service"""
def decorate(klass):
log.debug('Loading service module class: {}'.format(klass.__name__) )
klass.command = repo_cmd
klass.name = repo_service
RepositoryService.service_map[repo_service] = klass
RepositoryService.command_map[repo_cmd] = repo_service
return klass
return decorate | Decorator to register a class with an repo_service | Below is the the instruction that describes the task:
### Input:
Decorator to register a class with an repo_service
### Response:
def register_target(repo_cmd, repo_service):
    """Decorator to register a class with an repo_service"""
    def wrapper(klass):
        # Record the command/service identifiers on the class itself and
        # index it in the two global RepositoryService registries.
        log.debug('Loading service module class: {}'.format(klass.__name__) )
        klass.command = repo_cmd
        klass.name = repo_service
        RepositoryService.service_map[repo_service] = klass
        RepositoryService.command_map[repo_cmd] = repo_service
        return klass
    return wrapper
def status(url="http://127.0.0.1/status"):
"""
Return the data from an Nginx status page as a dictionary.
http://wiki.nginx.org/HttpStubStatusModule
url
The URL of the status page. Defaults to 'http://127.0.0.1/status'
CLI Example:
.. code-block:: bash
salt '*' nginx.status
"""
resp = _urlopen(url)
status_data = resp.read()
resp.close()
lines = status_data.splitlines()
if not len(lines) == 4:
return
# "Active connections: 1 "
active_connections = lines[0].split()[2]
# "server accepts handled requests"
# " 12 12 9 "
accepted, handled, requests = lines[2].split()
# "Reading: 0 Writing: 1 Waiting: 0 "
_, reading, _, writing, _, waiting = lines[3].split()
return {
'active connections': int(active_connections),
'accepted': int(accepted),
'handled': int(handled),
'requests': int(requests),
'reading': int(reading),
'writing': int(writing),
'waiting': int(waiting),
} | Return the data from an Nginx status page as a dictionary.
http://wiki.nginx.org/HttpStubStatusModule
url
The URL of the status page. Defaults to 'http://127.0.0.1/status'
CLI Example:
.. code-block:: bash
salt '*' nginx.status | Below is the the instruction that describes the task:
### Input:
Return the data from an Nginx status page as a dictionary.
http://wiki.nginx.org/HttpStubStatusModule
url
The URL of the status page. Defaults to 'http://127.0.0.1/status'
CLI Example:
.. code-block:: bash
salt '*' nginx.status
### Response:
def status(url="http://127.0.0.1/status"):
"""
Return the data from an Nginx status page as a dictionary.
http://wiki.nginx.org/HttpStubStatusModule
url
The URL of the status page. Defaults to 'http://127.0.0.1/status'
CLI Example:
.. code-block:: bash
salt '*' nginx.status
"""
resp = _urlopen(url)
status_data = resp.read()
resp.close()
lines = status_data.splitlines()
if not len(lines) == 4:
return
# "Active connections: 1 "
active_connections = lines[0].split()[2]
# "server accepts handled requests"
# " 12 12 9 "
accepted, handled, requests = lines[2].split()
# "Reading: 0 Writing: 1 Waiting: 0 "
_, reading, _, writing, _, waiting = lines[3].split()
return {
'active connections': int(active_connections),
'accepted': int(accepted),
'handled': int(handled),
'requests': int(requests),
'reading': int(reading),
'writing': int(writing),
'waiting': int(waiting),
} |
def delete(self, data=None):
"""Delete this object from the JSS."""
if not self.can_delete:
raise JSSMethodNotAllowedError(self.__class__.__name__)
if data:
self.jss.delete(self.url, data)
else:
self.jss.delete(self.url) | Delete this object from the JSS. | Below is the the instruction that describes the task:
### Input:
Delete this object from the JSS.
### Response:
def delete(self, data=None):
"""Delete this object from the JSS."""
if not self.can_delete:
raise JSSMethodNotAllowedError(self.__class__.__name__)
if data:
self.jss.delete(self.url, data)
else:
self.jss.delete(self.url) |
def get_children(self, recursive=False):
"""Return the children of this process as a list of Process
objects.
If recursive is True return all the parent descendants.
Example (A == this process):
A ββ
β
ββ B (child) ββ
β ββ X (grandchild) ββ
β ββ Y (great grandchild)
ββ C (child)
ββ D (child)
>>> p.get_children()
B, C, D
>>> p.get_children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be returned either as the reference to
process A is lost.
"""
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
ret = []
if not recursive:
for p in process_iter():
try:
if p.ppid == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time <= p.create_time:
ret.append(p)
except NoSuchProcess:
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = defaultdict(list)
for p in process_iter():
try:
table[p.ppid].append(p)
except NoSuchProcess:
pass
# At this point we have a mapping table where table[self.pid]
# are the current process's children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time <= child.create_time
except NoSuchProcess:
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret | Return the children of this process as a list of Process
objects.
If recursive is True return all the parent descendants.
Example (A == this process):
A ββ
β
ββ B (child) ββ
β ββ X (grandchild) ββ
β ββ Y (great grandchild)
ββ C (child)
ββ D (child)
>>> p.get_children()
B, C, D
>>> p.get_children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be returned either as the reference to
process A is lost. | Below is the the instruction that describes the task:
### Input:
Return the children of this process as a list of Process
objects.
If recursive is True return all the parent descendants.
Example (A == this process):
A ββ
β
ββ B (child) ββ
β ββ X (grandchild) ββ
β ββ Y (great grandchild)
ββ C (child)
ββ D (child)
>>> p.get_children()
B, C, D
>>> p.get_children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be returned either as the reference to
process A is lost.
### Response:
def get_children(self, recursive=False):
"""Return the children of this process as a list of Process
objects.
If recursive is True return all the parent descendants.
Example (A == this process):
A ββ
β
ββ B (child) ββ
β ββ X (grandchild) ββ
β ββ Y (great grandchild)
ββ C (child)
ββ D (child)
>>> p.get_children()
B, C, D
>>> p.get_children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be returned either as the reference to
process A is lost.
"""
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
ret = []
if not recursive:
for p in process_iter():
try:
if p.ppid == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time <= p.create_time:
ret.append(p)
except NoSuchProcess:
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = defaultdict(list)
for p in process_iter():
try:
table[p.ppid].append(p)
except NoSuchProcess:
pass
# At this point we have a mapping table where table[self.pid]
# are the current process's children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time <= child.create_time
except NoSuchProcess:
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret |
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))
miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))
return miller | Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector. | Below is the the instruction that describes the task:
### Input:
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
### Response:
def vec_to_surface(vec):
"""
Transform a float vector to a surface miller index with integers.
Args:
vec (1 by 3 array float vector): input float vector
Return:
the surface miller index of the input vector.
"""
miller = [None] * 3
index = []
for i, value in enumerate(vec):
if abs(value) < 1.e-8:
miller[i] = 0
else:
index.append(i)
if len(index) == 1:
miller[index[0]] = 1
else:
min_index = np.argmin([i for i in vec if i != 0])
true_index = index[min_index]
index.pop(min_index)
frac = []
for i, value in enumerate(index):
frac.append(Fraction(vec[value] / vec[true_index]).limit_denominator(100))
if len(index) == 1:
miller[true_index] = frac[0].denominator
miller[index[0]] = frac[0].numerator
else:
com_lcm = lcm(frac[0].denominator, frac[1].denominator)
miller[true_index] = com_lcm
miller[index[0]] = frac[0].numerator * int(round((com_lcm / frac[0].denominator)))
miller[index[1]] = frac[1].numerator * int(round((com_lcm / frac[1].denominator)))
return miller |
def yeasttruth(args):
"""
%prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts.
"""
p = OptionParser(yeasttruth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
pillars = args[0]
gffiles = args[1:]
aliases = {}
pivot = {}
for gffile in gffiles:
is_pivot = op.basename(gffile).startswith("Saccharomyces_cerevisiae")
gff = Gff(gffile)
for g in gff:
if g.type != "gene":
continue
for a in g.attributes["Alias"]:
aliases[a] = g.accn
if is_pivot:
pivot[a] = g.accn
logging.debug("Aliases imported: {0}".format(len(aliases)))
logging.debug("Pivot imported: {0}".format(len(pivot)))
fw = open("yeast.aliases", "w")
for k, v in sorted(aliases.items()):
print("\t".join((k, v)), file=fw)
fw.close()
fp = open(pillars)
pairs = set()
fw = must_open(opts.outfile, "w")
for row in fp:
atoms = [x for x in row.split() if x != "---"]
pps = [pivot[x] for x in atoms if x in pivot]
atoms = [aliases[x] for x in atoms if x in aliases]
for p in pps:
for a in atoms:
if p == a:
continue
pairs.add(tuple(sorted((p, a))))
for a, b in sorted(pairs):
print("\t".join((a, b)), file=fw)
fw.close() | %prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts. | Below is the the instruction that describes the task:
### Input:
%prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts.
### Response:
def yeasttruth(args):
"""
%prog yeasttruth Pillars.tab *.gff
Prepare pairs data for 14 yeasts.
"""
p = OptionParser(yeasttruth.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
pillars = args[0]
gffiles = args[1:]
aliases = {}
pivot = {}
for gffile in gffiles:
is_pivot = op.basename(gffile).startswith("Saccharomyces_cerevisiae")
gff = Gff(gffile)
for g in gff:
if g.type != "gene":
continue
for a in g.attributes["Alias"]:
aliases[a] = g.accn
if is_pivot:
pivot[a] = g.accn
logging.debug("Aliases imported: {0}".format(len(aliases)))
logging.debug("Pivot imported: {0}".format(len(pivot)))
fw = open("yeast.aliases", "w")
for k, v in sorted(aliases.items()):
print("\t".join((k, v)), file=fw)
fw.close()
fp = open(pillars)
pairs = set()
fw = must_open(opts.outfile, "w")
for row in fp:
atoms = [x for x in row.split() if x != "---"]
pps = [pivot[x] for x in atoms if x in pivot]
atoms = [aliases[x] for x in atoms if x in aliases]
for p in pps:
for a in atoms:
if p == a:
continue
pairs.add(tuple(sorted((p, a))))
for a, b in sorted(pairs):
print("\t".join((a, b)), file=fw)
fw.close() |
def _find_logs(self, compile_workunit):
"""Finds all logs under the given workunit."""
for idx, workunit in enumerate(compile_workunit.children):
for output_name, outpath in workunit.output_paths().items():
if output_name in ('stdout', 'stderr'):
yield idx, workunit.name, output_name, outpath | Finds all logs under the given workunit. | Below is the the instruction that describes the task:
### Input:
Finds all logs under the given workunit.
### Response:
def _find_logs(self, compile_workunit):
"""Finds all logs under the given workunit."""
for idx, workunit in enumerate(compile_workunit.children):
for output_name, outpath in workunit.output_paths().items():
if output_name in ('stdout', 'stderr'):
yield idx, workunit.name, output_name, outpath |
def setup_dir(f):
''' Decorate f to run inside the directory where setup.py resides.
'''
setup_dir = os.path.dirname(os.path.abspath(__file__))
def wrapped(*args, **kwargs):
with chdir(setup_dir):
return f(*args, **kwargs)
return wrapped | Decorate f to run inside the directory where setup.py resides. | Below is the the instruction that describes the task:
### Input:
Decorate f to run inside the directory where setup.py resides.
### Response:
def setup_dir(f):
''' Decorate f to run inside the directory where setup.py resides.
'''
setup_dir = os.path.dirname(os.path.abspath(__file__))
def wrapped(*args, **kwargs):
with chdir(setup_dir):
return f(*args, **kwargs)
return wrapped |
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
"""Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
batch_shape_static = tensorshape_util.constant_value_as_shape(new_shape)
if tensorshape_util.is_fully_defined(batch_shape_static):
return np.int32(batch_shape_static), batch_shape_static, []
with tf.name_scope(name or "calculate_reshape"):
original_size = tf.reduce_prod(input_tensor=original_shape)
implicit_dim = tf.equal(new_shape, -1)
size_implicit_dim = (
original_size // tf.maximum(1, -tf.reduce_prod(input_tensor=new_shape)))
new_ndims = tf.shape(input=new_shape)
expanded_new_shape = tf.where( # Assumes exactly one `-1`.
implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
validations = [] if not validate else [ # pylint: disable=g-long-ternary
assert_util.assert_rank(
original_shape, 1, message="Original shape must be a vector."),
assert_util.assert_rank(
new_shape, 1, message="New shape must be a vector."),
assert_util.assert_less_equal(
tf.math.count_nonzero(implicit_dim, dtype=tf.int32),
1,
message="At most one dimension can be unknown."),
assert_util.assert_positive(
expanded_new_shape, message="Shape elements must be >=-1."),
assert_util.assert_equal(
tf.reduce_prod(input_tensor=expanded_new_shape),
original_size,
message="Shape sizes do not match."),
]
return expanded_new_shape, batch_shape_static, validations | Calculates the reshaped dimensions (replacing up to one -1 in reshape). | Below is the the instruction that describes the task:
### Input:
Calculates the reshaped dimensions (replacing up to one -1 in reshape).
### Response:
def calculate_reshape(original_shape, new_shape, validate=False, name=None):
"""Calculates the reshaped dimensions (replacing up to one -1 in reshape)."""
batch_shape_static = tensorshape_util.constant_value_as_shape(new_shape)
if tensorshape_util.is_fully_defined(batch_shape_static):
return np.int32(batch_shape_static), batch_shape_static, []
with tf.name_scope(name or "calculate_reshape"):
original_size = tf.reduce_prod(input_tensor=original_shape)
implicit_dim = tf.equal(new_shape, -1)
size_implicit_dim = (
original_size // tf.maximum(1, -tf.reduce_prod(input_tensor=new_shape)))
new_ndims = tf.shape(input=new_shape)
expanded_new_shape = tf.where( # Assumes exactly one `-1`.
implicit_dim, tf.fill(new_ndims, size_implicit_dim), new_shape)
validations = [] if not validate else [ # pylint: disable=g-long-ternary
assert_util.assert_rank(
original_shape, 1, message="Original shape must be a vector."),
assert_util.assert_rank(
new_shape, 1, message="New shape must be a vector."),
assert_util.assert_less_equal(
tf.math.count_nonzero(implicit_dim, dtype=tf.int32),
1,
message="At most one dimension can be unknown."),
assert_util.assert_positive(
expanded_new_shape, message="Shape elements must be >=-1."),
assert_util.assert_equal(
tf.reduce_prod(input_tensor=expanded_new_shape),
original_size,
message="Shape sizes do not match."),
]
return expanded_new_shape, batch_shape_static, validations |
def _debug_info(self):
"""Show a list of recently variables info.
"""
self._msg('DEBUG')
self._msg2('WorkDir: {0}'.format(self._curdir))
self._msg2('Cookies: {0}'.format(self._session.cookies))
self._msg2('Headers: {0}'.format(self._session.headers))
self._msg2('Configs: {0}'.format(self._config))
self._msg2('Customs: {0}'.format(self._custom))
self._msg2('Account: {0}'.format(self._account)) | Show a list of recently variables info. | Below is the the instruction that describes the task:
### Input:
Show a list of recently variables info.
### Response:
def _debug_info(self):
"""Show a list of recently variables info.
"""
self._msg('DEBUG')
self._msg2('WorkDir: {0}'.format(self._curdir))
self._msg2('Cookies: {0}'.format(self._session.cookies))
self._msg2('Headers: {0}'.format(self._session.headers))
self._msg2('Configs: {0}'.format(self._config))
self._msg2('Customs: {0}'.format(self._custom))
self._msg2('Account: {0}'.format(self._account)) |
def get_actuators(self):
"""
Get actuators as a dictionary of format ``{name: status}``
"""
return {i.name: i.status for i in self.system.actuators} | Get actuators as a dictionary of format ``{name: status}`` | Below is the the instruction that describes the task:
### Input:
Get actuators as a dictionary of format ``{name: status}``
### Response:
def get_actuators(self):
"""
Get actuators as a dictionary of format ``{name: status}``
"""
return {i.name: i.status for i in self.system.actuators} |
def copy(self, resource_view):
# type: (Union[ResourceView,Dict,str]) -> None
"""Copies all fields except id, resource_id and package_id from another resource view.
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None
"""
if isinstance(resource_view, str):
if is_valid_uuid(resource_view) is False:
raise HDXError('%s is not a valid resource view id!' % resource_view)
resource_view = ResourceView.read_from_hdx(resource_view)
if not isinstance(resource_view, dict) and not isinstance(resource_view, ResourceView):
raise HDXError('%s is not a valid resource view!' % resource_view)
for key in resource_view:
if key not in ('id', 'resource_id', 'package_id'):
self.data[key] = resource_view[key] | Copies all fields except id, resource_id and package_id from another resource view.
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None | Below is the the instruction that describes the task:
### Input:
Copies all fields except id, resource_id and package_id from another resource view.
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None
### Response:
def copy(self, resource_view):
# type: (Union[ResourceView,Dict,str]) -> None
"""Copies all fields except id, resource_id and package_id from another resource view.
Args:
resource_view (Union[ResourceView,Dict,str]): Either a resource view id or resource view metadata either from a ResourceView object or a dictionary
Returns:
None
"""
if isinstance(resource_view, str):
if is_valid_uuid(resource_view) is False:
raise HDXError('%s is not a valid resource view id!' % resource_view)
resource_view = ResourceView.read_from_hdx(resource_view)
if not isinstance(resource_view, dict) and not isinstance(resource_view, ResourceView):
raise HDXError('%s is not a valid resource view!' % resource_view)
for key in resource_view:
if key not in ('id', 'resource_id', 'package_id'):
self.data[key] = resource_view[key] |
def get_rotation_matrix(axis, angle):
"""Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
The Input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array):
"""
axis = normalize(np.array(axis))
if not (np.array([1, 1, 1]).shape) == (3, ):
raise ValueError('axis.shape has to be 3')
angle = float(angle)
return _jit_get_rotation_matrix(axis, angle) | Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
The Input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array): | Below is the the instruction that describes the task:
### Input:
Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
The Input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array):
### Response:
def get_rotation_matrix(axis, angle):
"""Returns the rotation matrix.
This function returns a matrix for the counterclockwise rotation
around the given axis.
The Input angle is in radians.
Args:
axis (vector):
angle (float):
Returns:
Rotation matrix (np.array):
"""
axis = normalize(np.array(axis))
if not (np.array([1, 1, 1]).shape) == (3, ):
raise ValueError('axis.shape has to be 3')
angle = float(angle)
return _jit_get_rotation_matrix(axis, angle) |
def find_atomics(formula: Formula) -> Set[PLAtomic]:
"""Finds all the atomic formulas"""
f = formula
res = set()
if isinstance(formula, PLFormula):
res = formula.find_atomics()
# elif isinstance(f, PLNot):
# res = res.union(find_atomics(f.f))
# elif isinstance(f, PLBinaryOperator):
# for subf in f.formulas:
# res = res.union(find_atomics(subf))
else:
res.add(f)
return res | Finds all the atomic formulas | Below is the the instruction that describes the task:
### Input:
Finds all the atomic formulas
### Response:
def find_atomics(formula: Formula) -> Set[PLAtomic]:
"""Finds all the atomic formulas"""
f = formula
res = set()
if isinstance(formula, PLFormula):
res = formula.find_atomics()
# elif isinstance(f, PLNot):
# res = res.union(find_atomics(f.f))
# elif isinstance(f, PLBinaryOperator):
# for subf in f.formulas:
# res = res.union(find_atomics(subf))
else:
res.add(f)
return res |
def ostype_2_json(self):
"""
transform ariane_clip3 OS Type object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("OSType.ostype_2_json")
json_obj = {
'osTypeID': self.id,
'osTypeName': self.name,
'osTypeArchitecture': self.architecture,
'osTypeCompanyID': self.company_id,
'osTypeOSInstancesID': self.osi_ids
}
return json.dumps(json_obj) | transform ariane_clip3 OS Type object to Ariane server JSON obj
:return: Ariane JSON obj | Below is the the instruction that describes the task:
### Input:
transform ariane_clip3 OS Type object to Ariane server JSON obj
:return: Ariane JSON obj
### Response:
def ostype_2_json(self):
"""
transform ariane_clip3 OS Type object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("OSType.ostype_2_json")
json_obj = {
'osTypeID': self.id,
'osTypeName': self.name,
'osTypeArchitecture': self.architecture,
'osTypeCompanyID': self.company_id,
'osTypeOSInstancesID': self.osi_ids
}
return json.dumps(json_obj) |
def load(self):
"""
Load the dataset using pylearn2.config.yaml_parse.
"""
from pylearn2.config import yaml_parse
from pylearn2.datasets import Dataset
dataset = yaml_parse.load(self.yaml_string)
assert isinstance(dataset, Dataset)
data = dataset.iterator(mode='sequential', num_batches=1,
data_specs=dataset.data_specs,
return_tuple=True).next()
if len(data) == 2:
X, y = data
y = np.squeeze(y)
if self.one_hot:
y = np.argmax(y, axis=1)
else:
X = data
y = None
return X, y | Load the dataset using pylearn2.config.yaml_parse. | Below is the the instruction that describes the task:
### Input:
Load the dataset using pylearn2.config.yaml_parse.
### Response:
def load(self):
"""
Load the dataset using pylearn2.config.yaml_parse.
"""
from pylearn2.config import yaml_parse
from pylearn2.datasets import Dataset
dataset = yaml_parse.load(self.yaml_string)
assert isinstance(dataset, Dataset)
data = dataset.iterator(mode='sequential', num_batches=1,
data_specs=dataset.data_specs,
return_tuple=True).next()
if len(data) == 2:
X, y = data
y = np.squeeze(y)
if self.one_hot:
y = np.argmax(y, axis=1)
else:
X = data
y = None
return X, y |
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Docstring is inherited from the LGBMModel."""
result = self.predict_proba(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if raw_score or pred_leaf or pred_contrib:
return result
else:
class_index = np.argmax(result, axis=1)
return self._le.inverse_transform(class_index) | Docstring is inherited from the LGBMModel. | Below is the the instruction that describes the task:
### Input:
Docstring is inherited from the LGBMModel.
### Response:
def predict(self, X, raw_score=False, num_iteration=None,
pred_leaf=False, pred_contrib=False, **kwargs):
"""Docstring is inherited from the LGBMModel."""
result = self.predict_proba(X, raw_score, num_iteration,
pred_leaf, pred_contrib, **kwargs)
if raw_score or pred_leaf or pred_contrib:
return result
else:
class_index = np.argmax(result, axis=1)
return self._le.inverse_transform(class_index) |
def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs.
"""
logger.info("Opening CLI recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = (
'Search for "{}" in the logs for subsequent recruitment URLs.\n'
"Open the logs for this experiment with "
'"dallinger logs --app {}"'.format(
NEW_RECRUIT_LOG_PREFIX, self.config.get("id")
)
)
return {"items": recruitments, "message": message} | Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs. | Below is the the instruction that describes the task:
### Input:
Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs.
### Response:
def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiemnt logs.
"""
logger.info("Opening CLI recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = (
'Search for "{}" in the logs for subsequent recruitment URLs.\n'
"Open the logs for this experiment with "
'"dallinger logs --app {}"'.format(
NEW_RECRUIT_LOG_PREFIX, self.config.get("id")
)
)
return {"items": recruitments, "message": message} |
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y | Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info. | Below is the the instruction that describes the task:
### Input:
Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
### Response:
def deepcopy(x, memo=None, _nil=[]):
"""Deep copy operation on arbitrary Python objects.
See the module's __doc__ string for more info.
"""
if memo is None:
memo = {}
d = id(x)
y = memo.get(d, _nil)
if y is not _nil:
return y
cls = type(x)
copier = _deepcopy_dispatch.get(cls)
if copier:
y = copier(x, memo)
else:
try:
issc = issubclass(cls, type)
except TypeError: # cls is not a class (old Boost; see SF #502085)
issc = 0
if issc:
y = _deepcopy_atomic(x, memo)
else:
copier = getattr(x, "__deepcopy__", None)
if copier:
y = copier(memo)
else:
reductor = dispatch_table.get(cls)
if reductor:
rv = reductor(x)
else:
reductor = getattr(x, "__reduce_ex__", None)
if reductor:
rv = reductor(2)
else:
reductor = getattr(x, "__reduce__", None)
if reductor:
rv = reductor()
else:
raise Error(
"un(deep)copyable object of type %s" % cls)
y = _reconstruct(x, rv, 1, memo)
memo[d] = y
_keep_alive(x, memo) # Make sure x lives at least as long as d
return y |
def get_transport_info(self, key: str) -> Any:
"""
Get extra info from the transport.
Supported keys:
- ``peername``
- ``socket``
- ``sockname``
- ``compression``
- ``cipher``
- ``peercert``
- ``sslcontext``
- ``sslobject``
:raises SMTPServerDisconnected: connection lost
"""
self._raise_error_if_disconnected()
return self.transport.get_extra_info(key) | Get extra info from the transport.
Supported keys:
- ``peername``
- ``socket``
- ``sockname``
- ``compression``
- ``cipher``
- ``peercert``
- ``sslcontext``
- ``sslobject``
:raises SMTPServerDisconnected: connection lost | Below is the the instruction that describes the task:
### Input:
Get extra info from the transport.
Supported keys:
- ``peername``
- ``socket``
- ``sockname``
- ``compression``
- ``cipher``
- ``peercert``
- ``sslcontext``
- ``sslobject``
:raises SMTPServerDisconnected: connection lost
### Response:
def get_transport_info(self, key: str) -> Any:
"""
Get extra info from the transport.
Supported keys:
- ``peername``
- ``socket``
- ``sockname``
- ``compression``
- ``cipher``
- ``peercert``
- ``sslcontext``
- ``sslobject``
:raises SMTPServerDisconnected: connection lost
"""
self._raise_error_if_disconnected()
return self.transport.get_extra_info(key) |
def label_boundary_cells(network=None, boundary_faces=None):
r"""
Takes 2D or 3D network and assign labels to boundary pores
Parameters
----------
network : dictionary
A dictionary as produced by the SNOW network extraction algorithms
containing edge/vertex, site/bond, node/link information.
boundary_faces : list of strings
The user can choose βleftβ, βrightβ, βtopβ, βbottomβ, βfrontβ and
βbackβ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
Returns
-------
The same dictionar s pass ing, but containing boundary nodes labels. For
example network['pore.left'], network['pore.right'], network['pore.top'],
network['pore.bottom'] etc.
Notes
-----
The dictionary names use the OpenPNM convention so it may be converted
directly to an OpenPNM network object using the ``update`` command.
"""
f = boundary_faces
if f is not None:
coords = network['pore.coords']
condition = coords[~network['pore.boundary']]
dic = {'left': 0, 'right': 0, 'front': 1, 'back': 1,
'top': 2, 'bottom': 2}
if all(coords[:, 2] == 0):
dic['top'] = 1
dic['bottom'] = 1
for i in f:
if i in ['left', 'front', 'bottom']:
network['pore.{}'.format(i)] = (coords[:, dic[i]] <
min(condition[:, dic[i]]))
elif i in ['right', 'back', 'top']:
network['pore.{}'.format(i)] = (coords[:, dic[i]] >
max(condition[:, dic[i]]))
return network | r"""
Takes 2D or 3D network and assign labels to boundary pores
Parameters
----------
network : dictionary
A dictionary as produced by the SNOW network extraction algorithms
containing edge/vertex, site/bond, node/link information.
boundary_faces : list of strings
The user can choose βleftβ, βrightβ, βtopβ, βbottomβ, βfrontβ and
βbackβ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
Returns
-------
The same dictionar s pass ing, but containing boundary nodes labels. For
example network['pore.left'], network['pore.right'], network['pore.top'],
network['pore.bottom'] etc.
Notes
-----
The dictionary names use the OpenPNM convention so it may be converted
directly to an OpenPNM network object using the ``update`` command. | Below is the the instruction that describes the task:
### Input:
r"""
Takes 2D or 3D network and assign labels to boundary pores
Parameters
----------
network : dictionary
A dictionary as produced by the SNOW network extraction algorithms
containing edge/vertex, site/bond, node/link information.
boundary_faces : list of strings
The user can choose βleftβ, βrightβ, βtopβ, βbottomβ, βfrontβ and
βbackβ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
Returns
-------
The same dictionar s pass ing, but containing boundary nodes labels. For
example network['pore.left'], network['pore.right'], network['pore.top'],
network['pore.bottom'] etc.
Notes
-----
The dictionary names use the OpenPNM convention so it may be converted
directly to an OpenPNM network object using the ``update`` command.
### Response:
def label_boundary_cells(network=None, boundary_faces=None):
r"""
Takes 2D or 3D network and assign labels to boundary pores
Parameters
----------
network : dictionary
A dictionary as produced by the SNOW network extraction algorithms
containing edge/vertex, site/bond, node/link information.
boundary_faces : list of strings
The user can choose 'left', 'right', 'top', 'bottom', 'front' and
'back' face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
Returns
-------
The same dictionary as passed in, but containing boundary node labels. For
example network['pore.left'], network['pore.right'], network['pore.top'],
network['pore.bottom'] etc.
Notes
-----
The dictionary names use the OpenPNM convention so it may be converted
directly to an OpenPNM network object using the ``update`` command.
"""
f = boundary_faces
if f is not None:
coords = network['pore.coords']
condition = coords[~network['pore.boundary']]
dic = {'left': 0, 'right': 0, 'front': 1, 'back': 1,
'top': 2, 'bottom': 2}
if all(coords[:, 2] == 0):
dic['top'] = 1
dic['bottom'] = 1
for i in f:
if i in ['left', 'front', 'bottom']:
network['pore.{}'.format(i)] = (coords[:, dic[i]] <
min(condition[:, dic[i]]))
elif i in ['right', 'back', 'top']:
network['pore.{}'.format(i)] = (coords[:, dic[i]] >
max(condition[:, dic[i]]))
return network |
def import_model(self, source):
"""Import and return model instance."""
source = self._resolve_source(source)
self._context = FilePathContext(source)
with self._context.open() as f:
self._reader = self._open_reader(f)
return self._reader.create_model() | Import and return model instance. | Below is the instruction that describes the task:
### Input:
Import and return model instance.
### Response:
def import_model(self, source):
"""Import and return model instance."""
source = self._resolve_source(source)
self._context = FilePathContext(source)
with self._context.open() as f:
self._reader = self._open_reader(f)
return self._reader.create_model() |
def make_requester(self, my_args=None):
"""
make a new requester instance and handle it from driver
:param my_args: dict like {request_q}. Default : None
:return: created requester proxy
"""
LOGGER.debug("natsd.Driver.make_requester")
if my_args is None:
raise exceptions.ArianeConfError('requester factory arguments')
if not self.configuration_OK or self.connection_args is None:
raise exceptions.ArianeConfError('NATS connection arguments')
requester = Requester.start(my_args, self.connection_args).proxy()
self.requester_registry.append(requester)
return requester | make a new requester instance and handle it from driver
:param my_args: dict like {request_q}. Default : None
:return: created requester proxy | Below is the instruction that describes the task:
### Input:
make a new requester instance and handle it from driver
:param my_args: dict like {request_q}. Default : None
:return: created requester proxy
### Response:
def make_requester(self, my_args=None):
"""
make a new requester instance and handle it from driver
:param my_args: dict like {request_q}. Default : None
:return: created requester proxy
"""
LOGGER.debug("natsd.Driver.make_requester")
if my_args is None:
raise exceptions.ArianeConfError('requester factory arguments')
if not self.configuration_OK or self.connection_args is None:
raise exceptions.ArianeConfError('NATS connection arguments')
requester = Requester.start(my_args, self.connection_args).proxy()
self.requester_registry.append(requester)
return requester |
def value(self):
"""Retrieve the data value of this attachment.
Will show the filename of the attachment if there is an attachment available otherwise None
Use save_as in order to download as a file.
Example
-------
>>> file_attachment_property = project.part('Bike').property('file_attachment')
>>> if file_attachment_property.value:
... file_attachment_property.save_as('file.ext')
... else:
... print('file attachment not set, its value is None')
"""
if 'value' in self._json_data and self._json_data['value']:
return "[Attachment: {}]".format(self._json_data['value'].split('/')[-1])
else:
return None | Retrieve the data value of this attachment.
Will show the filename of the attachment if there is an attachment available otherwise None
Use save_as in order to download as a file.
Example
-------
>>> file_attachment_property = project.part('Bike').property('file_attachment')
>>> if file_attachment_property.value:
... file_attachment_property.save_as('file.ext')
... else:
... print('file attachment not set, its value is None') | Below is the instruction that describes the task:
### Input:
Retrieve the data value of this attachment.
Will show the filename of the attachment if there is an attachment available otherwise None
Use save_as in order to download as a file.
Example
-------
>>> file_attachment_property = project.part('Bike').property('file_attachment')
>>> if file_attachment_property.value:
... file_attachment_property.save_as('file.ext')
... else:
... print('file attachment not set, its value is None')
### Response:
def value(self):
"""Retrieve the data value of this attachment.
Will show the filename of the attachment if there is an attachment available otherwise None
Use save_as in order to download as a file.
Example
-------
>>> file_attachment_property = project.part('Bike').property('file_attachment')
>>> if file_attachment_property.value:
... file_attachment_property.save_as('file.ext')
... else:
... print('file attachment not set, its value is None')
"""
if 'value' in self._json_data and self._json_data['value']:
return "[Attachment: {}]".format(self._json_data['value'].split('/')[-1])
else:
return None |
def eval_one_max(traj, individual):
"""The fitness function"""
traj.f_add_result('$set.$.individual', list(individual))
fitness = sum(individual)
traj.f_add_result('$set.$.fitness', fitness)
traj.f_store()
return (fitness,) | The fitness function | Below is the instruction that describes the task:
### Input:
The fitness function
### Response:
def eval_one_max(traj, individual):
"""The fitness function"""
traj.f_add_result('$set.$.individual', list(individual))
fitness = sum(individual)
traj.f_add_result('$set.$.fitness', fitness)
traj.f_store()
return (fitness,) |
def _clause_formatter(self, cond):
'''Formats conditions
args is a list of ['field', 'operator', 'value']
'''
if len(cond) == 2 :
cond = ' '.join(cond)
return cond
if 'in' in cond[1].lower() :
if not isinstance(cond[2], (tuple, list)):
raise TypeError('("{0}") must be of type <type tuple> or <type list>'.format(cond[2]))
if 'select' not in cond[2][0].lower() :
cond[2] = "({0})".format(','.join(map(str,["'{0}'".format(e) for e in cond[2]])))
else:
cond[2] = "({0})".format(','.join(map(str,["{0}".format(e) for e in cond[2]])))
cond = " ".join(cond)
else:
#if isinstance(cond[2], str):
# var = re.match('^@(\w+)$', cond[2])
#else:
# var = None
#if var :
if isinstance(cond[2], str) and cond[2].startswith('@'):
cond[2] = "{0}".format(cond[2])
else :
cond[2] = "'{0}'".format(cond[2])
cond = ' '.join(cond)
return cond | Formats conditions
args is a list of ['field', 'operator', 'value'] | Below is the instruction that describes the task:
### Input:
Formats conditions
args is a list of ['field', 'operator', 'value']
### Response:
def _clause_formatter(self, cond):
'''Formats conditions
args is a list of ['field', 'operator', 'value']
'''
if len(cond) == 2 :
cond = ' '.join(cond)
return cond
if 'in' in cond[1].lower() :
if not isinstance(cond[2], (tuple, list)):
raise TypeError('("{0}") must be of type <type tuple> or <type list>'.format(cond[2]))
if 'select' not in cond[2][0].lower() :
cond[2] = "({0})".format(','.join(map(str,["'{0}'".format(e) for e in cond[2]])))
else:
cond[2] = "({0})".format(','.join(map(str,["{0}".format(e) for e in cond[2]])))
cond = " ".join(cond)
else:
#if isinstance(cond[2], str):
# var = re.match('^@(\w+)$', cond[2])
#else:
# var = None
#if var :
if isinstance(cond[2], str) and cond[2].startswith('@'):
cond[2] = "{0}".format(cond[2])
else :
cond[2] = "'{0}'".format(cond[2])
cond = ' '.join(cond)
return cond |
def add(self, items):
"""Add items to options"""
options = self._create_options(items)
for k, v in options.items():
if k in self.labels and v not in self.items:
options.pop(k)
count = 0
while f'{k}_{count}' in self.labels:
count += 1
options[f'{k}_{count}'] = v
self.widget.options.update(options)
self.widget.param.trigger('options')
self.widget.value = list(options.values())[:1] | Add items to options | Below is the instruction that describes the task:
### Input:
Add items to options
### Response:
def add(self, items):
"""Add items to options"""
options = self._create_options(items)
for k, v in options.items():
if k in self.labels and v not in self.items:
options.pop(k)
count = 0
while f'{k}_{count}' in self.labels:
count += 1
options[f'{k}_{count}'] = v
self.widget.options.update(options)
self.widget.param.trigger('options')
self.widget.value = list(options.values())[:1] |
def enumerate_builtins(tokens):
"""
Returns a list of all the builtins being used in *tokens*.
"""
out = []
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_string in builtins:
# Note: I need to test if print can be replaced in Python 3
special_special = ['print'] # Print is special in Python 2
if py3:
special_special = []
if token_string not in special_special:
if not token_string.startswith('__'): # Don't count magic funcs
if tokens[index-1][1] != '.' and tokens[index+1][1] != '=':
if token_string not in out:
out.append(token_string)
return out | Returns a list of all the builtins being used in *tokens*. | Below is the instruction that describes the task:
### Input:
Returns a list of all the builtins being used in *tokens*.
### Response:
def enumerate_builtins(tokens):
"""
Returns a list of all the builtins being used in *tokens*.
"""
out = []
for index, tok in enumerate(tokens):
token_type = tok[0]
token_string = tok[1]
if token_string in builtins:
# Note: I need to test if print can be replaced in Python 3
special_special = ['print'] # Print is special in Python 2
if py3:
special_special = []
if token_string not in special_special:
if not token_string.startswith('__'): # Don't count magic funcs
if tokens[index-1][1] != '.' and tokens[index+1][1] != '=':
if token_string not in out:
out.append(token_string)
return out |
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) | Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr` | Below is the instruction that describes the task:
### Input:
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
### Response:
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
) |
def pprint(self, output, prefix=""):
"""
Pretty-print the encoded output using ascii art.
:param output: to print
:param prefix: printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
offset = description[i][1]
nextoffset = description[i+1][1]
print "%s |" % bitsToString(output[offset:nextoffset]),
print | Pretty-print the encoded output using ascii art.
:param output: to print
:param prefix: printed before the header if specified | Below is the instruction that describes the task:
### Input:
Pretty-print the encoded output using ascii art.
:param output: to print
:param prefix: printed before the header if specified
### Response:
def pprint(self, output, prefix=""):
"""
Pretty-print the encoded output using ascii art.
:param output: to print
:param prefix: printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
offset = description[i][1]
nextoffset = description[i+1][1]
print "%s |" % bitsToString(output[offset:nextoffset]),
print |
def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False):
"""Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
"""
if dropBad:
self.dropBadEntries()
if invert:
self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()}
else:
self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()} | Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True` | Below is the instruction that describes the task:
### Input:
Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
### Response:
def dropNonJournals(self, ptVal = 'J', dropBad = True, invert = False):
"""Drops the non journal type `Records` from the collection, this is done by checking _ptVal_ against the PT tag
# Parameters
_ptVal_ : `optional [str]`
> Default `'J'`, The value of the PT tag to be kept, default is `'J'` the journal tag, other tags can be substituted.
_dropBad_ : `optional [bool]`
> Default `True`, if `True` bad `Records` will be dropped as well those that are not journal entries
_invert_ : `optional [bool]`
> Default `False`, Set `True` to drop journals (or the PT tag given by _ptVal_) instead of keeping them. **Note**, it still drops bad Records if _dropBad_ is `True`
"""
if dropBad:
self.dropBadEntries()
if invert:
self._collection = {r for r in self._collection if r['pubType'] != ptVal.upper()}
else:
self._collection = {r for r in self._collection if r['pubType'] == ptVal.upper()} |
def set_config(config_file='/etc/dnsmasq.conf', follow=True, **kwargs):
'''
Sets a value or a set of values in the specified file. By default, if
conf-dir is configured in this file, salt will attempt to set the option
in any file inside the conf-dir where it has already been enabled. If it
does not find it inside any files, it will append it to the main config
file. Setting follow to False will turn off this behavior.
If a config option currently appears multiple times (such as dhcp-host,
which is specified at least once per host), the new option will be added
to the end of the main config file (and not to any includes). If you need
an option added to a specific include file, specify it as the config_file.
:param string config_file: config file where settings should be updated / added.
:param bool follow: attempt to set the config option inside any file within
the ``conf-dir`` where it has already been enabled.
:param kwargs: key value pairs that contain the configuration settings that you
want set.
CLI Examples:
.. code-block:: bash
salt '*' dnsmasq.set_config domain=mydomain.com
salt '*' dnsmasq.set_config follow=False domain=mydomain.com
salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com
'''
dnsopts = get_config(config_file)
includes = [config_file]
if follow is True and 'conf-dir' in dnsopts:
for filename in os.listdir(dnsopts['conf-dir']):
if filename.startswith('.'):
continue
if filename.endswith('~'):
continue
if filename.endswith('bak'):
continue
if filename.endswith('#') and filename.endswith('#'):
continue
includes.append('{0}/{1}'.format(dnsopts['conf-dir'], filename))
ret_kwargs = {}
for key in kwargs:
# Filter out __pub keys as they should not be added to the config file
# See Issue #34263 for more information
if key.startswith('__'):
continue
ret_kwargs[key] = kwargs[key]
if key in dnsopts:
if isinstance(dnsopts[key], six.string_types):
for config in includes:
__salt__['file.sed'](path=config,
before='^{0}=.*'.format(key),
after='{0}={1}'.format(key, kwargs[key]))
else:
__salt__['file.append'](config_file,
'{0}={1}'.format(key, kwargs[key]))
else:
__salt__['file.append'](config_file,
'{0}={1}'.format(key, kwargs[key]))
return ret_kwargs | Sets a value or a set of values in the specified file. By default, if
conf-dir is configured in this file, salt will attempt to set the option
in any file inside the conf-dir where it has already been enabled. If it
does not find it inside any files, it will append it to the main config
file. Setting follow to False will turn off this behavior.
If a config option currently appears multiple times (such as dhcp-host,
which is specified at least once per host), the new option will be added
to the end of the main config file (and not to any includes). If you need
an option added to a specific include file, specify it as the config_file.
:param string config_file: config file where settings should be updated / added.
:param bool follow: attempt to set the config option inside any file within
the ``conf-dir`` where it has already been enabled.
:param kwargs: key value pairs that contain the configuration settings that you
want set.
CLI Examples:
.. code-block:: bash
salt '*' dnsmasq.set_config domain=mydomain.com
salt '*' dnsmasq.set_config follow=False domain=mydomain.com
salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com | Below is the instruction that describes the task:
### Input:
Sets a value or a set of values in the specified file. By default, if
conf-dir is configured in this file, salt will attempt to set the option
in any file inside the conf-dir where it has already been enabled. If it
does not find it inside any files, it will append it to the main config
file. Setting follow to False will turn off this behavior.
If a config option currently appears multiple times (such as dhcp-host,
which is specified at least once per host), the new option will be added
to the end of the main config file (and not to any includes). If you need
an option added to a specific include file, specify it as the config_file.
:param string config_file: config file where settings should be updated / added.
:param bool follow: attempt to set the config option inside any file within
the ``conf-dir`` where it has already been enabled.
:param kwargs: key value pairs that contain the configuration settings that you
want set.
CLI Examples:
.. code-block:: bash
salt '*' dnsmasq.set_config domain=mydomain.com
salt '*' dnsmasq.set_config follow=False domain=mydomain.com
salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com
### Response:
def set_config(config_file='/etc/dnsmasq.conf', follow=True, **kwargs):
'''
Sets a value or a set of values in the specified file. By default, if
conf-dir is configured in this file, salt will attempt to set the option
in any file inside the conf-dir where it has already been enabled. If it
does not find it inside any files, it will append it to the main config
file. Setting follow to False will turn off this behavior.
If a config option currently appears multiple times (such as dhcp-host,
which is specified at least once per host), the new option will be added
to the end of the main config file (and not to any includes). If you need
an option added to a specific include file, specify it as the config_file.
:param string config_file: config file where settings should be updated / added.
:param bool follow: attempt to set the config option inside any file within
the ``conf-dir`` where it has already been enabled.
:param kwargs: key value pairs that contain the configuration settings that you
want set.
CLI Examples:
.. code-block:: bash
salt '*' dnsmasq.set_config domain=mydomain.com
salt '*' dnsmasq.set_config follow=False domain=mydomain.com
salt '*' dnsmasq.set_config config_file=/etc/dnsmasq.conf domain=mydomain.com
'''
dnsopts = get_config(config_file)
includes = [config_file]
if follow is True and 'conf-dir' in dnsopts:
for filename in os.listdir(dnsopts['conf-dir']):
if filename.startswith('.'):
continue
if filename.endswith('~'):
continue
if filename.endswith('bak'):
continue
if filename.endswith('#') and filename.endswith('#'):
continue
includes.append('{0}/{1}'.format(dnsopts['conf-dir'], filename))
ret_kwargs = {}
for key in kwargs:
# Filter out __pub keys as they should not be added to the config file
# See Issue #34263 for more information
if key.startswith('__'):
continue
ret_kwargs[key] = kwargs[key]
if key in dnsopts:
if isinstance(dnsopts[key], six.string_types):
for config in includes:
__salt__['file.sed'](path=config,
before='^{0}=.*'.format(key),
after='{0}={1}'.format(key, kwargs[key]))
else:
__salt__['file.append'](config_file,
'{0}={1}'.format(key, kwargs[key]))
else:
__salt__['file.append'](config_file,
'{0}={1}'.format(key, kwargs[key]))
return ret_kwargs |
def get_prep_value(self, value):
"""Convert value to JSON string before save"""
try:
return json.dumps(value)
except Exception as err:
raise ValidationError(str(err)) | Convert value to JSON string before save | Below is the instruction that describes the task:
### Input:
Convert value to JSON string before save
### Response:
def get_prep_value(self, value):
"""Convert value to JSON string before save"""
try:
return json.dumps(value)
except Exception as err:
raise ValidationError(str(err)) |
def getenv(variable_key, default=None, required=False, silent=True):
'''getenv will attempt to get an environment variable. If the variable
is not found, None is returned.
:param variable_key: the variable name
:param required: exit with error if not found
:param silent: Do not print debugging information for variable
'''
variable = os.environ.get(variable_key, default)
if variable is None and required:
bot.error("Cannot find environment variable %s, exiting." %variable_key)
sys.exit(1)
if not silent:
if variable is not None:
bot.verbose2("%s found as %s" %(variable_key,variable))
else:
bot.verbose2("%s not defined (None)" %variable_key)
return variable | getenv will attempt to get an environment variable. If the variable
is not found, None is returned.
:param variable_key: the variable name
:param required: exit with error if not found
:param silent: Do not print debugging information for variable | Below is the instruction that describes the task:
### Input:
getenv will attempt to get an environment variable. If the variable
is not found, None is returned.
:param variable_key: the variable name
:param required: exit with error if not found
:param silent: Do not print debugging information for variable
### Response:
def getenv(variable_key, default=None, required=False, silent=True):
'''getenv will attempt to get an environment variable. If the variable
is not found, None is returned.
:param variable_key: the variable name
:param required: exit with error if not found
:param silent: Do not print debugging information for variable
'''
variable = os.environ.get(variable_key, default)
if variable is None and required:
bot.error("Cannot find environment variable %s, exiting." %variable_key)
sys.exit(1)
if not silent:
if variable is not None:
bot.verbose2("%s found as %s" %(variable_key,variable))
else:
bot.verbose2("%s not defined (None)" %variable_key)
return variable |
def ttl(self, name):
"""
get the number of seconds until the key's expiration
:param name: str the name of the redis key
:return: Future()
"""
with self.pipe as pipe:
return pipe.ttl(self.redis_key(name)) | get the number of seconds until the key's expiration
:param name: str the name of the redis key
:return: Future() | Below is the instruction that describes the task:
### Input:
get the number of seconds until the key's expiration
:param name: str the name of the redis key
:return: Future()
### Response:
def ttl(self, name):
"""
get the number of seconds until the key's expiration
:param name: str the name of the redis key
:return: Future()
"""
with self.pipe as pipe:
return pipe.ttl(self.redis_key(name)) |
def storage_volumes(self):
"""
:class:`~zhmcclient.StorageVolumeManager`: Access to the
:term:`storage volumes <storage volume>` in this storage group.
"""
# We do here some lazy loading.
if not self._storage_volumes:
self._storage_volumes = StorageVolumeManager(self)
return self._storage_volumes | :class:`~zhmcclient.StorageVolumeManager`: Access to the
:term:`storage volumes <storage volume>` in this storage group. | Below is the the instruction that describes the task:
### Input:
:class:`~zhmcclient.StorageVolumeManager`: Access to the
:term:`storage volumes <storage volume>` in this storage group.
### Response:
def storage_volumes(self):
"""
:class:`~zhmcclient.StorageVolumeManager`: Access to the
:term:`storage volumes <storage volume>` in this storage group.
"""
# We do here some lazy loading.
if not self._storage_volumes:
self._storage_volumes = StorageVolumeManager(self)
return self._storage_volumes |
def percent_chance(self, pct):
"""Given a ``pct``% chance of something happening right now, decide at
random whether it actually happens, and return ``True`` or
``False`` as appropriate.
Values not between 0 and 100 are treated as though they
were 0 or 100, whichever is nearer.
"""
if pct <= 0:
return False
if pct >= 100:
return True
return pct / 100 < self.random() | Given a ``pct``% chance of something happening right now, decide at
random whether it actually happens, and return ``True`` or
``False`` as appropriate.
Values not between 0 and 100 are treated as though they
were 0 or 100, whichever is nearer. | Below is the the instruction that describes the task:
### Input:
Given a ``pct``% chance of something happening right now, decide at
random whether it actually happens, and return ``True`` or
``False`` as appropriate.
Values not between 0 and 100 are treated as though they
were 0 or 100, whichever is nearer.
### Response:
def percent_chance(self, pct):
"""Given a ``pct``% chance of something happening right now, decide at
random whether it actually happens, and return ``True`` or
``False`` as appropriate.
Values not between 0 and 100 are treated as though they
were 0 or 100, whichever is nearer.
"""
if pct <= 0:
return False
if pct >= 100:
return True
return pct / 100 < self.random() |
def is_js_date_utc(json):
"""Check if the string contains Date.UTC function
and return match group(s) if there is
"""
JS_date_utc_pattern = r'Date\.UTC\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\)'
re_date = re.compile(JS_date_utc_pattern, re.M)
if re_date.search(json):
return re_date.search(json).group(0)
else:
return False | Check if the string contains Date.UTC function
and return match group(s) if there is | Below is the the instruction that describes the task:
### Input:
Check if the string contains Date.UTC function
and return match group(s) if there is
### Response:
def is_js_date_utc(json):
"""Check if the string contains Date.UTC function
and return match group(s) if there is
"""
JS_date_utc_pattern = r'Date\.UTC\(([0-9]+,[0-9]+,[0-9]+)(,[0-9]+,[0-9]+,[0-9]+)?(,[0-9]+)?\)'
re_date = re.compile(JS_date_utc_pattern, re.M)
if re_date.search(json):
return re_date.search(json).group(0)
else:
return False |
def get_witness_for_key_prefix(db, node_hash, key):
"""
Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath
"""
validate_is_bytes(key)
return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key))) | Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath | Below is the the instruction that describes the task:
### Input:
Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath
### Response:
def get_witness_for_key_prefix(db, node_hash, key):
"""
Get all witness given a keypath prefix.
Include
1. witness along the keypath and
2. witness in the subtrie of the last node in keypath
"""
validate_is_bytes(key)
return tuple(_get_witness_for_key_prefix(db, node_hash, encode_to_bin(key))) |
def get_sections_by_building_and_term(building, term):
"""
Returns a list of uw_sws.models.SectionReference objects
for the passed building and term.
"""
url = "{}?{}".format(
section_res_url_prefix,
urlencode([("quarter", term.quarter.lower(),),
("facility_code", building,),
("year", term.year,), ]))
return _json_to_sectionref(get_resource(url)) | Returns a list of uw_sws.models.SectionReference objects
for the passed building and term. | Below is the the instruction that describes the task:
### Input:
Returns a list of uw_sws.models.SectionReference objects
for the passed building and term.
### Response:
def get_sections_by_building_and_term(building, term):
"""
Returns a list of uw_sws.models.SectionReference objects
for the passed building and term.
"""
url = "{}?{}".format(
section_res_url_prefix,
urlencode([("quarter", term.quarter.lower(),),
("facility_code", building,),
("year", term.year,), ]))
return _json_to_sectionref(get_resource(url)) |
def parse_version_information(self, version_struct):
"""Parse version information structure.
The date will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
# The the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo struct
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string and stringfileinfo_string.startswith(u'StringFileInfo'):
if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c)>128:
key_as_char.append('\\x%02x' %ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
setattr(stringtable_struct, key_as_char, value)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
new_stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
# check if the entry is crafted in a way that would lead to an infinite
# loop and break if so
#
if new_stringtable_offset == stringtable_offset:
break
stringtable_offset = new_stringtable_offset
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string and stringfileinfo_string.startswith( u'VarFileInfo' ):
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
if isinstance(word1, (int, long)) and isinstance(word1, (int, long)):
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break | Parse version information structure.
The date will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var. | Below is the the instruction that describes the task:
### Input:
Parse version information structure.
The date will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
### Response:
def parse_version_information(self, version_struct):
"""Parse version information structure.
The date will be made available in three attributes of the PE object.
VS_VERSIONINFO will contain the first three fields of the main structure:
'Length', 'ValueLength', and 'Type'
VS_FIXEDFILEINFO will hold the rest of the fields, accessible as sub-attributes:
'Signature', 'StrucVersion', 'FileVersionMS', 'FileVersionLS',
'ProductVersionMS', 'ProductVersionLS', 'FileFlagsMask', 'FileFlags',
'FileOS', 'FileType', 'FileSubtype', 'FileDateMS', 'FileDateLS'
FileInfo is a list of all StringFileInfo and VarFileInfo structures.
StringFileInfo structures will have a list as an attribute named 'StringTable'
containing all the StringTable structures. Each of those structures contains a
dictionary 'entries' with all the key/value version information string pairs.
VarFileInfo structures will have a list as an attribute named 'Var' containing
all Var structures. Each Var structure will have a dictionary as an attribute
named 'entry' which will contain the name and value of the Var.
"""
# Retrieve the data for the version info resource
#
start_offset = self.get_offset_from_rva( version_struct.OffsetToData )
raw_data = self.__data__[ start_offset : start_offset+version_struct.Size ]
# Map the main structure and the subsequent string
#
versioninfo_struct = self.__unpack_data__(
self.__VS_VERSIONINFO_format__, raw_data,
file_offset = start_offset )
if versioninfo_struct is None:
return
ustr_offset = version_struct.OffsetToData + versioninfo_struct.sizeof()
try:
versioninfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VS_VERSION_INFO string. Can\'t ' +
'read unicode string at offset 0x%x' % (
ustr_offset ) )
versioninfo_string = None
# If the structure does not contain the expected name, it's assumed to be invalid
#
if versioninfo_string != u'VS_VERSION_INFO':
self.__warnings.append('Invalid VS_VERSION_INFO block')
return
# Set the PE object's VS_VERSIONINFO to this one
#
self.VS_VERSIONINFO = versioninfo_struct
# The the Key attribute to point to the unicode string identifying the structure
#
self.VS_VERSIONINFO.Key = versioninfo_string
# Process the fixed version information, get the offset and structure
#
fixedfileinfo_offset = self.dword_align(
versioninfo_struct.sizeof() + 2 * (len(versioninfo_string) + 1),
version_struct.OffsetToData)
fixedfileinfo_struct = self.__unpack_data__(
self.__VS_FIXEDFILEINFO_format__,
raw_data[fixedfileinfo_offset:],
file_offset = start_offset+fixedfileinfo_offset )
if not fixedfileinfo_struct:
return
# Set the PE object's VS_FIXEDFILEINFO to this one
#
self.VS_FIXEDFILEINFO = fixedfileinfo_struct
# Start parsing all the StringFileInfo and VarFileInfo structures
#
# Get the first one
#
stringfileinfo_offset = self.dword_align(
fixedfileinfo_offset + fixedfileinfo_struct.sizeof(),
version_struct.OffsetToData)
original_stringfileinfo_offset = stringfileinfo_offset
# Set the PE object's attribute that will contain them all.
#
self.FileInfo = list()
while True:
# Process the StringFileInfo/VarFileInfo struct
#
stringfileinfo_struct = self.__unpack_data__(
self.__StringFileInfo_format__,
raw_data[stringfileinfo_offset:],
file_offset = start_offset+stringfileinfo_offset )
if stringfileinfo_struct is None:
self.__warnings.append(
'Error parsing StringFileInfo/VarFileInfo struct' )
return None
# Get the subsequent string defining the structure.
#
ustr_offset = ( version_struct.OffsetToData +
stringfileinfo_offset + versioninfo_struct.sizeof() )
try:
stringfileinfo_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringFileInfo string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
# Set such string as the Key attribute
#
stringfileinfo_struct.Key = stringfileinfo_string
# Append the structure to the PE object's list
#
self.FileInfo.append(stringfileinfo_struct)
# Parse a StringFileInfo entry
#
if stringfileinfo_string and stringfileinfo_string.startswith(u'StringFileInfo'):
if stringfileinfo_struct.Type == 1 and stringfileinfo_struct.ValueLength == 0:
stringtable_offset = self.dword_align(
stringfileinfo_offset + stringfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
stringfileinfo_struct.StringTable = list()
# Process the String Table entries
#
while True:
stringtable_struct = self.__unpack_data__(
self.__StringTable_format__,
raw_data[stringtable_offset:],
file_offset = start_offset+stringtable_offset )
if not stringtable_struct:
break
ustr_offset = ( version_struct.OffsetToData + stringtable_offset +
stringtable_struct.sizeof() )
try:
stringtable_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
stringtable_struct.LangID = stringtable_string
stringtable_struct.entries = dict()
stringtable_struct.entries_offsets = dict()
stringtable_struct.entries_lengths = dict()
stringfileinfo_struct.StringTable.append(stringtable_struct)
entry_offset = self.dword_align(
stringtable_offset + stringtable_struct.sizeof() +
2*(len(stringtable_string)+1),
version_struct.OffsetToData)
# Process all entries in the string table
#
while entry_offset < stringtable_offset + stringtable_struct.Length:
string_struct = self.__unpack_data__(
self.__String_format__, raw_data[entry_offset:],
file_offset = start_offset+entry_offset )
if not string_struct:
break
ustr_offset = ( version_struct.OffsetToData + entry_offset +
string_struct.sizeof() )
try:
key = self.get_string_u_at_rva( ustr_offset )
key_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Key string. Can\'t ' +
'read unicode string at offset 0x%x' % ( ustr_offset ) )
break
value_offset = self.dword_align(
2*(len(key)+1) + entry_offset + string_struct.sizeof(),
version_struct.OffsetToData)
ustr_offset = version_struct.OffsetToData + value_offset
try:
value = self.get_string_u_at_rva( ustr_offset,
max_length = string_struct.ValueLength )
value_offset = self.get_offset_from_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read StringTable Value string. ' +
'Can\'t read unicode string at offset 0x%x' % (
ustr_offset ) )
break
if string_struct.Length == 0:
entry_offset = stringtable_offset + stringtable_struct.Length
else:
entry_offset = self.dword_align(
string_struct.Length+entry_offset, version_struct.OffsetToData)
key_as_char = []
for c in key:
if ord(c)>128:
key_as_char.append('\\x%02x' %ord(c))
else:
key_as_char.append(c)
key_as_char = ''.join(key_as_char)
setattr(stringtable_struct, key_as_char, value)
stringtable_struct.entries[key] = value
stringtable_struct.entries_offsets[key] = (key_offset, value_offset)
stringtable_struct.entries_lengths[key] = (len(key), len(value))
new_stringtable_offset = self.dword_align(
stringtable_struct.Length + stringtable_offset,
version_struct.OffsetToData)
# check if the entry is crafted in a way that would lead to an infinite
# loop and break if so
#
if new_stringtable_offset == stringtable_offset:
break
stringtable_offset = new_stringtable_offset
if stringtable_offset >= stringfileinfo_struct.Length:
break
# Parse a VarFileInfo entry
#
elif stringfileinfo_string and stringfileinfo_string.startswith( u'VarFileInfo' ):
varfileinfo_struct = stringfileinfo_struct
varfileinfo_struct.name = 'VarFileInfo'
if varfileinfo_struct.Type == 1 and varfileinfo_struct.ValueLength == 0:
var_offset = self.dword_align(
stringfileinfo_offset + varfileinfo_struct.sizeof() +
2*(len(stringfileinfo_string)+1),
version_struct.OffsetToData)
varfileinfo_struct.Var = list()
# Process all entries
#
while True:
var_struct = self.__unpack_data__(
self.__Var_format__,
raw_data[var_offset:],
file_offset = start_offset+var_offset )
if not var_struct:
break
ustr_offset = ( version_struct.OffsetToData + var_offset +
var_struct.sizeof() )
try:
var_string = self.get_string_u_at_rva( ustr_offset )
except PEFormatError, excp:
self.__warnings.append(
'Error parsing the version information, ' +
'attempting to read VarFileInfo Var string. ' +
'Can\'t read unicode string at offset 0x%x' % (ustr_offset))
break
varfileinfo_struct.Var.append(var_struct)
varword_offset = self.dword_align(
2*(len(var_string)+1) + var_offset + var_struct.sizeof(),
version_struct.OffsetToData)
orig_varword_offset = varword_offset
while varword_offset < orig_varword_offset + var_struct.ValueLength:
word1 = self.get_word_from_data(
raw_data[varword_offset:varword_offset+2], 0)
word2 = self.get_word_from_data(
raw_data[varword_offset+2:varword_offset+4], 0)
varword_offset += 4
if isinstance(word1, (int, long)) and isinstance(word1, (int, long)):
var_struct.entry = {var_string: '0x%04x 0x%04x' % (word1, word2)}
var_offset = self.dword_align(
var_offset+var_struct.Length, version_struct.OffsetToData)
if var_offset <= var_offset+var_struct.Length:
break
# Increment and align the offset
#
stringfileinfo_offset = self.dword_align(
stringfileinfo_struct.Length+stringfileinfo_offset,
version_struct.OffsetToData)
# Check if all the StringFileInfo and VarFileInfo items have been processed
#
if stringfileinfo_struct.Length == 0 or stringfileinfo_offset >= versioninfo_struct.Length:
break |
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset) | Parse one or more log files | Below is the the instruction that describes the task:
### Input:
Parse one or more log files
### Response:
def run_parse(self):
"""Parse one or more log files"""
# Data set already has source file names from load_inputs
parsedset = {}
parsedset['data_set'] = []
for log in self.input_files:
parsemodule = self.parse_modules[self.args.parser]
try:
if self.args.tzone:
parsemodule.tzone = self.args.tzone
except NameError: pass
parsedset['data_set'].append(parsemodule.parse_file(log))
self.data_set = parsedset
del(parsedset) |
def validate_class_type_arguments(operator):
"""
Decorator to validate all the arguments to function
are of the type of calling class for passed operator
"""
def inner(function):
def wrapper(self, *args, **kwargs):
for arg in args + tuple(kwargs.values()):
if not isinstance(arg, self.__class__):
raise TypeError(
'unorderable types: {}() {} {}()'.format(
type(self).__name__, operator, type(arg).__name__
)
)
return function(self, *args, **kwargs)
return wrapper
return inner | Decorator to validate all the arguments to function
are of the type of calling class for passed operator | Below is the the instruction that describes the task:
### Input:
Decorator to validate all the arguments to function
are of the type of calling class for passed operator
### Response:
def validate_class_type_arguments(operator):
"""
Decorator to validate all the arguments to function
are of the type of calling class for passed operator
"""
def inner(function):
def wrapper(self, *args, **kwargs):
for arg in args + tuple(kwargs.values()):
if not isinstance(arg, self.__class__):
raise TypeError(
'unorderable types: {}() {} {}()'.format(
type(self).__name__, operator, type(arg).__name__
)
)
return function(self, *args, **kwargs)
return wrapper
return inner |
def quad_genz_keister_24 ( order ):
"""
Hermite Genz-Keister 24 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_24(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667]
"""
order = sorted(GENZ_KEISTER_24.keys())[order]
abscissas, weights = GENZ_KEISTER_24[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)
weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)
return abscissas, weights | Hermite Genz-Keister 24 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_24(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667] | Below is the the instruction that describes the task:
### Input:
Hermite Genz-Keister 24 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_24(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667]
### Response:
def quad_genz_keister_24 ( order ):
"""
Hermite Genz-Keister 24 rule.
Args:
order (int):
The quadrature order. Must be in the interval (0, 8).
Returns:
(:py:data:typing.Tuple[numpy.ndarray, numpy.ndarray]):
Abscissas and weights
Examples:
>>> abscissas, weights = quad_genz_keister_24(1)
>>> print(numpy.around(abscissas, 4))
[-1.7321 0. 1.7321]
>>> print(numpy.around(weights, 4))
[0.1667 0.6667 0.1667]
"""
order = sorted(GENZ_KEISTER_24.keys())[order]
abscissas, weights = GENZ_KEISTER_24[order]
abscissas = numpy.array(abscissas)
weights = numpy.array(weights)
weights /= numpy.sum(weights)
abscissas *= numpy.sqrt(2)
return abscissas, weights |
def dotenv_values(dotenv_path):
"""
:param dotenv_path: env file
:return: ordered dict
"""
values = OrderedDict(parse_dotenv(dotenv_path))
values = resolve_nested_variables(values)
return values | :param dotenv_path: env file
:return: ordered dict | Below is the the instruction that describes the task:
### Input:
:param dotenv_path: env file
:return: ordered dict
### Response:
def dotenv_values(dotenv_path):
"""
:param dotenv_path: env file
:return: ordered dict
"""
values = OrderedDict(parse_dotenv(dotenv_path))
values = resolve_nested_variables(values)
return values |
def _gradient_penalty(self, real_samples, fake_samples, kwargs):
"""
Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm
"""
import torch
from torch.autograd import Variable, grad
real_samples = real_samples.view(fake_samples.shape)
subset_size = real_samples.shape[0]
real_samples = real_samples[:subset_size]
fake_samples = fake_samples[:subset_size]
alpha = torch.rand(subset_size)
if self.use_cuda:
alpha = alpha.cuda()
alpha = alpha.view((-1,) + ((1,) * (real_samples.dim() - 1)))
interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
interpolates = Variable(interpolates, requires_grad=True)
if self.use_cuda:
interpolates = interpolates.cuda()
d_output = self.critic(interpolates, **kwargs)
grad_ouputs = torch.ones(d_output.size())
if self.use_cuda:
grad_ouputs = grad_ouputs.cuda()
gradients = grad(
outputs=d_output,
inputs=interpolates,
grad_outputs=grad_ouputs,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10 | Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm | Below is the the instruction that describes the task:
### Input:
Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm
### Response:
def _gradient_penalty(self, real_samples, fake_samples, kwargs):
"""
Compute the norm of the gradients for each sample in a batch, and
penalize anything on either side of unit norm
"""
import torch
from torch.autograd import Variable, grad
real_samples = real_samples.view(fake_samples.shape)
subset_size = real_samples.shape[0]
real_samples = real_samples[:subset_size]
fake_samples = fake_samples[:subset_size]
alpha = torch.rand(subset_size)
if self.use_cuda:
alpha = alpha.cuda()
alpha = alpha.view((-1,) + ((1,) * (real_samples.dim() - 1)))
interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
interpolates = Variable(interpolates, requires_grad=True)
if self.use_cuda:
interpolates = interpolates.cuda()
d_output = self.critic(interpolates, **kwargs)
grad_ouputs = torch.ones(d_output.size())
if self.use_cuda:
grad_ouputs = grad_ouputs.cuda()
gradients = grad(
outputs=d_output,
inputs=interpolates,
grad_outputs=grad_ouputs,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10 |
def contains(self, x, y):
"""
Returns true if the given point is contained within the
bounding box, where all boundaries of the box are
considered to be inclusive.
"""
left, bottom, right, top = self.aarect().lbrt()
return (left <= x <= right) and (bottom <= y <= top) | Returns true if the given point is contained within the
bounding box, where all boundaries of the box are
considered to be inclusive. | Below is the the instruction that describes the task:
### Input:
Returns true if the given point is contained within the
bounding box, where all boundaries of the box are
considered to be inclusive.
### Response:
def contains(self, x, y):
"""
Returns true if the given point is contained within the
bounding box, where all boundaries of the box are
considered to be inclusive.
"""
left, bottom, right, top = self.aarect().lbrt()
return (left <= x <= right) and (bottom <= y <= top) |
def setup(applicationName,
applicationType=None,
style='plastique',
splash='',
splashType=None,
splashTextColor='white',
splashTextAlign=None,
theme=''):
"""
Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. }
"""
import_qt(globals())
output = {}
# check to see if there is a qapplication running
if not QtGui.QApplication.instance():
# make sure we have a valid QApplication type
if applicationType is None:
applicationType = QtGui.QApplication
app = applicationType([applicationName])
app.setApplicationName(applicationName)
app.setQuitOnLastWindowClosed(True)
stylize(app, style=style, theme=theme)
# utilized with the projexui.config.xschemeconfig
app.setProperty('useScheme', wrapVariant(True))
output['app'] = app
# create a new splash screen if desired
if splash:
if not splashType:
splashType = XLoggerSplashScreen
pixmap = QtGui.QPixmap(splash)
screen = splashType(pixmap)
if splashTextAlign is None:
splashTextAlign = QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom
screen.setTextColor(QtGui.QColor(splashTextColor))
screen.setTextAlignment(splashTextAlign)
screen.show()
QtGui.QApplication.instance().processEvents()
output['splash'] = screen
return output | Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. } | Below is the the instruction that describes the task:
### Input:
Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. }
### Response:
def setup(applicationName,
applicationType=None,
style='plastique',
splash='',
splashType=None,
splashTextColor='white',
splashTextAlign=None,
theme=''):
"""
Wrapper system for the QApplication creation process to handle all proper
pre-application setup. This method will verify that there is no application
running, creating one if necessary. If no application is created, a None
value is returned - signaling that there is already an app running. If you
need to specify your own QApplication subclass, you can do so through the
applicationType parameter.
:note This method should always be used with the exec_ method to
handle the post setup process.
:param applicationName | <str>
applicationType | <subclass of QApplication> || None
style | <str> || <QStyle> | style to use for the new app
splash | <str> | filepath to use for a splash screen
splashType | <subclass of QSplashScreen> || None
splashTextColor | <str> || <QColor>
splashTextAlign | <Qt.Alignment>
:usage |import projexui
|
|def main(argv):
| # initialize the application
| data = projexui.setup()
|
| # do some initialization code
| window = MyWindow()
| window.show()
|
| # execute the application
| projexui.exec_(window, data)
:return { <str> key: <variant> value, .. }
"""
import_qt(globals())
output = {}
# check to see if there is a qapplication running
if not QtGui.QApplication.instance():
# make sure we have a valid QApplication type
if applicationType is None:
applicationType = QtGui.QApplication
app = applicationType([applicationName])
app.setApplicationName(applicationName)
app.setQuitOnLastWindowClosed(True)
stylize(app, style=style, theme=theme)
# utilized with the projexui.config.xschemeconfig
app.setProperty('useScheme', wrapVariant(True))
output['app'] = app
# create a new splash screen if desired
if splash:
if not splashType:
splashType = XLoggerSplashScreen
pixmap = QtGui.QPixmap(splash)
screen = splashType(pixmap)
if splashTextAlign is None:
splashTextAlign = QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom
screen.setTextColor(QtGui.QColor(splashTextColor))
screen.setTextAlignment(splashTextAlign)
screen.show()
QtGui.QApplication.instance().processEvents()
output['splash'] = screen
return output |
def create_runtime_class(self, body, **kwargs):
"""
create a RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_runtime_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_runtime_class_with_http_info(body, **kwargs)
else:
(data) = self.create_runtime_class_with_http_info(body, **kwargs)
return data | create a RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_runtime_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
create a RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_runtime_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
### Response:
def create_runtime_class(self, body, **kwargs):
"""
create a RuntimeClass
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_runtime_class(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param V1beta1RuntimeClass body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
:return: V1beta1RuntimeClass
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_runtime_class_with_http_info(body, **kwargs)
else:
(data) = self.create_runtime_class_with_http_info(body, **kwargs)
return data |
def rank(self, score):
'''Return the 0-based index (rank) of ``score``. If the score is not
available it returns a negative integer which absolute score is the
left most closest index with score less than *score*.'''
node = self.__head
rank = 0
for i in range(self.__level-1, -1, -1):
while node.next[i] and node.next[i].score <= score:
rank += node.width[i]
node = node.next[i]
if node.score == score:
return rank - 1
else:
return -1 - rank | Return the 0-based index (rank) of ``score``. If the score is not
available it returns a negative integer which absolute score is the
left most closest index with score less than *score*. | Below is the the instruction that describes the task:
### Input:
Return the 0-based index (rank) of ``score``. If the score is not
available it returns a negative integer which absolute score is the
left most closest index with score less than *score*.
### Response:
def rank(self, score):
'''Return the 0-based index (rank) of ``score``. If the score is not
available it returns a negative integer which absolute score is the
left most closest index with score less than *score*.'''
node = self.__head
rank = 0
for i in range(self.__level-1, -1, -1):
while node.next[i] and node.next[i].score <= score:
rank += node.width[i]
node = node.next[i]
if node.score == score:
return rank - 1
else:
return -1 - rank |
def pack_str(cls, value):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
"""
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value) | Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes | Below is the the instruction that describes the task:
### Input:
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
### Response:
def pack_str(cls, value):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
"""
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value) |
def distribute(self, volume, source, dest, *args, **kwargs):
"""
Distribute will move a volume of liquid from a single of source
to a list of target locations. See :any:`Transfer` for details
and a full list of optional arguments.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.distribute(50, plate[1], plate.cols[0]) # doctest: +SKIP
"""
# Note: currently it varies whether the pipette should have a tip on
# or not depending on the parameters for this call, so we cannot
# create a very reliable assertion on tip status
args = [volume, source, dest, *args]
kwargs['mode'] = 'distribute'
kwargs['mix_after'] = (0, 0)
if 'disposal_vol' not in kwargs:
kwargs['disposal_vol'] = self.min_volume
return self.transfer(*args, **kwargs) | Distribute will move a volume of liquid from a single of source
to a list of target locations. See :any:`Transfer` for details
and a full list of optional arguments.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.distribute(50, plate[1], plate.cols[0]) # doctest: +SKIP | Below is the the instruction that describes the task:
### Input:
Distribute will move a volume of liquid from a single of source
to a list of target locations. See :any:`Transfer` for details
and a full list of optional arguments.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.distribute(50, plate[1], plate.cols[0]) # doctest: +SKIP
### Response:
def distribute(self, volume, source, dest, *args, **kwargs):
"""
Distribute will move a volume of liquid from a single of source
to a list of target locations. See :any:`Transfer` for details
and a full list of optional arguments.
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> plate = labware.load('96-flat', '3') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left') # doctest: +SKIP
>>> p300.distribute(50, plate[1], plate.cols[0]) # doctest: +SKIP
"""
# Note: currently it varies whether the pipette should have a tip on
# or not depending on the parameters for this call, so we cannot
# create a very reliable assertion on tip status
args = [volume, source, dest, *args]
kwargs['mode'] = 'distribute'
kwargs['mix_after'] = (0, 0)
if 'disposal_vol' not in kwargs:
kwargs['disposal_vol'] = self.min_volume
return self.transfer(*args, **kwargs) |
def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
(ts, self.domain)) | Record an API access. | Below is the the instruction that describes the task:
### Input:
Record an API access.
### Response:
def __access(self, ts):
""" Record an API access. """
with self.connection:
self.connection.execute("INSERT OR REPLACE INTO access_timestamp (timestamp, domain) VALUES (?, ?)",
(ts, self.domain)) |
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError() | Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path | Below is the the instruction that describes the task:
### Input:
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
### Response:
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError() |
def get_neurommsig_score(graph: BELGraph,
genes: List[Gene],
ora_weight: Optional[float] = None,
hub_weight: Optional[float] = None,
top_percent: Optional[float] = None,
topology_weight: Optional[float] = None) -> float:
"""Calculate the composite NeuroMMSig Score for a given list of genes.
:param graph: A BEL graph
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
:param topology_weight: The relative weight of the topolgical analysis core from
:py:func:`neurommsig_topology`. Defaults to 1.0.
:return: The NeuroMMSig composite score
"""
ora_weight = ora_weight or 1.0
hub_weight = hub_weight or 1.0
topology_weight = topology_weight or 1.0
total_weight = ora_weight + hub_weight + topology_weight
genes = list(genes)
ora_score = neurommsig_gene_ora(graph, genes)
hub_score = neurommsig_hubs(graph, genes, top_percent=top_percent)
topology_score = neurommsig_topology(graph, genes)
weighted_sum = (
ora_weight * ora_score +
hub_weight * hub_score +
topology_weight * topology_score
)
return weighted_sum / total_weight | Calculate the composite NeuroMMSig Score for a given list of genes.
:param graph: A BEL graph
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
:param topology_weight: The relative weight of the topolgical analysis core from
:py:func:`neurommsig_topology`. Defaults to 1.0.
:return: The NeuroMMSig composite score | Below is the the instruction that describes the task:
### Input:
Calculate the composite NeuroMMSig Score for a given list of genes.
:param graph: A BEL graph
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
:param topology_weight: The relative weight of the topolgical analysis core from
:py:func:`neurommsig_topology`. Defaults to 1.0.
:return: The NeuroMMSig composite score
### Response:
def get_neurommsig_score(graph: BELGraph,
genes: List[Gene],
ora_weight: Optional[float] = None,
hub_weight: Optional[float] = None,
top_percent: Optional[float] = None,
topology_weight: Optional[float] = None) -> float:
"""Calculate the composite NeuroMMSig Score for a given list of genes.
:param graph: A BEL graph
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
:param topology_weight: The relative weight of the topolgical analysis core from
:py:func:`neurommsig_topology`. Defaults to 1.0.
:return: The NeuroMMSig composite score
"""
ora_weight = ora_weight or 1.0
hub_weight = hub_weight or 1.0
topology_weight = topology_weight or 1.0
total_weight = ora_weight + hub_weight + topology_weight
genes = list(genes)
ora_score = neurommsig_gene_ora(graph, genes)
hub_score = neurommsig_hubs(graph, genes, top_percent=top_percent)
topology_score = neurommsig_topology(graph, genes)
weighted_sum = (
ora_weight * ora_score +
hub_weight * hub_score +
topology_weight * topology_score
)
return weighted_sum / total_weight |
def Popen(*args, **kwargs):
"""
Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
Parses stdout as well for stdout API calls.
Use read_line argument to read stdout of command's stdout line by line.
Use returned process stdin to communicate with the command.
:return: subprocess.Popen
"""
read_line = None
if 'read_line' in kwargs:
read_line = kwargs['read_line']
del kwargs['read_line']
p = subprocess.Popen(*args, **kwargs)
wait_stdout = None
wait_stderr = None
if p.stdout:
wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line)
if p.stderr:
wait_stderr = sys.stderr.attach(p.stderr)
original_wait = p.wait
def wait():
original_wait()
if wait_stdout:
wait_stdout()
if wait_stderr:
wait_stderr()
p.wait = wait
return p | Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
Parses stdout as well for stdout API calls.
Use read_line argument to read stdout of command's stdout line by line.
Use returned process stdin to communicate with the command.
:return: subprocess.Popen | Below is the the instruction that describes the task:
### Input:
Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
Parses stdout as well for stdout API calls.
Use read_line argument to read stdout of command's stdout line by line.
Use returned process stdin to communicate with the command.
:return: subprocess.Popen
### Response:
def Popen(*args, **kwargs):
"""
Executes a command using subprocess.Popen and redirects output to AETROS and stdout.
Parses stdout as well for stdout API calls.
Use read_line argument to read stdout of command's stdout line by line.
Use returned process stdin to communicate with the command.
:return: subprocess.Popen
"""
read_line = None
if 'read_line' in kwargs:
read_line = kwargs['read_line']
del kwargs['read_line']
p = subprocess.Popen(*args, **kwargs)
wait_stdout = None
wait_stderr = None
if p.stdout:
wait_stdout = sys.stdout.attach(p.stdout, read_line=read_line)
if p.stderr:
wait_stderr = sys.stderr.attach(p.stderr)
original_wait = p.wait
def wait():
original_wait()
if wait_stdout:
wait_stdout()
if wait_stderr:
wait_stderr()
p.wait = wait
return p |
def set_inode(self, ino):
# type: (inode.Inode) -> None
'''
A method to set the Inode associated with this El Torito Entry.
Parameters:
ino - The Inode object corresponding to this entry.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
self.inode = ino | A method to set the Inode associated with this El Torito Entry.
Parameters:
ino - The Inode object corresponding to this entry.
Returns:
Nothing. | Below is the the instruction that describes the task:
### Input:
A method to set the Inode associated with this El Torito Entry.
Parameters:
ino - The Inode object corresponding to this entry.
Returns:
Nothing.
### Response:
def set_inode(self, ino):
# type: (inode.Inode) -> None
'''
A method to set the Inode associated with this El Torito Entry.
Parameters:
ino - The Inode object corresponding to this entry.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('El Torito Entry not yet initialized')
self.inode = ino |
def add_port(self, br_name, port_name, may_exist, fake_iface,
iface_names, settings=None):
"""
:type settings: list of (column, value_json)
where column is str,
value_json is json that is represented
by Datum.to_json()
"""
settings = settings or []
self.populate_cache()
if may_exist:
vsctl_port = self.find_port(port_name, False)
if vsctl_port:
want_names = set(iface_names)
have_names = set(ovsrec_iface.name for ovsrec_iface in
vsctl_port.port_cfg.interfaces)
if vsctl_port.bridge().name != br_name:
vsctl_fatal('"%s" but %s is actually attached to '
'vsctl_bridge %s' %
(br_name, port_name, vsctl_port.bridge().name))
if want_names != have_names:
want_names_string = ','.join(want_names)
have_names_string = ','.join(have_names)
vsctl_fatal('"%s" but %s actually has interface(s) %s' %
(want_names_string,
port_name, have_names_string))
return
self.check_conflicts(port_name,
'cannot create a port named %s' % port_name)
for iface_name in iface_names:
self.check_conflicts(
iface_name, 'cannot create an interface named %s' % iface_name)
vsctl_bridge = self.find_bridge(br_name, True)
ifaces = []
for iface_name in iface_names:
ovsrec_iface = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = iface_name
ifaces.append(ovsrec_iface)
ovsrec_port = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = port_name
ovsrec_port.interfaces = ifaces
ovsrec_port.bond_fake_iface = fake_iface
if vsctl_bridge.parent:
tag = vsctl_bridge.vlan
ovsrec_port.tag = tag
for column, value in settings:
# TODO:XXX self.symtab:
self.set_column(ovsrec_port, column, value)
if vsctl_bridge.parent:
ovsrec_bridge = vsctl_bridge.parent.br_cfg
else:
ovsrec_bridge = vsctl_bridge.br_cfg
self.bridge_insert_port(ovsrec_bridge, ovsrec_port)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
for ovsrec_iface in ifaces:
self.add_iface_to_cache(vsctl_port, ovsrec_iface) | :type settings: list of (column, value_json)
where column is str,
value_json is json that is represented
by Datum.to_json() | Below is the the instruction that describes the task:
### Input:
:type settings: list of (column, value_json)
where column is str,
value_json is json that is represented
by Datum.to_json()
### Response:
def add_port(self, br_name, port_name, may_exist, fake_iface,
iface_names, settings=None):
"""
:type settings: list of (column, value_json)
where column is str,
value_json is json that is represented
by Datum.to_json()
"""
settings = settings or []
self.populate_cache()
if may_exist:
vsctl_port = self.find_port(port_name, False)
if vsctl_port:
want_names = set(iface_names)
have_names = set(ovsrec_iface.name for ovsrec_iface in
vsctl_port.port_cfg.interfaces)
if vsctl_port.bridge().name != br_name:
vsctl_fatal('"%s" but %s is actually attached to '
'vsctl_bridge %s' %
(br_name, port_name, vsctl_port.bridge().name))
if want_names != have_names:
want_names_string = ','.join(want_names)
have_names_string = ','.join(have_names)
vsctl_fatal('"%s" but %s actually has interface(s) %s' %
(want_names_string,
port_name, have_names_string))
return
self.check_conflicts(port_name,
'cannot create a port named %s' % port_name)
for iface_name in iface_names:
self.check_conflicts(
iface_name, 'cannot create an interface named %s' % iface_name)
vsctl_bridge = self.find_bridge(br_name, True)
ifaces = []
for iface_name in iface_names:
ovsrec_iface = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = iface_name
ifaces.append(ovsrec_iface)
ovsrec_port = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = port_name
ovsrec_port.interfaces = ifaces
ovsrec_port.bond_fake_iface = fake_iface
if vsctl_bridge.parent:
tag = vsctl_bridge.vlan
ovsrec_port.tag = tag
for column, value in settings:
# TODO:XXX self.symtab:
self.set_column(ovsrec_port, column, value)
if vsctl_bridge.parent:
ovsrec_bridge = vsctl_bridge.parent.br_cfg
else:
ovsrec_bridge = vsctl_bridge.br_cfg
self.bridge_insert_port(ovsrec_bridge, ovsrec_port)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
for ovsrec_iface in ifaces:
self.add_iface_to_cache(vsctl_port, ovsrec_iface) |
def daily_forecast(self, name, limit=None):
"""
Queries the OWM Weather API for daily weather forecast for the specified
location (eg: "London,uk"). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of fourteen
days by default: this instance encapsulates *Weather* objects, with a
time interval of one day one from each other
:param name: the location's toponym
:type name: str or unicode
:param limit: the maximum number of daily *Weather* items to be
retrieved (default is ``None``, which stands for any number of
items)
:type limit: int or ``None``
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if negative values are supplied for limit
"""
assert isinstance(name, str), "Value must be a string"
encoded_name = name
if limit is not None:
assert isinstance(limit, int), "'limit' must be an int or None"
if limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'q': encoded_name, 'lang': self._language}
if limit is not None:
params['cnt'] = limit
uri = http_client.HttpClient.to_url(DAILY_FORECAST_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
forecast = self._parsers['forecast'].parse_JSON(json_data)
if forecast is not None:
forecast.set_interval("daily")
return forecaster.Forecaster(forecast)
else:
return None | Queries the OWM Weather API for daily weather forecast for the specified
location (eg: "London,uk"). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of fourteen
days by default: this instance encapsulates *Weather* objects, with a
time interval of one day one from each other
:param name: the location's toponym
:type name: str or unicode
:param limit: the maximum number of daily *Weather* items to be
retrieved (default is ``None``, which stands for any number of
items)
:type limit: int or ``None``
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if negative values are supplied for limit | Below is the the instruction that describes the task:
### Input:
Queries the OWM Weather API for daily weather forecast for the specified
location (eg: "London,uk"). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of fourteen
days by default: this instance encapsulates *Weather* objects, with a
time interval of one day one from each other
:param name: the location's toponym
:type name: str or unicode
:param limit: the maximum number of daily *Weather* items to be
retrieved (default is ``None``, which stands for any number of
items)
:type limit: int or ``None``
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if negative values are supplied for limit
### Response:
def daily_forecast(self, name, limit=None):
"""
Queries the OWM Weather API for daily weather forecast for the specified
location (eg: "London,uk"). A *Forecaster* object is returned,
containing a *Forecast* instance covering a global streak of fourteen
days by default: this instance encapsulates *Weather* objects, with a
time interval of one day one from each other
:param name: the location's toponym
:type name: str or unicode
:param limit: the maximum number of daily *Weather* items to be
retrieved (default is ``None``, which stands for any number of
items)
:type limit: int or ``None``
:returns: a *Forecaster* instance or ``None`` if forecast data is not
available for the specified location
:raises: *ParseResponseException* when OWM Weather API responses' data
cannot be parsed, *APICallException* when OWM Weather API can not be
reached, *ValueError* if negative values are supplied for limit
"""
assert isinstance(name, str), "Value must be a string"
encoded_name = name
if limit is not None:
assert isinstance(limit, int), "'limit' must be an int or None"
if limit < 1:
raise ValueError("'limit' must be None or greater than zero")
params = {'q': encoded_name, 'lang': self._language}
if limit is not None:
params['cnt'] = limit
uri = http_client.HttpClient.to_url(DAILY_FORECAST_URL,
self._API_key,
self._subscription_type,
self._use_ssl)
_, json_data = self._wapi.cacheable_get_json(uri, params=params)
forecast = self._parsers['forecast'].parse_JSON(json_data)
if forecast is not None:
forecast.set_interval("daily")
return forecaster.Forecaster(forecast)
else:
return None |
def _get_requirements(fname):
"""
Create a list of requirements from the output of the pip freeze command
saved in a text file.
"""
packages = _read(fname).split('\n')
packages = (p.strip() for p in packages)
packages = (p for p in packages if p and not p.startswith('#'))
return list(packages) | Create a list of requirements from the output of the pip freeze command
saved in a text file. | Below is the the instruction that describes the task:
### Input:
Create a list of requirements from the output of the pip freeze command
saved in a text file.
### Response:
def _get_requirements(fname):
"""
Create a list of requirements from the output of the pip freeze command
saved in a text file.
"""
packages = _read(fname).split('\n')
packages = (p.strip() for p in packages)
packages = (p for p in packages if p and not p.startswith('#'))
return list(packages) |
def modify_environment (self, env):
"""The headas-init.sh script generates its variables in a bit of a funky way
-- it runs a script that generates a list of settings. These are their
transcriptions.
"""
plat = self._platform
def path (*args):
return os.path.join (self._installdir, *args)
env['CALDB'] = b'http://heasarc.gsfc.nasa.gov/FTP/caldb'
env['CALDBCONFIG'] = path ('caldb.config')
env['CALDBALIAS'] = path ('alias_config.fits')
env['HEADAS'] = path (plat)
env['LHEASOFT'] = env['HEADAS']
env['FTOOLS'] = env['HEADAS']
prepend_environ_path (env, 'PATH', path (plat, 'bin'))
prepend_environ_path (env, 'LD_LIBRARY_PATH', path (plat, 'lib'))
prepend_environ_path (env, 'PERLLIB', path (plat, 'lib', 'perl'))
prepend_environ_path (env, 'PERL5LIB', path (plat, 'lib', 'perl'))
prepend_environ_path (env, 'PYTHONPATH', path (plat, 'lib'))
prepend_environ_path (env, 'PYTHONPATH', path (plat, 'lib', 'python'))
userpfiles = user_data_path ('hea-pfiles')
io.ensure_dir (userpfiles, parents=True)
env['PFILES'] = ';'.join ([userpfiles,
path (plat, 'syspfiles')])
env['LHEA_DATA'] = path (plat, 'refdata')
env['LHEA_HELP'] = path (plat, 'help')
env['PGPLOT_DIR'] = path (plat, 'lib')
env['PGPLOT_FONT'] = path (plat, 'lib', 'grfont.dat')
env['PGPLOT_RGB'] = path (plat, 'lib', 'rgb.txt')
env['POW_LIBRARY'] = path (plat, 'lib', 'pow')
env['TCLRL_LIBDIR'] = path (plat, 'lib')
env['XANADU'] = path ()
env['XANBIN'] = path (plat)
env['XRDEFAULTS'] = path (plat, 'xrdefaults')
env['EXT'] = b'lnx' # XXX portability probably ...
env['LHEAPERL'] = b'/usr/bin/perl' # what could go wrong?
env['PFCLOBBER'] = b'1'
env['FTOOLSINPUT'] = b'stdin'
env['FTOOLSOUTPUT'] = b'stdout'
return env | The headas-init.sh script generates its variables in a bit of a funky way
-- it runs a script that generates a list of settings. These are their
transcriptions. | Below is the the instruction that describes the task:
### Input:
The headas-init.sh script generates its variables in a bit of a funky way
-- it runs a script that generates a list of settings. These are their
transcriptions.
### Response:
def modify_environment (self, env):
"""The headas-init.sh script generates its variables in a bit of a funky way
-- it runs a script that generates a list of settings. These are their
transcriptions.
"""
plat = self._platform
def path (*args):
return os.path.join (self._installdir, *args)
env['CALDB'] = b'http://heasarc.gsfc.nasa.gov/FTP/caldb'
env['CALDBCONFIG'] = path ('caldb.config')
env['CALDBALIAS'] = path ('alias_config.fits')
env['HEADAS'] = path (plat)
env['LHEASOFT'] = env['HEADAS']
env['FTOOLS'] = env['HEADAS']
prepend_environ_path (env, 'PATH', path (plat, 'bin'))
prepend_environ_path (env, 'LD_LIBRARY_PATH', path (plat, 'lib'))
prepend_environ_path (env, 'PERLLIB', path (plat, 'lib', 'perl'))
prepend_environ_path (env, 'PERL5LIB', path (plat, 'lib', 'perl'))
prepend_environ_path (env, 'PYTHONPATH', path (plat, 'lib'))
prepend_environ_path (env, 'PYTHONPATH', path (plat, 'lib', 'python'))
userpfiles = user_data_path ('hea-pfiles')
io.ensure_dir (userpfiles, parents=True)
env['PFILES'] = ';'.join ([userpfiles,
path (plat, 'syspfiles')])
env['LHEA_DATA'] = path (plat, 'refdata')
env['LHEA_HELP'] = path (plat, 'help')
env['PGPLOT_DIR'] = path (plat, 'lib')
env['PGPLOT_FONT'] = path (plat, 'lib', 'grfont.dat')
env['PGPLOT_RGB'] = path (plat, 'lib', 'rgb.txt')
env['POW_LIBRARY'] = path (plat, 'lib', 'pow')
env['TCLRL_LIBDIR'] = path (plat, 'lib')
env['XANADU'] = path ()
env['XANBIN'] = path (plat)
env['XRDEFAULTS'] = path (plat, 'xrdefaults')
env['EXT'] = b'lnx' # XXX portability probably ...
env['LHEAPERL'] = b'/usr/bin/perl' # what could go wrong?
env['PFCLOBBER'] = b'1'
env['FTOOLSINPUT'] = b'stdin'
env['FTOOLSOUTPUT'] = b'stdout'
return env |
def add_logger(self, cb, level='NORMAL', filters='ALL'):
'''Add a callback to receive log events from this component.
@param cb The callback function to receive log events. It must have the
signature cb(name, time, source, level, message), where name is the
name of the component the log record came from, time is a
floating-point time stamp, source is the name of the logger that
provided the log record, level is the log level of the record and
message is a text string.
@param level The maximum level of log records to receive.
@param filters Filter the objects from which to receive log messages.
@return An ID for this logger. Use this ID in future operations such as
removing this logger.
@raises AddLoggerError
'''
with self._mutex:
obs = sdo.RTCLogger(self, cb)
uuid_val = uuid.uuid4()
intf_type = obs._this()._NP_RepositoryId
props = {'logger.log_level': level,
'logger.filter': filters}
props = utils.dict_to_nvlist(props)
sprof = SDOPackage.ServiceProfile(id=uuid_val.get_bytes(),
interface_type=intf_type, service=obs._this(),
properties=props)
conf = self.object.get_configuration()
res = conf.add_service_profile(sprof)
if res:
self._loggers[uuid_val] = obs
return uuid_val
raise exceptions.AddLoggerError(self.name) | Add a callback to receive log events from this component.
@param cb The callback function to receive log events. It must have the
signature cb(name, time, source, level, message), where name is the
name of the component the log record came from, time is a
floating-point time stamp, source is the name of the logger that
provided the log record, level is the log level of the record and
message is a text string.
@param level The maximum level of log records to receive.
@param filters Filter the objects from which to receive log messages.
@return An ID for this logger. Use this ID in future operations such as
removing this logger.
@raises AddLoggerError | Below is the the instruction that describes the task:
### Input:
Add a callback to receive log events from this component.
@param cb The callback function to receive log events. It must have the
signature cb(name, time, source, level, message), where name is the
name of the component the log record came from, time is a
floating-point time stamp, source is the name of the logger that
provided the log record, level is the log level of the record and
message is a text string.
@param level The maximum level of log records to receive.
@param filters Filter the objects from which to receive log messages.
@return An ID for this logger. Use this ID in future operations such as
removing this logger.
@raises AddLoggerError
### Response:
def add_logger(self, cb, level='NORMAL', filters='ALL'):
'''Add a callback to receive log events from this component.
@param cb The callback function to receive log events. It must have the
signature cb(name, time, source, level, message), where name is the
name of the component the log record came from, time is a
floating-point time stamp, source is the name of the logger that
provided the log record, level is the log level of the record and
message is a text string.
@param level The maximum level of log records to receive.
@param filters Filter the objects from which to receive log messages.
@return An ID for this logger. Use this ID in future operations such as
removing this logger.
@raises AddLoggerError
'''
with self._mutex:
obs = sdo.RTCLogger(self, cb)
uuid_val = uuid.uuid4()
intf_type = obs._this()._NP_RepositoryId
props = {'logger.log_level': level,
'logger.filter': filters}
props = utils.dict_to_nvlist(props)
sprof = SDOPackage.ServiceProfile(id=uuid_val.get_bytes(),
interface_type=intf_type, service=obs._this(),
properties=props)
conf = self.object.get_configuration()
res = conf.add_service_profile(sprof)
if res:
self._loggers[uuid_val] = obs
return uuid_val
raise exceptions.AddLoggerError(self.name) |
def extract_fields(self):
"""Extract the given fieldnames from the object
:returns: Schema name/value mapping
:rtype: dict
"""
# get the proper data manager for the object
dm = IDataManager(self.context)
# filter out ignored fields
fieldnames = filter(lambda name: name not in self.ignore, self.keys)
# schema mapping
out = dict()
for fieldname in fieldnames:
try:
# get the field value with the data manager
fieldvalue = dm.json_data(fieldname)
# https://github.com/collective/plone.jsonapi.routes/issues/52
# -> skip restricted fields
except Unauthorized:
logger.debug("Skipping restricted field '%s'" % fieldname)
continue
except ValueError:
logger.debug("Skipping invalid field '%s'" % fieldname)
continue
out[fieldname] = api.to_json_value(self.context, fieldname, fieldvalue)
return out | Extract the given fieldnames from the object
:returns: Schema name/value mapping
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Extract the given fieldnames from the object
:returns: Schema name/value mapping
:rtype: dict
### Response:
def extract_fields(self):
"""Extract the given fieldnames from the object
:returns: Schema name/value mapping
:rtype: dict
"""
# get the proper data manager for the object
dm = IDataManager(self.context)
# filter out ignored fields
fieldnames = filter(lambda name: name not in self.ignore, self.keys)
# schema mapping
out = dict()
for fieldname in fieldnames:
try:
# get the field value with the data manager
fieldvalue = dm.json_data(fieldname)
# https://github.com/collective/plone.jsonapi.routes/issues/52
# -> skip restricted fields
except Unauthorized:
logger.debug("Skipping restricted field '%s'" % fieldname)
continue
except ValueError:
logger.debug("Skipping invalid field '%s'" % fieldname)
continue
out[fieldname] = api.to_json_value(self.context, fieldname, fieldvalue)
return out |
def get_fund_ownership(self, **kwargs):
"""Fund Ownership
Returns the top 10 fund holders, meaning any firm not defined as
buy-side or sell-side such as mutual funds, pension funds, endowments,
investment firms, and other large entities that manage funds on behalf
of others.
Reference: https://iexcloud.io/docs/api/#fund-ownership
Data Weighting: ``10000`` per symbol per period
Returns
-------
list or pandas.DataFrame
Stocks Fund Ownership endpoint data
"""
def fmt_p(out):
out = {(symbol, owner["entityProperName"]): owner
for symbol in out
for owner in out[symbol]}
return pd.DataFrame(out)
return self._get_endpoint("fund-ownership", fmt_p=fmt_p, params=kwargs) | Fund Ownership
Returns the top 10 fund holders, meaning any firm not defined as
buy-side or sell-side such as mutual funds, pension funds, endowments,
investment firms, and other large entities that manage funds on behalf
of others.
Reference: https://iexcloud.io/docs/api/#fund-ownership
Data Weighting: ``10000`` per symbol per period
Returns
-------
list or pandas.DataFrame
Stocks Fund Ownership endpoint data | Below is the the instruction that describes the task:
### Input:
Fund Ownership
Returns the top 10 fund holders, meaning any firm not defined as
buy-side or sell-side such as mutual funds, pension funds, endowments,
investment firms, and other large entities that manage funds on behalf
of others.
Reference: https://iexcloud.io/docs/api/#fund-ownership
Data Weighting: ``10000`` per symbol per period
Returns
-------
list or pandas.DataFrame
Stocks Fund Ownership endpoint data
### Response:
def get_fund_ownership(self, **kwargs):
"""Fund Ownership
Returns the top 10 fund holders, meaning any firm not defined as
buy-side or sell-side such as mutual funds, pension funds, endowments,
investment firms, and other large entities that manage funds on behalf
of others.
Reference: https://iexcloud.io/docs/api/#fund-ownership
Data Weighting: ``10000`` per symbol per period
Returns
-------
list or pandas.DataFrame
Stocks Fund Ownership endpoint data
"""
def fmt_p(out):
out = {(symbol, owner["entityProperName"]): owner
for symbol in out
for owner in out[symbol]}
return pd.DataFrame(out)
return self._get_endpoint("fund-ownership", fmt_p=fmt_p, params=kwargs) |
def _checkResponseRegisterAddress(payload, registeraddress):
"""Check that the start adress as given in the response is correct.
The first two bytes in the payload holds the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=2, description='payload')
_checkRegisteraddress(registeraddress)
BYTERANGE_FOR_STARTADDRESS = slice(0, 2)
bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]
receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)
if receivedStartAddress != registeraddress:
raise ValueError('Wrong given write start adress: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
receivedStartAddress, registeraddress, payload)) | Check that the start adress as given in the response is correct.
The first two bytes in the payload holds the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError | Below is the the instruction that describes the task:
### Input:
Check that the start adress as given in the response is correct.
The first two bytes in the payload holds the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError
### Response:
def _checkResponseRegisterAddress(payload, registeraddress):
"""Check that the start adress as given in the response is correct.
The first two bytes in the payload holds the address value.
Args:
* payload (string): The payload
* registeraddress (int): The register address (use decimal numbers, not hex).
Raises:
TypeError, ValueError
"""
_checkString(payload, minlength=2, description='payload')
_checkRegisteraddress(registeraddress)
BYTERANGE_FOR_STARTADDRESS = slice(0, 2)
bytesForStartAddress = payload[BYTERANGE_FOR_STARTADDRESS]
receivedStartAddress = _twoByteStringToNum(bytesForStartAddress)
if receivedStartAddress != registeraddress:
raise ValueError('Wrong given write start adress: {0}, but commanded is {1}. The data payload is: {2!r}'.format( \
receivedStartAddress, registeraddress, payload)) |
def translated(structure, values, lang_spec):
"""Return code associated to given structure and values,
translate with given language specification."""
# LANGUAGE SPECS
indentation = '\t'
endline = '\n'
object_code = ""
stack = []
# define shortcuts to behavior
push = lambda x: stack.append(x)
pop = lambda : stack.pop()
last = lambda : stack[-1] if len(stack) > 0 else ' '
def indented_code(s, level, end):
return lang_spec[INDENTATION]*level + s + end
# recreate python structure, and replace type by value
level = 0
CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION]
ACTION = LEXEM_TYPE_ACTION
DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL
for lexem_type in structure:
if lexem_type is ACTION:
# place previous conditions if necessary
if last() in CONDITIONS:
# construct conditions lines
value, values = values[0:len(stack)], values[len(stack):]
object_code += (indented_code(lang_spec[BEG_CONDITION]
+ lang_spec[LOGICAL_AND].join(value)
+ lang_spec[END_CONDITION],
level,
lang_spec[END_LINE]
))
# if provided, print the begin block token on a new line
if len(lang_spec[BEG_BLOCK]) > 0:
object_code += indented_code(
lang_spec[BEG_BLOCK],
level,
lang_spec[END_LINE]
)
stack = []
level += 1
# and place the action
object_code += indented_code(
lang_spec[BEG_ACTION] + values[0],
level,
lang_spec[END_ACTION]+lang_spec[END_LINE]
)
values = values[1:]
elif lexem_type in CONDITIONS:
push(lexem_type)
elif lexem_type is DOWNLEVEL:
if last() not in CONDITIONS:
# down level, and add a END_BLOCK only if needed
level -= 1
if level >= 0:
object_code += indented_code(
lang_spec[END_BLOCK], level,
lang_spec[END_LINE]
)
else:
level = 0
# add END_BLOCK while needed for reach level 0
while level > 0:
level -= 1
if level >= 0:
object_code += indented_code(
lang_spec[END_BLOCK], level,
lang_spec[END_LINE]
)
else:
level = 0
# Finished !
return object_code | Return code associated to given structure and values,
translate with given language specification. | Below is the the instruction that describes the task:
### Input:
Return code associated to given structure and values,
translate with given language specification.
### Response:
def translated(structure, values, lang_spec):
"""Return code associated to given structure and values,
translate with given language specification."""
# LANGUAGE SPECS
indentation = '\t'
endline = '\n'
object_code = ""
stack = []
# define shortcuts to behavior
push = lambda x: stack.append(x)
pop = lambda : stack.pop()
last = lambda : stack[-1] if len(stack) > 0 else ' '
def indented_code(s, level, end):
return lang_spec[INDENTATION]*level + s + end
# recreate python structure, and replace type by value
level = 0
CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION]
ACTION = LEXEM_TYPE_ACTION
DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL
for lexem_type in structure:
if lexem_type is ACTION:
# place previous conditions if necessary
if last() in CONDITIONS:
# construct conditions lines
value, values = values[0:len(stack)], values[len(stack):]
object_code += (indented_code(lang_spec[BEG_CONDITION]
+ lang_spec[LOGICAL_AND].join(value)
+ lang_spec[END_CONDITION],
level,
lang_spec[END_LINE]
))
# if provided, print the begin block token on a new line
if len(lang_spec[BEG_BLOCK]) > 0:
object_code += indented_code(
lang_spec[BEG_BLOCK],
level,
lang_spec[END_LINE]
)
stack = []
level += 1
# and place the action
object_code += indented_code(
lang_spec[BEG_ACTION] + values[0],
level,
lang_spec[END_ACTION]+lang_spec[END_LINE]
)
values = values[1:]
elif lexem_type in CONDITIONS:
push(lexem_type)
elif lexem_type is DOWNLEVEL:
if last() not in CONDITIONS:
# down level, and add a END_BLOCK only if needed
level -= 1
if level >= 0:
object_code += indented_code(
lang_spec[END_BLOCK], level,
lang_spec[END_LINE]
)
else:
level = 0
# add END_BLOCK while needed for reach level 0
while level > 0:
level -= 1
if level >= 0:
object_code += indented_code(
lang_spec[END_BLOCK], level,
lang_spec[END_LINE]
)
else:
level = 0
# Finished !
return object_code |
def _format_file_lines(files):
'''Given a list of filenames that we're about to print, limit it to a
reasonable number of lines.'''
LINES_TO_SHOW = 10
if len(files) <= LINES_TO_SHOW:
lines = '\n'.join(files)
else:
lines = ('\n'.join(files[:LINES_TO_SHOW - 1]) + '\n...{} total'.format(
len(files)))
return lines | Given a list of filenames that we're about to print, limit it to a
reasonable number of lines. | Below is the the instruction that describes the task:
### Input:
Given a list of filenames that we're about to print, limit it to a
reasonable number of lines.
### Response:
def _format_file_lines(files):
'''Given a list of filenames that we're about to print, limit it to a
reasonable number of lines.'''
LINES_TO_SHOW = 10
if len(files) <= LINES_TO_SHOW:
lines = '\n'.join(files)
else:
lines = ('\n'.join(files[:LINES_TO_SHOW - 1]) + '\n...{} total'.format(
len(files)))
return lines |
def read_case(input, format=None):
""" Returns a case object from the given input file object. The data
format may be optionally specified.
"""
# Map of data file types to readers.
format_map = {"matpower": MATPOWERReader,
"psse": PSSEReader, "pickle": PickleReader}
# Read case data.
if format_map.has_key(format):
reader_klass = format_map[format]
reader = reader_klass()
case = reader.read(input)
else:
# Try each of the readers at random.
for reader_klass in format_map.values():
reader = reader_klass()
try:
case = reader.read(input)
if case is not None:
break
except:
pass
else:
case = None
return case | Returns a case object from the given input file object. The data
format may be optionally specified. | Below is the the instruction that describes the task:
### Input:
Returns a case object from the given input file object. The data
format may be optionally specified.
### Response:
def read_case(input, format=None):
""" Returns a case object from the given input file object. The data
format may be optionally specified.
"""
# Map of data file types to readers.
format_map = {"matpower": MATPOWERReader,
"psse": PSSEReader, "pickle": PickleReader}
# Read case data.
if format_map.has_key(format):
reader_klass = format_map[format]
reader = reader_klass()
case = reader.read(input)
else:
# Try each of the readers at random.
for reader_klass in format_map.values():
reader = reader_klass()
try:
case = reader.read(input)
if case is not None:
break
except:
pass
else:
case = None
return case |
def get_arctic_lib(connection_string, **kwargs):
"""
Returns a mongo library for the given connection string
Parameters
---------
connection_string: `str`
Format must be one of the following:
library@trading for known mongo servers
library@hostname:port
Returns:
--------
Arctic library
"""
m = CONNECTION_STR.match(connection_string)
if not m:
raise ValueError("connection string incorrectly formed: %s" % connection_string)
library, host = m.group(1), m.group(2)
return _get_arctic(host, **kwargs)[library] | Returns a mongo library for the given connection string
Parameters
---------
connection_string: `str`
Format must be one of the following:
library@trading for known mongo servers
library@hostname:port
Returns:
--------
Arctic library | Below is the the instruction that describes the task:
### Input:
Returns a mongo library for the given connection string
Parameters
---------
connection_string: `str`
Format must be one of the following:
library@trading for known mongo servers
library@hostname:port
Returns:
--------
Arctic library
### Response:
def get_arctic_lib(connection_string, **kwargs):
"""
Returns a mongo library for the given connection string
Parameters
---------
connection_string: `str`
Format must be one of the following:
library@trading for known mongo servers
library@hostname:port
Returns:
--------
Arctic library
"""
m = CONNECTION_STR.match(connection_string)
if not m:
raise ValueError("connection string incorrectly formed: %s" % connection_string)
library, host = m.group(1), m.group(2)
return _get_arctic(host, **kwargs)[library] |
def stroke_antialias(self, flag=True):
"""stroke antialias
:param flag: True or False. (default is True)
:type flag: bool
"""
antialias = pgmagick.DrawableStrokeAntialias(flag)
self.drawer.append(antialias) | stroke antialias
:param flag: True or False. (default is True)
:type flag: bool | Below is the the instruction that describes the task:
### Input:
stroke antialias
:param flag: True or False. (default is True)
:type flag: bool
### Response:
def stroke_antialias(self, flag=True):
"""stroke antialias
:param flag: True or False. (default is True)
:type flag: bool
"""
antialias = pgmagick.DrawableStrokeAntialias(flag)
self.drawer.append(antialias) |
def register(scheme):
"""
Registers a new scheme to the urlparser.
:param schema | <str>
"""
scheme = nstr(scheme)
urlparse.uses_fragment.append(scheme)
urlparse.uses_netloc.append(scheme)
urlparse.uses_params.append(scheme)
urlparse.uses_query.append(scheme)
urlparse.uses_relative.append(scheme) | Registers a new scheme to the urlparser.
:param schema | <str> | Below is the the instruction that describes the task:
### Input:
Registers a new scheme to the urlparser.
:param schema | <str>
### Response:
def register(scheme):
"""
Registers a new scheme to the urlparser.
:param schema | <str>
"""
scheme = nstr(scheme)
urlparse.uses_fragment.append(scheme)
urlparse.uses_netloc.append(scheme)
urlparse.uses_params.append(scheme)
urlparse.uses_query.append(scheme)
urlparse.uses_relative.append(scheme) |
def summary(args):
"""
%prog summary gffile fastafile
Print summary stats, including:
- Gene/Exon/Intron
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC
"""
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gff_file, ref = args
s = Fasta(ref)
g = make_index(gff_file)
geneseqs, exonseqs, intronseqs = [], [], [] # Calc % GC
for f in g.features_of_type("gene"):
fid = f.id
fseq = s.sequence({'chr': f.chrom, 'start': f.start, 'stop': f.stop})
geneseqs.append(fseq)
exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \
if c.featuretype == "exon")
exons = list(exons)
for chrom, start, stop in exons:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
exonseqs.append(fseq)
introns = range_interleave(exons)
for chrom, start, stop in introns:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
intronseqs.append(fseq)
r = {} # Report
for t, tseqs in zip(("Gene", "Exon", "Intron"), (geneseqs, exonseqs, intronseqs)):
tsizes = [len(x) for x in tseqs]
tsummary = SummaryStats(tsizes, dtype="int")
r[t, "Number"] = tsummary.size
r[t, "Average size (bp)"] = tsummary.mean
r[t, "Median size (bp)"] = tsummary.median
r[t, "Total length (Mb)"] = human_size(tsummary.sum, precision=0, target="Mb")
r[t, "% of genome"] = percentage(tsummary.sum, s.totalsize, precision=0, mode=-1)
r[t, "% GC"] = gc(tseqs)
print(tabulate(r), file=sys.stderr) | %prog summary gffile fastafile
Print summary stats, including:
- Gene/Exon/Intron
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC | Below is the the instruction that describes the task:
### Input:
%prog summary gffile fastafile
Print summary stats, including:
- Gene/Exon/Intron
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC
### Response:
def summary(args):
"""
%prog summary gffile fastafile
Print summary stats, including:
- Gene/Exon/Intron
- Number
- Average size (bp)
- Median size (bp)
- Total length (Mb)
- % of genome
- % GC
"""
p = OptionParser(summary.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gff_file, ref = args
s = Fasta(ref)
g = make_index(gff_file)
geneseqs, exonseqs, intronseqs = [], [], [] # Calc % GC
for f in g.features_of_type("gene"):
fid = f.id
fseq = s.sequence({'chr': f.chrom, 'start': f.start, 'stop': f.stop})
geneseqs.append(fseq)
exons = set((c.chrom, c.start, c.stop) for c in g.children(fid, 2) \
if c.featuretype == "exon")
exons = list(exons)
for chrom, start, stop in exons:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
exonseqs.append(fseq)
introns = range_interleave(exons)
for chrom, start, stop in introns:
fseq = s.sequence({'chr': chrom, 'start': start, 'stop': stop})
intronseqs.append(fseq)
r = {} # Report
for t, tseqs in zip(("Gene", "Exon", "Intron"), (geneseqs, exonseqs, intronseqs)):
tsizes = [len(x) for x in tseqs]
tsummary = SummaryStats(tsizes, dtype="int")
r[t, "Number"] = tsummary.size
r[t, "Average size (bp)"] = tsummary.mean
r[t, "Median size (bp)"] = tsummary.median
r[t, "Total length (Mb)"] = human_size(tsummary.sum, precision=0, target="Mb")
r[t, "% of genome"] = percentage(tsummary.sum, s.totalsize, precision=0, mode=-1)
r[t, "% GC"] = gc(tseqs)
print(tabulate(r), file=sys.stderr) |
def run(self, *args):
"""Affiliate unique identities to organizations."""
self.parser.parse_args(args)
code = self.affiliate()
return code | Affiliate unique identities to organizations. | Below is the the instruction that describes the task:
### Input:
Affiliate unique identities to organizations.
### Response:
def run(self, *args):
"""Affiliate unique identities to organizations."""
self.parser.parse_args(args)
code = self.affiliate()
return code |
def _store_work_results(self, results, collection, md5):
""" Internal: Stores the work results of a worker."""
self.data_store.store_work_results(results, collection, md5) | Internal: Stores the work results of a worker. | Below is the the instruction that describes the task:
### Input:
Internal: Stores the work results of a worker.
### Response:
def _store_work_results(self, results, collection, md5):
""" Internal: Stores the work results of a worker."""
self.data_store.store_work_results(results, collection, md5) |
def get_folder_list(folder='.'):
""" Get list of sub-folders contained in input folder
:param folder: input folder to list sub-folders. Default is ``'.'``
:type folder: str
:return: list of sub-folders
:rtype: list(str)
"""
dir_list = get_content_list(folder)
return [f for f in dir_list if not os.path.isfile(os.path.join(folder, f))] | Get list of sub-folders contained in input folder
:param folder: input folder to list sub-folders. Default is ``'.'``
:type folder: str
:return: list of sub-folders
:rtype: list(str) | Below is the the instruction that describes the task:
### Input:
Get list of sub-folders contained in input folder
:param folder: input folder to list sub-folders. Default is ``'.'``
:type folder: str
:return: list of sub-folders
:rtype: list(str)
### Response:
def get_folder_list(folder='.'):
""" Get list of sub-folders contained in input folder
:param folder: input folder to list sub-folders. Default is ``'.'``
:type folder: str
:return: list of sub-folders
:rtype: list(str)
"""
dir_list = get_content_list(folder)
return [f for f in dir_list if not os.path.isfile(os.path.join(folder, f))] |
def installed(name, default=False, user=None):
'''
Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
rbenv_installed_ret = copy.deepcopy(ret)
if name.startswith('ruby-'):
name = re.sub(r'^ruby-', '', name)
if __opts__['test']:
ret = _ruby_installed(ret, name, user=user)
if not ret['result']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
else:
ret['comment'] = 'Ruby {0} is already installed'.format(name)
return ret
rbenv_installed_ret = _check_and_install_rbenv(rbenv_installed_ret, user)
if rbenv_installed_ret['result'] is False:
ret['result'] = False
ret['comment'] = 'Rbenv failed to install'
return ret
else:
return _check_and_install_ruby(ret, name, default, user=user) | Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0 | Below is the the instruction that describes the task:
### Input:
Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
### Response:
def installed(name, default=False, user=None):
'''
Verify that the specified ruby is installed with rbenv. Rbenv is
installed if necessary.
name
The version of ruby to install
default : False
Whether to make this ruby the default.
user: None
The user to run rbenv as.
.. versionadded:: 0.17.0
.. versionadded:: 0.16.0
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
rbenv_installed_ret = copy.deepcopy(ret)
if name.startswith('ruby-'):
name = re.sub(r'^ruby-', '', name)
if __opts__['test']:
ret = _ruby_installed(ret, name, user=user)
if not ret['result']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
else:
ret['comment'] = 'Ruby {0} is already installed'.format(name)
return ret
rbenv_installed_ret = _check_and_install_rbenv(rbenv_installed_ret, user)
if rbenv_installed_ret['result'] is False:
ret['result'] = False
ret['comment'] = 'Rbenv failed to install'
return ret
else:
return _check_and_install_ruby(ret, name, default, user=user) |
def getVariantAnnotationSets(self, datasetId):
"""
Returns the list of ReferenceSets for this server.
"""
# TODO this should be displayed per-variant set, not per dataset.
variantAnnotationSets = []
dataset = app.backend.getDataRepository().getDataset(datasetId)
for variantSet in dataset.getVariantSets():
variantAnnotationSets.extend(
variantSet.getVariantAnnotationSets())
return variantAnnotationSets | Returns the list of ReferenceSets for this server. | Below is the the instruction that describes the task:
### Input:
Returns the list of ReferenceSets for this server.
### Response:
def getVariantAnnotationSets(self, datasetId):
"""
Returns the list of ReferenceSets for this server.
"""
# TODO this should be displayed per-variant set, not per dataset.
variantAnnotationSets = []
dataset = app.backend.getDataRepository().getDataset(datasetId)
for variantSet in dataset.getVariantSets():
variantAnnotationSets.extend(
variantSet.getVariantAnnotationSets())
return variantAnnotationSets |
def formatTime(self, record, datefmt=None):
"""
Format time, including milliseconds.
"""
formatted = super(PalletFormatter, self).formatTime(
record, datefmt=datefmt)
return formatted + '.%03dZ' % record.msecs | Format time, including milliseconds. | Below is the the instruction that describes the task:
### Input:
Format time, including milliseconds.
### Response:
def formatTime(self, record, datefmt=None):
"""
Format time, including milliseconds.
"""
formatted = super(PalletFormatter, self).formatTime(
record, datefmt=datefmt)
return formatted + '.%03dZ' % record.msecs |
def latexify_spacegroup(spacegroup_symbol):
"""
Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.
Args:
spacegroup_symbol (str): A spacegroup symbol
Returns:
A latex formatted spacegroup with proper subscripts and overlines.
"""
sym = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol)
return re.sub(r"-(\d)", r"$\\overline{\1}$", sym) | Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.
Args:
spacegroup_symbol (str): A spacegroup symbol
Returns:
A latex formatted spacegroup with proper subscripts and overlines. | Below is the the instruction that describes the task:
### Input:
Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.
Args:
spacegroup_symbol (str): A spacegroup symbol
Returns:
A latex formatted spacegroup with proper subscripts and overlines.
### Response:
def latexify_spacegroup(spacegroup_symbol):
"""
Generates a latex formatted spacegroup. E.g., P2_1/c is converted to
P2$_{1}$/c and P-1 is converted to P$\\overline{1}$.
Args:
spacegroup_symbol (str): A spacegroup symbol
Returns:
A latex formatted spacegroup with proper subscripts and overlines.
"""
sym = re.sub(r"_(\d+)", r"$_{\1}$", spacegroup_symbol)
return re.sub(r"-(\d)", r"$\\overline{\1}$", sym) |
def filePath(self, index):
"""
Gets the file path of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: str
"""
return self._fs_model_source.filePath(
self._fs_model_proxy.mapToSource(index)) | Gets the file path of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: str | Below is the the instruction that describes the task:
### Input:
Gets the file path of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: str
### Response:
def filePath(self, index):
"""
Gets the file path of the item at the specified ``index``.
:param index: item index - QModelIndex
:return: str
"""
return self._fs_model_source.filePath(
self._fs_model_proxy.mapToSource(index)) |
def _log_diff_memory_data(self, prefix, new_memory_data, old_memory_data):
"""
Computes and logs the difference in memory utilization
between the given old and new memory data.
"""
def _vmem_used(memory_data):
return memory_data['machine_data'].used
def _process_mem_percent(memory_data):
return memory_data['process_data']['memory_percent']
def _process_rss(memory_data):
return memory_data['process_data']['memory_info'].rss
def _process_vms(memory_data):
return memory_data['process_data']['memory_info'].vms
if new_memory_data and old_memory_data:
log.info(
u"%s Diff Vmem used: %s, Diff percent memory: %s, Diff rss: %s, Diff vms: %s",
prefix,
_vmem_used(new_memory_data) - _vmem_used(old_memory_data),
_process_mem_percent(new_memory_data) - _process_mem_percent(old_memory_data),
_process_rss(new_memory_data) - _process_rss(old_memory_data),
_process_vms(new_memory_data) - _process_vms(old_memory_data),
) | Computes and logs the difference in memory utilization
between the given old and new memory data. | Below is the the instruction that describes the task:
### Input:
Computes and logs the difference in memory utilization
between the given old and new memory data.
### Response:
def _log_diff_memory_data(self, prefix, new_memory_data, old_memory_data):
"""
Computes and logs the difference in memory utilization
between the given old and new memory data.
"""
def _vmem_used(memory_data):
return memory_data['machine_data'].used
def _process_mem_percent(memory_data):
return memory_data['process_data']['memory_percent']
def _process_rss(memory_data):
return memory_data['process_data']['memory_info'].rss
def _process_vms(memory_data):
return memory_data['process_data']['memory_info'].vms
if new_memory_data and old_memory_data:
log.info(
u"%s Diff Vmem used: %s, Diff percent memory: %s, Diff rss: %s, Diff vms: %s",
prefix,
_vmem_used(new_memory_data) - _vmem_used(old_memory_data),
_process_mem_percent(new_memory_data) - _process_mem_percent(old_memory_data),
_process_rss(new_memory_data) - _process_rss(old_memory_data),
_process_vms(new_memory_data) - _process_vms(old_memory_data),
) |
def generate_synthetic_state_trajectory(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32):
"""Generate a synthetic state trajectory.
Parameters
----------
nsteps : int
Number of steps in the synthetic state trajectory to be generated.
initial_Pi : np.array of shape (nstates,), optional, default=None
The initial probability distribution, if samples are not to be taken from the intrinsic
initial distribution.
start : int
starting state. Exclusive with initial_Pi
stop : int
stopping state. Trajectory will terminate when reaching the stopping state before length number of steps.
dtype : numpy.dtype, optional, default=numpy.int32
The numpy dtype to use to store the synthetic trajectory.
Returns
-------
states : np.array of shape (nstates,) of dtype=np.int32
The trajectory of hidden states, with each element in range(0,nstates).
Examples
--------
Generate a synthetic state trajectory of a specified length.
>>> from bhmm import testsystems
>>> model = testsystems.dalton_model()
>>> states = model.generate_synthetic_state_trajectory(nsteps=100)
"""
# consistency check
if initial_Pi is not None and start is not None:
raise ValueError('Arguments initial_Pi and start are exclusive. Only set one of them.')
# Generate first state sample.
if start is None:
if initial_Pi is not None:
start = np.random.choice(range(self._nstates), size=1, p=initial_Pi)
else:
start = np.random.choice(range(self._nstates), size=1, p=self._Pi)
# Generate and return trajectory
from msmtools import generation as msmgen
traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start, stop=stop, dt=1)
return traj.astype(dtype) | Generate a synthetic state trajectory.
Parameters
----------
nsteps : int
Number of steps in the synthetic state trajectory to be generated.
initial_Pi : np.array of shape (nstates,), optional, default=None
The initial probability distribution, if samples are not to be taken from the intrinsic
initial distribution.
start : int
starting state. Exclusive with initial_Pi
stop : int
stopping state. Trajectory will terminate when reaching the stopping state before length number of steps.
dtype : numpy.dtype, optional, default=numpy.int32
The numpy dtype to use to store the synthetic trajectory.
Returns
-------
states : np.array of shape (nstates,) of dtype=np.int32
The trajectory of hidden states, with each element in range(0,nstates).
Examples
--------
Generate a synthetic state trajectory of a specified length.
>>> from bhmm import testsystems
>>> model = testsystems.dalton_model()
>>> states = model.generate_synthetic_state_trajectory(nsteps=100) | Below is the the instruction that describes the task:
### Input:
Generate a synthetic state trajectory.
Parameters
----------
nsteps : int
Number of steps in the synthetic state trajectory to be generated.
initial_Pi : np.array of shape (nstates,), optional, default=None
The initial probability distribution, if samples are not to be taken from the intrinsic
initial distribution.
start : int
starting state. Exclusive with initial_Pi
stop : int
stopping state. Trajectory will terminate when reaching the stopping state before length number of steps.
dtype : numpy.dtype, optional, default=numpy.int32
The numpy dtype to use to store the synthetic trajectory.
Returns
-------
states : np.array of shape (nstates,) of dtype=np.int32
The trajectory of hidden states, with each element in range(0,nstates).
Examples
--------
Generate a synthetic state trajectory of a specified length.
>>> from bhmm import testsystems
>>> model = testsystems.dalton_model()
>>> states = model.generate_synthetic_state_trajectory(nsteps=100)
### Response:
def generate_synthetic_state_trajectory(self, nsteps, initial_Pi=None, start=None, stop=None, dtype=np.int32):
"""Generate a synthetic state trajectory.
Parameters
----------
nsteps : int
Number of steps in the synthetic state trajectory to be generated.
initial_Pi : np.array of shape (nstates,), optional, default=None
The initial probability distribution, if samples are not to be taken from the intrinsic
initial distribution.
start : int
starting state. Exclusive with initial_Pi
stop : int
stopping state. Trajectory will terminate when reaching the stopping state before length number of steps.
dtype : numpy.dtype, optional, default=numpy.int32
The numpy dtype to use to store the synthetic trajectory.
Returns
-------
states : np.array of shape (nstates,) of dtype=np.int32
The trajectory of hidden states, with each element in range(0,nstates).
Examples
--------
Generate a synthetic state trajectory of a specified length.
>>> from bhmm import testsystems
>>> model = testsystems.dalton_model()
>>> states = model.generate_synthetic_state_trajectory(nsteps=100)
"""
# consistency check
if initial_Pi is not None and start is not None:
raise ValueError('Arguments initial_Pi and start are exclusive. Only set one of them.')
# Generate first state sample.
if start is None:
if initial_Pi is not None:
start = np.random.choice(range(self._nstates), size=1, p=initial_Pi)
else:
start = np.random.choice(range(self._nstates), size=1, p=self._Pi)
# Generate and return trajectory
from msmtools import generation as msmgen
traj = msmgen.generate_traj(self.transition_matrix, nsteps, start=start, stop=stop, dt=1)
return traj.astype(dtype) |
def get_extra_commands():
"""Use the configuration to discover additional CLI packages to load"""
from ambry.run import find_config_file
from ambry.dbexceptions import ConfigurationError
from ambry.util import yaml
try:
plugins_dir = find_config_file('cli.yaml')
except ConfigurationError:
return []
with open(plugins_dir) as f:
cli_modules = yaml.load(f)
return cli_modules | Use the configuration to discover additional CLI packages to load | Below is the the instruction that describes the task:
### Input:
Use the configuration to discover additional CLI packages to load
### Response:
def get_extra_commands():
"""Use the configuration to discover additional CLI packages to load"""
from ambry.run import find_config_file
from ambry.dbexceptions import ConfigurationError
from ambry.util import yaml
try:
plugins_dir = find_config_file('cli.yaml')
except ConfigurationError:
return []
with open(plugins_dir) as f:
cli_modules = yaml.load(f)
return cli_modules |
def set_blend_func(self, srgb='one', drgb='zero',
salpha=None, dalpha=None):
"""Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used.
"""
salpha = srgb if salpha is None else salpha
dalpha = drgb if dalpha is None else dalpha
self.glir.command('FUNC', 'glBlendFuncSeparate',
srgb, drgb, salpha, dalpha) | Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used. | Below is the the instruction that describes the task:
### Input:
Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used.
### Response:
def set_blend_func(self, srgb='one', drgb='zero',
salpha=None, dalpha=None):
"""Specify pixel arithmetic for RGB and alpha
Parameters
----------
srgb : str
Source RGB factor.
drgb : str
Destination RGB factor.
salpha : str | None
Source alpha factor. If None, ``srgb`` is used.
dalpha : str
Destination alpha factor. If None, ``drgb`` is used.
"""
salpha = srgb if salpha is None else salpha
dalpha = drgb if dalpha is None else dalpha
self.glir.command('FUNC', 'glBlendFuncSeparate',
srgb, drgb, salpha, dalpha) |
def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor,
gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5):
""" Vel factory function """
return TrpoPolicyGradient(
max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters,
discount_factor=discount_factor,
gae_lambda=gae_lambda,
improvement_acceptance_ratio=improvement_acceptance_ratio,
max_grad_norm=max_grad_norm
) | Vel factory function | Below is the the instruction that describes the task:
### Input:
Vel factory function
### Response:
def create(max_kl, cg_iters, line_search_iters, cg_damping, entropy_coef, vf_iters, discount_factor,
gae_lambda=1.0, improvement_acceptance_ratio=0.1, max_grad_norm=0.5):
""" Vel factory function """
return TrpoPolicyGradient(
max_kl, int(cg_iters), int(line_search_iters), cg_damping, entropy_coef, vf_iters,
discount_factor=discount_factor,
gae_lambda=gae_lambda,
improvement_acceptance_ratio=improvement_acceptance_ratio,
max_grad_norm=max_grad_norm
) |
def timestamp_update(sender, frames):
"""
Timestamp the modified field for all documents. This method should be
bound to a frame class like so:
```
MyFrameClass.listen('update', MyFrameClass.timestamp_update)
```
"""
for frame in frames:
frame.modified = datetime.now(timezone.utc) | Timestamp the modified field for all documents. This method should be
bound to a frame class like so:
```
MyFrameClass.listen('update', MyFrameClass.timestamp_update)
``` | Below is the the instruction that describes the task:
### Input:
Timestamp the modified field for all documents. This method should be
bound to a frame class like so:
```
MyFrameClass.listen('update', MyFrameClass.timestamp_update)
```
### Response:
def timestamp_update(sender, frames):
"""
Timestamp the modified field for all documents. This method should be
bound to a frame class like so:
```
MyFrameClass.listen('update', MyFrameClass.timestamp_update)
```
"""
for frame in frames:
frame.modified = datetime.now(timezone.utc) |
def _partial_search_validator(self, sub, sup, anagram=False,
subsequence=False, supersequence=False):
"""
It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
First, checks to see if all elements of `sub` is in `sup` with at least
the same frequency and then checks to see if every element `sub`
appears in `sup` with the same order (index-wise).
If advanced control sturctures are specified, the containment condition
won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
"""
def get_all_in(one, another):
for element in one:
if element in another:
yield element
def containment_check(sub, sup):
return (set(Counter(sub).keys()).issubset(
set(Counter(sup).keys())))
def containment_freq_check(sub, sup):
return (all([Counter(sub)[element] <= Counter(sup)[element]
for element in Counter(sub)]))
def extra_freq_check(sub, sup, list_of_tups):
# Would be used for matching anagrams, subsequences etc.
return (len(list_of_tups) > 0 and
all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
for tup in list_of_tups]))
# Regarding containment checking while having extra conditions,
# there's no good way to map each anagram or subseuqnece etc. that was
# found to the query word, without making it more complicated than
# it already is, because a query word can be anagram/subsequence etc.
# to multiple words of the timestamps yet finding the one with the
# right index would be the problem.
# Therefore we just approximate the solution by just counting
# the elements.
if len(sub) > len(sup):
return False
for pred, func in set([(anagram, self._is_anagram_of),
(subsequence, self._is_subsequence_of),
(supersequence, self._is_supersequence_of)]):
if pred:
pred_seive = [(sub_key, sup_key)
for sub_key in set(Counter(sub).keys())
for sup_key in set(Counter(sup).keys())
if func(sub_key, sup_key)]
if not extra_freq_check(sub, sup, pred_seive):
return False
if (
not any([anagram, subsequence, supersequence]) and
(not containment_check(sub, sup) or
not containment_freq_check(sub, sup))
):
return False
for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
if x1 != x2:
return False
return True | It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
First, checks to see if all elements of `sub` is in `sup` with at least
the same frequency and then checks to see if every element `sub`
appears in `sup` with the same order (index-wise).
If advanced control sturctures are specified, the containment condition
won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist` | Below is the the instruction that describes the task:
### Input:
It's responsible for validating the partial results of `search` method.
If it returns True, the search would return its result. Else, search
method would discard what it found and look for others.
First, checks to see if all elements of `sub` is in `sup` with at least
the same frequency and then checks to see if every element `sub`
appears in `sup` with the same order (index-wise).
If advanced control sturctures are specified, the containment condition
won't be checked.
The code for index checking is from [1]_.
Parameters
----------
sub : list
sup : list
anagram : bool, optional
Default is `False`
subsequence : bool, optional
Default is `False`
supersequence : bool, optional
Default is `False`
Returns
-------
bool
References
----------
.. [1] : `
https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
### Response:
def _partial_search_validator(self, sub, sup, anagram: bool = False,
                              subsequence: bool = False,
                              supersequence: bool = False) -> bool:
    """
    It's responsible for validating the partial results of `search` method.
    If it returns True, the search would return its result. Else, search
    method would discard what it found and look for others.
    First, checks to see if all elements of `sub` is in `sup` with at least
    the same frequency and then checks to see if every element `sub`
    appears in `sup` with the same order (index-wise).
    If advanced control structures are specified, the containment condition
    won't be checked.
    The code for index checking is from [1]_.
    Parameters
    ----------
    sub : list
    sup : list
    anagram : bool, optional
        Default is `False`
    subsequence : bool, optional
        Default is `False`
    supersequence : bool, optional
        Default is `False`
    Returns
    -------
    bool
    References
    ----------
    .. [1] : `
    https://stackoverflow.com/questions/35964155/checking-if-list-is-a-sublist`
    """
    def get_all_in(one, another):
        # Yield elements of `one` that also occur in `another`,
        # preserving the order they have in `one`.
        for element in one:
            if element in another:
                yield element

    def containment_check(sub, sup):
        # True when every distinct element of `sub` also occurs in `sup`
        # (frequency is NOT considered here).
        return (set(Counter(sub).keys()).issubset(
            set(Counter(sup).keys())))

    def containment_freq_check(sub, sup):
        # True when each element of `sub` occurs in `sup` at least as
        # many times as it occurs in `sub`.
        return (all([Counter(sub)[element] <= Counter(sup)[element]
                     for element in Counter(sub)]))

    def extra_freq_check(sub, sup, list_of_tups):
        # Would be used for matching anagrams, subsequences etc.
        # `list_of_tups` pairs a `sub` key with a `sup` key that matched
        # under the active predicate; an empty list means no match at all.
        return (len(list_of_tups) > 0 and
                all([Counter(sub)[tup[0]] <= Counter(sup)[tup[1]]
                     for tup in list_of_tups]))

    # Regarding containment checking while having extra conditions,
    # there's no good way to map each anagram or subseuqnece etc. that was
    # found to the query word, without making it more complicated than
    # it already is, because a query word can be anagram/subsequence etc.
    # to multiple words of the timestamps yet finding the one with the
    # right index would be the problem.
    # Therefore we just approximate the solution by just counting
    # the elements.
    if len(sub) > len(sup):
        # `sub` cannot possibly be contained in a shorter `sup`.
        return False
    # Each (flag, predicate-method) pair is checked independently, so the
    # unordered iteration over the set does not affect the result.
    for pred, func in set([(anagram, self._is_anagram_of),
                           (subsequence, self._is_subsequence_of),
                           (supersequence, self._is_supersequence_of)]):
        if pred:
            # Collect every (sub_key, sup_key) pair that satisfies the
            # active predicate, then approximate containment by counts.
            pred_seive = [(sub_key, sup_key)
                          for sub_key in set(Counter(sub).keys())
                          for sup_key in set(Counter(sup).keys())
                          if func(sub_key, sup_key)]
            if not extra_freq_check(sub, sup, pred_seive):
                return False
    # Plain containment (with frequencies) is only enforced when no
    # advanced structure was requested.
    if (
            not any([anagram, subsequence, supersequence]) and
            (not containment_check(sub, sup) or
             not containment_freq_check(sub, sup))
    ):
        return False
    # Order check: shared elements must appear in the same relative order
    # in both sequences (technique from reference [1] above).
    for x1, x2 in zip(get_all_in(sup, sub), get_all_in(sub, sup)):
        if x1 != x2:
            return False
    return True
def proxy(self):
    """Retrieve the upstream content and build an HttpResponse."""
    forwarded_headers = self.request.headers.filter(
        self.ignored_request_headers)
    query = self.request.query_string if self.pass_query_string else ''
    # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005
    meta = self.request.META
    if (meta.get('CONTENT_LENGTH', None) == '' and
            get_django_version() == '1.10'):
        del meta['CONTENT_LENGTH']
    upstream_kwargs = self.middleware.process_request(
        self,
        self.request,
        method=self.request.method,
        url=self.proxy_url,
        headers=forwarded_headers,
        data=self.request.body,
        params=query,
        allow_redirects=False,
        verify=self.verify_ssl,
        cert=self.cert,
        timeout=self.timeout)
    upstream = request(**upstream_kwargs)
    response = HttpResponse(upstream.content, status=upstream.status_code)
    # Copy every upstream header that is not explicitly ignored.
    for name, value in iteritems(
            HeaderDict(upstream.headers).filter(
                self.ignored_upstream_headers)):
        response[name] = value
    return self.middleware.process_response(
        self, self.request, upstream, response)
### Input:
Retrieve the upstream content and build an HttpResponse.
### Response:
def proxy(self):
    """Retrieve the upstream content and build an HttpResponse."""
    # Drop request headers that must not be forwarded upstream.
    headers = self.request.headers.filter(self.ignored_request_headers)
    # Forward the query string only when configured to do so.
    qs = self.request.query_string if self.pass_query_string else ''
    # Fix for django 1.10.0 bug https://code.djangoproject.com/ticket/27005
    if (self.request.META.get('CONTENT_LENGTH', None) == '' and
            get_django_version() == '1.10'):
        del self.request.META['CONTENT_LENGTH']
    # Let middleware assemble/adjust the keyword arguments for the
    # upstream HTTP call (redirects are handled by the client, not here).
    request_kwargs = self.middleware.process_request(
        self, self.request, method=self.request.method, url=self.proxy_url,
        headers=headers, data=self.request.body, params=qs,
        allow_redirects=False, verify=self.verify_ssl, cert=self.cert,
        timeout=self.timeout)
    result = request(**request_kwargs)
    response = HttpResponse(result.content, status=result.status_code)
    # Attach forwardable headers to response
    forwardable_headers = HeaderDict(result.headers).filter(
        self.ignored_upstream_headers)
    for header, value in iteritems(forwardable_headers):
        response[header] = value
    # Give middleware a final chance to rewrite the outgoing response.
    return self.middleware.process_response(
        self, self.request, result, response)
def configure(self):
    """The main configure function.

    Uses a schema file and an optional data file, and combines them with
    user prompts to write a new data file.

    Returns
    -------
    False when the schema cannot be loaded at all; None on success.
    """
    # Make the lazy folder if it doesn't already exist.
    path = os.getcwd() + '/' + self.lazy_folder
    if not os.path.exists(path):
        os.makedirs(path)
    schema_file = self.schema_file
    data_file = self.data_file
    # Initialise the schema and data objects.
    schema, data = Schema(), Schema()
    # Load the schema from a file.
    try:
        schema.load(schema_file)
    except IOError:
        # If we can't load the schema, choose from templates.
        self.prompt.error("Could not find schema in " + schema_file + " - Choosing from default templates...")
        schema = self.choose_schema(schema_file)
    except Exception as e:
        # `Exception` already subsumes ValueError, so one handler suffices.
        self.prompt.error("Error: " + str(e) + " - Aborting...")
        return False
    else:
        self.prompt.success('Loaded schema from ' + self.lazy_folder +
                            os.path.basename(schema_file))
    # Load the data from a file.
    try:
        data.load(data_file)
    except Exception:
        # Any load failure falls back to copying the schema below.
        self.prompt.error('Could not find data file. Copying from schema...')
    else:
        self.prompt.success('Loaded data from ' + self.lazy_folder +
                            os.path.basename(data_file))
    # Store the internals of the schema (labels, selects, etc.) in data.
    data.internal = schema.internal
    # If we have data from a data file, merge the schema file into it.
    if data.data:
        # Create a new Merge instance using the data from the schema and
        # data files, and report every change to the user.
        merger = Merge(schema.data, data.data)
        mods = merger.merge()
        for added_key in mods['added']:
            self.prompt.success('Added ' + added_key + ' to data.')
        for removed_key in mods['removed']:
            self.prompt.error('Removed ' + removed_key + ' from data.')
        for key, change in mods['modified']:
            self.prompt.notice('Modified ' + key + ': ' + change[0] +
                               ' became ' + change[1] + '.')
    # Otherwise, reference the data from the schema file verbatim.
    else:
        data.data = schema.data
    # Store the data.
    self.data = data
    # Configure the data.
    self.configure_data(data.data)
    # Save the data to the out file.
    self.data.save(self.data_file)
    self.add_ignore()
    self.prompt.success('Saved to ' + self.lazy_folder +
                        os.path.basename(self.data_file) + '.')
and combines them with user prompts to write a new data file. | Below is the the instruction that describes the task:
### Input:
The main configure function. Uses a schema file and an optional data file,
and combines them with user prompts to write a new data file.
### Response:
def configure(self):
    """ The main configure function. Uses a schema file and an optional data file,
    and combines them with user prompts to write a new data file. """
    # Make the lazy folder if it doesn't already exist.
    path = os.getcwd() + '/' + self.lazy_folder
    if not os.path.exists(path):
        os.makedirs(path)
    schema_file = self.schema_file
    data_file = self.data_file
    # Initialise the schema and data objects.
    schema, data = Schema(), Schema()
    # Load the schema from a file.
    try:
        schema.load(schema_file)
    except IOError as e:
        # If we can't load the schema, choose from templates.
        self.prompt.error("Could not find schema in " + schema_file + " - Choosing from default templates...")
        schema = self.choose_schema(schema_file)
    except (Exception, ValueError) as e:
        # NOTE(review): `(Exception, ValueError)` is redundant — plain
        # `Exception` already covers ValueError.
        self.prompt.error("Error: " + str(e) + " - Aborting...")
        return False
    else:
        # `sf` is the bare filename; `sp` (the directory part) is unused.
        sp, sf = os.path.split(schema_file)
        self.prompt.success('Loaded schema from ' + self.lazy_folder + sf)
    # Load the data from a file.
    try:
        data.load(data_file)
    except (Exception, IOError, ValueError) as e:
        # NOTE(review): redundant tuple again; any failure here falls back
        # to copying from the schema below.
        self.prompt.error('Could not find data file. Copying from schema...')
    else:
        sp, sf = os.path.split(data_file)
        self.prompt.success('Loaded data from ' + self.lazy_folder + sf)
    # Store the internals of the schema (labels, selects, etc.) in data.
    data.internal = schema.internal
    # If we have data from a data file, merge the schema file into it.
    if data.data:
        # Create a new Merge instance using the data from the schema and data files.
        m = Merge(schema.data, data.data)
        mods = m.merge()
        # `mods` presumably maps 'added'/'removed'/'modified' to change
        # lists — TODO confirm against Merge.merge().
        for a in mods['added']:
            self.prompt.success('Added ' + a + ' to data.')
        for r in mods['removed']:
            self.prompt.error('Removed ' + r + ' from data.')
        # NOTE(review): the loop variable `m` shadows the Merge instance.
        for k, m in mods['modified']:
            self.prompt.notice('Modified ' + k + ': ' + m[0] + ' became ' + m[1] + '.')
    # Otherwise, reference the data from the schema file verbatim.
    else:
        data.data = schema.data
    # Store the data.
    self.data = data
    # Configure the data.
    self.configure_data(data.data)
    # Save the data to the out file.
    self.data.save(self.data_file)
    self.add_ignore()
    sp, sf = os.path.split(self.data_file)
    self.prompt.success('Saved to ' + self.lazy_folder + sf + '.')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.