code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def _list(self, request, start_response):
    """Send an HTTP response containing the API directory.

    This calls start_response and returns the response body.

    Args:
      request: An ApiRequest, the transformed request sent to the Discovery API.
      start_response: A function with semantics defined in PEP-333.

    Returns:
      A string containing the response body.
    """
    generator = directory_list_generator.DirectoryListGenerator(request)
    # List every registered config except the Discovery API's own config.
    configs = [config for config in self._config_manager.configs.itervalues()
               if config != self.API_CONFIG]
    directory = generator.pretty_print_config_to_json(configs)
    if directory:
        return self._send_success_response(directory, start_response)
    _logger.error('Failed to get API directory')
    # By returning a 404, code explorer still works if you select the
    # API in the URL
    return util.send_wsgi_not_found_response(start_response)
|
Sends HTTP response containing the API directory.
This calls start_response and returns the response body.
Args:
request: An ApiRequest, the transformed request sent to the Discovery API.
start_response: A function with semantics defined in PEP-333.
Returns:
A string containing the response body.
|
def undoable(generator):
    '''Decorator which creates a new undoable action type.

    This decorator should be used on a generator of the following format::

        @undoable
        def operation(*args):
            do_operation_code
            yield 'descriptive text'
            undo_operator_code
    '''
    def inner(*args, **kwargs):
        action = _Action(generator, args, kwargs)
        result = action.do()
        stack().append(action)
        # Unwrap a 0- or 1-element tuple returned by the action.
        if isinstance(result, tuple) and len(result) < 2:
            return result[0] if result else None
        return result
    return inner
|
Decorator which creates a new undoable action type.
This decorator should be used on a generator of the following format::
@undoable
def operation(*args):
do_operation_code
yield 'descriptive text'
undo_operator_code
|
def from_arg_kinds(cls, arch, fp_args, ret_fp=False, sizes=None, sp_delta=None, func_ty=None):
    """
    Get an instance of the class that will extract floating-point/integral args correctly.

    :param arch: The Archinfo arch for this CC
    :param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
                    false if it is integral.
    :param ret_fp: True if the return value for the function is fp.
    :param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
                  size of the corresponding argument in bytes.
    :param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
    :param func_ty: A SimType for the function itself
    """
    basic = cls(arch, sp_delta=sp_delta, func_ty=func_ty)
    # arg_locs assigns each argument a location based on its FP-ness and size.
    basic.args = basic.arg_locs(fp_args, sizes)
    # Select the FP or integral return-value location to match ret_fp.
    basic.ret_val = basic.fp_return_val if ret_fp else basic.return_val
    return basic
|
Get an instance of the class that will extract floating-point/integral args correctly.
:param arch: The Archinfo arch for this CC
:param fp_args: A list, with one entry for each argument the function can take. True if the argument is fp,
false if it is integral.
:param ret_fp: True if the return value for the function is fp.
:param sizes: Optional: A list, with one entry for each argument the function can take. Each entry is the
size of the corresponding argument in bytes.
:param sp_delta: The amount the stack pointer changes over the course of this function - CURRENTLY UNUSED
:param func_ty: A SimType for the function itself
|
def output_default(paragraphs, fp=sys.stdout, no_boilerplate=True):
    """
    Output the paragraphs as::

        <tag> text of the first paragraph
        <tag> text of the second paragraph
        ...

    where <tag> is <p>, <h> or <b>, indicating a standard paragraph, a
    heading or boilerplate respectively.

    :param paragraphs: iterable of objects with `class_type`, `heading`
        and `text` attributes.
    :param fp: writable file object to print to (default: stdout).
    :param no_boilerplate: if True, boilerplate paragraphs are skipped
        instead of being emitted with the <b> tag.
    """
    # Local import: the cgi module (and cgi.escape) was removed in
    # Python 3.13; html.escape with quote=False is the documented
    # drop-in replacement with identical escaping of &, < and >.
    import html

    for paragraph in paragraphs:
        if paragraph.class_type == 'good':
            tag = 'h' if paragraph.heading else 'p'
        elif no_boilerplate:
            continue
        else:
            tag = 'b'
        print('<%s> %s' % (tag, html.escape(paragraph.text, quote=False)), file=fp)
|
Outputs the paragraphs as:
<tag> text of the first paragraph
<tag> text of the second paragraph
...
where <tag> is <p>, <h> or <b> which indicates
standard paragraph, heading or boilerplate respectively.
|
def do_command(self):
    """Call a single command with arguments."""
    method, raw_args = self.args[0], self.args[1:]
    if '=' in method:
        # rTorrent style packs args as "cmd=a,b"; it cannot be combined
        # with shell-style positional args.
        if raw_args:
            self.parser.error("Please don't mix rTorrent and shell argument styles!")
        method, packed = method.split('=', 1)
        raw_args = packed.split(',')
    self.execute(self.open(), method, self.cooked(raw_args))
|
Call a single command with arguments.
|
def hpre(*content, sep='\n'):
    """
    Make a mono-width text block (HTML).

    :param content: fragments to join into the block.
    :param sep: separator inserted between the fragments.
    :return: the joined content, HTML-quoted and wrapped in pre markup.
    """
    joined = _join(*content, sep=sep)
    return _md(quote_html(joined), symbols=MD_SYMBOLS[7])
|
Make mono-width text block (HTML)
:param content:
:param sep:
:return:
|
def format_docstring(elt, arg_comments:dict=None, alt_doc_string:str='', ignore_warn:bool=False)->str:
    "Merge and format the docstring definition with `arg_comments` and `alt_doc_string`."
    # BUG FIX: the old default was a mutable dict ({}), shared across all
    # calls; None is the sentinel now, which is backward compatible.
    if arg_comments is None: arg_comments = {}
    parsed = ""
    doc = parse_docstring(inspect.getdoc(elt))
    description = alt_doc_string or f"{doc['short_description']} {doc['long_description']}"
    if description: parsed += f'\n\n{link_docstring(inspect.getmodule(elt), description)}'
    resolved_comments = {**doc.get('comments', {}), **arg_comments}  # arg_comments takes priority
    # Enums have no argspec; document their members instead.
    args = inspect.getfullargspec(elt).args if not is_enum(elt.__class__) else elt.__members__.keys()
    if resolved_comments: parsed += '\n'
    for a in resolved_comments:
        parsed += f'\n- *{a}*: {resolved_comments[a]}'
        if a not in args and not ignore_warn: warn(f'Doc arg mismatch: {a}')
    return_comment = arg_comments.get('return') or doc.get('return')
    if return_comment: parsed += f'\n\n*return*: {return_comment}'
    return parsed
|
Merge and format the docstring definition with `arg_comments` and `alt_doc_string`.
|
def ReadClientLastPings(self,
                        min_last_ping=None,
                        max_last_ping=None,
                        fleetspeak_enabled=None,
                        cursor=None):
    """Reads the last-ping timestamps for clients in the database.

    Args:
      min_last_ping: optional lower bound (RDFDatetime) on last_ping.
      max_last_ping: optional upper bound; clients with a NULL last_ping
        are included.
      fleetspeak_enabled: optional bool filter on the fleetspeak flag.
      cursor: database cursor to execute the query with.

    Returns:
      A dict mapping client id to its last-ping RDFDatetime (or None).
    """
    query = "SELECT client_id, UNIX_TIMESTAMP(last_ping) FROM clients "
    query_values = []
    where_filters = []
    if min_last_ping is not None:
        where_filters.append("last_ping >= FROM_UNIXTIME(%s)")
        query_values.append(mysql_utils.RDFDatetimeToTimestamp(min_last_ping))
    if max_last_ping is not None:
        where_filters.append(
            "(last_ping IS NULL OR last_ping <= FROM_UNIXTIME(%s))")
        query_values.append(mysql_utils.RDFDatetimeToTimestamp(max_last_ping))
    if fleetspeak_enabled is not None:
        if fleetspeak_enabled:
            where_filters.append("fleetspeak_enabled IS TRUE")
        else:
            where_filters.append(
                "(fleetspeak_enabled IS NULL OR fleetspeak_enabled IS FALSE)")
    if where_filters:
        # BUG FIX: filters used to be joined with "AND " (no leading
        # space), which produced invalid SQL such as "...))AND ..." when
        # the preceding filter had no trailing space.
        query += "WHERE " + " AND ".join(where_filters)
    cursor.execute(query, query_values)
    last_pings = {}
    for int_client_id, last_ping in cursor.fetchall():
        client_id = db_utils.IntToClientID(int_client_id)
        last_pings[client_id] = mysql_utils.TimestampToRDFDatetime(last_ping)
    return last_pings
|
Reads client ids for all clients in the database.
|
def get_model_choices():
    """
    Build the select options for the model selector.

    :return: list of (value, label) tuples, one per translatable model,
        ordered by app label and model name.
    """
    choices = []
    for ct in ContentType.objects.order_by('app_label', 'model'):
        model_cls = ct.model_class()
        try:
            translatable = issubclass(model_cls, TranslatableModel)
        except TypeError:
            # model_class() can return None for stale content types.
            continue
        if translatable:
            value = '{} - {}'.format(ct.app_label, ct.model.lower())
            label = '{} - {}'.format(ct.app_label.capitalize(),
                                     model_cls._meta.verbose_name_plural)
            choices.append((value, label))
    return choices
|
Get the select options for the model selector
:return:
|
def repr_failure(self, excinfo):
    """Called when self.runtest() raises an exception; format it for pytest."""
    exc = excinfo.value
    cc = self.colors
    if not isinstance(exc, NbCellError):
        return "pytest plugin exception: %s" % str(exc)
    header = cc.FAIL + "Notebook cell execution failed" + cc.ENDC
    cell_info = (cc.OKBLUE + "Cell %d: %s\n\n" +
                 "Input:\n" + cc.ENDC + "%s\n") % (exc.cell_num,
                                                   str(exc),
                                                   exc.source)
    parts = [header, cell_info]
    if exc.inner_traceback:
        parts.append((cc.OKBLUE + "Traceback:" + cc.ENDC + "\n%s\n") %
                     exc.inner_traceback)
    return "\n".join(parts)
|
called when self.runtest() raises an exception.
|
def text_to_qcolor(text):
    """
    Create a QColor from the specified string.

    Avoids the warning Qt prints when an invalid QColor is instantiated
    by validating the name first; an invalid (default) QColor is
    returned for bad input.
    """
    color = QColor()
    if not is_string(text):  # testing for QString (PyQt API#1)
        text = str(text)
    if not is_text_string(text):
        return color
    if text.startswith('#') and len(text) == 7:
        # BUG FIX: the old allowed-character set included '#', so strings
        # like '#ff#123' passed validation; only check the six hex digits.
        if any(ch not in '0123456789abcdef' for ch in text[1:].lower()):
            return color
    elif text not in list(QColor.colorNames()):
        return color
    color.setNamedColor(text)
    return color
|
Create a QColor from specified string
Avoid warning from Qt when an invalid QColor is instantiated
|
def lookup(self, subcmd_prefix):
    """Return the subcommand in self.subcmds matching subcmd_prefix, or None.

    A match requires subcmd_prefix to be a prefix of the subcommand name
    and at least ``min_abbrev`` characters long (a class attribute of the
    subcommand).
    """
    for name in list(self.subcmds.keys()):
        candidate = self.subcmds[name]
        if (name.startswith(subcmd_prefix)
                and len(subcmd_prefix) >= candidate.__class__.min_abbrev):
            return candidate
    return None
|
Find subcmd in self.subcmds
|
def is_empty_shape(sh: ShExJ.Shape) -> bool:
    """Determine whether sh carries no value in any of its key fields."""
    return all(getattr(sh, attr) is None
               for attr in ('closed', 'expression', 'extra', 'semActs'))
|
Determine whether sh has any value
|
def scan_temperature_old(self, measure, temperature, rate, delay=1):
    """Performs a temperature scan.

    Measures until the target temperature is reached.

    :param measure: A callable called repeatedly until stability at target
        temperature is reached.
    :param temperature: The target temperature in kelvin.
    :param rate: The sweep rate in kelvin per minute.
    :param delay: The time delay between each call to measure in seconds.
    """
    # Stop any sweep in progress before rewriting the sweep table.
    self.activity = 'hold'
    # Clear old sweep table
    self.sweep_table.clear()
    # Use current temperature as target temperature
    # and calculate sweep time.
    current_temperature = self.control_temperature
    sweep_time = abs((temperature - current_temperature) / rate)
    # A two-entry table: ramp to the target over sweep_time, then hold it.
    self.sweep_table[0] = temperature, sweep_time, 0.
    self.sweep_table[-1] = temperature, 0., 0.
    self.activity = 'sweep'
    # NOTE(review): this loop assumes the instrument resets `activity`
    # away from 'sweep' once the sweep completes — confirm with the driver.
    while self.activity == 'sweep':
        measure()
        time.sleep(delay)
|
Performs a temperature scan.
Measures until the target temperature is reached.
:param measure: A callable called repeatedly until stability at target
temperature is reached.
:param temperature: The target temperature in kelvin.
:param rate: The sweep rate in kelvin per minute.
:param delay: The time delay between each call to measure in seconds.
|
def clean_ticker(ticker):
    """
    Clean a ticker for easier use throughout MoneyTree.

    Splits by space and only keeps the first part, removes any
    non-alphanumeric characters (and underscores), and lowercases.

    >>> clean_ticker('^VIX')
    'vix'
    >>> clean_ticker('SPX Index')
    'spx'
    """
    # Raw string: '[\W_]+' in a plain literal is an invalid escape
    # sequence (SyntaxWarning from Python 3.12, error in the future).
    pattern = re.compile(r'[\W_]+')
    return pattern.sub('', ticker.split(' ')[0]).lower()
|
Cleans a ticker for easier use throughout MoneyTree
Splits by space and only keeps first bit. Also removes
any characters that are not letters. Returns as lowercase.
>>> clean_ticker('^VIX')
'vix'
>>> clean_ticker('SPX Index')
'spx'
|
def _elements(self, IDs, func, aspList):
""" Returns the IDs as objects considering the
aspList and the function.
"""
res = []
for asp in aspList:
if (asp in [0, 180]):
# Generate func for conjunctions and oppositions
if func == self.N:
res.extend([func(ID, asp) for ID in IDs])
else:
res.extend([func(ID) for ID in IDs])
else:
# Generate Dexter and Sinister for others
res.extend([self.D(ID, asp) for ID in IDs])
res.extend([self.S(ID, asp) for ID in IDs])
return res
|
Returns the IDs as objects considering the
aspList and the function.
|
def load_agents(self, config_file=None):
    """
    Load all agents for this team from the rlbot.cfg.

    :param config_file: A config file that is similar to rlbot.cfg
    :raises ValueError: if any agent fails to load; the original
        exception is chained for debugging.
    """
    if config_file is not None:
        self.overall_config = config_file
    self.agents.clear()
    num_participants = get_num_players(self.overall_config)
    try:
        for i in range(num_participants):
            self.load_agent(i)
    except BaseException as e:
        # Chain the original exception so its traceback is preserved.
        raise ValueError(
            f"{str(e)}\nPlease check your config files! {self.overall_config_path}") from e
|
Loads all agents for this team from the rlbot.cfg
:param config_file: A config file that is similar to rlbot.cfg
|
def pre_calc(self, x, y, beta, n_order, center_x, center_y):
    """
    Calculate H_n(x) and H_n(y) for the given x- and y-arrays.

    :param x: x-coordinates (array-like)
    :param y: y-coordinates (array-like)
    :param beta: shapelet scale parameter
    :param n_order: maximum polynomial order; must be <= 170 so the
        factorial prefactor stays finite in double precision
    :param center_x: x-coordinate of the center
    :param center_y: y-coordinate of the center
    :return: tuple (H_x, H_y) of arrays with shape (n_order+1, len(x))
    """
    # Validate before allocating anything.
    if n_order > 170:
        # BUG FIX: error message said "to large".
        raise ValueError('polynomial order too large', n_order)
    x_ = x - center_x
    y_ = y - center_y
    # BUG FIX (readability): the point count used to be stored in `n`,
    # which the loop variable below immediately shadowed.
    num_points = len(np.atleast_1d(x))
    H_x = np.empty((n_order + 1, num_points))
    H_y = np.empty((n_order + 1, num_points))
    for n in range(0, n_order + 1):
        # Normalization making the shapelet basis orthonormal.
        prefactor = 1. / np.sqrt(2 ** n * np.sqrt(np.pi) * math.factorial(n))
        n_array = np.zeros(n + 1)
        n_array[n] = 1
        H_x[n] = self.hermval(x_ / beta, n_array, tensor=False) * prefactor * np.exp(-(x_ / beta) ** 2 / 2.)
        H_y[n] = self.hermval(y_ / beta, n_array, tensor=False) * prefactor * np.exp(-(y_ / beta) ** 2 / 2.)
    return H_x, H_y
|
calculates the H_n(x) and H_n(y) for a given x-array and y-array
:param x:
:param y:
:param amp:
:param beta:
:param n_order:
:param center_x:
:param center_y:
:return: list of H_n(x) and H_n(y)
|
def htmldiff_tokens(html1_tokens, html2_tokens):
    """Diff the token streams themselves, returning a list of text chunks
    (not tokens).

    Markup is preferred from the new (second) document; markup from the
    old document is kept only best-effort, and deletions are placed as
    close as possible to where the old text would have appeared in the
    new document.
    """
    # The tokens isolate the content we care to diff; difflib does the
    # actual hard work of aligning the two streams.
    matcher = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
    result = []
    for command, i1, i2, j1, j2 in matcher.get_opcodes():
        if command == 'equal':
            result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
            continue
        if command in ('insert', 'replace'):
            merge_insert(expand_tokens(html2_tokens[j1:j2]), result)
        if command in ('delete', 'replace'):
            merge_delete(expand_tokens(html1_tokens[i1:i2]), result)
    # Deletes were inserted as special markers rather than raw <del>
    # (which would make the document invalid); cleanup_delete moves them
    # into place and resolves any remaining problems.
    return cleanup_delete(result)
|
Does a diff on the tokens themselves, returning a list of text
chunks (not tokens).
|
def _set_mstp(self, v, load=False):
    """
    Setter method for mstp, mapped from YANG variable /protocol/spanning_tree/mstp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mstp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mstp() directly.
    """
    # Auto-generated (pyangbind-style) setter: coerce v into the expected
    # YANG container type, storing it only if validation succeeds.
    if hasattr(v, "_utype"):
        # Unwrap a user-typed value into its base representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=mstp.mstp, is_container='container', presence=True, yang_name="mstp", rest_name="mstp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'callpoint': u'mstp-config', u'info': u'Multiple spanning tree', u'display-when': u'not ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/rpvst) or (/protocol/spanning-tree/pvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a verbose, machine-readable description of the
        # expected generated type.
        raise ValueError({
            'error-string': """mstp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=mstp.mstp, is_container='container', presence=True, yang_name="mstp", rest_name="mstp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-add-mode': None, u'callpoint': u'mstp-config', u'info': u'Multiple spanning tree', u'display-when': u'not ((/protocol/spanning-tree/stp) or (/protocol/spanning-tree/rstp) or (/protocol/spanning-tree/rpvst) or (/protocol/spanning-tree/pvst))'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='container', is_config=True)""",
        })
    self.__mstp = t
    # Notify the containing object of the change, if it supports callbacks.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for mstp, mapped from YANG variable /protocol/spanning_tree/mstp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mstp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mstp() directly.
|
def _get_input_for_run(args, executable, preset_inputs=None, input_name_prefix=None):
    """
    Build the input dictionary that can be passed to executable.run().

    Inputs are layered in order: inputs cloned from a previous execution
    (unless overridden by --input-json/--input-json-file), then
    preset_inputs, then inputs given on the command line.
    """
    # May throw if the executable is a workflow with no input spec
    # available (because a stage is inaccessible).
    exec_inputs = try_call(ExecutableInputs,
                           executable,
                           input_name_prefix=input_name_prefix,
                           active_region=args.region)
    # --input-json and --input-json-file completely override input
    # from the cloned job.
    if args.input_json is None and args.filename is None:
        exec_inputs.update(args.input_from_clone, strip_prefix=False)
    if preset_inputs is not None:
        exec_inputs.update(preset_inputs, strip_prefix=False)
    # If batch_tsv is set, do not prompt for missing arguments.
    require_all_inputs = args.batch_tsv is None
    try_call(exec_inputs.update_from_args, args, require_all_inputs)
    return exec_inputs.inputs
|
Returns an input dictionary that can be passed to executable.run()
|
def _file_filter(cls, filename, include_patterns, exclude_patterns):
    """:returns: `True` if the file should be allowed through the filter.

    A file is rejected if any exclude pattern matches it, or if include
    patterns are given and none of them match.
    """
    logger.debug('filename: {}'.format(filename))
    # any() replaces the manual found-flag loops of the original.
    if any(pattern.match(filename) for pattern in exclude_patterns):
        return False
    if include_patterns and not any(
            pattern.match(filename) for pattern in include_patterns):
        return False
    return True
|
:returns: `True` if the file should be allowed through the filter.
|
def _update_roster(self):
    '''
    Update the default flat roster with the passed-in information.

    Appends a new host entry (host/user/passwd taken from self.opts) to
    the roster file when the update flag is set; logs an error if the
    file is not writable.

    :return: None
    '''
    roster_file = self._get_roster()
    if os.access(roster_file, os.W_OK):
        # Only write when the parsed rosters requested an update.
        if self.__parsed_rosters[self.ROSTER_UPDATE_FLAG]:
            # Append (mode 'a') so existing roster entries are preserved.
            with salt.utils.files.fopen(roster_file, 'a') as roster_fp:
                roster_fp.write('# Automatically added by "{s_user}" at {s_time}\n{hostname}:\n host: '
                                '{hostname}\n user: {user}'
                                '\n passwd: {passwd}\n'.format(s_user=getpass.getuser(),
                                                               s_time=datetime.datetime.utcnow().isoformat(),
                                                               hostname=self.opts.get('tgt', ''),
                                                               user=self.opts.get('ssh_user', ''),
                                                               passwd=self.opts.get('ssh_passwd', '')))
            log.info('The host {0} has been added to the roster {1}'.format(self.opts.get('tgt', ''),
                                                                            roster_file))
    else:
        log.error('Unable to update roster {0}: access denied'.format(roster_file))
|
Update default flat roster with the passed in information.
:return:
|
def files(self):
    """Collect regular, non-hidden files from each root directory.

    Every match is appended to self.filepaths; returns the result of
    self._get_filepaths().
    """
    self._printer('\tFiles Walk')
    for directory in self.directory:
        for name in os.listdir(directory):
            full_path = os.path.join(directory, name)
            # Skip sub-directories and dot-files.
            if os.path.isfile(full_path) and not name.startswith('.'):
                self.filepaths.append(full_path)
    return self._get_filepaths()
|
Return list of files in root directory
|
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
The ``(x, y)`` center position. If `None` it will calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data).astype(float)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
from ..centroids import centroid_com
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.dot(np.transpose(ypowers), data), xpowers)
|
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
The ``(x, y)`` center position. If `None` it will calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
|
def layer_tagger_mapping(self):
    """Dictionary that maps layer names to taggers that can create that layer."""
    return {
        PARAGRAPHS: self.tokenize_paragraphs,
        SENTENCES: self.tokenize_sentences,
        WORDS: self.tokenize_words,
        ANALYSIS: self.tag_analysis,
        TIMEXES: self.tag_timexes,
        NAMED_ENTITIES: self.tag_named_entities,
        CLAUSE_ANNOTATION: self.tag_clause_annotations,
        CLAUSES: self.tag_clauses,
        # NOTE(review): CONLL -> tag_syntax_vislcg3 and VISLCG3 ->
        # tag_syntax_maltparser looks swapped — confirm this pairing
        # is intentional before relying on it.
        LAYER_CONLL: self.tag_syntax_vislcg3,
        LAYER_VISLCG3: self.tag_syntax_maltparser,
        WORDNET: self.tag_wordnet
    }
|
Dictionary that maps layer names to taggers that can create that layer.
|
def verifies(self, hash, signature):
    """Verify that signature is a valid signature of hash.

    Return True if the signature is valid.  Implements the ECDSA
    verification procedure from X9.62 J.3.1.
    """
    G = self.generator
    n = G.order()
    r, s = signature.r, signature.s
    # Both signature components must lie in [1, n-1].
    if not (1 <= r <= n - 1 and 1 <= s <= n - 1):
        return False
    c = numbertheory.inverse_mod(s, n)
    u1 = (hash * c) % n
    u2 = (r * c) % n
    xy = u1 * G + u2 * self.point
    return xy.x() % n == r
|
Verify that signature is a valid signature of hash.
Return True if the signature is valid.
|
def decode_response(client_message, to_object=None):
    """Decode a list-valued response from a client message."""
    item_count = client_message.read_int()
    items = [client_message.read_data() for _ in range(item_count)]
    # Wrap in the lazy list so items are deserialized on first access.
    return dict(response=ImmutableLazyDataList(items, to_object))
|
Decode response from client message
|
def check_version(component, expected_version):
    """Make sure the package version in setuptools matches what we expect it to be.

    :raises EnvironmentError: if the component's version module disagrees
        with expected_version.
    """
    comp = comp_names[component]
    compath = os.path.realpath(os.path.abspath(comp.path))
    sys.path.insert(0, compath)
    try:
        # NOTE(review): `import version` is cached in sys.modules, so a
        # second call for a different component would see the first
        # component's module — confirm callers only check one component
        # per process.
        import version
    finally:
        # BUG FIX: the path was previously left on sys.path, polluting
        # later imports.
        sys.path.remove(compath)
    if version.version != expected_version:
        raise EnvironmentError(
            "Version mismatch during release, expected={}, found={}".format(
                expected_version, version.version))
|
Make sure the package version in setuptools matches what we expect it to be
|
def parse_xml_jtl(self, granularity):
    """
    Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics

    :param string granularity: The time period over which to aggregate and average the raw data. Valid values are 'hour', 'minute' or 'second'
    :return: status of the metric parse
    """
    data = defaultdict(list)
    # processed_data[label][metric][timestamp] -> list of raw values
    processed_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for input_file in self.infile_list:
        logger.info('Processing : %s', input_file)
        timestamp_format = None
        tree = ElementTree.parse(input_file)
        # Both <httpSample> and <sample> elements carry timing data.
        samples = tree.findall('./httpSample') + tree.findall('./sample')
        for sample in samples:
            # Detect the timestamp format once per file; retry while unknown.
            if not timestamp_format or timestamp_format == 'unknown':
                timestamp_format = naarad.utils.detect_timestamp_format(sample.get('ts'))
            if timestamp_format == 'unknown':
                continue
            ts = naarad.utils.get_standardized_timestamp(sample.get('ts'), timestamp_format)
            if ts == -1:
                # Unparseable timestamp; skip this sample.
                continue
            ts = naarad.utils.reconcile_timezones(ts, self.timezone, self.graph_timezone)
            aggregate_timestamp, averaging_factor = self.get_aggregation_timestamp(ts, granularity)
            # Aggregate both per-transaction (label) and overall summaries;
            # 't' is elapsed time and 'by' is bytes, per the JTL schema.
            self.aggregate_count_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], aggregate_timestamp)
            self.aggregate_values_over_time(processed_data, sample, [self._sanitize_label(sample.get('lb')), 'Overall_Summary'], ['t', 'by'], aggregate_timestamp)
        logger.info('Finished parsing : %s', input_file)
    logger.info('Processing metrics for output to csv')
    # NOTE(review): `averaging_factor` is only bound inside the sample loop;
    # if infile_list is empty or contains no valid samples this raises
    # UnboundLocalError — confirm callers guarantee at least one sample.
    self.average_values_for_plot(processed_data, data, averaging_factor)
    logger.info('Writing time series csv')
    for csv in data.keys():
        self.csv_files.append(csv)
        with open(csv, 'w') as csvf:
            csvf.write('\n'.join(sorted(data[csv])))
    logger.info('Processing raw data for stats')
    self.calculate_key_stats(processed_data)
    return True
|
Parse Jmeter workload output in XML format and extract overall and per transaction data and key statistics
:param string granularity: The time period over which to aggregate and average the raw data. Valid values are 'hour', 'minute' or 'second'
:return: status of the metric parse
|
def copy(a):
    """ Copy an array to the shared memory.

    Notes
    -----
    copy is not always necessary because the private memory is always copy-on-write.
    Use :code:`a = copy(a)` to immediately dereference the old 'a' on private memory
    """
    # NOTE: this function name shadows the stdlib `copy` module within
    # this module's namespace.
    shared = anonymousmemmap(a.shape, dtype=a.dtype)
    shared[:] = a[:]
    return shared
|
Copy an array to the shared memory.
Notes
-----
copy is not always necessary because the private memory is always copy-on-write.
Use :code:`a = copy(a)` to immediately dereference the old 'a' on private memory
|
def notUnique(iterable, reportMax=float("inf")):
    """Yield the elements in `iterable` that aren't unique; stop after
    `reportMax` non-unique elements have been found.

    An element is yielded on its second and every subsequent occurrence.

    Examples:

    >>> list(notUnique([1,1,2,2,3,3]))
    [1, 2, 3]
    >>> list(notUnique([1,1,2,2,3,3], 1))
    [1]
    """
    # Renamed from `hash`, which shadowed the builtin; default changed
    # from the module-level INF constant to the equivalent float("inf").
    seen_counts = {}
    reported = 0
    if reportMax < 1:
        raise ValueError("`reportMax` must be >= 1 and is %r" % reportMax)
    for item in iterable:
        seen_counts[item] = seen_counts.get(item, 0) + 1
        if seen_counts[item] > 1:
            yield item
            reported += 1
            if reported >= reportMax:
                return
|
Returns the elements in `iterable` that aren't unique; stops after it found
`reportMax` non-unique elements.
Examples:
>>> list(notUnique([1,1,2,2,3,3]))
[1, 2, 3]
>>> list(notUnique([1,1,2,2,3,3], 1))
[1]
|
def expand(self, other):
    """
    Add all elements from another result to this result's element lists.

    It is used by the auto resolve feature.

    :param other: Expand the result with the elements from this result.
    :type other: overpy.Result
    :raises ValueError: If provided parameter is not instance of :class:`overpy.Result`
    """
    if not isinstance(other, Result):
        raise ValueError("Provided argument has to be instance of overpy:Result()")
    source_collections = {Node: other.nodes, Way: other.ways, Relation: other.relations, Area: other.areas}
    for element_type, own_collection in self._class_collection_map.items():
        for element in source_collections[element_type]:
            # Only merge valid elements that are not already present.
            if is_valid_type(element, element_type) and element.id not in own_collection:
                own_collection[element.id] = element
|
Add all elements from an other result to the list of elements of this result object.
It is used by the auto resolve feature.
:param other: Expand the result with the elements from this result.
:type other: overpy.Result
:raises ValueError: If provided parameter is not instance of :class:`overpy.Result`
|
def print_about(self):
    """Print an info message about the tool."""
    # NOTE: Python 2 code (print statements, iterator .next()).
    filepath = os.path.join(self.suite_path, "bin", self.tool_name)
    print "Tool: %s" % self.tool_name
    print "Path: %s" % filepath
    print "Suite: %s" % self.suite_path
    msg = "%s (%r)" % (self.context.load_path, self.context_name)
    print "Context: %s" % msg
    variants = self.context.get_tool_variants(self.tool_name)
    if variants:
        if len(variants) > 1:
            # Multiple packages provide this tool; report the conflict.
            self._print_conflicting(variants)
        else:
            variant = iter(variants).next()
            print "Package: %s" % variant.qualified_package_name
    # Exit status 0: informational command always succeeds.
    return 0
|
Print an info message about the tool.
|
def star(self, **args):
    '''
    Star any gist by providing gistID or gistname (for the authenticated user).
    '''
    if 'name' in args:
        # Resolve the (unambiguous) gist name to its id first.
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambigious Gistname or any unique Gistid to be starred')
    url = '%s' % BASE_URL + '/gists/%s/star' % self.gist_id
    r = requests.put(url, headers=self.gist.header)
    # GitHub answers 204 No Content on a successful star.
    if r.status_code == 204:
        return {'id': self.gist_id}
    raise Exception('Gist can\'t be starred')
|
star any gist by providing gistID or gistname(for authenticated user)
|
def wallet_republish(self, wallet, count):
    """
    Rebroadcast blocks for accounts from **wallet** starting at frontier
    down to **count** to the network

    .. enable_control required
    .. version 8.0 required

    :param wallet: Wallet to rebroadcast blocks for
    :type wallet: str
    :param count: Max amount of blocks to rebroadcast since frontier block
    :type count: int
    :raises: :py:exc:`nano.rpc.RPCException`

    >>> rpc.wallet_republish(
    ...     wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
    ...     count=2
    ... )
    [
        "991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948",
        "A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293",
        "90D0C16AC92DD35814E84BFBCC739A039615D0A42A76EF44ADAEF1D99E9F8A35"
    ]
    """
    payload = {
        "wallet": self._process_value(wallet, 'wallet'),
        "count": self._process_value(count, 'int'),
    }
    resp = self.call('wallet_republish', payload)
    # Normalize a missing/empty 'blocks' field to an empty list.
    return resp.get('blocks') or []
|
Rebroadcast blocks for accounts from **wallet** starting at frontier
down to **count** to the network
.. enable_control required
.. version 8.0 required
:param wallet: Wallet to rebroadcast blocks for
:type wallet: str
:param count: Max amount of blocks to rebroadcast since frontier block
:type count: int
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_republish(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F",
... count=2
... )
[
"991CF190094C00F0B68E2E5F75F6BEE95A2E0BD93CEAA4A6734DB9F19B728948",
"A170D51B94E00371ACE76E35AC81DC9405D5D04D4CEBC399AEACE07AE05DD293",
"90D0C16AC92DD35814E84BFBCC739A039615D0A42A76EF44ADAEF1D99E9F8A35"
]
|
def UnpackItems(*items, fields=None, defaults=None):
    """
    Build a transformation that unpacks dict-valued row items into flat fields.

    >>> UnpackItems(0)

    :param items: indices of the row items (dicts) to unpack.
    :param fields: optional explicit output field names; when None they
        are derived from the keys of the first row's items.
    :param defaults: optional per-field fallback values for missing keys.
    :return: callable
    """
    defaults = defaults or {}
    @use_context
    @use_raw_input
    def _UnpackItems(context, bag):
        nonlocal fields, items, defaults
        # First row only: derive the output fields from the item keys and
        # register them on the context (fields stays set afterwards).
        if fields is None:
            fields = ()
            for item in items:
                fields += tuple(bag[item].keys())
            context.set_output_fields(fields)
        values = ()
        for item in items:
            # NOTE(review): every item is projected onto the FULL combined
            # field tuple, not just its own keys — with more than one item
            # this repeats/crosses fields; confirm this is intended.
            values += tuple(bag[item].get(field, defaults.get(field)) for field in fields)
        return values
    return _UnpackItems
|
>>> UnpackItems(0)
:param items:
:param fields:
:param defaults:
:return: callable
|
def get_abstracts(self, refresh=True):
    """Return a list of ScopusAbstract objects using ScopusSearch."""
    eids = self.get_document_eids(refresh=refresh)
    return [ScopusAbstract(eid, refresh=refresh) for eid in eids]
|
Return a list of ScopusAbstract objects using ScopusSearch.
|
def deframesig(frames, siglen, frame_len, frame_step, winfunc=lambda x: numpy.ones((x,))):
    """Overlap-add reconstruction: undo the action of framesig.

    :param frames: 2-D array of frames, one frame per row.
    :param siglen: desired output length in samples; pass 0 if unknown (the
        full padded length is returned in that case). Output is truncated to
        siglen samples.
    :param frame_len: frame length measured in samples.
    :param frame_step: hop in samples between the starts of adjacent frames.
    :param winfunc: analysis window generator applied per frame; the default
        (all ones) applies no window.
    :returns: a 1-D signal.
    """
    frame_len = round_half_up(frame_len)
    frame_step = round_half_up(frame_step)
    num_frames = numpy.shape(frames)[0]
    assert numpy.shape(frames)[1] == frame_len, '"frames" matrix is wrong size, 2nd dim is not equal to frame_len'

    # Per-frame sample positions within the padded output signal.
    base = numpy.tile(numpy.arange(0, frame_len), (num_frames, 1))
    offsets = numpy.tile(numpy.arange(0, num_frames * frame_step, frame_step), (frame_len, 1)).T
    positions = numpy.array(base + offsets, dtype=numpy.int32)

    padded_len = (num_frames - 1) * frame_step + frame_len
    if siglen <= 0:
        siglen = padded_len

    signal = numpy.zeros((padded_len,))
    win_norm = numpy.zeros((padded_len,))
    window = winfunc(frame_len)
    for k in range(0, num_frames):
        # Accumulate window energy; the epsilon keeps the divisor nonzero.
        win_norm[positions[k, :]] = win_norm[positions[k, :]] + window + 1e-15
        signal[positions[k, :]] = signal[positions[k, :]] + frames[k, :]

    signal = signal / win_norm
    return signal[0:siglen]
|
Does overlap-add procedure to undo the action of framesig.
:param frames: the array of frames.
:param siglen: the length of the desired signal, use 0 if unknown. Output will be truncated to siglen samples.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:returns: a 1-D signal.
|
def localize(dt, tz):
    """Attach a timezone to the naive datetime *dt* and return the result.

    ``tz`` may be either a ``tzinfo`` instance or a timezone name that
    ``pytz`` can resolve.
    """
    if isinstance(tz, tzinfo):
        return tz.localize(dt)
    return pytz.timezone(tz).localize(dt)
|
Given a naive datetime object this method will return a localized
datetime object
|
def extract_bag_of_words_from_corpus_parallel(corpus, lemmatizing="wordnet"):
    """
    Extract a single bag-of-words from a list of strings, cleaning documents
    in parallel worker processes.

    Inputs:  - corpus: A list of strings.
             - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
    Output:  - bag_of_words: This is a bag-of-words in python dictionary format.
             - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
    """
    ####################################################################################################################
    # Map and reduce document cleaning.
    ####################################################################################################################
    # Build a pool of processes.
    pool = Pool(processes=get_threads_number()*2,)

    # Partition the tweets to chunks. Use integer division (minimum 1) so the
    # chunk size is a valid int under Python 3 as well.
    chunk_size = max(1, len(corpus) // get_threads_number())
    partitioned_corpus = chunks(corpus, chunk_size)

    # Map the cleaning of the tweet corpus to the pool of processes.
    # pool.map returns one (bag_of_words, lemma_map) tuple *per chunk*; use
    # zip(*...) to split them into two parallel sequences.  The previous
    # direct tuple-unpack of the map result only worked when there happened
    # to be exactly two chunks, and even then paired the values incorrectly.
    results = pool.map(partial(clean_corpus_serial, lemmatizing=lemmatizing), partitioned_corpus)
    if results:
        list_of_bags_of_words, list_of_lemma_to_keywordset_maps = zip(*results)
    else:
        list_of_bags_of_words, list_of_lemma_to_keywordset_maps = (), ()

    # Reduce dictionaries to a single dictionary serially.
    bag_of_words = reduce_list_of_bags_of_words(list_of_bags_of_words)

    # Reduce lemma to keyword maps to a single dictionary.
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_to_keywordbag in list_of_lemma_to_keywordset_maps:
        for lemma, keywordbag in lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity

    return bag_of_words, lemma_to_keywordbag_total
|
This extracts one bag-of-words from a list of strings. The documents are mapped to parallel processes.
Inputs: - corpus: A list of strings.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: This is a bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
|
def get_keys(self, lst):
    """
    Return the primary-key value of every object in *lst*.
    """
    pk_attr = self.get_pk_name()
    keys = []
    for obj in lst:
        keys.append(getattr(obj, pk_attr))
    return keys
|
return a list of pk values from object list
|
def _broadcast_item(self, row_lookup, col_lookup, item, to_shape):
"""Use numpy to broadcast or reshape item.
Notes:
- Numpy is memory efficient, there shouldn't be performance issue.
"""
# It is valid to pass a DataFrame or Series to __setitem__ that is larger than
# the target the user is trying to overwrite. This
if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)):
if not all(idx in item.index for idx in row_lookup):
raise ValueError(
"Must have equal len keys and value when setting with "
"an iterable"
)
if hasattr(item, "columns"):
if not all(idx in item.columns for idx in col_lookup):
raise ValueError(
"Must have equal len keys and value when setting "
"with an iterable"
)
item = item.reindex(index=row_lookup, columns=col_lookup)
else:
item = item.reindex(index=row_lookup)
try:
item = np.array(item)
if np.prod(to_shape) == np.prod(item.shape):
return item.reshape(to_shape)
else:
return np.broadcast_to(item, to_shape)
except ValueError:
from_shape = np.array(item).shape
raise ValueError(
"could not broadcast input array from shape {from_shape} into shape "
"{to_shape}".format(from_shape=from_shape, to_shape=to_shape)
)
|
Use numpy to broadcast or reshape item.
Notes:
- Numpy is memory efficient, there shouldn't be performance issue.
|
def fromfilenames(filenames, coltype = int):
    """
    Return a segmentlist describing the intervals spanned by the files
    whose names are given in the list filenames.  The segmentlist is
    constructed by parsing the file names, and the boundaries of each
    segment are coerced to type coltype.

    The file names are parsed using a generalization of the format
    described in Technical Note LIGO-T010150-00-E, which allows the
    start time and duration appearing in the file name to be
    non-integers.

    NOTE:  the output is a segmentlist as described by the file names;
    if the file names are not in time order, or describe overlapping
    segments, then thusly shall be the output of this function.  It is
    recommended that this function's output be coalesced before use.
    """
    pattern = re.compile(r"-([\d.]+)-([\d.]+)\.[\w_+#]+\Z")
    l = segments.segmentlist()
    for name in filenames:
        name = name.strip()
        # Remove an optional ".gz" extension.  The previous implementation
        # used str.rstrip(".gz"), which strips any trailing run of the
        # characters '.', 'g' and 'z' rather than the literal suffix,
        # mangling names whose extension ends in those characters.
        if name.endswith(".gz"):
            name = name[:-len(".gz")]
        [(s, d)] = pattern.findall(name)
        s = coltype(s)
        d = coltype(d)
        l.append(segments.segment(s, s + d))
    return l
|
Return a segmentlist describing the intervals spanned by the files
whose names are given in the list filenames. The segmentlist is
constructed by parsing the file names, and the boundaries of each
segment are coerced to type coltype.
The file names are parsed using a generalization of the format
described in Technical Note LIGO-T010150-00-E, which allows the
start time and duration appearing in the file name to be
non-integers.
NOTE: the output is a segmentlist as described by the file names;
if the file names are not in time order, or describe overlapping
segments, then thusly shall be the output of this function. It is
recommended that this function's output be coalesced before use.
|
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    # entity_name is mandatory: it is used both for logging and (in the
    # vCenter case) to sanity-check the assignment returned by the API.
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # Target the vCenter itself, identified by its instance UUID rather
        # than by a managed-object ID.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # A concrete entity was given: use its managed-object ID directly.
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against the API answering for a different vCenter instance.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
|
Returns the licenses assigned to an entity. If entity ref is not provided,
then entity_name is assumed to be the vcenter. This is later checked if
the entity name is provided.
service_instance
The Service Instance Object from which to obtain the licenses.
entity_ref
VMware entity to get the assigned licenses for.
If None, the entity is the vCenter itself.
Default is None.
entity_name
Entity name used in logging.
Default is None.
license_assignment_manager
The LicenseAssignmentManager object of the service instance.
If not provided it will be retrieved.
Default is None.
|
def calculate_squared_differences(image_tile_dict, transformed_array, template, sq_diff_tolerance=0.1):
    """Return tile positions whose squared-difference score against
    *template* falls within tolerance (counterpart of the correlation-based
    matcher, for the squared-differences matching method).

    The correlation values in *transformed_array* are converted back to a
    sum of squared differences via ``SSD = ||I||^2 - 2*corr + ||T||^2``.

    :param image_tile_dict: mapping of (x, y) positions to image tiles.
    :param transformed_array: cross-correlation of the image with the template.
    :param template: template array of shape (h, w).
    :param sq_diff_tolerance: fraction of the maximum possible SSD
        (h*w*255**2) accepted as a match.
    :return: list of matching (x, y) positions.
    """
    template_norm_squared = np.sum(template**2)
    # Materialize the keys once: dict views are not indexable under
    # Python 3, which broke the original positional-index implementation.
    match_points = list(image_tile_dict.keys())
    image_norms_squared = {pt: np.sum(image_tile_dict[pt]**2) for pt in match_points}
    h, w = template.shape
    # Recover the squared difference from the correlation value.
    image_matches_normalised = {
        pt: -2 * transformed_array[pt[0], pt[1]] + image_norms_squared[pt] + template_norm_squared
        for pt in match_points
    }
    cutoff = h * w * 255**2 * sq_diff_tolerance
    return [pt for pt, value in image_matches_normalised.items()
            if np.round(value, decimals=3) <= cutoff]
|
As above, but for when the squared differences matching method is used
|
def component_activated(self, component):
    """Initialize additional member variables for components.

    Every component activated through the `Environment` object
    gets an additional member variable: `env` (the environment object)
    """
    # Attach this environment to the component before delegating to the
    # base-class activation hook.
    component.env = self
    super(Environment, self).component_activated(component)
|
Initialize additional member variables for components.
Every component activated through the `Environment` object
gets an additional member variable: `env` (the environment object)
|
def load_table_from_uri(
    self,
    source_uris,
    destination,
    job_id=None,
    job_id_prefix=None,
    location=None,
    project=None,
    job_config=None,
    retry=DEFAULT_RETRY,
):
    """Start a job that loads data into a table from Cloud Storage.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load

    Arguments:
        source_uris (Union[str, Sequence[str]]):
            URIs of data files to be loaded; in format
            ``gs://<bucket_name>/<object_name_or_glob>``.
        destination (Union[ \
            :class:`~google.cloud.bigquery.table.Table`, \
            :class:`~google.cloud.bigquery.table.TableReference`, \
            str, \
        ]):
            Table into which data is to be loaded. If a string is passed
            in, this method attempts to create a table reference from a
            string using
            :func:`google.cloud.bigquery.table.TableReference.from_string`.

    Keyword Arguments:
        job_id (str): (Optional) Name of the job.
        job_id_prefix (str):
            (Optional) the user-provided prefix for a randomly generated
            job ID. This parameter will be ignored if a ``job_id`` is
            also given.
        location (str):
            Location where to run the job. Must match the location of the
            destination table.
        project (str):
            Project ID of the project of where to run the job. Defaults
            to the client's project.
        job_config (google.cloud.bigquery.job.LoadJobConfig):
            (Optional) Extra configuration options for the job.
        retry (google.api_core.retry.Retry):
            (Optional) How to retry the RPC.

    Returns:
        google.cloud.bigquery.job.LoadJob: A new load job.
    """
    # Normalize a single URI into the list form the API expects.
    if isinstance(source_uris, six.string_types):
        source_uris = [source_uris]
    destination = _table_arg_to_table_ref(destination, default_project=self.project)

    # Fall back to the client's defaults when not given explicitly.
    effective_project = project if project is not None else self.project
    effective_location = location if location is not None else self.location

    job_ref = job._JobReference(
        _make_job_id(job_id, job_id_prefix),
        project=effective_project,
        location=effective_location,
    )
    load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config)
    load_job._begin(retry=retry)
    return load_job
|
Starts a job for loading data into a table from CloudStorage.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load
Arguments:
source_uris (Union[str, Sequence[str]]):
URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
destination (Union[ \
:class:`~google.cloud.bigquery.table.Table`, \
:class:`~google.cloud.bigquery.table.TableReference`, \
str, \
]):
Table into which data is to be loaded. If a string is passed
in, this method attempts to create a table reference from a
string using
:func:`google.cloud.bigquery.table.TableReference.from_string`.
Keyword Arguments:
job_id (str): (Optional) Name of the job.
job_id_prefix (str):
(Optional) the user-provided prefix for a randomly generated
job ID. This parameter will be ignored if a ``job_id`` is
also given.
location (str):
Location where to run the job. Must match the location of the
destination table.
project (str):
Project ID of the project of where to run the job. Defaults
to the client's project.
job_config (google.cloud.bigquery.job.LoadJobConfig):
(Optional) Extra configuration options for the job.
retry (google.api_core.retry.Retry):
(Optional) How to retry the RPC.
Returns:
google.cloud.bigquery.job.LoadJob: A new load job.
|
def _deriv_growth(z, **cosmo):
    """Return the derivative of the linear growth factor at redshift *z*
    for a given cosmology **cosmo.

    :param z: redshift.
    :param cosmo: cosmology parameters; must include 'omega_M_0' and
        'omega_lambda_0'.
    :return: derivative of the normalized growth factor at z.
    """
    inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)
    fz = (1 + z) * inv_h**3
    # Evaluate the (expensive) growth factor once instead of twice.
    growth = growthfactor(z, norm=True, **cosmo)
    deriv_g = growth*(inv_h**2) *\
        1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\
        fz * growth/_int_growth(z, **cosmo)
    return(deriv_g)
|
Returns derivative of the linear growth factor at z
for a given cosmology **cosmo
|
def Advertise(port, stype="SCOOP", sname="Broker", advertisername="Broker",
              location=""):
    """Start and return a minusconf advertiser for a SCOOP broker.

    stype = always SCOOP
    port = comma separated ports
    sname = broker unique name
    location = routable location (ip or dns)
    """
    scoop.logger.info("Launching advertiser...")
    broker_service = minusconf.Service(stype, port, sname, location)
    thread_advertiser = minusconf.ThreadAdvertiser([broker_service], advertisername)
    thread_advertiser.start()
    scoop.logger.info("Advertiser launched.")
    return thread_advertiser
|
stype = always SCOOP
port = comma separated ports
sname = broker unique name
location = routable location (ip or dns)
|
def stream(identifier=None, priority=LOG_INFO, level_prefix=False):
    r"""Return a file object wrapping a stream to journal.

    Log messages written to this file as simple newline separated text strings
    are written to the journal.

    The file will be line buffered, so messages are actually sent after a
    newline character is written.

    >>> from systemd import journal
    >>> stream = journal.stream('myapp')                        # doctest: +SKIP
    >>> res = stream.write('message...\n')                      # doctest: +SKIP

    will produce the following message in the journal::

      PRIORITY=7
      SYSLOG_IDENTIFIER=myapp
      MESSAGE=message...

    If identifier is None, a suitable default based on sys.argv[0] will be used.

    This interface can be used conveniently with the print function:

    >>> from __future__ import print_function
    >>> stream = journal.stream()                               # doctest: +SKIP
    >>> print('message...', file=stream)                        # doctest: +SKIP

    priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
    `LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.

    level_prefix is a boolean. If true, kernel-style log priority level prefixes
    (such as '<1>') are interpreted. See sd-daemon(3) for more information.
    """
    if identifier is None:
        # Fall back to 'python' when argv is empty, blank, or the '-c' form.
        argv0 = _sys.argv[0] if _sys.argv else ''
        identifier = argv0 if argv0 and argv0 != '-c' else 'python'
    fd = stream_fd(identifier, priority, level_prefix)
    # Line-buffered text wrapper, so every newline flushes a journal entry.
    return _os.fdopen(fd, 'w', 1)
|
r"""Return a file object wrapping a stream to journal.
Log messages written to this file as simple newline separated text strings
are written to the journal.
The file will be line buffered, so messages are actually sent after a
newline character is written.
>>> from systemd import journal
>>> stream = journal.stream('myapp') # doctest: +SKIP
>>> res = stream.write('message...\n') # doctest: +SKIP
will produce the following message in the journal::
PRIORITY=7
SYSLOG_IDENTIFIER=myapp
MESSAGE=message...
If identifier is None, a suitable default based on sys.argv[0] will be used.
This interface can be used conveniently with the print function:
>>> from __future__ import print_function
>>> stream = journal.stream() # doctest: +SKIP
>>> print('message...', file=stream) # doctest: +SKIP
priority is the syslog priority, one of `LOG_EMERG`, `LOG_ALERT`,
`LOG_CRIT`, `LOG_ERR`, `LOG_WARNING`, `LOG_NOTICE`, `LOG_INFO`, `LOG_DEBUG`.
level_prefix is a boolean. If true, kernel-style log priority level prefixes
(such as '<1>') are interpreted. See sd-daemon(3) for more information.
|
def _HandleLegacy(self, args, token=None):
    """Creates a new hunt.

    Only generic hunts are created via /hunts/create requests.  The hunt is
    always created in the paused state; starting it requires separate
    permissions.

    Args:
      args: request arguments carrying the flow name/args, hunt runner args
        and optional original hunt/flow references.
      token: access token of the requesting user.

    Returns:
      An ApiHunt initialized from the newly created hunt object.

    Raises:
      ValueError: if both original_hunt and original_flow are set.
    """
    # We only create generic hunts with /hunts/create requests.
    generic_hunt_args = rdf_hunts.GenericHuntArgs()
    generic_hunt_args.flow_runner_args.flow_name = args.flow_name
    generic_hunt_args.flow_args = args.flow_args
    # Clear all fields marked with HIDDEN, except for output_plugins - they are
    # marked HIDDEN, because we have a separate UI for them, not because they
    # shouldn't be shown to the user at all.
    #
    # TODO(user): Refactor the code to remove the HIDDEN label from
    # HuntRunnerArgs.output_plugins.
    args.hunt_runner_args.ClearFieldsWithLabel(
        rdf_structs.SemanticDescriptor.Labels.HIDDEN,
        exceptions="output_plugins")
    args.hunt_runner_args.hunt_name = standard.GenericHunt.__name__
    # A hunt may be a copy of either a hunt or a flow, but never both.
    if args.original_hunt and args.original_flow:
      raise ValueError(
          "A hunt can't be a copy of a flow and a hunt at the same time.")
    if args.original_hunt:
      ref = rdf_hunts.FlowLikeObjectReference.FromHuntId(
          utils.SmartStr(args.original_hunt.hunt_id))
      args.hunt_runner_args.original_object = ref
    elif args.original_flow:
      ref = rdf_hunts.FlowLikeObjectReference.FromFlowIdAndClientId(
          utils.SmartStr(args.original_flow.flow_id),
          utils.SmartStr(args.original_flow.client_id))
      args.hunt_runner_args.original_object = ref
    # Anyone can create the hunt but it will be created in the paused
    # state. Permissions are required to actually start it.
    with implementation.StartHunt(
        runner_args=args.hunt_runner_args, args=generic_hunt_args,
        token=token) as hunt_obj:
      # Nothing really to do here - hunts are always created in the paused
      # state.
      logging.info("User %s created a new %s hunt (%s)", token.username,
                   hunt_obj.args.flow_runner_args.flow_name, hunt_obj.urn)
      return ApiHunt().InitFromAff4Object(hunt_obj, with_full_summary=True)
|
Creates a new hunt.
|
def ftr_get_config(website_url, exact_host_match=False):
    """ Download the Five Filters config from centralized repositories.

    Repositories can be local if you need to override siteconfigs.

    The first entry found is returned. If no configuration is found,
    `None` is returned. If :mod:`cacheops` is installed, the result will
    be cached with a default expiration delay of 3 days.

    :param exact_host_match: If ``False`` (default), we will look for
        wildcard config matches. For example if host is
        ``www.test.example.org``, we will try looking up
        ``test.example.org`` and ``example.org``.
    :param exact_host_match: bool
    :param website_url: either a full web URI (eg.
        ``http://www.website.com:PORT/path/to/a/page.html``) or simply
        a domain name (eg. ``www.website.com``). In case of a domain name,
        no check is performed yet, be careful of what you pass.
    :type website_url: str or unicode
    :returns: tuple -- the loaded site config (as unicode string) and
        the hostname matched (unicode string too).
    :raises: :class:`SiteConfigNotFound` if no config could be found.

    .. note:: Whatever ``exact_host_match`` value is, the ``www`` part is
        always removed from the URL or domain name.

    .. todo:: there is currently no merging/cascading of site configs. In
        the original Five Filters implementation, primary and secondary
        configurations were merged. We could eventually re-implement this
        part if needed by someone. PRs welcome as always.
    """
    def check_requests_result(result):
        # Default the header to an empty string: a missing content-type
        # header would otherwise make the `in` test raise TypeError.
        return (
            u'text/plain' in result.headers.get('content-type', u'')
            and u'<!DOCTYPE html>' not in result.text
            and u'<html ' not in result.text
            and u'</html>' not in result.text
        )

    repositories = [
        x.strip() for x in os.environ.get(
            'PYTHON_FTR_REPOSITORIES',
            os.path.expandvars(u'${HOME}/sources/ftr-site-config') + u' '
            + u'https://raw.githubusercontent.com/1flow/ftr-site-config/master/ '  # NOQA
            + u'https://raw.githubusercontent.com/fivefilters/ftr-site-config/master/'  # NOQA
        ).split() if x.strip() != u'']

    try:
        proto, host_and_port, remaining = split_url(website_url)
    except Exception:
        # Not a full URL - treat the argument as a bare domain name.  (The
        # previous bare `except:` also caught KeyboardInterrupt/SystemExit.)
        host_and_port = website_url

    host_domain_parts = host_and_port.split(u'.')
    # we don't store / use the “www.” part of domain name in siteconfig.
    if host_domain_parts[0] == u'www':
        host_domain_parts = host_domain_parts[1:]

    if exact_host_match:
        domain_names = [u'.'.join(host_domain_parts)]
    else:
        # Build the wildcard lookup chain, most specific domain first.
        domain_names = [
            u'.'.join(host_domain_parts[-i:])
            for i in reversed(range(2, len(host_domain_parts) + 1))
        ]

    LOGGER.debug(u'Gathering configurations for domains %s from %s.',
                 domain_names, repositories)

    for repository in repositories:
        # try, in turn:
        #   website.ext.txt
        #   .website.ext.txt
        for domain_name in domain_names:
            skip_repository = False
            for txt_siteconfig_name in (
                u'{0}.txt'.format(domain_name),
                u'.{0}.txt'.format(domain_name),
            ):
                if repository.startswith('http'):
                    siteconfig_url = repository + txt_siteconfig_name
                    result = requests.get(siteconfig_url)
                    if result.status_code == requests.codes.ok:
                        if not check_requests_result(result):
                            LOGGER.error(u'“%s” repository URL does not '
                                         u'return text/plain results.',
                                         repository)
                            skip_repository = True
                            break
                        LOGGER.info(u'Using remote siteconfig for domain '
                                    u'%s from %s.', domain_name,
                                    siteconfig_url, extra={
                                        'siteconfig': domain_name})
                        return result.text, txt_siteconfig_name[:-4]
                else:
                    filename = os.path.join(repository, txt_siteconfig_name)
                    if os.path.exists(filename):
                        LOGGER.info(u'Using local siteconfig for domain '
                                    u'%s from %s.', domain_name,
                                    filename, extra={
                                        'siteconfig': domain_name})
                        with codecs.open(filename, 'rb', encoding='utf8') as f:
                            return f.read(), txt_siteconfig_name[:-4]
            if skip_repository:
                break
        if skip_repository:
            break

    raise SiteConfigNotFound(
        u'No configuration found for domains {0} in repositories {1}'.format(
            u', '.join(domain_names), u', '.join(repositories)
        )
    )
|
Download the Five Filters config from centralized repositories.
Repositories can be local if you need to override siteconfigs.
The first entry found is returned. If no configuration is found,
`None` is returned. If :mod:`cacheops` is installed, the result will
be cached with a default expiration delay of 3 days.
:param exact_host_match: If ``False`` (default), we will look for
wildcard config matches. For example if host is
``www.test.example.org``, we will try looking up
``test.example.org`` and ``example.org``.
:param exact_host_match: bool
:param website_url: either a full web URI (eg.
``http://www.website.com:PORT/path/to/a/page.html``) or simply
a domain name (eg. ``www.website.com``). In case of a domain name,
no check is performed yet, be careful of what you pass.
:type website_url: str or unicode
:returns: tuple -- the loaded site config (as unicode string) and
the hostname matched (unicode string too).
:raises: :class:`SiteConfigNotFound` if no config could be found.
.. note:: Whatever ``exact_host_match`` value is, the ``www`` part is
always removed from the URL or domain name.
.. todo:: there is currently no merging/cascading of site configs. In
the original Five Filters implementation, primary and secondary
configurations were merged. We could eventually re-implement this
part if needed by someone. PRs welcome as always.
|
def gdf_to_geojson(gdf, date_format='epoch', properties=None, filename=None):
    """Serialize a GeoPandas dataframe to a geojson format Python dictionary / file.

    :param gdf: the GeoDataFrame to serialize.
    :param date_format: how date/datetime columns should be stringified.
    :param properties: optional list of property columns to keep alongside
        the geometry column.
    :param filename: if given, write the GeoJSON there and return None;
        otherwise return the parsed GeoJSON dict.
    """
    # convert dates/datetimes to preferred string format if specified
    gdf = convert_date_columns(gdf, date_format)
    # Parenthesize the fallback: the previous `['geometry'] + properties or []`
    # evaluated `['geometry'] + None` first and raised TypeError whenever
    # `properties` was omitted.
    gdf_out = gdf[['geometry'] + (properties or [])]
    geojson_str = gdf_out.to_json()
    if filename:
        with codecs.open(filename, "w", "utf-8-sig") as f:
            f.write(geojson_str)
        return None
    else:
        return json.loads(geojson_str)
|
Serialize a GeoPandas dataframe to a geojson format Python dictionary / file
|
def event_filter_type(self, event_filter_type):
    """Sets the event_filter_type of this Dashboard.

    How charts belonging to this dashboard should display events. BYCHART is default if unspecified  # noqa: E501

    :param event_filter_type: The event_filter_type of this Dashboard.  # noqa: E501
    :type: str
    """
    allowed_values = ["BYCHART", "AUTOMATIC", "ALL", "NONE", "BYDASHBOARD", "BYCHARTANDDASHBOARD"]  # noqa: E501
    # Validate against the closed set of accepted modes before storing.
    if event_filter_type in allowed_values:
        self._event_filter_type = event_filter_type
    else:
        raise ValueError(
            "Invalid value for `event_filter_type` ({0}), must be one of {1}"  # noqa: E501
            .format(event_filter_type, allowed_values)
        )
|
Sets the event_filter_type of this Dashboard.
How charts belonging to this dashboard should display events. BYCHART is default if unspecified # noqa: E501
:param event_filter_type: The event_filter_type of this Dashboard. # noqa: E501
:type: str
|
def compile_insert_get_id(self, query, values, sequence=None):
    """
    Compile an insert-and-return-ID statement into SQL.

    :param query: A QueryBuilder instance
    :type query: QueryBuilder

    :param values: The values to insert
    :type values: dict

    :param sequence: The id sequence
    :type sequence: str

    :return: The compiled statement
    :rtype: str
    """
    insert_sql = self.compile_insert(query, values)
    # Default the returned column to the conventional "id" sequence.
    id_column = self.wrap(sequence if sequence is not None else "id")
    return "%s RETURNING %s" % (insert_sql, id_column)
|
Compile an insert and get ID statement into SQL.
:param query: A QueryBuilder instance
:type query: QueryBuilder
:param values: The values to insert
:type values: dict
:param sequence: The id sequence
:type sequence: str
:return: The compiled statement
:rtype: str
|
def import_from_string(value):
    """Copy of rest_framework.settings.import_from_string"""
    # Dashes are normalized to underscores before the dotted-path lookup.
    normalized = value.replace('-', '_')
    try:
        module_path, attr_name = normalized.rsplit('.', 1)
        return getattr(import_module(module_path), attr_name)
    except (ImportError, AttributeError) as ex:
        raise ImportError("Could not import '{}'. {}: {}.".format(
            normalized, ex.__class__.__name__, ex))
|
Copy of rest_framework.settings.import_from_string
|
def delete(self, url: StrOrURL, **kwargs: Any) -> '_RequestContextManager':
    """Perform HTTP DELETE request.

    Returns a request context manager wrapping the pending request; any
    extra keyword arguments are forwarded to the underlying request call.
    """
    return _RequestContextManager(
        self._request(hdrs.METH_DELETE, url,
                      **kwargs))
|
Perform HTTP DELETE request.
|
def packetToDict(pkt):
    """
    Given a packet, this turns it into a dictionary ... is this useful?

    in: packet, array of numbers
    out: dictionary (key, value)
    """
    packet_id = pkt[4]
    instruction = xl320.InstrToStr[pkt[7]]
    # Length is transmitted little-endian across bytes 5 and 6.
    length = (pkt[6] << 8) + pkt[5]
    return {
        'id': packet_id,
        'instruction': instruction,
        'length': length,
        'params': pkt[8:-2],
        'crc': pkt[-2:]
    }
|
Given a packet, this turns it into a dictionary ... is this useful?
in: packet, array of numbers
out: dictionary (key, value)
|
def rpc_get_pydoc_documentation(self, symbol):
    """Get the Pydoc documentation for the given symbol.

    Uses pydoc and can return a string with backspace characters
    for bold highlighting.  Returns None when the symbol cannot be
    resolved/imported.
    """
    try:
        doc = pydoc.render_doc(str(symbol),
                               "Elpy Pydoc Documentation for %s",
                               False)
    except (ImportError, pydoc.ErrorDuringImport):
        return None
    if isinstance(doc, bytes):
        doc = doc.decode("utf-8", "replace")
    return doc
|
Get the Pydoc documentation for the given symbol.
Uses pydoc and can return a string with backspace characters
for bold highlighting.
|
def get_most_recent_event(self, originator_id, lt=None, lte=None):
    """
    Get the domain event at the highest position in the sequence
    identified by `originator_id`, or None if there is none.

    :param originator_id: ID of a sequence of events
    :param lt: get highest before this position
    :param lte: get highest at or before this position
    :return: domain event
    """
    events = self.get_domain_events(originator_id=originator_id, lt=lt, lte=lte, limit=1, is_ascending=False)
    # Descending order with limit=1 means the first item (if any) is the
    # most recent event.
    return next(iter(events), None)
|
Gets a domain event from the sequence identified by `originator_id`
at the highest position.
:param originator_id: ID of a sequence of events
:param lt: get highest before this position
:param lte: get highest at or before this position
:return: domain event
|
def _subclass_must_implement(self, fn):
"""
Returns a NotImplementedError for a function that should be implemented.
:param fn: name of the function
"""
m = "Missing function implementation in {}: {}".format(type(self), fn)
return NotImplementedError(m)
|
Returns a NotImplementedError for a function that should be implemented.
:param fn: name of the function
|
def load(source, triples=False, cls=PENMANCodec, **kwargs):
    """
    Deserialize a list of PENMAN-encoded graphs from *source*.

    Args:
        source: a filename or file-like object to read from
        triples: if True, read graphs as triples instead of as PENMAN
        cls: serialization codec class
        kwargs: keyword arguments passed to the constructor of *cls*
    Returns:
        a list of Graph objects
    """
    decode = cls(**kwargs).iterdecode
    # A file-like source is read directly; anything else is treated as a
    # filename to open.
    if not hasattr(source, 'read'):
        with open(source) as fh:
            return list(decode(fh.read()))
    return list(decode(source.read()))
|
Deserialize a list of PENMAN-encoded graphs from *source*.
Args:
source: a filename or file-like object to read from
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
|
def send(self, data, room=None, skip_sid=None, namespace=None,
         callback=None):
    """Send a message to the server.

    The only difference with the :func:`socketio.Client.send` method is
    that when the ``namespace`` argument is not given the namespace
    associated with the class is used.

    NOTE(review): ``room`` and ``skip_sid`` are accepted but not forwarded
    to the client call below — presumably kept for signature compatibility
    with the server-side namespace API; confirm before relying on them.
    """
    return self.client.send(data, namespace=namespace or self.namespace,
                            callback=callback)
|
Send a message to the server.
The only difference with the :func:`socketio.Client.send` method is
that when the ``namespace`` argument is not given the namespace
associated with the class is used.
|
def groups_moderators(self, room_id=None, group=None, **kwargs):
    """Lists all moderators of a group.

    Either ``room_id`` or ``group`` (room name) must be supplied.
    """
    # Guard-clause style: prefer the room id, fall back to the room name.
    if room_id:
        return self.__call_api_get('groups.moderators', roomId=room_id, kwargs=kwargs)
    if group:
        return self.__call_api_get('groups.moderators', roomName=group, kwargs=kwargs)
    raise RocketMissingParamException('roomId or group required')
|
Lists all moderators of a group.
|
def create_user(self, user):
    """
    Creates a new user.

    :param user: The user object to be created.
    :type user: ``dict``
    """
    # Serialize the request body up front, then POST it in a single call.
    payload = json.dumps(self._create_user_dict(user=user))
    return self._perform_request(
        url='/um/users', method='POST', data=payload)
|
Creates a new user.
:param user: The user object to be created.
:type user: ``dict``
|
def instantiate_by_name_with_default(self, object_name, default_value=None):
    """Instantiate an object from the environment, caching the result.

    Returns the cached instance when one exists, *default_value* when the
    name is unknown to the environment, and otherwise builds, caches and
    returns a fresh instance.
    """
    # Fast path: already instantiated.
    if object_name in self.instances:
        return self.instances[object_name]
    # Unknown name: caller-provided fallback.
    if object_name not in self.environment:
        return default_value
    instance = self.instantiate_from_data(self.environment[object_name])
    self.instances[object_name] = instance
    return instance
|
Instantiate object from the environment, possibly giving some extra arguments
|
def importSNPs(name) :
    """Import a SNP set shipped with pyGeno. Most of the datawraps only contain URLs towards data provided by third parties.

    :param name: filename of the bundled SNP datawrap, looked up under
        ``bootstrap_data/SNPs/`` relative to the package directory
    """
    # Resolve the datawrap path inside the installed package, then delegate.
    path = os.path.join(this_dir, "bootstrap_data", "SNPs/" + name)
    PS.importSNPs(path)
|
Import a SNP set shipped with pyGeno. Most of the datawraps only contain URLs towards data provided by third parties.
|
def extract_path_info(
    environ_or_baseurl,
    path_or_url,
    charset="utf-8",
    errors="werkzeug.url_quote",
    collapse_http_schemes=True,
):
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a unicode string, not a bytestring
    suitable for a WSGI environment. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    u'/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
                               base IRI.  This is the root of the
                               application.
    :param path_or_url: an absolute path from the server root, a
                        relative path (in which case it's the path info)
                        or a full URL.  Also accepts IRIs and unicode
                        parameters.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
                                  not assume that http and https on the
                                  same server point to the same
                                  resource.

    .. versionchanged:: 0.15
        The ``errors`` parameter defaults to leaving invalid bytes
        quoted instead of replacing them.

    .. versionadded:: 0.6
    """
    def _normalize_netloc(scheme, netloc):
        # Strip any userinfo ("user:pass@") and drop the port when it is
        # the scheme default (80/http, 443/https) so that equivalent
        # network locations compare equal below.
        parts = netloc.split(u"@", 1)[-1].split(u":", 1)
        if len(parts) == 2:
            netloc, port = parts
            if (scheme == u"http" and port == u"80") or (
                scheme == u"https" and port == u"443"
            ):
                port = None
        else:
            netloc = parts[0]
            port = None
        if port is not None:
            netloc += u":" + port
        return netloc
    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    # Join the path against the base so relative paths resolve correctly.
    cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3]
    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        # http and https are treated as interchangeable here.
        for scheme in base_scheme, cur_scheme:
            if scheme not in (u"http", u"https"):
                return None
    else:
        # Schemes must match exactly (and still be HTTP-ish).
        if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme):
            return None
    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None
    # are we below the application path?
    base_path = base_path.rstrip(u"/")
    if not cur_path.startswith(base_path):
        return None
    # Remaining suffix after the application root, always "/"-prefixed.
    return u"/" + cur_path[len(base_path) :].lstrip(u"/")
|
Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
.. versionchanged:: 0.15
The ``errors`` parameter defaults to leaving invalid bytes
quoted instead of replacing them.
.. versionadded:: 0.6
|
def deactivate_user(query):
    """
    Deactivate a user.
    """
    user = _query_to_user(query)
    # Bail out early unless the operator confirms.
    if not click.confirm(f'Are you sure you want to deactivate {user!r}?'):
        click.echo('Cancelled.')
        return
    user.active = False
    user_manager.save(user, commit=True)
    click.echo(f'Successfully deactivated {user!r}')
|
Deactivate a user.
|
def dynamize_attribute_updates(self, pending_updates):
    """
    Convert a set of pending item updates into the structure
    required by Layer1.
    """
    result = {}
    for attr_name, (action, value) in pending_updates.items():
        if value is None:
            # A DELETE action carries no attribute value.
            result[attr_name] = {"Action": action}
        else:
            result[attr_name] = {"Action": action,
                                 "Value": self.dynamize_value(value)}
    return result
|
Convert a set of pending item updates into the structure
required by Layer1.
|
def query_raw(self, metric, **kwargs):  # noqa: E501
    """Perform a raw data query against Wavefront servers that returns second granularity points grouped by tags  # noqa: E501

    An API to check if ingested points are as expected. Points ingested
    within a single second are averaged when returned.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.query_raw(metric, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str metric: metric to query ingested points for (cannot contain wildcards) (required)
    :param str host: host to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
    :param str source: source to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
    :param int start_time: start time in epoch milliseconds (cannot be more than a day in the past) null to use an hour before endTime
    :param int end_time: end time in epoch milliseconds (cannot be more than a day in the past) null to use now
    :return: list[RawTimeseries]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant; when async_req is set the call yields the request thread
    # instead of the unwrapped data, so a single return covers both.
    return self.query_raw_with_http_info(metric, **kwargs)  # noqa: E501
|
Perform a raw data query against Wavefront servers that returns second granularity points grouped by tags # noqa: E501
An API to check if ingested points are as expected. Points ingested within a single second are averaged when returned. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.query_raw(metric, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str metric: metric to query ingested points for (cannot contain wildcards) (required)
:param str host: host to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
:param str source: source to query ingested points for (cannot contain wildcards). host or source is equivalent, only one should be used.
:param int start_time: start time in epoch milliseconds (cannot be more than a day in the past) null to use an hour before endTime
:param int end_time: end time in epoch milliseconds (cannot be more than a day in the past) null to use now
:return: list[RawTimeseries]
If the method is called asynchronously,
returns the request thread.
|
def load_interfaces(self, interfaces_dict):
    """
    Populates the namespace under the instance from a
    ``GetSupportedAPIList`` response.

    :param interfaces_dict: parsed JSON response containing
        ``apilist.interfaces``
    :raises ValueError: if the response is malformed or lists no interfaces
    """
    if interfaces_dict.get('apilist', {}).get('interfaces', None) is None:
        raise ValueError("Invalid response for GetSupportedAPIList")
    interfaces = interfaces_dict['apilist']['interfaces']
    if len(interfaces) == 0:
        # Fixed error message (previously read "returned not interfaces").
        raise ValueError("API returned no interfaces; probably using an invalid key")
    # clear existing interface instances
    for interface in self.interfaces:
        delattr(self, interface.name)
    self.interfaces = []
    # create interface instances from response and expose each as an attribute
    for interface in interfaces:
        obj = WebAPIInterface(interface, parent=self)
        self.interfaces.append(obj)
        setattr(self, obj.name, obj)
|
Populates the namespace under the instance
|
def to_dict(self, index=0):
    """
    Dict format for use in Javascript / Jason Chuang's display technology.

    :param index: pre-order index assigned to the previous node; this node
        takes ``index + 1``.
    :return: tuple of (representation dict, last index used in this subtree)
    """
    index += 1
    rep = {}
    rep["index"] = index
    rep["leaf"] = len(self.children) == 0
    rep["depth"] = self.udepth
    rep["scoreDistr"] = [0.0] * len(LabeledTree.SCORE_MAPPING)
    # dirac distribution at correct label
    if self.label is not None:
        rep["scoreDistr"][self.label] = 1.0
        mapping = LabeledTree.SCORE_MAPPING[:]
        rep["rating"] = mapping[self.label] - min(mapping)
    # if you are using this method for printing predictions
    # from a model, then the dot product with the model's output
    # distribution should be taken with this list:
    rep["numChildren"] = len(self.children)
    text = self.text if self.text is not None else ""
    seen_tokens = 0
    witnessed_pixels = 0
    for i, child in enumerate(self.children):
        if i > 0:
            text += " "
        child_key = "child%d" % (i)
        (rep[child_key], index) = child.to_dict(index)
        text += rep[child_key]["text"]
        seen_tokens += rep[child_key]["tokens"]
        witnessed_pixels += rep[child_key]["pixels"]
    rep["text"] = text
    rep["tokens"] = 1 if (self.text is not None and len(self.text) > 0) else seen_tokens
    # Precedence note: this is (witnessed_pixels + 3) when there are children.
    rep["pixels"] = witnessed_pixels + 3 if len(self.children) > 0 else text_size(self.text)
    return (rep, index)
|
Dict format for use in Javascript / Jason Chuang's display technology.
|
def featureCounts_chart (self):
    """ Make the featureCounts assignment rates plot """
    # Bar-graph configuration for the assignments plot.
    plot_config = {
        'id': 'featureCounts_assignment_plot',
        'title': 'featureCounts: Assignments',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
    }
    return bargraph.plot(self.featurecounts_data, self.featurecounts_keys, plot_config)
|
Make the featureCounts assignment rates plot
|
def _run_server(self, multiprocessing):
    """Use server multiprocessing to extract PCAP files.

    Spawns one collating server process plus a pool of worker processes
    (bounded by ``CPU_CNT - 1``), each extracting one frame at a time.

    :param multiprocessing: the ``multiprocessing`` module itself, injected
        by the caller
    :raises UnsupportedCall: if this extractor's engine does not support
        multiprocessing
    """
    if not self._flag_m:
        raise UnsupportedCall(f"Extractor(engine={self._exeng})' has no attribute '_run_server'")
    if not self._flag_q:
        # Pipeline/server engines cannot write an output file; force quiet mode.
        self._flag_q = True
        warnings.warn("'Extractor(engine=pipeline)' does not support output; "
                      f"'fout={self._ofnm}' ignored", AttributeWarning, stacklevel=stacklevel())
    self._frnum = 1                                               # frame number (revised)
    self._expkg = multiprocessing                                 # multiprocessing module
    self._mpsvc = NotImplemented                                  # multiprocessing server process
    self._mpprc = list()                                          # multiprocessing process list
    self._mpfdp = collections.defaultdict(multiprocessing.Queue)  # multiprocessing file pointer
    self._mpmng = multiprocessing.Manager()                       # multiprocessing manager
    self._mpbuf = self._mpmng.dict()                              # multiprocessing frame dict
    self._mpfrm = self._mpmng.list()                              # multiprocessing frame storage
    self._mprsm = self._mpmng.list()                              # multiprocessing reassembly buffer
    self._mpkit = self._mpmng.Namespace()                         # multiprocessing work kit
    self._mpkit.counter = 0                                       # work count (on duty)
    self._mpkit.pool = 1                                          # work pool (ready)
    self._mpkit.eof = False                                       # EOF flag
    self._mpkit.trace = None                                      # flow tracer
    # preparation: write the global header, then seed the first worker
    # with the offset of the first frame.
    self.record_header()
    self._mpfdp[0].put(self._gbhdr.length)
    self._mpsvc = multiprocessing.Process(
        target=self._server_analyse_frame,
        kwargs={'mpfrm': self._mpfrm, 'mprsm': self._mprsm, 'mpbuf': self._mpbuf, 'mpkit': self._mpkit}
    )
    self._mpsvc.start()
    # extraction
    while True:
        # check EOF
        if self._mpkit.eof:
            self._update_eof()
            break
        # check counter: spawn a worker only while the pool has capacity
        if self._mpkit.pool and self._mpkit.counter < CPU_CNT - 1:
            # update file offset (previous worker reports where its frame ended)
            self._ifile.seek(self._mpfdp.pop(self._frnum-1).get(), os.SEEK_SET)
            # create worker
            # print(self._frnum, 'start')
            proc = multiprocessing.Process(
                target=self._server_extract_frame,
                kwargs={'mpkit': self._mpkit, 'mpbuf': self._mpbuf, 'mpfdp': self._mpfdp[self._frnum]}
            )
            # update status
            self._mpkit.pool -= 1
            self._mpkit.counter += 1
            # start and record
            proc.start()
            self._frnum += 1
            self._mpprc.append(proc)
        # check buffer: join all but the 4 most recent workers
        if len(self._mpprc) >= CPU_CNT - 1:
            [proc.join() for proc in self._mpprc[:-4]]
            del self._mpprc[:-4]
|
Use server multiprocessing to extract PCAP files.
|
def pop(h):
    """Pop the heap value from the heap."""
    last = h.size() - 1
    # Move the root to the end, restore heap order over the shrunk
    # prefix, then physically remove the displaced root element.
    h.swap(0, last)
    down(h, 0, last)
    return h.pop()
|
Pop the heap value from the heap.
|
def on_tab_close_clicked(self, event, state_m):
    """Triggered when the states-editor close button is clicked

    Closes the tab.

    :param state_m: The desired state model (the selected state)
    """
    page, state_identifier = self.find_page_of_state_m(state_m)
    if page:
        self.close_page(state_identifier, delete=False)
|
Triggered when the states-editor close button is clicked
Closes the tab.
:param state_m: The desired state model (the selected state)
|
def lfprob (dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F
    statistic given the degrees of freedom for the numerator (dfR-dfF) and
    the degrees of freedom for the denominator (dfF).

    Usage:   lfprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    # Incomplete-beta transform of the F statistic.
    x = dfden / float(dfden + dfnum * F)
    return betai(0.5 * dfden, 0.5 * dfnum, x)
|
Returns the (1-tailed) significance level (p-value) of an F
statistic given the degrees of freedom for the numerator (dfR-dfF) and
the degrees of freedom for the denominator (dfF).
Usage: lfprob(dfnum, dfden, F) where usually dfnum=dfbn, dfden=dfwn
|
def apply(self, key, value, prompt=None,
          on_load=lambda a: a, on_save=lambda a: a):
    """Applies a setting value to a key, if the value is not `None`.
    Returns without prompting if either of the following:

    * `value` is not `None`
    * already present in the dictionary

    Args:
        prompt:
            May either be a string to prompt via `raw_input` or a
            method (callable) that returns the value.
        on_load:
            lambda. Value is passed through here after loaded.
        on_save:
            lambda. Value is saved as this value.
    """
    # Reset value if flag exists without value
    if value == '':
        value = None
        # `has_key` replaced with `in` (works on Python 2 and 3).
        if key and key in self.data:
            del self.data[key]
    # If value is explicitly set from args.
    if value is not None:
        value = on_load(value)
        if key:
            self.data[key] = on_save(value)
        return value
    elif not key or key not in self:
        if callable(prompt):
            value = prompt()
        elif prompt is not None:
            value = raw_input(prompt + ": ")  # noqa: F821 -- Python 2 builtin
        if value is None:
            if key in self.data:
                del self.data[key]
            return None
        self.data[key] = on_save(value)
        return value
    return on_load(self.data[key])
|
Applies a setting value to a key, if the value is not `None`.
Returns without prompting if either of the following:
* `value` is not `None`
* already present in the dictionary
Args:
prompt:
May either be a string to prompt via `raw_input` or a
method (callable) that returns the value.
on_load:
lambda. Value is passed through here after loaded.
on_save:
lambda. Value is saved as this value.
|
def execute(self, progress_fn, print_verbose_info=None):
    """
    Start the progress bar, and return only when the progress reaches 100%.

    :param progress_fn: the executor function (or a generator). This function should take no arguments
        and return either a single number -- the current progress level, or a tuple (progress level, delay),
        where delay is the time interval for when the progress should be checked again. This function may at
        any point raise the ``StopIteration(message)`` exception, which will interrupt the progress bar,
        display the ``message`` in red font, and then re-raise the exception.
    :param print_verbose_info: optional callback, invoked with the current progress after each cycle.
    :raises StopIteration: if the job is interrupted. The reason for interruption is provided in the exception's
        message. The message will say "cancelled" if the job was interrupted by the user by pressing Ctrl+C.
    """
    assert_is_type(progress_fn, FunctionType, GeneratorType, MethodType)
    if isinstance(progress_fn, GeneratorType):
        # Convert generator to a regular function
        progress_fn = (lambda g: lambda: next(g))(progress_fn)
    # Initialize the execution context
    self._next_poll_time = 0
    self._t0 = time.time()
    self._x0 = 0
    self._v0 = 0.01  # corresponds to 100s completion time
    self._ve = 0.01
    progress = 0
    status = None  # Status message in case the job gets interrupted.
    try:
        while True:
            # We attempt to synchronize all helper functions, ensuring that each of them has the same idea
            # for what the current time moment is. Otherwise we could have some corner cases when one method
            # says that something must happen right now, while the other already sees that moment in the past.
            now = time.time()
            # Query the progress level, but only if it's time already
            if self._next_poll_time <= now:
                res = progress_fn()  # may raise StopIteration
                assert_is_type(res, (numeric, numeric), numeric)
                if not isinstance(res, tuple):
                    res = (res, -1)
                # Progress querying could have taken some time, so update the current time moment
                now = time.time()
                self._store_model_progress(res, now)
                self._recalculate_model_parameters(now)
            # Render the widget regardless of whether it's too early or not
            progress = min(self._compute_progress_at_time(now)[0], 1)
            if progress == 1 and self._get_real_progress() >= 1:
                # Do not exit until both the model and the actual progress reach 100% mark.
                break
            result = self._widget.render(progress)
            assert_is_type(result, RenderResult)
            time0 = result.next_time
            time1 = self._get_time_at_progress(result.next_progress)
            next_render_time = min(time0, time1)
            self._draw(result.rendered)
            # Wait until the next rendering/querying cycle
            wait_time = min(next_render_time, self._next_poll_time) - now
            if wait_time > 0:
                time.sleep(wait_time)
            if print_verbose_info is not None:
                print_verbose_info(progress)
    except KeyboardInterrupt:
        # If the user presses Ctrl+C, we interrupt the progress bar.
        status = "cancelled"
    except StopIteration as e:
        # If the generator raises StopIteration before reaching 100%, then the progress display will
        # remain incomplete.
        status = str(e)
    # Do one final rendering before we exit
    result = self._widget.render(progress=progress, status=status)
    self._draw(result.rendered, final=True)
    if status == "cancelled":
        # Re-raise the exception, to inform the upstream caller that something unexpected happened.
        raise StopIteration(status)
|
Start the progress bar, and return only when the progress reaches 100%.
:param progress_fn: the executor function (or a generator). This function should take no arguments
and return either a single number -- the current progress level, or a tuple (progress level, delay),
where delay is the time interval for when the progress should be checked again. This function may at
any point raise the ``StopIteration(message)`` exception, which will interrupt the progress bar,
display the ``message`` in red font, and then re-raise the exception.
:raises StopIteration: if the job is interrupted. The reason for interruption is provided in the exception's
message. The message will say "cancelled" if the job was interrupted by the user by pressing Ctrl+C.
|
def _get_default_iface_linux():
    # type: () -> Optional[str]
    """Get the default interface by reading /proc/net/route.

    This is the same source as the `route` command, however it's much
    faster to read this file than to call `route`. If it fails for whatever
    reason, we can fall back on the system commands (e.g for a platform
    that has a route command, but maybe doesn't use /proc?).
    """
    contents = _read_file('/proc/net/route')
    if contents is None or len(contents) <= 1:
        return None
    # Skip the header row; a destination of all zeroes marks the default route.
    for row in contents.split('\n')[1:-1]:
        name, dest = row.split('\t')[:2]
        if dest == '00000000':
            return name
    return None
|
Get the default interface by reading /proc/net/route.
This is the same source as the `route` command, however it's much
faster to read this file than to call `route`. If it fails for whatever
reason, we can fall back on the system commands (e.g for a platform
that has a route command, but maybe doesn't use /proc?).
|
def js_distance(p, q):
    """Compute the Jensen-Shannon distance between two discrete distributions.

    NOTE: JS divergence is not a metric but the sqrt of JS divergence is a
    metric and is called the JS distance.

    Parameters
    ----------
    p : np.array
        probability mass array (sums to 1)
    q : np.array
        probability mass array (sums to 1)

    Returns
    -------
    js_dist : float
        Jensen-Shannon distance between two discrete distributions
    """
    # The distance is simply the square root of the divergence.
    return np.sqrt(js_divergence(p, q))
|
Compute the Jensen-Shannon distance between two discrete distributions.
NOTE: JS divergence is not a metric but the sqrt of JS divergence is a
metric and is called the JS distance.
Parameters
----------
p : np.array
probability mass array (sums to 1)
q : np.array
probability mass array (sums to 1)
Returns
-------
js_dist : float
Jensen-Shannon distance between two discrete distributions
|
def play(self, **kwargs):
    """Trigger a job explicitly.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabJobPlayError: If the job could not be triggered
    """
    path = '%s/%s/play' % (self.manager.path, self.get_id())
    # Forward **kwargs (e.g. sudo) to the request; previously they were
    # accepted and documented but silently dropped.
    self.manager.gitlab.http_post(path, **kwargs)
|
Trigger a job explicitly.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabJobPlayError: If the job could not be triggered
|
def clear_cached_data(self):
    """Clear any internally cached BLE device data. Necessary in some cases
    to prevent issues with stale device data getting cached by the OS.
    """
    # Go through and remove any device that isn't currently connected.
    for device in self.list_devices():
        # Skip any connected device.
        if device.is_connected:
            continue
        # Remove this device.  First get the adapter associated with the device.
        # NOTE(review): assumes device._adapter holds the adapter's DBus
        # object path — confirm against the device wrapper class.
        adapter = dbus.Interface(self._bus.get_object('org.bluez', device._adapter),
                                 _ADAPTER_INTERFACE)
        # Now call RemoveDevice on the adapter to remove the device from
        # bluez's DBus hierarchy.
        adapter.RemoveDevice(device._device.object_path)
|
Clear any internally cached BLE device data. Necessary in some cases
to prevent issues with stale device data getting cached by the OS.
|
def get_theming_attribute(self, mode, name, part=None):
    """
    looks up theming attribute

    :param mode: ui-mode (e.g. `search`,`thread`...)
    :type mode: str
    :param name: identifier of the atttribute
    :type name: str
    :rtype: urwid.AttrSpec
    """
    # The configured colour mode selects which palette the theme uses.
    colourmode = int(self._config.get('colourmode'))
    return self._theme.get_attribute(colourmode, mode, name, part)
|
looks up theming attribute
:param mode: ui-mode (e.g. `search`,`thread`...)
:type mode: str
:param name: identifier of the atttribute
:type name: str
:rtype: urwid.AttrSpec
|
def getLogger(name):
    """Return a logger from a given name.

    If the name does not have a log handler, this will create one for it based
    on the module name which will log everything to a log file in a location
    the executing user will have access to.

    :param name: ``str``
    :return: ``object``
    """
    log = logging.getLogger(name=name)
    # Reuse the logger as-is when a handler already carries this name;
    # otherwise build a default logger keyed on the top-level module name.
    for handler in log.handlers:
        if handler.name == name:
            return log
    return LogSetup().default_logger(name=name.split('.')[0])
|
Return a logger from a given name.
If the name does not have a log handler, this will create one for it based
on the module name which will log everything to a log file in a location
the executing user will have access to.
:param name: ``str``
:return: ``object``
|
def build_network_settings(**settings):
    '''
    Build the global network script.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_network_settings <settings>

    :param settings: network options; ``test=True`` returns the rendered
        content without writing any files.
    :return: list of resulting lines, or ``''`` when a template is missing.
    '''
    changes = []
    # Read current configuration and store default values
    current_network_settings = _parse_current_network_settings()
    # Build settings
    opts = _parse_network_settings(settings, current_network_settings)
    # Ubuntu has moved away from /etc/default/networking
    # beginning with the 12.04 release so we disable or enable
    # the networking related services on boot
    skip_etc_default_networking = (
        __grains__['osfullname'] == 'Ubuntu' and
        int(__grains__['osrelease'].split('.')[0]) >= 12)
    if skip_etc_default_networking:
        if opts['networking'] == 'yes':
            service_cmd = 'service.enable'
        else:
            service_cmd = 'service.disable'
        if __salt__['service.available']('NetworkManager'):
            __salt__[service_cmd]('NetworkManager')
        if __salt__['service.available']('networking'):
            __salt__[service_cmd]('networking')
    else:
        try:
            template = JINJA.get_template('network.jinja')
        except jinja2.exceptions.TemplateNotFound:
            log.error('Could not load template network.jinja')
            return ''
        network = template.render(opts)
        if 'test' in settings and settings['test']:
            return _read_temp(network)
        # Write settings
        _write_file_network(network, _DEB_NETWORKING_FILE, True)
    # Get hostname and domain from opts
    sline = opts['hostname'].split('.', 1)
    opts['hostname'] = sline[0]
    current_domainname = current_network_settings['domainname']
    current_searchdomain = current_network_settings['searchdomain']
    new_domain = False
    if len(sline) > 1:
        new_domainname = sline[1]
        if new_domainname != current_domainname:
            domainname = new_domainname
            opts['domainname'] = new_domainname
            new_domain = True
        else:
            domainname = current_domainname
            opts['domainname'] = domainname
    else:
        domainname = current_domainname
        opts['domainname'] = domainname
    new_search = False
    if 'search' in opts:
        new_searchdomain = opts['search']
        if new_searchdomain != current_searchdomain:
            searchdomain = new_searchdomain
            opts['searchdomain'] = new_searchdomain
            new_search = True
        else:
            searchdomain = current_searchdomain
            opts['searchdomain'] = searchdomain
    else:
        searchdomain = current_searchdomain
        opts['searchdomain'] = searchdomain
    # If the domain changes, then we should write the resolv.conf file.
    if new_domain or new_search:
        # Look for existing domain line and update if necessary
        resolve = _parse_resolve()
        domain_prog = re.compile(r'domain\s+')
        search_prog = re.compile(r'search\s+')
        new_contents = []
        for item in _read_file(_DEB_RESOLV_FILE):
            if domain_prog.match(item):
                item = 'domain {0}'.format(domainname)
            elif search_prog.match(item):
                item = 'search {0}'.format(searchdomain)
            new_contents.append(item)
        # A domain line didn't exist so we'll add one in
        # with the new domainname
        if 'domain' not in resolve:
            new_contents.insert(0, 'domain {0}' . format(domainname))
        # A search line didn't exist so we'll add one in
        # with the new search domain
        if 'search' not in resolve:
            # ('domain' in resolve) is 0 or 1, so search lands after any domain line.
            new_contents.insert('domain' in resolve, 'search {0}'.format(searchdomain))
        new_resolv = '\n'.join(new_contents)
        # Write /etc/resolv.conf
        if not ('test' in settings and settings['test']):
            _write_file_network(new_resolv, _DEB_RESOLV_FILE)
    # used for returning the results back
    try:
        template = JINJA.get_template('display-network.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template display-network.jinja')
        return ''
    network = template.render(opts)
    changes.extend(_read_temp(network))
    return changes
|
Build the global network script.
CLI Example:
.. code-block:: bash
salt '*' ip.build_network_settings <settings>
|
def get_library_citation():
    '''Return a descriptive string and reference data for what users of the library should cite'''
    # Restrict the full reference data to the library's own citations.
    reference_data = api.get_reference_data()
    cited = {key: reference_data[key] for key in _lib_refs}
    return (_lib_refs_desc, cited)
|
Return a descriptive string and reference data for what users of the library should cite
|
def parse_field(self, field_data, index=0):
    """Parse field and add missing options"""
    field = {'__index__': index}
    if isinstance(field_data, str):
        field.update(self.parse_string_field(field_data))
    elif isinstance(field_data, dict):
        field.update(field_data)
    else:
        raise TypeError('Expected a str or dict get {}'.format(type(field_data)))
    field.setdefault('field', None)
    if 'label' not in field:
        if field['field']:
            # Prefer the model field's verbose name; fall back to a
            # label derived from the field name itself.
            try:
                field['label'] = self.object._meta.get_field(field['field']).verbose_name.capitalize()
            except Exception:
                field['label'] = field['field'].replace('_', '').capitalize()
        else:
            field['label'] = ''
    field.setdefault('format', '{0}')
    # Set default options
    for option_name, options in self.fields_options.items():
        if 'default' in options and option_name not in field:
            field[option_name] = options['default']
    return field
|
Parse field and add missing options
|
def grant_permissions(self, proxy_model):
    """
    Create the default permissions for the just added proxy model

    :param proxy_model: the proxy model class whose default permissions
        should be created
    """
    ContentType = apps.get_model('contenttypes', 'ContentType')
    try:
        Permission = apps.get_model('auth', 'Permission')
    except LookupError:
        # The auth app is not installed; nothing to grant.
        return
    # searched_perms will hold the permissions we're looking for as (codename, name) pairs
    searched_perms = []
    ctype = ContentType.objects.get_for_model(proxy_model)
    for perm in self.default_permissions:
        searched_perms.append((
            '{0}_{1}'.format(perm, proxy_model._meta.model_name),
            "Can {0} {1}".format(perm, proxy_model._meta.verbose_name_raw)
        ))
    # Existing (content_type_id, codename) pairs, so only missing ones are created.
    all_perms = set(Permission.objects.filter(
        content_type=ctype,
    ).values_list(
        'content_type', 'codename'
    ))
    permissions = [
        Permission(codename=codename, name=name, content_type=ctype)
        for codename, name in searched_perms if (ctype.pk, codename) not in all_perms
    ]
    Permission.objects.bulk_create(permissions)
|
Create the default permissions for the just added proxy model
|
def default_image_loader(filename, flags, **kwargs):
    """Return a loader callable that simply echoes the filename, a rect, and flags."""
    def load(rect=None, flags=None):
        # No actual image decoding happens here — this is the fallback loader.
        return filename, rect, flags
    return load
|
This default image loader just returns filename, rect, and any flags
|
def script(self, sql_script, split_algo='sql_split', prep_statements=True, dump_fails=True):
    """Wrapper method providing access to the SQLScript class's methods and properties.

    :param sql_script: the SQL script to execute (see ``Execute`` for exact semantics)
    :param split_algo: presumably the statement-splitting algorithm name — confirm in ``Execute``
    :param prep_statements: presumably whether to prepare statements before execution — confirm in ``Execute``
    :param dump_fails: presumably whether failed statements are dumped — confirm in ``Execute``
    :return: an ``Execute`` instance bound to this connection
    """
    return Execute(sql_script, split_algo, prep_statements, dump_fails, self)
|
Wrapper method providing access to the SQLScript class's methods and properties.
|
def run_radia(job, bams, univ_options, radia_options, chrom):
    """
    Run RADIA on the tumor RNA and tumor/normal DNA bams for a single
    chromosome, then chain a child job that filters the raw calls.
    ARGUMENTS
    1. bams: Dict of bams and their indexes
             bams
              |- 'tumor_rna': <JSid>
              |- 'tumor_rnai': <JSid>
              |- 'tumor_dna': <JSid>
              |- 'tumor_dnai': <JSid>
              |- 'normal_dna': <JSid>
              +- 'normal_dnai': <JSid>
    2. univ_options: Dict of universal arguments used by almost all tools
             univ_options
              |- 'patient': <sample short ID>
              +- 'dockerhub': <dockerhub to use>
    3. radia_options: Dict of parameters specific to radia
             radia_options
              |- 'genome_fasta': <JSid for genome fasta file>
              +- 'genome_fai': <JSid for genome fasta index file>
    4. chrom: String containing chromosome name with chr appended
    RETURN VALUES
    1. Dict of filtered radia output vcf and logfile (Nested return)
        |- 'radia_filtered_CHROM.vcf': <JSid>
        +- 'radia_filtered_CHROM_radia.log': <JSid>
    """
    job.fileStore.logToMaster('Running radia on %s:%s' %(univ_options['patient'], chrom))
    work_dir = job.fileStore.getLocalTempDir()
    # Map docker-visible file names to job-store IDs; the files are localized
    # below by get_files_from_filestore.
    input_files = {
        'rna.bam': bams['tumor_rna'],
        'rna.bam.bai': bams['tumor_rnai'],
        'tumor.bam': bams['tumor_dna'],
        'tumor.bam.bai': bams['tumor_dnai'],
        'normal.bam': bams['normal_dna'],
        'normal.bam.bai': bams['normal_dnai'],
        'genome.fasta': radia_options['genome_fasta'],
        'genome.fasta.fai': radia_options['genome_fai']}
    input_files = get_files_from_filestore(job, input_files, work_dir,
                                           docker=True)
    radia_output = ''.join([work_dir, '/radia_', chrom, '.vcf'])
    radia_log = ''.join([work_dir, '/radia_', chrom, '_radia.log'])
    parameters = [univ_options['patient'],  # shortID
                  chrom,
                  '-n', input_files['normal.bam'],
                  '-t', input_files['tumor.bam'],
                  '-r', input_files['rna.bam'],
                  ''.join(['--rnaTumorFasta=', input_files['genome.fasta']]),
                  '-f', input_files['genome.fasta'],
                  '-o', docker_path(radia_output),
                  '-i', 'hg19_M_rCRS',
                  '-m', input_files['genome.fasta'],
                  '-d', 'aarjunrao@soe.ucsc.edu',
                  '-q', 'Illumina',
                  '--disease', 'CANCER',
                  '-l', 'INFO',
                  '-g', docker_path(radia_log)]
    docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir,
                dockerhub=univ_options['dockerhub'])
    # Persist the raw vcf and log back into the job store, keyed by basename.
    output_files = defaultdict()
    for radia_file in [radia_output, radia_log]:
        output_files[os.path.basename(radia_file)] = \
            job.fileStore.writeGlobalFile(radia_file)
    # Chain the filtering step as a child job and promise its result upward.
    filterradia = job.wrapJobFn(run_filter_radia, bams,
                                output_files[os.path.basename(radia_output)],
                                univ_options, radia_options, chrom, disk='60G', memory='6G')
    job.addChild(filterradia)
    return filterradia.rv()
|
This module will run radia on the RNA and DNA bams
ARGUMENTS
1. bams: Dict of bams and their indexes
bams
|- 'tumor_rna': <JSid>
|- 'tumor_rnai': <JSid>
|- 'tumor_dna': <JSid>
|- 'tumor_dnai': <JSid>
|- 'normal_dna': <JSid>
+- 'normal_dnai': <JSid>
2. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
3. radia_options: Dict of parameters specific to radia
radia_options
              |- 'dbsnp_vcf': <JSid for dbsnp vcf file>
+- 'genome': <JSid for genome fasta file>
4. chrom: String containing chromosome name with chr appended
RETURN VALUES
1. Dict of filtered radia output vcf and logfile (Nested return)
|- 'radia_filtered_CHROM.vcf': <JSid>
+- 'radia_filtered_CHROM_radia.log': <JSid>
|
def coverage_region_detailed_stats(target_name, bed_file, data, out_dir):
    """
    Calculate coverage at different completeness cutoff
    for region in coverage option.

    Copies the pre-computed depth outputs for ``target_name`` (regions, dist
    and, when present, thresholds files) into ``out_dir``.

    Args:
        target_name: key under ``data["depth"]`` holding the ready depth files.
        bed_file: BED file of target regions; nothing is done unless it exists.
        data: sample data dict -- assumed to carry
            ``["depth"][target_name]`` with "regions", "dist" and optionally
            "thresholds" paths (TODO confirm against caller).
        out_dir: destination directory for the copied files.

    Returns:
        List of copied output paths, or ``[]`` when inputs are unavailable.
    """
    # Guard clauses: no BED file or no pre-computed depth -> nothing to do.
    if not (bed_file and utils.file_exists(bed_file)):
        return []
    ready_depth = tz.get_in(["depth", target_name], data)
    if not ready_depth:
        return []
    cov_file = ready_depth["regions"]
    dist_file = ready_depth["dist"]
    thresholds_file = ready_depth.get("thresholds")
    out_cov_file = os.path.join(out_dir, os.path.basename(cov_file))
    out_dist_file = os.path.join(out_dir, os.path.basename(dist_file))
    # Thresholds output is optional: only produced when the source exists.
    if thresholds_file and os.path.isfile(thresholds_file):
        out_thresholds_file = os.path.join(out_dir, os.path.basename(thresholds_file))
    else:
        out_thresholds_file = None
    # Copy all outputs together, only when the region coverage copy is stale.
    if not utils.file_uptodate(out_cov_file, cov_file):
        utils.copy_plus(cov_file, out_cov_file)
        utils.copy_plus(dist_file, out_dist_file)
        if out_thresholds_file:
            utils.copy_plus(thresholds_file, out_thresholds_file)
    out = [out_cov_file, out_dist_file]
    if out_thresholds_file:
        out.append(out_thresholds_file)
    return out
|
Calculate coverage at different completeness cutoff
for region in coverage option.
|
def create_dset_to3d(prefix,file_list,file_order='zt',num_slices=None,num_reps=None,TR=None,slice_order='alt+z',only_dicoms=True,sort_filenames=False):
    '''Manually create an AFNI dataset with ``to3d`` by specifying everything
    (not recommended, but necessary when autocreation fails).

    If `num_slices` or `num_reps` is omitted, it will be inferred from the
    number of images. If both are omitted, the dataset is assumed to not be
    time-dependent.

    :only_dicoms: filter the given list down to readable DICOM images
    :sort_filenames: sort the given files by the right-most number in the filename

    Returns ``False`` on any failure; falls through (``None``) on success.

    NOTE(review): this function relies on Python 2 semantics (integer
    division with ``/`` for slice/rep inference, a str passed to
    ``Popen.communicate``) -- confirm before porting to Python 3.
    '''
    # DICOM tags probed from each file's header via info_for_tags.
    tags = {
        'num_rows': (0x0028,0x0010),
        'num_reps': (0x0020,0x0105),
        'TR': (0x0018,0x0080)
    }
    with nl.notify('Trying to create dataset %s' % prefix):
        if os.path.exists(prefix):
            nl.notify('Error: file "%s" already exists!' % prefix,level=nl.level.error)
            return False
        tagvals = {}
        for f in file_list:
            try:
                tagvals[f] = info_for_tags(f,tags.values())
            except:
                # Unreadable file: silently skipped; it simply gets no tag entry.
                pass
        if only_dicoms:
            new_file_list = []
            for f in file_list:
                if f in tagvals and len(tagvals[f][tags['num_rows']])>0:
                    # Only include DICOMs that actually have image information
                    new_file_list.append(f)
            file_list = new_file_list
        if sort_filenames:
            def file_num(fname):
                # Sort key: right-most numeric token in the filename;
                # falls back to the filename itself when no number is found.
                try:
                    nums = [x.strip('.') for x in re.findall(r'[\d.]+',fname) if x.strip('.')!='']
                    return float(nums[-1])
                except:
                    return fname
            file_list = sorted(file_list,key=file_num)
        if len(file_list)==0:
            nl.notify('Error: Couldn\'t find any valid DICOM images',level=nl.level.error)
            return False
        cmd = ['to3d','-skip_outliers','-quit_on_err','-prefix',prefix]
        if num_slices!=None or num_reps!=None:
            # Time-based dataset
            if num_slices==None:
                # Infer slices from reps; file count must divide evenly.
                if len(file_list)%num_reps!=0:
                    nl.notify('Error: trying to guess # of slices, but %d (number for files) doesn\'t divide evenly into %d (number of reps)' % (len(file_list),num_reps),level=nl.level.error)
                    return False
                num_slices = len(file_list)/num_reps
            if num_reps==None:
                # Infer reps from slices, or from the DICOM header of a
                # single multi-rep file.
                if len(file_list)%num_slices==0:
                    num_reps = len(file_list)/num_slices
                elif len(file_list)==1 and tags['num_reps'] in tagvals[file_list[0]]:
                    num_reps = tagvals[file_list[0]][tags['num_reps']]
                else:
                    nl.notify('Error: trying to guess # of reps, but %d (number for files) doesn\'t divide evenly into %d (number of slices)' % (len(file_list),num_slices),level=nl.level.error)
                    return False
            if TR==None:
                # Fall back to the TR recorded in the first file's header.
                TR = tagvals[file_list[0]][tags['TR']]
            # to3d's -time ordering flag takes (slices, reps) for 'zt' and
            # (reps, slices) otherwise.
            cmd += ['-time:%s'%file_order]
            if file_order=='zt':
                cmd += [num_slices,num_reps]
            else:
                cmd += [num_reps,num_slices]
            cmd += [TR,slice_order]
        # '-@' makes to3d read the input file list from stdin.
        cmd += ['-@']
        cmd = [str(x) for x in cmd]
        out = None
        try:
            p = subprocess.Popen(cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            # Feed the newline-separated file list on stdin (see '-@' above).
            out = p.communicate('\n'.join(file_list))
            if p.returncode!=0:
                raise Exception
        except:
            with nl.notify('Error: to3d returned error',level=nl.level.error):
                if out:
                    nl.notify('stdout:\n' + out[0] + '\nstderr:\n' + out[1],level=nl.level.error)
            return False
|
manually create dataset by specifying everything (not recommended, but necessary when autocreation fails)
If `num_slices` or `num_reps` is omitted, it will be inferred by the number of images. If both are omitted,
it assumes that this is not a time-dependent dataset
:only_dicoms: filter the given list by readable DICOM images
:sort_filenames: sort the given files by filename using the right-most number in the filename
|
def configure_sessionmaker(graph):
    """
    Create the SQLAlchemy session class.
    """
    strategy = getattr(graph, graph.config.sessionmaker.engine_routing_strategy)

    if strategy.supports_multiple_binds:
        ScopedFactory.infect(graph, "postgres")

    class RoutingSession(Session):
        """
        Route session bind to an appropriate engine.
        See: http://docs.sqlalchemy.org/en/latest/orm/persistence_techniques.html#partitioning-strategies
        """
        def get_bind(self, mapper=None, clause=None):
            # Delegate bind selection to the configured routing strategy.
            return strategy.get_bind(mapper, clause)

    return sessionmaker(class_=RoutingSession)
|
Create the SQLAlchemy session class.
|
def generate_variants(unresolved_spec):
    """Yield (variant-name, resolved-spec) pairs from an unresolved spec dict.

    Unresolved values come in two flavors:

    Grid search -- enumerates every combination of the listed values, e.g.::

        "activation": grid_search(["relu", "tanh"])
        "learning_rate": grid_search([1e-3, 1e-4, 1e-5])

    produces six variants in combination.

    Lambda functions -- evaluated to a concrete value; may express
    dependencies, conditional distributions, or random search, e.g.::

        "cpu": lambda spec: spec.config.num_workers
        "batch_size": lambda spec: random.uniform(1, 1000)

    For plain JSON / YAML specs, the equivalent forms are::

        "activation": {"grid_search": ["relu", "tanh"]}
        "cpu": {"eval": "spec.config.num_workers"}
    """
    for resolved_vars, concrete_spec in _generate_variants(unresolved_spec):
        # Every leaf must be concrete once resolution has finished.
        assert not _unresolved_values(concrete_spec)
        yield format_vars(resolved_vars), concrete_spec
|
Generates variants from a spec (dict) with unresolved values.
There are two types of unresolved values:
Grid search: These define a grid search over values. For example, the
following grid search values in a spec will produce six distinct
variants in combination:
"activation": grid_search(["relu", "tanh"])
"learning_rate": grid_search([1e-3, 1e-4, 1e-5])
Lambda functions: These are evaluated to produce a concrete value, and
can express dependencies or conditional distributions between values.
They can also be used to express random search (e.g., by calling
into the `random` or `np` module).
"cpu": lambda spec: spec.config.num_workers
"batch_size": lambda spec: random.uniform(1, 1000)
Finally, to support defining specs in plain JSON / YAML, grid search
and lambda functions can also be defined alternatively as follows:
"activation": {"grid_search": ["relu", "tanh"]}
"cpu": {"eval": "spec.config.num_workers"}
|
def _create_opt_rule(self, rulename):
""" Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
"""
optname = rulename + '_opt'
def optrule(self, p):
p[0] = p[1]
optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
optrule.__name__ = 'p_%s' % optname
setattr(self.__class__, optrule.__name__, optrule)
|
Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
|
def view_set(method_name):
    """
    Creates a setter that will call the view method with the context's
    key as first parameter and the value as second parameter.
    @param method_name: the name of a method belonging to the view.
    @type method_name: str
    """
    def view_set(value, context, **_params):
        # Look the method up lazily so the setter tracks the current view.
        bound_method = getattr(context["view"], method_name)
        return _set(bound_method, context["key"], value, (), {})
    return view_set
|
Creates a setter that will call the view method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the view.
@type method_name: str
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.