code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def read_socket_input(connection, socket_obj):
    """Read from the network layer and process all data read.

    Supports both blocking and non-blocking sockets.

    Args:
        connection: Protocol connection object exposing ``needs_input``,
            ``process_input()``, ``close_input()``, ``close_output()`` and
            (via the ``Connection`` class) the ``EOS`` sentinel.
        socket_obj: The socket to read from.

    Returns:
        The number of input bytes processed, or ``Connection.EOS`` if input
        processing is done (the peer closed the socket). Returns 0 for
        transient conditions (EAGAIN/EWOULDBLOCK/EINTR).

    Raises:
        Any other exception raised by the socket (including
        ``socket.timeout``) is re-raised for the caller to handle.
    """
    count = connection.needs_input
    if count <= 0:
        return count  # 0 or EOS
    while True:
        try:
            sock_data = socket_obj.recv(count)
            break
        except socket.timeout as e:
            LOG.debug("Socket timeout exception %s", str(e))
            raise  # caller must handle
        except socket.error as e:
            err = e.errno
            if err in [errno.EAGAIN,
                       errno.EWOULDBLOCK,
                       errno.EINTR]:
                # transient: no data available right now, try again later
                return 0
            # otherwise, unrecoverable, caller must handle
            LOG.debug("Socket error exception %s", str(e))
            raise
        except Exception as e:  # beats me... assume fatal
            LOG.debug("unknown socket exception %s", str(e))
            raise  # caller must handle
    if len(sock_data) > 0:
        count = connection.process_input(sock_data)
    else:
        # recv() returning an empty buffer means the peer performed an
        # orderly shutdown: propagate EOS and close both directions.
        LOG.debug("Socket closed")
        count = Connection.EOS
        connection.close_input()
        connection.close_output()
    return count
|
Read from the network layer and processes all data read. Can
support both blocking and non-blocking sockets.
Returns the number of input bytes processed, or EOS if input processing
is done. Any exceptions raised by the socket are re-raised.
|
def variantAnnotationsGenerator(self, request):
    """
    Returns a generator over the (variantAnnotations, nextPageToken) pairs
    defined by the specified request.

    The variant annotation set is located by parsing the compound ID in
    ``request.variant_annotation_set_id`` and walking the data repository
    (dataset -> variant set -> variant annotation set); paging over the
    requested interval is delegated to the interval iterator.
    """
    compoundId = datamodel.VariantAnnotationSetCompoundId.parse(
        request.variant_annotation_set_id)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    variantSet = dataset.getVariantSet(compoundId.variant_set_id)
    variantAnnotationSet = variantSet.getVariantAnnotationSet(
        request.variant_annotation_set_id)
    iterator = paging.VariantAnnotationsIntervalIterator(
        request, variantAnnotationSet)
    return iterator
|
Returns a generator over the (variantAnnotations, nextPageToken) pairs
defined by the specified request.
|
def deactivate_in_ec(self, ec_index):
    '''Deactivate this component in an execution context.

    @param ec_index The index of the execution context to deactivate in.
                    This index is into the total array of contexts, that
                    is both owned and participating contexts. If the value
                    of ec_index is greater than the length of
                    @ref owned_ecs, that length is subtracted from
                    ec_index and the result used as an index into
                    @ref participating_ecs.
    @raises BadECIndexError if the adjusted index falls outside
            @ref participating_ecs.
    '''
    with self._mutex:
        # Indices past the owned contexts address the participating
        # contexts, offset by the number of owned contexts.
        if ec_index >= len(self.owned_ecs):
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            ec = self.participating_ecs[ec_index]
        else:
            ec = self.owned_ecs[ec_index]
        ec.deactivate_component(self._obj)
|
Deactivate this component in an execution context.
@param ec_index The index of the execution context to deactivate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
|
def get_tracerinfo(tracerinfo_file):
    """
    Read an output's tracerinfo.dat file and parse into a DataFrame for
    use in selecting and parsing categories.

    Parameters
    ----------
    tracerinfo_file : str
        Path to tracerinfo.dat

    Returns
    -------
    tracer_df : pandas.DataFrame
        Tracer information, one row per tracer, with added boolean
        'hydrocarbon' and 'chemical' columns.
    tracer_desc : dict
        Mapping of tracer name -> tracer description.
    """
    widths = [rec.width for rec in tracer_recs]
    col_names = [rec.name for rec in tracer_recs]
    dtypes = [rec.type for rec in tracer_recs]
    # Columns whose names start with '-' are placeholders and are skipped.
    usecols = [name for name in col_names if not name.startswith('-')]

    # BUGFIX: pandas' fixed-width reader takes a ``dtype`` mapping, not a
    # ``dtypes`` list -- the original keyword was not a valid read_fwf
    # argument, so the requested column types were never applied.
    dtype_map = {name: dtype for name, dtype in zip(col_names, dtypes)
                 if not name.startswith('-')}
    tracer_df = pd.read_fwf(tracerinfo_file, widths=widths, names=col_names,
                            dtype=dtype_map, comment="#", header=None,
                            usecols=usecols)

    # Check an edge case related to a bug in GEOS-Chem v12.0.3 which
    # erroneously dropped short/long tracer names in certain tracerinfo.dat
    # outputs. Figure out which rows were erroneously processed (they'll
    # have NaNs in them) and raise a warning if there are any.
    na_free = tracer_df.dropna(subset=['tracer', 'scale'])
    only_na = tracer_df[~tracer_df.index.isin(na_free.index)]
    if len(only_na) > 0:
        warn("At least one row in {} wasn't decoded correctly; we strongly"
             " recommend you manually check that file to see that all"
             " tracers are properly recorded."
             .format(tracerinfo_file))

    tracer_desc = {tracer.name: tracer.desc for tracer in tracer_recs
                   if not tracer.name.startswith('-')}

    # Process some of the information about which variables are hydrocarbons
    # and chemical tracers versus other diagnostics.
    def _assign_hydrocarbon(row):
        # A carbon count other than 1 marks a hydrocarbon; use the molecular
        # weight of carbon for unit conversions in that case.
        if row['C'] != 1:
            row['hydrocarbon'] = True
            row['molwt'] = C_MOLECULAR_WEIGHT
        else:
            row['hydrocarbon'] = False
        return row

    tracer_df = (
        tracer_df
        .apply(_assign_hydrocarbon, axis=1)
        .assign(chemical=lambda x: x['molwt'].astype(bool))
    )

    return tracer_df, tracer_desc
|
Read an output's tracerinfo.dat file and parse into a DataFrame for
use in selecting and parsing categories.
Parameters
----------
tracerinfo_file : str
Path to tracerinfo.dat
Returns
-------
DataFrame containing the tracer information.
|
def filter_by(self, string):
    """Filter the treeview, detaching items that do not match *string*.

    An empty string clears any active filter. Non-matching items are
    recorded in ``self._detached`` as (item, parent, index) tuples so
    they can be re-attached later.
    """
    # Re-attach anything hidden by a previous filter before re-filtering.
    self._reatach()
    if string == '':
        self.filter_remove()
        return

    self._expand_all()
    self.treeview.selection_set('')
    children = self.treeview.get_children('')
    for item in children:
        # _detach appears to return (matched, [(item, parent, index), ...])
        # for the subtree rooted at ``item`` -- TODO confirm against _detach.
        _, detached = self._detach(item)
        if detached:
            self._detached.extend(detached)
    for i, p, idx in self._detached:
        # txt = self.treeview.item(i, 'text')
        self.treeview.detach(i)
    self.filter_on = True
|
Filters treeview
|
def CRRAutility(c, gam):
    '''
    Evaluates constant relative risk aversion (CRRA) utility of consumption c
    given risk aversion parameter gam.

    Parameters
    ----------
    c : float
        Consumption value
    gam : float
        Risk aversion

    Returns
    -------
    (unnamed) : float
        Utility

    Tests
    -----
    Test a value which should pass:
    >>> c, gamma = 1.0, 2.0  # Set two values at once with Python syntax
    >>> CRRAutility(c=c, gam=gamma)
    -1.0
    '''
    # BUGFIX: the doctest previously called an undefined name ``utility``;
    # it now calls this function.
    # Log utility is the limiting case as gam -> 1; handle it explicitly
    # to avoid division by zero in the general CRRA formula.
    if gam == 1:
        return np.log(c)
    else:
        return c**(1.0 - gam) / (1.0 - gam)
|
Evaluates constant relative risk aversion (CRRA) utility of consumption c
given risk aversion parameter gam.
Parameters
----------
c : float
Consumption value
gam : float
Risk aversion
Returns
-------
(unnamed) : float
Utility
Tests
-----
Test a value which should pass:
>>> c, gamma = 1.0, 2.0 # Set two values at once with Python syntax
>>> utility(c=c, gam=gamma)
-1.0
|
def add_file_recursive(self, filename, trim=False):
    """Add a file and all its recursive dependencies to the graph.

    Performs a breadth-first traversal of the import graph starting at
    ``filename``, adding one node per file and one edge per dependency.

    Args:
        filename: The name of the file.
        trim: Whether to trim the dependencies of builtin and system files.
    """
    assert not self.final, 'Trying to mutate a final graph.'
    self.add_source_file(filename)
    queue = collections.deque([filename])
    seen = set()
    while queue:
        filename = queue.popleft()
        self.graph.add_node(filename)
        try:
            deps, broken = self.get_file_deps(filename)
        except parsepy.ParseError:
            # Python couldn't parse `filename`. If we're sure that it is a
            # Python file, we mark it as unreadable and keep the node in the
            # graph so importlab's callers can do their own syntax error
            # handling if desired.
            if filename.endswith('.py'):
                self.unreadable_files.add(filename)
            else:
                self.graph.remove_node(filename)
            continue
        for f in broken:
            self.broken_deps[filename].add(f)
        for f in deps:
            # Only queue the dependency for further expansion if
            # follow_file says so; the node/edge are recorded regardless.
            if self.follow_file(f, seen, trim):
                queue.append(f)
            seen.add(f)
            self.graph.add_node(f)
            self.graph.add_edge(filename, f)
|
Add a file and all its recursive dependencies to the graph.
Args:
filename: The name of the file.
trim: Whether to trim the dependencies of builtin and system files.
|
def unwrap_state_dict(self, obj: Dict[str, Any]) -> Union[Tuple[str, Any], Tuple[None, None]]:
    """Unwraps a marshalled state previously wrapped using :meth:`wrap_state_dict`.

    Returns the ``(typename, state)`` pair when *obj* is a two-entry dict
    carrying the configured type key; otherwise ``(None, None)``.
    """
    # Anything other than the exact two-key wrapper shape is not ours.
    if len(obj) != 2:
        return None, None
    typename = obj.get(self.type_key)
    if typename is None:
        return None, None
    return typename, obj.get(self.state_key)
|
Unwraps a marshalled state previously wrapped using :meth:`wrap_state_dict`.
|
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True):
    """Evaluates by creating a MultiIndex containing evaluated data and index.

    See `LazyResult`.

    Returns
    -------
    MultiIndex
        MultiIndex with evaluated data.
    """
    # Evaluate each lazy level in order, forwarding all evaluation options.
    evaluated_levels = []
    for lazy_value in self.values:
        evaluated_levels.append(
            lazy_value.evaluate(verbose, decode, passes,
                                num_threads, apply_experimental))
    return MultiIndex(evaluated_levels, self.names)
|
Evaluates by creating a MultiIndex containing evaluated data and index.
See `LazyResult`
Returns
-------
MultiIndex
MultiIndex with evaluated data.
|
def create_parser(self, prog_name, subcommand):
    """
    Customize the parser to include option groups.

    The usage string, version, top-level options and option groups are
    all supplied by ``self``. Returns an ``optparse.OptionParser`` with
    one ``OptionGroup`` added per entry of ``self.get_option_groups()``.
    """
    parser = optparse.OptionParser(
        prog=prog_name,
        usage=self.usage(subcommand),
        version=self.get_version(),
        option_list=self.get_option_list())
    for name, description, option_list in self.get_option_groups():
        # (cleanup: dropped a stray semicolon and the opaque
        # list(map(...)) used purely for its side effect)
        group = optparse.OptionGroup(parser, name, description)
        for option in option_list:
            group.add_option(option)
        parser.add_option_group(group)
    return parser
|
Customize the parser to include option groups.
|
def has_active_condition(self, condition, instances):
    """
    Given a list of instances, and the condition active for
    this switch, returns a boolean representing if the
    conditional is met, including a non-instance default.

    Any executable instance that is explicitly inactive vetoes the
    whole check (returns False); otherwise True if at least one was
    explicitly active, and None when nothing was conclusive.
    """
    saw_active = None
    # ``None`` is appended so the non-instance default is also consulted.
    for candidate in instances + [None]:
        if not self.can_execute(candidate):
            continue
        state = self.is_active(candidate, condition)
        if state is False:
            return False
        if state is True:
            saw_active = True
    return saw_active
|
Given a list of instances, and the condition active for
this switch, returns a boolean representing if the
conditional is met, including a non-instance default.
|
def xpathNextAncestor(self, ctxt):
    """Traversal function for the "ancestor" direction. The ancestor
    axis contains the ancestors of the context node: the parent of the
    context node, the parent's parent and so on. The nodes are ordered
    in reverse document order; thus the parent is the first node on the
    axis, and the parent's parent is the second node on the axis.

    Returns the next ancestor wrapped as an ``xmlNode``; raises
    ``xpathError`` when the underlying libxml2 call returns nothing.
    """
    # Unwrap the optional parser-context wrapper down to the raw C object.
    if ctxt is None: ctxt__o = None
    else: ctxt__o = ctxt._o
    ret = libxml2mod.xmlXPathNextAncestor(ctxt__o, self._o)
    if ret is None:raise xpathError('xmlXPathNextAncestor() failed')
    # Re-wrap the returned raw node pointer in a Python-level xmlNode.
    __tmp = xmlNode(_obj=ret)
    return __tmp
|
Traversal function for the "ancestor" direction the
ancestor axis contains the ancestors of the context node;
the ancestors of the context node consist of the parent of
context node and the parent's parent and so on; the nodes
are ordered in reverse document order; thus the parent is
the first node on the axis, and the parent's parent is the
second node on the axis
|
def set(self, instance, value, **kw):  # noqa
    """Set the value of the reference field.

    ``value`` may be given in several forms, each resolved to a list of
    referenced objects:

    - a UID string
    - an AT content object
    - a dict (used as a catalog query against ``self.allowed_types``)
    - a physical path
    - a list containing any mix of the above, plus plain strings
      (treated as a catalog query on the title)

    For single-valued fields exactly one resolved object is required;
    more than one raises ``ValueError``.
    """
    ref = []

    # The value is an UID
    if api.is_uid(value):
        ref.append(api.get_object_by_uid(value))

    # The value is already an object
    if api.is_at_content(value):
        ref.append(value)

    # The value is a dictionary
    # -> handle it like a catalog query
    if u.is_dict(value):
        results = api.search(portal_type=self.allowed_types, **value)
        # NOTE(review): ``map`` is lazy on Python 3; this code appears to
        # target Python 2 (see ``basestring`` below), where it is a list.
        ref = map(api.get_object, results)

    # The value is a list
    if u.is_list(value):
        for item in value:
            # uid
            if api.is_uid(item):
                ref.append(api.get_object_by_uid(item))
                continue
            # object
            if api.is_at_content(item):
                ref.append(api.get_object(item))
                continue
            # path
            if api.is_path(item):
                ref.append(api.get_object_by_path(item))
                continue
            # dict (catalog query)
            if u.is_dict(item):
                # If there is UID of objects, just use it.
                uid = item.get('uid', None)
                if uid:
                    obj = api.get_object_by_uid(uid)
                    ref.append(obj)
                else:
                    results = api.search(portal_type=self.allowed_types, **item)
                    objs = map(api.get_object, results)
                    ref.extend(objs)
                continue
            # Plain string
            # -> do a catalog query for title
            if isinstance(item, basestring):
                results = api.search(portal_type=self.allowed_types, title=item)
                objs = map(api.get_object, results)
                ref.extend(objs)
                continue

    # The value is a physical path
    if api.is_path(value):
        ref.append(api.get_object_by_path(value))

    # Handle non multi valued fields
    if not self.multi_valued:
        if len(ref) > 1:
            raise ValueError("Multiple values given for single valued "
                             "field {}".format(repr(self.field)))
        else:
            # NOTE(review): an empty ``ref`` here raises IndexError --
            # confirm callers guarantee the value resolves to an object.
            ref = ref[0]
    return self._set(instance, ref, **kw)
|
Set the value of the reference field
|
def _make_examples(bam_file, data, ref_file, region_bed, out_file, work_dir):
    """Create example pileup images to feed into variant calling.

    Runs DeepVariant's ``dv_make_examples.py`` over the given BAM and
    regions, writing tfrecord outputs into ``<work_dir>/examples``. The
    step is skipped when matching tfrecords for the sample already exist.
    Returns the example directory path.

    NOTE(review): ``out_file`` is accepted but unused here -- confirm
    whether callers rely on it.
    """
    log_dir = utils.safe_makedir(os.path.join(work_dir, "log"))
    example_dir = utils.safe_makedir(os.path.join(work_dir, "examples"))
    # Idempotency: only generate examples if none exist for this sample.
    if len(glob.glob(os.path.join(example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data)))) == 0:
        # Work in a transactional temp dir so partial outputs from an
        # interrupted run are never picked up.
        with tx_tmpdir(data) as tx_example_dir:
            cmd = ["dv_make_examples.py", "--cores", dd.get_num_cores(data), "--ref", ref_file,
                   "--reads", bam_file, "--regions", region_bed, "--logdir", log_dir,
                   "--examples", tx_example_dir, "--sample", dd.get_sample_name(data)]
            do.run(cmd, "DeepVariant make_examples %s" % dd.get_sample_name(data))
            for fname in glob.glob(os.path.join(tx_example_dir, "%s.tfrecord*.gz" % dd.get_sample_name(data))):
                utils.copy_plus(fname, os.path.join(example_dir, os.path.basename(fname)))
    return example_dir
|
Create example pileup images to feed into variant calling.
|
def rewrite_references_json(json_content, rewrite_json):
    """General purpose references JSON rewriting by matching the id value.

    Parameters
    ----------
    json_content : list of dict
        Reference objects, each of which may carry an ``"id"`` key.
    rewrite_json : dict
        Maps reference id -> dict of key/value overrides to apply.

    Returns
    -------
    list of dict
        The same ``json_content`` list, with matching references
        updated in place.
    """
    for ref in json_content:
        ref_id = ref.get("id")
        if ref_id and ref_id in rewrite_json:
            # dict.update does the per-key assignment in one call and
            # works on both Python 2 and 3 (no six.iteritems needed).
            ref.update(rewrite_json[ref_id])
    return json_content
|
general purpose references json rewriting by matching the id value
|
def prepare_check(data):
    """Prepare check for catalog endpoint

    Parameters:
        data (Object or ObjectID): Check ID or check definition
    Returns:
        Tuple[str, dict]: where first is ID and second is check definition
    """
    # Falsy input means "no check".
    if not data:
        return None, {}
    # A bare string is already the check ID.
    if isinstance(data, str):
        return data, {}

    # Normalize a definition dict; "ID" is accepted as alias of "CheckID".
    result = {}
    if "ID" in data:
        result["CheckID"] = data["ID"]
    known_fields = ("Node", "CheckID", "Name", "Notes", "Status", "ServiceID")
    result.update({field: data[field] for field in known_fields if field in data})

    # A definition carrying nothing but the ID degenerates to the ID form.
    if list(result) == ["CheckID"]:
        return result["CheckID"], {}
    return result.get("CheckID"), result
|
Prepare check for catalog endpoint
Parameters:
data (Object or ObjectID): Check ID or check definition
Returns:
Tuple[str, dict]: where first is ID and second is check definition
|
def _copyAllocatedStates(self):
    """If state is allocated in CPP, copy over the data into our numpy arrays.

    Learn states are copied whenever verbose output or learning-state
    retrieval is requested; inference/confidence states are only copied
    when states are allocated on the C++ side.
    """
    # Get learn states if we need to print them out
    if self.verbosity > 1 or self.retrieveLearningStates:
        (activeT, activeT1, predT, predT1) = self.cells4.getLearnStates()
        # Flatten C++ buffers into (columns, cellsPerColumn) matrices.
        self.lrnActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))
        self.lrnActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))
        self.lrnPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))
        self.lrnPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))

    if self.allocateStatesInCPP:
        # NOTE(review): this branch is unconditionally asserted off --
        # presumably CPP-side state allocation is currently unsupported.
        assert False
        (activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT,
         confidenceT1) = self.cells4.getStates()
        self.cellConfidence['t'] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn))
        self.cellConfidence['t-1'] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn))
        self.colConfidence['t'] = colConfidenceT.reshape(self.numberOfCols)
        self.colConfidence['t-1'] = colConfidenceT1.reshape(self.numberOfCols)
        self.infActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))
        self.infActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))
        self.infPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))
        self.infPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))
|
If state is allocated in CPP, copy over the data into our numpy arrays.
|
def merge_dicts(base, updates):
    """
    Given two dicts, merge them into a new dict as a shallow copy.

    Parameters
    ----------
    base: dict
        The base dictionary. ``None``/empty is treated as ``{}``.
    updates: dict
        Secondary dictionary whose values override the base.
        ``None``/empty is treated as ``{}``.

    Returns
    -------
    dict
        A new shallow copy; neither input is modified.
    """
    merged = dict(base) if base else {}
    merged.update(updates or {})
    return merged
|
Given two dicts, merge them into a new dict as a shallow copy.
Parameters
----------
base: dict
The base dictionary.
updates: dict
Secondary dictionary whose values override the base.
|
def _get_new_column_header(self, vcf_reader):
    """Returns a standardized column header.

    MuTect sample headers include the name of input alignment, which is
    nice, but doesn't match up with the sample names reported in Strelka
    or VarScan. To fix this, we replace with NORMAL and TUMOR using the
    MuTect metadata command line to replace them correctly.

    Raises utils.JQException when the MuTect metaheaders do not identify
    both the normal and the tumor sample.
    """
    mutect_dict = self._build_mutect_dict(vcf_reader.metaheaders)

    new_header_list = []
    required_keys = set([self._NORMAL_SAMPLE_KEY, self._TUMOR_SAMPLE_KEY])
    mutect_keys = set(mutect_dict.keys())

    # Without both sample keys we cannot disambiguate the column order.
    if not required_keys.issubset(mutect_keys):
        raise utils.JQException("Unable to determine normal "
                                "and tumor sample ordering "
                                "based on MuTect metaheader.")

    # Swap the alignment-derived sample names for the canonical labels.
    for field_name in vcf_reader.column_header.split("\t"):
        if field_name == mutect_dict[self._NORMAL_SAMPLE_KEY]:
            field_name = "NORMAL"
        elif field_name == mutect_dict[self._TUMOR_SAMPLE_KEY]:
            field_name = "TUMOR"
        new_header_list.append(field_name)
    return "\t".join(new_header_list)
|
Returns a standardized column header.
MuTect sample headers include the name of input alignment, which is
nice, but doesn't match up with the sample names reported in Strelka
or VarScan. To fix this, we replace with NORMAL and TUMOR using the
MuTect metadata command line to replace them correctly.
|
def submit_vasp_directory(self, rootdir, authors, projects=None,
                          references='', remarks=None, master_data=None,
                          master_history=None, created_at=None,
                          ncpus=None):
    """
    Assimilates all vasp run directories beneath a particular
    directory using BorgQueen to obtain structures, and then submits them
    to the Materials Project as SNL files. VASP related meta data like
    initial structure and final energies are automatically incorporated.

    .. note::

        As of now, this MP REST feature is open only to a select group of
        users. Opening up submissions to all users is being planned for
        the future.

    Args:
        rootdir (str): Rootdir to start assimilating VASP runs from.
        authors: *List* of {"name":'', "email":''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors. The same
            list of authors should apply to all runs.
        projects ([str]): List of Strings ['Project A', 'Project B'].
            This applies to all structures.
        references (str): A String in BibTeX format. Again, this applies to
            all structures.
        remarks ([str]): List of Strings ['Remark A', 'Remark B']
        master_data (dict): A free form dict. Namespaced at the root
            level with an underscore, e.g. {"_materialsproject":<custom
            data>}. This data is added to all structures detected in the
            directory, in addition to other vasp data on a per structure
            basis.
        master_history: A master history to be added to all entries.
        created_at (datetime): A datetime object
        ncpus (int): Number of cpus to use in using BorgQueen to
            assimilate. Defaults to None, which means serial.
    """
    from pymatgen.apps.borg.hive import VaspToComputedEntryDrone
    from pymatgen.apps.borg.queen import BorgQueen
    drone = VaspToComputedEntryDrone(inc_structure=True,
                                     data=["filename",
                                           "initial_structure"])
    queen = BorgQueen(drone, number_of_drones=ncpus)
    queen.parallel_assimilate(rootdir)
    structures = []
    metadata = []
    histories = []
    for e in queen.get_data():
        structures.append(e.structure)
        m = {
            "_vasp": {
                "parameters": e.parameters,
                "final_energy": e.energy,
                "final_energy_per_atom": e.energy_per_atom,
                "initial_structure": e.data["initial_structure"].as_dict()
            }
        }
        # NOTE(review): only runs that carry a "history" contribute to
        # ``histories``, so its length may not match ``structures`` --
        # confirm submit_structures tolerates this.
        if "history" in e.parameters:
            histories.append(e.parameters["history"])
        if master_data is not None:
            m.update(master_data)
        metadata.append(m)
    if master_history is not None:
        # NOTE(review): assumes master_history is a (one-element) list so
        # list repetition yields one history per structure -- confirm.
        histories = master_history * len(structures)
    return self.submit_structures(
        structures, authors, projects=projects, references=references,
        remarks=remarks, data=metadata, histories=histories,
        created_at=created_at)
|
Assimilates all vasp run directories beneath a particular
directory using BorgQueen to obtain structures, and then submits them
to the Materials Project as SNL files. VASP related meta data like
initial structure and final energies are automatically incorporated.
.. note::
As of now, this MP REST feature is open only to a select group of
users. Opening up submissions to all users is being planned for
the future.
Args:
rootdir (str): Rootdir to start assimilating VASP runs from.
authors: *List* of {"name":'', "email":''} dicts,
*list* of Strings as 'John Doe <johndoe@gmail.com>',
or a single String with commas separating authors. The same
list of authors should apply to all runs.
projects ([str]): List of Strings ['Project A', 'Project B'].
This applies to all structures.
references (str): A String in BibTeX format. Again, this applies to
all structures.
remarks ([str]): List of Strings ['Remark A', 'Remark B']
master_data (dict): A free form dict. Namespaced at the root
level with an underscore, e.g. {"_materialsproject":<custom
data>}. This data is added to all structures detected in the
directory, in addition to other vasp data on a per structure
basis.
master_history: A master history to be added to all entries.
created_at (datetime): A datetime object
ncpus (int): Number of cpus to use in using BorgQueen to
assimilate. Defaults to None, which means serial.
|
def table_to_csv(table, engine, filepath, chunksize=1000, overwrite=False):
    """
    Export entire table to a csv file.

    :param table: :class:`sqlalchemy.Table` instance.
    :param engine: :class:`sqlalchemy.engine.base.Engine`.
    :param filepath: file path.
    :param chunksize: number of rows write to csv each time.
    :param overwrite: bool, if True, avoid overwriting an existing file.
        NOTE(review): this flag is currently accepted but never used --
        confirm whether it should be forwarded to ``sql_to_csv``.

    (Chinese original: "write all rows of the table into the csv file.")
    """
    # Select every row of the table and delegate the chunked CSV writing.
    sql = select([table])
    sql_to_csv(sql, engine, filepath, chunksize)
|
Export entire table to a csv file.
:param table: :class:`sqlalchemy.Table` instance.
:param engine: :class:`sqlalchemy.engine.base.Engine`.
:param filepath: file path.
:param chunksize: number of rows write to csv each time.
:param overwrite: bool, if True, avoid overwriting the existing file.
**中文文档**
将整个表中的所有数据, 写入csv文件。
|
def fetch_uri(self, uri, start=None, end=None):
    """Fetch sequence for a URI/CURIE of the form namespace:alias, such as
    NCBI:NM_000059.3.

    ``start``/``end`` are passed through to :meth:`fetch` to select a
    subsequence.

    NOTE(review): a URI that does not match ``uri_re`` makes ``match()``
    return None and this raises AttributeError -- confirm callers
    validate the URI first.
    """
    namespace, alias = uri_re.match(uri).groups()
    return self.fetch(alias=alias, namespace=namespace, start=start, end=end)
|
fetch sequence for URI/CURIE of the form namespace:alias, such as
NCBI:NM_000059.3.
|
async def login(self, email: str, password: str) -> bool:
    """Login to the profile.

    Sends a ``Signin`` request to the user API and, on success, stores
    the returned account id (``gid``) on ``self.account_id``.

    Returns True on success, False when the service reports a non-zero
    ``Code``.
    """
    login_resp = await self._request(
        'post',
        API_URL_USER,
        json={
            'version': '1.0',
            'method': 'Signin',
            'param': {
                'Email': email,
                'Password': password,
                'CaptchaCode': ''
            },
            'sourcetype': 0
        })
    _LOGGER.debug('Login response: %s', login_resp)
    # A non-zero Code signals a failed signin.
    if login_resp.get('Code') != 0:
        return False
    self.account_id = login_resp['Json']['gid']
    return True
|
Login to the profile.
|
def respects_language(fun):
    """Decorator for tasks with respect to site's current language.

    You can use this decorator on your tasks together with default @task
    decorator (remember that the task decorator must be applied last).

    See also the with-statement alternative :func:`respect_language`.

    **Example**:

    .. code-block:: python

        @task
        @respects_language
        def my_task()
            # localize something.

    The task will then accept a ``language`` argument that will be
    used to set the language in the task, and the task can thus be
    called like:

    .. code-block:: python

        from django.utils import translation
        from myapp.tasks import my_task

        # Pass the current language on to the task
        my_task.delay(language=translation.get_language())

        # or set the language explicitly
        my_task.delay(language='no.no')
    """
    @wraps(fun)
    def _inner(*args, **kwargs):
        # Pop the language kwarg so the wrapped task never sees it, and
        # activate that language for the duration of the call.
        with respect_language(kwargs.pop('language', None)):
            return fun(*args, **kwargs)
    return _inner
|
Decorator for tasks with respect to site's current language.
You can use this decorator on your tasks together with default @task
decorator (remember that the task decorator must be applied last).
See also the with-statement alternative :func:`respect_language`.
**Example**:
.. code-block:: python
@task
@respects_language
def my_task()
# localize something.
The task will then accept a ``language`` argument that will be
used to set the language in the task, and the task can thus be
called like:
.. code-block:: python
from django.utils import translation
from myapp.tasks import my_task
# Pass the current language on to the task
my_task.delay(language=translation.get_language())
# or set the language explicitly
my_task.delay(language='no.no')
|
def connection_key(self):
    """
    Return an index key used to cache the sampler connection.

    The key has the form ``host:namespace:username``, uniquely
    identifying the endpoint and the credentials used against it.
    """
    key_parts = (self.host, self.namespace, self.username)
    return "%s:%s:%s" % key_parts
|
Return an index key used to cache the sampler connection.
|
def _layout(dict_vars, dict_vars_extra):
    """Print nicely [(var, description)] from phyvars.

    ``dict_vars`` descriptions are used verbatim; ``dict_vars_extra``
    descriptions are reduced via ``baredoc`` before printing.
    """
    desc = [(v, m.description) for v, m in dict_vars.items()]
    desc.extend((v, baredoc(m.description))
                for v, m in dict_vars_extra.items())
    _pretty_print(desc, min_col_width=26)
|
Print nicely [(var, description)] from phyvars
|
def http_purge_url(url):
    """
    Do an HTTP PURGE of the given asset.

    The URL is run through urlparse and must point to the varnish
    instance, not the varnishadm. Returns the HTTP response object; a
    non-200 status is logged as an error but not raised.
    """
    parsed = urlparse(url)
    connection = HTTPConnection(parsed.hostname, parsed.port or 80)
    path = parsed.path or '/'
    # Preserve any query string on the purged path.
    target = '%s?%s' % (path, parsed.query) if parsed.query else path
    # The Host header must match what varnish expects, including the
    # port when one was given explicitly.
    if parsed.port:
        host_header = '%s:%s' % (parsed.hostname, parsed.port)
    else:
        host_header = parsed.hostname
    connection.request('PURGE', target, '', {'Host': host_header})
    response = connection.getresponse()
    if response.status != 200:
        logging.error('Purge failed with status: %s' % response.status)
    return response
|
Do an HTTP PURGE of the given asset.
The URL is run through urlparse and must point to the varnish instance not the varnishadm
|
def load_external_components(typesys):
    """Load all external types defined by iotile plugins.

    This allows plugins to register their own types for type annotations and
    allows all registered iotile components that have associated type
    libraries to add themselves to the global type system.

    Args:
        typesys: The type system into which the external type packages
            are loaded.
    """
    # Find all of the registered IOTile components and see if we need to
    # add any type libraries for them.
    from iotile.core.dev.registry import ComponentRegistry

    reg = ComponentRegistry()
    modules = reg.list_components()

    # Flatten the per-component product lists with a simple extend loop;
    # the previous reduce(lambda x, y: x + y, ...) built a new list per
    # component (quadratic) and relied on the py2-only builtin ``reduce``.
    typelibs = []
    for module_name in modules:
        typelibs.extend(reg.find_component(module_name).find_products('type_package'))

    for lib in typelibs:
        # Type packages may be given as module paths or .py files.
        if lib.endswith('.py'):
            lib = lib[:-3]
        typesys.load_external_types(lib)
|
Load all external types defined by iotile plugins.
This allows plugins to register their own types for type annotations and
allows all registered iotile components that have associated type libraries to
add themselves to the global type system.
|
def xml_compare(expected, found):
    """Checks equality of two ``ElementTree`` objects.

    Elements are equal when they have the same tag, the same attributes,
    the same (recursively equal) children in the same order, and the same
    text content (absent/whitespace-only text is treated as empty).

    :param expected: An ``ElementTree`` element.
    :param found: An ``ElementTree`` element.
    :return: ``Boolean``, whether the two objects are equal.
    """
    # if comparing the same ET object
    if expected == found:
        return True
    # BUGFIX: compare tags up front. Previously two childless, text-less
    # elements with different tags (e.g. <a/> vs <b/>) compared equal,
    # because the tag check was only reached on the text-bearing path.
    if expected.tag != found.tag:
        return False
    # compare element attributes, ignoring order
    if set(expected.items()) != set(found.items()):
        return False
    # check for equal number of children
    expected_children = list(expected)
    found_children = list(found)
    if len(expected_children) != len(found_children):
        return False
    # compare children recursively, position by position
    if not all(xml_compare(a, b) for a, b in zip(expected_children, found_children)):
        return False
    # if neither element carries a meaningful text node, they are equal
    if (expected.text is None or expected.text.strip() == "") \
            and (found.text is None or found.text.strip() == ""):
        return True
    return expected.text == found.text and expected.attrib == found.attrib
|
Checks equality of two ``ElementTree`` objects.
:param expected: An ``ElementTree`` object.
:param found: An ``ElementTree`` object.
:return: ``Boolean``, whether the two objects are equal.
|
def get_output_structure(self):
    '''Determine the structure from the output.

    Parses the Quantum ESPRESSO pw.x output file: reads the number of
    atoms, the initial lattice parameter, the initial unit cell and the
    initial atomic coordinates. For static runs the initial structure is
    returned; for relaxation runs the cell/coordinates from the
    "Begin final coordinates" section are used instead.

    Returns an ``Atoms`` object (with periodic boundary conditions);
    raises Exception when a required section cannot be found.
    '''
    bohr_to_angstrom = 0.529177249
    # determine the number of atoms
    natoms = int(float(self._get_line('number of atoms/cell', self.outputf).split('=')[-1]))
    # determine the initial lattice parameter
    alat = float(self._get_line('lattice parameter (alat)', self.outputf).split('=')[-1].split()[0])
    # find the initial unit cell
    unit_cell = []
    with open(self.outputf, 'r') as fp:
        for line in fp:
            if "crystal axes:" in line:
                for i in range(3):
                    # axes are printed in alat units; convert to Angstrom
                    unit_cell.append([float(j)*alat*bohr_to_angstrom for j in next(fp).split('(')[-1].split(')')[0].split()])
                break
    if len(unit_cell) == 0: raise Exception('Cannot find the initial unit cell')
    # find the initial atomic coordinates
    coords = [] ; atom_symbols = []
    with open(self.outputf, 'r') as fp:
        for line in fp:
            if "site n." in line and "atom" in line and "positions" in line and "alat units" in line:
                for i in range(natoms):
                    coordline = next(fp)
                    # strip trailing digits from the species label, e.g. "Fe2" -> "Fe"
                    atom_symbols.append(''.join([i for i in coordline.split()[1] if not i.isdigit()]))
                    coord_conv_factor = alat*bohr_to_angstrom
                    coords.append([float(j)*coord_conv_factor for j in coordline.rstrip().split('=')[-1].split('(')[-1].split(')')[0].split()])
                break
    if len(coords) == 0: raise Exception('Cannot find the initial atomic coordinates')
    if type(self.is_relaxed()) == type(None):
        # static run: create, populate, and return the initial structure
        structure = Atoms(symbols=atom_symbols, cell=unit_cell, pbc=True)
        structure.set_positions(coords)
        return structure
    else:
        # relaxation run: update with the final structure
        with open(self.outputf) as fp:
            for line in fp:
                if "Begin final coordinates" in line:
                    if 'new unit-cell volume' in next(fp):
                        # unit cell allowed to change
                        next(fp)  # blank line
                        # get the final unit cell
                        unit_cell = []
                        cellheader = next(fp)
                        if 'bohr' in cellheader.lower():
                            cell_conv_factor = bohr_to_angstrom
                        elif 'angstrom' in cellheader.lower():
                            cell_conv_factor = 1.0
                        else:
                            # header carries "(alat= ...)": cell in alat units
                            alat = float(cellheader.split('alat=')[-1].replace(')', ''))
                            cell_conv_factor = alat*bohr_to_angstrom
                        for i in range(3):
                            unit_cell.append([float(j)*cell_conv_factor for j in next(fp).split()])
                        next(fp)  # blank line
                    # get the final atomic coordinates; the units are given
                    # in parentheses in the section header line
                    coordtype = next(fp).split()[-1].replace('(', '').replace(')', '')
                    if coordtype == 'bohr':
                        coord_conv_factor = bohr_to_angstrom
                    elif coordtype == 'angstrom' or coordtype == 'crystal':
                        coord_conv_factor = 1.0
                    else:
                        coord_conv_factor = alat*bohr_to_angstrom
                    coords = []  # reinitialize the coords
                    for i in range(natoms):
                        coordline = next(fp).split()
                        coords.append([float(j)*coord_conv_factor for j in coordline[1:4]])
                    # create, populate, and return the final structure
                    structure = Atoms(symbols=atom_symbols, cell=unit_cell, pbc=True)
                    if coordtype == 'crystal':
                        structure.set_scaled_positions(coords)  # direct coord
                    else:
                        structure.set_positions(coords)  # cartesian coord
                    return structure
            raise Exception('Cannot find the final coordinates')
|
Determine the structure from the output
|
def format_message(self, msg):
    """Format a log record into an event dict.

    Returns a dict carrying a millisecond timestamp, the formatted
    message text, and the destination stream/group names (falling back
    to the record's logger name when no stream is configured).
    """
    timestamp_ms = int(msg.created * 1000)
    return {
        'timestamp': timestamp_ms,
        'message': self.format(msg),
        'stream': self.log_stream or msg.name,
        'group': self.log_group,
    }
|
format message.
|
def get_tournament_prize_pool(self, leagueid=None, **kwargs):
    """Returns a dictionary that includes community funded tournament prize pools.

    :param leagueid: (int, optional) league to query; a ``leagueid``
        supplied via kwargs takes precedence over this argument.
    :return: dictionary of prize pools, see :doc:`responses </responses>`,
        or ``None`` when the HTTP status check fails.
    """
    # Fold the positional argument into kwargs unless already supplied.
    if 'leagueid' not in kwargs:
        kwargs['leagueid'] = leagueid
    url = self.__build_url(urls.GET_TOURNAMENT_PRIZE_POOL, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    if not self.__check_http_err(req.status_code):
        return response.build(req, url, self.raw_mode)
|
Returns a dictionary that includes community funded tournament prize pools
:param leagueid: (int, optional)
:return: dictionary of prize pools, see :doc:`responses </responses>`
|
def todegdec(origin):
    """
    Convert from [+/-]DDD°MMM'SSS.SSSS" or [+/-]DDD°MMM.MMMM' to [+/-]DDD.DDDDD
    """
    # Plain decimal input (a number or a numeric string) converts directly.
    try:
        return float(origin)
    except ValueError:
        pass
    # Degrees/minutes/seconds notation.
    match = dms_re.search(origin)
    if match:
        deg = int(match.group('degrees'))
        mins = float(match.group('minutes'))
        secs = float(match.group('seconds'))
        return deg + mins / 60 + secs / 3600
    # Degrees with decimal minutes notation.
    match = mindec_re.search(origin)
    if match:
        deg = int(match.group('degrees'))
        mins = float(match.group('minutes'))
        return deg + mins / 60
    # NOTE(review): falls through to an implicit None when no format
    # matches — presumably callers rely on that; confirm before changing.
|
Convert from [+/-]DDD°MMM'SSS.SSSS" or [+/-]DDD°MMM.MMMM' to [+/-]DDD.DDDDD
|
def to_dict(self):
    """
    Prepare a JSON serializable dict for read-only purposes.
    Includes storages and IP-addresses.
    Use prepare_post_body for POST and .save() for PUT.
    """
    fields = dict(vars(self).items())
    if self.populated:
        fields['ip_addresses'] = [
            {
                'address': ip.address,
                'access': ip.access,
                'family': ip.family,
            }
            for ip in self.ip_addresses
        ]
        fields['storage_devices'] = [
            {
                'address': storage.address,
                'storage': storage.uuid,
                'storage_size': storage.size,
                'storage_title': storage.title,
                'type': storage.type,
            }
            for storage in self.storage_devices
        ]
    # Internal bookkeeping attributes are not part of the public payload.
    del fields['populated']
    del fields['cloud_manager']
    return fields
|
Prepare a JSON serializable dict for read-only purposes.
Includes storages and IP-addresses.
Use prepare_post_body for POST and .save() for PUT.
|
def import_data(self, data):
    """Import additional data for tuning

    Parameters
    ----------
    data:
        a list of dictionaries, each of which has at least two keys,
        'parameter' and 'value'
    """
    # Running counter used only for the progress log below.
    _completed_num = 0
    for trial_info in data:
        logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data)))
        _completed_num += 1
        # Random search gains nothing from historical observations,
        # so the whole import is skipped (note: returns on the first
        # iteration, abandoning the remaining entries by design).
        if self.algorithm_name == 'random_search':
            return
        assert "parameter" in trial_info
        _params = trial_info["parameter"]
        assert "value" in trial_info
        _value = trial_info['value']
        # Falsy values (None, 0, empty) carry no usable signal — skip.
        if not _value:
            logger.info("Useless trial data, value is %s, skip this trial data." %_value)
            continue
        self.supplement_data_num += 1
        # Synthetic parameter id marks this trial as imported, not run live.
        _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
        self.total_data[_parameter_id] = _add_index(in_x=self.json, parameter=_params)
        # Feed the historical observation to the tuner as if it were a
        # freshly completed trial.
        self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
    logger.info("Successfully import data to TPE/Anneal tuner.")
|
Import additional data for tuning
Parameters
----------
data:
a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
|
def scan_forever(queue, *args, **kwargs):
    """Return an infinite iterator over an fsq queue that blocks waiting
    for the queue trigger. Work is yielded as FSQWorkItem objects when
    available, assuming the default generator (FSQScanGenerator) is
    in use.

    Essentially, this function wraps fsq.scan() and blocks for more work.
    It takes all the same parameters as scan(), plus process_once_now,
    which is a boolean to determine if an initial .scan() is run before
    listening to the trigger. This argument defaults to True.
    """
    # Remove our own flag before forwarding kwargs, so scan() never
    # receives an argument it does not document (the original .get()
    # left it in kwargs and leaked it into every scan() call).
    process_once_now = kwargs.pop('process_once_now', True)
    if process_once_now:
        for work in scan(queue, *args, **kwargs):
            yield work
    while True:
        # Block until the queue trigger fires (a byte arrives on the
        # trigger FIFO), then drain whatever work became available.
        with open(fsq_path.trigger(queue), 'rb') as t:
            t.read(1)
        for work in scan(queue, *args, **kwargs):
            yield work
|
Return an infinite iterator over an fsq queue that blocks waiting
for the queue trigger. Work is yielded as FSQWorkItem objects when
available, assuming the default generator (FSQScanGenerator) is
in use.
Essentially, this function wraps fsq.scan() and blocks for more work.
It takes all the same parameters as scan(), plus process_once_now,
which is a boolean to determine if an initial .scan() is run before
listening to the trigger. This argument defaults to True.
|
def _ProcessAudio(self, tag, wall_time, step, audio):
    """Records an audio summary under `tag` in the accumulated state."""
    entry = AudioEvent(
        wall_time=wall_time,
        step=step,
        encoded_audio_string=audio.encoded_audio_string,
        content_type=audio.content_type,
        sample_rate=audio.sample_rate,
        length_frames=audio.length_frames)
    self.audios.AddItem(tag, entry)
|
Processes an audio record by adding it to accumulated state.
|
def run(self, plugins, context, callback=None, callback_args=[]):
    """Commence asynchronous tasks

    This method runs through the provided `plugins` in
    an asynchronous manner, interrupted by either
    completion or failure of a plug-in.

    Inbetween processes, the GUI is fed information
    from the task and redraws itself.

    Arguments:
        plugins (list): Plug-ins to process
        context (list): Instances to process
        callback (func, optional): Called on finish
        callback_args (list, optional): Arguments passed to callback.
            (Mutable default is safe here: the list is only unpacked,
            never mutated.)
    """
    # if "ready" not in self.states:
    #     return self.error.emit("Not ready")
    # Initial set-up
    self.data["state"]["is_running"] = True
    # Setup statistics for better debugging.
    # (To be finalised in `on_finished`)
    util.timer("publishing")
    stats = {"requestCount": self.host.stats()["totalRequestCount"]}
    # For each completed task, update
    # the GUI and commence next task.
    def on_next(result):
        # StopIteration signals the plug-in iterator is exhausted.
        if isinstance(result, StopIteration):
            return on_finished(str(result))
        self.data["models"]["item"].update_with_result(result)
        self.data["models"]["result"].update_with_result(result)
        # Once the main thread has finished updating
        # the GUI, we can proceed handling of next task.
        util.defer(self.host.context, callback=update_context)
    def update_context(ctx):
        # Synchronise the local `context` list and the item model with
        # the host's context: refresh known instances, add new ones,
        # drop removed ones, then schedule the next iterator step.
        item_model = self.data["models"]["item"]
        instance_items = {item.id: item for item in item_model.instances}
        for instance in ctx:
            id = instance.id  # NOTE: intentionally shadows builtin `id` locally
            item = instance_items.get(id)
            if item is not None:
                proxy = next((i for i in context if i.id == id), None)
                update_instance(item, proxy, instance.data)
                continue
            context.append(instance)
            item_model.add_instance(instance.to_json())
        if len(ctx) < item_model.instance_count():
            remove_instance(ctx, instance_items)
        util.defer(lambda: next(iterator), callback=on_next)
    def update_instance(item, proxy, data):
        """Update model and proxy for reflecting changes on instance"""
        # Update instance item model data for GUI
        item.isToggled = data.get("publish", True)
        item.optional = data.get("optional", True)
        item.category = data.get("category", data["family"])
        families = [data["family"]]
        families.extend(data.get("families", []))
        item.familiesConcatenated = ", ".join(families)
        if proxy is None:
            return
        # Update proxy instance data which currently being iterated in
        # the primary iterator
        proxy.data["publish"] = data.get("publish", True)
        proxy.data["family"] = data["family"]
        proxy.data["families"] = data.get("families", [])
    def remove_instance(ctx, items):
        """Remove instance"""
        instances = {i.id: i for i in context}
        instance_ids = set(i.id for i in ctx)
        # NOTE(review): relies on the host context object `ctx` itself
        # exposing an `.id` attribute (beyond its contained instances)
        # — confirm against the host implementation.
        instance_ids.add(ctx.id)
        for id, item in items.items():
            if id not in instance_ids:
                # Remove from model
                self.data["models"]["item"].remove_instance(item)
                # Remove instance from list
                context.remove(instances[id])
    def on_finished(message=None):
        """Locally running function"""
        self.data["state"]["is_running"] = False
        self.finished.emit()
        if message:
            self.info.emit(message)
        # Report statistics
        stats["requestCount"] -= self.host.stats()["totalRequestCount"]
        util.timer_end("publishing", "Spent %.2f ms resetting")
        util.echo("Made %i requests during publish."
                  % abs(stats["requestCount"]))
        if callback:
            callback(*callback_args)
    # The iterator initiates processing and is
    # executed one item at a time in a separate thread.
    # Once the thread finishes execution, it signals
    # the `callback`.
    iterator = self.iterator(plugins, context)
    util.defer(lambda: next(iterator), callback=on_next)
|
Commence asynchronous tasks
This method runs through the provided `plugins` in
an asynchronous manner, interrupted by either
completion or failure of a plug-in.
Inbetween processes, the GUI is fed information
from the task and redraws itself.
Arguments:
plugins (list): Plug-ins to process
context (list): Instances to process
callback (func, optional): Called on finish
callback_args (list, optional): Arguments passed to callback
|
def polylog2(x):
    r'''Evaluate PolyLog(2, x) on the range 0 <= x <= 1 via piecewise
    Pade approximations (coefficient sets split at 0.7 and 0.99), with
    relative error guaranteed to be < 1E-7 from 0 to 0.99999. Raises an
    exception for arguments below 0 or above 1.

    Parameters
    ----------
    x : float
        Value to evaluate PolyLog(2, x) at

    Returns
    -------
    y : float
        Evaluated result

    Notes
    -----
    Efficient (2-4 microseconds). No implementation of this function
    exists in SciPy. Derived with mpmath's pade approximation.
    Required for the entropy integral of
    :obj:`thermo.heat_capacity.Zabransky_quasi_polynomial`.

    Examples
    --------
    >>> polylog2(0.5)
    0.5822405264516294
    '''
    # Each branch supplies numerator/denominator coefficients and the
    # expansion point of its Pade approximant.
    if 0 <= x <= 0.7:
        num = [0.06184590404457956, -0.7460693871557973, 2.2435704485433376, -2.1944070385048526, 0.3382265629285811, 0.2791966558569478]
        den = [-0.005308735283483908, 0.1823421262956287, -1.2364596896290079, 2.9897802200092296, -2.9365321202088004, 1.0]
        center = 0.26
    elif 0.7 < x <= 0.99:
        num = [7543860.817140365, -10254250.429758755, -4186383.973408412, 7724476.972409749, -3130743.609030545, 600806.068543299, -62981.15051292659, 3696.7937385473397, -114.06795167646395, 1.4406337969700391]
        den = [-1262997.3422452002, 10684514.56076485, -16931658.916668657, 10275996.02842749, -3079141.9506451315, 511164.4690136096, -49254.56172495263, 2738.0399260270983, -81.36790509581284, 1.0]
        center = 0.95
    elif 0.99 < x <= 1:
        num = [8.548256176424551e+34, 1.8485781239087334e+35, -2.1706889553798647e+34, 8.318563643438321e+32, -1.559802348661511e+31, 1.698939241177209e+29, -1.180285031647229e+27, 5.531049937687143e+24, -1.8085903366375877e+22, 4.203276811951035e+19, -6.98211620300421e+16, 82281997048841.92, -67157299796.61345, 36084814.54808544, -11478.108105137717, 1.6370226052761176]
        den = [-1.9763570499484274e+35, 1.4813997374958851e+35, -1.4773854824041134e+34, 5.38853721252814e+32, -9.882387315028929e+30, 1.0635231532999732e+29, -7.334629044071992e+26, 3.420655574477631e+24, -1.1147787784365177e+22, 2.584530363912858e+19, -4.285376337404043e+16, 50430830490687.56, -41115254924.43107, 22072284.971253656, -7015.799744041691, 1.0]
        center = 0.999
    else:
        raise Exception('Approximation is valid between 0 and 1 only.')
    z = x - center
    return horner(num, z)/horner(den, z)
|
r'''Simple function to calculate PolyLog(2, x) from ranges 0 <= x <= 1,
with relative error guaranteed to be < 1E-7 from 0 to 0.99999. This
is a Pade approximation, with three coefficient sets with splits at 0.7
and 0.99. An exception is raised if x is under 0 or above 1.
Parameters
----------
x : float
Value to evaluate PolyLog(2, x) T
Returns
-------
y : float
Evaluated result
Notes
-----
Efficient (2-4 microseconds). No implementation of this function exists in
SciPy. Derived with mpmath's pade approximation.
Required for the entropy integral of
:obj:`thermo.heat_capacity.Zabransky_quasi_polynomial`.
Examples
--------
>>> polylog2(0.5)
0.5822405264516294
|
def decrypt_file(file, key):
    """
    Decrypt ``file`` with the Fernet key ``key``.

    The encrypted file must end with the ``.enc`` extension; the
    decrypted content is written next to it without that extension,
    with permissions restricted to 0o600.

    See also :func:`doctr.local.encrypt_file`.
    """
    if not file.endswith('.enc'):
        raise ValueError("%s does not end with .enc" % file)
    target = file[:-4]
    with open(file, 'rb') as encrypted:
        plaintext = Fernet(key).decrypt(encrypted.read())
    with open(target, 'wb') as out:
        out.write(plaintext)
    # Decrypted secrets should be readable by the owner only.
    os.chmod(target, 0o600)
|
Decrypts the file ``file``.
The encrypted file is assumed to end with the ``.enc`` extension. The
decrypted file is saved to the same location without the ``.enc``
extension.
The permissions on the decrypted file are automatically set to 0o600.
See also :func:`doctr.local.encrypt_file`.
|
def get(method, hmc, uri, uri_parms, logon_required):
    """Operation: List Logical Partitions of CPC (empty result in DPM
    mode)."""
    cpc_oid, query_str = uri_parms[0], uri_parms[1]
    try:
        cpc = hmc.cpcs.lookup_by_oid(cpc_oid)
    except KeyError:
        raise InvalidResourceError(method, uri)
    result_lpars = []
    # DPM-enabled CPCs have no classic-mode LPARs: return an empty list.
    if not cpc.dpm_enabled:
        filter_args = parse_query_parms(method, uri, query_str)
        for lpar in cpc.lpars.list(filter_args):
            # Expose only the summary properties in the list result.
            summary = {prop: lpar.properties[prop]
                       for prop in lpar.properties
                       if prop in ('object-uri', 'name', 'status')}
            result_lpars.append(summary)
    return {'logical-partitions': result_lpars}
|
Operation: List Logical Partitions of CPC (empty result in DPM
mode.
|
def koji_instance(config, message, instance=None, *args, **kw):
    """ Particular koji instances

    You may not have even known it, but we have multiple instances of the koji
    build system. There is the **primary** buildsystem at
    `koji.fedoraproject.org <http://koji.fedoraproject.org>`_ and also
    secondary instances for `ppc <http://ppc.koji.fedoraproject.org>`_, `arm
    <http://arm.koji.fedoraproject.org>`_, and `s390
    <http://s390.koji.fedoraproject.org>`_.

    With this rule, you can limit messages to only those from particular koji
    instances (like the **primary** one if you want to ignore the secondary
    ones). You should use this rule **in combination** with other koji rules
    so you get only a *certain subset* of messages from one instance. You
    almost certainly do not want **all** messages from a given instance.

    You can specify several instances by separating them with a comma ',',
    i.e.: ``primary,ppc``.
    """
    # A keyword argument takes precedence over the positional parameter.
    instance = kw.get('instance', instance)
    if not instance:
        return False
    wanted = {part.strip() for part in instance.split(',')}
    return message['msg'].get('instance') in wanted
|
Particular koji instances
You may not have even known it, but we have multiple instances of the koji
build system. There is the **primary** buildsystem at
`koji.fedoraproject.org <http://koji.fedoraproject.org>`_ and also
secondary instances for `ppc <http://ppc.koji.fedoraproject.org>`_, `arm
<http://arm.koji.fedoraproject.org>`_, and `s390
<http://s390.koji.fedoraproject.org>`_.
With this rule, you can limit messages to only those from particular koji
instances (like the **primary** one if you want to ignore the secondary
ones). You should use this rule **in combination** with other koji rules
so you get only a *certain subset* of messages from one instance. You
almost certainly do not want **all** messages from a given instance.
You can specify several instances by separating them with a comma ',',
i.e.: ``primary,ppc``.
|
def get_bss_load(_, data):
    """Parse a BSS Load information element.

    Based on http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n935.

    Positional arguments:
    data -- bytearray data to read.

    Returns:
    Dict.
    """
    # Multi-byte fields are little-endian.
    station_count = (data[1] << 8) | data[0]
    utilisation = data[2] / 255.0
    capacity = (data[4] << 8) | data[3]
    return {
        'station count': station_count,
        'channel utilisation': utilisation,
        'available admission capacity': capacity,
    }
|
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/scan.c?id=v3.17#n935.
Positional arguments:
data -- bytearray data to read.
Returns:
Dict.
|
def add_file(self, file_obj):
    """
    Add new file into the storage.

    Args:
        file_obj (file): Opened file-like object.

    Returns:
        obj: Path where the file-like object is stored contained with hash\
in :class:`.PathAndHash` object.

    Raises:
        AssertionError: If the `file_obj` is not file-like object.
        IOError: If the file couldn't be added to storage.
    """
    BalancedDiscStorage._check_interface(file_obj)
    file_hash = self._get_hash(file_obj)
    dir_path = self._create_dir_path(file_hash)
    final_path = os.path.join(dir_path, file_hash)

    def copy_to_file(from_file, to_path):
        # Stream in chunks so large uploads are never fully in memory.
        with open(to_path, "wb") as out_file:
            for part in self._get_file_iterator(from_file):
                out_file.write(part)

    try:
        copy_to_file(from_file=file_obj, to_path=final_path)
    except Exception:
        # Best-effort cleanup of a partially written file. The unlink is
        # guarded so that a secondary failure (e.g. open() failed before
        # the file was ever created) cannot mask the original exception.
        try:
            os.unlink(final_path)
        except OSError:
            pass
        raise
    return PathAndHash(path=final_path, hash=file_hash)
|
Add new file into the storage.
Args:
file_obj (file): Opened file-like object.
Returns:
obj: Path where the file-like object is stored contained with hash\
in :class:`.PathAndHash` object.
Raises:
AssertionError: If the `file_obj` is not file-like object.
IOError: If the file couldn't be added to storage.
|
def _Close(self):
    """Closes the file-like object and every segment file it wraps."""
    # pylint: disable=protected-access
    super(EWFFile, self)._Close()
    for segment_file in self._file_objects:
        segment_file.close()
    # Rebind to a fresh list rather than mutating the old one.
    self._file_objects = []
|
Closes the file-like object.
|
def bucket_policy_to_dict(policy):
    """Produce a dictionary of read, write permissions for an existing bucket policy document

    Args:
        policy: A policy document, either as a dict or a JSON string.

    Returns:
        dict: Maps ``(user_name, prefix)`` tuples to ``'R'`` or ``'W'``.
    """
    import json

    if not isinstance(policy, dict):
        policy = json.loads(policy)

    statements = {s['Sid']: s for s in policy['Statement']}

    d = {}
    for rw in ('Read', 'Write'):
        for prefix in TOP_LEVEL_DIRS:
            # Statement IDs follow the "<Read|Write><Prefix>" convention.
            sid = rw.title() + prefix.title()
            if sid not in statements:
                continue
            principals = statements[sid]['Principal']['AWS']
            # Normalize: AWS may be a single ARN string or a list of ARNs.
            if not isinstance(principals, list):
                principals = [principals]
            for principal in principals:
                # The user name is the last path component of the ARN.
                user_name = principal.split('/').pop()
                d[(user_name, prefix)] = rw[0]
    return d
|
Produce a dictionary of read, write permissions for an existing bucket policy document
|
def centroid(self):
    '''
    Return the geometric center (the mean of all vertex positions).
    '''
    vertices = self.v
    if vertices is None:
        raise ValueError('Mesh has no vertices; centroid is not defined')
    return np.mean(vertices, axis=0)
|
Return the geometric center.
|
def save_aggregate_report_to_elasticsearch(aggregate_report,
                                           index_suffix=None,
                                           monthly_indexes=False):
    """
    Saves a parsed DMARC aggregate report to ElasticSearch

    Args:
        aggregate_report (OrderedDict): A parsed aggregate report
        index_suffix (str): The suffix of the name of the index to save to
        monthly_indexes (bool): Use monthly indexes instead of daily indexes

    Raises:
        AlreadySaved: If a report with the same ID, org, domain and date
            range already exists in Elasticsearch
    """
    logger.debug("Saving aggregate report to Elasticsearch")
    # Work on a copy so the caller's dict is not mutated.
    aggregate_report = aggregate_report.copy()
    metadata = aggregate_report["report_metadata"]
    org_name = metadata["org_name"]
    report_id = metadata["report_id"]
    domain = aggregate_report["policy_published"]["domain"]
    begin_date = human_timestamp_to_datetime(metadata["begin_date"])
    end_date = human_timestamp_to_datetime(metadata["end_date"])
    begin_date_human = begin_date.strftime("%Y-%m-%d %H:%M:%S")
    end_date_human = end_date.strftime("%Y-%m-%d %H:%M:%S")
    # The index is partitioned by the report's begin date.
    if monthly_indexes:
        index_date = begin_date.strftime("%Y-%m")
    else:
        index_date = begin_date.strftime("%Y-%m-%d")
    aggregate_report["begin_date"] = begin_date
    aggregate_report["end_date"] = end_date
    date_range = [aggregate_report["begin_date"],
                  aggregate_report["end_date"]]
    # Deduplication: search for an existing report with the same
    # identity (org, report id, domain, date range) before indexing.
    org_name_query = Q(dict(match=dict(org_name=org_name)))
    report_id_query = Q(dict(match=dict(report_id=report_id)))
    domain_query = Q(dict(match={"published_policy.domain": domain}))
    begin_date_query = Q(dict(match=dict(date_range=begin_date)))
    end_date_query = Q(dict(match=dict(date_range=end_date)))
    search = Search(index="dmarc_aggregate*")
    query = org_name_query & report_id_query & domain_query
    query = query & begin_date_query & end_date_query
    search.query = query
    existing = search.execute()
    if len(existing) > 0:
        raise AlreadySaved("An aggregate report ID {0} from {1} about {2} "
                           "with a date range of {3} UTC to {4} UTC already "
                           "exists in "
                           "Elasticsearch".format(report_id,
                                                  org_name,
                                                  domain,
                                                  begin_date_human,
                                                  end_date_human))
    published_policy = _PublishedPolicy(
        domain=aggregate_report["policy_published"]["domain"],
        adkim=aggregate_report["policy_published"]["adkim"],
        aspf=aggregate_report["policy_published"]["aspf"],
        p=aggregate_report["policy_published"]["p"],
        sp=aggregate_report["policy_published"]["sp"],
        pct=aggregate_report["policy_published"]["pct"],
        fo=aggregate_report["policy_published"]["fo"]
    )
    # One Elasticsearch document is created per record in the report.
    for record in aggregate_report["records"]:
        agg_doc = _AggregateReportDoc(
            # NOTE(review): "xml_schemea" matches the field name on
            # _AggregateReportDoc — presumably a historical misspelling
            # kept for index compatibility; confirm before renaming.
            xml_schemea=aggregate_report["xml_schema"],
            org_name=metadata["org_name"],
            org_email=metadata["org_email"],
            org_extra_contact_info=metadata["org_extra_contact_info"],
            report_id=metadata["report_id"],
            date_range=date_range,
            errors=metadata["errors"],
            published_policy=published_policy,
            source_ip_address=record["source"]["ip_address"],
            source_country=record["source"]["country"],
            source_reverse_dns=record["source"]["reverse_dns"],
            source_base_domain=record["source"]["base_domain"],
            message_count=record["count"],
            disposition=record["policy_evaluated"]["disposition"],
            dkim_aligned=record["policy_evaluated"]["dkim"] == "pass",
            spf_aligned=record["policy_evaluated"]["spf"] == "pass",
            header_from=record["identifiers"]["header_from"],
            envelope_from=record["identifiers"]["envelope_from"],
            envelope_to=record["identifiers"]["envelope_to"]
        )
        for override in record["policy_evaluated"]["policy_override_reasons"]:
            agg_doc.add_policy_override(type_=override["type"],
                                        comment=override["comment"])
        for dkim_result in record["auth_results"]["dkim"]:
            agg_doc.add_dkim_result(domain=dkim_result["domain"],
                                    selector=dkim_result["selector"],
                                    result=dkim_result["result"])
        for spf_result in record["auth_results"]["spf"]:
            agg_doc.add_spf_result(domain=spf_result["domain"],
                                   scope=spf_result["scope"],
                                   result=spf_result["result"])
        # Final index name: dmarc_aggregate[_suffix]-<date partition>.
        index = "dmarc_aggregate"
        if index_suffix:
            index = "{0}_{1}".format(index, index_suffix)
        index = "{0}-{1}".format(index, index_date)
        create_indexes([index])
        agg_doc.meta.index = index
        try:
            agg_doc.save()
        except Exception as e:
            raise ElasticsearchError(
                "Elasticsearch error: {0}".format(e.__str__()))
|
Saves a parsed DMARC aggregate report to ElasticSearch
Args:
aggregate_report (OrderedDict): A parsed forensic report
index_suffix (str): The suffix of the name of the index to save to
monthly_indexes (bool): Use monthly indexes instead of daily indexes
Raises:
AlreadySaved
|
def _save_if_needed(request, response_content):
""" Save data to disk, if requested by the user
:param request: Download request
:type request: DownloadRequest
:param response_content: content of the download response
:type response_content: bytes
"""
if request.save_response:
file_path = request.get_file_path()
create_parent_folder(file_path)
with open(file_path, 'wb') as file:
file.write(response_content)
LOGGER.debug('Saved data from %s to %s', request.url, file_path)
|
Save data to disk, if requested by the user
:param request: Download request
:type request: DownloadRequest
:param response_content: content of the download response
:type response_content: bytes
|
def get_representation(self, prefix="", suffix="\n"):
    """Return the string representation of the current object."""
    return "{}Section {}{}".format(
        prefix, self.get_section_name().upper(), suffix)
|
return the string representation of the current object.
|
def get_decode_value(self):
    """Decode and return the key value according to its storage type."""
    store_type = self._store_type
    if store_type == PUBLIC_KEY_STORE_TYPE_HEX:
        return bytes.fromhex(self._value)
    if store_type == PUBLIC_KEY_STORE_TYPE_BASE64:
        return b64decode(self._value)
    if store_type == PUBLIC_KEY_STORE_TYPE_BASE85:
        return b85decode(self._value)
    if store_type == PUBLIC_KEY_STORE_TYPE_JWK:
        # TODO: need to decide on which jwk library to import?
        raise NotImplementedError
    # Unknown/raw storage types are returned untouched.
    return self._value
|
Return the key value based on it's storage type.
|
async def connect(self, hostname=None, port=None, tls=False, **kwargs):
    """ Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters. """
    # Pick the scheme-appropriate default when no port was given.
    if not port:
        port = DEFAULT_TLS_PORT if tls else rfc1459.protocol.DEFAULT_PORT
    return await super().connect(hostname, port, tls=tls, **kwargs)
|
Connect to a server, optionally over TLS. See pydle.features.RFC1459Support.connect for misc parameters.
|
def require_condition(cls, expr, message, *format_args, **format_kwds):
    """
    Assert that a condition holds; raise this Buzz type otherwise.

    :param: message: The failure message to attach to the raised Buzz
    :param: expr: A boolean value indicating an evaluated expression
    :param: format_args: Format arguments. Follows str.format convention
    :param: format_kwds: Format keyword args. Follows str.format convention
    """
    if expr:
        return
    raise cls(message, *format_args, **format_kwds)
|
used to assert a certain state. If the expression renders a false
value, an exception will be raised with the supplied message
:param: message: The failure message to attach to the raised Buzz
:param: expr: A boolean value indicating an evaluated expression
:param: format_args: Format arguments. Follows str.format convention
:param: format_kwds: Format keyword args. Follows str.format convention
|
def credentials(self):
    """google.auth.credentials.Credentials: Credentials to use for queries
    performed through IPython magics

    Note:
        These credentials do not need to be explicitly defined if you are
        using Application Default Credentials. If you are not using
        Application Default Credentials, manually construct a
        :class:`google.auth.credentials.Credentials` object and set it as
        the context credentials as demonstrated in the example below. See
        `auth docs`_ for more information on obtaining credentials.

    Example:
        Manually setting the context credentials:

        >>> from google.cloud.bigquery import magics
        >>> from google.oauth2 import service_account
        >>> credentials = (service_account
        ...     .Credentials.from_service_account_file(
        ...         '/path/to/key.json'))
        >>> magics.context.credentials = credentials

    .. _auth docs: http://google-auth.readthedocs.io
        /en/latest/user-guide.html#obtaining-credentials
    """
    # Lazily resolve Application Default Credentials on first access.
    if self._credentials is None:
        default_credentials, _project = google.auth.default()
        self._credentials = default_credentials
    return self._credentials
|
google.auth.credentials.Credentials: Credentials to use for queries
performed through IPython magics
Note:
These credentials do not need to be explicitly defined if you are
using Application Default Credentials. If you are not using
Application Default Credentials, manually construct a
:class:`google.auth.credentials.Credentials` object and set it as
the context credentials as demonstrated in the example below. See
`auth docs`_ for more information on obtaining credentials.
Example:
Manually setting the context credentials:
>>> from google.cloud.bigquery import magics
>>> from google.oauth2 import service_account
>>> credentials = (service_account
... .Credentials.from_service_account_file(
... '/path/to/key.json'))
>>> magics.context.credentials = credentials
.. _auth docs: http://google-auth.readthedocs.io
/en/latest/user-guide.html#obtaining-credentials
|
def search_channels(self, query, limit=25, offset=0):
    """Search for channels and return them

    :param query: the query string
    :type query: :class:`str`
    :param limit: maximum number of results
    :type limit: :class:`int`
    :param offset: offset for pagination
    :type offset: :class:`int`
    :returns: A list of channels
    :rtype: :class:`list` of :class:`models.Channel` instances
    :raises: None
    """
    params = {'query': query, 'limit': limit, 'offset': offset}
    raw = self.kraken_request('GET', 'search/channels', params=params)
    return models.Channel.wrap_search(raw)
|
Search for channels and return them
:param query: the query string
:type query: :class:`str`
:param limit: maximum number of results
:type limit: :class:`int`
:param offset: offset for pagination
:type offset: :class:`int`
:returns: A list of channels
:rtype: :class:`list` of :class:`models.Channel` instances
:raises: None
|
def _patch_expand_paths(self, settings, name, value):
"""
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in
list.
Args:
settings (dict): Current settings.
name (str): Setting name.
value (list): List of paths to patch.
Returns:
list: Patched path list to an absolute path.
"""
return [self._patch_expand_path(settings, name, item)
for item in value]
|
Apply ``SettingsPostProcessor._patch_expand_path`` to each element in
list.
Args:
settings (dict): Current settings.
name (str): Setting name.
value (list): List of paths to patch.
Returns:
list: Patched path list to an absolute path.
|
def assertSameType(a, b):
    """
    Raises an exception if @b is not an instance of type(@a)
    """
    if isinstance(b, type(a)):
        return
    raise NotImplementedError("This operation is only supported for "
                              "elements of the same type. Instead found {} and {}".
                              format(type(a), type(b)))
|
Raises an exception if @b is not an instance of type(@a)
|
def _path_polygon(self, points):
    "Low-level polygon-drawing routine."
    (xmin, ymin, xmax, ymax) = _compute_bounding_box(points)
    # Skip polygons that fall entirely outside the visible area.
    if invisible_p(xmax, ymax):
        return
    self.setbb(xmin, ymin)
    self.setbb(xmax, ymax)
    self.newpath()
    first = points[0]
    self.moveto(xscale(first[0]), yscale(first[1]))
    for vertex in points[1:]:
        self.lineto(xscale(vertex[0]), yscale(vertex[1]))
    self.closepath()
|
Low-level polygon-drawing routine.
|
def _revert_categories(self):
    """
    Inplace conversion to categories.
    """
    # Restore the remembered categorical dtype on each surviving column.
    for column, dtype in self._categories.items():
        if column not in self.columns:
            continue
        self[column] = self[column].astype(dtype)
|
Inplace conversion to categories.
|
def vx(self,*args,**kwargs):
    """
    NAME:

       vx

    PURPOSE:

       return x velocity at time t

    INPUT:

       t - (optional) time at which to get the velocity

       vo= (Object-wide default) physical scale for velocities to use to convert

       use_physical= use to override Object-wide default for using a physical scale for output

    OUTPUT:

       vx(t)

    HISTORY:

       2010-11-30 - Written - Bovy (NYU)

    """
    # Evaluate the orbit at the requested time(s); ensure a 2D
    # (phase-space dim, n-times) array even for a single time.
    thiso= self(*args,**kwargs)
    if not len(thiso.shape) == 2: thiso= thiso.reshape((thiso.shape[0],1))
    # 2D phase space (x, vx): row 1 already is vx.
    if len(thiso[:,0]) == 2:
        return thiso[1,:]
    # 4D/6D phase space is cylindrical (R, vR, vT, [z, vz,] phi); the
    # azimuth phi is required to project onto the x axis.
    if len(thiso[:,0]) != 4 and len(thiso[:,0]) != 6:
        raise AttributeError("orbit must track azimuth to use vx()")
    elif len(thiso[:,0]) == 4:
        theta= thiso[3,:]
    else:
        theta= thiso[5,:]
    # vx = vR*cos(phi) - vT*sin(phi)
    return thiso[1,:]*nu.cos(theta)-thiso[2,:]*nu.sin(theta)
|
NAME:
vx
PURPOSE:
return x velocity at time t
INPUT:
t - (optional) time at which to get the velocity
vo= (Object-wide default) physical scale for velocities to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
vx(t)
HISTORY:
2010-11-30 - Written - Bovy (NYU)
|
def export_node(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="", who="",
                add_join=False):
    """
    Dispatch a node to the exporter matching its BPMN type.

    :param bpmn_graph: an instance of BpmnDiagramGraph class,
    :param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
        will be used in exported CSV document,
    :param node: networkx.Node object,
    :param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
    :param order: the order param of exported node,
    :param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
        the branch
    :param condition: the condition param of exported node,
    :param who: the condition param of exported node,
    :param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
    :return: None or the next node object if the exported node was a gateway join.
    """
    node_type = node[1][consts.Consts.type]
    if node_type == consts.Consts.start_event:
        return BpmnDiagramGraphCsvExport.export_start_event(
            bpmn_graph, export_elements, node, nodes_classification,
            order=order, prefix=prefix, condition=condition, who=who)
    if node_type == consts.Consts.end_event:
        return BpmnDiagramGraphCsvExport.export_end_event(
            export_elements, node, order=order, prefix=prefix,
            condition=condition, who=who)
    # Any other node type (tasks, gateways, ...) goes through the
    # generic element exporter.
    return BpmnDiagramGraphCsvExport.export_element(
        bpmn_graph, export_elements, node, nodes_classification,
        order=order, prefix=prefix, condition=condition, who=who,
        add_join=add_join)
|
General method for node exporting
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the condition param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join.
|
def _add_none_handler(validation_callable,  # type: Callable
                      none_policy           # type: int
                      ):
    # type: (...) -> Callable
    """
    Adds a wrapper or nothing around the provided validation_callable, depending on the selected policy.

    :param validation_callable: the validation callable to (possibly) wrap
    :param none_policy: an int representing the None policy, see NonePolicy
    :return: the validation callable, wrapped according to the policy
    """
    # The policy values are plain ints, so equality (==) is the reliable
    # comparison; `is` only worked by virtue of CPython's small-int caching.
    if none_policy == NonePolicy.SKIP:
        return _none_accepter(validation_callable)  # accept all None values
    elif none_policy == NonePolicy.FAIL:
        return _none_rejecter(validation_callable)  # reject all None values
    elif none_policy == NonePolicy.VALIDATE:
        return validation_callable  # do not handle None specifically, do not wrap
    else:
        raise ValueError('Invalid none_policy : ' + str(none_policy))
|
Adds a wrapper or nothing around the provided validation_callable, depending on the selected policy
:param validation_callable:
:param none_policy: an int representing the None policy, see NonePolicy
:return:
|
def symmetric_difference_update(self, that):
    """
    Update the set in place, keeping only elements found in either *self*
    or *that*, but not in both. Returns *self*.
    """
    backing_set = self._set
    backing_set.symmetric_difference_update(that)
    # Rebuild the ordered view from scratch so it mirrors the new contents.
    self._list.clear()
    self._list.update(backing_set)
    return self
|
Update the set, keeping only elements found in either *self* or *that*,
but not in both.
|
def get_first_n_queues(self, n):
    """
    Consume the underlying sequence until *n* queues exist and return them.
    If the sequence is exhausted first, pad the result with empty iterables
    so the caller always receives exactly *n* items.
    """
    while len(self.queues) < n:
        try:
            self.__fetch__()
        except StopIteration:
            # Sequence exhausted before n queues were created.
            break
    queues = list(self.queues.values())
    # Pad with empty iterators to reach exactly n entries.
    queues += [iter([]) for _ in range(n - len(queues))]
    return queues
|
Run through the sequence until n queues are created and return
them. If fewer are created, return those plus empty iterables to
compensate.
|
def _shuffled_order(w, h):
"""
Generator for the order of 4-byte values.
32bit channels are also encoded using delta encoding,
but it make no sense to apply delta compression to bytes.
It is possible to apply delta compression to 2-byte or 4-byte
words, but it seems it is not the best way either.
In PSD, each 4-byte item is split into 4 bytes and these
bytes are packed together: "123412341234" becomes "111222333444";
delta compression is applied to the packed data.
So we have to (a) decompress data from the delta compression
and (b) recombine data back to 4-byte values.
"""
rowsize = 4 * w
for row in range(0, rowsize * h, rowsize):
for offset in range(row, row + w):
for x in range(offset, offset + rowsize, w):
yield x
|
Generator for the order of 4-byte values.
32bit channels are also encoded using delta encoding,
but it makes no sense to apply delta compression to bytes.
It is possible to apply delta compression to 2-byte or 4-byte
words, but it seems it is not the best way either.
In PSD, each 4-byte item is split into 4 bytes and these
bytes are packed together: "123412341234" becomes "111222333444";
delta compression is applied to the packed data.
So we have to (a) decompress data from the delta compression
and (b) recombine data back to 4-byte values.
|
def add(self, doc, attributes=None):
    """Adds a document to the index.

    Before adding documents to the index it should have been fully
    setup, with the document ref and all fields to index already having
    been specified.

    The document must have a field name as specified by the ref (by default
    this is 'id') and it should have all fields defined for indexing,
    though None values will not cause errors.

    Args:
        - doc (dict): The document to be added to the index.
        - attributes (dict, optional): A set of attributes corresponding
            to the document, currently a single `boost` -> int will be
            taken into account.
    """
    doc_ref = str(doc[self._ref])
    self._documents[doc_ref] = attributes or {}
    self.document_count += 1
    for field_name, field in self._fields.items():
        # A field may define a custom extractor; otherwise the value is
        # read directly from the document by field name.
        extractor = field.extractor
        field_value = doc[field_name] if extractor is None else extractor(doc)
        tokens = Tokenizer(field_value)
        terms = self.pipeline.run(tokens)
        field_ref = FieldRef(doc_ref, field_name)
        # Per-(document, field) term frequency counts.
        field_terms = defaultdict(int)
        # TODO: field_refs are casted to strings in JS, should we allow
        # FieldRef as keys?
        self.field_term_frequencies[str(field_ref)] = field_terms
        self.field_lengths[str(field_ref)] = len(terms)
        for term in terms:
            # TODO: term is a Token, should we allow Tokens as keys?
            term_key = str(term)
            field_terms[term_key] += 1
            if term_key not in self.inverted_index:
                # First encounter of this term: create a posting with one
                # slot per indexed field plus a unique sequential index.
                posting = {_field_name: {} for _field_name in self._fields}
                posting["_index"] = self.term_index
                self.term_index += 1
                self.inverted_index[term_key] = posting
            if doc_ref not in self.inverted_index[term_key][field_name]:
                self.inverted_index[term_key][field_name][doc_ref] = defaultdict(
                    list
                )
            # Accumulate whitelisted token metadata (per term/field/doc).
            for metadata_key in self.metadata_whitelist:
                metadata = term.metadata[metadata_key]
                self.inverted_index[term_key][field_name][doc_ref][
                    metadata_key
                ].append(metadata)
|
Adds a document to the index.
Before adding documents to the index it should have been fully
setup, with the document ref and all fields to index already having
been specified.
The document must have a field name as specified by the ref (by default
this is 'id') and it should have all fields defined for indexing,
though None values will not cause errors.
Args:
- doc (dict): The document to be added to the index.
- attributes (dict, optional): A set of attributes corresponding
to the document, currently a single `boost` -> int will be
taken into account.
|
def _read_header(stream, decoder, strict=False):
"""
Read AMF L{Message} header from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is C{False}. Will raise a
L{pyamf.DecodeError} if the data that was read from the stream does not
match the header length.
@return: A C{tuple} containing the name of the header, a C{bool}
determining if understanding this header is required and the decoded
data.
@note: Quite what understanding required headers actually means is unknown.
"""
name_len = stream.read_ushort()
name = stream.read_utf8_string(name_len)
required = bool(stream.read_uchar())
data_len = stream.read_ulong()
pos = stream.tell()
data = decoder.readElement()
if strict and pos + data_len != stream.tell():
raise pyamf.DecodeError(
"Data read from stream does not match header length")
return (name, required, data)
|
Read AMF L{Message} header from the stream.
@type stream: L{BufferedByteStream<pyamf.util.BufferedByteStream>}
@param decoder: An AMF0 decoder.
@param strict: Use strict decoding policy. Default is C{False}. Will raise a
L{pyamf.DecodeError} if the data that was read from the stream does not
match the header length.
@return: A C{tuple} containing the name of the header, a C{bool}
determining if understanding this header is required and the decoded
data.
@note: Quite what understanding required headers actually means is unknown.
|
def choose_one(things):
    """Return one randomly selected, whitespace-stripped entry from *things*."""
    index = SystemRandom().randrange(len(things))
    return things[index].strip()
|
Returns a random entry from a list of things
|
def account_settings_update(self, data, **kwargs):
    """https://developer.zendesk.com/rest_api/docs/core/account_settings#update-account-settings"""
    endpoint = "/api/v2/account/settings.json"
    return self.call(endpoint, method="PUT", data=data, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/account_settings#update-account-settings
|
def parse_datetime(value: Union[datetime, StrIntFloat]) -> datetime:
    """
    Parse a datetime/int/float/string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raise ValueError if the input is well formatted but not a valid datetime.
    Raise ValueError if the input isn't well formatted.
    """
    # Already a datetime: pass through unchanged.
    if isinstance(value, datetime):
        return value

    # Numeric input (int/float, or a numeric string) is treated as a unix
    # timestamp in seconds.
    number = get_numeric(value)
    if number is not None:
        return from_unix_seconds(number)

    match = datetime_re.match(cast(str, value))
    if not match:
        raise errors.DateTimeError()

    kw = match.groupdict()
    if kw['microsecond']:
        # Right-pad so e.g. '.5' is interpreted as 500000 microseconds.
        kw['microsecond'] = kw['microsecond'].ljust(6, '0')

    tzinfo_str = kw.pop('tzinfo')
    if tzinfo_str == 'Z':
        tzinfo = timezone.utc
    elif tzinfo_str is not None:
        # Offset like '+HH:MM', '+HHMM' or '+HH': minutes are present only
        # when the string is longer than the sign+hours prefix.
        offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
        offset = 60 * int(tzinfo_str[1:3]) + offset_mins
        if tzinfo_str[0] == '-':
            offset = -offset
        tzinfo = timezone(timedelta(minutes=offset))
    else:
        tzinfo = None

    # Drop unmatched (None) groups and convert the rest to ints.
    kw_: Dict[str, Union[int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
    kw_['tzinfo'] = tzinfo

    # NOTE(review): change_exception appears to re-raise ValueError from the
    # datetime constructor as errors.DateTimeError -- confirm against its
    # definition, since the docstring above promises ValueError.
    with change_exception(errors.DateTimeError, ValueError):
        return datetime(**kw_)
|
Parse a datetime/int/float/string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raise ValueError if the input is well formatted but not a valid datetime.
Raise ValueError if the input isn't well formatted.
|
def save(self, filesto, upload_to=None, name=None, secret=None, prefix=None,
         allowed=None, denied=None, max_size=None, **kwargs):
    """
    Validates and saves the uploaded file; returns its path relative to
    `upload_to` (or None if no file was provided).

    Except for `filesto`, all of these parameters are optional, so only
    bother setting the ones relevant to *this upload*.

    filesto
    :   A `werkzeug.FileUploader`.

    upload_to
    :   Relative path to where to upload. May also be a callable taking
        the original filename and returning the path.

    secret
    :   If True, instead of the original filename, a random one'll
        be used.

    prefix
    :   To avoid race-conditions between users uploading files with
        the same name at the same time. If `secret` is True, this
        will be ignored.

    name
    :   If set, it'll be used as the name of the uploaded file.
        Instead of a string, this can also be a callable.

    allowed
    :   List of allowed file extensions. `None` to allow all
        of them. If the uploaded file doesn't have one of these
        extensions, an `UnsupportedMediaType` exception will be
        raised.

    denied
    :   List of forbidden extensions. Set to `None` to disable.
        If the uploaded file *does* have one of these extensions, a
        `UnsupportedMediaType` exception will be raised.

    max_size
    :   Maximum file size, in bytes, that file can have.
        Note: The attribute `max_content_length` defined in the
        `request` object has higher priority.
    """
    if not filesto:
        return None
    # Per-call options fall back to the instance-level defaults.
    upload_to = upload_to or self.upload_to
    secret = secret or self.secret
    prefix = prefix or self.prefix
    original_filename = filesto.filename
    allowed = allowed or self.allowed
    denied = denied or self.denied
    # Extension/size validation; raises on violation (see docstring).
    self.validate(filesto, allowed, denied, max_size)
    # `upload_to` may be a callable deriving the target dir from the name.
    if callable(upload_to):
        filepath = upload_to(original_filename)
    else:
        filepath = upload_to
    oname, ext = os.path.splitext(original_filename)
    if name:
        new_name = name(original_filename) if callable(name) else name
    else:
        # Either a random name (secret) or the prefixed original name.
        new_name = get_random_filename() if secret else prefix + oname
    filename = get_unique_filename(self.base_path, filepath, new_name, ext=ext)
    fullpath = os.path.join(
        make_dirs(self.base_path, filepath),
        filename
    )
    filesto.save(fullpath)
    filesize = os.path.getsize(fullpath)
    # Post validation: the real size is only known after saving; remove
    # the file again if it turned out too large.
    if max_size and filesize > max_size:
        self.delete_file(fullpath)
        raise RequestEntityTooLarge
    return os.path.join(filepath, filename)
|
Except for `filesto`, all of these parameters are optional, so only
bother setting the ones relevant to *this upload*.
filesto
: A `werkzeug.FileUploader`.
upload_to
: Relative path to where to upload
secret
: If True, instead of the original filename, a random one'll
be used.
prefix
: To avoid race-conditions between users uploading files with
the same name at the same time. If `secret` is True, this
will be ignored.
name
: If set, it'll be used as the name of the uploaded file.
Instead of a string, this can also be a callable.
allowed
: List of allowed file extensions. `None` to allow all
of them. If the uploaded file doesn't have one of these
extensions, an `UnsupportedMediaType` exception will be
raised.
denied
: List of forbidden extensions. Set to `None` to disable.
If the uploaded file *does* have one of these extensions, a
`UnsupportedMediaType` exception will be raised.
max_size
: Maximum file size, in bytes, that file can have.
Note: The attribute `max_content_length` defined in the
`request` object has higher priority.
|
def _create_response_future(self, query, parameters, trace, custom_payload,
                            timeout, execution_profile=EXEC_PROFILE_DEFAULT,
                            paging_state=None, host=None):
    """ Returns the ResponseFuture before calling send_request() on it.

    Normalizes the statement, resolves per-request settings (either from
    legacy Session/Cluster attributes or from the execution profile),
    builds the protocol message for the statement type, and wraps it all
    in a ResponseFuture that the caller is expected to send.
    """
    prepared_statement = None

    # Normalize the query argument: raw strings become SimpleStatements,
    # prepared statements are bound with the given parameters.
    if isinstance(query, six.string_types):
        query = SimpleStatement(query)
    elif isinstance(query, PreparedStatement):
        query = query.bind(parameters)

    if self.cluster._config_mode == _ConfigMode.LEGACY:
        # Legacy config mode: settings come from Session/Cluster attributes
        # and execution profiles are not permitted.
        if execution_profile is not EXEC_PROFILE_DEFAULT:
            raise ValueError("Cannot specify execution_profile while using legacy parameters.")

        if timeout is _NOT_SET:
            timeout = self.default_timeout

        # Statement-level settings take precedence over session defaults.
        cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level
        serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else self.default_serial_consistency_level

        retry_policy = query.retry_policy or self.cluster.default_retry_policy
        row_factory = self.row_factory
        load_balancing_policy = self.cluster.load_balancing_policy
        spec_exec_policy = None
    else:
        # Profile-based config: statement-level settings still win over
        # the execution profile's.
        execution_profile = self._maybe_get_execution_profile(execution_profile)

        if timeout is _NOT_SET:
            timeout = execution_profile.request_timeout

        cl = query.consistency_level if query.consistency_level is not None else execution_profile.consistency_level
        serial_cl = query.serial_consistency_level if query.serial_consistency_level is not None else execution_profile.serial_consistency_level

        retry_policy = query.retry_policy or execution_profile.retry_policy
        row_factory = execution_profile.row_factory
        load_balancing_policy = execution_profile.load_balancing_policy
        spec_exec_policy = execution_profile.speculative_execution_policy

    fetch_size = query.fetch_size
    if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2:
        fetch_size = self.default_fetch_size
    elif self._protocol_version == 1:
        # Protocol v1 does not support paging.
        fetch_size = None

    start_time = time.time()
    # Client-side timestamps require protocol v3+.
    if self._protocol_version >= 3 and self.use_client_timestamp:
        timestamp = self.cluster.timestamp_generator()
    else:
        timestamp = None

    # Build the wire message appropriate to the statement type.
    if isinstance(query, SimpleStatement):
        query_string = query.query_string
        statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None
        if parameters:
            # Simple statements inline their parameters into the string.
            query_string = bind_params(query_string, parameters, self.encoder)
        message = QueryMessage(
            query_string, cl, serial_cl,
            fetch_size, timestamp=timestamp,
            keyspace=statement_keyspace)
    elif isinstance(query, BoundStatement):
        prepared_statement = query.prepared_statement
        message = ExecuteMessage(
            prepared_statement.query_id, query.values, cl,
            serial_cl, fetch_size,
            timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata),
            result_metadata_id=prepared_statement.result_metadata_id)
    elif isinstance(query, BatchStatement):
        if self._protocol_version < 2:
            raise UnsupportedOperation(
                "BatchStatement execution is only supported with protocol version "
                "2 or higher (supported in Cassandra 2.0 and higher). Consider "
                "setting Cluster.protocol_version to 2 to support this operation.")
        statement_keyspace = query.keyspace if ProtocolVersion.uses_keyspace_flag(self._protocol_version) else None
        message = BatchMessage(
            query.batch_type, query._statements_and_parameters, cl,
            serial_cl, timestamp, statement_keyspace)

    message.tracing = trace
    # Statement-level payload first, then per-call payload on top.
    message.update_custom_payload(query.custom_payload)
    message.update_custom_payload(custom_payload)
    message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version
    message.paging_state = paging_state

    # Speculative executions are only planned for idempotent statements.
    spec_exec_plan = spec_exec_policy.new_plan(query.keyspace or self.keyspace, query) if query.is_idempotent and spec_exec_policy else None
    return ResponseFuture(
        self, message, query, timeout, metrics=self._metrics,
        prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory,
        load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan,
        host=host)
|
Returns the ResponseFuture before calling send_request() on it
|
def call_sockeye_train(model: str,
                       bpe_dir: str,
                       model_dir: str,
                       log_fname: str,
                       num_gpus: int,
                       test_mode: bool = False):
    """
    Call sockeye.train with specified arguments on prepared inputs. Will resume
    partial training or skip training if model is already finished. Record
    command for future use.

    :param model: Type of translation model to train (key into MODELS).
    :param bpe_dir: Directory of BPE-encoded input data.
    :param model_dir: Model output directory.
    :param log_fname: Location to write log file.
    :param num_gpus: Number of GPUs to use for training (0 for CPU).
    :param test_mode: Run in test mode, stopping after a small number of
                      updates.
    """
    # Inputs and outputs
    fnames = ["--source={}".format(os.path.join(bpe_dir, PREFIX_TRAIN + SUFFIX_SRC_GZ)),
              "--target={}".format(os.path.join(bpe_dir, PREFIX_TRAIN + SUFFIX_TRG_GZ)),
              "--validation-source={}".format(os.path.join(bpe_dir, PREFIX_DEV + SUFFIX_SRC_GZ)),
              "--validation-target={}".format(os.path.join(bpe_dir, PREFIX_DEV + SUFFIX_TRG_GZ)),
              "--output={}".format(model_dir)]
    # Assemble command
    command = [sys.executable, "-m", "sockeye.train"] + fnames + MODELS[model]
    # Request GPUs or specify CPU
    if num_gpus > 0:
        # NOTE(review): "-N" appears to ask sockeye to acquire N free
        # devices automatically -- confirm against the sockeye.train CLI.
        command.append("--device-ids=-{}".format(num_gpus))
    else:
        command.append("--use-cpu")
    # Test mode trains a smaller model for a small number of steps
    if test_mode:
        command += MODEL_TEST_ARGS[model]
    # The command file doubles as a completion marker: it is only written
    # after training finishes successfully (see below).
    command_fname = os.path.join(model_dir, FILE_COMMAND.format("sockeye.train"))
    # Run unless training already finished
    if not os.path.exists(command_fname):
        # Call Sockeye training
        with open(log_fname, "wb") as log:
            logging.info("sockeye.train: %s", model_dir)
            logging.info("Log: %s", log_fname)
            logging.info("(This step can take several days. See log file or TensorBoard for progress)")
            subprocess.check_call(command, stderr=log)
        # Record successful command
        logging.info("Command: %s", command_fname)
        print_command(command, command_fname)
|
Call sockeye.train with specified arguments on prepared inputs. Will resume
partial training or skip training if model is already finished. Record
command for future use.
:param model: Type of translation model to train.
:param bpe_dir: Directory of BPE-encoded input data.
:param model_dir: Model output directory.
:param log_fname: Location to write log file.
:param num_gpus: Number of GPUs to use for training (0 for CPU).
:param test_mode: Run in test mode, stopping after a small number of
updates.
|
def update_status(self, helper, status):
    """Apply a (code, message) status tuple to this check.

    OK statuses (code 0) contribute their message to the long output;
    anything else goes to the summary. A falsy *status* is ignored.
    """
    if not status:
        return
    code, message = status[0], status[1]
    self.status(code)
    if code == 0:
        # OK results only add detail to the long output.
        self.add_long_output(message)
    else:
        # Problems are surfaced in the summary.
        self.add_summary(message)
|
update the helper
|
def get_available_user_FIELD_transitions(instance, user, field):
    """
    Yield the transitions available in the current model state, with all
    conditions met, that *user* has permission to perform.
    """
    for candidate in get_available_FIELD_transitions(instance, field):
        if not candidate.has_perm(instance, user):
            continue
        yield candidate
|
List of transitions available in current model state
with all conditions met and user have rights on it
|
def acorr(blk, max_lag=None):
    """
    Calculate the autocorrelation of a given 1-D block sequence.

    Parameters
    ----------
    blk :
        An iterable with well-defined length. Don't use this function with Stream
        objects!
    max_lag :
        The size of the result, the lags you'd need. Defaults to ``len(blk) - 1``,
        since any lag beyond would result in zero.

    Returns
    -------
    A list with lags from 0 up to max_lag, where its ``i``-th element has the
    autocorrelation for a lag equals to ``i``. Be careful with negative lags!
    You should use abs(lag) indexes when working with them.

    Examples
    --------
    >>> seq = [1, 2, 3, 4, 3, 4, 2]
    >>> acorr(seq) # Default max_lag is len(seq) - 1
    [59, 52, 42, 30, 17, 8, 2]
    >>> acorr(seq, 9) # Zeros at the end
    [59, 52, 42, 30, 17, 8, 2, 0, 0, 0]
    >>> len(acorr(seq, 3)) # Resulting length is max_lag + 1
    4
    >>> acorr(seq, 3)
    [59, 52, 42, 30]
    """
    if max_lag is None:
        max_lag = len(blk) - 1
    # `range` instead of the Python 2-only `xrange`: identical semantics
    # here, but keeps the function working on Python 3.
    return [sum(blk[n] * blk[n + tau] for n in range(len(blk) - tau))
            for tau in range(max_lag + 1)]
|
Calculate the autocorrelation of a given 1-D block sequence.
Parameters
----------
blk :
An iterable with well-defined length. Don't use this function with Stream
objects!
max_lag :
The size of the result, the lags you'd need. Defaults to ``len(blk) - 1``,
since any lag beyond would result in zero.
Returns
-------
A list with lags from 0 up to max_lag, where its ``i``-th element has the
autocorrelation for a lag equals to ``i``. Be careful with negative lags!
You should use abs(lag) indexes when working with them.
Examples
--------
>>> seq = [1, 2, 3, 4, 3, 4, 2]
>>> acorr(seq) # Default max_lag is len(seq) - 1
[59, 52, 42, 30, 17, 8, 2]
>>> acorr(seq, 9) # Zeros at the end
[59, 52, 42, 30, 17, 8, 2, 0, 0, 0]
>>> len(acorr(seq, 3)) # Resulting length is max_lag + 1
4
>>> acorr(seq, 3)
[59, 52, 42, 30]
|
def as_dict(self):
    """Return a JSON-serializable dict representation of Kpoints."""
    result = {
        "comment": self.comment,
        "nkpoints": self.num_kpts,
        "generation_style": self.style.name,
        "kpoints": self.kpts,
        "usershift": self.kpts_shift,
        "kpts_weights": self.kpts_weights,
        "coord_type": self.coord_type,
        "labels": self.labels,
        "tet_number": self.tet_number,
        "tet_weight": self.tet_weight,
        "tet_connections": self.tet_connections,
    }
    # Generation-vector fields are only set for some k-point styles, so
    # include them only when present on the instance.
    for key in ("genvec1", "genvec2", "genvec3", "shift"):
        if key in self.__dict__:
            result[key] = self.__dict__[key]
    result["@module"] = self.__class__.__module__
    result["@class"] = self.__class__.__name__
    return result
|
json friendly dict representation of Kpoints
|
def coroutine(func):
    """
    Decorator that primes a coroutine by advancing it to its first yield,
    so callers can immediately ``send`` values into it.

    .. code-block:: python

        # Basic coroutine producer/consumer pattern
        from translate import coroutine

        @coroutine
        def coroutine_foo(bar):
            try:
                while True:
                    baz = (yield)
                    bar.send(baz)
            except GeneratorExit:
                bar.close()

    :param func: Unprimed Generator
    :type func: Function

    :return: Initialized Coroutine
    :rtype: Function
    """
    @wraps(func)
    def primed(*args, **kwargs):
        gen = func(*args, **kwargs)
        gen.send(None)  # advance to the first yield (same as next(gen))
        return gen
    return primed
|
Initializes coroutine essentially priming it to the yield statement.
Used as a decorator over functions that generate coroutines.
.. code-block:: python
# Basic coroutine producer/consumer pattern
from translate import coroutine
@coroutine
def coroutine_foo(bar):
try:
while True:
baz = (yield)
bar.send(baz)
except GeneratorExit:
bar.close()
:param func: Unprimed Generator
:type func: Function
:return: Initialized Coroutine
:rtype: Function
|
def get_properties(self):
    """
    Collect the properties attached to each variable in the model, as
    ``"name = value"`` strings suitable for BIF output.

    Returns
    -------
    dict: dict of type {variable: list of properties}

    Example
    -------
    >>> from pgmpy.readwrite import BIFReader, BIFWriter
    >>> model = BIFReader('dog-problem.bif').get_model()
    >>> writer = BIFWriter(model)
    >>> writer.get_properties()
    {'bowel-problem': ['position = (335, 99)'],
    'dog-out': ['position = (300, 195)'],
    'family-out': ['position = (257, 99)'],
    'hear-bark': ['position = (296, 268)'],
    'light-on': ['position = (218, 195)']}
    """
    property_tag = {}
    # Sort variables and their properties for deterministic output.
    for variable in sorted(self.model.nodes()):
        node_properties = collections.OrderedDict(
            sorted(self.model.node[variable].items()))
        property_tag[variable] = [
            "%s = %s" % (prop, val)
            for prop, val in node_properties.items()
        ]
    return property_tag
|
Add property to variables in BIF
Returns
-------
dict: dict of type {variable: list of properties }
Example
-------
>>> from pgmpy.readwrite import BIFReader, BIFWriter
>>> model = BIFReader('dog-problem.bif').get_model()
>>> writer = BIFWriter(model)
>>> writer.get_properties()
{'bowel-problem': ['position = (335, 99)'],
'dog-out': ['position = (300, 195)'],
'family-out': ['position = (257, 99)'],
'hear-bark': ['position = (296, 268)'],
'light-on': ['position = (218, 195)']}
|
def get_results(self, hql, schema='default', fetch_size=None, hive_conf=None):
    """
    Get results of the provided hql in target schema.

    :param hql: hql to be executed.
    :type hql: str or list
    :param schema: target schema, default to 'default'.
    :type schema: str
    :param fetch_size: max size of result to fetch.
    :type fetch_size: int
    :param hive_conf: hive_conf to execute alone with the hql.
    :type hive_conf: dict
    :return: results of hql execution, dict with data (list of results) and header
    :rtype: dict
    """
    rows = self._get_results(hql, schema,
                             fetch_size=fetch_size, hive_conf=hive_conf)
    # The iterator yields the header row first, then the data rows.
    header = next(rows)
    return {
        'data': list(rows),
        'header': header,
    }
|
Get results of the provided hql in target schema.
:param hql: hql to be executed.
:type hql: str or list
:param schema: target schema, default to 'default'.
:type schema: str
:param fetch_size: max size of result to fetch.
:type fetch_size: int
:param hive_conf: hive_conf to execute alone with the hql.
:type hive_conf: dict
:return: results of hql execution, dict with data (list of results) and header
:rtype: dict
|
def create_cursor(self, name=None):
    """
    Returns an active connection cursor to the database.

    :param name: accepted for interface compatibility with Django's
        backend API; not used by this implementation.
    """
    return Cursor(self.client_connection, self.connection, self.djongo_connection)
|
Returns an active connection cursor to the database.
|
def placeholder(type_):
    """Return the (cached) EmptyVal instance for the given type."""
    key = type_ if isinstance(type_, tuple) else (type_,)
    # Any tuple containing `any` collapses to the single shared `any` key.
    if any in key:
        key = any
    try:
        return EMPTY_VALS[key]
    except KeyError:
        EMPTY_VALS[key] = EmptyVal(key)
        return EMPTY_VALS[key]
|
Returns the EmptyVal instance for the given type
|
def get_resource_by_urn(self, urn):
    """Fetch the resource corresponding to the input CTS URN.

    Currently supports only HucitAuthor and HucitWork.

    :param urn: the CTS URN of the resource to fetch (str or pyCTS.CTS_URN)
    :return: either an instance of `HucitAuthor` or of `HucitWork`
    :raises ResourceNotFound: when the SPARQL query yields no bindings
    """
    # NOTE(review): the URN is interpolated into the SPARQL query via %s;
    # callers are expected to pass trusted URNs.
    search_query = """
    PREFIX frbroo: <http://erlangen-crm.org/efrbroo/>
    PREFIX crm: <http://erlangen-crm.org/current/>
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    SELECT ?resource_URI

    WHERE {
        ?resource_URI crm:P1_is_identified_by ?urn .
        ?urn a crm:E42_Identifier .
        ?urn rdfs:label "%s"
    }
    """ % urn
    # check type of the input URN
    try:
        assert isinstance(urn, CTS_URN)
    except Exception as e:
        # convert to pyCTS.CTS_URN if it's a string
        urn = CTS_URN(urn)
        # NOTE(review): this logs type(CTS_URN), which is always the class
        # itself -- probably meant type(urn).
        logger.debug('Converted the input urn from string to %s' % type(
            CTS_URN
        ))

    # A URN with a work component maps to a Work; one with only a
    # textgroup maps to a Person (author).
    if (urn.work is not None):
        Work = self._session.get_class(surf.ns.EFRBROO['F1_Work'])
        result = self._store.execute_sparql(search_query)
        if len(result['results']['bindings']) == 0:
            raise ResourceNotFound
        else:
            tmp = result['results']['bindings'][0]
            resource_uri = tmp['resource_URI']['value']
            return self._session.get_resource(resource_uri, Work)

    elif (urn.work is None and urn.textgroup is not None):
        Person = self._session.get_class(surf.ns.EFRBROO['F10_Person'])
        result = self._store.execute_sparql(search_query)
        if len(result['results']['bindings']) == 0:
            raise ResourceNotFound
        else:
            tmp = result['results']['bindings'][0]
            resource_uri = tmp['resource_URI']['value']
            return self._session.get_resource(resource_uri, Person)
|
Fetch the resource corresponding to the input CTS URN.
Currently supports
only HucitAuthor and HucitWork.
:param urn: the CTS URN of the resource to fetch
:return: either an instance of `HucitAuthor` or of `HucitWork`
|
def proto_avgRange(theABF,m1=None,m2=None):
    """experiment: generic VC time course experiment.

    For each sweep, measures the average and standard deviation of the
    signal between times m1 and m2 (in seconds), plots them against
    experiment time (with comment-tag markers), and saves the figure.

    :param theABF: path to the ABF file to analyze.
    :param m1: measurement range start, in seconds.
    :param m2: measurement range end, in seconds.
    """
    abf=ABF(theABF)
    abf.log.info("analyzing as a fast IV")
    # NOTE(review): both defaults fall back to sweepLength, which makes
    # I1 == I2 below (an empty measurement slice). m1 likely should
    # default to 0 -- confirm intended behavior.
    if m1 is None:
        m1=abf.sweepLength
    if m2 is None:
        m2=abf.sweepLength
    # Convert the time range (seconds) into sample indices.
    I1=int(abf.pointsPerSec*m1)
    I2=int(abf.pointsPerSec*m2)
    # One data point per sweep, positioned at the sweep's start time.
    Ts=np.arange(abf.sweeps)*abf.sweepInterval
    Yav=np.empty(abf.sweeps)*np.nan # average
    Ysd=np.empty(abf.sweeps)*np.nan # standard deviation
    #Yar=np.empty(abf.sweeps)*np.nan # area
    for sweep in abf.setsweeps():
        Yav[sweep]=np.average(abf.sweepY[I1:I2])
        Ysd[sweep]=np.std(abf.sweepY[I1:I2])
        #Yar[sweep]=np.sum(abf.sweepY[I1:I2])/(I2*I1)-Yav[sweep]
    plot=ABFplot(abf)

    # Panel 1: the first sweep with the measurement range shaded.
    plt.figure(figsize=(SQUARESIZE*2,SQUARESIZE/2))
    plt.subplot(131)
    plot.title="first sweep"
    plot.figure_sweep(0)
    plt.title("First Sweep\n(shaded measurement range)")
    plt.axvspan(m1,m2,color='r',ec=None,alpha=.1)

    # Panel 2: per-sweep average, with dashed lines at comment times.
    plt.subplot(132)
    plt.grid(alpha=.5)
    for i,t in enumerate(abf.comment_times):
        plt.axvline(t/60,color='r',alpha=.5,lw=2,ls='--')
    plt.plot(Ts/60,Yav,'.',alpha=.75)
    plt.title("Range Average\nTAGS: %s"%(", ".join(abf.comment_tags)))
    plt.ylabel(abf.units2)
    plt.xlabel("minutes")
    plt.margins(0,.1)

    # Panel 3: per-sweep standard deviation; y-axis clipped near the
    # 99th percentile to suppress outliers.
    plt.subplot(133)
    plt.grid(alpha=.5)
    for i,t in enumerate(abf.comment_times):
        plt.axvline(t/60,color='r',alpha=.5,lw=2,ls='--')
    plt.plot(Ts/60,Ysd,'.',alpha=.5,color='g',ms=15,mew=0)
    #plt.fill_between(Ts/60,Ysd*0,Ysd,lw=0,alpha=.5,color='g')
    plt.title("Range Standard Deviation\nTAGS: %s"%(", ".join(abf.comment_tags)))
    plt.ylabel(abf.units2)
    plt.xlabel("minutes")
    plt.margins(0,.1)
    plt.axis([None,None,0,np.percentile(Ysd,99)*1.25])

    plt.tight_layout()
    frameAndSave(abf,"sweep vs average","experiment")
    plt.close('all')
|
experiment: generic VC time course experiment.
|
def get_recipe_env(self, arch, with_flags_in_cc=True):
    """
    Build the compilation environment, adding the openssl recipe's
    include and library paths to the parent recipe's environment.

    :param arch: target architecture object (provides `arch.arch`).
    :param with_flags_in_cc: forwarded to the parent implementation.
    :return: dict of environment variables for the build.
    """
    env = super(ScryptRecipe, self).get_recipe_env(arch, with_flags_in_cc)
    openssl_recipe = self.get_recipe('openssl', self.ctx)
    # Header search paths from the openssl recipe.
    env['CFLAGS'] += openssl_recipe.include_flags(arch)
    # Library search paths: per-arch libs dir, global libs dir, then
    # openssl's own link dirs.
    env['LDFLAGS'] += ' -L{}'.format(self.ctx.get_libs_dir(arch.arch))
    env['LDFLAGS'] += ' -L{}'.format(self.ctx.libs_dir)
    env['LDFLAGS'] += openssl_recipe.link_dirs_flags(arch)
    env['LIBS'] = env.get('LIBS', '') + openssl_recipe.link_libs_flags()
    return env
|
Adds openssl recipe to include and library path.
|
def hash_file(filepath: str) -> str:
    """Return the hexdigest MD5 hash of content of file at `filepath`."""
    hasher = hashlib.md5()
    # acc_hash feeds the file's content into the hash object.
    acc_hash(filepath, hasher)
    return hasher.hexdigest()
|
Return the hexdigest MD5 hash of content of file at `filepath`.
|
def exception_handle(method):
    """Decorator translating requests-library errors into logged, re-raised
    exceptions with clearer messages.

    :param method: the callable to wrap.
    :return: a wrapped callable with the same behavior on success.
    """
    import functools

    @functools.wraps(method)  # preserve the wrapped callable's metadata
    def wrapper(*args, **kwargs):
        try:
            return method(*args, **kwargs)
        except ProxyError:
            LOG.exception('ProxyError when try to get %s.', args)
            raise ProxyError('A proxy error occurred.')
        except ConnectionException:
            LOG.exception('ConnectionError when try to get %s.', args)
            raise ConnectionException('DNS failure, refused connection, etc.')
        except Timeout:
            LOG.exception('Timeout when try to get %s', args)
            raise Timeout('The request timed out.')
        except RequestException:
            LOG.exception('RequestException when try to get %s.', args)
            raise RequestException('Please check out your network.')
    return wrapper
|
Handle exception raised by requests library.
|
def revcomp(sequence):
    """Return the reverse complement of a DNA string.

    The input is reversed, stripped of surrounding whitespace,
    complemented (A<->T, C<->G) and upper-cased. Characters outside
    uppercase ACGT are upper-cased but otherwise left unchanged,
    matching the original replace-chain behavior.
    """
    # str.translate does the complement in a single C-level pass instead
    # of four chained .replace() calls (which also needed a lowercase
    # intermediate step to avoid double-substitution).
    complement = str.maketrans("ATCG", "TAGC")
    return sequence[::-1].strip().translate(complement).upper()
|
returns reverse complement of a string
|
def handle_message(self, msg):
    """Issues an `inspection` service message based on a PyLint message.

    Registers each message type upon first encounter.

    :param utils.Message msg: a PyLint message
    """
    # Announce each message type the first time it is seen.
    if msg.msg_id not in self.msg_types:
        self.report_message_type(msg)
        self.msg_types.add(msg.msg_id)

    # TeamCity paths use forward slashes, even on Windows.
    rel_path = os.path.relpath(msg.abspath).replace('\\', '/')
    self.tc.message(
        'inspection',
        typeId=msg.msg_id,
        message=msg.msg,
        file=rel_path,
        line=str(msg.line),
        SEVERITY=TC_SEVERITY.get(msg.category),
    )
|
Issues an `inspection` service message based on a PyLint message.
Registers each message type upon first encounter.
:param utils.Message msg: a PyLint message
|
def __search(self, obj, item, parent="root", parents_ids=frozenset({})):
    """Main dispatcher: route the search to the handler for *obj*'s type."""
    if self.__skip_this(item, parent):
        return

    obj_is_string = isinstance(obj, strings)
    if obj_is_string and isinstance(item, strings):
        self.__search_str(obj, item, parent)
        return
    if obj_is_string and isinstance(item, numbers):
        # A numeric needle can never match inside a string haystack.
        return

    if isinstance(obj, numbers):
        self.__search_numbers(obj, item, parent)
    elif isinstance(obj, MutableMapping):
        self.__search_dict(obj, item, parent, parents_ids)
    elif isinstance(obj, tuple):
        self.__search_tuple(obj, item, parent, parents_ids)
    elif isinstance(obj, (set, frozenset)):
        # Sets are unindexable; warn (at most 10 times) that the reported
        # path cannot be used for indexing, then treat as a plain iterable.
        if self.warning_num < 10:
            logger.warning(
                "Set item detected in the path."
                "'set' objects do NOT support indexing. But DeepSearch will still report a path."
            )
            self.warning_num += 1
        self.__search_iterable(obj, item, parent, parents_ids)
    elif isinstance(obj, Iterable):
        self.__search_iterable(obj, item, parent, parents_ids)
    else:
        self.__search_obj(obj, item, parent, parents_ids)
|
The main search method
|
def all(self, data=None, **kwargs):
    """Fetch all Refund entities.

    Args:
        data: optional dict of request parameters (defaults to empty dict)

    Returns:
        Refund dict
    """
    # Avoid the shared-mutable-default pitfall: build a fresh dict per call.
    if data is None:
        data = {}
    return super(Refund, self).all(data, **kwargs)
|
Fetch All Refund
Returns:
Refund dict
|
def _parse_sections(self):
    """Parse wiki-style section headings out of ``self.content``.

    Populates ``self._sections`` (flat, in-order list of section titles)
    and ``self._table_of_contents`` (nested OrderedDict mirroring the
    heading hierarchy).
    """
    def _list_to_dict(_dict, path, sec):
        # Walk `path` (minus its last element) down the nested dict and
        # insert `sec` as a new, empty subsection at that depth.
        tmp = _dict
        for elm in path[:-1]:
            tmp = tmp[elm]
        tmp[sec] = OrderedDict()
    self._sections = list()
    section_regexp = r"\n==* .* ==*\n"  # '== {STUFF_NOT_\n} =='
    found_obj = re.findall(section_regexp, self.content)
    res = OrderedDict()
    path = list()  # heading titles from the top level down to the current one
    last_depth = 0
    for obj in found_obj:
        # NOTE(review): true division, so `depth` is a float on Python 3.
        # Exact for symmetric '=' markers (even count); asymmetric headings
        # yield fractional depths. Presumably Python 2 legacy — confirm
        # before changing to '//'.
        depth = obj.count("=") / 2  # this gets us to the single side...
        depth -= 2  # now, we can calculate depth
        sec = obj.lstrip("\n= ").rstrip(" =\n")
        if depth == 0:
            # Top-level heading: restart the path at the root.
            last_depth = 0
            path = [sec]
            res[sec] = OrderedDict()
        elif depth > last_depth:
            # Deeper heading: descend one level under the previous section.
            last_depth = depth
            path.append(sec)
            _list_to_dict(res, path, sec)
        elif depth < last_depth:
            # Shallower heading: pop back up to the matching depth, then
            # replace the sibling at that level.
            # path.pop()
            while last_depth > depth:
                path.pop()
                last_depth -= 1
            path.pop()
            path.append(sec)
            _list_to_dict(res, path, sec)
            last_depth = depth
        else:
            # Same depth: swap the trailing sibling on the path.
            path.pop()
            path.append(sec)
            _list_to_dict(res, path, sec)
            last_depth = depth
        self._sections.append(sec)
    self._table_of_contents = res
|
parse sections and TOC
|
def on_any_event(self, event):
    """Invoke the callback for any filesystem event on a regular file."""
    src = event.src_path
    # Directory events (create/modify of folders) are deliberately ignored.
    if os.path.isfile(src):
        self.callback(src, **self.kwargs)
|
File created or modified
|
def startProcesses(self):
    """Create and start the machine-vision multiprocesses.

    Starting a multiprocess forks the interpreter.  In theory forking
    after the multithreading environment is up is fine (only the forking
    thread is copied), but in practice it misbehaves, so every process is
    created here, up front.  See:
    http://www.linuxprogrammingblog.com/threads-and-fork-think-twice-before-using-them
    """
    self.process_map = {}  # tag -> list of started process instances
    for mvision_class in self.mvision_classes:
        name = mvision_class.name  # kept from the original (currently unused)
        tag = mvision_class.tag
        instances = self.process_map.setdefault(tag, [])
        for _ in range(mvision_class.max_instances):
            process = mvision_class()
            process.start()
            instances.append(process)
|
Create and start python multiprocesses
Starting a multiprocess creates a process fork.
In theory, there should be no problem in first starting the multithreading environment and after that perform forks (only the thread requestin the fork is copied), but in practice, all kinds of weird behaviour arises.
Read all about it in here : http://www.linuxprogrammingblog.com/threads-and-fork-think-twice-before-using-them
|
def init_debug(self):
    """Install debugging hooks: SIGUSR2 logs the current stack trace."""
    import signal

    def _dump_stack(sig, frame):
        """Signal handler: log the stack of the interrupted frame."""
        self.log('Trace signal received')
        self.log(''.join(traceback.format_stack(frame)))

    signal.signal(signal.SIGUSR2, _dump_stack)
|
Initialize debugging features, such as a handler for USR2 to print a trace
|
def push_resource_cache(resourceid, info):
    """Merge *info* into the class-level cache entry for *resourceid*.

    :param resourceid: Resource id as string
    :param info: Dict to push
    :return: Nothing
    :raises ResourceInitError: if *resourceid* is empty/falsy
    """
    if not resourceid:
        raise ResourceInitError("Resource id missing")
    cache = DutInformationList._cache
    # Ensure a dict exists for this resource before merging into it.
    if not cache.get(resourceid):
        cache[resourceid] = dict()
    cache[resourceid] = merge(cache[resourceid], info)
|
Cache resource specific information
:param resourceid: Resource id as string
:param info: Dict to push
:return: Nothing
|
def _get_dirs(user_dir, startup_dir):
'''
Return a list of startup dirs
'''
try:
users = os.listdir(user_dir)
except WindowsError: # pylint: disable=E0602
users = []
full_dirs = []
for user in users:
full_dir = os.path.join(user_dir, user, startup_dir)
if os.path.exists(full_dir):
full_dirs.append(full_dir)
return full_dirs
|
Return a list of startup dirs
|
def get_asset_form_for_create(self, asset_record_types):
    """Gets the asset form for creating new assets.
    A new form should be requested for each create transaction.
    arg: asset_record_types (osid.type.Type[]): array of asset
            record types
    return: (osid.repository.AssetForm) - the asset form
    raise: NullArgument - ``asset_record_types`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    raise: Unsupported - unable to get form for requested record
            types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceAdminSession.get_resource_form_for_create_template
    for record_type in asset_record_types:
        if not isinstance(record_type, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    form_args = {
        'repository_id': self._catalog_id,
        'runtime': self._runtime,
        'effective_agent_id': self.get_effective_agent_id(),
        'proxy': self._proxy,
    }
    # Only pass record_types when the caller supplied a non-empty array.
    if asset_record_types != []:
        form_args['record_types'] = asset_record_types
    obj_form = objects.AssetForm(**form_args)
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
|
Gets the asset form for creating new assets.
A new form should be requested for each create transaction.
arg: asset_record_types (osid.type.Type[]): array of asset
record types
return: (osid.repository.AssetForm) - the asset form
raise: NullArgument - ``asset_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form for requested record
types
*compliance: mandatory -- This method must be implemented.*
|
def kernels_initialize(self, folder):
    """Write a template kernel-metadata file into *folder*.

    The generated JSON holds placeholder values (plus the configured
    username) for the user to fill in before pushing a kernel.

    Parameters
    ==========
    folder: the path of the folder

    Returns the path of the metadata file written.
    Raises ValueError if *folder* is not an existing directory.
    """
    if not os.path.isdir(folder):
        raise ValueError('Invalid folder: ' + folder)

    # Kept from the original implementation; not referenced below.
    resources = [{'path': 'INSERT_SCRIPT_PATH_HERE'}]

    username = self.get_config_value(self.CONFIG_NAME_USER)
    meta_data = {
        'id': username + '/INSERT_KERNEL_SLUG_HERE',
        'title': 'INSERT_TITLE_HERE',
        'code_file': 'INSERT_CODE_FILE_PATH_HERE',
        'language': 'INSERT_LANGUAGE_HERE',
        'kernel_type': 'INSERT_KERNEL_TYPE_HERE',
        'is_private': 'true',
        'enable_gpu': 'false',
        'enable_internet': 'false',
        'dataset_sources': [],
        'competition_sources': [],
        'kernel_sources': [],
    }

    meta_file = os.path.join(folder, self.KERNEL_METADATA_FILE)
    with open(meta_file, 'w') as meta_out:
        json.dump(meta_data, meta_out, indent=2)
    return meta_file
|
create a new kernel in a specified folder from template, including
json metadata that grabs values from the configuration.
Parameters
==========
folder: the path of the folder
|
def mask(self):
    """
    Returns mask associated with this layer.

    The value is computed on first access and cached in ``self._mask``.

    :return: :py:class:`~psd_tools.api.mask.Mask` or `None`
    """
    try:
        return self._mask
    except AttributeError:
        # First access: build and cache (None when the layer has no mask).
        self._mask = Mask(self) if self.has_mask() else None
        return self._mask
|
Returns mask associated with this layer.
:return: :py:class:`~psd_tools.api.mask.Mask` or `None`
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.