code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def address_offset(self):
    """
    Byte address offset of this node relative to its parent.

    If this node is an array, its index must be known.

    Raises
    ------
    ValueError
        If this property is referenced on a node whose array index is not
        fully defined
    """
    if not self.inst.is_array:
        return self.inst.addr_offset

    if self.current_idx is None:
        raise ValueError("Index of array element must be known to derive address")

    # Calculate the "flattened" index of a general multidimensional array.
    # For example, a component array declared as:
    #   foo[S0][S1][S2]
    # and referenced as:
    #   foo[I0][I1][I2]
    # is flattened like this:
    #   idx = I0*S1*S2 + I1*S2 + I2
    dims = self.inst.array_dimensions
    idx = 0
    for i, cur in enumerate(self.current_idx):
        stride = 1
        for size in dims[i + 1:]:
            stride *= size
        idx += stride * cur

    return self.inst.addr_offset + idx * self.inst.array_stride
|
Byte address offset of this node relative to its parent
If this node is an array, its index must be known
Raises
------
ValueError
If this property is referenced on a node whose array index is not
fully defined
|
def oauth_login(self, provider, id_column, id, attrs, defaults, redirect_url=None):
    """Execute a login via oauth. If no user exists, oauth_signup() will be called

    :param provider: OAuth provider name (stored in the user's
        ``auth_providers`` list and used in flash messages)
    :param id_column: model column name used to look the user up
    :param id: provider-issued identifier matched against ``id_column``
    :param attrs: extra attributes saved on the user / passed to login
    :param defaults: defaults forwarded to oauth_signup() for a new user
    :param redirect_url: target after login; falls back to the ``next``
        request arg, then the configured post-login endpoint
    """
    # Look up the user by the provider-specific identifier column.
    user = self.query.filter(**dict([(id_column, id)])).first()
    if not redirect_url:
        redirect_url = request.args.get('next') or url_for(self.options["redirect_after_login"])
    if self.logged_in():
        # Already logged in: if the oauth account belongs to a different
        # user, warn and bail; otherwise link the provider to the
        # current user and save the extra attributes.
        if user and user != self.current:
            if self.options["oauth_user_already_exists_message"]:
                flash(self.options["oauth_user_already_exists_message"].format(provider=provider), "error")
            return redirect(redirect_url)
        if provider not in self.current.auth_providers:
            self.current.auth_providers.append(provider)
        current_app.features.models.save(self.current, **attrs)
    elif not user:
        # No matching account: hand off to the signup flow.
        return self.oauth_signup(provider, attrs, defaults, redirect_url=redirect_url)
    else:
        self.login(user, provider=provider, **attrs)
    return redirect(redirect_url)
|
Execute a login via oauth. If no user exists, oauth_signup() will be called
|
def log(cls, event=None, actor=None, data=None):
    """Create and persist a new audit event.

    Args:
        event (str): Action performed
        actor (str): Actor (user or subsystem) triggering the event
        data (dict): Any extra data necessary for describing the event

    Returns:
        `None`
    """
    # Imported lazily to avoid a circular import at module load time.
    from cloud_inquisitor.log import auditlog
    payload = {'event': event, 'actor': actor, 'data': data}
    auditlog(**payload)
|
Generate and insert a new event
Args:
event (str): Action performed
actor (str): Actor (user or subsystem) triggering the event
data (dict): Any extra data necessary for describing the event
Returns:
`None`
|
def request(self):
    """ Returns a ``(client, headers)`` pair to be used to make requests.

    When an API key is configured it is sent via the ``X-API-KEY`` header
    and the plain ``requests`` module is returned as the client; otherwise
    an OAuth2Session built from the stored token is returned.

    :raises APIError: if neither an API key nor an OAuth token is available.
    """
    headers = {'Accept': 'application/json'}
    # Use API Key if possible
    if self.api_key:
        headers['X-API-KEY'] = self.api_key
        # NOTE(review): the bare ``requests`` module is returned here so the
        # caller can use it like a session object — presumably intentional.
        return requests,headers
    else:
        # Try to use OAuth
        if self.token:
            return OAuth2Session(self.client_id, token=self.token),headers
        else:
            raise APIError("No API key and no OAuth session available")
|
Returns an OAuth2 Session to be used to make requests.
Returns None if a token hasn't yet been received.
|
def push(package, is_public=False, is_team=False, reupload=False, hash=None):
    """
    Push a Quilt data package to the server.

    :param package: package handle, optionally with a subpath
        (parsed by ``parse_package``)
    :param is_public: make the package publicly visible
    :param is_team: make the package team-visible
    :param reupload: re-upload fragments even if already present
    :param hash: optional specific package hash to push
    :raises CommandException: if the package or subpath is not found
    """
    team, owner, pkg, subpath = parse_package(package, allow_subpath=True)
    _check_team_id(team)
    session = _get_session(team)
    store, pkgroot = PackageStore.find_package(team, owner, pkg, pkghash=hash)
    if pkgroot is None:
        raise CommandException("Package {package} not found.".format(package=package))
    pkghash = hash_contents(pkgroot)
    if hash is not None:
        assert pkghash == hash
    # Descend into the requested subpath, if any.
    contents = pkgroot
    for component in subpath:
        try:
            contents = contents.children[component]
        except (AttributeError, KeyError):
            raise CommandException("Invalid subpath: %r" % component)
    # NOTE(review): ``sizes=dict()`` is a mutable default; harmless here since
    # it is never mutated, but worth confirming.
    def _push_package(dry_run=False, sizes=dict()):
        # Serialize and gzip the package metadata, then POST (subpath
        # update) or PUT (full package) it to the registry.
        data = json.dumps(dict(
            dry_run=dry_run,
            is_public=is_public,
            is_team=is_team,
            contents=contents,
            description="", # TODO
            sizes=sizes
        ), default=encode_node)
        compressed_data = gzip_compress(data.encode('utf-8'))
        if subpath:
            return session.post(
                "{url}/api/package_update/{owner}/{pkg}/{subpath}".format(
                    url=get_registry_url(team),
                    owner=owner,
                    pkg=pkg,
                    subpath='/'.join(subpath)
                ),
                data=compressed_data,
                headers={
                    'Content-Encoding': 'gzip'
                }
            )
        else:
            return session.put(
                "{url}/api/package/{owner}/{pkg}/{hash}".format(
                    url=get_registry_url(team),
                    owner=owner,
                    pkg=pkg,
                    hash=pkghash
                ),
                data=compressed_data,
                headers={
                    'Content-Encoding': 'gzip'
                }
            )
    # Dry run first: the registry answers with per-fragment upload URLs.
    print("Fetching upload URLs from the registry...")
    resp = _push_package(dry_run=True)
    obj_urls = resp.json()['upload_urls']
    assert set(obj_urls) == set(find_object_hashes(contents))
    obj_sizes = {
        obj_hash: os.path.getsize(store.object_path(obj_hash)) for obj_hash in obj_urls
    }
    success = upload_fragments(store, obj_urls, obj_sizes, reupload=reupload)
    if not success:
        raise CommandException("Failed to upload fragments")
    # Fragments are up; now push the metadata for real.
    print("Uploading package metadata...")
    resp = _push_package(sizes=obj_sizes)
    package_url = resp.json()['package_url']
    if not subpath:
        # Update the latest tag.
        print("Updating the 'latest' tag...")
        session.put(
            "{url}/api/tag/{owner}/{pkg}/{tag}".format(
                url=get_registry_url(team),
                owner=owner,
                pkg=pkg,
                tag=LATEST_TAG
            ),
            data=json.dumps(dict(
                hash=pkghash
            ))
        )
    print("Push complete. %s is live:\n%s" % (package, package_url))
|
Push a Quilt data package to the server
|
def _parse_status(self, output):
    '''
    Turn machine-readable `vagrant status` output into Status objects.
    (Keeping Vagrant itself out of the equation makes unit testing
    much easier.)
    '''
    records = self._parse_machine_readable_output(output)
    results = []
    # Records are assumed to be sorted by target name, so groupby
    # collects all of a target's records together.
    for target, group in itertools.groupby(records, key=lambda rec: rec[1]):
        # Map each record "type" to its "data" payload.
        details = {}
        for _timestamp, _target, kind, data in group:
            details[kind] = data
        results.append(Status(name=target,
                              state=details.get('state'),
                              provider=details.get('provider-name')))
    return results
|
Unit testing is so much easier when Vagrant is removed from the
equation.
|
def find_genome_length(self):
    """
    Determine the total length of all the contigs for each strain
    """
    for sample in self.metadata:
        analysis = sample[self.analysistype]
        # Total genome length is simply the sum of all contig lengths.
        analysis.genome_length = sum(analysis.contig_lengths)
|
Determine the total length of all the contigs for each strain
|
def _set_formatter(self):
"""
Inspects config and sets the name of the formatter to either "json" or "text"
as instance attr. If not present in config, default is "text"
"""
if hasattr(self._config, "formatter") and self._config.formatter == "json":
self._formatter = "json"
else:
self._formatter = "text"
|
Inspects config and sets the name of the formatter to either "json" or "text"
as instance attr. If not present in config, default is "text"
|
def set_nest_transactions_with_savepoints(self, nest_transactions_with_savepoints):
    """Enable or disable savepoint-based nesting of transactions.

    :param nest_transactions_with_savepoints: `True` or `False`
    :raises DBALConnectionError: if a transaction is already open, or the
        platform does not support savepoints
    """
    # The nesting strategy cannot change while a transaction is open.
    in_transaction = self._transaction_nesting_level > 0
    if in_transaction:
        raise DBALConnectionError.may_not_alter_nested_transaction_with_savepoints_in_transaction()
    # The underlying platform must actually support savepoints.
    if not self._platform.is_savepoints_supported():
        raise DBALConnectionError.savepoints_not_supported()
    self._nest_transactions_with_savepoints = bool(nest_transactions_with_savepoints)
|
Sets if nested transactions should use savepoints.
:param nest_transactions_with_savepoints: `True` or `False`
|
def _run_runner(self):
    '''
    Execute the runner function named in ``self.opts['fun']``.

    Handles eauth/token resolution, asynchronous publication, and error
    reporting via the master event bus.

    :return: the runner's return data, or the jid string when running
        asynchronously
    '''
    import salt.minion
    ret = {}
    low = {'fun': self.opts['fun']}
    try:
        # Allocate a jid
        async_pub = self._gen_async_pub()
        self.jid = async_pub['jid']
        fun_args = salt.utils.args.parse_input(
            self.opts['arg'],
            no_parse=self.opts.get('no_parse', []))
        verify_fun(self.functions, low['fun'])
        args, kwargs = salt.minion.load_args_and_kwargs(
            self.functions[low['fun']],
            fun_args)
        low['arg'] = args
        low['kwarg'] = kwargs
        if self.opts.get('eauth'):
            if 'token' in self.opts:
                try:
                    # Prefer the local root key over the supplied token.
                    with salt.utils.files.fopen(os.path.join(self.opts['cachedir'], '.root_key'), 'r') as fp_:
                        low['key'] = salt.utils.stringutils.to_unicode(fp_.readline())
                except IOError:
                    low['token'] = self.opts['token']
            # If using eauth and a token hasn't already been loaded into
            # low, prompt the user to enter auth credentials
            if 'token' not in low and 'key' not in low and self.opts['eauth']:
                # This is expensive. Don't do it unless we need to.
                import salt.auth
                resolver = salt.auth.Resolver(self.opts)
                res = resolver.cli(self.opts['eauth'])
                if self.opts['mktoken'] and res:
                    tok = resolver.token_cli(
                        self.opts['eauth'],
                        res
                    )
                    if tok:
                        low['token'] = tok.get('token', '')
                if not res:
                    log.error('Authentication failed')
                    return ret
                low.update(res)
                low['eauth'] = self.opts['eauth']
        else:
            user = salt.utils.user.get_specific_user()
        if low['fun'] in ['state.orchestrate', 'state.orch', 'state.sls']:
            low['kwarg']['orchestration_jid'] = async_pub['jid']
        # Run the runner!
        if self.opts.get('async', False):
            if self.opts.get('eauth'):
                async_pub = self.cmd_async(low)
            else:
                async_pub = self.asynchronous(self.opts['fun'],
                                              low,
                                              user=user,
                                              pub=async_pub)
            # by default: info will be not enough to be printed out !
            log.warning(
                'Running in asynchronous mode. Results of this execution may '
                'be collected by attaching to the master event bus or '
                'by examing the master job cache, if configured. '
                'This execution is running under tag %s', async_pub['tag']
            )
            return async_pub['jid']  # return the jid
        # otherwise run it in the main process
        if self.opts.get('eauth'):
            ret = self.cmd_sync(low)
            # cmd_sync may wrap the data with its preferred outputter.
            if isinstance(ret, dict) and set(ret) == {'data', 'outputter'}:
                outputter = ret['outputter']
                ret = ret['data']
            else:
                outputter = None
            display_output(ret, outputter, self.opts)
        else:
            ret = self._proc_function(self.opts['fun'],
                                      low,
                                      user,
                                      async_pub['tag'],
                                      async_pub['jid'],
                                      daemonize=False)
    except salt.exceptions.SaltException as exc:
        # Report the failure on the master event bus before falling back
        # to printing docs / the error itself.
        evt = salt.utils.event.get_event('master', opts=self.opts)
        evt.fire_event({'success': False,
                        'return': '{0}'.format(exc),
                        'retcode': 254,
                        'fun': self.opts['fun'],
                        'fun_args': fun_args,
                        'jid': self.jid},
                       tag='salt/run/{0}/ret'.format(self.jid))
        # Attempt to grab documentation
        if 'fun' in low:
            ret = self.get_docs('{0}*'.format(low['fun']))
        else:
            ret = None
        # If we didn't get docs returned then
        # return the `not available` message.
        if not ret:
            ret = '{0}'.format(exc)
        if not self.opts.get('quiet', False):
            display_output(ret, 'nested', self.opts)
    else:
        # If we don't have any values in ret by now, that's a problem.
        # Otherwise, we shouldn't be overwriting the retcode.
        if not ret:
            ret = {
                'retcode': salt.defaults.exitcodes.EX_SOFTWARE,
            }
    log.debug('Runner return: %s', ret)
    return ret
|
Actually execute specific runner
:return:
|
def convert_all(cls, records):
    """Convert a list of bibrecs into a single MARCXML collection.

    >>> from harvestingkit.bibrecord import BibRecordPackage
    >>> from harvestingkit.inspire_cds_package import Inspire2CDS
    >>> bibrecs = BibRecordPackage("inspire.xml")
    >>> bibrecs.parse()
    >>> xml = Inspire2CDS.convert_all(bibrecs.get_records())

    :param records: list of BibRecord dicts
    :type records: list
    :returns: MARCXML as string
    """
    pieces = ["<collection>"]
    # Each record is converted by its own converter instance.
    pieces.extend(cls(record).convert() for record in records)
    pieces.append("</collection>")
    return "\n".join(pieces)
|
Convert the list of bibrecs into one MARCXML.
>>> from harvestingkit.bibrecord import BibRecordPackage
>>> from harvestingkit.inspire_cds_package import Inspire2CDS
>>> bibrecs = BibRecordPackage("inspire.xml")
>>> bibrecs.parse()
>>> xml = Inspire2CDS.convert_all(bibrecs.get_records())
:param records: list of BibRecord dicts
:type records: list
:returns: MARCXML as string
|
def varify_user_lock(repository_path, session_token):
    """ Verify that a returning user has a valid token and their lock has not expired.

    :param repository_path: path of the repository holding the 'user_file'
    :param session_token: token presented by the returning user
    :return: True if the token matches and the lock has not expired,
        False otherwise (including an empty or unparsable user file)
    """
    # NOTE: name keeps the historical "varify" spelling for API compatibility.
    with open(cpjoin(repository_path, 'user_file'), 'r') as fd2:
        content = fd2.read()
    if not content:
        return False
    try:
        res = json.loads(content)
    except ValueError:
        return False
    # Token must match and the expiry timestamp must be in the future.
    # (The original had an unreachable trailing `return False` here, removed.)
    return res['session_token'] == session_token and int(time.time()) < int(res['expires'])
|
Verify that a returning user has a valid token and their lock has not expired
|
def is_pickle_file(abspath):
    """Parse file extension.
    - *.pickle: uncompressed, utf-8 encode pickle file
    - *.gz: compressed, utf-8 encode pickle file
    """
    abspath = abspath.lower()
    fname, ext = os.path.splitext(abspath)
    # A ".tmp" suffix is transparent: classify by the inner extension.
    if ext == ".tmp":
        return is_pickle_file(fname)
    if ext in (".pickle", ".pk", ".p"):
        return True
    if ext == ".gz":
        return False
    raise PickleExtError(
        "'%s' is not a valid pickle file. "
        "extension has to be '.pickle' for uncompressed, '.gz' "
        "for compressed." % abspath)
|
Parse file extension.
- *.pickle: uncompressed, utf-8 encode pickle file
- *.gz: compressed, utf-8 encode pickle file
|
def cmd_log(self, reopen=False, rotate=False):
    """Allows managing of uWSGI log related stuff.

    :param bool reopen: Reopen log file. Could be required after third party rotation.
    :param bool rotate: Trigger built-in log rotation.
    """
    # 'l' requests a log reopen, 'L' a built-in rotation; both may be sent.
    command = b''.join(
        code for enabled, code in ((reopen, b'l'), (rotate, b'L')) if enabled)
    return self.send_command(command)
|
Allows managing of uWSGI log related stuff
:param bool reopen: Reopen log file. Could be required after third party rotation.
:param bool rotate: Trigger built-in log rotation.
|
def _which_display(self, log: str, output: str) -> HTML:
    """
    Determine whether the log or the LST should be returned as the results
    for the cell, based on parsing the log for errors and on the presence
    of LST output.

    :param log: str log from code submission
    :param output: None or str lst output if there was any
    :return: The correct results based on log and lst
    :rtype: HTML
    """
    lines = re.split(r'[\n]\s*', log)
    i = 0
    elog = []
    # Collect a context window (15 lines before, 16 after) around every
    # log line that starts with ERROR.
    for line in lines:
        i += 1
        e = []
        if line.startswith('ERROR'):
            logger.debug("In ERROR Condition")
            e = lines[(max(i - 15, 0)):(min(i + 16, len(lines)))]
        elog = elog + e
    tlog = '\n'.join(elog)
    logger.debug("elog count: " + str(len(elog)))
    logger.debug("tlog: " + str(tlog))
    color_log = highlight(log, SASLogLexer(), HtmlFormatter(full=True, style=SASLogStyle, lineseparator="<br>"))
    # store the log for display in the showSASLog nbextension
    self.cachedlog = color_log
    # Are there errors in the log? if so, show the lines on each side of the error
    if len(elog) == 0 and len(output) > self.lst_len:  # no error and LST output
        debug1 = 1
        logger.debug("DEBUG1: " + str(debug1) + " no error and LST output ")
        return HTML(output)
    elif len(elog) == 0 and len(output) <= self.lst_len:  # no error and no LST
        debug1 = 2
        logger.debug("DEBUG1: " + str(debug1) + " no error and no LST")
        return HTML(color_log)
    elif len(elog) > 0 and len(output) <= self.lst_len:  # error and no LST
        debug1 = 3
        logger.debug("DEBUG1: " + str(debug1) + " error and no LST")
        return HTML(color_log)
    else:  # errors and LST
        debug1 = 4
        logger.debug("DEBUG1: " + str(debug1) + " errors and LST")
        return HTML(color_log + output)
|
Determines if the log or lst should be returned as the results for the cell based on parsing the log
looking for errors and the presence of lst output.
:param log: str log from code submission
:param output: None or str lst output if there was any
:return: The correct results based on log and lst
:rtype: str
|
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False,
               hashed_key=False, **h5dcreate_kwargs):
    """HDF5 cache decorator.
    Parameters
    ----------
    filepath : string, optional
        Path to HDF5 file. If None a temporary file name will be used.
    parent : string, optional
        Path to group within HDF5 file to use as parent. If None the root
        group will be used.
    group : string, optional
        Path to group within HDF5 file, relative to parent, to use as
        container for cached data. If None the name of the wrapped function
        will be used.
    names : sequence of strings, optional
        Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
        etc.
    typed : bool, optional
        If True, arguments of different types will be cached separately.
        For example, f(3.0) and f(3) will be treated as distinct calls with
        distinct results.
    hashed_key : bool, optional
        If False (default) the key will not be hashed, which makes for
        readable cache group names. If True the key will be hashed, however
        note that on Python >= 3.3 the hash value will not be the same between
        sessions unless the environment variable PYTHONHASHSEED has been set
        to the same value.
    Returns
    -------
    decorator : function
    Examples
    --------
    Without any arguments, will cache using a temporary HDF5 file::
        >>> import allel
        >>> @allel.util.hdf5_cache()
        ... def foo(n):
        ...     print('executing foo')
        ...     return np.arange(n)
        ...
        >>> foo(3)
        executing foo
        array([0, 1, 2])
        >>> foo(3)
        array([0, 1, 2])
        >>> foo.cache_filepath # doctest: +SKIP
        '/tmp/tmp_jwtwgjz'
    Supports multiple return values, including scalars, e.g.::
        >>> @allel.util.hdf5_cache()
        ... def bar(n):
        ...     print('executing bar')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> bar(3)
        executing bar
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> bar(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)
    Names can also be specified for the datasets, e.g.::
        >>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
        ... def baz(n):
        ...     print('executing baz')
        ...     a = np.arange(n)
        ...     return a, a**2, n**2
        ...
        >>> baz(3)
        executing baz
        (array([0, 1, 2]), array([0, 1, 4]), 9)
        >>> baz(3)
        (array([0, 1, 2]), array([0, 1, 4]), 9)
    """
    # initialise HDF5 file path
    if filepath is None:
        import tempfile
        # NOTE(review): mktemp only reserves a name (race-prone); presumably
        # the file itself is created later by h5py. Removed at interpreter
        # exit via atexit.
        filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5')
        atexit.register(os.remove, filepath)
    # initialise defaults for dataset creation
    h5dcreate_kwargs.setdefault('chunks', True)
    def decorator(user_function):
        # setup the name for the cache container group
        if group is None:
            container = user_function.__name__
        else:
            container = group
        def wrapper(*args, **kwargs):
            # load from cache or not
            no_cache = kwargs.pop('no_cache', False)
            # compute a key from the function arguments
            key = _make_key(args, kwargs, typed)
            if hashed_key:
                key = str(hash(key))
            else:
                # keep the key readable but safe as an HDF5 group name
                key = str(key).replace('/', '__slash__')
            return _hdf5_cache_act(filepath, parent, container, key, names,
                                   no_cache, user_function, args, kwargs,
                                   h5dcreate_kwargs)
        wrapper.cache_filepath = filepath
        return update_wrapper(wrapper, user_function)
    return decorator
|
HDF5 cache decorator.
Parameters
----------
filepath : string, optional
Path to HDF5 file. If None a temporary file name will be used.
parent : string, optional
Path to group within HDF5 file to use as parent. If None the root
group will be used.
group : string, optional
Path to group within HDF5 file, relative to parent, to use as
container for cached data. If None the name of the wrapped function
will be used.
names : sequence of strings, optional
Name(s) of dataset(s). If None, default names will be 'f00', 'f01',
etc.
typed : bool, optional
If True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
hashed_key : bool, optional
If False (default) the key will not be hashed, which makes for
readable cache group names. If True the key will be hashed, however
note that on Python >= 3.3 the hash value will not be the same between
sessions unless the environment variable PYTHONHASHSEED has been set
to the same value.
Returns
-------
decorator : function
Examples
--------
Without any arguments, will cache using a temporary HDF5 file::
>>> import allel
>>> @allel.util.hdf5_cache()
... def foo(n):
... print('executing foo')
... return np.arange(n)
...
>>> foo(3)
executing foo
array([0, 1, 2])
>>> foo(3)
array([0, 1, 2])
>>> foo.cache_filepath # doctest: +SKIP
'/tmp/tmp_jwtwgjz'
Supports multiple return values, including scalars, e.g.::
>>> @allel.util.hdf5_cache()
... def bar(n):
... print('executing bar')
... a = np.arange(n)
... return a, a**2, n**2
...
>>> bar(3)
executing bar
(array([0, 1, 2]), array([0, 1, 4]), 9)
>>> bar(3)
(array([0, 1, 2]), array([0, 1, 4]), 9)
Names can also be specified for the datasets, e.g.::
>>> @allel.util.hdf5_cache(names=['z', 'x', 'y'])
... def baz(n):
... print('executing baz')
... a = np.arange(n)
... return a, a**2, n**2
...
>>> baz(3)
executing baz
(array([0, 1, 2]), array([0, 1, 4]), 9)
>>> baz(3)
(array([0, 1, 2]), array([0, 1, 4]), 9)
|
def _read(self, source):
    """
    Read and parse the config source.

    :param file/str source: Config source URL (http/https), or string, file name, or file pointer.
    """
    # URLs are fetched (with caching) first; everything else is passed
    # straight through to the parent reader.
    if source.startswith(('http://', 'https://')):
        source = url_content(source, cache_duration=self._cache_duration, from_cache_on_error=True)
    return super(RemoteConfig, self)._read(source)
|
Reads and parses the config source
:param file/str source: Config source URL (http/https), or string, file name, or file pointer.
|
def _star_comparison(filter_value, tested_value):
    """
    Tests a filter containing a joker ("*" wildcard).

    :param filter_value: Filter string, possibly containing "*" wildcards
    :param tested_value: String value tested against the filter
    :return: True if ``tested_value`` matches the wildcard pattern
    """
    if not is_string(tested_value):
        # Unhandled value type...
        return False
    # Split on the wildcard: the literal fragments must appear in order.
    parts = filter_value.split("*")
    i = 0
    last_part = len(parts) - 1
    idx = 0
    for part in parts:
        # Find the part in the tested value
        idx = tested_value.find(part, idx)
        if idx == -1:
            # Part not found
            return False
        len_part = len(part)
        if i == 0 and len_part != 0 and idx != 0:
            # First part is not a star, but the tested value is not at
            # position 0 => Doesn't match
            return False
        if (
            i == last_part
            and len_part != 0
            and idx != len(tested_value) - len_part
        ):
            # Last tested part is not at the end of the sequence
            return False
        # Be sure to test the next part
        idx += len_part
        i += 1
    # Whole test passed
    return True
|
Tests a filter containing a joker
|
def get_spaces(self):
    """
    Return a flat list of the names for spaces in the organization.
    """
    # Cache the names on the instance before returning them.
    resources = self._get_spaces()['resources']
    self.spaces = [resource['entity']['name'] for resource in resources]
    return self.spaces
|
Return a flat list of the names for spaces in the organization.
|
def serialize(self, pid, record, links_factory=None):
    """Serialize a single record and persistent identifier.

    :param pid: Persistent identifier instance.
    :param record: Record instance.
    :param links_factory: Factory function for record links.
    :returns: the transformed record rendered via ``simpledc.tostring``.
    """
    transformed = self.transform_record(pid, record, links_factory)
    return simpledc.tostring(transformed)
|
Serialize a single record and persistent identifier.
:param pid: Persistent identifier instance.
:param record: Record instance.
:param links_factory: Factory function for record links.
|
def _swap_on_miss(partition_result):
"""
Given a partition_dict result, if the partition missed, swap
the before and after.
"""
before, item, after = partition_result
return (before, item, after) if item else (after, item, before)
|
Given a partition_dict result, if the partition missed, swap
the before and after.
|
def _symbol_extract(self, regex, plus = True, brackets=False):
"""Extracts a symbol or full symbol from the current line,
optionally including the character under the cursor.
:arg regex: the compiled regular expression to use for extraction.
:arg plus: when true, the character under the cursor *is* included.
:arg brackets: when true, matching pairs of brackets are first removed
before the regex is run.
"""
charplus = self.pos[1] + (1 if plus else -1)
consider = self.current_line[:charplus][::-1]
#We want to remove matching pairs of brackets so that derived types
#that have arrays still get intellisense.
if brackets==True:
#The string has already been reversed, just run through it.
rightb = []
lastchar = None
for i in range(len(consider)):
if consider[i] == ")":
rightb.append(i)
elif consider[i] == "(" and len(rightb) > 0:
lastchar = i
rightb.pop()
if lastchar is not None:
consider = '%' + consider[lastchar+1:]
rematch = regex.match(consider)
if rematch is not None:
return rematch.group("symbol")[::-1]
else:
return ""
|
Extracts a symbol or full symbol from the current line,
optionally including the character under the cursor.
:arg regex: the compiled regular expression to use for extraction.
:arg plus: when true, the character under the cursor *is* included.
:arg brackets: when true, matching pairs of brackets are first removed
before the regex is run.
|
def compute(self):
    """Run the SuperSmoother.

    Executes each stage of the algorithm in order and finally stores the
    result via ``_store_unsorted_results`` (with zeroed residuals).
    """
    self._compute_primary_smooths()
    self._smooth_the_residuals()
    self._select_best_smooth_at_each_point()
    self._enhance_bass()
    self._smooth_best_span_estimates()
    self._apply_best_spans_to_primaries()
    self._smooth_interpolated_smooth()
    # The second argument (residuals) is an all-zero array of matching length.
    self._store_unsorted_results(self.smooth_result, numpy.zeros(len(self.smooth_result)))
|
Run the SuperSmoother.
|
def smooth_magseries_savgol(mags, windowsize, polyorder=2):
    '''Smooth the magseries with a Savitsky-Golay filter.

    Parameters
    ----------
    mags : np.array
        The input mags/flux time-series to smooth.
    windowsize : int
        An odd integer giving the smoothing window size.
    polyorder : int
        The polynomial degree to use when generating the Savitsky-Golay
        filter.

    Returns
    -------
    np.array
        The smoothed mag/flux time-series array.
    '''
    return savgol_filter(mags, windowsize, polyorder)
|
This smooths the magseries with a Savitsky-Golay filter.
Parameters
----------
mags : np.array
The input mags/flux time-series to smooth.
windowsize : int
This is a odd integer containing the smoothing window size.
polyorder : int
This is an integer containing the polynomial degree order to use when
generating the Savitsky-Golay filter.
Returns
-------
np.array
The smoothed mag/flux time-series array.
|
def lxc_path(cls, *join_paths):
    """Returns the LXC path (default on ubuntu is /var/lib/lxc)"""
    # `lxc-ls -d` prints the base directory on its first output line.
    result = subwrap.run(['lxc-ls', '-d'])
    base = result.std_out.splitlines()[0].strip()
    return os.path.join(base, *join_paths)
|
Returns the LXC path (default on ubuntu is /var/lib/lxc)
|
def get_member_named(self, name):
    """Return the first member matching *name*, or ``None``.

    The name may carry an optional 4-digit discriminator, e.g. "Jake#0001";
    in that case an exact username+discriminator lookup is attempted first.
    Plain names (including nicknames) fall back to a name/nick scan. A
    nickname combined with a discriminator is NOT looked up as a nickname,
    since nickname + discriminator is not unique.

    Parameters
    -----------
    name: :class:`str`
        The name of the member to lookup with an optional discriminator.

    Returns
    --------
    :class:`Member`
        The member in this guild with the associated name. If not found
        then ``None`` is returned.
    """
    members = self.members
    # "a#0000" has length 6, the minimum for a potential discriminator
    # lookup, hence the > 5 check with '#' in the fifth-from-last slot.
    if len(name) > 5 and name[-5] == '#':
        username, potential_discriminator = name[:-5], name[-4:]
        # Exact username+discriminator lookup first; fall through to the
        # full name scan below if it misses.
        hit = utils.get(members, name=username, discriminator=potential_discriminator)
        if hit is not None:
            return hit
    return utils.find(lambda m: m.nick == name or m.name == name, members)
|
Returns the first member found that matches the name provided.
The name can have an optional discriminator argument, e.g. "Jake#0001"
or "Jake" will both do the lookup. However the former will give a more
precise result. Note that the discriminator must have all 4 digits
for this to work.
If a nickname is passed, then it is looked up via the nickname. Note
however, that a nickname + discriminator combo will not lookup the nickname
but rather the username + discriminator combo due to nickname + discriminator
not being unique.
If no member is found, ``None`` is returned.
Parameters
-----------
name: :class:`str`
The name of the member to lookup with an optional discriminator.
Returns
--------
:class:`Member`
The member in this guild with the associated name. If not found
then ``None`` is returned.
|
def _css_helper(self):
""" Add CSS links for the current page and for the plugins """
entries = [entry for entry in self._plugin_manager.call_hook("css") if entry is not None]
# Load javascript for the current page
entries += self._get_ctx()["css"]
entries = ["<link href='" + entry + "' rel='stylesheet'>" for entry in entries]
return "\n".join(entries)
|
Add CSS links for the current page and for the plugins
|
def get_argparser():
    """
    Build and return the twarc command line argument parser.

    Relies on the module-level ``commands`` sequence for the valid
    subcommand choices.
    """
    parser = argparse.ArgumentParser("twarc")
    parser.add_argument('command', choices=commands)
    parser.add_argument('query', nargs='?', default=None)
    parser.add_argument("--log", dest="log",
                        default="twarc.log", help="log file")
    parser.add_argument("--consumer_key",
                        default=None, help="Twitter API consumer key")
    parser.add_argument("--consumer_secret",
                        default=None, help="Twitter API consumer secret")
    parser.add_argument("--access_token",
                        default=None, help="Twitter API access key")
    parser.add_argument("--access_token_secret",
                        default=None, help="Twitter API access token secret")
    parser.add_argument('--config',
                        help="Config file containing Twitter keys and secrets")
    parser.add_argument('--profile',
                        help="Name of a profile in your configuration file")
    parser.add_argument('--warnings', action='store_true',
                        help="Include warning messages in output")
    # defaults are plain ints (were string "0", which argparse coerced)
    parser.add_argument("--connection_errors", type=int, default=0,
                        help="Number of connection errors before giving up")
    parser.add_argument("--http_errors", type=int, default=0,
                        help="Number of http errors before giving up")
    parser.add_argument("--max_id", dest="max_id",
                        help="maximum tweet id to search for")
    parser.add_argument("--since_id", dest="since_id",
                        help="smallest id to search for")
    parser.add_argument("--result_type", dest="result_type",
                        choices=["mixed", "recent", "popular"],
                        default="recent", help="search result type")
    # (removed a stray trailing comma that turned this call into a tuple)
    parser.add_argument("--lang", dest="lang",
                        help="limit to ISO 639-1 language code")
    parser.add_argument("--geocode", dest="geocode",
                        help="limit by latitude,longitude,radius")
    parser.add_argument("--locations", dest="locations",
                        help="limit filter stream to location(s)")
    parser.add_argument("--follow", dest="follow",
                        help="limit filter to tweets from given user id(s)")
    parser.add_argument("--recursive", dest="recursive", action="store_true",
                        help="also fetch replies to replies")
    parser.add_argument("--tweet_mode", action="store", default="extended",
                        dest="tweet_mode", choices=["compat", "extended"],
                        help="set tweet mode")
    parser.add_argument("--protected", dest="protected", action="store_true",
                        help="include protected tweets")
    parser.add_argument("--output", action="store", default=None,
                        dest="output", help="write output to file path")
    parser.add_argument("--format", action="store", default="json",
                        dest="format", choices=["json", "csv", "csv-excel"],
                        help="set output format")
    parser.add_argument("--split", action="store", type=int, default=0,
                        help="used with --output to split into numbered files")
    parser.add_argument("--skip_key_validation", action="store_true",
                        help="skip checking keys are valid on startup")
    return parser
|
Get the command line argument parser.
|
def set_window_class(self, window, name, class_):
    """
    Change the window's classname and or class.

    :param window: X window id of the window to modify.
    :param name: The new class name. If ``None``, no change.
    :param class_: The new class. If ``None``, no change.
    """
    # Thin wrapper around the libxdo C call.
    _libxdo.xdo_set_window_class(self._xdo, window, name, class_)
|
Change the window's classname and or class.
:param name: The new class name. If ``None``, no change.
:param class_: The new class. If ``None``, no change.
|
def _prepare_args(log_likelihood_fn, state,
                  log_likelihood=None, description='log_likelihood'):
    """Normalize inputs into the list-of-tensors form used downstream.

    Wraps a non-list `state` in a single-element list, converts every part
    to a tensor, and computes `log_likelihood` from `log_likelihood_fn`
    when it was not supplied.
    """
    if mcmc_util.is_list_like(state):
        raw_parts = list(state)
    else:
        raw_parts = [state]
    state_parts = [tf.convert_to_tensor(part, name='current_state')
                   for part in raw_parts]
    log_likelihood = _maybe_call_fn(
        log_likelihood_fn, state_parts, log_likelihood, description)
    return [state_parts, log_likelihood]
|
Processes input args to meet list-like assumptions.
|
def yieldOutput(self):
    """
    Generate the text output for the table.
    @rtype: generator of str
    @return: Text output.
    """
    widths = self.__width
    if not widths:
        return
    column_count = len(widths)
    # Build one %-style spec per column; negative width => left-aligned.
    specs = ['%%%ds' % -w for w in widths]
    if widths[-1] > 0:
        # Last column needs no padding when its configured width is positive.
        specs[-1] = '%s'
    line_format = self.__sep.join(specs)
    for row in self.__cols:
        # Pad short rows in place so the format string always matches.
        row.extend([''] * (column_count - len(row)))
        yield line_format % tuple(row)
|
Generate the text output for the table.
@rtype: generator of str
@return: Text output.
|
def execute_lines(self, lines):
    """Execute lines and give focus to shell.

    :param lines: code to run; coerced to text before being handed to
        the shell widget.
    """
    self.shell.execute_lines(to_text_string(lines))
    self.shell.setFocus()
|
Execute lines and give focus to shell
|
def to_glyphs_guidelines(self, ufo_obj, glyphs_obj):
    """Copy guidelines from the UFO object onto the Glyphs object,
    decoding lock/color/identifier markers from the guideline name."""
    if not ufo_obj.guidelines:
        return
    for src in ufo_obj.guidelines:
        dest = self.glyphs_module.GSGuideLine()
        label = src.name
        # A trailing lock-suffix in the name marks a locked guideline.
        if label is not None and label.endswith(LOCKED_NAME_SUFFIX):
            label = label[: -len(LOCKED_NAME_SUFFIX)]
            dest.locked = True
        # Color and identifier round-trip through name suffixes.
        if src.color:
            label = (label or "") + COLOR_NAME_SUFFIX % str(src.color)
        if src.identifier:
            label = (label or "") + IDENTIFIER_NAME_SUFFIX % src.identifier
        dest.name = label
        dest.position = Point(src.x or 0, src.y or 0)
        if src.angle is not None:
            dest.angle = src.angle % 360
        elif _is_vertical(src.x, src.y, None):
            dest.angle = 90
        glyphs_obj.guides.append(dest)
|
Set guidelines.
|
def notify(self, msgtype, method, params):
    """Handle an incoming notify request.

    :param msgtype: message-type marker from the RPC layer (unused here).
    :param method: name of the method to invoke.
    :param params: parameters forwarded to the dispatched method.
    """
    # Notifications expect no response, so the dispatch result is discarded.
    self.dispatch.call(method, params)
|
Handle an incoming notify request.
|
def _send_delete_request(self, path, headers):
    """
    Sends the DELETE request to the Route53 endpoint.

    :param str path: The path to tack on to the endpoint URL for
        the query.
    :param dict headers: A dict of headers to send with the request.
    :rtype: str
    :returns: The body of the response.
    """
    url = self.endpoint + path
    response = requests.delete(url, headers=headers)
    return response.text
|
Sends the DELETE request to the Route53 endpoint.
:param str path: The path to tack on to the endpoint URL for
the query.
:param dict headers: A dict of headers to send with the request.
:rtype: str
:returns: The body of the response.
|
def format_value(value):
    r"""
    Format a Python value as an InfluxDB line-protocol field value.

    Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
    (e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
    If they do not they will be written as floats.
    Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
    Boolean values indicate true or false. Valid boolean strings for line protocol are
    (t, T, true, True, TRUE, f, F, false, False and FALSE).
    Strings are text values. All string field values must be surrounded in double-quotes ".
    If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
    """
    if isinstance(value, basestring):
        # BUGFIX: the original used value.replace('"', '\"'), where '\"'
        # is just '"' — a no-op. Embedded double-quotes must gain a
        # backslash, per the escaping rule documented above.
        value = value.replace('"', '\\"')
        value = u'"{0}"'.format(value)
    elif isinstance(value, bool):
        # bool must be tested before int: isinstance(True, int) is True.
        value = str(value)
    elif isinstance(value, int):
        value = "{0}i".format(value)
    elif isinstance(value, float):
        value = str(value)
    return value
|
Integers are numeric values that do not include a decimal and are followed by a trailing i when inserted
(e.g. 1i, 345i, 2015i, -10i). Note that all values must have a trailing i.
If they do not they will be written as floats.
Floats are numeric values that are not followed by a trailing i. (e.g. 1, 1.0, -3.14, 6.0e5, 10).
Boolean values indicate true or false. Valid boolean strings for line protocol are
(t, T, true, True, TRUE, f, F, false, False and FALSE).
Strings are text values. All string field values must be surrounded in double-quotes ".
If the string contains a double-quote, the double-quote must be escaped with a backslash, e.g. \".
|
def create_ustar_header(self, info, encoding, errors):
    """Return the object as a ustar header block.

    :param info: dict of tar member fields ("name", "linkname", "magic", ...).
    :param encoding: encoding used when serializing header fields.
    :param errors: error-handling scheme for the encoding step.
    :raises ValueError: if the link name exceeds the ustar length limit.
    """
    info["magic"] = POSIX_MAGIC
    # ustar has no way to represent an overlong link target.
    if len(info["linkname"]) > LENGTH_LINK:
        raise ValueError("linkname is too long")
    # Overlong member names can be split into a prefix/name pair instead.
    if len(info["name"]) > LENGTH_NAME:
        info["prefix"], info["name"] = self._posix_split_name(info["name"])
    return self._create_header(info, USTAR_FORMAT, encoding, errors)
|
Return the object as a ustar header block.
|
def visit_BoolOp(self, node):
    """
    Merge BoolOp operand type.

    BoolOp are "and" and "or" and may return any of these results so all
    operands should have the combinable type.
    """
    # Visit subnodes
    self.generic_visit(node)
    # Merge all operand types. A plain loop replaces the original list
    # comprehension, which built and discarded a list purely for its
    # side effects.
    for value in node.values:
        self.combine(node, value)
|
Merge BoolOp operand type.
BoolOp are "and" and "or" and may return any of these results so all
operands should have the combinable type.
|
def send(self):
    """
    Send the message to each user on the queryset.

    Create SentDrip for each user that gets a message.

    Returns count of created SentDrips.
    """
    if not self.from_email:
        # Fall back to the project-wide drip sender, then the Django default.
        self.from_email = getattr(settings, 'DRIP_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)
    MessageClass = message_class_for(self.drip_model.message_class)
    count = 0
    for user in self.get_queryset():
        message_instance = MessageClass(self, user)
        try:
            result = message_instance.message.send()
            if result:
                SentDrip.objects.create(
                    drip=self.drip_model,
                    user=user,
                    from_email=self.from_email,
                    from_email_name=self.from_email_name,
                    subject=message_instance.subject,
                    body=message_instance.body
                )
                count += 1
        except Exception as e:
            # Lazy %-args instead of eager "%" formatting: logging only
            # formats if the record is actually emitted, and a formatting
            # failure cannot abort the send loop.
            logging.error("Failed to send drip %s to user %s: %s",
                          self.drip_model.id, user, e)
    return count
|
Send the message to each user on the queryset.
Create SentDrip for each user that gets a message.
Returns count of created SentDrips.
|
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports.
    If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
    The key is not accepted into any keyrings.

    :param key_url: URL (http://, https://, blockstack://, iks://, or a bare keyserver name)
    :param key_id: optional GPG key ID used to verify the fetched key
    :param config_dir: optional config directory for blockstack/GPG lookups

    Return the key data on success. If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification
    """
    # NOTE: this is Python 2 code (urllib2, `except Exception, e`).
    dat = None
    from_blockstack = False
    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None
    # URLs with a scheme (other than iks://) are fetched over the network;
    # everything else is treated as a PGP keyserver below.
    if "://" in key_url and not key_url.lower().startswith("iks://"):
        opener = None
        key_data = None
        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True
        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        else:
            # defaults
            opener = urllib2.build_opener()
        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()
        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None
        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol, using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None
        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None
        dat = key_data
    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            key_server = urlparse.urlparse(key_server).netloc
        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )
    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
|
Fetch a GPG public key from the given URL.
Supports anything urllib2 supports.
If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it.
The key is not accepted into any keyrings.
Return the key data on success. If key_id is given, verify the key matches.
Return None on error, or on failure to carry out any key verification
|
def extract_arguments(text):
    """
    Returns the argument after the command.

    Examples:
    extract_arguments("/get name"): 'name'
    extract_arguments("/get"): ''
    extract_arguments("/get@botName name"): 'name'

    :param text: String to extract the arguments from a command
    :return: the arguments if `text` is a command (according to is_command), else None.
    """
    # Raw string: "\w" / "\s" are invalid string-literal escapes and raise
    # DeprecationWarning (eventually SyntaxError) on modern Pythons.
    # The pattern itself is unchanged.
    regexp = re.compile(r"/\w*(@\w*)*\s*([\s\S]*)", re.IGNORECASE)
    result = regexp.match(text)
    return result.group(2) if is_command(text) else None
|
Returns the argument after the command.
Examples:
extract_arguments("/get name"): 'name'
extract_arguments("/get"): ''
extract_arguments("/get@botName name"): 'name'
:param text: String to extract the arguments from a command
:return: the arguments if `text` is a command (according to is_command), else None.
|
def rotate_scale(im, angle, scale, borderValue=0, interp=cv2.INTER_CUBIC):
    """Rotates and scales the image

    Parameters
    ----------
    im: 2d array
        The image
    angle: number
        The angle, in radians, to rotate
    scale: positive number
        The scale factor
    borderValue: number, default 0
        The value for the pixels outside the border (default 0)
    interp: int, default cv2.INTER_CUBIC
        OpenCV interpolation flag used by warpAffine

    Returns
    -------
    im: 2d array
        the rotated and scaled image

    Notes
    -----
    The output image has the same size as the input.
    Therefore the image may be cropped in the process.
    """
    im = np.asarray(im, dtype=np.float32)
    rows, cols = im.shape
    # OpenCV expects degrees; the sign flip and 1/scale map our "rotate by
    # angle and scale up" convention onto getRotationMatrix2D's convention.
    M = cv2.getRotationMatrix2D(
        (cols / 2, rows / 2), -angle * 180 / np.pi, 1 / scale)
    im = cv2.warpAffine(im, M, (cols, rows),
                        borderMode=cv2.BORDER_CONSTANT,
                        flags=interp,
                        borderValue=borderValue)  # REPLICATE
    return im
|
Rotates and scales the image
Parameters
----------
im: 2d array
The image
angle: number
The angle, in radians, to rotate
scale: positive number
The scale factor
borderValue: number, default 0
The value for the pixels outside the border (default 0)
Returns
-------
im: 2d array
the rotated and scaled image
Notes
-----
The output image has the same size as the input.
Therefore the image may be cropped in the process.
|
def get_packages(self, show):
    """
    Return list of Distributions filtered by active status or all

    @param show: Type of package(s) to show; active, non-active or all
    @type show: string: "active", "non-active", "all"
    @returns: list of pkg_resources Distribution objects
    """
    # Accept both the documented "non-active" spelling and the legacy
    # "nonactive" one; previously "non-active" silently fell through to
    # the active-only branch.
    if show in ("non-active", "nonactive", "all"):
        all_packages = []
        for package in self.environment:
            # There may be multiple versions of the same package
            for dist in self.environment[package]:
                if dist:
                    all_packages.append(dist)
        return all_packages
    else:
        # Only activated packages
        return self.working_set
|
Return list of Distributions filtered by active status or all
@param show: Type of package(s) to show; active, non-active or all
@type show: string: "active", "non-active", "all"
@returns: list of pkg_resources Distribution objects
|
def phase_estimation(U: np.ndarray, accuracy: int, reg_offset: int = 0) -> Program:
    """
    Generate a circuit for quantum phase estimation.

    :param U: A unitary matrix.
    :param accuracy: Number of bits of accuracy desired.
    :param reg_offset: Where to start writing measurements (default 0).
    :return: A Quil program to perform phase estimation.
    """
    assert isinstance(accuracy, int)
    rows, cols = U.shape
    # Number of qubits U acts on (U is 2^m x 2^m).
    m = int(log2(rows))
    output_qubits = range(0, accuracy)
    U_qubits = range(accuracy, accuracy + m)
    p = Program()
    ro = p.declare('ro', 'BIT', len(output_qubits))
    # Hadamard initialization
    for i in output_qubits:
        p.inst(H(i))
    # Controlled unitaries
    for i in output_qubits:
        if i > 0:
            # Repeated squaring: at iteration i, U holds the original
            # unitary raised to the power 2**i.
            U = np.dot(U, U)
        cU = controlled(U)
        name = "CONTROLLED-U{0}".format(2 ** i)
        # define the gate
        p.defgate(name, cU)
        # apply it
        p.inst((name, i) + tuple(U_qubits))
    # Compute the QFT
    p = p + inverse_qft(output_qubits)
    # Perform the measurements
    for i in output_qubits:
        p.measure(i, ro[reg_offset + i])
    return p
|
Generate a circuit for quantum phase estimation.
:param U: A unitary matrix.
:param accuracy: Number of bits of accuracy desired.
:param reg_offset: Where to start writing measurements (default 0).
:return: A Quil program to perform phase estimation.
|
def _deshuffle_field(self, *args):
"""
Return to original ordering
"""
ip = self._invpermutation
fields = []
for arg in args:
fields.append( arg[ip] )
if len(fields) == 1:
return fields[0]
else:
return fields
|
Return to original ordering
|
def _run_atexit():
    '''Hook frameworks must invoke this after the main hook body has
    successfully completed. Do not invoke it if the hook fails.'''
    global _atexit
    # Run callbacks in reverse registration order (LIFO, like atexit).
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    # Empty the list in place so any other references see it cleared.
    del _atexit[:]
|
Hook frameworks must invoke this after the main hook body has
successfully completed. Do not invoke it if the hook fails.
|
def create(self, model_name):
    """Create a model.

    Args:
      model_name: the short name of the model, such as "iris".
    Returns:
      If successful, returns information about the model, such as
      {u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
    Raises:
      If the model creation failed.
    """
    body = {'name': model_name}
    parent = 'projects/' + self._project_id
    # Model creation is instant. If anything goes wrong, Exception will be thrown.
    return self._api.projects().models().create(body=body, parent=parent).execute()
|
Create a model.
Args:
model_name: the short name of the model, such as "iris".
Returns:
If successful, returns information about the model, such as
{u'regions': [u'us-central1'], u'name': u'projects/myproject/models/mymodel'}
Raises:
If the model creation failed.
|
async def has_started(self):
    """
    Whether the handler has completed all start up processes such as
    establishing the connection, session, link and authentication, and
    is now ready to process messages.

    **This function is now deprecated and will be removed in v2.0+.**

    :rtype: bool
    """
    # pylint: disable=protected-access
    timeout = False
    auth_in_progress = False
    if self._handler._connection.cbs:
        timeout, auth_in_progress = await self._handler._auth.handle_token_async()
    if timeout:
        raise EventHubError("Authorization timeout.")
    if auth_in_progress:
        # Token negotiation still running: not started yet, but no error.
        return False
    if not await self._handler._client_ready_async():
        return False
    return True
|
Whether the handler has completed all start up processes such as
establishing the connection, session, link and authentication, and
is now ready to process messages.
**This function is now deprecated and will be removed in v2.0+.**
:rtype: bool
|
def is_less_than(self, other):
    """
    Ensures :attr:`subject` is less than *other*.
    """
    try:
        unittest_case.assertTrue(self._subject < other)
    except self._catch:
        message = _format("Expected {} to be less than {}", self._subject, other)
        raise self._error_factory(message)
    return ChainInspector(self._subject)
|
Ensures :attr:`subject` is less than *other*.
|
def _parse_entry(self, dom):
    """Parse one CAP alert entry out of a DOM node into a flat dict.

    Every tag in ``self._cap_tags`` is copied into the result (missing
    tags become empty strings). ``cap:geocode`` is special-cased because
    its data arrives as paired valueName/value elements; a ``samecodes``
    list is derived from the numeric FIPS6 values.
    """
    entry = {}
    for tag in self._cap_tags:
        # we need to handle the geocodes a bit differently
        if tag == 'cap:geocode':
            try:
                geotypes = []
                # FIXME: this will parse VTEC and add it to the feed as well, that's both a feature and a bug
                for item in dom.getElementsByTagName('valueName'):
                    geotypes.append(str(item.firstChild.data))
                n = 0
                for geotype in geotypes:
                    try:
                        entry[geotype] = str(dom.getElementsByTagName('value')[n].firstChild.data).split(' ')
                    except AttributeError:
                        pass
                    n = n + 1
            finally:
                # NOTE(review): try/finally with no except — a parsing error
                # above would still propagate after samecodes is populated.
                try:
                    entry['samecodes'] = [x for x in entry['FIPS6'] if str(x).isdigit()]  # handle bad nws data
                except Exception:
                    entry['samecodes'] = []
        else:
            try:
                entry[tag] = dom.getElementsByTagName(tag)[0].firstChild.data
            except AttributeError:
                entry[tag] = ''
    return entry
|
Parse a single CAP alert entry from the DOM into a dict of tag values, with special handling for cap:geocode value pairs.
|
def list_tasks(collector):
    """List the available_tasks"""
    print("Usage: aws_syncr <environment> <task>")
    print("")
    print("Available environments to choose from are")
    print("-----------------------------------------")
    print("")
    config_root = collector.configuration_folder
    for env_name in os.listdir(config_root):
        env_path = os.path.join(config_root, env_name)
        if os.path.isdir(env_path) and not env_name.startswith("."):
            print("\t{0}".format(env_name))
    print("")
    print("Available tasks to choose from are:")
    print("-----------------------------------")
    print("")
    # Sort by label first so tasks of equal name-length stay label-ordered
    # (Python's sort is stable), then by name length for aligned output.
    by_label = sorted(available_actions.items(), key=lambda pair: pair[1].label)
    by_length = sorted(by_label, key=lambda pair: len(pair[0]))
    widest = max(len(task_name) for task_name, _ in by_length)
    for task_name, task in by_length:
        summary = dedent(task.__doc__ or "").strip().split('\n')[0]
        print("\t{0}{1} :-: {2}".format(" " * (widest - len(task_name)), task_name, summary))
    print("")
|
List the available_tasks
|
def fix_windows_stdout_stderr():
    """
    Processes can't write to stdout/stderr on frozen windows apps because they do not exist here
    if process tries it anyway we get a nasty dialog window popping up, so we redirect the streams to a dummy
    see https://github.com/jopohl/urh/issues/370
    """
    # Only applies to frozen builds running on Windows.
    if hasattr(sys, "frozen") and sys.platform == "win32":
        try:
            # Probe whether stdout is actually writable.
            sys.stdout.write("\n")
            sys.stdout.flush()
        except:
            # Bare except appears deliberate: any failure here means the
            # real streams are unusable, so swap in no-op replacements.
            class DummyStream(object):
                def __init__(self): pass
                def write(self, data): pass
                def read(self, data): pass
                def flush(self): pass
                def close(self): pass
            # Replace both the active and the original dunder streams so
            # nothing can reach the broken handles.
            sys.stdout, sys.stderr, sys.stdin = DummyStream(), DummyStream(), DummyStream()
            sys.__stdout__, sys.__stderr__, sys.__stdin__ = DummyStream(), DummyStream(), DummyStream()
|
Processes can't write to stdout/stderr on frozen windows apps because they do not exist here
if process tries it anyway we get a nasty dialog window popping up, so we redirect the streams to a dummy
see https://github.com/jopohl/urh/issues/370
|
def file_hash(fname):
    """
    Calculate the SHA256 hash of a given file.

    Useful for checking if a file has changed or been corrupted.

    Parameters
    ----------
    fname : str
        The name of the file.

    Returns
    -------
    hash : str
        The hex-encoded SHA256 digest of the file's contents.
    """
    # Read in fixed-size chunks so arbitrarily large files never have to
    # fit in memory; iter() with a sentinel stops at EOF (empty bytes).
    chunk_size = 65536
    digest = hashlib.sha256()
    with open(fname, "rb") as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
Calculate the SHA256 hash of a given file.
Useful for checking if a file has changed or been corrupted.
Parameters
----------
fname : str
The name of the file.
Returns
-------
hash : str
The hash of the file.
Examples
--------
>>> fname = "test-file-for-hash.txt"
>>> with open(fname, "w") as f:
... __ = f.write("content of the file")
>>> print(file_hash(fname))
0fc74468e6a9a829f103d069aeb2bb4f8646bad58bf146bb0e3379b759ec4a00
>>> import os
>>> os.remove(fname)
|
def new_multidigraph(self, name, data=None, **attr):
    """Return a new instance of type MultiDiGraph, initialized with the given
    data if provided.

    :arg name: a name for the graph
    :arg data: dictionary or NetworkX graph object providing initial state
    :arg attr: extra keyword attributes forwarded to the MultiDiGraph
    :returns: the newly created MultiDiGraph instance
    """
    self._init_graph(name, 'MultiDiGraph')
    mdg = MultiDiGraph(self, name, data, **attr)
    # Register the graph so it can be looked up by name later.
    self._graph_objs[name] = mdg
    return mdg
|
Return a new instance of type MultiDiGraph, initialized with the given
data if provided.
:arg name: a name for the graph
:arg data: dictionary or NetworkX graph object providing initial state
|
def _set_raw_return(self, sep):
    """Set the output raw return section

    :param sep: the separator of current style
    :returns: the raw docstring text for the return/rtype section,
        rendered in the configured output style (numpydoc, google,
        groups, or the default key/value style)
    """
    raw = ''
    # --- numpydoc output style ---
    if self.dst.style['out'] == 'numpydoc':
        raw += '\n'
        spaces = ' ' * 4
        # Indent continuation lines of a description under the section.
        with_space = lambda s: '\n'.join([self.docs['out']['spaces'] + spaces + l.lstrip() if i > 0 else l for i, l in enumerate(s.splitlines())])
        raw += self.dst.numpydoc.get_key_section_header('return', self.docs['out']['spaces'])
        if self.docs['out']['rtype']:
            rtype = self.docs['out']['rtype']
        else:
            rtype = 'type'
        # case of several returns
        if type(self.docs['out']['return']) is list:
            for ret_elem in self.docs['out']['return']:
                # if tuple (name, desc, rtype) else string desc
                if type(ret_elem) is tuple and len(ret_elem) == 3:
                    rtype = ret_elem[2]
                    if rtype is None:
                        rtype = ''
                    raw += self.docs['out']['spaces']
                    if ret_elem[0]:
                        raw += ret_elem[0] + ' : '
                    raw += rtype + '\n' + self.docs['out']['spaces'] + spaces + with_space(ret_elem[1]).strip() + '\n'
                else:
                    # There can be a problem
                    raw += self.docs['out']['spaces'] + rtype + '\n'
                    raw += self.docs['out']['spaces'] + spaces + with_space(str(ret_elem)).strip() + '\n'
        # case of a unique return
        elif self.docs['out']['return'] is not None:
            raw += self.docs['out']['spaces'] + rtype
            raw += '\n' + self.docs['out']['spaces'] + spaces + with_space(self.docs['out']['return']).strip() + '\n'
    # --- google output style ---
    elif self.dst.style['out'] == 'google':
        raw += '\n'
        spaces = ' ' * 2
        with_space = lambda s: '\n'.join([self.docs['out']['spaces'] + spaces +\
                                          l.lstrip() if i > 0 else\
                                          l for i, l in enumerate(s.splitlines())])
        raw += self.dst.googledoc.get_key_section_header('return', self.docs['out']['spaces'])
        if self.docs['out']['rtype']:
            rtype = self.docs['out']['rtype']
        else:
            rtype = None
        # case of several returns
        if type(self.docs['out']['return']) is list:
            for ret_elem in self.docs['out']['return']:
                # if tuple (name=None, desc, rtype) else string desc
                if type(ret_elem) is tuple and len(ret_elem) == 3:
                    rtype = ret_elem[2]
                    if rtype is None:
                        rtype = ''
                    raw += self.docs['out']['spaces'] + spaces
                    raw += rtype + ': ' + with_space(ret_elem[1]).strip() + '\n'
                else:
                    # There can be a problem
                    if rtype:
                        raw += self.docs['out']['spaces'] + spaces + rtype + ': '
                        raw += with_space(str(ret_elem)).strip() + '\n'
                    else:
                        raw += self.docs['out']['spaces'] + spaces + with_space(str(ret_elem)).strip() + '\n'
        # case of a unique return
        elif self.docs['out']['return'] is not None:
            if rtype:
                raw += self.docs['out']['spaces'] + spaces + rtype + ': '
                raw += with_space(self.docs['out']['return']).strip() + '\n'
            else:
                raw += self.docs['out']['spaces'] + spaces + with_space(self.docs['out']['return']).strip() + '\n'
    elif self.dst.style['out'] == 'groups':
        pass
    # --- default key/value (reST-like) output style ---
    else:
        with_space = lambda s: '\n'.join([self.docs['out']['spaces'] + l if i > 0 else l for i, l in enumerate(s.splitlines())])
        if self.docs['out']['return']:
            if not self.docs['out']['params']:
                raw += '\n'
            raw += self.docs['out']['spaces'] + self.dst.get_key('return', 'out') + sep + with_space(self.docs['out']['return'].rstrip()).strip() + '\n'
        if self.docs['out']['rtype']:
            if not self.docs['out']['params']:
                raw += '\n'
            raw += self.docs['out']['spaces'] + self.dst.get_key('rtype', 'out') + sep + self.docs['out']['rtype'].rstrip() + '\n'
    return raw
|
Set the output raw return section
:param sep: the separator of current style
|
def save(self, obj, run_id):
    """
    Save a workflow

    obj - instance of a workflow to save
    run_id - unique id to give the run
    """
    # Derive the storage key from the workflow and run id, then persist.
    id_code = self.generate_save_identifier(obj, run_id)
    self.store.save(obj, id_code)
|
Save a workflow
obj - instance of a workflow to save
run_id - unique id to give the run
|
def similar_movies(self, **kwargs):
    """
    Get the similar movies for a specific movie id.

    Args:
        page: (optional) Minimum value of 1. Expected value is an integer.
        language: (optional) ISO 639-1 code.
        append_to_response: (optional) Comma separated, any movie method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_id_path('similar_movies')
    # kwargs are passed straight through as query parameters.
    response = self._GET(path, kwargs)
    # Mirror the response fields onto this object for attribute access.
    self._set_attrs_to_values(response)
    return response
|
Get the similar movies for a specific movie id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any movie method.
Returns:
A dict representation of the JSON returned from the API.
|
def create_from_root(self, root_source):
    """Return a populated ObjectRoot built from the source root's
    configuration, versions, methods and types.

    :param root_source: source root holding configuration and versions
    :returns: hydrated ObjectRoot with change statuses computed
    """
    root_dto = ObjectRoot()
    root_dto.configuration = root_source.configuration
    root_dto.versions = [Version(x) for x in root_source.versions.values()]
    for version in sorted(root_source.versions.values()):
        hydrator = Hydrator(version, root_source.versions, root_source.versions[version.name].types)
        for method in version.methods.values():
            hydrator.hydrate_method(root_dto, root_source, method)
        # "type_": the original loop variable shadowed the builtin `type`.
        for type_ in version.types.values():
            hydrator.hydrate_type(root_dto, root_source, type_)
    self.define_changes_status(root_dto)
    return root_dto
|
Return a populated ObjectRoot built from the source root's dictionary data
|
def create(self, request):
    """ Read the GeoJSON feature collection from the request body and
    create new objects in the database.

    Features that carry an existing id update the matching object
    instead of creating a new one. Responds 201 with the resulting
    collection, 405 for read-only views, 400 for non-collection bodies.
    """
    if self.readonly:
        return HTTPMethodNotAllowed(headers={'Allow': 'GET, HEAD'})
    collection = loads(request.body, object_hook=GeoJSON.to_instance)
    if not isinstance(collection, FeatureCollection):
        return HTTPBadRequest()
    session = self.Session()
    objects = []
    for feature in collection.features:
        create = False
        obj = None
        # Look up an existing object when the feature supplies an id.
        if hasattr(feature, 'id') and feature.id is not None:
            obj = session.query(self.mapped_class).get(feature.id)
        if self.before_create is not None:
            # User-supplied hook, invoked before each insert/update.
            self.before_create(request, feature, obj)
        if obj is None:
            obj = self.mapped_class(feature)
            create = True
        else:
            obj.__update__(feature)
        if create:
            session.add(obj)
        objects.append(obj)
    session.flush()
    collection = FeatureCollection(objects) if len(objects) > 0 else None
    request.response.status_int = 201
    return collection
|
Read the GeoJSON feature collection from the request body and
create new objects in the database.
|
def is_valid(self):
    """Return True if all widget contents are valid.

    Shows a critical message box for the first invalid field found and
    stops checking (returns False immediately).
    """
    for lineedit in self.lineedits:
        # Only validate fields that registered a validator and are enabled.
        if lineedit in self.validate_data and lineedit.isEnabled():
            validator, invalid_msg = self.validate_data[lineedit]
            text = to_text_string(lineedit.text())
            if not validator(text):
                QMessageBox.critical(self, self.get_name(),
                                     "%s:<br><b>%s</b>" % (invalid_msg, text),
                                     QMessageBox.Ok)
                return False
    return True
|
Return True if all widget contents are valid
|
def guess_codec(file, errors="strict", require_char=False):
    """Look at file contents and guess its correct encoding.

    File must be open in binary mode and positioned at offset 0. If BOM
    record is present then it is assumed to be UTF-8 or UTF-16 encoded
    file. GEDCOM header is searched for CHAR record and encoding name
    is extracted from it, if BOM record is present then CHAR record
    must match BOM-defined encoding.

    :param file: File object, must be open in binary mode.
    :param str errors: Controls error handling behavior during string
        decoding, accepts same values as standard `codecs.decode` method.
    :param bool require_char: If True then exception is thrown if CHAR
        record is not found in a header, if False and CHAR is not in the
        header then codec determined from BOM or "gedcom" is returned.
    :returns: Tuple (codec_name, bom_size)
    :raises: :py:class:`CodecError` when codec name in file is unknown or
        when codec name in file contradicts codec determined from BOM.
    :raises: :py:class:`UnicodeDecodeError` when codec fails to decode
        input lines and `errors` is set to "strict" (default).
    """
    # mapping of gedcom character set specifiers to Python encoding names
    gedcom_char_to_codec = {
        'ansel': 'gedcom',
    }
    # check BOM first
    bom_codec = check_bom(file)
    bom_size = file.tell()
    # "gedcom" is the fallback codec when no BOM is present.
    codec = bom_codec or 'gedcom'
    # scan header until CHAR or end of header
    while True:
        # this stops at '\n'
        line = file.readline()
        if not line:
            raise IOError("Unexpected EOF while reading GEDCOM header")
        # do not decode bytes to strings here, reason is that some
        # stupid apps split CONC record at byte level (in middle of
        # of multi-byte characters). This implies that we can only
        # work with encodings that have ASCII as single-byte subset.
        line = line.lstrip().rstrip(b"\r\n")
        words = line.split()
        # A level-0 record other than HEAD means the header has ended.
        if len(words) >= 2 and words[0] == b"0" and words[1] != b"HEAD":
            # past header but have not seen CHAR
            if require_char:
                raise CodecError("GEDCOM header does not have CHAR record")
            else:
                break
        elif len(words) >= 3 and words[0] == b"1" and words[1] == b"CHAR":
            try:
                encoding = words[2].decode(codec, errors)
                encoding = gedcom_char_to_codec.get(encoding.lower(),
                                                    encoding.lower())
                new_codec = codecs.lookup(encoding).name
            except LookupError:
                raise CodecError("Unknown codec name {0}".format(encoding))
            if bom_codec is None:
                codec = new_codec
            elif new_codec != bom_codec:
                raise CodecError("CHAR codec {0} is different from BOM "
                                 "codec {1}".format(new_codec, bom_codec))
            break
    return codec, bom_size
|
Look at file contents and guess its correct encoding.
File must be open in binary mode and positioned at offset 0. If BOM
record is present then it is assumed to be UTF-8 or UTF-16 encoded
file. GEDCOM header is searched for CHAR record and encoding name
is extracted from it, if BOM record is present then CHAR record
must match BOM-defined encoding.
:param file: File object, must be open in binary mode.
:param str errors: Controls error handling behavior during string
decoding, accepts same values as standard `codecs.decode` method.
:param bool require_char: If True then exception is thrown if CHAR
record is not found in a header, if False and CHAR is not in the
header then codec determined from BOM or "gedcom" is returned.
:returns: Tuple (codec_name, bom_size)
:raises: :py:class:`CodecError` when codec name in file is unknown or
when codec name in file contradicts codec determined from BOM.
:raises: :py:class:`UnicodeDecodeError` when codec fails to decode
input lines and `errors` is set to "strict" (default).
|
def _percentile(N, percent, key=lambda x:x):
"""
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
"""
if not N:
return None
k = (len(N)-1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0+d1
|
Find the percentile of a list of values.
@parameter N - is a list of values. Note N MUST BE already sorted.
@parameter percent - a float value from 0.0 to 1.0.
@parameter key - optional key function to compute value from each element of N.
@return - the percentile of the values
|
def argument(self) -> bool:
    """Parse statement argument.

    Return ``True`` if the argument is followed by block of substatements.

    :raises UnexpectedInput: if the argument is malformed or followed by
        an unexpected character.
    """
    # "nxt" instead of "next": the original shadowed the builtin next().
    nxt = self.peek()
    if nxt == "'":
        quoted = True
        self.sq_argument()
    elif nxt == '"':
        quoted = True
        self.dq_argument()
    elif self._arg == "":
        quoted = False
        self.unq_argument()
    else:
        raise UnexpectedInput(self, "single or double quote")
    self.opt_separator()
    nxt = self.peek()
    if nxt == ";":
        return False
    if nxt == "{":
        return True
    elif quoted and nxt == "+":
        # '+' concatenates quoted strings: consume it and keep parsing.
        self.offset += 1
        self.opt_separator()
        return self.argument()
    else:
        raise UnexpectedInput(self, "';', '{'" +
                              (" or '+'" if quoted else ""))
|
Parse statement argument.
Return ``True`` if the argument is followed by block of substatements.
|
def load_bernoulli_mnist_dataset(directory, split_name):
    """Returns Hugo Larochelle's binary static MNIST tf.data.Dataset.

    Parameters:
        directory: local directory the ``.amat`` file is downloaded into.
        split_name: split name substituted into ``FILE_TEMPLATE`` to form
            the file name to download.

    Returns:
        A ``tf.data.Dataset`` of ``(image, label)`` pairs, where ``image``
        is a float32 tensor of shape ``[28, 28, 1]`` and ``label`` is a
        constant 0 (the binarized dataset carries no class labels).
    """
    amat_file = download(directory, FILE_TEMPLATE.format(split=split_name))
    dataset = tf.data.TextLineDataset(amat_file)

    # Named helper instead of a lambda assignment (PEP 8 E731); each line of
    # the .amat file is a whitespace-separated sequence of "0"/"1" tokens.
    def _str_to_arr(string):
        return np.array([c == b"1" for c in string.split()])

    def _parser(s):
        booltensor = tf.compat.v1.py_func(_str_to_arr, [s], tf.bool)
        reshaped = tf.reshape(booltensor, [28, 28, 1])
        return tf.cast(reshaped, dtype=tf.float32), tf.constant(0, tf.int32)

    return dataset.map(_parser)
|
Returns Hugo Larochelle's binary static MNIST tf.data.Dataset.
|
def lowpass(ts, cutoff_hz, order=3):
    """Forward-backward (zero-phase) Butterworth low-pass filter.

    Parameters:
        ts: Timeseries, 1D or 2D with channels along axis 1; must expose
            ``tspan`` (time axis) and ``labels``.
        cutoff_hz: cutoff frequency in Hz.
        order: order of the Butterworth filter.

    Returns:
        Filtered Timeseries of the same shape, tspan and labels.

    Raises:
        ValueError: if the designed filter would be unstable.
    """
    orig_ndim = ts.ndim
    # Fix: the original used `ts.ndim is 1`, an identity comparison of ints
    # which is implementation-dependent (and a SyntaxWarning on Python 3.8+).
    if ts.ndim == 1:
        ts = ts[:, np.newaxis]
    channels = ts.shape[1]
    # Sampling rate inferred from the time span (assumes uniform sampling
    # -- TODO confirm against Timeseries contract).
    fs = (len(ts) - 1.0) / (ts.tspan[-1] - ts.tspan[0])
    nyq = 0.5 * fs
    cutoff = cutoff_hz / nyq
    b, a = signal.butter(order, cutoff, btype='low')
    # Stability check: all poles must lie strictly inside the unit circle.
    if not np.all(np.abs(np.roots(a)) < 1.0):
        raise ValueError('Filter will not be stable with these values.')
    dtype = ts.dtype
    output = np.zeros((len(ts), channels), dtype)
    for i in range(channels):
        # filtfilt applies the filter forward then backward for zero phase.
        output[:, i] = signal.filtfilt(b, a, ts[:, i])
    if orig_ndim == 1:
        output = output[:, 0]
    return Timeseries(output, ts.tspan, labels=ts.labels)
|
forward-backward butterworth low-pass filter
|
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
    r"""Calculate the supercell composite parameter (SCP).

    The SCP identifies environments favorable for supercell development,
    following [Thompson2004]_:

    .. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
       \frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
       \frac{\text{Effective Shear}}{20 \text{m/s}}

    The shear term is zeroed below 10 m/s and capped at 1 above 20 m/s.

    Parameters
    ----------
    mucape : `pint.Quantity`
        Most-unstable CAPE
    effective_storm_helicity : `pint.Quantity`
        Effective-layer storm-relative helicity
    effective_shear : `pint.Quantity`
        Effective bulk shear

    Returns
    -------
    array-like
        supercell composite
    """
    # Cap the shear at 20 m/s, zero it below 10 m/s, then normalize so the
    # term lies in [0, 1] (or exactly 0).
    shear_term = np.clip(atleast_1d(effective_shear), None, 20 * units('m/s'))
    shear_term[shear_term < 10 * units('m/s')] = 0 * units('m/s')
    shear_term = shear_term / (20 * units('m/s'))
    cape_term = mucape / (1000 * units('J/kg'))
    srh_term = effective_storm_helicity / (50 * units('m^2/s^2'))
    return (cape_term * srh_term * shear_term).to('dimensionless')
|
r"""Calculate the supercell composite parameter.
The supercell composite parameter is designed to identify
environments favorable for the development of supercells,
and is calculated using the formula developed by
[Thompson2004]_:
.. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
\frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
\frac{\text{Effective Shear}}{20 \text{m/s}}
The effective_shear term is set to zero below 10 m/s and
capped at 1 when effective_shear exceeds 20 m/s.
Parameters
----------
mucape : `pint.Quantity`
Most-unstable CAPE
effective_storm_helicity : `pint.Quantity`
Effective-layer storm-relative helicity
effective_shear : `pint.Quantity`
Effective bulk shear
Returns
-------
array-like
supercell composite
|
def sleep(self, ms=1):
    """
    Pauses the current green thread for *ms* milliseconds::

        p = h.pipe()

        @h.spawn
        def _():
            p.send('1')
            h.sleep(50)
            p.send('2')

        p.recv()  # returns '1'
        p.recv()  # returns '2' after 50 ms
    """
    # Register the current green thread to be resumed after *ms*
    # milliseconds, then yield control to the hub's event loop.
    current = getcurrent()
    self.scheduled.add(ms, current)
    self.loop.switch()
|
Pauses the current green thread for *ms* milliseconds::
p = h.pipe()
@h.spawn
def _():
p.send('1')
h.sleep(50)
p.send('2')
p.recv() # returns '1'
p.recv() # returns '2' after 50 ms
|
def save_log_to_html(self):
    """Write the analysis log out as an HTML file to ``self.log_path``.

    Raises:
        InvalidParameterError: if ``self.log_path`` has not been set.
    """
    # Assemble the document from pieces and join once instead of
    # repeated string concatenation.
    pieces = [html_header()]
    pieces.append(
        '<img src="file:///%s/img/logos/inasafe-logo-url.png" '
        'title="InaSAFE Logo" alt="InaSAFE Logo" />' % resources_path())
    pieces.append(
        '<h5 class="info"><i class="icon-info-sign icon-white"></i> '
        '%s</h5>' % self.tr('Analysis log'))
    for message in self.dynamic_messages_log:
        pieces.append("%s\n" % message.to_html())
    pieces.append(html_footer())
    if self.log_path is not None:
        html_to_file(''.join(pieces), self.log_path)
    else:
        raise InvalidParameterError(self.tr('log_path is not set'))
|
Helper to write the log out as an html file.
|
def instantiate(self, params):
    """
    Allows you to fetch the map tiles of a created map

    :param params: The json with the styling info for the named map
    :type params: dict

    :return:

    :raise: CartoException
    """
    try:
        self.send(self.Meta.collection_endpoint, "POST", json=params)
    except CartoRateLimitException:
        # Propagate rate-limit errors untouched so callers can retry;
        # a bare `raise` (instead of `raise e`) preserves the traceback.
        raise
    except Exception as e:
        # Wrap any other failure, chaining the original exception for
        # easier debugging.
        raise CartoException(e) from e
|
Allows you to fetch the map tiles of a created map
:param params: The json with the styling info for the named map
:type params: dict
:return:
:raise: CartoException
|
def get_movielens(variant="20m"):
    """Fetch a MovieLens dataset, downloading and caching it locally.

    Parameters
    ---------
    variant : string
        Which version of the movielens dataset to download. Should be one of '20m', '10m',
        '1m' or '100k'.

    Returns
    -------
    movies : ndarray
        An array of the movie titles.
    ratings : csr_matrix
        A sparse matrix where the row is the movieId, the column is the userId and the value is
        the rating.
    """
    filename = "movielens_%s.hdf5" % variant
    path = os.path.join(_download.LOCAL_CACHE_DIR, filename)
    if os.path.isfile(path):
        log.info("Using cached dataset at '%s'", path)
    else:
        log.info("Downloading dataset to '%s'", path)
        _download.download_file(URL_BASE + filename, path)
    # Reconstruct the sparse ratings matrix from its raw CSR components.
    with h5py.File(path, 'r') as f:
        group = f.get('movie_user_ratings')
        ratings = csr_matrix((group.get('data'),
                              group.get('indices'),
                              group.get('indptr')))
        return np.array(f['movie']), ratings
|
Gets movielens datasets
Parameters
---------
variant : string
Which version of the movielens dataset to download. Should be one of '20m', '10m',
'1m' or '100k'.
Returns
-------
movies : ndarray
An array of the movie titles.
ratings : csr_matrix
A sparse matrix where the row is the movieId, the column is the userId and the value is
the rating.
|
def infer_gtr(self, print_raw=False, marginal=False, normalized_rate=True,
              fixed_pi=None, pc=5.0, **kwargs):
    """
    Calculates a GTR model given the multiple sequence alignment and the tree.
    It performs ancestral sequence inference (joint or marginal), followed by
    the branch lengths optimization. Then, the numbers of mutations are counted
    in the optimal tree and related to the time within which the mutation happened.
    From these statistics, the relative state transition probabilities are inferred,
    and the transition matrix is computed.

    The result is used to construct the new GTR model of type 'custom'.
    The model is assigned to the TreeAnc and is used in subsequent analysis.

    Parameters
    -----------
    print_raw : bool
        If True, print the inferred GTR model
    marginal : bool
        If True, use marginal sequence reconstruction
    normalized_rate : bool
        If True, sets the mutation rate prefactor to 1.0.
    fixed_pi : np.array
        Provide the equilibrium character concentrations.
        If None is passed, the concentrations will be inferred from the alignment.
    pc: float
        Number of pseudo counts to use in gtr inference
    **kwargs
        Passed through to the ancestral reconstruction routine.

    Returns
    -------
    gtr : GTR
        The inferred GTR model, or ``ttconf.ERROR`` when tree or alignment
        is missing.
    """
    # decide which type of the Maximum-likelihood reconstruction use
    # (marginal) or (joint)
    if marginal:
        _ml_anc = self._ml_anc_marginal
    else:
        _ml_anc = self._ml_anc_joint
    self.logger("TreeAnc.infer_gtr: inferring the GTR model from the tree...", 1)
    if (self.tree is None) or (self.aln is None):
        self.logger("TreeAnc.infer_gtr: ERROR, alignment or tree are missing", 0)
        return ttconf.ERROR
    _ml_anc(final=True, **kwargs) # call one of the reconstruction types
    alpha = list(self.gtr.alphabet)
    n=len(alpha)
    # matrix of mutations n_{ij}: i = derived state, j=ancestral state
    nij = np.zeros((n,n))
    # Ti accumulates the total time each character state is observed.
    Ti = np.zeros(n)
    self.logger("TreeAnc.infer_gtr: counting mutations...", 2)
    for node in self.tree.find_clades():
        if hasattr(node,'mutations'):
            for a,pos, d in node.mutations:
                i,j = alpha.index(d), alpha.index(a)
                nij[i,j]+=1
                # Split the branch's time contribution: half attributed to the
                # ancestral state, half removed from the derived state (the
                # full branch length is added back in the cseq loop below).
                Ti[j] += 0.5*self._branch_length_to_gtr(node)
                Ti[i] -= 0.5*self._branch_length_to_gtr(node)
            for ni,nuc in enumerate(node.cseq):
                i = alpha.index(nuc)
                # Weight by multiplicity: cseq is a compressed sequence where
                # each position can represent several alignment columns.
                Ti[i] += self._branch_length_to_gtr(node)*self.multiplicity[ni]
    self.logger("TreeAnc.infer_gtr: counting mutations...done", 3)
    if print_raw:
        print('alphabet:',alpha)
        print('n_ij:', nij, nij.sum())
        print('T_i:', Ti, Ti.sum())
    # Character counts at the root, used by GTR.infer for the equilibrium
    # frequencies when fixed_pi is not given.
    root_state = np.array([np.sum((self.tree.root.cseq==nuc)*self.multiplicity) for nuc in alpha])
    self._gtr = GTR.infer(nij, Ti, root_state, fixed_pi=fixed_pi, pc=pc,
                          alphabet=self.gtr.alphabet, logger=self.logger,
                          prof_map = self.gtr.profile_map)
    if normalized_rate:
        self.logger("TreeAnc.infer_gtr: setting overall rate to 1.0...", 2)
        self._gtr.mu=1.0
    return self._gtr
|
Calculates a GTR model given the multiple sequence alignment and the tree.
It performs ancestral sequence inference (joint or marginal), followed by
branch-length optimization. Then, the numbers of mutations are counted
in the optimal tree and related to the time within which each mutation happened.
From these statistics, the relative state transition probabilities are inferred,
and the transition matrix is computed.
The result is used to construct the new GTR model of type 'custom'.
The model is assigned to the TreeAnc and is used in subsequent analysis.
Parameters
-----------
print_raw : bool
If True, print the inferred GTR model
marginal : bool
If True, use marginal sequence reconstruction
normalized_rate : bool
If True, sets the mutation rate prefactor to 1.0.
fixed_pi : np.array
Provide the equilibrium character concentrations.
If None is passed, the concentrations will be inferred from the alignment.
pc: float
Number of pseudo counts to use in gtr inference
Returns
-------
gtr : GTR
The inferred GTR model
|
def smoothed_joint(seg0, seg1, maxjointsize=3, tightness=1.99):
    """ See Andy's notes on
    Smoothing Bezier Paths for an explanation of the method.

    Input: two segments seg0, seg1 such that seg0.end==seg1.start, and
    maxjointsize, a positive number, and tightness, a number in (0, 2).

    Output: seg0_trimmed, elbow, seg1_trimmed, where elbow is a list of cubic
    bezier objects that smoothly connects seg0_trimmed and seg1_trimmed.
    """
    assert seg0.end == seg1.start
    assert 0 < maxjointsize
    assert 0 < tightness < 2
    # sgn = lambda x:x/abs(x)
    q = seg0.end  # the joint point shared by both segments

    # Unit tangents at the joint.  Fall back to a point slightly inside the
    # segment when the tangent is undefined exactly at the endpoint.
    # Fix: the original used bare `except:` clauses, which also swallow
    # SystemExit/KeyboardInterrupt; catch only genuine computation errors.
    try:
        v = seg0.unit_tangent(1)
    except Exception:
        v = seg0.unit_tangent(1 - 1e-4)
    try:
        w = seg1.unit_tangent(0)
    except Exception:
        w = seg1.unit_tangent(1e-4)

    max_a = maxjointsize / 2
    # Keep the elbow small relative to both segments.
    a = min(max_a, min(seg1.length(), seg0.length()) / 20)
    if isinstance(seg0, Line) and isinstance(seg1, Line):
        '''
        Note: Letting
        c(t) = elbow.point(t), v= the unit tangent of seg0 at 1, w = the
        unit tangent vector of seg1 at 0,
        Q = seg0.point(1) = seg1.point(0), and a,b>0 some constants.
        The elbow will be the unique CubicBezier, c, such that
        c(0)= Q-av, c(1)=Q+aw, c'(0) = bv, and c'(1) = bw
        where a and b are derived above/below from tightness and
        maxjointsize.
        '''
        # det = v.imag*w.real-v.real*w.imag
        # Note:
        # If det is negative, the curvature of elbow is negative for all
        # real t if and only if b/a > 6
        # If det is positive, the curvature of elbow is negative for all
        # real t if and only if b/a < 2
        # if det < 0:
        #     b = (6+tightness)*a
        # elif det > 0:
        #     b = (2-tightness)*a
        # else:
        #     raise Exception("seg0 and seg1 are parallel lines.")
        b = (2 - tightness)*a
        elbow = CubicBezier(q - a*v, q - (a - b/3)*v, q + (a - b/3)*w, q + a*w)
        seg0_trimmed = Line(seg0.start, elbow.start)
        seg1_trimmed = Line(elbow.end, seg1.end)
        return seg0_trimmed, [elbow], seg1_trimmed
    elif isinstance(seg0, Line):
        '''
        Note: Letting
        c(t) = elbow.point(t), v= the unit tangent of seg0 at 1,
        w = the unit tangent vector of seg1 at 0,
        Q = seg0.point(1) = seg1.point(0), and a,b>0 some constants.
        The elbow will be the unique CubicBezier, c, such that
        c(0)= Q-av, c(1)=Q, c'(0) = bv, and c'(1) = bw
        where a and b are derived above/below from tightness and
        maxjointsize.
        '''
        # det = v.imag*w.real-v.real*w.imag
        # Note: If g has the same sign as det, then the curvature of elbow is
        # negative for all real t if and only if b/a < 4
        b = (4 - tightness)*a
        # g = sgn(det)*b
        elbow = CubicBezier(q - a*v, q + (b/3 - a)*v, q - b/3*w, q)
        seg0_trimmed = Line(seg0.start, elbow.start)
        return seg0_trimmed, [elbow], seg1
    elif isinstance(seg1, Line):
        # Mirror of the previous case: solve on the reversed segments and
        # reverse the result back.
        args = (seg1.reversed(), seg0.reversed(), maxjointsize, tightness)
        rseg1_trimmed, relbow, rseg0 = smoothed_joint(*args)
        elbow = relbow[0].reversed()
        return seg0, [elbow], rseg1_trimmed.reversed()
    else:
        # Both segments are curves: find a point on each seg that is about
        # a/2 away from joint, make a line between them, and recurse on the
        # curve/line and line/line sub-joints.
        t0 = seg0.ilength(seg0.length() - a/2)
        t1 = seg1.ilength(a/2)
        seg0_trimmed = seg0.cropped(0, t0)
        seg1_trimmed = seg1.cropped(t1, 1)
        seg0_line = Line(seg0_trimmed.end, q)
        seg1_line = Line(q, seg1_trimmed.start)
        args = (seg0_trimmed, seg0_line, maxjointsize, tightness)
        dummy, elbow0, seg0_line_trimmed = smoothed_joint(*args)
        args = (seg1_line, seg1_trimmed, maxjointsize, tightness)
        seg1_line_trimmed, elbow1, dummy = smoothed_joint(*args)
        args = (seg0_line_trimmed, seg1_line_trimmed, maxjointsize, tightness)
        seg0_line_trimmed, elbowq, seg1_line_trimmed = smoothed_joint(*args)
        elbow = elbow0 + [seg0_line_trimmed] + elbowq + [seg1_line_trimmed] + elbow1
        return seg0_trimmed, elbow, seg1_trimmed
|
See Andy's notes on
Smoothing Bezier Paths for an explanation of the method.
Input: two segments seg0, seg1 such that seg0.end==seg1.start, and
jointsize, a positive number
Output: seg0_trimmed, elbow, seg1_trimmed, where elbow is a cubic bezier
object that smoothly connects seg0_trimmed and seg1_trimmed.
|
def team(self, team_id):
    """Returns Team object with information about team specified by
    ``team_id``.

    :param int team_id: (required), unique id for the team
    :returns: :class:`Team <Team>`, or ``None`` when ``team_id`` is not
        positive or the lookup fails
    """
    data = None
    if int(team_id) > 0:
        url = self._build_url('teams', str(team_id))
        data = self._json(self._get(url), 200)
    return Team(data, self._session) if data else None
|
Returns Team object with information about team specified by
``team_id``.
:param int team_id: (required), unique id for the team
:returns: :class:`Team <Team>`
|
def greedy_mapping(self, reference, hypothesis, uem=None):
    """Greedily map hypothesis labels onto reference labels.

    Parameters
    ----------
    reference : Annotation
    hypothesis : Annotation
        Reference and hypothesis diarization
    uem : Timeline
        Evaluation map

    Returns
    -------
    mapping : dict
        Mapping between hypothesis (key) and reference (value) labels
    """
    if uem:
        # Restrict both annotations to the evaluation map first.
        reference, hypothesis = self.uemify(reference, hypothesis, uem=uem)
    return self.mapper_(hypothesis, reference)
|
Greedy label mapping
Parameters
----------
reference : Annotation
hypothesis : Annotation
Reference and hypothesis diarization
uem : Timeline
Evaluation map
Returns
-------
mapping : dict
Mapping between hypothesis (key) and reference (value) labels
|
def slice_by_size(seq, size):
    """Slice a sequence into chunks, yielded as tuples of at most `size` items.

    The last chunk may be shorter than `size`; with ``size <= 0`` nothing
    is yielded. Unlike the original zip/sentinel implementation, this does
    not compare elements against a padding sentinel with ``in`` (which
    invokes ``==`` on every element and breaks for items such as numpy
    arrays), and it needs no module-level ``null`` sentinel.
    """
    if size <= 0:
        # matches the original: zip over zero iterables yields nothing
        return
    iterator = iter(seq)
    while True:
        chunk = tuple(itertools.islice(iterator, size))
        if not chunk:
            return
        yield chunk
|
Slice a sequence into chunks, return as a generation of chunks with `size`.
|
def _save_params(self):
    """
    Saves model parameters at the current checkpoint and optionally cleans
    up older parameter files to save disk space.
    """
    # Persist current parameters under the checkpoint-specific filename.
    self.model.save_params_to_file(self.current_params_fname)
    # Prune old parameter files; the best checkpoint and (optionally) the
    # initialization are passed along so the helper can preserve them --
    # presumably; confirm in utils.cleanup_params_files.
    utils.cleanup_params_files(
        self.model.output_dir,
        self.max_params_files_to_keep,
        self.state.checkpoint,
        self.state.best_checkpoint,
        self.keep_initializations,
    )
|
Saves model parameters at current checkpoint and optionally cleans up older parameter files to save disk space.
|
def remove_from_model(self, remove_orphans=False):
    """Remove this reaction from its model.

    All associations between the reaction and the associated model,
    metabolites and genes are removed. The change is reverted upon exit
    when using the model as a context.

    Parameters
    ----------
    remove_orphans : bool
        Also remove orphaned genes and metabolites from the model.
    """
    # Delegate to the model, which owns all cross-references.
    self._model.remove_reactions([self], remove_orphans=remove_orphans)
|
Removes the reaction from a model.
This removes all associations between a reaction the associated
model, metabolites and genes.
The change is reverted upon exit when using the model as a context.
Parameters
----------
remove_orphans : bool
Remove orphaned genes and metabolites from the model as well
|
def get_alarms_list(self, num_items=100, params=None):
    """
    Get alarms as list of dictionaries

    :param int num_items: Max items to retrieve
    :param dict params: Additional params dictionary according to:
        https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/alarms
    :returns list: list of alarms, or None when `params` contains keys
        outside VALID_ALARM_PARAMS
    """
    if params:
        invalid = set(params.keys()) - VALID_ALARM_PARAMS
        if invalid:
            # Fix: the original string was missing its f-prefix, so the
            # braces were logged literally; use lazy %-style logging instead.
            self.log.error("Invalid alarm query parameters: %s", invalid)
            return None
    return self._retrieve_items(item_type="alarms", num_items=num_items, params=params)
|
Get alarms as list of dictionaries
:param int num_items: Max items to retrieve
:param dict params: Additional params dictionary according to:
https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/alarms
:returns list: list of alarms
|
def chatToId(url):
    """
    Extract the conversation ID from a conversation URL.

    Matches addresses containing ``conversations/<chat>``.

    Args:
        url (str): Skype API URL

    Returns:
        str: extracted identifier, or ``None`` when the URL contains no
        conversation component
    """
    found = re.search(r"conversations/([0-9]+:[^/]+)", url)
    if found:
        return found.group(1)
    return None
|
Extract the conversation ID from a conversation URL.
Matches addresses containing ``conversations/<chat>``.
Args:
url (str): Skype API URL
Returns:
str: extracted identifier
|
def generic_filename(path):
    '''
    Extract the filename component of the given path OS-independently,
    splitting on every known path separator.

    :param path: path
    :return: filename
    :rtype: str or unicode (depending on given path)
    '''
    filename = path
    for separator in common_path_separators:
        if separator in filename:
            # keep only the part after the last occurrence of this separator
            filename = filename.rsplit(separator, 1)[-1]
    return filename
|
Extract the filename of a given path OS-independently, taking care of known path
separators.
:param path: path
:return: filename
:rtype: str or unicode (depending on given path)
|
def parse(self, lines):
    """Parse the input lines from a robots.txt file.

    We allow that a user-agent: line is not preceded by
    one or more blank lines.
    """
    # states:
    # 0: start state
    # 1: saw user-agent line
    # 2: saw an allow or disallow line
    state = 0
    entry = Entry()
    for line in lines:
        if not line:
            # A blank line ends the current record: discard an entry that
            # has only user-agents (state 1), or commit a complete one
            # (state 2).
            if state == 1:
                entry = Entry()
                state = 0
            elif state == 2:
                self._add_entry(entry)
                entry = Entry()
                state = 0
        # remove optional comment and strip line
        i = line.find('#')
        if i >= 0:
            line = line[:i]
        line = line.strip()
        if not line:
            continue
        # split into "field: value"; field names are case-insensitive and
        # values are percent-decoded
        line = line.split(':', 1)
        if len(line) == 2:
            line[0] = line[0].strip().lower()
            line[1] = urllib.parse.unquote(line[1].strip())
            if line[0] == "user-agent":
                # a user-agent line directly after rules starts a new record
                if state == 2:
                    self._add_entry(entry)
                    entry = Entry()
                entry.useragents.append(line[1])
                state = 1
            elif line[0] == "disallow":
                # rules are only valid after at least one user-agent line
                if state != 0:
                    entry.rulelines.append(RuleLine(line[1], False))
                    state = 2
            elif line[0] == "allow":
                if state != 0:
                    entry.rulelines.append(RuleLine(line[1], True))
                    state = 2
    # commit the final record if the file did not end with a blank line
    if state == 2:
        self._add_entry(entry)
|
Parse the input lines from a robots.txt file.
We allow that a user-agent: line is not preceded by
one or more blank lines.
|
def edit(text, pos, key):
    """
    Process a key input in the context of a line, and return the
    resulting text and cursor position.

    `text' and `key' must be of type str or unicode, and `pos' must be
    an int in the range [0, len(text)].

    If `key' is in keys(), the corresponding command is executed on the
    line. Otherwise, if `key' is a single character, that character is
    inserted at the cursor position. If neither condition is met, `text'
    and `pos' are returned unmodified.
    """
    if key in _key_bindings:
        # known binding: run the bound line-editing command
        return _key_bindings[key](text, pos)
    if len(key) == 1:
        # printable character: insert at the cursor and advance it
        return text[:pos] + key + text[pos:], pos + 1
    # unrecognized multi-character key: no-op
    return text, pos
|
Process a key input in the context of a line, and return the
resulting text and cursor position.
`text' and `key' must be of type str or unicode, and `pos' must be
an int in the range [0, len(text)].
If `key' is in keys(), the corresponding command is executed on the
line. Otherwise, if `key' is a single character, that character is
inserted at the cursor position. If neither condition is met, `text'
and `pos' are returned unmodified.
|
def _save_results(self, zipdata, outdir, module, gmt, rank_metric, permutation_type):
    """Reformat GSEA results and save them to a report file.

    Populates ``self.results`` (per-gene-set dict) and ``self.res2d``
    (summary DataFrame); writes a CSV/TXT report unless ``self._outdir``
    is None.
    """
    res = OrderedDict()
    # zipdata items: (gene set name, (es, nes, pval, fdr), hit indices, RES)
    for gs, gseale, ind, RES in zipdata:
        rdict = OrderedDict()
        rdict['es'] = gseale[0]
        rdict['nes'] = gseale[1]
        rdict['pval'] = gseale[2]
        rdict['fdr'] = gseale[3]
        rdict['geneset_size'] = len(gmt[gs])
        rdict['matched_size'] = len(ind)
        #reformat gene list.
        _genes = rank_metric.index.values[ind]
        rdict['genes'] = ";".join([ str(g).strip() for g in _genes ])
        if self.module != 'ssgsea':
            # extract leading edge genes: the hits on the peak side of the
            # running enrichment score (RES) extremum
            if rdict['es'] > 0:
                # RES -> ndarray, ind -> list
                idx = RES.argmax()
                ldg_pos = list(filter(lambda x: x<= idx, ind))
            elif rdict['es'] < 0:
                idx = RES.argmin()
                ldg_pos = list(filter(lambda x: x >= idx, ind))
            else:
                ldg_pos = ind # es == 0 ?
            rdict['ledge_genes'] = ';'.join(list(map(str,rank_metric.iloc[ldg_pos].index)))
        rdict['RES'] = RES
        rdict['hits_indices'] = ind
        # save to one odict
        res[gs] = rdict
    # save
    self.results = res
    # save to dataframe (dropping the array-valued columns)
    res_df = pd.DataFrame.from_dict(res, orient='index')
    res_df.index.name = 'Term'
    res_df.drop(['RES','hits_indices'], axis=1, inplace=True)
    res_df.sort_values(by=['fdr','pval'], inplace=True)
    self.res2d = res_df
    # no output directory configured: keep results in memory only
    if self._outdir is None: return
    out = os.path.join(outdir,'gseapy.{b}.{c}.report.csv'.format(b=module, c=permutation_type))
    if self.module == 'ssgsea':
        # ssgsea reports are written as tab-separated text with a caveat header
        out = out.replace(".csv",".txt")
        with open(out, 'a') as f:
            f.write('# normalize enrichment scores by random permutation procedure (GSEA method)\n')
            f.write("# might not proper for publication\n")
            res_df.to_csv(f, sep='\t')
    else:
        res_df.to_csv(out)
    return
|
reformat gsea results, and save to txt
|
def delay_1(year):
    '''Test for delay of the start of the new year, postponing it by a day
    to avoid Sunday, Wednesday, and Friday as the start of the new year.'''
    # elapsed lunar months before the given year (19-year Metonic cycle)
    elapsed_months = trunc(((235 * year) - 234) / 19)
    # fractional "parts" (1/25920 of a day) accumulated by those months
    parts = 12084 + (13753 * elapsed_months)
    day = trunc((elapsed_months * 29) + parts / 25920)
    # postpone by one day when the computed weekday is disallowed
    if ((3 * (day + 1)) % 7) < 3:
        day += 1
    return day
|
Test for delay of the start of the new year, to avoid Sunday, Wednesday, and Friday as the start of the new year.
|
def get_auth(self, username, password, authoritative_source, auth_options=None):
    """ Returns an authentication object.

        Examines the auth backend given after the '@' in the username and
        returns a suitable instance of a subclass of the BaseAuth class.

        * `username` [string]
            Username to authenticate as.
        * `password` [string]
            Password to authenticate with.
        * `authoritative_source` [string]
            Authoritative source of the query.
        * `auth_options` [dict]
            A dict which, if authenticated as a trusted user, can override
            `username` and `authoritative_source`.

        Raises AuthError when `authoritative_source` is missing or the
        backend named in the username is unknown.
    """
    if auth_options is None:
        auth_options = {}
    # validate arguments
    if (authoritative_source is None):
        raise AuthError("Missing authoritative_source.")
    # remove invalid cache entries
    rem = list()
    for key in self._auth_cache:
        if self._auth_cache[key]['valid_until'] < datetime.utcnow():
            rem.append(key)
    for key in rem:
        del(self._auth_cache[key])
    user_authbackend = username.rsplit('@', 1)
    # Find out what auth backend to use.
    # If no auth backend was specified in username, use default
    backend = ""
    if len(user_authbackend) == 1:
        backend = self._config.get('auth', 'default_backend')
        self._logger.debug("Using default auth backend %s" % backend)
    else:
        backend = user_authbackend[1]
    # do we have a cached instance?
    # NOTE(review): the cache key concatenates the plaintext password;
    # this keeps passwords in memory for the cache lifetime -- consider
    # hashing the key instead.
    auth_str = ( str(username) + str(password) + str(authoritative_source)
                 + str(auth_options) )
    if auth_str in self._auth_cache:
        self._logger.debug('found cached auth object for user %s' % username)
        return self._auth_cache[auth_str]['auth_object']
    # Create auth object
    try:
        auth = self._backends[backend](backend, user_authbackend[0], password, authoritative_source, auth_options)
    except KeyError:
        raise AuthError("Invalid auth backend '%s' specified" %
                        str(backend))
    # save auth object to cache
    self._auth_cache[auth_str] = {
        'valid_until': datetime.utcnow() + timedelta(seconds=self._config.getint('auth', 'auth_cache_timeout')),
        'auth_object': auth
    }
    return auth
|
Returns an authentication object.
Examines the auth backend given after the '@' in the username and
returns a suitable instance of a subclass of the BaseAuth class.
* `username` [string]
Username to authenticate as.
* `password` [string]
Password to authenticate with.
* `authoritative_source` [string]
Authoritative source of the query.
* `auth_options` [dict]
A dict which, if authenticated as a trusted user, can override
`username` and `authoritative_source`.
|
def _gen_pool_xml(name,
                  ptype,
                  target=None,
                  permissions=None,
                  source_devices=None,
                  source_dir=None,
                  source_adapter=None,
                  source_hosts=None,
                  source_auth=None,
                  source_name=None,
                  source_format=None):
    '''
    Generate the XML string used to define a libvirt storage pool.

    Host entries in ``source_hosts`` are given as ``"name"`` or
    ``"name:port"`` strings. Returns an empty string when the Jinja
    template cannot be loaded.
    '''
    # Split each "name[:port]" host string into a name/port mapping.
    host_entries = []
    for host in source_hosts or []:
        parts = host.split(':')
        host_entries.append({
            'name': parts[0],
            'port': parts[1] if len(parts) > 1 else None,
        })
    context = {
        'name': name,
        'ptype': ptype,
        'target': {'path': target, 'permissions': permissions},
        'source': {
            'devices': source_devices or [],
            'dir': source_dir,
            'adapter': source_adapter,
            'hosts': host_entries,
            'auth': source_auth,
            'name': source_name,
            'format': source_format
        }
    }
    fn_ = 'libvirt_pool.jinja'
    try:
        template = JINJA.get_template(fn_)
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template %s', fn_)
        return ''
    return template.render(**context)
|
Generate the XML string to define a libvirt storage pool
|
def gss(args):
    """
    %prog gss fastafile plateMapping

    Generate sequence files and metadata templates suited for gss submission.
    The FASTA file is assumed to be exported from the JCVI data delivery folder
    which looks like:

    >1127963806024 /library_name=SIL1T054-B-01-120KB /clear_start=0
    /clear_end=839 /primer_id=1049000104196 /trace_id=1064147620169
    /trace_file_id=1127963805941 /clone_insert_id=1061064364776
    /direction=reverse /sequencer_run_id=1064147620155
    /sequencer_plate_barcode=B906423 /sequencer_plate_well_coordinates=C3
    /sequencer_plate_96well_quadrant=1 /sequencer_plate_96well_coordinates=B02
    /template_plate_barcode=CC0251602AB /growth_plate_barcode=BB0273005AB
    AGCTTTAGTTTCAAGGATACCTTCATTGTCATTCCCGGTTATGATGATATCATCAAGATAAACAAGAATG
    ACAATGATACCTGTTTGGTTCTGAAGTGTAAAGAGGGTATGTTCAGCTTCAGATCTTCTAAACCCTTTGT
    CTAGTAAGCTGGCACTTAGCTTCCTATACCAAACCCTTTGTGATTGCTTCAGTCCATAAATTGCCTTTTT

    Plate mapping file maps the JTC `sequencer_plate_barcode` to external IDs.
    For example:
    B906423 SIL-001
    """
    p = OptionParser(gss.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(p.print_help())
    fastafile, mappingfile = args
    seen = defaultdict(int)
    # clone: cloneID -> set of gssIDs belonging to the same clone
    clone = defaultdict(set)
    plateMapping = DictFile(mappingfile)
    fw = open("MetaData.txt", "w")
    # `vars` here appears to be a module-level template-context dict that
    # shadows the builtin -- TODO confirm at module scope.
    print(PublicationTemplate.format(**vars), file=fw)
    print(LibraryTemplate.format(**vars), file=fw)
    print(ContactTemplate.format(**vars), file=fw)
    logging.debug("Meta data written to `{0}`".format(fw.name))
    fw = open("GSS.txt", "w")
    fw_log = open("GSS.log", "w")
    for rec in SeqIO.parse(fastafile, "fasta"):
        # First pass just check well number matchings and populate sequences in
        # the same clone
        description = rec.description
        a = parse_description(description)
        direction = a["direction"][0]
        sequencer_plate_barcode = a["sequencer_plate_barcode"][0]
        sequencer_plate_well_coordinates = \
            a["sequencer_plate_well_coordinates"][0]
        sequencer_plate_96well_quadrant = \
            a["sequencer_plate_96well_quadrant"][0]
        sequencer_plate_96well_coordinates = \
            a["sequencer_plate_96well_coordinates"][0]
        # Check the 96-well ID is correctly converted to 384-well ID
        w96 = sequencer_plate_96well_coordinates
        w96quad = int(sequencer_plate_96well_quadrant)
        w384 = sequencer_plate_well_coordinates
        assert convert_96_to_384(w96, w96quad) == w384
        plate = sequencer_plate_barcode
        assert plate in plateMapping, \
            "{0} not found in `{1}` !".format(plate, mappingfile)
        plate = plateMapping[plate]
        d = Directions[direction]
        cloneID = "{0}{1}".format(plate, w384)
        gssID = "{0}{1}".format(cloneID, d)
        seen[gssID] += 1
        # disambiguate duplicates by appending the occurrence count
        if seen[gssID] > 1:
            gssID = "{0}{1}".format(gssID, seen[gssID])
            seen[gssID] += 1
        clone[cloneID].add(gssID)
    # reset counts for the second pass, which writes the actual records
    seen = defaultdict(int)
    for rec in SeqIO.parse(fastafile, "fasta"):
        # need to populate gssID, mateID, cloneID, seq, plate, row, column
        description = rec.description
        a = parse_description(description)
        direction = a["direction"][0]
        sequencer_plate_barcode = a["sequencer_plate_barcode"][0]
        sequencer_plate_well_coordinates = \
            a["sequencer_plate_well_coordinates"][0]
        w384 = sequencer_plate_well_coordinates
        plate = sequencer_plate_barcode
        plate = plateMapping[plate]
        d = Directions[direction]
        cloneID = "{0}{1}".format(plate, w384)
        gssID = "{0}{1}".format(cloneID, d)
        seen[gssID] += 1
        if seen[gssID] > 1:
            logging.error("duplicate key {0} found".format(gssID))
            gssID = "{0}{1}".format(gssID, seen[gssID])
        # sibling reads of the same clone, for cross-referencing in the record
        othergss = clone[cloneID] - set([gssID])
        othergss = ", ".join(sorted(othergss))
        # merge all locals into the template context for substitution
        vars.update(locals())
        print(GSSTemplate.format(**vars), file=fw)
        # Write conversion logs to log file
        print("{0}\t{1}".format(gssID, description), file=fw_log)
        print("=" * 60, file=fw_log)
    logging.debug("A total of {0} seqs written to `{1}`".\
            format(len(seen), fw.name))
    fw.close()
    fw_log.close()
|
%prog gss fastafile plateMapping
Generate sequence files and metadata templates suited for gss submission.
The FASTA file is assumed to be exported from the JCVI data delivery folder
which looks like:
>1127963806024 /library_name=SIL1T054-B-01-120KB /clear_start=0
/clear_end=839 /primer_id=1049000104196 /trace_id=1064147620169
/trace_file_id=1127963805941 /clone_insert_id=1061064364776
/direction=reverse /sequencer_run_id=1064147620155
/sequencer_plate_barcode=B906423 /sequencer_plate_well_coordinates=C3
/sequencer_plate_96well_quadrant=1 /sequencer_plate_96well_coordinates=B02
/template_plate_barcode=CC0251602AB /growth_plate_barcode=BB0273005AB
AGCTTTAGTTTCAAGGATACCTTCATTGTCATTCCCGGTTATGATGATATCATCAAGATAAACAAGAATG
ACAATGATACCTGTTTGGTTCTGAAGTGTAAAGAGGGTATGTTCAGCTTCAGATCTTCTAAACCCTTTGT
CTAGTAAGCTGGCACTTAGCTTCCTATACCAAACCCTTTGTGATTGCTTCAGTCCATAAATTGCCTTTTT
Plate mapping file maps the JTC `sequencer_plate_barcode` to external IDs.
For example:
B906423 SIL-001
|
def subrouters(self):
    """
    Yield the sub-routers contained within this router, i.e. every
    middleware whose function is itself a Router.
    """
    for middleware in self.mw_list:
        if isinstance(middleware.func, Router):
            yield middleware
|
Generator of sub-routers (middleware inheriting from Router)
contained within this router.
|
def blackbox_network():
    """A micro-network to demonstrate blackboxing.
    Diagram::
                                +----------+
          +-------------------->+ A (COPY) + <---------------+
          |                     +----------+                 |
          |                     +----------+                 |
          |     +-----------+ B (COPY) + <-------------+     |
          v     v               +----------+           |     |
        +-+-----+-+                                  +-+-----+-+
        |         |                                  |         |
        | C (AND) |                                  | F (AND) |
        |         |                                  |         |
        +-+-----+-+                                  +-+-----+-+
          |     |               +----------+           ^     ^
          |     +---------> + D (COPY) +---------------+     |
          |                     +----------+                 |
          |                     +----------+                 |
          +-------------------> + E (COPY) +-----------------+
                                +----------+
    Connectivity Matrix:
    +---+---+---+---+---+---+---+
    | . | A | B | C | D | E | F |
    +---+---+---+---+---+---+---+
    | A | 0 | 0 | 1 | 0 | 0 | 0 |
    +---+---+---+---+---+---+---+
    | B | 0 | 0 | 1 | 0 | 0 | 0 |
    +---+---+---+---+---+---+---+
    | C | 0 | 0 | 0 | 1 | 1 | 0 |
    +---+---+---+---+---+---+---+
    | D | 0 | 0 | 0 | 0 | 0 | 1 |
    +---+---+---+---+---+---+---+
    | E | 0 | 0 | 0 | 0 | 0 | 1 |
    +---+---+---+---+---+---+---+
    | F | 1 | 1 | 0 | 0 | 0 | 0 |
    +---+---+---+---+---+---+---+
    In the documentation example, the state is (0, 0, 0, 0, 0, 0).
    """
    num_nodes = 6
    num_states = 2 ** num_nodes
    # State-by-node TPM: one row per previous state, one column per node.
    tpm = np.zeros((num_states, num_nodes))
    for index, previous_state in enumerate(all_states(num_nodes)):
        current_state = [0 for i in range(num_nodes)]
        # A and B copy F.
        if previous_state[5] == 1:
            current_state[0] = 1
            current_state[1] = 1
        # C is an AND over A and B.
        # (was `previous_state[1]` without `== 1`; made consistent with
        # the other conditions — states are binary so behavior is the same)
        if previous_state[0] == 1 and previous_state[1] == 1:
            current_state[2] = 1
        # D and E copy C.
        if previous_state[2] == 1:
            current_state[3] = 1
            current_state[4] = 1
        # F is an AND over D and E.
        if previous_state[3] == 1 and previous_state[4] == 1:
            current_state[5] = 1
        tpm[index, :] = current_state
    cm = np.array([
        [0, 0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 0],
        [0, 0, 0, 1, 1, 0],
        [0, 0, 0, 0, 0, 1],
        [0, 0, 0, 0, 0, 1],
        [1, 1, 0, 0, 0, 0]
    ])
    return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])
|
A micro-network to demonstrate blackboxing.
Diagram::
+----------+
+-------------------->+ A (COPY) + <---------------+
| +----------+ |
| +----------+ |
| +-----------+ B (COPY) + <-------------+ |
v v +----------+ | |
+-+-----+-+ +-+-----+-+
| | | |
| C (AND) | | F (AND) |
| | | |
+-+-----+-+ +-+-----+-+
| | ^ ^
| | +----------+ | |
| +---------> + D (COPY) +---------------+ |
| +----------+ |
| +----------+ |
+-------------------> + E (COPY) +-----------------+
+----------+
Connectivity Matrix:
+---+---+---+---+---+---+---+
| . | A | B | C | D | E | F |
+---+---+---+---+---+---+---+
| A | 0 | 0 | 1 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| B | 0 | 0 | 1 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| C | 0 | 0 | 0 | 1 | 1 | 0 |
+---+---+---+---+---+---+---+
| D | 0 | 0 | 0 | 0 | 0 | 1 |
+---+---+---+---+---+---+---+
| E | 0 | 0 | 0 | 0 | 0 | 1 |
+---+---+---+---+---+---+---+
| F | 1 | 1 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
In the documentation example, the state is (0, 0, 0, 0, 0, 0).
|
def BVirial_Pitzer_Curl(T, Tc, Pc, omega, order=0):
    r'''Second virial coefficient from the Pitzer-Curl corresponding-states
    correlation [1]_. Designed for simple calculations.

    .. math::
        B_r=B^{(0)}+\omega B^{(1)}

        B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3

        B^{(1)} = 0.073+0.46/T_r-0.5/T_r^2 -0.097/T_r^3 - 0.0073/T_r^8

    Parameters
    ----------
    T : float
        Temperature of fluid [K]
    Tc : float
        Critical temperature of fluid [K]
    Pc : float
        Critical pressure of the fluid [Pa]
    omega : float
        Acentric factor for fluid, [-]
    order : int, optional
        Order of the calculation. 0 for B itself; 1, 2 or 3 for the
        first/second/third derivative of B with respect to temperature; -1
        or -2 for the first/second indefinite integral of B with respect to
        temperature. Any other order raises an exception.

    Returns
    -------
    B : float
        Second virial coefficient in density form or its integral/derivative
        if specified, [m^3/mol or m^3/mol/K^order]

    Notes
    -----
    The analytical expressions for the derivative and integral orders were
    obtained with SymPy.

    Examples
    --------
    Example matching that in BVirial_Abbott, for isobutane.

    >>> BVirial_Pitzer_Curl(510., 425.2, 38E5, 0.193)
    -0.0002084535541385102

    References
    ----------
    .. [1] Pitzer, Kenneth S., and R. F. Curl. "The Volumetric and
       Thermodynamic Properties of Fluids. III. Empirical Equation for the
       Second Virial Coefficient1." Journal of the American Chemical Society
       79, no. 10 (May 1, 1957): 2369-70. doi:10.1021/ja01567a007.
    '''
    # Reject unsupported orders up front.
    if order not in (0, 1, 2, 3, -1, -2):
        raise Exception('Only orders -2, -1, 0, 1, 2 and 3 are supported.')
    Tr = T/Tc
    if order == 0:
        B0 = 0.1445 - 0.33/Tr - 0.1385/Tr**2 - 0.0121/Tr**3
        B1 = 0.073 + 0.46/Tr - 0.5/Tr**2 - 0.097/Tr**3 - 0.0073/Tr**8
    elif order == 1:
        B0 = Tc*(3300*T**2 + 2770*T*Tc + 363*Tc**2)/(10000*T**4)
        B1 = Tc*(-2300*T**7 + 5000*T**6*Tc + 1455*T**5*Tc**2 + 292*Tc**7)/(5000*T**9)
    elif order == 2:
        B0 = -3*Tc*(1100*T**2 + 1385*T*Tc + 242*Tc**2)/(5000*T**5)
        B1 = Tc*(1150*T**7 - 3750*T**6*Tc - 1455*T**5*Tc**2 - 657*Tc**7)/(1250*T**10)
    elif order == 3:
        B0 = 3*Tc*(330*T**2 + 554*T*Tc + 121*Tc**2)/(500*T**6)
        B1 = 3*Tc*(-230*T**7 + 1000*T**6*Tc + 485*T**5*Tc**2 + 438*Tc**7)/(250*T**11)
    elif order == -1:
        B0 = 289*T/2000 - 33*Tc*log(T)/100 + (2770*T*Tc**2 + 121*Tc**3)/(20000*T**2)
        B1 = 73*T/1000 + 23*Tc*log(T)/50 + (35000*T**6*Tc**2 + 3395*T**5*Tc**3 + 73*Tc**8)/(70000*T**7)
    else:  # order == -2
        B0 = 289*T**2/4000 - 33*T*Tc*log(T)/100 + 33*T*Tc/100 + 277*Tc**2*log(T)/2000 - 121*Tc**3/(20000*T)
        B1 = 73*T**2/2000 + 23*T*Tc*log(T)/50 - 23*T*Tc/50 + Tc**2*log(T)/2 - (20370*T**5*Tc**3 + 73*Tc**8)/(420000*T**6)
    # Combine the reduced forms and convert to dimensional B.
    return (B0 + omega*B1)*R*Tc/Pc
|
r'''Calculates the second virial coefficient using the model in [1]_.
Designed for simple calculations.
.. math::
B_r=B^{(0)}+\omega B^{(1)}
B^{(0)}=0.1445-0.33/T_r-0.1385/T_r^2-0.0121/T_r^3
B^{(1)} = 0.073+0.46/T_r-0.5/T_r^2 -0.097/T_r^3 - 0.0073/T_r^8
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of the fluid [Pa]
omega : float
Acentric factor for fluid, [-]
order : int, optional
Order of the calculation. 0 for the calculation of B itself; for 1/2/3,
the first/second/third derivative of B with respect to temperature; and
for -1/-2, the first/second indefinite integral of B with respect to
temperature. No other integrals or derivatives are implemented, and an
exception will be raised if any other order is given.
Returns
-------
B : float
Second virial coefficient in density form or its integral/derivative if
specified, [m^3/mol or m^3/mol/K^order]
Notes
-----
Analytical models for derivatives and integrals are available for orders
-2, -1, 1, 2, and 3, all obtained with SymPy.
For first temperature derivative of B:
.. math::
\frac{d B^{(0)}}{dT} = \frac{33 Tc}{100 T^{2}} + \frac{277 Tc^{2}}{1000 T^{3}} + \frac{363 Tc^{3}}{10000 T^{4}}
\frac{d B^{(1)}}{dT} = - \frac{23 Tc}{50 T^{2}} + \frac{Tc^{2}}{T^{3}} + \frac{291 Tc^{3}}{1000 T^{4}} + \frac{73 Tc^{8}}{1250 T^{9}}
For the second temperature derivative of B:
.. math::
\frac{d^2 B^{(0)}}{dT^2} = - \frac{3 Tc}{5000 T^{3}} \left(1100 + \frac{1385 Tc}{T} + \frac{242 Tc^{2}}{T^{2}}\right)
\frac{d^2 B^{(1)}}{dT^2} = \frac{Tc}{T^{3}} \left(\frac{23}{25} - \frac{3 Tc}{T} - \frac{291 Tc^{2}}{250 T^{2}} - \frac{657 Tc^{7}}{1250 T^{7}}\right)
For the third temperature derivative of B:
.. math::
\frac{d^3 B^{(0)}}{dT^3} = \frac{3 Tc}{500 T^{4}} \left(330 + \frac{554 Tc}{T} + \frac{121 Tc^{2}}{T^{2}}\right)
\frac{d^3 B^{(1)}}{dT^3} = \frac{3 Tc}{T^{4}} \left(- \frac{23}{25} + \frac{4 Tc}{T} + \frac{97 Tc^{2}}{50 T^{2}} + \frac{219 Tc^{7}}{125 T^{7}}\right)
For the first indefinite integral of B:
.. math::
\int{B^{(0)}} dT = \frac{289 T}{2000} - \frac{33 Tc}{100} \log{\left (T \right )} + \frac{1}{20000 T^{2}} \left(2770 T Tc^{2} + 121 Tc^{3}\right)
\int{B^{(1)}} dT = \frac{73 T}{1000} + \frac{23 Tc}{50} \log{\left (T \right )} + \frac{1}{70000 T^{7}} \left(35000 T^{6} Tc^{2} + 3395 T^{5} Tc^{3} + 73 Tc^{8}\right)
For the second indefinite integral of B:
.. math::
\int\int B^{(0)} dT dT = \frac{289 T^{2}}{4000} - \frac{33 T}{100} Tc \log{\left (T \right )} + \frac{33 T}{100} Tc + \frac{277 Tc^{2}}{2000} \log{\left (T \right )} - \frac{121 Tc^{3}}{20000 T}
\int\int B^{(1)} dT dT = \frac{73 T^{2}}{2000} + \frac{23 T}{50} Tc \log{\left (T \right )} - \frac{23 T}{50} Tc + \frac{Tc^{2}}{2} \log{\left (T \right )} - \frac{1}{420000 T^{6}} \left(20370 T^{5} Tc^{3} + 73 Tc^{8}\right)
Examples
--------
Example matching that in BVirial_Abbott, for isobutane.
>>> BVirial_Pitzer_Curl(510., 425.2, 38E5, 0.193)
-0.0002084535541385102
References
----------
.. [1] Pitzer, Kenneth S., and R. F. Curl. "The Volumetric and
Thermodynamic Properties of Fluids. III. Empirical Equation for the
Second Virial Coefficient1." Journal of the American Chemical Society
79, no. 10 (May 1, 1957): 2369-70. doi:10.1021/ja01567a007.
|
def send_subscribe(self, dup, topics):
    """Build a SUBSCRIBE packet for the given (topic, qos) pairs and
    queue it for sending to the server.

    Returns an NC error code if allocation fails, otherwise the result
    of queueing the packet.
    """
    packet = MqttPkt()
    # Remaining length: 2 bytes for the message id, plus per topic a
    # 2-byte length prefix, the topic bytes, and 1 byte for the QoS.
    remaining = 2
    for topic, qos in topics:
        remaining += 2 + len(topic) + 1
    packet.command = NC.CMD_SUBSCRIBE | (dup << 3) | (1 << 1)
    packet.remaining_length = remaining
    status = packet.alloc()
    if status != NC.ERR_SUCCESS:
        return status
    # Variable header: message id.
    packet.write_uint16(self.mid_generate())
    # Payload: each topic string followed by its requested QoS.
    for topic, qos in topics:
        packet.write_string(topic)
        packet.write_byte(qos)
    return self.packet_queue(packet)
|
Send subscribe COMMAND to server.
|
def stop_all(self):
    """Halt the analog output and input tasks (if running) and release
    the DAQ lock.

    Both task references are cleared to None afterwards, so a repeated
    call only re-releases the lock.
    """
    if self.aotask is not None:
        self.aotask.stop()
    # Guard the input task too: it may already be None (e.g. only the
    # output task was started), in which case .stop() would raise
    # AttributeError before the lock was released.
    if self.aitask is not None:
        self.aitask.stop()
    self.daq_lock.release()
    self.aitask = None
    self.aotask = None
|
Halts both the analog output and input tasks
|
def do_results(args):
    """Write the results output file for one candidate.

    args is a (config, name, label, coord) tuple; only config and label
    are used here, to derive the source and samples file names.
    """
    config, name, label, coord = args
    filenames = make_filenames(config, label)
    srcfile = filenames['srcfile']
    samples = filenames['samfile']
    # Both input files must exist; warn and bail out on the first missing one.
    for required in (srcfile, samples):
        if not exists(required):
            logger.warning("Couldn't find %s; skipping..."%required)
            return
    logger.info("Writing %s..."%srcfile)
    from ugali.analysis.results import write_results
    write_results(srcfile,config,srcfile,samples)
|
Write the results output file
|
def duplicate_nodes(self):
    """
    Find groups of scene-graph nodes that reference identical geometry.

    Combines meshes duplicated by copying in space under different keys
    in self.geometry, as well as meshes repeated by self.nodes.

    Returns
    -----------
    duplicates: (m) sequence of keys to self.nodes that represent
      identical geometry
    """
    # with no geometry there can be no duplicates
    if len(self.geometry) == 0:
        return []
    # geometry name -> integer form of the mesh's md5 identifier
    geometry_hash = {name: int(mesh.identifier_md5, 16)
                     for name, mesh in self.geometry.items()}
    # every node in the scene graph that carries geometry
    nodes = np.array(self.graph.nodes_geometry)
    # the hash of the geometry each node references, in the same order
    hashes = np.array([geometry_hash[self.graph[node][1]]
                       for node in nodes])
    # group node indices by identical hash; each group becomes one
    # sorted list of node names with identical geometry
    return [np.sort(nodes[group]).tolist()
            for group in grouping.group(hashes)]
|
Return a sequence of node keys of identical meshes.
Will combine meshes duplicated by copying in space with different keys in
self.geometry, as well as meshes repeated by self.nodes.
Returns
-----------
duplicates: (m) sequence of keys to self.nodes that represent
identical geometry
|
def simOnePeriod(self):
    '''
    Simulate a single period for this agent type by calling, in order,
    getMortality(), getShocks() or readShocks(), getStates(),
    getControls(), and getPostStates(). AgentType subclasses should
    define these, except getMortality (define its components simDeath
    and simBirth instead) and readShocks.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # Replace dead agents with newborns before anything else happens.
    self.getMortality()
    # Use pre-specified shock histories when available; otherwise draw
    # fresh shocks via the subclass-specific method.
    if self.read_shocks:
        self.readShocks()
    else:
        self.getShocks()
    self.getStates()       # states at decision time
    self.getControls()     # choices/controls given states
    self.getPostStates()   # post-decision states from states + controls
    # Advance time: age every agent, step its position in the cycle,
    # and wrap agents that completed a full cycle back to zero.
    self.t_age = self.t_age + 1
    self.t_cycle = self.t_cycle + 1
    self.t_cycle[self.t_cycle == self.T_cycle] = 0
|
Simulates one period for this type. Calls the methods getMortality(), getShocks() or
readShocks, getStates(), getControls(), and getPostStates(). These should be defined for
AgentType subclasses, except getMortality (define its components simDeath and simBirth
instead) and readShocks.
Parameters
----------
None
Returns
-------
None
|
def copy(self):
    """Return a duplicate of this :class:`Group`, including copies of
    every command it contains."""
    duplicate = super().copy()
    for command in self.commands:
        duplicate.add_command(command.copy())
    return duplicate
|
Creates a copy of this :class:`Group`.
|
def plot_ts(fignum, dates, ts):
    """
    Plot the geomagnetic polarity time scale as a square wave.

    Parameters
    ----------
    fignum : matplotlib figure number
    dates : bounding dates for plot (lower, upper)
    ts : time scale name: ck95, gts04, or gts12
    """
    vertical_plot_init(fignum, 10, 3)
    # TS: chron boundary ages; Chrons: (label, age) pairs for annotation
    TS, Chrons = pmag.get_ts(ts)
    p = 1  # parity counter; p % 2 gives the current polarity level (0/1)
    X, Y = [], []
    # Build a square wave that steps between 0 and 1 at each reversal
    # boundary within [dates[0], dates[1]].
    for d in TS:
        if d <= dates[1]:
            if d >= dates[0]:
                if len(X) == 0:
                    # First in-range boundary: start the wave at the
                    # preceding boundary so the plot begins mid-chron.
                    ind = TS.index(d)
                    X.append(TS[ind - 1])
                    Y.append(p % 2)
                # Vertical step: same x twice, before/after the flip.
                X.append(d)
                Y.append(p % 2)
                p += 1
                X.append(d)
                Y.append(p % 2)
        else:
            # Boundary beyond the upper limit: clamp to dates[1].
            # NOTE(review): this appends the clamped endpoint once per
            # out-of-range boundary; the repeated point appears harmless.
            X.append(dates[1])
            Y.append(p % 2)
    plt.plot(X, Y, 'k')
    # Frame the polarity bar with white bounding lines.
    plot_vs(fignum, dates, 'w', '-')
    plot_hs(fignum, [1.1, -.1], 'w', '-')
    plt.xlabel("Age (Ma): " + ts)
    # Alternate chron labels above/below the bar to reduce overlap.
    isign = -1
    for c in Chrons:
        off = -.1
        isign = -1 * isign
        if isign > 0:
            off = 1.05
        if c[1] >= X[0] and c[1] < X[-1]:
            plt.text(c[1] - .2, off, c[0])
    return
|
plot the geomagnetic polarity time scale
Parameters
__________
fignum : matplotlib figure number
dates : bounding dates for plot
ts : time scale ck95, gts04, or gts12
|
def rgb_view(qimage, byteorder = 'big'):
    """Return an RGB view of a given 32-bit color QImage_'s memory.

    Like byte_view(), the result is a 3D numpy.uint8 array, but reduced
    to the three color channels (alpha stripped). The image must have
    32 bit pixel size, i.e. be RGB32, ARGB32, or ARGB32_Premultiplied
    (in the latter case the values are premultiplied with alpha).

    The channel order in the last axis depends on `byteorder`, which
    defaults to 'big' (RGB). Pass 'little' for BGR, or None to use
    sys.byteorder, i.e. the native order of the running machine.

    For your convenience, `qimage` may also be a filename, see
    `Loading and Saving Images`_ in the documentation.

    :param qimage: image whose memory shall be accessed via NumPy
    :type qimage: QImage_ with 32-bit pixel type
    :param byteorder: specify order of channels in last axis
    :rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8"""
    if byteorder is None:
        byteorder = _sys.byteorder
    channels = byte_view(qimage, byteorder)
    if channels.shape[2] != 4:
        raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
    # little-endian memory is BGRA -> drop the trailing alpha byte;
    # otherwise the layout is ARGB -> drop the leading alpha byte.
    return channels[..., :3] if byteorder == 'little' else channels[..., 1:]
|
Returns RGB view of a given 32-bit color QImage_'s memory.
Similarly to byte_view(), the result is a 3D numpy.uint8 array,
but reduced to the rgb dimensions (without alpha), and reordered
(using negative strides in the last dimension) to have the usual
[R,G,B] order. The image must have 32 bit pixel size, i.e. be
RGB32, ARGB32, or ARGB32_Premultiplied. (Note that in the latter
case, the values are of course premultiplied with alpha.)
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'big', i.e. RGB order. You may set the argument
`byteorder` to 'little' to get BGR, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8
|
def normalize_fragment(text, encoding='utf-8'):
    '''Normalize a URL fragment.

    Characters outside the fragment-safe set are percent-encoded, and
    any existing percent-encodings are uppercased.
    '''
    encoded = percent_encode(
        text, encoding=encoding, encode_set=FRAGMENT_ENCODE_SET)
    return uppercase_percent_encoding(encoded)
|
Normalize a fragment.
Percent-encodes unacceptable characters and ensures percent-encoding is
uppercase.
|
def milestone(self, extra_params=None):
    """
    The Milestone that the Ticket is a part of, or None if the ticket
    has no milestone_id or no matching milestone is found.
    """
    if not self.get('milestone_id', None):
        return None
    matches = self.space.milestones(id=self['milestone_id'], extra_params=extra_params)
    return matches[0] if matches else None
|
The Milestone that the Ticket is a part of
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.