code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def query_pre_approvals(self, initial_date, final_date, page=None,
                        max_results=None):
        """Fetch every pre-approval in the given date range.

        Pages through the remote query endpoint starting at `page` and
        accumulates all returned pre-approvals into a single list.
        """
        pre_approvals = []
        while True:
            response = self._consume_query_pre_approvals(
                initial_date, final_date, page, max_results)
            pre_approvals.extend(response.pre_approvals)
            current, total = response.current_page, response.total_pages
            # Stop when paging metadata is missing or the final page is reached.
            if current is None or total is None or current == total:
                break
            page = current + 1
        return pre_approvals
|
query pre-approvals by date range
|
def get_found_locations():
    """Print the characters hidden at known offsets inside seeded
    pseudo-random garbage strings.

    Scratch/exploration code: each ``random.seed(N)`` reproduces the same
    100k-character garbage string, and the hard-coded slices extract the
    words previously located by a search pass whose log output was:

        INFO:__main__:found HELP in 1572
        INFO:__main__:found MATHS in 1704
        INFO:__main__:found ROCKS in 1975
    """
    # Original search snippet kept for reference; it locates 'MATHS' in the
    # seed-1704 garbage, and the printed offset feeds the slice below:
    #random.seed(1704)
    #garbage = ''.join([chr(random.randint(32,122)) for x in range(100000)])
    #wrd_location = garbage.find('MATHS')
    #print(wrd_location)  # 59741
    #print(garbage[wrd_location:wrd_location+5])  # MATHS
    random.seed(1704)
    print(''.join([chr(random.randint(32,122)) for x in range(100000)])[59741:59746])  # expected: MATHS
    random.seed(1572)
    print(''.join([chr(random.randint(32,122)) for x in range(100000)])[73834:73838])  # expected: HELP
    # NOTE(review): the 3-character prints below look like seeds found for
    # short words; purpose not evident from this file -- confirm with author.
    random.seed(561240)
    print(''.join([chr(random.randint(32,122)) for x in range(3)])) # WTF
    random.seed(706075)
    print(''.join([chr(random.randint(32,122)) for x in range(3)]))
|
INFO:__main__:found HELP in 1572
INFO:__main__:found MATHS in 1704
INFO:__main__:found ROCKS in 1975
#random.seed(1572)
#garbage = ''.join([chr(random.randint(32,122)) for x in range(100000)])
#wrd_location = garbage.find('HELP')
#print(wrd_location) # 73834
#print(garbage[wrd_location:wrd_location+4]) # HELP
|
def GuinierPorodGuinier(q, G, Rg1, alpha, Rg2):
    """Empirical Guinier-Porod-Guinier scattering.

    Inputs:
    -------
    ``q``: independent variable
    ``G``: factor for the first Guinier-branch
    ``Rg1``: the first radius of gyration
    ``alpha``: the power-law exponent
    ``Rg2``: the second radius of gyration

    Formula:
    --------
    ``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``.
    ``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``.
    ``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``.
    The parameters ``A``, ``G2``, ``q_sep1``, ``q_sep2`` are determined
    from conditions of smoothness at the cross-overs.

    Literature:
    -----------
    B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
    716-719.
    """
    # Thin wrapper: this is the two-Guinier special case of the general
    # multi-branch Guinier-Porod model.
    return GuinierPorodMulti(q, G, Rg1, alpha, Rg2)
|
Empirical Guinier-Porod-Guinier scattering
Inputs:
-------
``q``: independent variable
``G``: factor for the first Guinier-branch
``Rg1``: the first radius of gyration
``alpha``: the power-law exponent
``Rg2``: the second radius of gyration
Formula:
--------
``G*exp(-q^2*Rg1^2/3)`` if ``q<q_sep1``.
``A*q^alpha`` if ``q_sep1 <= q <=q_sep2``.
``G2*exp(-q^2*Rg2^2/3)`` if ``q_sep2<q``.
The parameters ``A``,``G2``, ``q_sep1``, ``q_sep2`` are determined
from conditions of smoothness at the cross-overs.
Literature:
-----------
B. Hammouda: A new Guinier-Porod model. J. Appl. Crystallogr. (2010) 43,
716-719.
|
def upload_to_cache_server(fpath):
    """Uploads .torrent file to a cache server.

    Returns upload file URL.

    :param fpath: path to the local .torrent file to upload
    :rtype: str
    :raises RemoteUploadError: if `requests` is unavailable, the upload
        fails, or the server responds with an error status.
    """
    url_base = 'http://torrage.info'
    url_upload = '%s/autoupload.php' % url_base
    url_download = '%s/torrent.php?h=' % url_base
    file_field = 'torrent'
    try:
        import requests
    except ImportError as e:
        # BUG FIX: the previous combined `except (ImportError,
        # requests.RequestException)` raised NameError when the import
        # itself failed, because `requests` was unbound while evaluating
        # the except tuple. Handle the import separately.
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
    try:
        # Close the file handle deterministically instead of leaking it.
        with open(fpath, 'rb') as f:
            response = requests.post(
                url_upload, files={file_field: f}, timeout=REMOTE_TIMEOUT)
        response.raise_for_status()
    except requests.RequestException as e:
        # Now trace is lost. `raise from` to consider.
        raise RemoteUploadError('Unable to upload to %s: %s' % (url_upload, e))
    return url_download + response.text
|
Uploads .torrent file to a cache server.
Returns upload file URL.
:rtype: str
|
def V_horiz_guppy(D, L, a, h, headonly=False):
    r'''Calculates volume of a tank with guppy heads, according to [1]_.

    .. math::
        V_f = A_fL + \frac{2aR^2}{3}\cos^{-1}\left(1 - \frac{h}{R}\right)
        +\frac{2a}{9R}\sqrt{2Rh - h^2}(2h-3R)(h+R)
    .. math::
        Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    L : float
        Length of the main cylindrical section, [m]
    a : float
        Distance the guppy head extends on one side, [m]
    h : float
        Height, as measured up to where the fluid ends, [m]
    headonly : bool, optional
        Function returns only the volume of a single head side if True

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_horiz_guppy(D=108., L=156., a=42., h=36)/231.
    1931.7208029476762

    References
    ----------
    .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
       http://www.webcalc.com.br/blog/Tank_Volume.PDF'''
    R = 0.5*D
    # Half-chord of the liquid surface: sqrt(2Rh - h^2)
    chord = (2.*R*h - h*h)**0.5
    # Circular-segment cross-section of the cylindrical shell
    Af = R*R*acos((R - h)/R) - (R - h)*chord
    # Combined volume contribution of the two guppy heads
    heads = 2.*a*R*R/3.*acos(1. - h/R) + 2.*a/9./R*chord*(2.*h - 3.*R)*(h + R)
    if headonly:
        return heads/2.
    return heads + Af*L
|
r'''Calculates volume of a tank with guppy heads, according to [1]_.
.. math::
V_f = A_fL + \frac{2aR^2}{3}\cos^{-1}\left(1 - \frac{h}{R}\right)
+\frac{2a}{9R}\sqrt{2Rh - h^2}(2h-3R)(h+R)
.. math::
Af = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
L : float
Length of the main cylindrical section, [m]
a : float
Distance the guppy head extends on one side, [m]
h : float
Height, as measured up to where the fluid ends, [m]
headonly : bool, optional
Function returns only the volume of a single head side if True
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_horiz_guppy(D=108., L=156., a=42., h=36)/231.
1931.7208029476762
References
----------
.. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22, 2015.
http://www.webcalc.com.br/blog/Tank_Volume.PDF
|
def _apply_record_checks(self, i, r,
                         summarize=False,
                         report_unexpected_exceptions=True,
                         context=None):
        """Apply whole-record checks to row `r`, yielding one problem dict
        per failing check.

        Args:
            i: zero-based row index; used both for sampling (modulus) and
                for the 1-based 'row' field in reported problems.
            r: the record (sequence of field values) under validation.
            summarize: if True, report only the problem code and omit
                row/record details.
            report_unexpected_exceptions: if True, non-RecordError
                exceptions raised by a check are reported as
                UNEXPECTED_EXCEPTION problems instead of propagating.
            context: arbitrary caller-supplied context echoed back in each
                problem dict.
        """
        for check, modulus in self._record_checks:
            if i % modulus == 0: # support sampling
                rdict = self._as_dict(r)
                try:
                    check(rdict)
                except RecordError as e:
                    # Expected validation failure: report it under the
                    # check-supplied code if any, else the generic one.
                    code = e.code if e.code is not None else RECORD_CHECK_FAILED
                    p = {'code': code}
                    if not summarize:
                        message = e.message if e.message is not None else MESSAGES[RECORD_CHECK_FAILED]
                        p['message'] = message
                        p['row'] = i + 1
                        p['record'] = r
                        if context is not None: p['context'] = context
                        if e.details is not None: p['details'] = e.details
                    yield p
                except Exception as e:
                    # Unexpected failure inside the user-supplied check.
                    if report_unexpected_exceptions:
                        p = {'code': UNEXPECTED_EXCEPTION}
                        if not summarize:
                            p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e)
                            p['row'] = i + 1
                            p['record'] = r
                            p['exception'] = e
                            p['function'] = '%s: %s' % (check.__name__,
                                                        check.__doc__)
                        if context is not None: p['context'] = context
                        yield p
|
Apply record checks on `r`.
|
def driver(self):
        """
        Return the driver this agent is related to.

        Note: despite older documentation claiming None is returned when
        the driver is not ready, this implementation raises instead (see
        Raises below).

        Returns:
            :py:class:`inherit from Poco <poco.pocofw.Poco>`: the driver of this agent related to.

        Raises:
            AttributeError: if no driver has been bound to this agent yet,
                e.g. when a subclass overrides `on_bind_driver` without
                calling `super().on_bind_driver`.
        """
        if not self._driver:
            raise AttributeError("`driver` is not bound on this agent implementation({}). "
                                 "Do you forget to call `super().on_bind_driver` when you override the method "
                                 "`on_bind_driver` in your sub class?"
                                 .format(repr(self)))
        return self._driver
|
Return the driver of this agent related to. None if the driver is not ready to bind.
Returns:
:py:class:`inherit from Poco <poco.pocofw.Poco>`: the driver of this agent related to.
|
def summary(dataset_uri, format):
    """Report summary information about a dataset.

    :param dataset_uri: URI of the dataset to summarize
    :param format: "json" for colorized JSON output, anything else for a
        key/value listing
    """
    import json

    dataset = dtoolcore.DataSet.from_uri(dataset_uri)
    creator_username = dataset._admin_metadata["creator_username"]
    frozen_at = dataset._admin_metadata["frozen_at"]
    num_items = len(dataset.identifiers)
    tot_size = sum(dataset.item_properties(i)["size_in_bytes"]
                   for i in dataset.identifiers)
    if format == "json":
        # BUG FIX: build the document with the json module so that names
        # containing quotes/backslashes/non-ASCII are escaped correctly;
        # the previous hand-rolled string concatenation produced invalid
        # JSON for such values. indent=1 preserves the old layout.
        summary_data = {
            "name": dataset.name,
            "uuid": dataset.uuid,
            "creator_username": creator_username,
            "number_of_items": num_items,
            "size_in_bytes": tot_size,
            "frozen_at": frozen_at,
        }
        formatted_json = json.dumps(summary_data, indent=1, ensure_ascii=False)
        colorful_json = pygments.highlight(
            formatted_json,
            pygments.lexers.JsonLexer(),
            pygments.formatters.TerminalFormatter())
        click.secho(colorful_json, nl=False)
    else:
        info = [
            ("name", dataset.name),
            ("uuid", dataset.uuid),
            ("creator_username", creator_username),
            ("number_of_items", str(num_items)),
            ("size", sizeof_fmt(tot_size).strip()),
            ("frozen_at", date_fmt(frozen_at)),
        ]
        for key, value in info:
            click.secho("{}: ".format(key), nl=False)
            click.secho(value, fg="green")
|
Report summary information about a dataset.
|
def __ComputeUploadConfig(self, media_upload_config, method_id):
        """Fill out the upload config for this method.

        Args:
            media_upload_config: the `mediaUpload` section of a discovery
                document's method description.
            method_id: identifier of the method, used in log messages.

        Returns:
            A base_api.ApiUploadInfo populated with the size limit,
            accepted MIME patterns, and per-protocol (simple/resumable)
            attributes.
        """
        config = base_api.ApiUploadInfo()
        if 'maxSize' in media_upload_config:
            config.max_size = self.__MaxSizeToInt(
                media_upload_config['maxSize'])
        if 'accept' not in media_upload_config:
            # logging.warn is a deprecated alias for logging.warning.
            logging.warning(
                'No accept types found for upload configuration in '
                'method %s, using */*', method_id)
        # BUG FIX: the default must be a one-element list. The previous
        # default of the bare string '*/*' was iterated character by
        # character, yielding accept patterns ['*', '/', '*'].
        config.accept.extend([
            str(a) for a in media_upload_config.get('accept', ['*/*'])])
        for accept_pattern in config.accept:
            if not _MIME_PATTERN_RE.match(accept_pattern):
                logging.warning('Unexpected MIME type: %s', accept_pattern)
        protocols = media_upload_config.get('protocols', {})
        for protocol in ('simple', 'resumable'):
            media = protocols.get(protocol, {})
            for attr in ('multipart', 'path'):
                if attr in media:
                    setattr(config, '%s_%s' % (protocol, attr), media[attr])
        return config
|
Fill out the upload config for this method.
|
def faves(self, option):
        """
        Restrict results by the user's faves list.

        Options available are user.ONLY, user.NOT, and None (no
        filtering); default is None.
        """
        merged = join_params(self.parameters, {"faves": option})
        return self.__class__(**merged)
|
Set whether to filter by a user's faves list. Options available are
user.ONLY, user.NOT, and None; default is None.
|
def on_success(self, metadata):
        """Handle a received SUCCESS message.

        Invokes the registered "on_success" handler with `metadata`, then
        the zero-argument "on_summary" handler; each is skipped if absent
        or not callable.
        """
        for key, handler_args in (("on_success", (metadata,)),
                                  ("on_summary", ())):
            handler = self.handlers.get(key)
            if callable(handler):
                handler(*handler_args)
|
Called when a SUCCESS message has been received.
|
def storeToXML(self, out, comment=None, encoding='UTF-8'):
        """
        Serialize the `Properties` object's entries (in unspecified order)
        to ``out`` in XML properties format.

        :param out: a file-like object to write the properties to
        :type out: binary file-like object
        :param comment: if non-`None`, ``comment`` will be output as a
            ``<comment>`` element before the ``<entry>`` elements
        :type comment: text string or `None`
        :param string encoding: the name of the encoding to use for the XML
            document (also included in the XML declaration)
        :return: `None`
        """
        # Serialization is delegated wholesale to dump_xml.
        dump_xml(self.data, out, encoding=encoding, comment=comment)
|
Write the `Properties` object's entries (in unspecified order) in XML
properties format to ``out``.
:param out: a file-like object to write the properties to
:type out: binary file-like object
:param comment: if non-`None`, ``comment`` will be output as a
``<comment>`` element before the ``<entry>`` elements
:type comment: text string or `None`
:param string encoding: the name of the encoding to use for the XML
document (also included in the XML declaration)
:return: `None`
|
def remove_choice(self, choice_name):
        """Remove a choice from the experiment.

        Raises ValueError (from list.remove) if `choice_name` is not a
        current choice. The updated choice list is persisted to redis and
        local state is refreshed.
        """
        self.choice_names.remove(choice_name)
        self.redis.hset(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "choices", escape.json_encode(self.choice_names))
        self.refresh()
|
Removes a choice from the experiment
|
def explode_dn(dn, notypes=0, flags=0):
    """
    explode_dn(dn [, notypes=0]) -> list

    Break a DN into its component RDN strings. When `notypes` is true,
    only the attribute values (without the attribute types) appear in
    the result.
    """
    if not dn:
        return []
    # Choose how a single (type, value) AVA is rendered.
    if notypes:
        render = lambda atype, avalue: escape_dn_chars(avalue or '')
    else:
        render = lambda atype, avalue: atype + '=' + escape_dn_chars(avalue or '')
    return [
        '+'.join(render(atype, avalue) for atype, avalue, _unused in rdn)
        for rdn in str2dn(dn, flags)
    ]
|
explode_dn(dn [, notypes=0]) -> list
This function takes a DN and breaks it up into its component parts.
The notypes parameter is used to specify that only the component's
attribute values be returned and not the attribute types.
|
def question_image_filepath(instance, filename):
    """Build the upload path for a question image.

    Layout: images/<question_level>/<question_level_id>/<random-hex>/<filename>,
    where <random-hex> is 30 hex characters (15 random bytes) so that
    identically named uploads cannot collide.
    """
    # BUG FIX: binascii.b2a_hex returns bytes on Python 3; joining it with
    # str components raised TypeError. Decode it to str first.
    token = binascii.b2a_hex(os.urandom(15)).decode('ascii')
    return '/'.join(['images', str(instance.question_level),
                     str(instance.question_level_id), token, filename])
|
Build the upload path for a question image.
|
def set_imap_cb(self, w, index):
        """Preferences-pane callback: apply the intensity map the user
        selected (`index` picks from imap.get_names())."""
        chosen = imap.get_names()[index]
        self.t_.set(intensity_map=chosen)
|
This callback is invoked when the user selects a new intensity
map from the preferences pane.
|
def move_up(self):
        """
        Select the button above the currently selected one.

        When there is no button above, wrap down to the bottom of the
        menu and select the last button.
        """
        previous = self.current_index
        self.current_index = previous - 1
        self.__wrap_index()  # wraps the new index back into range
        self.__handle_selections(previous, self.current_index)
|
Try to select the button above the currently selected one.
If a button is not there, wrap down to the bottom of the menu and select the last button.
|
def _tree_to_labels(X, single_linkage_tree, min_cluster_size=10,
                    cluster_selection_method='eom',
                    allow_single_cluster=False,
                    match_reference_implementation=False):
    """Extract flat cluster labels and membership probabilities from a
    fitted single-linkage tree.

    Returns the 5-tuple (labels, probabilities, stabilities,
    condensed_tree, single_linkage_tree). Note that `X` is never read;
    it is kept only for interface compatibility.
    """
    condensed = condense_tree(single_linkage_tree, min_cluster_size)
    labels, probabilities, stabilities = get_clusters(
        condensed,
        compute_stability(condensed),
        cluster_selection_method,
        allow_single_cluster,
        match_reference_implementation,
    )
    return (labels, probabilities, stabilities, condensed,
            single_linkage_tree)
|
Converts a pretrained tree and cluster size into a
set of labels and probabilities.
|
def run():
    """Main script entry to handle the arguments given to the script.

    Parses CLI options, validates global settings and loads the database,
    then either performs a server rollback (and exits) or enables the
    server, lists repos, runs the install, and schedules the cron job.
    """
    _parser_options()
    # NOTE(review): `args` appears to be a module-level dict populated by
    # _parser_options() -- confirm in the surrounding module.
    set_verbose(args["verbose"])
    if _check_global_settings():
        _load_db()
    else:
        exit(-1)
    #Check the server configuration against the script arguments passed in.
    _setup_server()
    if args["rollback"]:
        # Rollback mode short-circuits the normal install path entirely.
        _server_rollback()
        okay("The server rollback appears to have been successful.")
        exit(0)
    _server_enable()
    _list_repos()
    _handle_install()
    #This is the workhorse once a successful installation has happened.
    _do_cron()
|
Main script entry to handle the arguments given to the script.
|
def unregister(self, bucket, name):
        """
        Remove the function registered under `name` from `bucket`.

        Raises NotRegistered when no function of that name exists in the
        bucket; an unknown bucket is treated as a programming error.
        """
        assert bucket in self, 'Bucket %s is unknown' % bucket
        registry = self[bucket]
        if name not in registry:
            raise NotRegistered('The function %s is not registered' % name)
        del registry[name]
|
Remove the function from the registry by name
|
def rand_email():
    """Generate a random email address.

    Usage Example::

        >>> rand_email()
        Z4Lljcbdw7m@npa.net
    """
    # Local part starts with a letter, followed by 4-14 alphanumerics.
    first_char = random.choice(string.ascii_letters)
    local_part = first_char + rand_str(
        string.ascii_letters + string.digits, random.randint(4, 14))
    domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
    suffix = random.choice(_all_email_kinds)
    return "{0}@{1}{2}".format(local_part, domain, suffix)
|
Random email.
Usage Example::
>>> rand_email()
Z4Lljcbdw7m@npa.net
|
def has_in_url_path(url, subs):
    """Return True if any of the `subs` strings occurs in the path
    component of `url`.

    :param url: URL to inspect
    :param subs: iterable of substrings to look for in the path
    :rtype: bool
    """
    # Only the path component is searched; query and fragment are ignored.
    path = urlparse.urlsplit(url).path
    # Generator instead of a materialized list lets any() short-circuit.
    return any(sub in path for sub in subs)
|
Test if any of `subs` strings is present in the `url` path.
|
def oortC(self,R,t=0.,nsigma=None,deg=False,phi=0.,
          epsrel=1.e-02,epsabs=1.e-05,
          grid=None,gridpoints=101,returnGrids=False,
          derivRGrid=None,derivphiGrid=None,derivGridpoints=101,
          derivHierarchgrid=False,
          hierarchgrid=False,nlevels=2,integrate_method='dopr54_c'):
        """
        NAME:
           oortC
        PURPOSE:
           calculate the Oort function C at (R,phi,t)
        INPUT:
           R - radius at which to calculate C (can be Quantity)
           phi= azimuth (rad unless deg=True; can be Quantity)
           t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
           nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
           deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
           epsrel, epsabs - scipy.integrate keywords
           grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
           derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid-objects (such as returned by this procedure), use this grid
           gridpoints= number of points to use for the grid in 1D (default=101)
           derivGridpoints= number of points to use for the grid in 1D (default=101)
           returnGrid= if True, return the grid objects (default=False)
           hierarchgrid= if True, use a hierarchical grid (default=False)
           derivHierarchgrid= if True, use a hierarchical grid (default=False)
           nlevels= number of hierarchical levels for the hierarchical grid
           integrate_method= orbit.integrate method argument
        OUTPUT:
           Oort C at R,phi,t
        HISTORY:
           2011-10-16 - Written - Bovy (NYU)
        """
        #NOTE(review): `grid` and the deriv grids must be either True or a
        #pre-built grid object; when left as None, `surfacemass` and the
        #derivative surface masses below stay unbound and raise NameError.
        #Confirm intended usage with callers.
        #First calculate the grids if they are not given
        if isinstance(grid,bool) and grid:
            (surfacemass,grid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                                        nsigma=nsigma,epsrel=epsrel,
                                                        epsabs=epsabs,grid=True,
                                                        gridpoints=gridpoints,
                                                        returnGrid=True,
                                                        hierarchgrid=hierarchgrid,
                                                        nlevels=nlevels,
                                                        integrate_method=integrate_method)
        elif isinstance(grid,evolveddiskdfGrid) or \
                isinstance(grid,evolveddiskdfHierarchicalGrid):
            surfacemass= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                                 nsigma=nsigma,epsrel=epsrel,
                                                 epsabs=epsabs,grid=grid,
                                                 gridpoints=gridpoints,
                                                 returnGrid=False,
                                                 hierarchgrid=hierarchgrid,
                                                 nlevels=nlevels,
                                                 integrate_method=integrate_method)
        if isinstance(derivRGrid,bool) and derivRGrid:
            (dsurfacemassdR,derivRGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                                                 nsigma=nsigma,epsrel=epsrel,
                                                                 epsabs=epsabs,grid=True,
                                                                 gridpoints=derivGridpoints,
                                                                 returnGrid=True,
                                                                 hierarchgrid=derivHierarchgrid,
                                                                 nlevels=nlevels,
                                                                 integrate_method=integrate_method,deriv='R')
        elif isinstance(derivRGrid,evolveddiskdfGrid) or \
                isinstance(derivRGrid,evolveddiskdfHierarchicalGrid):
            dsurfacemassdR= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                                    nsigma=nsigma,epsrel=epsrel,
                                                    epsabs=epsabs,grid=derivRGrid,
                                                    gridpoints=derivGridpoints,
                                                    returnGrid=False,
                                                    hierarchgrid=derivHierarchgrid,
                                                    nlevels=nlevels,
                                                    integrate_method=integrate_method,deriv='R')
        if isinstance(derivphiGrid,bool) and derivphiGrid:
            (dsurfacemassdphi,derivphiGrid)= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                                                     nsigma=nsigma,epsrel=epsrel,
                                                                     epsabs=epsabs,grid=True,
                                                                     gridpoints=derivGridpoints,
                                                                     returnGrid=True,
                                                                     hierarchgrid=derivHierarchgrid,
                                                                     nlevels=nlevels,
                                                                     integrate_method=integrate_method,deriv='phi')
        elif isinstance(derivphiGrid,evolveddiskdfGrid) or \
                isinstance(derivphiGrid,evolveddiskdfHierarchicalGrid):
            dsurfacemassdphi= self.vmomentsurfacemass(R,0,0,deg=deg,t=t,phi=phi,
                                                      nsigma=nsigma,epsrel=epsrel,
                                                      epsabs=epsabs,grid=derivphiGrid,
                                                      gridpoints=derivGridpoints,
                                                      returnGrid=False,
                                                      hierarchgrid=derivHierarchgrid,
                                                      nlevels=nlevels,
                                                      integrate_method=integrate_method,deriv='phi')
        #2C= -meanvR/R-dmeanvT/R/dphi+dmeanvR/dR
        #meanvR
        meanvR= self.meanvR(R,t=t,nsigma=nsigma,deg=deg,phi=phi,
                            epsrel=epsrel,epsabs=epsabs,
                            grid=grid,gridpoints=gridpoints,returnGrid=False,
                            surfacemass=surfacemass,
                            hierarchgrid=hierarchgrid,
                            nlevels=nlevels,integrate_method=integrate_method,
                            use_physical=False)
        #dmeanvT/dphi via the quotient rule on <vT> = moment/surfacemass
        dmeanvTdphi= (self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
                                              nsigma=nsigma,epsrel=epsrel,
                                              epsabs=epsabs,grid=derivphiGrid,
                                              gridpoints=derivGridpoints,
                                              returnGrid=False,
                                              hierarchgrid=derivHierarchgrid,
                                              nlevels=nlevels,
                                              integrate_method=integrate_method,deriv='phi')
                      /surfacemass
                      -self.vmomentsurfacemass(R,0,1,deg=deg,t=t,phi=phi,
                                               nsigma=nsigma,epsrel=epsrel,
                                               epsabs=epsabs,grid=grid,
                                               gridpoints=gridpoints,
                                               returnGrid=False,
                                               hierarchgrid=hierarchgrid,
                                               nlevels=nlevels,
                                               integrate_method=integrate_method)
                      /surfacemass**2.*dsurfacemassdphi)
        #dmeanvR/dR via the quotient rule on <vR> = moment/surfacemass
        dmeanvRdR= (self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
                                            nsigma=nsigma,epsrel=epsrel,
                                            epsabs=epsabs,grid=derivRGrid,
                                            gridpoints=derivGridpoints,
                                            returnGrid=False,
                                            hierarchgrid=derivHierarchgrid,
                                            nlevels=nlevels,
                                            integrate_method=integrate_method,deriv='R')
                    /surfacemass
                    -self.vmomentsurfacemass(R,1,0,deg=deg,t=t,phi=phi,
                                             nsigma=nsigma,epsrel=epsrel,
                                             epsabs=epsabs,grid=grid,
                                             gridpoints=gridpoints,
                                             returnGrid=False,
                                             hierarchgrid=hierarchgrid,
                                             nlevels=nlevels,
                                             integrate_method=integrate_method)
                    /surfacemass**2.*dsurfacemassdR)
        if returnGrids:
            return (0.5*(-meanvR/R-dmeanvTdphi/R+dmeanvRdR),grid,
                    derivRGrid,derivphiGrid)
        else:
            return 0.5*(-meanvR/R-dmeanvTdphi/R+dmeanvRdR)
|
NAME:
oortC
PURPOSE:
calculate the Oort function C at (R,phi,t)
INPUT:
R - radius at which to calculate C (can be Quantity)
phi= azimuth (rad unless deg=True; can be Quantity)
t= time at which to evaluate the DF (can be a list or ndarray; if this is the case, list needs to be in descending order and equally spaced) (can be Quantity)
nsigma - number of sigma to integrate the velocities over (based on an estimate, so be generous)
deg= azimuth is in degree (default=False); do not set this when giving phi as a Quantity
epsrel, epsabs - scipy.integrate keywords
grid= if set to True, build a grid and use that to evaluate integrals; if set to a grid-objects (such as returned by this procedure), use this grid
derivRGrid, derivphiGrid= if set to True, build a grid and use that to evaluate integrals of the derivatives of the DF; if set to a grid-objects (such as returned by this procedure), use this grid
gridpoints= number of points to use for the grid in 1D (default=101)
derivGridpoints= number of points to use for the grid in 1D (default=101)
returnGrid= if True, return the grid objects (default=False)
hierarchgrid= if True, use a hierarchical grid (default=False)
derivHierarchgrid= if True, use a hierarchical grid (default=False)
nlevels= number of hierarchical levels for the hierarchical grid
integrate_method= orbit.integrate method argument
OUTPUT:
Oort C at R,phi,t
HISTORY:
2011-10-16 - Written - Bovy (NYU)
|
def get_trades(self, pair="SWTH_NEO", start_time=None, end_time=None, limit=5000):
        """
        Fetch a list of filled trades matching the requested parameters.

        Execution of this function is as follows::

            get_trades(pair="SWTH_NEO", limit=3)

        The expected return result for this function is as follows::

            [{
                'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
                'fill_amount': 100000000,
                'take_amount': 100000000,
                'event_time': '2018-08-04T15:00:12.634Z',
                'is_buy': True
            }, {
                'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
                'fill_amount': 47833882,
                'take_amount': 97950000000,
                'event_time': '2018-08-03T02:44:47.706Z',
                'is_buy': True
            }, {
                'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
                'fill_amount': 1001117,
                'take_amount': 2050000000,
                'event_time': '2018-08-03T02:32:50.703Z',
                'is_buy': True
            }]

        :param pair: The trading pair that will be used to request filled trades.
        :type pair: str
        :param start_time: Only return trades after this time (in epoch seconds).
        :type start_time: int
        :param end_time: Only return trades before this time (in epoch seconds).
        :type end_time: int
        :param limit: The number of filled trades to return. Min: 1, Max: 10000, Default: 5000
        :type limit: int
        :return: List of dictionaries consisting of filled orders that meet requirements of the parameters passed to it.
        """
        if not 1 <= limit <= 10000:
            raise ValueError("Attempting to request more trades than allowed by the API.")
        api_params = {
            "blockchain": self.blockchain,
            "pair": pair,
            "contract_hash": self.contract_hash
        }
        # Optional time-window filters are only sent when supplied.
        for key, value in (("from", start_time), ("to", end_time)):
            if value is not None:
                api_params[key] = value
        # The API default is 5000, so only send a non-default limit.
        if limit != 5000:
            api_params['limit'] = limit
        return self.request.get(path='/trades', params=api_params)
|
Function to fetch a list of filled trades for the parameters requested.
Execution of this function is as follows::
get_trades(pair="SWTH_NEO", limit=3)
The expected return result for this function is as follows::
[{
'id': '15bb16e2-7a80-4de1-bb59-bcaff877dee0',
'fill_amount': 100000000,
'take_amount': 100000000,
'event_time': '2018-08-04T15:00:12.634Z',
'is_buy': True
}, {
'id': 'b6f9e530-60ff-46ff-9a71-362097a2025e',
'fill_amount': 47833882,
'take_amount': 97950000000,
'event_time': '2018-08-03T02:44:47.706Z',
'is_buy': True
}, {
'id': '7a308ccc-b7f5-46a3-bf6b-752ab076cc9f',
'fill_amount': 1001117,
'take_amount': 2050000000,
'event_time': '2018-08-03T02:32:50.703Z',
'is_buy': True
}]
:param pair: The trading pair that will be used to request filled trades.
:type pair: str
:param start_time: Only return trades after this time (in epoch seconds).
:type start_time: int
:param end_time: Only return trades before this time (in epoch seconds).
:type end_time: int
:param limit: The number of filled trades to return. Min: 1, Max: 10000, Default: 5000
:type limit: int
:return: List of dictionaries consisting of filled orders that meet requirements of the parameters passed to it.
|
def owner_profile(self) -> Profile:
        """:class:`Profile` instance of the story item's owner."""
        # Lazily resolve and cache the owner profile on first access.
        if self._owner_profile:
            return self._owner_profile
        self._owner_profile = Profile.from_id(self._context, self._node['owner']['id'])
        return self._owner_profile
|
:class:`Profile` instance of the story item's owner.
|
def float_range(start=0, stop=None, step=1):
    """
    Much like the built-in function range, but accepts floats.

    As with range, a single argument is interpreted as the stop value
    (counting up from 0.0).

    >>> tuple(float_range(0, 9, 1.5))
    (0.0, 1.5, 3.0, 4.5, 6.0, 7.5)
    >>> tuple(float_range(3))
    (0.0, 1.0, 2.0)
    """
    if stop is None:
        # Mirror range()'s single-argument form. (Previously `stop` stayed
        # None and the `start < stop` comparison raised TypeError on
        # Python 3.)
        start, stop = 0, start
    current = float(start)
    while current < stop:
        yield current
        current += step
|
Much like the built-in function range, but accepts floats
>>> tuple(float_range(0, 9, 1.5))
(0.0, 1.5, 3.0, 4.5, 6.0, 7.5)
|
def most_free_pixel(self):
        """ Locate the black pixel with the largest distance from the white
        pixels.

        Returns
        -------
        :obj:`numpy.ndarray`
            2-vector (row, col) of the most free pixel
        """
        distances = self.to_distance_im()
        # argmax scans in row-major order, so ties resolve to the first
        # maximal pixel -- the same one np.where would report first.
        row, col = np.unravel_index(np.argmax(distances), distances.shape)
        return np.array([row, col])
|
Find the black pixel with the largest distance from the white pixels.
Returns
-------
:obj:`numpy.ndarray`
2-vector containing the most free pixel
|
def connect_checkable_button(instance, prop, widget):
    """
    Connect a boolean callback property with a Qt button widget.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``setChecked``
        method and the ``toggled`` signal.
    """
    # Property -> widget: keep the check state in sync with the property.
    add_callback(instance, prop, widget.setChecked)
    # Widget -> property: user toggles write straight back to the property.
    widget.toggled.connect(partial(setattr, instance, prop))
    # Initialize the widget from the current value; None shows as unchecked.
    widget.setChecked(getattr(instance, prop) or False)
|
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
|
def get_output(script):
    """Return the output recorded by the external shell logger for
    `script`, or None when the logger has no matching entry."""
    with logs.debug_time(u'Read output from external shell logger'):
        for entry in _get_last_n(const.SHELL_LOGGER_LIMIT):
            if entry['command'] == script:
                recorded_lines = _get_output_lines(entry['output'])
                return '\n'.join(recorded_lines).strip()
        # No recorded command matched the requested script.
        logs.warn("Output isn't available in shell logger")
        return None
|
Gets command output from shell logger.
|
def itershuffle(iterable, bufsize=1000):
    """Shuffle an iterator. This works by holding `bufsize` items back
    and yielding them sometime later. Obviously, this is not unbiased –
    but should be good enough for batching. Larger bufsize means less bias.

    From https://gist.github.com/andres-erbsen/1307752

    iterable (iterable): Iterator to shuffle.
    bufsize (int): Items to hold back.
    YIELDS (iterable): The shuffled iterator.
    """
    iterable = iter(iterable)
    buf = []
    try:
        while True:
            # Pull a random number of fresh items into the buffer...
            for i in range(random.randint(1, bufsize - len(buf))):
                buf.append(next(iterable))
            random.shuffle(buf)
            # ...then release a random number of them.
            for i in range(random.randint(1, bufsize)):
                if buf:
                    yield buf.pop()
                else:
                    break
    except StopIteration:
        # Source exhausted: flush whatever is still buffered.
        random.shuffle(buf)
        while buf:
            yield buf.pop()
        # BUG FIX: `raise StopIteration` inside a generator becomes
        # RuntimeError under PEP 479 (Python 3.7+); a plain return ends
        # the generator cleanly.
        return
|
Shuffle an iterator. This works by holding `bufsize` items back
and yielding them sometime later. Obviously, this is not unbiased –
but should be good enough for batching. Larger bufsize means less bias.
From https://gist.github.com/andres-erbsen/1307752
iterable (iterable): Iterator to shuffle.
bufsize (int): Items to hold back.
YIELDS (iterable): The shuffled iterator.
|
def load_file(self, filepath, chname=None, wait=True,
              create_channel=True, display_image=True,
              image_loader=None):
        """Load a file and display it.

        Parameters
        ----------
        filepath : str
            The path of the file to load (must reference a local file).
        chname : str, optional
            The name of the channel in which to display the image.
        wait : bool, optional
            If `True`, then wait for the file to be displayed before returning
            (synchronous behavior).
        create_channel : bool, optional
            Create channel.
        display_image : bool, optional
            If not `False`, then will load the image.
        image_loader : func, optional
            A special image loader, if provided.

        Returns
        -------
        image
            The image object that was loaded, or None if the file was not
            locally accessible or the loader raised.
        """
        # Resolve the target channel: default to the current one, creating
        # a named channel on demand when allowed.
        if not chname:
            channel = self.get_current_channel()
        else:
            if not self.has_channel(chname) and create_channel:
                self.gui_call(self.add_channel, chname)
            channel = self.get_channel(chname)
            chname = channel.name
        if image_loader is None:
            image_loader = self.load_image
        cache_dir = self.settings.get('download_folder', self.tmpdir)
        info = iohelper.get_fileinfo(filepath, cache_dir=cache_dir)
        # check that file is locally accessible
        if not info.ondisk:
            errmsg = "File must be locally loadable: %s" % (filepath)
            self.gui_do(self.show_error, errmsg)
            return
        filepath = info.filepath
        kwargs = {}
        idx = None
        if info.numhdu is not None:
            # Forward the HDU index to loaders that understand it.
            kwargs['idx'] = info.numhdu
        try:
            image = image_loader(filepath, **kwargs)
        except Exception as e:
            # Report the failure to the GUI rather than propagating it.
            errmsg = "Failed to load '%s': %s" % (filepath, str(e))
            self.gui_do(self.show_error, errmsg)
            return
        future = Future.Future()
        future.freeze(image_loader, filepath, **kwargs)
        # Save a future for this image to reload it later if we
        # have to remove it from memory
        image.set(loader=image_loader, image_future=future)
        if image.get('path', None) is None:
            image.set(path=filepath)
        # Assign a name to the image if the loader did not.
        name = image.get('name', None)
        if name is None:
            name = iohelper.name_image_from_path(filepath, idx=idx)
            image.set(name=name)
        if display_image:
            # Display image. If the wait parameter is False then don't wait
            # for the image to load into the viewer
            if wait:
                self.gui_call(self.add_image, name, image, chname=chname)
            else:
                self.gui_do(self.add_image, name, image, chname=chname)
        else:
            self.gui_do(self.bulk_add_image, name, image, chname)
        # Return the image
        return image
|
Load a file and display it.
Parameters
----------
filepath : str
The path of the file to load (must reference a local file).
chname : str, optional
The name of the channel in which to display the image.
wait : bool, optional
If `True`, then wait for the file to be displayed before returning
(synchronous behavior).
create_channel : bool, optional
Create channel.
display_image : bool, optional
If not `False`, then will load the image.
image_loader : func, optional
A special image loader, if provided.
Returns
-------
image
The image object that was loaded.
|
def sendRequest(self, name, args):
    """Send a JSON-RPC style request to the peer and return its response event."""
    response_event, request_id = self.newResponseEvent()
    self.sendMessage({"id": request_id, "method": name, "params": args})
    return response_event
|
sends a request to the peer
|
def tseries_between(self, tstart=None, tend=None):
    """Return time series data between requested times.

    Args:
        tstart (float): starting time. Set to None to start at the
            beginning of available data.
        tend (float): ending time. Set to None to stop at the end of
            available data.
    Returns:
        :class:`pandas.DataFrame`: slice of :attr:`tseries`.
    """
    if self.tseries is None:
        return None
    nrows = self.tseries.shape[0]
    # Bisect for the first row whose time is >= tstart.
    if tstart is None:
        ibegin = 0
    else:
        lo, hi = 0, nrows - 1
        while hi - lo > 1:
            mid = lo + (hi - lo) // 2
            if self.tseries.iloc[mid]['t'] >= tstart:
                hi = mid
            else:
                lo = mid
        ibegin = hi
    # Bisect for one past the last row whose time is <= tend.
    if tend is None:
        istop = None
    else:
        lo, hi = 0, nrows - 1
        while hi - lo > 1:
            mid = lo + (hi - lo) // 2
            if self.tseries.iloc[mid]['t'] > tend:
                hi = mid
            else:
                lo = mid
        istop = lo + 1
    return self.tseries.iloc[ibegin:istop]
|
Return time series data between requested times.
Args:
tstart (float): starting time. Set to None to start at the
beginning of available data.
tend (float): ending time. Set to None to stop at the end of
available data.
Returns:
:class:`pandas.DataFrame`: slice of :attr:`tseries`.
|
def get_by_id_or_404(self, id, **kwargs):
    """Return the instance with the given id, or raise ``Http404`` if none is found."""
    instance = self.get_by_id(id=id, **kwargs)
    if not instance:
        raise Http404
    return instance
|
Gets an instance by id, or raises a 404 if one isn't found.
|
def write_value(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Write the value of the Boolean object to the output stream.

    Args:
        ostream (Stream): A buffer to contain the encoded bytes of the
            value of a Boolean object. Usually a BytearrayStream object.
            Required.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.
    """
    try:
        # Booleans are serialized as a big-endian unsigned 64-bit integer.
        encoded = pack('!Q', self.value)
        ostream.write(encoded)
    except Exception:
        self.logger.error("Error writing boolean value to buffer")
        raise
|
Write the value of the Boolean object to the output stream.
Args:
ostream (Stream): A buffer to contain the encoded bytes of the
value of a Boolean object. Usually a BytearrayStream object.
Required.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
|
def _make_sparse_blocks(self, variable, records, data):
'''
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Place consecutive physical records into a single block
If all records are physical, this calls _make_sparse_blocks_with_physical
If any records are virtual, this calls _make_sparse_blocks_with_virtual
Parameters:
variable : dict
the variable dictionary, with 'Num_Dims', 'Dim_Sizes',
'Data_Type', 'Num_Elements' key words, typically
returned from a call to cdf read's varinq('variable',
expand=True)
records : list
a list of physical records
data : varies
bytes array, numpy.ndarray or list of str form with all physical
data or embedded virtual data (returned from call to
varget('variable') for a sparse variable)
Returns:
sparse_blocks: list
A list of sparse records/data in the form
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
'''
if (isinstance(data, dict)):
try:
data = data['Data']
except:
print('Unknown dictionary.... Skip')
return None
if (isinstance(data, np.ndarray)):
if (len(records) == len(data)):
# All are physical data
return self._make_sparse_blocks_with_physical(variable, records,
data)
elif (len(records) < len(data)):
# There are some virtual data
return self._make_sparse_blocks_with_virtual(variable, records,
data)
else:
print('Invalid sparse data... ',
'Less data than the specified records... Skip')
elif (isinstance(data, bytes)):
record_length = len(records)
for z in range(0, variable['Num_Dims']):
record_length = record_length * variable['Dim_Sizes'][z]
if (record_length == len(data)):
# All are physical data
return self._make_sparse_blocks_with_physical(variable, records,
data)
elif (record_length < len(data)):
# There are some virtual data
return self._make_sparse_blocks_with_virtual(variable, records,
data)
else:
print('Invalid sparse data... ',
'Less data than the specified records... Skip')
elif (isinstance(data, list)):
if (isinstance(data[0], list)):
if not (all(isinstance(el, str) for el in data[0])):
print('Can not handle list data.... ',
'Only support list of str... Skip')
return
else:
if not (all(isinstance(el, str) for el in data)):
print('Can not handle list data.... ',
'Only support list of str... Skip')
return
record_length = len(records)
# for z in range(0, variable['Num_Dims']):
# record_length = record_length * variable['Dim_Sizes'][z]
if (record_length == len(data)):
# All are physical data
return self._make_sparse_blocks_with_physical(variable, records,
data)
elif (record_length < len(data)):
# There are some virtual data
return self._make_sparse_blocks_with_virtual(variable, records,
data)
else:
print('Invalid sparse data... ',
'Less data than the specified records... Skip')
else:
print('Invalid sparse data... ',
'Less data than the specified records... Skip')
return
|
Handles the data for the variable with sparse records.
Organizes the physical record numbers into blocks in a list:
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
Place consecutive physical records into a single block
If all records are physical, this calls _make_sparse_blocks_with_physical
If any records are virtual, this calls _make_sparse_blocks_with_virtual
Parameters:
variable : dict
the variable dictionary, with 'Num_Dims', 'Dim_Sizes',
'Data_Type', 'Num_Elements' key words, typically
returned from a call to cdf read's varinq('variable',
expand=True)
records : list
a list of physical records
data : varies
bytes array, numpy.ndarray or list of str form with all physical
data or embedded virtual data (returned from call to
varget('variable') for a sparse variable)
Returns:
sparse_blocks: list
A list of sparse records/data in the form
[[start_rec1,end_rec1,data_1], [start_rec2,enc_rec2,data_2], ...]
|
def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
    r"""
    Produce one or many child process(s) bound to this context.

    :param \*targets:
        Passed on to the :py:class:`Process` constructor, one at a time.
    :param count:
        The number of processes to spawn for each item in ``targets``.
    :param \*\*process_kwargs:
        .. include:: /api/context/params/process_kwargs.rst
    :return:
        A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
    """
    if not targets:
        # No target given: behave as a decorator factory and defer
        # spawning until the decorated callable is supplied.
        def decorator(target: Callable):
            return self.spawn(target, count=count, **process_kwargs)
        return decorator
    if len(targets) * count == 1:
        # Single process requested: return it directly, not wrapped.
        return self._process(targets[0], **process_kwargs)
    produced = (
        self._process(target, **process_kwargs)
        for _ in range(count)
        for target in targets
    )
    return ProcessList(produced)
|
r"""
Produce one or many child process(s) bound to this context.
:param \*targets:
Passed on to the :py:class:`Process` constructor, one at a time.
:param count:
The number of processes to spawn for each item in ``targets``.
:param \*\*process_kwargs:
.. include:: /api/context/params/process_kwargs.rst
:return:
A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
|
def get_marginal_topic_distrib(doc_topic_distrib, doc_lengths):
    """
    Return marginal topic distribution p(T) (topic proportions) given the document-topic distribution (theta)
    `doc_topic_distrib` and the document lengths `doc_lengths`. The latter can be calculated with `get_doc_lengths()`.
    """
    # Weight each document's topic proportions by its length, then sum
    # over documents and renormalize to obtain a probability vector.
    weighted = doc_topic_distrib.T * doc_lengths
    topic_totals = weighted.sum(axis=1)
    return topic_totals / topic_totals.sum()
|
Return marginal topic distribution p(T) (topic proportions) given the document-topic distribution (theta)
`doc_topic_distrib` and the document lengths `doc_lengths`. The latter can be calculated with `get_doc_lengths()`.
|
def recursive_getattr(obj: Any, attr: str, *args) -> Any:
    """ Recursive ``getattr``.

    This can be used as a drop in for the standard ``getattr(...)``. Credit to:
    https://stackoverflow.com/a/31174427

    Args:
        obj: Object to retrieve the attribute from.
        attr: Name of the attribute, with each successive attribute separated by a ".".
    Returns:
        The requested attribute. (Same as ``getattr``).
    Raises:
        AttributeError: If the attribute was not found and no default was provided. (Same as ``getattr``).
    """
    # Walk the dotted path one component at a time; any optional default
    # in *args is forwarded to each getattr call, just like the original.
    current = obj
    for name in attr.split('.'):
        current = getattr(current, name, *args)
    return current
|
Recursive ``getattr``.
This can be used as a drop in for the standard ``getattr(...)``. Credit to:
https://stackoverflow.com/a/31174427
Args:
obj: Object to retrieve the attribute from.
attr: Name of the attribute, with each successive attribute separated by a ".".
Returns:
The requested attribute. (Same as ``getattr``).
Raises:
AttributeError: If the attribute was not found and no default was provided. (Same as ``getattr``).
|
def draw_confusion_matrix(matrix):
    '''Draw confusion matrix for MNIST.'''
    figure = tfmpl.create_figure(figsize=(7, 7))
    axes = figure.add_subplot(111)
    axes.set_title('Confusion matrix for MNIST classification')
    labels = ['Digit ' + str(digit) for digit in range(10)]
    tfmpl.plots.confusion_matrix.draw(
        axes, matrix,
        axis_labels=labels,
        normalize=True
    )
    return figure
|
Draw confusion matrix for MNIST.
|
def import_ecdsakey_from_private_pem(pem, scheme='ecdsa-sha2-nistp256', password=None):
  """
  <Purpose>
    Import the private ECDSA key stored in 'pem', and generate its public key
    (which will also be included in the returned ECDSA key object). In addition,
    a keyid identifier for the ECDSA key is generated. The object returned
    conforms to:
    {'keytype': 'ecdsa-sha2-nistp256',
     'scheme': 'ecdsa-sha2-nistp256',
     'keyid': keyid,
     'keyval': {'public': '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
                'private': '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}
    The private key is a string in PEM format.

    >>> ecdsa_key = generate_ecdsa_key()
    >>> private_pem = ecdsa_key['keyval']['private']
    >>> ecdsa_key = import_ecdsakey_from_private_pem(private_pem)
    >>> securesystemslib.formats.ECDSAKEY_SCHEMA.matches(ecdsa_key)
    True

  <Arguments>
    pem:
      A string in PEM format. The private key is extracted and returned in
      an ecdsakey object.

    scheme:
      The signature scheme used by the imported key.

    password: (optional)
      The password, or passphrase, to decrypt the private part of the ECDSA
      key if it is encrypted. 'password' is not used directly as the encryption
      key, a stronger encryption key is derived from it.

  <Exceptions>
    securesystemslib.exceptions.FormatError, if the arguments are improperly
    formatted.

    securesystemslib.exceptions.UnsupportedAlgorithmError, if 'pem' specifies
    an unsupported key type.

  <Side Effects>
    None.

  <Returns>
    A dictionary containing the ECDSA keys and other identifying information.
    Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
  """
  # Validate the argument formats up front; each check raises FormatError
  # on a mismatch.
  securesystemslib.formats.PEMECDSA_SCHEMA.check_match(pem)
  securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)

  if password is not None:
    securesystemslib.formats.PASSWORD_SCHEMA.check_match(password)
  else:
    logger.debug('The password/passphrase is unset. The PEM is expected'
        ' to be unencrypted.')

  keytype = 'ecdsa-sha2-nistp256'
  public, private = \
      securesystemslib.ecdsa_keys.create_ecdsa_public_and_private_from_pem(
          pem, password)

  # The keyid is derived from the public half only; '\r\n' (e.g., Windows)
  # newlines are normalized to '\n' so the identifier is platform-stable.
  key_value = {'public': public.replace('\r\n', '\n'),
               'private': ''}
  keyid = _get_keyid(keytype, scheme, key_value)

  # Only now attach the private key material to the key value.
  key_value['private'] = private

  # "keyid_hash_algorithms" lets equal ECDSA keys with different keyids be
  # associated using the supported keyid hash algorithms.
  return {
      'keytype': keytype,
      'scheme': scheme,
      'keyid': keyid,
      'keyval': key_value,
      'keyid_hash_algorithms': securesystemslib.settings.HASH_ALGORITHMS,
  }
|
<Purpose>
Import the private ECDSA key stored in 'pem', and generate its public key
(which will also be included in the returned ECDSA key object). In addition,
a keyid identifier for the ECDSA key is generated. The object returned
conforms to:
{'keytype': 'ecdsa-sha2-nistp256',
'scheme': 'ecdsa-sha2-nistp256',
'keyid': keyid,
'keyval': {'public': '-----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----',
'private': '-----BEGIN EC PRIVATE KEY----- ... -----END EC PRIVATE KEY-----'}}
The private key is a string in PEM format.
>>> ecdsa_key = generate_ecdsa_key()
>>> private_pem = ecdsa_key['keyval']['private']
>>> ecdsa_key = import_ecdsakey_from_private_pem(private_pem)
>>> securesystemslib.formats.ECDSAKEY_SCHEMA.matches(ecdsa_key)
True
<Arguments>
pem:
A string in PEM format. The private key is extracted and returned in
an ecdsakey object.
scheme:
The signature scheme used by the imported key.
password: (optional)
The password, or passphrase, to decrypt the private part of the ECDSA
key if it is encrypted. 'password' is not used directly as the encryption
key, a stronger encryption key is derived from it.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'pem' specifies
an unsupported key type.
<Side Effects>
None.
<Returns>
A dictionary containing the ECDSA keys and other identifying information.
Conforms to 'securesystemslib.formats.ECDSAKEY_SCHEMA'.
|
def _deserialize_default(cls, cls_target, obj_raw):
"""
:type cls_target: T|type
:type obj_raw: int|str|bool|float|list|dict|None
:rtype: T
"""
if cls._is_deserialized(cls_target, obj_raw):
return obj_raw
elif type(obj_raw) == dict:
return cls._deserialize_dict(cls_target, obj_raw)
else:
return cls_target(obj_raw)
|
:type cls_target: T|type
:type obj_raw: int|str|bool|float|list|dict|None
:rtype: T
|
def get_data_view(self, data_view_id):
    """
    Retrieves a summary of information for a given data view
        - view id
        - name
        - description
        - columns

    :param data_view_id: The ID number of the data view to which the
        run belongs, as a string
    :type data_view_id: str
    """
    url = routes.get_data_view(data_view_id)
    response = self._get(url).json()
    result = response["data"]["data_view"]
    # Materialize the nested dataset and column summaries into objects.
    datasets_list = [
        Dataset(
            name=dataset["name"],
            id=dataset["id"],
            description=dataset["description"],
        )
        for dataset in result["datasets"]
    ]
    columns_list = [
        ColumnFactory.from_dict(column) for column in result["columns"]
    ]
    return DataView(
        view_id=data_view_id,
        name=result["name"],
        description=result["description"],
        datasets=datasets_list,
        columns=columns_list,
    )
|
Retrieves a summary of information for a given data view
- view id
- name
- description
- columns
:param data_view_id: The ID number of the data view to which the
run belongs, as a string
:type data_view_id: str
|
def get_length(self):
    """
    Calculate and return the length of the line as a sum of lengths
    of all its segments.

    :returns:
        Total length in km.
    """
    # Sum the distance between every pair of consecutive points.
    total = 0
    for previous, current in zip(self.points, self.points[1:]):
        total += current.distance(previous)
    return total
|
Calculate and return the length of the line as a sum of lengths
of all its segments.
:returns:
Total length in km.
|
def parametrize(self, operator, params):
    """
    Return a parser that parses an operator with parameters.
    """
    # Case-insensitive operator keyword followed by its parameter parser.
    keyword = CaselessKeyword(operator, identChars=alphanums)
    return keyword + self.parameter(params)
|
Return a parser that parses an operator with parameters.
|
def create(self, roomId=None, toPersonId=None, toPersonEmail=None,
           text=None, markdown=None, files=None, **request_parameters):
    """Post a message, and optionally a attachment, to a room.

    The files parameter is a list, which accepts multiple values to allow
    for future expansion, but currently only one file may be included with
    the message.

    Args:
        roomId(basestring): The room ID.
        toPersonId(basestring): The ID of the recipient when sending a
            private 1:1 message.
        toPersonEmail(basestring): The email address of the recipient when
            sending a private 1:1 message.
        text(basestring): The message, in plain text. If `markdown` is
            specified this parameter may be optionally used to provide
            alternate text for UI clients that do not support rich text.
        markdown(basestring): The message, in markdown format.
        files(`list`): A list of public URL(s) or local path(s) to files to
            be posted into the room. Only one file is allowed per message.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        Message: A Message object with the details of the created message.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
        ValueError: If the files parameter is a list of length > 1, or if
            the string in the list (the only element in the list) does not
            contain a valid URL or path to a local file.
    """
    # Validate parameter types before building the request.
    check_type(roomId, basestring)
    check_type(toPersonId, basestring)
    check_type(toPersonEmail, basestring)
    check_type(text, basestring)
    check_type(markdown, basestring)
    check_type(files, list)
    if files:
        if len(files) != 1:
            raise ValueError("The length of the `files` list is greater "
                             "than one (1). The files parameter is a "
                             "list, which accepts multiple values to "
                             "allow for future expansion, but currently "
                             "only one file may be included with the "
                             "message.")
        check_type(files[0], basestring)

    payload = dict_from_items_with_values(
        request_parameters,
        roomId=roomId,
        toPersonId=toPersonId,
        toPersonEmail=toPersonEmail,
        text=text,
        markdown=markdown,
        files=files,
    )

    # API request
    if not files or is_web_url(files[0]):
        # No attachment, or the attachment is a URL: plain JSON post.
        response_json = self._session.post(API_ENDPOINT, json=payload)
    elif is_local_file(files[0]):
        # Local attachment: upload via a multipart MIME post.
        try:
            payload['files'] = open_local_file(files[0])
            encoder = MultipartEncoder(payload)
            headers = {'Content-type': encoder.content_type}
            response_json = self._session.post(API_ENDPOINT,
                                               headers=headers,
                                               data=encoder)
        finally:
            payload['files'].file_object.close()
    else:
        raise ValueError("The `files` parameter does not contain a vaild "
                         "URL or path to a local file.")

    # Return a message object created from the response JSON data
    return self._object_factory(OBJECT_TYPE, response_json)
|
Post a message, and optionally a attachment, to a room.
The files parameter is a list, which accepts multiple values to allow
for future expansion, but currently only one file may be included with
the message.
Args:
roomId(basestring): The room ID.
toPersonId(basestring): The ID of the recipient when sending a
private 1:1 message.
toPersonEmail(basestring): The email address of the recipient when
sending a private 1:1 message.
text(basestring): The message, in plain text. If `markdown` is
specified this parameter may be optionally used to provide
alternate text for UI clients that do not support rich text.
markdown(basestring): The message, in markdown format.
files(`list`): A list of public URL(s) or local path(s) to files to
be posted into the room. Only one file is allowed per message.
**request_parameters: Additional request parameters (provides
support for parameters that may be added in the future).
Returns:
Message: A Message object with the details of the created message.
Raises:
TypeError: If the parameter types are incorrect.
ApiError: If the Webex Teams cloud returns an error.
ValueError: If the files parameter is a list of length > 1, or if
the string in the list (the only element in the list) does not
contain a valid URL or path to a local file.
|
def strip_prompt(self, *args, **kwargs):
    """Strip the trailing router prompt from the output."""
    stripped = super(FlexvnfSSH, self).strip_prompt(*args, **kwargs)
    return self.strip_context_items(stripped)
|
Strip the trailing router prompt from the output.
|
def generate_mix2pl_dataset(n, m, useDirichlet=True):
    """
    Description:
        Generate a mixture of 2 Plackett-Luce models dataset
        and return the parameters and votes.
    Parameters:
        n: number of votes to generate
        m: number of alternatives
        useDirichlet: boolean flag to use the Dirichlet distribution
    Returns:
        (params, votes) where params is the concatenation
        [alpha, gamma1 (m values), gamma2 (m values)].
    """
    # Mixing proportion of the first component.
    alpha = np.random.rand()
    if useDirichlet:
        # Dirichlet draws already sum to 1 — no extra normalization needed.
        gamma1 = np.random.dirichlet(np.ones(m))
        gamma2 = np.random.dirichlet(np.ones(m))
    else:
        gamma1 = np.random.rand(m)
        gamma1 /= np.sum(gamma1)  # normalize sum to 1.0 (not needed for Dirichlet)
        gamma2 = np.random.rand(m)
        # BUG FIX: previously divided by np.sum(gamma1), which left gamma2
        # unnormalized (its entries did not sum to 1).
        gamma2 /= np.sum(gamma2)
    votes = []
    for _ in range(n):
        # Choose which mixture component generates this vote.
        draw = np.random.rand()
        if draw <= alpha:
            vote = draw_pl_vote(m, gamma1)
        else:  # draw > alpha
            vote = draw_pl_vote(m, gamma2)
        votes.append(vote)
    params = np.hstack((alpha, gamma1, gamma2))
    return (params, votes)
|
Description:
Generate a mixture of 2 Plackett-Luce models dataset
and return the parameters and votes.
Parameters:
n: number of votes to generate
m: number of alternatives
useDirichlet: boolean flag to use the Dirichlet distribution
|
def get_data_files(dirname):
    """Return data files in directory *dirname*"""
    # Recursively collect every file path below *dirname*.
    return [osp.join(dirpath, fname)
            for dirpath, _dirnames, filenames in os.walk(dirname)
            for fname in filenames]
|
Return data files in directory *dirname*
|
def get_staff_updater(cls):
    """
    Return a post-save signal handler that keeps ``cls`` (a subclass of
    ``BaseStaffMember``) in sync with changes to ``User`` instances.
    """
    from django.core.exceptions import ImproperlyConfigured
    if not issubclass(cls, BaseStaffMember):
        raise ImproperlyConfigured("%s is not a sublass of StaffMember" % cls)
    def update_staff_member(sender, instance, created, *args, **kwargs):
        """
        Update the Staff Member instance when a User object is changed.
        """
        # Staff user with no StaffMember record yet: create an active one.
        if instance.is_staff and not cls.objects.filter(user=instance).count():
            staffmember = cls(
                user=instance,
                is_active=True)
            staffmember.save()
        elif instance.is_staff:
            # Staff user with an existing record: reactivate it and mirror
            # any name/email changes from the User.
            staffmembers = cls.objects.filter(user=instance)
            if len(staffmembers):
                staffmember = staffmembers[0]
                staffmember.is_active = True
                # The slug is rebuilt from the user's full name whenever
                # either name component changes.
                if instance.first_name != staffmember.first_name:
                    staffmember.first_name = instance.first_name
                    staffmember.slug = slugify('%s %s' % (
                        instance.first_name, instance.last_name))
                if instance.last_name != staffmember.last_name:
                    staffmember.last_name = instance.last_name
                    staffmember.slug = slugify('%s %s' % (
                        instance.first_name, instance.last_name))
                if instance.email != staffmember.email:
                    staffmember.email = instance.email
                staffmember.save()
        elif not instance.is_staff:
            # Make sure we deactivate any staff members associated with this user
            for staffmember in cls.objects.filter(user=instance):
                staffmember.is_active = False
                staffmember.save()
        # NOTE(review): transaction.commit_unless_managed() was removed in
        # Django 1.8 — confirm the targeted Django version still provides it.
        from django.db import transaction
        transaction.commit_unless_managed()
    return update_staff_member
|
This returns a function for passing to a signal.
|
def account_unfollow(self, id):
    """
    Unfollow a user.

    Returns a `relationship dict`_ containing the updated relationship to the user.
    """
    account_id = self.__unpack_id(id)
    endpoint = '/api/v1/accounts/{0}/unfollow'.format(str(account_id))
    return self.__api_request('POST', endpoint)
|
Unfollow a user.
Returns a `relationship dict`_ containing the updated relationship to the user.
|
def find_video_detail_by_id(self, video_id, ext=None):
    """doc: http://cloud.youku.com/docs?id=46
    """
    url = 'https://api.youku.com/videos/show.json'
    params = {
        'client_id': self.client_id,
        'video_id': video_id,
    }
    if ext:
        # Optional extra fields requested from the API.
        params['ext'] = ext
    response = requests.get(url, params=params)
    check_error(response)
    return response.json()
|
doc: http://cloud.youku.com/docs?id=46
|
def unobserve_all_properties(self, handler):
    """Unregister a property observer from *all* observed properties.

    Iterates over a snapshot of the observed property names so that
    ``unobserve_property`` may safely remove entries from
    ``self._property_handlers`` while the loop runs (iterating the dict
    directly would raise RuntimeError if its size changed).
    """
    for name in list(self._property_handlers):
        self.unobserve_property(name, handler)
|
Unregister a property observer from *all* observed properties.
|
def snapshots(self, xml_bytes):
    """Parse the XML returned by the C{DescribeSnapshots} function.

    @param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root
        element.
    @return: A list of L{Snapshot} instances.

    TODO: ownersSet, restorableBySet, ownerId, volumeSize, description,
        ownerAlias.
    """
    root = XML(xml_bytes)
    result = []
    for snapshot_data in root.find("snapshotSet"):
        snapshot_id = snapshot_data.findtext("snapshotId")
        volume_id = snapshot_data.findtext("volumeId")
        status = snapshot_data.findtext("status")
        start_time = snapshot_data.findtext("startTime")
        # Only the first 19 chars are parsed, dropping any fractional
        # seconds / timezone suffix.
        start_time = datetime.strptime(
            start_time[:19], "%Y-%m-%dT%H:%M:%S")
        # A snapshot may omit <progress> entirely; findtext then returns
        # None and the original None[:-1] raised TypeError. Treat a
        # missing or empty element as 0%.
        progress_text = snapshot_data.findtext("progress") or "0%"
        progress = float(progress_text[:-1] or "0") / 100.
        snapshot = model.Snapshot(
            snapshot_id, volume_id, status, start_time, progress)
        result.append(snapshot)
    return result
|
Parse the XML returned by the C{DescribeSnapshots} function.
@param xml_bytes: XML bytes with a C{DescribeSnapshotsResponse} root
element.
@return: A list of L{Snapshot} instances.
TODO: ownersSet, restorableBySet, ownerId, volumeSize, description,
ownerAlias.
|
def my_application(api):
    """An example application.

    - Registers a webhook with mbed cloud services
    - Requests the value of a resource
    - Prints the value when it arrives

    Note: the ``finally`` block always calls ``exit(1)``, so the
    ``return result`` never reaches a caller — the process terminates
    after the webhook is cleaned up (apparently intentional for this
    example script).
    """
    # Use the first currently-connected device.
    device = api.list_connected_devices().first()
    print('using device #', device.id)
    # Clear any stale subscriptions before registering the webhook.
    api.delete_device_subscriptions(device.id)
    try:
        # ngrok_url and resource_path are module-level globals defined
        # elsewhere in this file.
        print('setting webhook url to:', ngrok_url)
        api.update_webhook(ngrok_url)
        print('requesting resource value for:', resource_path)
        deferred = api.get_resource_value_async(device_id=device.id, resource_path=resource_path)
        print('waiting for async #', deferred.async_id)
        # Block for up to 15 seconds waiting for the webhook payload.
        result = deferred.wait(15)
        print('webhook sent us this payload value:', repr(result))
        return result
    except Exception:
        print(traceback.format_exc())
    finally:
        # Always clean up the webhook, then terminate the process.
        api.delete_webhook()
        print("Deregistered and unsubscribed from all resources. Exiting.")
        exit(1)
|
An example application.
- Registers a webhook with mbed cloud services
- Requests the value of a resource
- Prints the value when it arrives
|
def convert_from_ik_angles(self, joints):
    """ Convert from IKPY internal representation to poppy representation. """
    if len(joints) != len(self.motors) + 2:
        raise ValueError('Incompatible data, len(joints) should be {}!'.format(len(self.motors) + 2))
    # Drop the two dummy end links, convert radians to degrees.
    degrees = [rad2deg(angle) for angle in joints[1:-1]]
    degrees *= self._reversed
    # Apply each motor's rotation direction and zero offset.
    converted = []
    for angle, motor in zip(degrees, self.motors):
        sign = 1 if motor.direct else -1
        converted.append((angle * sign) - motor.offset)
    return converted
|
Convert from IKPY internal representation to poppy representation.
|
def db_set_indexing(cls, is_indexing, impl, working_dir):
    """
    Set lockfile path as to whether or not the system is indexing.
    NOT THREAD SAFE, USE ONLY FOR CRASH DETECTION.

    The lockfile's existence is the flag: present while indexing,
    absent otherwise.
    """
    indexing_lockfile_path = config.get_lockfile_filename(impl, working_dir)
    if is_indexing:
        # make sure this exists (touch the lockfile)
        with open(indexing_lockfile_path, 'w') as f:
            pass
    else:
        # make sure it does not exist; only swallow filesystem errors
        # (e.g. the file is already gone). The previous bare except also
        # hid KeyboardInterrupt/SystemExit.
        try:
            os.unlink(indexing_lockfile_path)
        except OSError:
            pass
|
Set lockfile path as to whether or not the system is indexing.
NOT THREAD SAFE, USE ONLY FOR CRASH DETECTION.
|
def hist(self, dimension=None, num_bins=20, bin_range=None,
         adjoin=True, individually=True, **kwargs):
    """Computes and adjoins histogram along specified dimension(s).

    Defaults to first value dimension if present otherwise falls
    back to first key dimension.

    Args:
        dimension: Dimension(s) to compute histogram on
        num_bins (int, optional): Number of bins
        bin_range (tuple optional): Lower and upper bounds of bins
        adjoin (bool, optional): Whether to adjoin histogram

    Returns:
        AdjointLayout of HoloMap and histograms or just the
        histograms
    """
    # Normalize `dimension` to a list (or leave as None).
    if dimension is not None and not isinstance(dimension, list):
        dimension = [dimension]
    # One empty clone per requested dimension to collect histograms into.
    histmaps = [self.clone(shared_data=False) for _ in (dimension or [None])]
    if individually:
        map_range = None
    else:
        if dimension is None:
            raise Exception("Please supply the dimension to compute a histogram for.")
        # NOTE(review): `dimension` is an explicit parameter, so it can
        # never appear in **kwargs — this lookup looks like it raises
        # KeyError whenever individually=False; confirm the intended key.
        map_range = self.range(kwargs['dimension'])
    bin_range = map_range if bin_range is None else bin_range
    style_prefix = 'Custom[<' + self.name + '>]_'
    # Overlay-typed containers default to histogramming their first layer.
    if issubclass(self.type, (NdOverlay, Overlay)) and 'index' not in kwargs:
        kwargs['index'] = 0
    for k, v in self.data.items():
        hists = v.hist(adjoin=False, dimension=dimension,
                       bin_range=bin_range, num_bins=num_bins,
                       style_prefix=style_prefix, **kwargs)
        # v.hist returns a Layout when several dimensions were requested;
        # distribute each histogram into its matching holder map.
        if isinstance(hists, Layout):
            for i, hist in enumerate(hists):
                histmaps[i][k] = hist
        else:
            histmaps[0][k] = hists
    if adjoin:
        # Chain-adjoin every histogram map to the right of this object.
        layout = self
        for hist in histmaps:
            layout = (layout << hist)
        if issubclass(self.type, (NdOverlay, Overlay)):
            layout.main_layer = kwargs['index']
        return layout
    else:
        if len(histmaps) > 1:
            return Layout(histmaps)
        else:
            return histmaps[0]
|
Computes and adjoins histogram along specified dimension(s).
Defaults to first value dimension if present otherwise falls
back to first key dimension.
Args:
dimension: Dimension(s) to compute histogram on
num_bins (int, optional): Number of bins
bin_range (tuple optional): Lower and upper bounds of bins
adjoin (bool, optional): Whether to adjoin histogram
Returns:
AdjointLayout of HoloMap and histograms or just the
histograms
|
def get_extreme(self, target_prop, maximize=True, min_temp=None,
                max_temp=None, min_doping=None, max_doping=None,
                isotropy_tolerance=0.05, use_average=True):
    """
    This method takes in eigenvalues over a range of carriers,
    temperatures, and doping levels, and tells you what is the "best"
    value that can be achieved for the given target_property. Note that
    this method searches the doping dict only, not the full mu dict.
    Args:
        target_prop: target property, i.e. "seebeck", "power factor",
            "conductivity", "kappa", or "zt"
        maximize: True to maximize, False to minimize (e.g. kappa)
        min_temp: minimum temperature allowed
        max_temp: maximum temperature allowed
        min_doping: minimum doping allowed (e.g., 1E18)
        max_doping: maximum doping allowed (e.g., 1E20)
        isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
        use_average: True for avg of eigenval, False for max eigenval
    Returns:
        A dictionary with keys {"p", "n", "best"} with sub-keys:
        {"value", "temperature", "doping", "isotropic"}
    """
    def is_isotropic(x, isotropy_tolerance):
        """
        Internal method to tell you if 3-vector "x" is isotropic
        Args:
            x: the vector to determine isotropy for
            isotropy_tolerance: tolerance, e.g. 0.05 is 5%
        """
        if len(x) != 3:
            raise ValueError("Invalid input to is_isotropic!")
        st = sorted(x)
        # All three components must be nonzero (short-circuit also protects
        # the divisions below), and each pairwise relative difference must
        # fall within the tolerance.
        # BUGFIX: the (st[2] - st[0]) comparison previously divided the abs()
        # result by st[2] outside the abs(), so it was trivially satisfied
        # whenever st[2] < 0 (e.g. negative Seebeck eigenvalues); the abs()
        # now wraps the whole ratio like the other two checks.
        return bool(all([st[0], st[1], st[2]]) and \
                    (abs((st[1] - st[0]) / st[1]) <= isotropy_tolerance) and \
                    (abs((st[2] - st[0]) / st[2]) <= isotropy_tolerance) and \
                    (abs((st[2] - st[1]) / st[2]) <= isotropy_tolerance))
    if target_prop.lower() == "seebeck":
        d = self.get_seebeck(output="eigs", doping_levels=True)
    elif target_prop.lower() == "power factor":
        d = self.get_power_factor(output="eigs", doping_levels=True)
    elif target_prop.lower() == "conductivity":
        d = self.get_conductivity(output="eigs", doping_levels=True)
    elif target_prop.lower() == "kappa":
        d = self.get_thermal_conductivity(output="eigs",
                                          doping_levels=True)
    elif target_prop.lower() == "zt":
        d = self.get_zt(output="eigs", doping_levels=True)
    else:
        raise ValueError("Target property: {} not recognized!".
                         format(target_prop))
    absval = True  # take the absolute value of properties
    x_val = None
    x_temp = None
    x_doping = None
    x_isotropic = None
    output = {}
    # Unset bounds default to the widest possible range.
    min_temp = min_temp or 0
    max_temp = max_temp or float('inf')
    min_doping = min_doping or 0
    max_doping = max_doping or float('inf')
    for pn in ('p', 'n'):
        for t in d[pn]:  # temperatures
            if min_temp <= float(t) <= max_temp:
                for didx, evs in enumerate(d[pn][t]):
                    doping_lvl = self.doping[pn][didx]
                    if min_doping <= doping_lvl <= max_doping:
                        isotropic = is_isotropic(evs, isotropy_tolerance)
                        if absval:
                            evs = [abs(x) for x in evs]
                        if use_average:
                            val = float(sum(evs)) / len(evs)
                        else:
                            val = max(evs)
                        if x_val is None or (val > x_val and maximize) \
                                or (val < x_val and not maximize):
                            x_val = val
                            x_temp = t
                            x_doping = doping_lvl
                            x_isotropic = isotropic
        output[pn] = {'value': x_val, 'temperature': x_temp,
                      'doping': x_doping, 'isotropic': x_isotropic}
        x_val = None  # reset the tracker before scanning the other carrier type
    if maximize:
        max_type = 'p' if output['p']['value'] >= \
            output['n']['value'] else 'n'
    else:
        max_type = 'p' if output['p']['value'] <= \
            output['n']['value'] else 'n'
    output['best'] = output[max_type]
    output['best']['carrier_type'] = max_type
    return output
|
This method takes in eigenvalues over a range of carriers,
temperatures, and doping levels, and tells you what is the "best"
value that can be achieved for the given target_property. Note that
this method searches the doping dict only, not the full mu dict.
Args:
target_prop: target property, i.e. "seebeck", "power factor",
"conductivity", "kappa", or "zt"
maximize: True to maximize, False to minimize (e.g. kappa)
min_temp: minimum temperature allowed
max_temp: maximum temperature allowed
min_doping: minimum doping allowed (e.g., 1E18)
max_doping: maximum doping allowed (e.g., 1E20)
isotropy_tolerance: tolerance for isotropic (0.05 = 5%)
use_average: True for avg of eigenval, False for max eigenval
Returns:
A dictionary with keys {"p", "n", "best"} with sub-keys:
{"value", "temperature", "doping", "isotropic"}
|
def addNewLvls(self):
    """
    Register three custom logging levels and attach matching helper
    methods to :class:`logging.Logger`.
    +---------------------+----------------------+
    | Standard Levels     |      New Levels      |
    +---------------+-----+----------------+-----+
    |     Name      |Level|      Name      |Level|
    +===============+=====+================+=====+
    |               |     |RAISEMSG        |  99 |
    +---------------+-----+----------------+-----+
    |CRITICAL       |  50 |                |     |
    +---------------+-----+----------------+-----+
    |ERROR          |  40 |                |     |
    +---------------+-----+----------------+-----+
    |WARNING        |  30 |                |     |
    +---------------+-----+----------------+-----+
    |               |     |IMPORTANTINFO   |  25 |
    +---------------+-----+----------------+-----+
    |INFO           |  20 |                |     |
    +---------------+-----+----------------+-----+
    |DEBUG          |  10 |                |     |
    +---------------+-----+----------------+-----+
    |               |     |FILEONLY        |   1 |
    +---------------+-----+----------------+-----+
    |NOTSET         |   0 |                |     |
    +---------------+-----+----------------+-----+
    """
    custom_levels = (
        (99, 'RAISEMSG'),       # raise messages, written to the file only
        (25, 'IMPORTANTINFO'),  # info more important than the standard INFO level
        (1, 'FILEONLY'),        # messages written ONLY to the file, never the screen
    )
    for level_value, level_name in custom_levels:
        logging.addLevelName(level_value, level_name)
        # Bind the level as a default argument so each helper captures its
        # own value (avoids the late-binding closure pitfall).
        def _log_at_level(self, msg, lvl=level_value, *args, **kws):
            self.log(lvl, msg, *args, **kws)
        setattr(logging.Logger, level_name.lower(), _log_at_level)
|
+---------------------+----------------------+
| Standard Levels | New Levels |
+---------------+-----+----------------+-----+
| Name |Level| Name |Level|
+===============+=====+================+=====+
| | |RAISEMSG | 99 |
+---------------+-----+----------------+-----+
|CRITICAL | 50 | | |
+---------------+-----+----------------+-----+
|ERROR | 40 | | |
+---------------+-----+----------------+-----+
|WARNING | 30 | | |
+---------------+-----+----------------+-----+
| | |IMPORTANTINFO | 25 |
+---------------+-----+----------------+-----+
|INFO | 20 | | |
+---------------+-----+----------------+-----+
|DEBUG | 10 | | |
+---------------+-----+----------------+-----+
| | |FILEONLY | 1 |
+---------------+-----+----------------+-----+
|NOTSET | 0 | | |
+---------------+-----+----------------+-----+
|
def _find_exits(self, src_block, target_block):
    """
    Source block has more than one exit, and through some of those exits, the control flow can eventually go to
    the target block. This method returns exits that lead to the target block.
    :param src_block: The block that has multiple exits.
    :param target_block: The target block to reach.
    :returns: a dict of statement ID -> a list of target IPs (or None if the exit should not be taken), each
            corresponds to an exit to take in order to reach the target.
            For example, it returns the following dict:
            {
                'default': None, # It has a default exit, but shouldn't be taken
                15: [ 0x400080 ], # Statement 15 is an exit statement, and should be taken when the target is
                                  # 0x400080
                28: None # Statement 28 is an exit statement, but shouldn't be taken
            }
    """
    # Enumerate all statements and find exit statements
    # Since we don't have a state, we have to rely on the pyvex block instead of SimIRSB
    # Just create the block from pyvex again - not a big deal
    if self.project.is_hooked(src_block.addr):
        # Hooked blocks have no real VEX statements to enumerate.
        # Just return all exits for now
        return { -1: [ target_block.addr ] }
    block = self.project.factory.block(src_block.addr)
    vex_block = block.vex
    # Map each conditional-exit statement index to the addresses it should
    # lead to (None = not known to reach the target yet).
    exit_stmt_ids = { }
    for stmt_idx, stmt in enumerate(vex_block.statements):
        if isinstance(stmt, pyvex.IRStmt.Exit):
            exit_stmt_ids[stmt_idx] = None
    # And of course, it has a default exit
    # Don't forget about it.
    exit_stmt_ids[DEFAULT_STATEMENT] = None
    # Find all paths from src_block to target_block
    # FIXME: This is some crappy code written in a hurry. Replace the all_simple_paths() later.
    all_simple_paths = list(networkx.all_simple_paths(self._cfg.graph, src_block, target_block, cutoff=3))
    for simple_path in all_simple_paths:
        if len(simple_path) <= 1:
            # Oops, it looks that src_block and target_block are the same guy?
            continue
        if self._same_function:
            # Examine this path and make sure it does not have call or return edge
            for i in range(len(simple_path) - 1):
                jumpkind = self._cfg.graph[simple_path[i]][simple_path[i + 1]]['jumpkind']
                if jumpkind in ('Ijk_Call', 'Ijk_Ret'):
                    # Crossing a function boundary disqualifies every path.
                    return { }
        # Get the first two nodes
        a, b = simple_path[0], simple_path[1]
        # Get the exit statement ID from CFG
        exit_stmt_id = self._cfg.get_exit_stmt_idx(a, b)
        if exit_stmt_id is None:
            continue
        # Mark it!
        if exit_stmt_ids[exit_stmt_id] is None:
            exit_stmt_ids[exit_stmt_id] = [ b.addr ]
        else:
            exit_stmt_ids[exit_stmt_id].append(b.addr)
    return exit_stmt_ids
|
Source block has more than one exit, and through some of those exits, the control flow can eventually go to
the target block. This method returns exits that lead to the target block.
:param src_block: The block that has multiple exits.
:param target_block: The target block to reach.
:returns: a dict of statement ID -> a list of target IPs (or None if the exit should not be taken), each
corresponds to an exit to take in order to reach the target.
For example, it returns the following dict:
{
'default': None, # It has a default exit, but shouldn't be taken
15: [ 0x400080 ], # Statement 15 is an exit statement, and should be taken when the target is
# 0x400080
28: None # Statement 28 is an exit statement, but shouldn't be taken
}
|
def make_stream_features(self, stream, features):
    """Add SASL features to the <features/> element of the stream.
    [receiving entity only]
    :returns: update <features/> element."""
    mechanisms = self.settings['sasl_mechanisms']
    # Only advertise mechanisms before the stream is authenticated.
    if not mechanisms or stream.authenticated:
        return features
    container = ElementTree.SubElement(features, MECHANISMS_TAG)
    for name in mechanisms:
        if name not in sasl.SERVER_MECHANISMS:
            continue
        ElementTree.SubElement(container, MECHANISM_TAG).text = name
    return features
|
Add SASL features to the <features/> element of the stream.
[receiving entity only]
:returns: update <features/> element.
|
def logpdf(x, shape, loc=0.0, scale=1.0, skewness=1.0):
    """
    Log PDF for the Skew-t distribution
    Parameters
    ----------
    x : np.array
        random variables
    shape : float
        The degrees of freedom for the skew-t distribution
    loc : np.array
        The location parameter for the skew-t distribution
    scale : float
        The scale of the distribution
    skewness : float
        Skewness parameter (if 1, no skewness, if > 1, +ve skew, if < 1, -ve skew)
    """
    # Mean offset of the underlying t distribution; shifts loc so the
    # skewed density stays centred on the requested location.
    m1 = (np.sqrt(shape)*sp.gamma((shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(shape/2.0))
    loc = loc + (skewness - (1.0/skewness))*scale*m1
    below = (x - loc) < 0
    above = (x - loc) >= 0
    log_norm = np.log(2.0) - np.log(skewness + 1.0/skewness)
    result = np.zeros(x.shape[0])
    # Left of the mode the t density is squeezed by the skewness factor...
    result[below] = log_norm + ss.t.logpdf(x=skewness*x[below], loc=loc[below]*skewness, df=shape, scale=scale[below])
    # ...and stretched by it on the right.
    result[above] = log_norm + ss.t.logpdf(x=x[above]/skewness, loc=loc[above]/skewness, df=shape, scale=scale[above])
    return result
|
Log PDF for the Skew-t distribution
Parameters
----------
x : np.array
random variables
shape : float
The degrees of freedom for the skew-t distribution
loc : np.array
The location parameter for the skew-t distribution
scale : float
The scale of the distribution
skewness : float
Skewness parameter (if 1, no skewness, if > 1, +ve skew, if < 1, -ve skew)
|
def json_schema_to_click_type(schema: dict) -> tuple:
    """
    A generic handler of a single property JSON schema to :class:`click.ParamType` converter
    :param schema: JSON schema property to operate on
    :return: Tuple of :class:`click.ParamType`, ``description`` of option and optionally a
        :class:`click.Choice` if the allowed values are a closed list (JSON schema ``enum``)
    """
    choices = None
    declared_type = schema["type"]
    # Multi-type properties collapse to "string" when string is an option.
    if isinstance(declared_type, list) and "string" in declared_type:
        schema["type"] = "string"
    click_type = SCHEMA_BASE_MAP[schema["type"]]
    description = schema.get("title")
    if schema.get("enum"):
        # todo handle multi type enums better (or at all)
        string_values = [value for value in schema["enum"] if isinstance(value, str)]
        choices = click.Choice(string_values)
    return click_type, description, choices
|
A generic handler of a single property JSON schema to :class:`click.ParamType` converter
:param schema: JSON schema property to operate on
:return: Tuple of :class:`click.ParamType`, `description`` of option and optionally a :class:`click.Choice`
if the allowed values are a closed list (JSON schema ``enum``)
|
def repolist(status='', media=None):
    """
    Get the list of ``yum`` repositories.
    Returns enabled repositories by default. Extra *status* may be passed
    to list disabled repositories if necessary.
    Media and debug repositories are kept disabled, except if you pass *media*.
    ::
        import burlap
        # Install a package that may be included in disabled repositories
        burlap.rpm.install('vim', burlap.rpm.repolist('disabled'))
    """
    manager = MANAGER
    with settings(hide('running', 'stdout')):
        if media:
            template = "%(manager)s repolist %(status)s | sed '$d' | sed -n '/repo id/,$p'"
        else:
            # Drop the Media/Debug repo lines before trimming the header/footer.
            template = "%(manager)s repolist %(status)s | sed '/Media\\|Debug/d' | sed '$d' | sed -n '/repo id/,$p'"
        repos = run_as_root(template % locals())
    # First remaining line is the "repo id" header; keep only the id column.
    return [line.split(' ')[0] for line in repos.splitlines()[1:]]
|
Get the list of ``yum`` repositories.
Returns enabled repositories by default. Extra *status* may be passed
to list disabled repositories if necessary.
Media and debug repositories are kept disabled, except if you pass *media*.
::
import burlap
# Install a package that may be included in disabled repositories
burlap.rpm.install('vim', burlap.rpm.repolist('disabled'))
|
def references(self):
    """ list: External links, or references, listed anywhere on the \
        MediaWiki page
        Note:
            Not settable
        Note:
            May include external links within page that are not \
            technically cited anywhere """
    # Lazily populate on first access; subsequent calls hit the cache.
    if self._references is None:
        self._references = []
        self.__pull_combined_properties()
    return self._references
|
list: External links, or references, listed anywhere on the \
MediaWiki page
Note:
Not settable
        Note:
May include external links within page that are not \
technically cited anywhere
|
def calculate(self, scene, xaxis, yaxis):
    """
    Calculates the grid data before rendering.
    Builds the horizontal/vertical grid lines, alternating-band
    rectangles and label positions for both axes, stores them in
    ``self._buildData``, and resizes the scene rect when the axis
    labels need more room than is currently available.
    :param scene | <XChartScene>
           xaxis | <XChartAxis>
           yaxis | <XChartAxis>
    :return <QRectF> | the (possibly resized) scene rectangle
    """
    # build the grid location
    sceneRect = scene.sceneRect()
    # process axis information
    h_lines = []
    h_alt = []
    h_labels = []
    v_lines = []
    v_alt = []
    v_labels = []
    xlabels = []
    xcount = 1
    xsections = 1
    xdelta = 0
    xdeltamin = 0
    ylabels = []
    ycount = 1
    ysections = 1
    ydeltamin = 0
    ydelta = 0
    axis_lft = 0
    axis_rht = 0
    axis_bot = 0
    axis_top = 0
    # precalculate xaxis information for width changes
    if xaxis and self.showXAxis():
        size = sceneRect.width()
        xdeltamin = xaxis.minimumLabelWidth()
        result = self.calculateAxis(xaxis, size, xdeltamin)
        xlabels, xcount, xsections, newWidth, xdelta = result
        # widen the scene when the labels cannot fit the current width
        if newWidth != size:
            sceneRect.setWidth(newWidth)
    # precalculate yaxis information for height changes
    if yaxis and self.showYAxis():
        size = sceneRect.height()
        ydeltamin = yaxis.minimumLabelHeight()
        result = self.calculateAxis(yaxis, size, ydeltamin)
        ylabels, ycount, ysections, newHeight, ydelta = result
        if newHeight != size:
            sceneRect.setHeight(newHeight)
    # generate the xaxis
    if xaxis and self.showXAxis():
        # first gridline sits half a label-width inside the scene
        x = sceneRect.left() + xdeltamin / 2
        axis_lft = x
        axis_rht = x
        alt = False
        for i in range(xcount):
            v_lines.append(QLineF(x, sceneRect.top(),
                                  x, sceneRect.bottom()))
            # store alternate info
            if alt:
                alt_rect = QRectF(x - xdelta, sceneRect.top(),
                                  xdelta, sceneRect.height())
                v_alt.append(alt_rect)
            # store label information
            v_labels.append((x, xdelta, xlabels[i]))
            axis_rht = x
            x += xdelta
            alt = not alt
    # generate the yaxis
    if yaxis and self.showYAxis():
        # y axis is laid out bottom-to-top
        y = sceneRect.bottom() - ydeltamin / 2
        axis_bot = y
        axis_top = y
        alt = False
        for i in range(ycount):
            h_lines.append(QLineF(sceneRect.left(), y,
                                  sceneRect.right(), y))
            # store alternate color
            if alt:
                alt_rect = QRectF(sceneRect.left(), y,
                                  sceneRect.width(), ydelta)
                h_alt.append(alt_rect)
            # store the vertical information
            h_labels.append((y, ydelta, ylabels[i]))
            axis_top = y
            y -= ydelta
            alt = not alt
    # assign the build data
    self._buildData['grid_h_lines'] = h_lines
    self._buildData['grid_h_alt'] = h_alt
    self._buildData['grid_h_labels'] = h_labels
    self._buildData['grid_v_lines'] = v_lines
    self._buildData['grid_v_alt'] = v_alt
    self._buildData['grid_v_labels'] = v_labels
    self._buildData['grid_rect'] = sceneRect
    self._buildData['axis_rect'] = QRectF(axis_lft, axis_top,
                                          axis_rht - axis_lft,
                                          axis_bot - axis_top)
    scene.setSceneRect(sceneRect)
    return sceneRect
|
Calculates the grid data before rendering.
:param scene | <XChartScene>
xaxis | <XChartAxis>
yaxis | <XChartAxis>
|
def _generate_main_files_header(notebook_object, notebook_title="Notebook Title",
                                notebook_description="Notebook Description"):
    """
    Internal function that is used for generation of the 'MainFiles' notebooks header.
    ----------
    Parameters
    ----------
    notebook_object : notebook object
        Object of "notebook" class where the header will be created.
    notebook_title : None or str
        Title of the Notebook.
    notebook_description : str
        An introductory text to present the Notebook and involve the reader.
    """
    # ------------------------------- Header cell ---------------------------------
    header_markdown = HEADER_MAIN_FILES.replace("Notebook Title", notebook_title)
    notebook_object["cells"].append(nb.v4.new_markdown_cell(header_markdown))
    # ---------- Description cell, tagged so it can be located afterwards ----------
    notebook_object["cells"].append(
        nb.v4.new_markdown_cell(notebook_description, metadata={"tags": ["test"]}))
|
Internal function that is used for generation of the 'MainFiles' notebooks header.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_title : None or str
Title of the Notebook.
notebook_description : str
An introductory text to present the Notebook and involve the reader.
|
def get_member_ids():
    """Return all member ids of the portal.
    """
    membership_tool = get_tool("portal_membership")
    # Some member ids can unexpectedly be None; drop every falsy entry.
    # filter(None, ...) is equivalent to filtering on truthiness.
    return filter(None, membership_tool.listMemberIds())
|
Return all member ids of the portal.
|
def aggregate(name):
    """ Perform an aggregation request. """
    cube = get_cube(name)
    params = request.args
    result = cube.aggregate(aggregates=params.get('aggregates'),
                            drilldowns=params.get('drilldown'),
                            cuts=params.get('cut'),
                            order=params.get('order'),
                            page=params.get('page'),
                            page_size=params.get('pagesize'))
    result['status'] = 'ok'
    # CSV output only returns the raw cells; everything else is JSON.
    if params.get('format', '').lower() == 'csv':
        return create_csv_response(result['cells'])
    return jsonify(result)
|
Perform an aggregation request.
|
def appendleft(self, item):
    """Add item to the left side of the GeventDeque.
    This method does not block. Either the GeventDeque grows to
    consume available memory, or if this GeventDeque has and is at
    maxlen, the rightmost item is removed.
    """
    self._deque.appendleft(item)
    # Wake up any consumers blocked waiting for items.
    self.notEmpty.set()
|
Add item to the left side of the GeventDeque.
This method does not block. Either the GeventDeque grows to
consume available memory, or if this GeventDeque has and is at
maxlen, the rightmost item is removed.
|
def update_hpx_skymap_allsky(map_in, map_out):
    """ 'Update' a HEALPix skymap
    This checks map_out exists and creates it from map_in if it does not.
    If map_out does exist, this adds the data in map_in to map_out
    :param map_in: map whose (expanded) counts are accumulated
    :param map_out: accumulator map, or None on the first call
    :returns: the accumulator map (newly created when map_out was None)
    """
    if map_out is None:
        in_hpx = map_in.hpx
        # Build an all-sky HPX geometry matching the input map's binning.
        out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,
                                 None, in_hpx.ebins, None, in_hpx.conv, None)
        data_out = map_in.expanded_counts_map()
        # (removed leftover debug print of the data shape/sum)
        map_out = HpxMap(data_out, out_hpx)
    else:
        map_out.data += map_in.expanded_counts_map()
    return map_out
|
'Update' a HEALPix skymap
This checks map_out exists and creates it from map_in if it does not.
If map_out does exist, this adds the data in map_in to map_out
|
def Grieves_Thodos(zs, Tcs, Aijs):
    r'''Calculates critical temperature of a mixture according to
    mixing rules in [1]_.
    .. math::
        T_{cm} = \sum_{i} \frac{T_{ci}}{1 + (1/x_i)\sum_j A_{ij} x_j}
    For a binary mixture, this simplifies to:
    .. math::
        T_{cm} = \frac{T_{c1}}{1 + (x_2/x_1)A_{12}} + \frac{T_{c2}}
        {1 + (x_1/x_2)A_{21}}
    Parameters
    ----------
    zs : array-like
        Mole fractions of all components
    Tcs : array-like
        Critical temperatures of all components, [K]
    Aijs : array-like of shape `zs` by `zs`
        Interaction parameters
    Returns
    -------
    Tcm : float
        Critical temperatures of the mixture, [K]
    Notes
    -----
    All parameters, even if zero, must be given to this function.
    Giving 0s gives really bad results however.
    Examples
    --------
    butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
    >>> Grieves_Thodos([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]])
    450.1839618758971
    References
    ----------
    .. [1] Grieves, Robert B., and George Thodos. "The Critical Temperatures of
       Multicomponent Hydrocarbon Systems." AIChE Journal 8, no. 4
       (September 1, 1962): 550-53. doi:10.1002/aic.690080426.
    .. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
       "Prediction of True Critical Temperature of Multi-Component Mixtures:
       Extending Fast Estimation Methods." Fluid Phase Equilibria 392
       (April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
    '''
    if not none_and_length_check([zs, Tcs]):
        # ValueError (a subclass of Exception) describes the failure mode
        # precisely while remaining catchable by existing handlers.
        raise ValueError('Function inputs are incorrect format')
    n = len(zs)
    Tcm = 0
    for i in range(n):
        Tcm += Tcs[i]/(1. + 1./zs[i]*sum(Aijs[i][j]*zs[j] for j in range(n)))
    return Tcm
|
r'''Calculates critical temperature of a mixture according to
mixing rules in [1]_.
.. math::
T_{cm} = \sum_{i} \frac{T_{ci}}{1 + (1/x_i)\sum_j A_{ij} x_j}
    For a binary mixture, this simplifies to:
.. math::
T_{cm} = \frac{T_{c1}}{1 + (x_2/x_1)A_{12}} + \frac{T_{c2}}
{1 + (x_1/x_2)A_{21}}
Parameters
----------
zs : array-like
Mole fractions of all components
Tcs : array-like
Critical temperatures of all components, [K]
Aijs : array-like of shape `zs` by `zs`
Interaction parameters
Returns
-------
Tcm : float
Critical temperatures of the mixture, [K]
Notes
-----
All parameters, even if zero, must be given to this function.
Giving 0s gives really bad results however.
Examples
--------
butane/pentane/hexane 0.6449/0.2359/0.1192 mixture, exp: 450.22 K.
>>> Grieves_Thodos([0.6449, 0.2359, 0.1192], [425.12, 469.7, 507.6], [[0, 1.2503, 1.516], [0.799807, 0, 1.23843], [0.659633, 0.807474, 0]])
450.1839618758971
References
----------
.. [1] Grieves, Robert B., and George Thodos. "The Critical Temperatures of
Multicomponent Hydrocarbon Systems." AIChE Journal 8, no. 4
(September 1, 1962): 550-53. doi:10.1002/aic.690080426.
.. [2] Najafi, Hamidreza, Babak Maghbooli, and Mohammad Amin Sobati.
"Prediction of True Critical Temperature of Multi-Component Mixtures:
Extending Fast Estimation Methods." Fluid Phase Equilibria 392
(April 25, 2015): 104-26. doi:10.1016/j.fluid.2015.02.001.
|
def create(self, title, description=None, private=False):
    """
    Create a new collection.
    This requires the 'write_collections' scope.
    :param title [string]: The title of the collection. (Required.)
    :param description [string]: The collection's description. (Optional.)
    :param private [boolean]: Whether to make this collection private. (Optional; default false).
    :return: [Collection]: The Unsplash Collection.
    """
    payload = {
        "title": title,
        "description": description,
        "private": private
    }
    response = self._post("/collections", data=payload)
    return CollectionModel.parse(response)
|
Create a new collection.
This requires the 'write_collections' scope.
:param title [string]: The title of the collection. (Required.)
:param description [string]: The collection’s description. (Optional.)
:param private [boolean]: Whether to make this collection private. (Optional; default false).
:return: [Collection]: The Unsplash Collection.
|
def _srvc_closing_routine(self, closing):
    """Routine to close an hdf5 file
    The file is closed only when `closing=True`. `closing=True` means that
    the file was opened in the current highest recursion level. This prevents re-opening
    and closing of the file if `store` or `load` are called recursively.
    :param closing: True when the caller is the recursion level that opened the file
    :returns: True if the file was actually closed, False otherwise
    """
    if (not self._keep_open and
            closing and
            self.is_open):
        f_fd = self._hdf5file.fileno()
        self._hdf5file.flush()
        try:
            # Force the OS to write buffered data to disk before closing.
            os.fsync(f_fd)
            try:
                self._hdf5store.flush(fsync=True)
            except TypeError:
                # Older pandas HDFStore has no `fsync` kwarg; flush and
                # fsync the store's own file descriptor manually.
                f_fd = self._hdf5store._handle.fileno()
                self._hdf5store.flush()
                os.fsync(f_fd)
        except OSError as exc:
            # This seems to be the only way to avoid an OSError under Windows
            errmsg = ('Encountered OSError while flushing file.'
                      'If you are using Windows, don`t worry! '
                      'I will ignore the error and try to close the file. '
                      'Original error: %s' % repr(exc))
            self._logger.debug(errmsg)
        self._hdf5store.close()
        if self._hdf5file.isopen:
            self._logger.error('Could not close HDF5 file!')
        # Drop all cached handles/state so a later call reopens cleanly.
        self._hdf5file = None
        self._hdf5store = None
        self._trajectory_group = None
        self._trajectory_name = None
        self._trajectory_index = None
        self._overview_group_ = None
        self._logger.debug('Closing HDF5 file')
        return True
    else:
        return False
|
Routine to close an hdf5 file
The file is closed only when `closing=True`. `closing=True` means that
the file was opened in the current highest recursion level. This prevents re-opening
and closing of the file if `store` or `load` are called recursively.
|
def ggsave(name, plot, data=None, *args, **kwargs):
    """Save a GGStatements object to destination name
    @param name output file name.  if None, don't run R command
    @param kwargs keyword args to pass to ggsave.  The following are special
            keywords for the python save method
        data: a python data object (list, dict, DataFrame) used to populate
            the `data` variable in R
        libs: list of library names to load in addition to ggplot2
        prefix: string containing R code to run before any ggplot commands (including data loading)
        postfix: string containing R code to run after data is loaded (e.g., if you want to rename variable names)
        quiet:  if Truthy, don't print out R program string
    """
    # constants
    # default ggsave() arguments, overridden by caller-supplied kwargs
    kwdefaults = {
        'width': 10,
        'height': 8,
        'scale': 1
    }
    # python-only keywords that must not be forwarded to R's ggsave()
    keys_to_rm = ["prefix", "quiet", "postfix", 'libs']
    varname = 'p'
    # process arguments
    prefix = kwargs.get('prefix', '')
    postfix = kwargs.get('postfix', '')
    libs = kwargs.get('libs', [])
    libs = '\n'.join(["library(%s)" % lib for lib in libs])
    quiet = kwargs.get("quiet", False)
    kwargs = {k: v for k, v in kwargs.iteritems()
              if v is not None and k not in keys_to_rm}
    kwdefaults.update(kwargs)
    kwargs = kwdefaults
    # figure out how to load data in the R environment
    if data is None: data = plot.data
    if data is None:
        # Don't load anything, the data source is already present in R
        data_src = ''
    elif isinstance(data, basestring) and 'RPostgreSQL' in data:
        # Hack to allow through data_sql results
        data_src = data
    elif isinstance(data, GGData):
        data_src = str(data)
    else:
        # format the python data object
        data_src = str(data_py(data))
    # assemble the full R program: header, libraries, prefix code,
    # data loading, postfix code, and finally the plot assignment
    prog = "%(header)s\n%(libs)s\n%(prefix)s\n%(data)s\n%(postfix)s\n%(varname)s = %(prog)s" % {
        'header': "library(ggplot2)",
        'libs': libs,
        'data': data_src,
        'prefix': prefix,
        'postfix': postfix,
        'varname': varname,
        'prog': plot.r
    }
    if name:
        # append the actual ggsave() call when an output file was requested
        stmt = GGStatement("ggsave", esc(name), varname, *args, **kwargs)
        prog = "%s\n%s" % (prog, stmt.r)
    if not quiet:
        print prog
        print
    if name:
        execute_r(prog, quiet)
    return prog
|
Save a GGStatements object to destination name
@param name output file name. if None, don't run R command
@param kwargs keyword args to pass to ggsave. The following are special
keywords for the python save method
data: a python data object (list, dict, DataFrame) used to populate
the `data` variable in R
libs: list of library names to load in addition to ggplot2
prefix: string containing R code to run before any ggplot commands (including data loading)
postfix: string containing R code to run after data is loaded (e.g., if you want to rename variable names)
quiet: if Truthy, don't print out R program string
|
def get_max_size(self, commands):
    """Returns the largest name length of the specified command list.
    Parameters
    ------------
    commands: Sequence[:class:`Command`]
        A sequence of commands to check for the largest size.
    Returns
    --------
    :class:`int`
        The maximum width of the commands.
    """
    widths = [discord.utils._string_width(command.name) for command in commands]
    # default=0 keeps an empty command list from raising ValueError
    return max(widths, default=0)
|
Returns the largest name length of the specified command list.
Parameters
------------
commands: Sequence[:class:`Command`]
A sequence of commands to check for the largest size.
Returns
--------
:class:`int`
The maximum width of the commands.
|
def inferTM(self, bottomUp, externalInput):
    """
    Run inference and return the set of predicted cells.
    :param bottomUp: bottom-up (proximal) input passed to the TM
    :param externalInput: external input passed as the TM's basal input
    :returns: the TM's predictive cells after this compute step
    """
    self.reset()
    # Inference only: learning is explicitly disabled for this step.
    # (removed commented-out debug prints of inputs and cell states)
    self.tm.compute(bottomUp,
                    basalInput=externalInput,
                    learn=False)
    return self.tm.getPredictiveCells()
|
Run inference and return the set of predicted cells
|
def reset_query_marks(self):
    """
    Set or reset hyb and neighbors marks on the atoms of every molecule
    in the reaction (reagents, reactants and products).
    """
    for molecule_list in (self.__reagents, self.__reactants, self.__products):
        for molecule in molecule_list:
            # Skip entries that do not support query marks.
            if hasattr(molecule, 'reset_query_marks'):
                molecule.reset_query_marks()
    self.flush_cache()
|
set or reset hyb and neighbors marks to atoms.
|
def formatmonth(self, theyear, themonth, withyear=True, net=None, qs=None, template='happenings/partials/calendar/month_table.html'):
    """Return a formatted month as a table."""
    context = self.get_context()
    context['month_start_date'] = date(self.yr, self.mo, 1)
    # One rendered cell per (day, weekday) pair, grouped by week.
    context['week_rows'] = [
        [self.formatday(day, weekday) for day, weekday in week]
        for week in self.monthdays2calendar(theyear, themonth)
    ]
    nxt, prev = get_next_and_prev(net)
    # Preserve any caller-supplied query-string fragments on the nav links.
    extra_qs = '&' + '&'.join(qs) if qs else ''
    context['prev_qs'] = mark_safe('?cal_prev=%d%s' % (prev, extra_qs))
    context['next_qs'] = mark_safe('?cal_next=%d%s' % (nxt, extra_qs))
    context['withyear'] = withyear
    return render_to_string(template, context)
Return a formatted month as a table.
|
def encode_float(encoder, pcm, frame_size, max_data_bytes):
    """Encodes an Opus frame from floating point input"""
    float_pcm = ctypes.cast(pcm, c_float_pointer)
    out_buffer = (ctypes.c_char * max_data_bytes)()
    num_bytes = _encode_float(encoder, float_pcm, frame_size, out_buffer, max_data_bytes)
    # Negative return values are libopus error codes.
    if num_bytes < 0:
        raise OpusError(num_bytes)
    return array.array('c', out_buffer[:num_bytes]).tostring()
|
Encodes an Opus frame from floating point input
|
def getObj(self):
    '''
    getObj - Fetch (if not fetched) and return the obj associated with this data.
    '''
    # Cached object wins; otherwise resolve lazily through the foreign model.
    if self.obj is not None:
        return self.obj
    if not self.pk:
        return None
    self.obj = self.foreignModel.objects.get(self.pk)
    return self.obj
|
getObj - Fetch (if not fetched) and return the obj associated with this data.
|
def calculate_inner_product_output_shapes(operator):
    '''
    Allowed input/output patterns are
    1. [N, C] ---> [N, C']
    2. [N, C, 1, 1] ---> [N, C', 1, 1]
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])
    input_shape = operator.inputs[0].type.shape
    rank = len(input_shape)
    # A 4-D input must carry trivial spatial dimensions.
    if rank == 4 and (input_shape[2] != 1 or input_shape[3] != 1):
        raise RuntimeError('If input is a 4-D tensor, its shape must be [N, C, 1, 1]')
    params = operator.raw_operator.innerProduct
    if input_shape[1] != params.inputChannels:
        raise RuntimeError('Dimension mismatch along C-axis. Expected %s but got %s' %
                           (params.inputChannels, input_shape[1]))
    # The output keeps the batch axis and swaps C for the layer's outputChannels.
    if rank == 4:
        operator.outputs[0].type.shape = [input_shape[0], params.outputChannels, 1, 1]
    elif rank == 2:
        operator.outputs[0].type.shape = [input_shape[0], params.outputChannels]
    else:
        raise RuntimeError('Input must be a 2-D or a 4-D tensor')
|
Allowed input/output patterns are
1. [N, C] ---> [N, C']
2. [N, C, 1, 1] ---> [N, C', 1, 1]
|
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot."""
    log('Loading kernel module %s' % module, level=INFO)
    # List form avoids shell interpretation of the module name.
    subprocess.check_call(['modprobe', module])
    if persist:
        persistent_modprobe(module)
|
Load a kernel module and configure for auto-load on reboot.
|
def get_settings(infile):
    """Read settings from input file.
    :param infile: Input file for JSON settings.
    :type infile: file or str path
    :return: Settings parsed from file
    :rtype: dict
    """
    # NOTE(review): yaml.load without an explicit Loader is unsafe on
    # untrusted input and deprecated in PyYAML >= 5.1 — consider safe_load.
    settings = yaml.load(_as_file(infile))
    if not hasattr(settings, 'keys'):
        raise ValueError("Settings not found in {}".format(infile))
    # Processing of namespaced parameters in .pmgrc.yaml.
    processed_settings = {}
    for key, value in settings.items():
        if key.startswith("PMG_DB_"):
            # Strip the "PMG_DB_" prefix and lowercase the remainder.
            processed_settings[key[7:].lower()] = value
        else:
            processed_settings[key] = value
    auth_aliases(processed_settings)
    return processed_settings
|
Read settings from input file.
:param infile: Input file for JSON settings.
:type infile: file or str path
:return: Settings parsed from file
:rtype: dict
|
def _replace(self, data, replacements):
"""
Given a list of 2-tuples (find, repl) this function performs all
replacements on the input and returns the result.
"""
for find, repl in replacements:
data = data.replace(find, repl)
return data
|
Given a list of 2-tuples (find, repl) this function performs all
replacements on the input and returns the result.
|
def combine_sources(sources, chunksize=None):
    r""" Stream from several data sources as if they were a single one.

    During iteration the given source objects (readers and transformers,
    e.g. TICA) are concatenated along the dimension axis. Use this to
    couple arbitrary features so they can be fed to an Estimator that
    expects a single source, which is usually the case. All iterator
    creation parameters are forwarded to the underlying sources to
    guarantee consistent behaviour.

    Parameters
    ----------
    sources : list, tuple
        DataSources (Readers, StreamingTransformers etc.) to combine for
        streaming access.
    chunksize : int, default=None
        Number of data frames to process at once. Choose a higher value
        here to optimize thread usage and gain processing speed. If None
        is passed, the default value of the underlying reader/data source
        is used. Choose zero to disable chunking entirely.

    Notes
    -----
    This is currently only implemented for trajectories of matching lengths.

    Returns
    -------
    merger : :class:`SourcesMerger <pyemma.coordinates.data.sources_merger.SourcesMerger>`
    """
    from pyemma.coordinates.data.sources_merger import SourcesMerger
    merger = SourcesMerger(sources, chunk=chunksize)
    return merger
|
r""" Combines multiple data sources to stream from.
The given source objects (readers and transformers, eg. TICA) are concatenated in dimension axis during iteration.
This can be used to couple arbitrary features in order to pass them to an Estimator expecting only one source,
which is usually the case. All the parameters for iterator creation are passed to the actual sources, to ensure
consistent behaviour.
Parameters
----------
sources : list, tuple
list of DataSources (Readers, StreamingTransformers etc.) to combine for streaming access.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Notes
-----
This is currently only implemented for matching lengths trajectories.
Returns
-------
merger : :class:`SourcesMerger <pyemma.coordinates.data.sources_merger.SourcesMerger>`
|
def battery2_send(self, voltage, current_battery, force_mavlink1=False):
    '''
    Send a 2nd-battery status message.

    voltage           : voltage in millivolts (uint16_t)
    current_battery   : battery current in 10*milliamperes
                        (1 = 10 milliampere); -1 means the autopilot
                        does not measure the current (int16_t)
    '''
    encoded = self.battery2_encode(voltage, current_battery)
    return self.send(encoded, force_mavlink1=force_mavlink1)
|
2nd Battery status
voltage : voltage in millivolts (uint16_t)
current_battery : Battery current, in 10*milliamperes (1 = 10 milliampere), -1: autopilot does not measure the current (int16_t)
|
def findLibrary(name):
    """
    Look for a library in the system, emulating the dlopen search order.

    `name` must include the prefix, e.g. ``libpython2.4.so``.
    Returns the library path with its file name resolved to the soname,
    or None when nothing was found.
    """
    assert is_unix, "Current implementation for Unix only (Linux, Solaris, AIX)"

    def _first_match(directory):
        # First glob hit for the library name in the given directory.
        hits = glob(os.path.join(directory, name + '*'))
        return hits[0] if hits else None

    lib = None
    # 1) Directories listed in LD_LIBRARY_PATH.
    for directory in compat.getenv('LD_LIBRARY_PATH', '').split(os.pathsep):
        lib = _first_match(directory)
        if lib:
            break
    # 2) The ldconfig cache (/etc/ld.so.cache).
    if lib is None:
        pattern = r'/[^\(\)\s]*%s\.[^\(\)\s]*' % re.escape(name)
        match = re.search(pattern, compat.exec_command('/sbin/ldconfig', '-p'))
        if match:
            lib = match.group(0)
    # 3) Well-known safe system paths.
    if lib is None:
        safe_paths = ['/lib', '/usr/lib']
        if is_aix:
            safe_paths.append('/opt/freeware/lib')
        for directory in safe_paths:
            lib = _first_match(directory)
            if lib:
                break
    # Give up :(
    if lib is None:
        return None
    # Resolve the file name into the soname.
    return os.path.join(os.path.dirname(lib), getSoname(lib))
|
Look for a library in the system.
Emulate the algorithm used by dlopen.
`name` must include the prefix, e.g. ``libpython2.4.so``
|
def upload(self, photo_file, **kwds):
    """
    Endpoint: /photo/upload.json

    Upload the photo stored at the given filename and return the
    resulting Photo object.
    """
    with open(photo_file, 'rb') as handle:
        response = self._client.post("/photo/upload.json",
                                     files={'photo': handle},
                                     **kwds)
        result = response["result"]
    return Photo(self._client, result)
|
Endpoint: /photo/upload.json
Uploads the specified photo filename.
|
def _filter_dates(dates, time_difference):
    """
    Drop dates that fall within ``time_difference`` of an earlier kept
    date, preserving only the oldest date of each cluster.

    :param dates: a list of datetime objects
    :param time_difference: a ``datetime.timedelta`` representing the time difference threshold
    :return: an ordered list of datetimes `d1<=d2<=...<=dn` such that `d[i+1]-di > time_difference`
    :rtype: list(datetime.datetime)
    """
    LOGGER.debug("dates=%s", dates)
    if len(dates) <= 1:
        return dates
    ordered = sorted(dates)
    kept = ordered[:1]
    for candidate in ordered[1:]:
        if candidate - kept[-1] > time_difference:
            kept.append(candidate)
    return kept
|
Filters out dates within time_difference, preserving only the oldest date.
:param dates: a list of datetime objects
:param time_difference: a ``datetime.timedelta`` representing the time difference threshold
:return: an ordered list of datetimes `d1<=d2<=...<=dn` such that `d[i+1]-di > time_difference`
:rtype: list(datetime.datetime)
|
def convert_embedding(net, node, model, builder):
    """Convert an embedding layer from mxnet to coreml.

    Parameters
    ----------
    net: network
        A mxnet network object.

    node: layer
        Node to convert.

    model: model
        An model for MXNet

    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    inputs = node['inputs']
    # The embedding weight matrix is the node's second input.
    # (The unused `outputs = node['outputs']` local was removed.)
    arg_params, aux_params = model.get_params()
    W = arg_params[_get_node_name(net, inputs[1][0])].asnumpy()
    if not ONE_HOT_ENCODE_HACK:
        # Native CoreML embedding layer: W stored as (output_channels, input_dim).
        nC, nB = W.shape
        W = W.T
        builder.add_embedding(name = name,
                              W = W,
                              b = None,
                              input_dim = nC,
                              output_channels = nB,
                              has_bias = False,
                              input_name = input_name,
                              output_name = output_name)
    else:
        # Emulate the embedding as an inner product over a one-hot input.
        W = W.T
        nC, nB = W.shape
        builder.add_inner_product(name = name,
                                  W = W,
                                  b = None,
                                  input_channels = nB,
                                  output_channels = nC,
                                  has_bias = False,
                                  input_name = input_name,
                                  output_name = output_name)
|
Convert an embedding layer from mxnet to coreml.
Parameters
----------
net: network
A mxnet network object.
node: layer
Node to convert.
model: model
An model for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
|
def enable_api_key(apiKey, region=None, key=None, keyid=None, profile=None):
    '''
    enable the given apiKey.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.enable_api_key api_key

    '''
    try:
        # Patch the key's /enabled attribute to "True" via the API Gateway
        # connection; any AWS-side failure is reported in the error dict.
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        patched = _api_key_patch_replace(conn, apiKey, '/enabled', 'True')
        return {'apiKey': _convert_datetime_str(patched)}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
|
enable the given apiKey.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.enable_api_key api_key
|
def _update_list_store(config_m, list_store, ignore_keys=None):
    """Generic method to fill a list store from a given config model

    :param ConfigModel config_m: Config model to read into list store
    :param Gtk.ListStore list_store: List store to be filled
    :param list ignore_keys: List of keys that should be ignored
    """
    if ignore_keys is None:
        ignore_keys = []
    list_store.clear()
    for key in sorted(config_m.config.keys):
        if key in ignore_keys:
            continue
        value = config_m.get_current_config_value(key)
        # Row layout: (config_key, text, text_visible, toggle_activatable,
        #              toggle_visible, text_editable, toggle_state)
        if isinstance(value, bool):
            row = (str(key), str(value), False, True, True, False, value)
        else:
            row = (str(key), str(value), True, False, False, True, value)
        list_store.append(row)
|
Generic method to create list store for a given config model
:param ConfigModel config_m: Config model to read into list store
:param Gtk.ListStore list_store: List store to be filled
:param list ignore_keys: List of keys that should be ignored
|
def get_context_data(self, **kwargs):
    """
    Build the template context for this view.

    Always exposes the current object as ``obj``; when a ``queryset``
    keyword is supplied, a confirmation message for it is added under
    ``conf_msg``. All keyword arguments are merged into the context.
    """
    context = dict(obj=self.object)
    if 'queryset' in kwargs:
        context['conf_msg'] = self.get_confirmation_message(kwargs['queryset'])
    context.update(kwargs)
    return context
|
Hook for adding arguments to the context.
|
def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
    """Map each function name to its 1-based line number within *module*.

    Args:
        functions: a list of function names
        module: the module to look the functions up
        searchstr: the string to search for

    Returns:
        A dictionary with functions as keys and their line numbers as
        values; names that cannot be located map to 0.
    """
    source_lines = inspect.getsourcelines(module)[0]
    numbers = {}
    for name in functions:
        needle = searchstr.format(name)
        try:
            numbers[name] = source_lines.index(needle) + 1
        except ValueError:
            print(r'Can not find `{}`'.format(needle))
            numbers[name] = 0
    return numbers
|
Returns a dictionary which maps function names to line numbers.
Args:
functions: a list of function names
module: the module to look the functions up
searchstr: the string to search for
Returns:
A dictionary with functions as keys and their line numbers as values.
|
def start(self):
    """Start the logger background synchronization service.

    This allows you to not need to worry about syncing with ConnectorDB -
    you just insert into the Logger, and the Logger will be synced every
    syncperiod. Calling start while the syncer is already running is a
    no-op apart from a logged warning.
    """
    with self.synclock:
        if self.syncthread is not None:
            # logging.warn is a deprecated alias; logging.warning is the
            # supported spelling.
            logging.warning(
                "Logger: Start called on a syncer that is already running")
            return

        self.sync()  # Attempt a sync right away
        self.__setsync()
|
Start the logger background synchronization service. This allows you to not need to
worry about syncing with ConnectorDB - you just insert into the Logger, and the Logger
will be synced every syncperiod.
|
def update_sma(self, step):
    """
    Return the semimajor axis after applying the given step to the
    current value.

    Growth is additive when ``self.linear_growth`` is set, and
    geometric otherwise. The step value must be managed by the caller
    to support both modes: grow outwards and shrink inwards.

    Parameters
    ----------
    step : float
        The step value.

    Returns
    -------
    sma : float
        The new semimajor axis length.
    """
    if self.linear_growth:
        return self.sma + step
    return self.sma * (1. + step)
|
Calculate an updated value for the semimajor axis, given the
current value and the step value.
The step value must be managed by the caller to support both
modes: grow outwards and shrink inwards.
Parameters
----------
step : float
The step value.
Returns
-------
sma : float
The new semimajor axis length.
|
def run(self, quil_program, classical_addresses: List[int] = None,
        trials=1):
    """
    Execute a Quil program repeatedly, collecting the classical memory
    deposited into the ``ro`` readout register.

    :param Program quil_program: A Quil program.
    :param classical_addresses: The classical memory to retrieve. Specified as a list of
        integers that index into a readout register named ``ro``. This function--and
        particularly this argument--are included for backwards compatibility and will
        be removed in the future.
    :param int trials: Number of shots to collect.
    :return: A list of dictionaries of bits. Each dictionary corresponds to the values in
        `classical_addresses`.
    :rtype: list
    """
    if classical_addresses is not None:
        caddresses = {'ro': classical_addresses}
    else:
        # Backwards-compatible default: infer addresses from the program.
        caddresses = get_classical_addresses_from_program(quil_program)

    buffers = self._connection._qvm_run(quil_program, caddresses, trials,
                                        self.measurement_noise, self.gate_noise,
                                        self.random_seed)

    if not buffers:
        return []

    if 'ro' in buffers:
        return buffers['ro'].tolist()

    raise ValueError("You are using QVMConnection.run with multiple readout registers not "
                     "named `ro`. Please use the new `QuantumComputer` abstraction.")
|
Run a Quil program multiple times, accumulating the values deposited in
a list of classical addresses.
:param Program quil_program: A Quil program.
:param classical_addresses: The classical memory to retrieve. Specified as a list of
integers that index into a readout register named ``ro``. This function--and
particularly this argument--are included for backwards compatibility and will
be removed in the future.
:param int trials: Number of shots to collect.
:return: A list of dictionaries of bits. Each dictionary corresponds to the values in
`classical_addresses`.
:rtype: list
|
def detect(agent, fill_none=False):
    """Run all registered detectors against *agent* and collect the results.

    :param agent: user-agent string to analyse.
    :param fill_none: if name/version is not detected respective key is
        still added to the result with value None
    :return: dict with at least a ``platform`` entry; detectors add
        further keys (e.g. ``os``, ``browser``) as they match.
    """
    result = dict(platform=dict(name=None, version=None))
    # NOTE(review): the original kept an `_suggested_detectors` list here
    # that was initialised empty and never populated, so the
    # `_suggested_detectors or detectorshub[info_type]` expression always
    # fell through to the hub. Removed as dead code.
    for info_type in detectorshub:
        for detector in detectorshub[info_type]:
            try:
                detector.detect(agent, result)
            except Exception:
                # Detection is best-effort: one broken detector must not
                # prevent the remaining ones from running.
                pass
    if fill_none:
        for outer_key in ('os', 'browser'):
            outer_value = result.setdefault(outer_key, dict())
            for inner_key in ('name', 'version'):
                outer_value.setdefault(inner_key, None)
    return result
|
fill_none: if name/version is not detected respective key is still added to the result with value None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.