| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def configure_firewall(self, FirewallRules):
"""
Helper function for automatically adding several FirewallRules in series.
"""
        firewall_rule_bodies = [
            firewall_rule.to_dict()
            for firewall_rule in FirewallRules
        ]
return self.cloud_manager.configure_firewall(self, firewall_rule_bodies)
|
Helper function for automatically adding several FirewallRules in series.
|
def draw_polygon(
self,
*pts,
close_path=True,
stroke=None,
stroke_width=1,
stroke_dash=None,
fill=None
) -> None:
"""Draws the given polygon."""
c = self.c
c.saveState()
if stroke is not None:
c.setStrokeColorRGB(*stroke)
c.setLineWidth(stroke_width)
            if stroke_dash is not None:
                c.setDash(stroke_dash)
if fill is not None:
c.setFillColorRGB(*fill)
p = c.beginPath()
fn = p.moveTo
        # consume the flat coordinate list pairwise: (x0, y0), (x1, y1), ...
        for x, y in zip(*[iter(pts)] * 2):
fn(x, y)
fn = p.lineTo
if close_path:
p.close()
c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None))
c.restoreState()
|
Draws the given polygon.
|
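A hedged usage sketch for the method above: `doc` stands in for whatever object exposes `draw_polygon` (its `c` attribute is assumed to be a reportlab canvas), and vertices are passed as a flat x, y sequence.

```python
# Illustrative call only; `doc` is a hypothetical wrapper around a
# reportlab canvas. Vertices go in flat: x0, y0, x1, y1, ...
doc.draw_polygon(0, 0, 100, 0, 50, 80,
                 stroke=(0, 0, 0), stroke_width=2,
                 fill=(0.9, 0.2, 0.2))
```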
def search_bm25(cls, term, weights=None, with_score=False,
score_alias='score', explicit_ordering=False):
"""Full-text search using selected `term`."""
if not weights:
rank = SQL('rank')
elif isinstance(weights, dict):
weight_args = []
for field in cls._meta.sorted_fields:
if isinstance(field, SearchField) and not field.unindexed:
weight_args.append(
weights.get(field, weights.get(field.name, 1.0)))
rank = fn.bm25(cls._meta.entity, *weight_args)
else:
rank = fn.bm25(cls._meta.entity, *weights)
selection = ()
order_by = rank
if with_score:
selection = (cls, rank.alias(score_alias))
if with_score and not explicit_ordering:
order_by = SQL(score_alias)
return (cls
.select(*selection)
.where(cls.match(FTS5Model.clean_query(term)))
.order_by(order_by))
|
Full-text search using selected `term`.
|
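The method above matches peewee's `FTS5Model.search_bm25` classmethod; a minimal sketch, assuming a hypothetical `DocumentIndex` model bound to a SQLite database.

```python
from playhouse.sqlite_ext import FTS5Model, SearchField

class DocumentIndex(FTS5Model):  # hypothetical index model
    title = SearchField()
    body = SearchField()

# Rank matches for 'python', weighting title hits twice as heavily as
# body hits, and expose the bm25 rank under the default 'score' alias:
query = DocumentIndex.search_bm25('python',
                                  weights={'title': 2.0, 'body': 1.0},
                                  with_score=True)
for doc in query:
    print(doc.title, doc.score)
```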
def _GetPluginData(self):
"""Retrieves the version and various plugin information.
Returns:
dict[str, list[str]]: available parsers and plugins.
"""
return_dict = {}
return_dict['Versions'] = [
('plaso engine', plaso.__version__),
('python', sys.version)]
hashers_information = hashers_manager.HashersManager.GetHashersInformation()
parsers_information = parsers_manager.ParsersManager.GetParsersInformation()
plugins_information = (
parsers_manager.ParsersManager.GetParserPluginsInformation())
presets_information = parsers_manager.ParsersManager.GetPresetsInformation()
return_dict['Hashers'] = hashers_information
return_dict['Parsers'] = parsers_information
return_dict['Parser Plugins'] = plugins_information
return_dict['Parser Presets'] = presets_information
return return_dict
|
Retrieves the version and various plugin information.
Returns:
dict[str, list[str]]: available parsers and plugins.
|
def load_collection_from_stream(resource, stream, content_type):
"""
Creates a new collection for the registered resource and calls
`load_into_collection_from_stream` with it.
"""
coll = create_staging_collection(resource)
load_into_collection_from_stream(coll, stream, content_type)
return coll
|
Creates a new collection for the registered resource and calls
`load_into_collection_from_stream` with it.
|
def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
"""
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
        token string. Searches backwards if reverse is True. Returns the ENDMARKER token if not found
        (you can check it with `token.ISEOF(t.type)`).
"""
t = start_token
advance = self.prev_token if reverse else self.next_token
while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
t = advance(t, include_extra=True)
return t
|
Looks for the first token, starting at start_token, that matches tok_type and, if given, the
token string. Searches backwards if reverse is True. Returns the ENDMARKER token if not found
(you can check it with `token.ISEOF(t.type)`).
|
def restart(self):
"""
Restarts the whole wizard from the beginning.
"""
# hide all of the pages
for page in self._pages.values():
page.hide()
pageId = self.startId()
try:
first_page = self._pages[pageId]
except KeyError:
return
self._currentId = pageId
self._navigation = [pageId]
page_size = self.pageSize()
        x = (self.width() - page_size.width()) // 2
        y = (self.height() - page_size.height()) // 2
first_page.move(self.width()+first_page.width(), y)
first_page.show()
        # animate the first page into view
anim_out = QtCore.QPropertyAnimation(self)
anim_out.setTargetObject(first_page)
anim_out.setPropertyName('pos')
anim_out.setStartValue(first_page.pos())
anim_out.setEndValue(QtCore.QPoint(x, y))
anim_out.setDuration(self.animationSpeed())
anim_out.setEasingCurve(QtCore.QEasingCurve.Linear)
anim_out.finished.connect(anim_out.deleteLater)
# update the button states
self._buttons[self.WizardButton.BackButton].setVisible(False)
self._buttons[self.WizardButton.NextButton].setVisible(self.canGoForward())
self._buttons[self.WizardButton.CommitButton].setVisible(first_page.isCommitPage())
self._buttons[self.WizardButton.FinishButton].setVisible(first_page.isFinalPage())
self.adjustSize()
first_page.initializePage()
self.currentIdChanged.emit(pageId)
anim_out.start()
|
Restarts the whole wizard from the beginning.
|
def bundle(self, bundle_id, channel=None):
'''Get the default data for a bundle.
@param bundle_id The bundle's id.
@param channel Optional channel name.
'''
return self.entity(bundle_id, get_files=True, channel=channel)
|
Get the default data for a bundle.
@param bundle_id The bundle's id.
@param channel Optional channel name.
|
def put_archive(request, pid):
"""MNStorage.archive(session, did) → Identifier."""
d1_gmn.app.views.assert_db.is_not_replica(pid)
d1_gmn.app.views.assert_db.is_not_archived(pid)
d1_gmn.app.sysmeta.archive_sciobj(pid)
return pid
|
MNStorage.archive(session, did) → Identifier.
|
def getreferingobjs(referedobj, iddgroups=None, fields=None):
"""Get a list of objects that refer to this object"""
# pseudocode for code below
# referringobjs = []
# referedobj has: -> Name
# -> reference
# for each obj in idf:
# [optional filter -> objects in iddgroup]
# each field of obj:
# [optional filter -> field in fields]
# has object-list [refname]:
# if refname in reference:
# if Name = field value:
# referringobjs.append()
referringobjs = []
idf = referedobj.theidf
referedidd = referedobj.getfieldidd("Name")
try:
references = referedidd['reference']
    except KeyError:
return referringobjs
idfobjs = idf.idfobjects.values()
idfobjs = list(itertools.chain.from_iterable(idfobjs)) # flatten list
if iddgroups: # optional filter
idfobjs = [anobj for anobj in idfobjs
if anobj.getfieldidd('key')['group'] in iddgroups]
for anobj in idfobjs:
if not fields:
thefields = anobj.objls
else:
thefields = fields
for field in thefields:
try:
itsidd = anobj.getfieldidd(field)
        except ValueError:
continue
if 'object-list' in itsidd:
refname = itsidd['object-list'][0]
if refname in references:
if referedobj.isequal('Name', anobj[field]):
referringobjs.append(anobj)
return referringobjs
|
Get a list of objects that refer to this object
|
def keyPressEvent(self, event):
"""Qt Override."""
key = event.key()
if key in [Qt.Key_Enter, Qt.Key_Return]:
self.show_editor()
elif key in [Qt.Key_Tab]:
self.finder.setFocus()
elif key in [Qt.Key_Backtab]:
self.parent().reset_btn.setFocus()
elif key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_Left, Qt.Key_Right]:
super(ShortcutsTable, self).keyPressEvent(event)
elif key not in [Qt.Key_Escape, Qt.Key_Space]:
text = event.text()
if text:
if re.search(VALID_FINDER_CHARS, text) is not None:
self.finder.setFocus()
self.finder.set_text(text)
elif key in [Qt.Key_Escape]:
self.finder.keyPressEvent(event)
|
Qt Override.
|
def rpc_get_docstring(self, filename, source, offset):
"""Get the docstring for the symbol at the offset.
"""
return self._call_backend("rpc_get_docstring", None, filename,
get_source(source), offset)
|
Get the docstring for the symbol at the offset.
|
def get_any_nt_unit_rule(g):
"""Returns a non-terminal unit rule from 'g', or None if there is none."""
for rule in g.rules:
if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT):
return rule
return None
|
Returns a non-terminal unit rule from 'g', or None if there is none.
|
def gaussian(x, y, xsigma, ysigma):
"""
Two-dimensional oriented Gaussian pattern (i.e., 2D version of a
bell curve, like a normal distribution but not necessarily summing
to 1.0).
"""
if xsigma==0.0 or ysigma==0.0:
return x*0.0
with float_error_ignore():
x_w = np.divide(x,xsigma)
y_h = np.divide(y,ysigma)
        return np.exp(-0.5 * x_w * x_w - 0.5 * y_h * y_h)
|
Two-dimensional oriented Gaussian pattern (i.e., 2D version of a
bell curve, like a normal distribution but not necessarily summing
to 1.0).
|
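A runnable sketch of the function above. `float_error_ignore` comes from the function's own module; the stand-in below (an assumption) just silences numpy floating-point warnings.

```python
import numpy as np
from contextlib import contextmanager

@contextmanager
def float_error_ignore():  # stand-in for the module's helper (assumption)
    with np.errstate(all='ignore'):
        yield

# Evaluate the pattern on a small grid; values peak at the origin.
x, y = np.meshgrid(np.linspace(-1, 1, 5), np.linspace(-1, 1, 5))
print(gaussian(x, y, xsigma=0.5, ysigma=0.5).round(3))
```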
def symmetrise(matrix, tri='upper'):
"""
Will copy the selected (upper or lower) triangle of a square matrix
to the opposite side, so that the matrix is symmetrical.
Alters in place.
"""
if tri == 'upper':
tri_fn = np.triu_indices
else:
tri_fn = np.tril_indices
size = matrix.shape[0]
matrix[tri_fn(size)[::-1]] = matrix[tri_fn(size)]
return matrix
|
Will copy the selected (upper or lower) triangle of a square matrix
to the opposite side, so that the matrix is symmetrical.
Alters in place.
|
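A quick numpy check of the in-place behaviour described above.

```python
import numpy as np

m = np.array([[1., 2., 3.],
              [0., 4., 5.],
              [0., 0., 6.]])
symmetrise(m)                # copies the upper triangle onto the lower
print(np.allclose(m, m.T))   # True: the matrix is now symmetric
```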
def _reset(self, server, **kwargs):
"""
Reset the server object with new values given as params.
- server: a dict representing the server. e.g the API response.
- kwargs: any meta fields such as cloud_manager and populated.
Note: storage_devices and ip_addresses may be given in server as dicts or
in kwargs as lists containing Storage and IPAddress objects.
"""
if server:
# handle storage, ip_address dicts and tags if they exist
Server._handle_server_subobjs(server, kwargs.get('cloud_manager'))
for key in server:
object.__setattr__(self, key, server[key])
for key in kwargs:
object.__setattr__(self, key, kwargs[key])
|
Reset the server object with new values given as params.
- server: a dict representing the server. e.g the API response.
- kwargs: any meta fields such as cloud_manager and populated.
Note: storage_devices and ip_addresses may be given in server as dicts or
in kwargs as lists containing Storage and IPAddress objects.
|
def method_name(func):
"""Method wrapper that adds the name of the method being called to its arguments list in Pascal case
"""
@wraps(func)
def _method_name(*args, **kwargs):
name = to_pascal_case(func.__name__)
return func(name=name, *args, **kwargs)
return _method_name
|
Method wrapper that adds the name of the method being called to its arguments list in Pascal case
|
def start(self) -> None:
"""
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
"""
self.stop() # stop an existing thread
self._thread = receiverThread(socket=self.sock, callbacks=self._callbacks)
self._thread.start()
|
Starts a new thread that handles the input. If a thread is already running, the thread will be restarted.
|
def zip_ll(data, means, M):
"""
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
"""
genes, cells = data.shape
clusters = means.shape[1]
ll = np.zeros((cells, clusters))
d0 = (data==0)
d1 = (data>0)
for i in range(clusters):
means_i = np.tile(means[:,i], (cells, 1))
means_i = means_i.transpose()
L_i = np.tile(M[:,i], (cells, 1))
L_i = L_i.transpose()
ll_0 = np.log(L_i + (1 - L_i)*np.exp(-means_i))
ll_0 = np.where((L_i==0) & (means_i==0), -means_i, ll_0)
# not including constant factors
ll_1 = np.log(1 - L_i) + xlogy(data, means_i) - means_i
ll_0 = np.where(d0, ll_0, 0.0)
ll_1 = np.where(d1, ll_1, 0.0)
ll[:,i] = np.sum(ll_0 + ll_1, 0)
return ll
|
Calculates the zero-inflated Poisson log-likelihood.
Args:
data (array): genes x cells
means (array): genes x k
M (array): genes x k - this is the zero-inflation parameter.
Returns:
cells x k array of log-likelihood for each cell/cluster pair.
|
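A shape-level sanity check for the function above; the toy numbers are illustrative, and the imports mirror what the function itself needs (numpy plus scipy.special.xlogy).

```python
import numpy as np
from scipy.special import xlogy

data = np.array([[0., 1., 2., 0.],    # 3 genes x 4 cells (toy values)
                 [3., 0., 0., 1.],
                 [0., 0., 5., 2.]])
means = np.array([[1.0, 2.0],         # genes x k=2 clusters
                  [2.0, 0.5],
                  [3.0, 1.0]])
M = np.full((3, 2), 0.1)              # zero-inflation parameters in (0, 1)
print(zip_ll(data, means, M).shape)   # (4, 2): cells x clusters
```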
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs):
""" Reconstruct the term-document matrix and measure error as SVD terms are truncated
"""
smat = np.zeros((len(u), len(vt)))
np.fill_diagonal(smat, s)
smat = pd.DataFrame(smat, columns=vt.index, index=u.index)
if verbosity:
print()
print('Sigma:')
print(smat.round(2))
print()
print('Sigma without zeroing any dim:')
print(np.diag(smat.round(2)))
tdm_prime = u.values.dot(smat.values).dot(vt.values)
if verbosity:
print()
print('Reconstructed Term-Document Matrix')
print(tdm_prime.round(2))
    err = [np.sqrt(((tdm_prime - tdm).values.flatten() ** 2).sum() / np.prod(tdm.shape))]
if verbosity:
print()
print('Error without reducing dimensions:')
print(err[-1])
# 2.3481474529927113e-15
smat2 = smat.copy()
for numdim in range(len(s) - 1, 0, -1):
smat2.iloc[numdim, numdim] = 0
if verbosity:
print('Sigma after zeroing out dim {}'.format(numdim))
print(np.diag(smat2.round(2)))
# d0 d1 d2 d3 d4 d5
# ship 2.16 0.00 0.0 0.0 0.0 0.0
# boat 0.00 1.59 0.0 0.0 0.0 0.0
# ocean 0.00 0.00 0.0 0.0 0.0 0.0
# voyage 0.00 0.00 0.0 0.0 0.0 0.0
# trip 0.00 0.00 0.0 0.0 0.0 0.0
tdm_prime2 = u.values.dot(smat2.values).dot(vt.values)
        err += [np.sqrt(((tdm_prime2 - tdm).values.flatten() ** 2).sum() / np.prod(tdm.shape))]
if verbosity:
print('Error after zeroing out dim {}'.format(numdim))
print(err[-1])
return err
|
Reconstruct the term-document matrix and measure error as SVD terms are truncated
|
def run(self):
"""
Begin reading through audio files, saving false
activations and retraining when necessary
"""
for fn in glob_all(self.args.random_data_folder, '*.wav'):
if fn in self.trained_fns:
print('Skipping ' + fn + '...')
continue
print('Starting file ' + fn + '...')
self.train_on_audio(fn)
print('\r100% ')
self.trained_fns.append(fn)
save_trained_fns(self.trained_fns, self.args.model)
|
Begin reading through audio files, saving false
activations and retraining when necessary
|
def get_router_for_floatingip(self, context, internal_port,
internal_subnet, external_network_id):
"""We need to over-load this function so that we only return the
user visible router and never its redundancy routers (as they never
have floatingips associated with them).
"""
gw_port = orm.aliased(models_v2.Port, name="gw_port")
routerport_qry = context.session.query(
RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
models_v2.Port, models_v2.IPAllocation).filter(
models_v2.Port.network_id == internal_port['network_id'],
RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS),
models_v2.IPAllocation.subnet_id == internal_subnet['id']
).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
gw_port.network_id == external_network_id,
gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW
).distinct()
# Ensure that redundancy routers (in a ha group) are not returned,
# since only the user visible router should have floatingips.
# This can be done by checking that the id of routers does not
# appear in the 'redundancy_router_id' column in the
# 'cisco_router_redundancy_bindings' table.
routerport_qry = routerport_qry.outerjoin(
RouterRedundancyBinding,
RouterRedundancyBinding.redundancy_router_id ==
RouterPort.router_id)
routerport_qry = routerport_qry.filter(
RouterRedundancyBinding.redundancy_router_id == expr.null())
first_router_id = None
for router_id, interface_ip in routerport_qry:
if interface_ip == internal_subnet['gateway_ip']:
return router_id
if not first_router_id:
first_router_id = router_id
if first_router_id:
return first_router_id
raise l3_exceptions.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet['id'],
external_network_id=external_network_id,
port_id=internal_port['id'])
|
We need to over-load this function so that we only return the
user visible router and never its redundancy routers (as they never
have floatingips associated with them).
|
def holdAcknowledge():
"""HOLD ACKNOWLEDGE Section 9.3.11"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x19) # 00011001
packet = a / b
return packet
|
HOLD ACKNOWLEDGE Section 9.3.11
|
def _set_cdn_access(self, container, public, ttl=None):
"""
Enables or disables CDN access for the specified container, and
optionally sets the TTL for the container when enabling access.
"""
headers = {"X-Cdn-Enabled": "%s" % public}
if public and ttl:
headers["X-Ttl"] = ttl
self.api.cdn_request("/%s" % utils.get_name(container), method="PUT",
headers=headers)
|
Enables or disables CDN access for the specified container, and
optionally sets the TTL for the container when enabling access.
|
def get_scoped_variable_m(self, data_port_id):
"""Returns the scoped variable model for the given data port id
:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id
"""
for scoped_variable_m in self.scoped_variables:
if scoped_variable_m.scoped_variable.data_port_id == data_port_id:
return scoped_variable_m
return None
|
Returns the scoped variable model for the given data port id
:param data_port_id: The data port id to search for
:return: The model of the scoped variable with the given id
|
def _check_curtailment_target(curtailment, curtailment_target,
curtailment_key):
"""
Raises an error if curtailment target was not met in any time step.
Parameters
-----------
    curtailment : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step.
Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are
the generator representatives.
curtailment_target : :pandas:`pandas.Series<series>`
The curtailment in kW that was to be distributed amongst the
generators. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment was specified for.
"""
if not (abs(curtailment.sum(axis=1) - curtailment_target) < 1e-1).all():
message = 'Curtailment target not met for {}.'.format(curtailment_key)
logging.error(message)
raise TypeError(message)
|
Raises an error if curtailment target was not met in any time step.
Parameters
-----------
curtailment : :pandas:`pandas.DataFrame<dataframe>`
Dataframe containing the curtailment in kW per generator and time step.
Index is a :pandas:`pandas.DatetimeIndex<datetimeindex>`, columns are
the generator representatives.
curtailment_target : :pandas:`pandas.Series<series>`
The curtailment in kW that was to be distributed amongst the
generators. Index of the series is a
:pandas:`pandas.DatetimeIndex<datetimeindex>`.
curtailment_key : :obj:`str` or :obj:`tuple` with :obj:`str`
The technology and weather cell ID if :obj:`tuple` or only
the technology if :obj:`str` the curtailment was specified for.
|
def composite(
background_image,
foreground_image,
foreground_width_ratio=0.25,
foreground_position=(0.0, 0.0),
):
"""Takes two images and composites them."""
if foreground_width_ratio <= 0:
return background_image
composite = background_image.copy()
width = int(foreground_width_ratio * background_image.shape[1])
foreground_resized = resize(foreground_image, width)
size = foreground_resized.shape
x = int(foreground_position[1] * (background_image.shape[1] - size[1]))
y = int(foreground_position[0] * (background_image.shape[0] - size[0]))
# TODO: warn if resulting coordinates are out of bounds?
composite[y : y + size[0], x : x + size[1]] = foreground_resized
return composite
|
Takes two images and composites them.
|
def minion_pub(self, clear_load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
        salt functions, so the minions can only publish allowed salt functions.
        The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
        This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
'''
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
|
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions.
The config will look like this:
.. code-block:: bash
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
.. code-block:: bash
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
|
def solid_angles(self, permutation=None):
"""
Returns the list of "perfect" solid angles Each edge is given as a
list of its end vertices coordinates.
"""
if permutation is None:
return self._solid_angles
else:
return [self._solid_angles[ii] for ii in permutation]
|
Returns the list of "perfect" solid angles Each edge is given as a
list of its end vertices coordinates.
|
def dump_json_data(page):
"""
Return a python dict representation of this page for use as part of
a JSON export.
"""
def content_langs_ordered():
"""
Return a list of languages ordered by the page content
with the latest creation date in each. This will be used
to maintain the state of the language_up_to_date template
tag when a page is restored or imported into another site.
"""
params = {'page': page}
if page.freeze_date:
params['creation_date__lte'] = page.freeze_date
cqs = Content.objects.filter(**params)
cqs = cqs.values('language').annotate(latest=Max('creation_date'))
return [c['language'] for c in cqs.order_by('latest')]
languages = content_langs_ordered()
def language_content(ctype):
return dict(
(lang, page.get_content(lang, ctype, language_fallback=False))
for lang in languages)
def placeholder_content():
"""Return content of each placeholder in each language."""
out = {}
for p in get_placeholders(page.get_template()):
if p.ctype in ('title', 'slug'):
continue # these were already included
out[p.name] = language_content(p.name)
return out
def isoformat(d):
return None if d is None else d.strftime(ISODATE_FORMAT)
def custom_email(user):
"""Allow a user's profile to return an email for the user."""
return user.email
tags = []
if settings.PAGE_TAGGING:
tags = [tag.name for tag in page.tags.all()]
return {
'complete_slug': dict(
(lang, page.get_complete_slug(lang, hideroot=False))
for lang in languages),
'title': language_content('title'),
'author_email': custom_email(page.author),
'creation_date': isoformat(page.creation_date),
'publication_date': isoformat(page.publication_date),
'publication_end_date': isoformat(page.publication_end_date),
'last_modification_date': isoformat(page.last_modification_date),
'status': {
Page.PUBLISHED: 'published',
Page.HIDDEN: 'hidden',
Page.DRAFT: 'draft'}[page.status],
'template': page.template,
'sites': (
[site.domain for site in page.sites.all()]
if settings.PAGE_USE_SITE_ID else []),
'redirect_to_url': page.redirect_to_url,
'redirect_to_complete_slug': dict(
(lang, page.redirect_to.get_complete_slug(
lang, hideroot=False))
for lang in page.redirect_to.get_languages()
) if page.redirect_to is not None else None,
'content': placeholder_content(),
'content_language_updated_order': languages,
'tags': tags,
}
|
Return a python dict representation of this page for use as part of
a JSON export.
|
def getModelPosterior(self,min):
"""
        Uses the Laplace approximation to calculate the Bayesian model posterior.
"""
Sigma = self.getLaplaceCovar(min)
n_params = self.vd.getNumberScales()
ModCompl = 0.5*n_params*SP.log(2*SP.pi)+0.5*SP.log(SP.linalg.det(Sigma))
RV = min['LML']+ModCompl
return RV
|
Uses the Laplace approximation to calculate the Bayesian model posterior.
|
def _animate_bbvi(self,stored_latent_variables,stored_predictive_likelihood):
""" Produces animated plot of BBVI optimization
Returns
----------
None (changes model attributes)
"""
from matplotlib.animation import FuncAnimation, writers
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ud = BBVINLLMAnimate(ax,self.data,stored_latent_variables,self.index,self.z_no,self.link)
anim = FuncAnimation(fig, ud, frames=np.arange(stored_latent_variables.shape[0]), init_func=ud.init,
interval=10, blit=True)
plt.plot(self.data)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
|
Produces animated plot of BBVI optimization
Returns
----------
None (changes model attributes)
|
def distance(self, loc):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
assert type(loc) == type(self)
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [
self.lon,
self.lat,
loc.lon,
loc.lat,
])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 6371000 # Radius of earth in meters.
return c * r
|
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
|
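A standalone check of the same haversine formula, using well-known coordinates (Paris and London, roughly 343 km apart).

```python
from math import radians, sin, cos, asin, sqrt

lon1, lat1 = radians(2.3522), radians(48.8566)    # Paris
lon2, lat2 = radians(-0.1278), radians(51.5074)   # London
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
print(2 * asin(sqrt(a)) * 6371000 / 1000)         # ~343 km
```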
def run(command, parser, cl_args, unknown_args):
'''
:param command:
:param parser:
:param cl_args:
:param unknown_args:
:return:
'''
Log.debug("Deactivate Args: %s", cl_args)
return cli_helper.run(command, cl_args, "deactivate topology")
|
:param command:
:param parser:
:param cl_args:
:param unknown_args:
:return:
|
def write_unchecked_hmac_data(self, offsets, data):
        # type: (Descriptor, Offsets, bytes) -> str
"""Write unchecked encrypted data to disk
:param Descriptor self: this
:param Offsets offsets: download offsets
        :param bytes data: hmac/encrypted data
        :return: path to the temporary file holding the unchecked chunk
        :rtype: str
"""
fname = None
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as fd:
fname = fd.name
fd.write(data)
unchecked = UncheckedChunk(
data_len=len(data),
fd_start=0,
file_path=pathlib.Path(fname),
temp=True,
)
with self._meta_lock:
self._unchecked_chunks[offsets.chunk_num] = {
'ucc': unchecked,
'decrypted': False,
}
return str(unchecked.file_path)
|
Write unchecked encrypted data to disk
:param Descriptor self: this
:param Offsets offsets: download offsets
:param bytes data: hmac/encrypted data
:return: path to the temporary file holding the unchecked chunk
:rtype: str
|
def log_interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with logarithmic x-scale over a specified axis.
    Interpolation on a logarithmic x-scale for interpolation values in pressure coordinates.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x_log = np.array([1e3, 1e4, 1e5, 1e6])
>>> y_log = np.log(x_log) * 2 + 3
>>> x_interp = np.array([5e3, 5e4, 5e5])
>>> metpy.calc.log_interp(x_interp, x_log, y_log)
array([20.03438638, 24.63955657, 29.24472675])
Notes
-----
xp and args must be the same shape.
"""
# Pull out kwargs
fill_value = kwargs.pop('fill_value', np.nan)
axis = kwargs.pop('axis', 0)
# Log x and xp
log_x = np.log(x)
log_xp = np.log(xp)
return interpolate_1d(log_x, log_xp, *args, axis=axis, fill_value=fill_value)
|
r"""Interpolates data with logarithmic x-scale over a specified axis.
Interpolation on a logarithmic x-scale for interpolation values in pressure coordinates.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x_log = np.array([1e3, 1e4, 1e5, 1e6])
>>> y_log = np.log(x_log) * 2 + 3
>>> x_interp = np.array([5e3, 5e4, 5e5])
>>> metpy.calc.log_interp(x_interp, x_log, y_log)
array([20.03438638, 24.63955657, 29.24472675])
Notes
-----
xp and args must be the same shape.
|
def get_pubkey(self):
"""
Get the public key of this certificate.
:return: The public key.
:rtype: :py:class:`PKey`
"""
pkey = PKey.__new__(PKey)
pkey._pkey = _lib.NETSCAPE_SPKI_get_pubkey(self._spki)
_openssl_assert(pkey._pkey != _ffi.NULL)
pkey._pkey = _ffi.gc(pkey._pkey, _lib.EVP_PKEY_free)
pkey._only_public = True
return pkey
|
Get the public key of this certificate.
:return: The public key.
:rtype: :py:class:`PKey`
|
def parse(self, filepath, dependencies=False, recursive=False, greedy=False):
"""Parses the fortran code in the specified file.
:arg dependencies: if true, all folder paths will be searched for modules
that have been referenced but aren't loaded in the parser.
:arg greedy: if true, when a module cannot be found using a file name
of module_name.f90, all modules in all folders are searched."""
#If we have already parsed this file path, we should check to see if the
#module file has changed and needs to be reparsed.
abspath = self.tramp.abspath(filepath)
self._add_current_codedir(abspath)
fname = filepath.split("/")[-1].lower()
mtime_check = self._check_parse_modtime(abspath, fname)
if mtime_check is None:
return
#Keep track of parsing times if we are running in verbose mode.
if self.verbose:
start_time = clock()
msg.okay("WORKING on {0}".format(abspath), 2)
if fname not in self._modulefiles:
self._modulefiles[fname] = []
if fname not in self._programfiles:
self._programfiles[fname] = []
#Check if we can load the file from a pickle instead of doing a time
#consuming file system parse.
pickle_load = False
pprograms = []
if len(mtime_check) == 1 and settings.use_filesystem_cache:
#We use the pickler to load the file since a cached version might
#be good enough.
pmodules = self.serialize.load_module(abspath, mtime_check[0], self)
if pmodules is not None:
for module in pmodules:
self.modules[module.name.lower()] = module
self._modulefiles[fname].append(module.name.lower())
pickle_load = True
else:
#We have to do a full load from the file system.
pmodules, pprograms = self._parse_from_file(abspath, fname,
dependencies, recursive, greedy)
else:
#We have to do a full load from the file system.
pmodules, pprograms = self._parse_from_file(abspath, fname,
dependencies, recursive, greedy)
#Add the filename to the list of files that have been parsed.
self._parsed.append(abspath.lower())
if not pickle_load and len(pmodules) > 0 and settings.use_filesystem_cache:
self.serialize.save_module(abspath, pmodules)
if self.verbose:
msg.info("PARSED: {} modules and {} ".format(len(pmodules), len(pprograms)) +
"programs in {} in {}".format(fname, secondsToStr(clock() - start_time)), 2)
for module in pmodules:
msg.gen("\tMODULE {}".format(module.name), 2)
for program in pprograms:
msg.gen("\tPROGRAM {}".format(program.name), 2)
if len(pmodules) > 0 or len(pprograms) > 0:
msg.blank()
self._parse_dependencies(pmodules, dependencies, recursive, greedy)
|
Parses the fortran code in the specified file.
:arg dependencies: if true, all folder paths will be searched for modules
that have been referenced but aren't loaded in the parser.
:arg greedy: if true, when a module cannot be found using a file name
of module_name.f90, all modules in all folders are searched.
|
def filter_generic(self, content_object=None, **kwargs):
"""Filter by a generic object.
:param content_object: the content object to filter on.
"""
if content_object:
kwargs['content_type'] = ContentType.objects.get_for_model(
content_object
)
kwargs['object_id'] = content_object.id
return self.filter(**kwargs)
|
Filter by a generic object.
:param content_object: the content object to filter on.
|
def save(self, *args, **kwargs):
"""
        Generate a name, and ensure amount is between 0 and 100
"""
self.name = str(self.parent.name) + " - " + str(self.child.name) + " - " + str(self.ownership_type)
if self.amount > 100:
raise ValueError("Ownership amount cannot be more than 100%")
elif self.amount < 0:
raise ValueError("Ownership amount cannot be less than 0%")
else:
super(Ownership, self).save(*args, **kwargs)
|
Generate a name, and ensure amount is between 0 and 100
|
def free_index(self, name, free=True, **kwargs):
"""Free/Fix index of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
"""
src = self.roi.get_source_by_name(name)
self.free_source(name, free=free,
pars=index_parameters.get(src['SpectrumType'], []),
**kwargs)
|
Free/Fix index of a source.
Parameters
----------
name : str
Source name.
free : bool
Choose whether to free (free=True) or fix (free=False).
|
def remove_schema(self, database, schema):
"""Remove a schema from the set of known schemas (case-insensitive)
If the schema does not exist, it will be ignored - it could just be a
temporary table.
:param str database: The database name to remove.
:param str schema: The schema name to remove.
"""
self.schemas.discard((_lower(database), _lower(schema)))
|
Remove a schema from the set of known schemas (case-insensitive)
If the schema does not exist, it will be ignored - it could just be a
temporary table.
:param str database: The database name to remove.
:param str schema: The schema name to remove.
|
def _round_whole_even(i):
r'''Round a number to the nearest whole number. If the number is exactly
between two numbers, round to the even whole number. Used by
`viscosity_index`.
Parameters
----------
i : float
Number, [-]
Returns
-------
i : int
Rounded number, [-]
Notes
-----
Should never run with inputs from a practical function, as numbers on
computers aren't really normally exactly between two numbers.
Examples
--------
_round_whole_even(116.5)
116
'''
if i % .5 == 0:
if (i + 0.5) % 2 == 0:
i = i + 0.5
else:
i = i - 0.5
else:
i = round(i, 0)
return int(i)
|
r'''Round a number to the nearest whole number. If the number is exactly
between two numbers, round to the even whole number. Used by
`viscosity_index`.
Parameters
----------
i : float
Number, [-]
Returns
-------
i : int
Rounded number, [-]
Notes
-----
Should never run with inputs from a practical function, as numbers on
computers aren't really normally exactly between two numbers.
Examples
--------
_round_whole_even(116.5)
116
|
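A few illustrative half-and-non-half cases, assuming the function above is in scope; at exact halves it rounds to the even neighbour.

```python
print(_round_whole_even(116.5))  # 116 (117 is odd, so round down)
print(_round_whole_even(115.5))  # 116 (116 is even, so round up)
print(_round_whole_even(116.2))  # 116 (ordinary rounding away from halves)
```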
def get_object_by_name(content, object_type, name, regex=False):
'''
Get the vsphere object associated with a given text name
Source: https://github.com/rreubenur/vmware-pyvmomi-examples/blob/master/create_template.py
'''
container = content.viewManager.CreateContainerView(
content.rootFolder, [object_type], True
)
for c in container.view:
if regex:
if re.match(name, c.name):
return c
elif c.name == name:
return c
|
Get the vsphere object associated with a given text name
Source: https://github.com/rreubenur/vmware-pyvmomi-examples/blob/master/create_template.py
|
def create_invoice_from_albaran(pk, list_lines):
"""
    The pk and list_lines come from delivery notes (albaranes); we need the info from the related order lines.
"""
context = {}
if list_lines:
new_list_lines = [x[0] for x in SalesLineAlbaran.objects.values_list('line_order__pk').filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True)]
if new_list_lines:
lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
if lo and lo[0] and lo[0][0]:
new_pk = lo[0][0]
context = GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
if 'error' not in context or not context['error']:
SalesLineAlbaran.objects.filter(
pk__in=[int(x) for x in list_lines]
).exclude(invoiced=True).update(invoiced=True)
return context
else:
error = _('Pedido no encontrado')
else:
error = _('Lineas no relacionadas con pedido')
else:
error = _('Lineas no seleccionadas')
context['error'] = error
return context
|
The pk and list_lines come from delivery notes (albaranes); we need the info from the related order lines.
|
def _run(command, quiet=False, timeout=None):
"""Run a command, returns command output."""
try:
with _spawn(command, quiet, timeout) as child:
command_output = child.read().strip().replace("\r\n", "\n")
except pexpect.TIMEOUT:
logger.info(f"command {command} timed out")
raise Error()
return command_output
|
Run a command, returns command output.
|
def install_package_requirements(self, psrc, stream_output=None):
"""
Install from requirements.txt file found in psrc
:param psrc: name of directory in environment directory
"""
package = self.target + '/' + psrc
assert isdir(package), package
reqname = '/requirements.txt'
if not exists(package + reqname):
reqname = '/pip-requirements.txt'
if not exists(package + reqname):
return
return self.user_run_script(
script=scripts.get_script_path('install_reqs.sh'),
args=['/project/' + psrc + reqname],
rw_venv=True,
rw_project=True,
stream_output=stream_output
)
|
Install from requirements.txt file found in psrc
:param psrc: name of directory in environment directory
|
def dinic(graph, capacity, source, target):
"""Maximum flow by Dinic
:param graph: directed graph in listlist or listdict format
:param capacity: in matrix format or same listdict graph
:param int source: vertex
:param int target: vertex
:returns: skew symmetric flow matrix, flow value
:complexity: :math:`O(|V|^2 |E|)`
"""
assert source != target
add_reverse_arcs(graph, capacity)
Q = deque()
total = 0
n = len(graph)
flow = [[0] * n for u in range(n)] # flow initially empty
while True: # repeat while we can increase
Q.appendleft(source)
lev = [None] * n # build levels, None = inaccessible
lev[source] = 0 # by BFS
while Q:
u = Q.pop()
for v in graph[u]:
if lev[v] is None and capacity[u][v] > flow[u][v]:
lev[v] = lev[u] + 1
Q.appendleft(v)
if lev[target] is None: # stop if sink is not reachable
return flow, total
up_bound = sum(capacity[source][v] for v in graph[source]) - total
total += _dinic_step(graph, capacity, lev, flow, source, target,
up_bound)
|
Maximum flow by Dinic
:param graph: directed graph in listlist or listdict format
:param capacity: in matrix format or same listdict graph
:param int source: vertex
:param int target: vertex
:returns: skew symmetric flow matrix, flow value
:complexity: :math:`O(|V|^2 |E|)`
|
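This matches the dinic routine in the tryalgo package; a minimal usage sketch, assuming its helpers (`add_reverse_arcs`, `_dinic_step`, `deque`) are available from the same module.

```python
graph = [[1, 2], [2, 3], [3], []]   # adjacency lists for vertices 0..3
capacity = [[0, 3, 2, 0],           # capacity[u][v] in matrix format
            [0, 0, 1, 2],
            [0, 0, 0, 3],
            [0, 0, 0, 0]]
flow, value = dinic(graph, capacity, source=0, target=3)
print(value)  # 5: paths 0-1-3 (2), 0-2-3 (2) and 0-1-2-3 (1)
```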
def new(self, limit=None):
"""GETs new links from this subreddit. Calls :meth:`narwal.Reddit.new`.
:param limit: max number of links to return
"""
return self._reddit.new(self.display_name, limit=limit)
|
GETs new links from this subreddit. Calls :meth:`narwal.Reddit.new`.
:param limit: max number of links to return
|
def get_obj(self, vimtype, name, folder=None):
"""
Return an object by name, if name is None the
first found object is returned
"""
obj = None
content = self.service_instance.RetrieveContent()
if folder is None:
folder = content.rootFolder
container = content.viewManager.CreateContainerView(folder, [vimtype], True)
for c in container.view:
if c.name == name:
obj = c
break
container.Destroy()
return obj
|
Return an object by name, if name is None the
first found object is returned
|
def rewriteFasta(sequence, sequence_name, fasta_in, fasta_out):
"""
Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing
"""
f=open(fasta_in, 'r+')
f2=open(fasta_out,'w')
lines = f.readlines()
i=0
while i < len(lines):
line = lines[i]
if line[0] == ">":
f2.write(line)
fChr=line.split(" ")[0]
fChr=fChr[1:]
if fChr == sequence_name:
code=['N','A','C','T','G']
firstbase=lines[i+1][0]
while firstbase in code:
i=i+1
firstbase=lines[i][0]
s=0
                while s < len(sequence):
f2.write(sequence[s:s+60]+"\n")
s=s+60
else:
i=i+1
else:
f2.write(line)
i=i+1
    f2.close()
    f.close()
|
Rewrites a specific sequence in a multifasta file while keeping the sequence header.
:param sequence: a string with the sequence to be written
:param sequence_name: the name of the sequence to be retrieved eg. for '>2 dna:chromosome chromosome:GRCm38:2:1:182113224:1 REF' use: sequence_name=str(2)
:param fasta_in: /path/to/original.fa
:param fasta_out: /path/to/destination.fa
:returns: nothing
|
def is_ready(self):
"""Is thread & ioloop ready.
:returns bool:
"""
if not self._thread:
return False
if not self._ready.is_set():
return False
return True
|
Is thread & ioloop ready.
:returns bool:
|
def score_x_of_a_kind_yahtzee(dice: List[int], min_same_faces: int) -> int:
"""Return sum of dice if there are a minimum of equal min_same_faces dice, otherwise
return zero. Only works for 3 or more min_same_faces.
"""
for die, count in Counter(dice).most_common(1):
if count >= min_same_faces:
return sum(dice)
return 0
|
Return the sum of all dice if at least min_same_faces dice show the same
face, otherwise return zero. Only works for min_same_faces of 3 or more.
|
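Two illustrative calls, assuming the function above is in scope (it needs `Counter` from `collections` and `List` from `typing`).

```python
from collections import Counter
from typing import List

print(score_x_of_a_kind_yahtzee([3, 3, 3, 4, 5], min_same_faces=3))  # 18
print(score_x_of_a_kind_yahtzee([3, 3, 4, 4, 5], min_same_faces=3))  # 0
```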
def digest_file(fname):
"""
Digest files using SHA-2 (256-bit)
TESTING
Produces identical output to `openssl sha256 FILE` for the following:
* on all source .py files and some binary pyc files in parent dir
* empty files with different names
* 3.3GB DNAse Hypersensitive file
    * empty file, file with one space, file with one return all produce
      distinct output
PERF takes about 20 seconds to hash 3.3GB file
on an empty file and on build.py
INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
WARNING: not clear if we need to pad file bytes for proper cryptographic
hashing
"""
#chunk size in bytes
size = 4096
hval = hashlib.new(HASH_TYPE)
with open(fname, 'rb') as fd:
for chunk in iter(lambda: fd.read(size), b''):
hval.update(chunk)
return hval.hexdigest()
|
Digest files using SHA-2 (256-bit)
TESTING
Produces identical output to `openssl sha256 FILE` for the following:
* on all source .py files and some binary pyc files in parent dir
* empty files with different names
* 3.3GB DNAse Hypersensitive file
* empty file, file with one space, file with one return all produce
  distinct output
PERF takes about 20 seconds to hash 3.3GB file
on an empty file and on build.py
INSPIRATION: http://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file
WARNING: not clear if we need to pad file bytes for proper cryptographic
hashing
|
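A small round trip, assuming `HASH_TYPE` is the module-level constant the docstring implies ('sha256' here is an assumption).

```python
import hashlib, os, tempfile

HASH_TYPE = "sha256"  # assumption: the constant referenced by digest_file

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello\n")
print(digest_file(tmp.name))  # same digest as `openssl sha256` on the file
os.unlink(tmp.name)
```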
def parse_scalar_type_definition(lexer: Lexer) -> ScalarTypeDefinitionNode:
"""ScalarTypeDefinition: Description? scalar Name Directives[Const]?"""
start = lexer.token
description = parse_description(lexer)
expect_keyword(lexer, "scalar")
name = parse_name(lexer)
directives = parse_directives(lexer, True)
return ScalarTypeDefinitionNode(
description=description, name=name, directives=directives, loc=loc(lexer, start)
)
|
ScalarTypeDefinition: Description? scalar Name Directives[Const]?
|
def Server(self):
"""Return server associated with this request.
>>> d = clc.v2.Datacenter()
>>> q = clc.v2.Server.Create(name="api2",cpu=1,memory=1,group_id=d.Groups().Get("Default Group").id,template=d.Templates().Search("centos-6-64")[0].id,network_id=d.Networks().networks[0].id,ttl=4000)
>>> q.WaitUntilComplete()
0
>>> q.success_requests[0].Server()
<clc.APIv2.server.Server object at 0x1095a8390>
>>> print _
VA1BTDIAPI214
"""
if self.context_key == 'newserver':
server_id = clc.v2.API.Call('GET', self.context_val,session=self.session)['id']
return(clc.v2.Server(id=server_id,alias=self.alias,session=self.session))
elif self.context_key == 'server':
return(clc.v2.Server(id=self.context_val,alias=self.alias,session=self.session))
else: raise(clc.CLCException("%s object not server" % self.context_key))
|
Return server associated with this request.
>>> d = clc.v2.Datacenter()
>>> q = clc.v2.Server.Create(name="api2",cpu=1,memory=1,group_id=d.Groups().Get("Default Group").id,template=d.Templates().Search("centos-6-64")[0].id,network_id=d.Networks().networks[0].id,ttl=4000)
>>> q.WaitUntilComplete()
0
>>> q.success_requests[0].Server()
<clc.APIv2.server.Server object at 0x1095a8390>
>>> print _
VA1BTDIAPI214
|
def isdir(self, path):
"""Return true if the path refers to an existing directory.
Parameters
----------
path : str
Path of directory on the remote side to check.
"""
        import stat
        try:
            # lstat alone only proves the path exists; check the mode bits so
            # the result matches the docstring and is False for regular files.
            result = stat.S_ISDIR(self.sftp_client.lstat(path).st_mode)
        except FileNotFoundError:
            result = False
        return result
|
Return true if the path refers to an existing directory.
Parameters
----------
path : str
Path of directory on the remote side to check.
|
def get_or_create(self, um_from_user, um_to_user, message):
"""
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
"""
created = False
try:
contact = self.get(Q(um_from_user=um_from_user, um_to_user=um_to_user) |
Q(um_from_user=um_to_user, um_to_user=um_from_user))
except self.model.DoesNotExist:
created = True
contact = self.create(um_from_user=um_from_user,
um_to_user=um_to_user,
latest_message=message)
return (contact, created)
|
Get or create a Contact
We override Django's :func:`get_or_create` because we want contact to
be unique in a bi-directional manner.
|
async def step(self, step_id, session, scenario=None):
""" single scenario call.
When it returns 1, it works. -1 the script failed,
0 the test is stopping or needs to stop.
"""
if scenario is None:
scenario = pick_scenario(self.wid, step_id)
try:
await self.send_event('scenario_start', scenario=scenario)
await scenario['func'](session, *scenario['args'],
**scenario['kw'])
await self.send_event('scenario_success', scenario=scenario)
if scenario['delay'] > 0.:
await cancellable_sleep(scenario['delay'])
return 1
except Exception as exc:
await self.send_event('scenario_failure',
scenario=scenario,
exception=exc)
if self.args.verbose > 0:
self.console.print_error(exc)
await self.console.flush()
return -1
|
single scenario call.
When it returns 1, it works. -1 the script failed,
0 the test is stopping or needs to stop.
|
def add_statements(self, pmid, stmts):
"""Add INDRA Statements to the incremental model indexed by PMID.
Parameters
----------
pmid : str
The PMID of the paper from which statements were extracted.
stmts : list[indra.statements.Statement]
A list of INDRA Statements to be added to the model.
"""
if pmid not in self.stmts:
self.stmts[pmid] = stmts
else:
self.stmts[pmid] += stmts
|
Add INDRA Statements to the incremental model indexed by PMID.
Parameters
----------
pmid : str
The PMID of the paper from which statements were extracted.
stmts : list[indra.statements.Statement]
A list of INDRA Statements to be added to the model.
|
def recursive_refs(envs, name):
"""
Return set of recursive refs for given env name
>>> local_refs = sorted(recursive_refs([
... {'name': 'base', 'refs': []},
... {'name': 'test', 'refs': ['base']},
... {'name': 'local', 'refs': ['test']},
... ], 'local'))
>>> local_refs == ['base', 'test']
True
"""
refs_by_name = {
env['name']: set(env['refs'])
for env in envs
}
refs = refs_by_name[name]
if refs:
indirect_refs = set(itertools.chain.from_iterable([
recursive_refs(envs, ref)
for ref in refs
]))
else:
indirect_refs = set()
return set.union(refs, indirect_refs)
|
Return set of recursive refs for given env name
>>> local_refs = sorted(recursive_refs([
... {'name': 'base', 'refs': []},
... {'name': 'test', 'refs': ['base']},
... {'name': 'local', 'refs': ['test']},
... ], 'local'))
>>> local_refs == ['base', 'test']
True
|
def em_schedule(**kwargs):
"""Run multiple energy minimizations one after each other.
:Keywords:
*integrators*
list of integrators (from 'l-bfgs', 'cg', 'steep')
        [['l-bfgs', 'steep']]
*nsteps*
list of maximum number of steps; one for each integrator in
in the *integrators* list [[100,1000]]
*kwargs*
mostly passed to :func:`gromacs.setup.energy_minimize`
:Returns: dictionary with paths to final structure ('struct') and
other files
:Example:
Conduct three minimizations:
    1. low memory Broyden-Fletcher-Goldfarb-Shanno (BFGS) for 30 steps
2. steepest descent for 200 steps
3. finish with BFGS for another 30 steps
We also do a multi-processor minimization when possible (i.e. for steep
(and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class
for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see
:mod:`gromacs.run` for details)::
import gromacs.run
gromacs.setup.em_schedule(struct='solvate/ionized.gro',
mdrunner=gromacs.run.MDrunnerOpenMP64,
integrators=['l-bfgs', 'steep', 'l-bfgs'],
nsteps=[50,200, 50])
.. Note:: You might have to prepare the mdp file carefully because at the
moment one can only modify the *nsteps* parameter on a
per-minimizer basis.
"""
mdrunner = kwargs.pop('mdrunner', None)
integrators = kwargs.pop('integrators', ['l-bfgs', 'steep'])
    kwargs.pop('integrator', None)  # clean input; we set integrator from integrators
nsteps = kwargs.pop('nsteps', [100, 1000])
outputs = ['em{0:03d}_{1!s}.pdb'.format(i, integrator) for i,integrator in enumerate(integrators)]
outputs[-1] = kwargs.pop('output', 'em.pdb')
files = {'struct': kwargs.pop('struct', None)} # fake output from energy_minimize()
for i, integrator in enumerate(integrators):
struct = files['struct']
logger.info("[em %d] energy minimize with %s for maximum %d steps", i, integrator, nsteps[i])
kwargs.update({'struct':struct, 'output':outputs[i],
'integrator':integrator, 'nsteps': nsteps[i]})
if not integrator == 'l-bfgs':
kwargs['mdrunner'] = mdrunner
else:
kwargs['mdrunner'] = None
logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot "
"do parallel runs.", i)
files = energy_minimize(**kwargs)
return files
|
Run multiple energy minimizations one after each other.
:Keywords:
*integrators*
list of integrators (from 'l-bfgs', 'cg', 'steep')
[['l-bfgs', 'steep']]
*nsteps*
list of maximum number of steps; one for each integrator in
in the *integrators* list [[100,1000]]
*kwargs*
mostly passed to :func:`gromacs.setup.energy_minimize`
:Returns: dictionary with paths to final structure ('struct') and
other files
:Example:
Conduct three minimizations:
1. low memory Broyden-Fletcher-Goldfarb-Shanno (BFGS) for 30 steps
2. steepest descent for 200 steps
3. finish with BFGS for another 30 steps
We also do a multi-processor minimization when possible (i.e. for steep
(and conjugate gradient) by using a :class:`gromacs.run.MDrunner` class
for a :program:`mdrun` executable compiled for OpenMP in 64 bit (see
:mod:`gromacs.run` for details)::
import gromacs.run
gromacs.setup.em_schedule(struct='solvate/ionized.gro',
mdrunner=gromacs.run.MDrunnerOpenMP64,
integrators=['l-bfgs', 'steep', 'l-bfgs'],
nsteps=[50,200, 50])
.. Note:: You might have to prepare the mdp file carefully because at the
moment one can only modify the *nsteps* parameter on a
per-minimizer basis.
|
def loads(s, model=None, parser=None):
"""Deserialize s (a str) to a Python object."""
with StringIO(s) as f:
return load(f, model=model, parser=parser)
|
Deserialize s (a str) to a Python object.
|
def offset_mask(mask):
""" Returns a mask shrunk to the 'minimum bounding rectangle' of the
nonzero portion of the previous mask, and its offset from the original.
Useful to find the smallest rectangular section of the image that can be
extracted to include the entire geometry. Conforms to the y-first
expectations of numpy arrays rather than x-first (geodata).
"""
def axis_data(axis):
"""Gets the bounds of a masked area along a certain axis"""
x = mask.sum(axis)
trimmed_front = N.trim_zeros(x,"f")
offset = len(x)-len(trimmed_front)
size = len(N.trim_zeros(trimmed_front,"b"))
return offset,size
xo,xs = axis_data(0)
yo,ys = axis_data(1)
array = mask[yo:yo+ys,xo:xo+xs]
offset = (yo,xo)
return offset, array
|
Returns a mask shrunk to the 'minimum bounding rectangle' of the
nonzero portion of the previous mask, and its offset from the original.
Useful to find the smallest rectangular section of the image that can be
extracted to include the entire geometry. Conforms to the y-first
expectations of numpy arrays rather than x-first (geodata).
|
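A small numpy check of the y-first offset convention, assuming numpy is imported as `N` (as the function above expects).

```python
import numpy as N

mask = N.zeros((5, 6), dtype=int)
mask[1:3, 2:5] = 1                 # a 2x3 block of nonzeros
offset, sub = offset_mask(mask)
print(offset)                      # (1, 2): (y, x) of the bounding box
print(sub.shape)                   # (2, 3)
```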
def _get_writable_metadata(self):
"""Get the object / blob metadata which is writable.
This is intended to be used when creating a new object / blob.
See the `API reference docs`_ for more information, the fields
marked as writable are:
* ``acl``
* ``cacheControl``
* ``contentDisposition``
* ``contentEncoding``
* ``contentLanguage``
* ``contentType``
* ``crc32c``
* ``md5Hash``
* ``metadata``
* ``name``
* ``storageClass``
For now, we don't support ``acl``, access control lists should be
managed directly through :class:`ObjectACL` methods.
"""
# NOTE: This assumes `self.name` is unicode.
object_metadata = {"name": self.name}
for key in self._changes:
if key in _WRITABLE_FIELDS:
object_metadata[key] = self._properties[key]
return object_metadata
|
Get the object / blob metadata which is writable.
This is intended to be used when creating a new object / blob.
See the `API reference docs`_ for more information, the fields
marked as writable are:
* ``acl``
* ``cacheControl``
* ``contentDisposition``
* ``contentEncoding``
* ``contentLanguage``
* ``contentType``
* ``crc32c``
* ``md5Hash``
* ``metadata``
* ``name``
* ``storageClass``
For now, we don't support ``acl``, access control lists should be
managed directly through :class:`ObjectACL` methods.
|
def process_module(self, module):
"""inspect the source file to find encoding problem"""
if module.file_encoding:
encoding = module.file_encoding
else:
encoding = "ascii"
with module.stream() as stream:
for lineno, line in enumerate(stream):
self._check_encoding(lineno + 1, line, encoding)
|
inspect the source file to find encoding problem
|
def advance_operation_time(self, operation_time):
"""Update the operation time for this session.
:Parameters:
- `operation_time`: The
:data:`~pymongo.client_session.ClientSession.operation_time` from
another `ClientSession` instance.
"""
if not isinstance(operation_time, Timestamp):
raise TypeError("operation_time must be an instance "
"of bson.timestamp.Timestamp")
self._advance_operation_time(operation_time)
|
Update the operation time for this session.
:Parameters:
- `operation_time`: The
:data:`~pymongo.client_session.ClientSession.operation_time` from
another `ClientSession` instance.
|
def print_experiments(experiments):
"""
Prints job details in a table. Includes urls and mode parameters
"""
headers = ["JOB NAME", "CREATED", "STATUS", "DURATION(s)", "INSTANCE", "DESCRIPTION", "METRICS"]
expt_list = []
for experiment in experiments:
expt_list.append([normalize_job_name(experiment.name),
experiment.created_pretty, experiment.state,
experiment.duration_rounded, experiment.instance_type_trimmed,
experiment.description, format_metrics(experiment.latest_metrics)])
floyd_logger.info(tabulate(expt_list, headers=headers))
|
Prints job details in a table. Includes urls and mode parameters
|
def DeleteInstance(self, si, logger, session, vcenter_data_model, vm_uuid, vm_name):
"""
:param logger:
:param CloudShellAPISession session:
:param str vm_name: This is the resource name
:return:
"""
# find vm
vm = self.pv_service.find_by_uuid(si, vm_uuid)
if vm is not None:
# destroy vm
result = self.pv_service.destroy_vm(vm=vm, logger=logger)
else:
resource___format = "Could not find the VM {0},will remove the resource.".format(vm_name)
logger.info(resource___format)
result = resource___format
return result
|
:param logger:
:param CloudShellAPISession session:
:param str vm_name: This is the resource name
:return:
|
def to_json(self):
"""
Returns the JSON representation of the content type.
"""
result = super(ContentType, self).to_json()
result.update({
'name': self.name,
'description': self.description,
'displayField': self.display_field,
'fields': [f.to_json() for f in self.fields]
})
return result
|
Returns the JSON representation of the content type.
|
def _close(self):
        Same as `close` but expects `lock` acquired.
"""
if self._state != "closed":
self.event(DisconnectedEvent(self._dst_addr))
self._set_state("closed")
if self._socket is None:
return
try:
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._socket.close()
self._socket = None
self._write_queue.clear()
self._write_queue_cond.notify()
|
Same as `close` but expects `lock` acquired.
|
def hash(self):
'''
:rtype: int
:return: hash of the field
'''
hashed = super(Group, self).hash()
return khash(hashed, frozenset(self._values))
|
:rtype: int
:return: hash of the field
|
def crc(self):
"""
A checksum for the current visual object and its parent mesh.
Returns
----------
crc: int, checksum of data in visual object and its parent mesh
"""
# will make sure everything has been transferred
# to datastore that needs to be before returning crc
result = self._data.fast_hash()
if hasattr(self.mesh, 'crc'):
# bitwise xor combines hashes better than a sum
result ^= self.mesh.crc()
return result
|
A checksum for the current visual object and its parent mesh.
Returns
----------
crc: int, checksum of data in visual object and its parent mesh
|
def restore_review_history_for(brain_or_object):
"""Restores the review history for the given brain or object
"""
# Get the review history. Note this comes sorted from oldest to newest
review_history = get_purged_review_history_for(brain_or_object)
obj = api.get_object(brain_or_object)
wf_tool = api.get_tool("portal_workflow")
wf_ids = get_workflow_ids_for(brain_or_object)
wfs = map(lambda wf_id: wf_tool.getWorkflowById(wf_id), wf_ids)
wfs = filter(lambda wf: wf.state_var == "review_state", wfs)
if not wfs:
logger.error("No valid workflow found for {}".format(api.get_id(obj)))
else:
# It should not be possible to have more than one workflow with same
# state_variable here. Anyhow, we don't care in this case (we only want
# the object to have a review history).
workflow = wfs[0]
create_action = False
for history in review_history:
action_id = history["action"]
if action_id is None:
if create_action:
# We don't want multiple creation events; we only stick to
# one workflow, so if this object had more than one wf
# bound in the past, we still want only one creation action
continue
create_action = True
# Change status and reindex
wf_tool.setStatusOf(workflow.id, obj, history)
indexes = ["review_state", "is_active"]
obj.reindexObject(idxs=indexes)
|
Restores the review history for the given brain or object
|
def request(self, method, path, contents, headers, decode_json=False,
stream=False, query=None, cdn=False):
"""
See :py:func:`swiftly.client.client.Client.request`
"""
if cdn:
raise Exception('CDN not yet supported with LocalClient')
if isinstance(contents, six.string_types):
contents = StringIO(contents)
if not headers:
headers = {}
if not query:
query = {}
rpath = path.lstrip('/')
if '/' in rpath:
container_name, object_name = rpath.split('/', 1)
else:
container_name = rpath
object_name = ''
if not container_name:
status, reason, hdrs, body = self._account(
method, contents, headers, stream, query, cdn)
elif not object_name:
status, reason, hdrs, body = self._container(
method, container_name, contents, headers, stream, query, cdn)
else:
status, reason, hdrs, body = self._object(
method, container_name, object_name, contents, headers, stream,
query, cdn)
if status and status // 100 != 5:
if not stream and decode_json and status // 100 == 2:
if body:
body = loads(body)
else:
body = None
return (status, reason, hdrs, body)
raise Exception('%s %s failed: %s %s' % (method, path, status, reason))
|
See :py:func:`swiftly.client.client.Client.request`
|
def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1):
"""Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
"""
hparams = trainer_lib.create_hparams(
hparams_set, data_dir=data_dir, problem_name=problem_name)
translate_model = registry.model(model_name)(
hparams, tf.estimator.ModeKeys.EVAL)
inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs")
targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets")
translate_model({
"inputs": inputs,
"targets": targets,
})
# Must be called after building the training graph, so that the dict will
# have been filled with the attention tensors, BUT before creating the
# inference graph; otherwise the dict is filled with tensors from inside
# a tf.while_loop used for decoding, which are marked unfetchable.
att_mats = get_att_mats(translate_model)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
samples = translate_model.infer({
"inputs": inputs,
}, beam_size=beam_size)["outputs"]
return inputs, targets, samples, att_mats
|
Build the graph required to fetch the attention weights.
Args:
hparams_set: HParams set to build the model with.
model_name: Name of model.
data_dir: Path to directory containing training data.
problem_name: Name of problem.
beam_size: (Optional) Number of beams to use when decoding a translation.
If set to 1 (default) then greedy decoding is used.
Returns:
Tuple of (
inputs: Input placeholder to feed in ids to be translated.
targets: Targets placeholder to feed to translation when fetching
attention weights.
samples: Tensor representing the ids of the translation.
att_mats: Tensors representing the attention weights.
)
|
def expandService(service_element):
"""Take a service element and expand it into an iterator of:
([type_uri], uri, service_element)
"""
uris = sortedURIs(service_element)
if not uris:
uris = [None]
expanded = []
for uri in uris:
type_uris = getTypeURIs(service_element)
expanded.append((type_uris, uri, service_element))
return expanded
|
Take a service element and expand it into a list of:
([type_uri], uri, service_element)
|
def filter_iqr(array, lower, upper):
"""
Return elements which fall within the specified interquartile range.
Arguments:
array (list): Sequence of numbers.
lower (float): Lower bound for IQR, in range 0 <= lower <= 1.
upper (float): Upper bound for IQR, in range 0 <= upper <= 1.
Returns:
list: Copy of original list, with elements outside of IQR
removed.
"""
upper, lower = iqr(array, upper, lower)
new = list(array)
for x in new[:]:
if x < lower or x > upper:
new.remove(x)
return new
|
Return elements which fall within the specified interquartile range.
Arguments:
array (list): Sequence of numbers.
lower (float): Lower bound for IQR, in range 0 <= lower <= 1.
upper (float): Upper bound for IQR, in range 0 <= upper <= 1.
Returns:
list: Copy of original list, with elements outside of IQR
removed.
|
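A usage sketch for filter_iqr; it relies on a sibling iqr() helper from the same module (not shown here), so this assumes both names are importable:
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 100]
trimmed = filter_iqr(data, lower=0.25, upper=0.75)
# 100 lies outside the interquartile bounds and is removed;
# `data` itself is untouched because the function copies the list first.
|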
def stats(self, start, end, fields=None):
'''Perform a multivariate statistic calculation of this
:class:`ColumnTS` from a *start* date/datetime to an
*end* date/datetime.
:param start: Start date for analysis.
:param end: End date for analysis.
:param fields: Optional subset of :meth:`fields` to perform analysis on.
If not provided all fields are included in the analysis.
'''
start = self.pickler.dumps(start)
end = self.pickler.dumps(end)
backend = self.read_backend
return backend.execute(
backend.structure(self).stats(start, end, fields), self._stats)
|
Perform a multivariate statistic calculation of this
:class:`ColumnTS` from a *start* date/datetime to an
*end* date/datetime.
:param start: Start date for analysis.
:param end: End date for analysis.
:param fields: Optional subset of :meth:`fields` to perform analysis on.
If not provided all fields are included in the analysis.
|
def _make_walker(self, *args, **kwargs):
# type: (*Any, **Any) -> Walker
"""Create a walker instance.
"""
walker = self.walker_class(*args, **kwargs)
return walker
|
Create a walker instance.
|
def check_label(labels, required, value_regex, target_labels):
"""
Check if the label is required and match the regex
:param labels: [str]
:param required: bool (if the presence means pass or not)
:param value_regex: str (using search method)
:param target_labels: dict {label: value} (a plain [str] only works when value_regex is not set)
:return: bool (required==True: True if the label is present and match the regex if specified)
(required==False: True if the label is not present)
"""
present = target_labels is not None and not set(labels).isdisjoint(set(target_labels))
if present:
if required and not value_regex:
return True
elif value_regex:
pattern = re.compile(value_regex)
present_labels = set(labels) & set(target_labels)
for l in present_labels:
if not bool(pattern.search(target_labels[l])):
return False
return True
else:
return False
else:
return not required
|
Check if the label is required and match the regex
:param labels: [str]
:param required: bool (if the presence means pass or not)
:param value_regex: str (using search method)
:param target_labels: dict {label: value} (a plain [str] only works when value_regex is not set)
:return: bool (required==True: True if the label is present and match the regex if specified)
(required==False: True if the label is not present)
|
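A usage sketch for check_label; as noted above, the value_regex path indexes target_labels by label name, so a mapping of label names to values is what that branch expects:
labels = ["maintainer", "version"]
target = {"maintainer": "alice@example.com", "version": "1.2.3"}

check_label(labels, True, None, target)               # True: both labels present
check_label(labels, True, r"@example\.com$", target)  # False: "1.2.3" fails the regex
check_label(["license"], False, None, target)         # True: label absent and not required
|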
def show_vcs_output_virtual_ip_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
virtual_ip_address = ET.SubElement(output, "virtual-ip-address")
virtual_ip_address.text = kwargs.pop('virtual_ip_address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def path_to_resource(project, path, type=None):
"""Get the resource at path
You only need to specify `type` if `path` does not exist. It can
be either 'file' or 'folder'. If the type is `None` it is assumed
that the resource already exists.
Note that this function uses `Project.get_resource()`,
`Project.get_file()`, and `Project.get_folder()` methods.
"""
project_path = path_relative_to_project_root(project, path)
if project_path is None:
project_path = rope.base.project._realpath(path)
project = rope.base.project.get_no_project()
if type is None:
return project.get_resource(project_path)
if type == 'file':
return project.get_file(project_path)
if type == 'folder':
return project.get_folder(project_path)
return None
|
Get the resource at path
You only need to specify `type` if `path` does not exist. It can
be either 'file' or 'folder'. If the type is `None` it is assumed
that the resource already exists.
Note that this function uses `Project.get_resource()`,
`Project.get_file()`, and `Project.get_folder()` methods.
|
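A usage sketch against the rope API (the project root and file paths are hypothetical):
import rope.base.project

project = rope.base.project.Project('/path/to/project')
existing = path_to_resource(project, '/path/to/project/pkg/mod.py')
# `type` is only needed when the file does not exist yet:
planned = path_to_resource(project, '/path/to/project/pkg/new.py', type='file')
|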
def getmergerequests(self, project_id, page=1, per_page=20, state=None):
"""
Get all the merge requests for a project.
:param project_id: ID of the project to retrieve merge requests for
:param page: Page Number
:param per_page: Records per page
:param state: Passes merge request state to filter them by it
:return: list with all the merge requests
"""
data = {'page': page, 'per_page': per_page, 'state': state}
request = requests.get(
'{0}/{1}/merge_requests'.format(self.projects_url, project_id),
params=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False
|
Get all the merge requests for a project.
:param project_id: ID of the project to retrieve merge requests for
:param page: Page Number
:param per_page: Records per page
:param state: Passes merge request state to filter them by it
:return: list with all the merge requests
|
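A usage sketch, where gl is assumed to be an instance of the client class this method belongs to. Because the method returns False on any non-200 response, compare with `is False` rather than relying on truthiness (an empty result page is falsy too):
mrs = gl.getmergerequests(project_id=42, state='opened', per_page=50)
if mrs is False:
    print('request failed')
else:
    for mr in mrs:
        print(mr['iid'], mr['title'])
|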
def connect(db_url=None,
pooling=hgvs.global_config.uta.pooling,
application_name=None,
mode=None,
cache=None):
"""Connect to a uta/ncbi database instance.
:param db_url: URL for database connection
:type db_url: string
:param pooling: whether to use connection pooling (postgresql only)
:type pooling: bool
:param application_name: log application name in connection (useful for debugging; PostgreSQL only)
:type application_name: str
When called with an explicit db_url argument, that db_url is used for connecting.
When called without an explicit argument, the function default is
determined by the environment variable UTA_DB_URL if it exists, or
hgvs.datainterface.uta.public_db_url otherwise.
>>> hdp = connect()
>>> hdp.schema_version()
'1.1'
The format of the db_url is driver://user:pass@host/database (the same
as that used by SQLAlchemy). Examples:
A remote public postgresql database:
postgresql://anonymous:anonymous@uta.biocommons.org/uta
A local postgresql database:
postgresql://localhost/uta
A local SQLite database:
sqlite:////tmp/uta-0.0.6.db
For postgresql db_urls, pooling=True causes connect to use a
psycopg2.pool.ThreadedConnectionPool.
"""
_logger.debug('connecting to ' + str(db_url) + '...')
if db_url is None:
db_url = _get_ncbi_db_url()
url = _parse_url(db_url)
if url.scheme == 'postgresql':
conn = NCBI_postgresql(
url=url, pooling=pooling, application_name=application_name, mode=mode, cache=cache)
else:
# fell through connection scheme cases
raise RuntimeError("{url.scheme} in {url} is not currently supported".format(url=url))
_logger.info('connected to ' + str(db_url) + '...')
return conn
|
Connect to a uta/ncbi database instance.
:param db_url: URL for database connection
:type db_url: string
:param pooling: whether to use connection pooling (postgresql only)
:type pooling: bool
:param application_name: log application name in connection (useful for debugging; PostgreSQL only)
:type application_name: str
When called with an explicit db_url argument, that db_url is used for connecting.
When called without an explicit argument, the function default is
determined by the environment variable UTA_DB_URL if it exists, or
hgvs.datainterface.uta.public_db_url otherwise.
>>> hdp = connect()
>>> hdp.schema_version()
'1.1'
The format of the db_url is driver://user:pass@host/database (the same
as that used by SQLAlchemy). Examples:
A remote public postgresql database:
postgresql://anonymous:anonymous@uta.biocommons.org/uta
A local postgresql database:
postgresql://localhost/uta
A local SQLite database:
sqlite:////tmp/uta-0.0.6.db
For postgresql db_urls, pooling=True causes connect to use a
psycopg2.pool.ThreadedConnectionPool.
|
def crc(self):
"""
A zlib.crc32 or zlib.adler32 checksum
of the current data.
Returns
-----------
crc: int, checksum from zlib.crc32 or zlib.adler32
"""
if self._modified_c or not hasattr(self, '_hashed_crc'):
if self.flags['C_CONTIGUOUS']:
self._hashed_crc = crc32(self)
else:
# the case where we have sliced our nice
# contiguous array into a non-contiguous block
# for example (note slice *after* track operation):
# t = util.tracked_array(np.random.random(10))[::-1]
contiguous = np.ascontiguousarray(self)
self._hashed_crc = crc32(contiguous)
self._modified_c = False
return self._hashed_crc
|
A zlib.crc32 or zlib.adler32 checksum
of the current data.
Returns
-----------
crc: int, checksum from zlib.crc32 or zlib.adler32
|
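A usage sketch; the import path of tracked_array has moved between trimesh versions, so treat it as an assumption:
import numpy as np
from trimesh.caching import tracked_array  # location may differ by version

a = tracked_array(np.arange(10, dtype=np.float64))
c1 = a.crc()
a[0] = -1.0   # in-place mutation flags the array as modified
c2 = a.crc()  # checksum is recomputed on demand; c2 != c1
|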
def reset(self, labels=None):
"""Reset specified timer(s).
Parameters
----------
labels : string or list, optional (default None)
Specify the label(s) of the timer(s) to be reset. If it is
``None``, reset the default timer with label specified by the
``dfltlbl`` parameter of :meth:`__init__`. If it is equal to
the string specified by the ``alllbl`` parameter of
:meth:`__init__`, reset all timers.
"""
# Default label is self.dfltlbl
if labels is None:
labels = self.dfltlbl
# All timers are affected if label is equal to self.alllbl,
# otherwise only the timer(s) specified by label
if labels == self.alllbl:
labels = self.t0.keys()
elif not isinstance(labels, (list, tuple)):
labels = [labels,]
# Iterate over specified label(s)
for lbl in labels:
if lbl not in self.t0:
raise KeyError('Unrecognized timer key %s' % lbl)
# Set start time to None to indicate timer is not running
self.t0[lbl] = None
# Set time accumulator to zero
self.td[lbl] = 0.0
|
Reset specified timer(s).
Parameters
----------
labels : string or list, optional (default None)
Specify the label(s) of the timer(s) to be reset. If it is
``None``, reset the default timer with label specified by the
``dfltlbl`` parameter of :meth:`__init__`. If it is equal to
the string specified by the ``alllbl`` parameter of
:meth:`__init__`, reset all timers.
|
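A usage sketch, assuming the enclosing timer class exposes the usual start()/stop() companions (construction details are hedged, see the class __init__):
t = Timer()       # hypothetical construction
t.start('io')     # assuming a start() counterpart exists
t.stop('io')
t.reset('io')     # zero the 'io' timer only
t.reset()         # reset the default timer (dfltlbl)
|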
def origin_west_asia(origin):
"""\
Returns if the origin is located in Western Asia.
Holds true for the following countries:
* Armenia
* Azerbaijan
* Bahrain
* Cyprus
* Georgia
* Iraq
* Israel
* Jordan
* Kuwait
* Lebanon
* Oman
* Qatar
* Saudi Arabia
* Syria
* Turkey
* United Arab Emirates
* Yemen
`origin`
The origin to check.
"""
return origin_armenia(origin) or origin_azerbaijan(origin) \
or origin_bahrain(origin) or origin_cyprus(origin) \
or origin_georgia(origin) \
or origin_iraq(origin) or origin_israel(origin) \
or origin_jordan(origin) or origin_kuwait(origin) \
or origin_lebanon(origin) or origin_oman(origin) \
or origin_qatar(origin) or origin_saudi_arabia(origin) \
or origin_syria(origin) or origin_turkey(origin) \
or origin_united_arab_emirates(origin) or origin_yemen(origin)
|
\
Returns if the origin is located in Western Asia.
Holds true for the following countries:
* Armenia
* Azerbaijan
* Bahrain
* Cyprus
* Georgia
* Iraq
* Israel
* Jordan
* Kuwait
* Lebanon
* Oman
* Qatar
* Saudi Arabia
* Syria
* Turkey
* United Arab Emirates
* Yemen
`origin`
The origin to check.
|
def get_file_listing_sha(listing_paths: Iterable) -> str:
"""Return sha256 string for group of FTP listings."""
return sha256(''.join(sorted(listing_paths)).encode('utf-8')).hexdigest()
|
Return sha256 string for group of FTP listings.
|
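A self-contained usage sketch; because the listings are sorted before hashing, the digest is independent of input order:
listings = ['/pub/a.txt 2048', '/pub/b.txt 1024']
digest = get_file_listing_sha(listings)
assert digest == get_file_listing_sha(reversed(listings))
|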
def init(self):
"""Initialize histograms."""
evclass_shape = [16, 40, 10]
evtype_shape = [16, 16, 40, 10]
evclass_psf_shape = [16, 40, 10, 100]
evtype_psf_shape = [16, 16, 40, 10, 100]
self._hists_eff = dict()
self._hists = dict(evclass_on=np.zeros(evclass_shape),
evclass_off=np.zeros(evclass_shape),
evclass_alpha=np.zeros([16, 40, 1]),
evtype_on=np.zeros(evtype_shape),
evtype_off=np.zeros(evtype_shape),
evtype_alpha=np.zeros([16, 1, 40, 1]),
evclass_psf_on=np.zeros(evclass_psf_shape),
evclass_psf_off=np.zeros(evclass_psf_shape),
evtype_psf_on=np.zeros(evtype_psf_shape),
evtype_psf_off=np.zeros(evtype_psf_shape),
)
|
Initialize histograms.
|
def bind_bar(self, sender=None, **kwargs):
"""Binds a navigation bar into this extension instance."""
bar = kwargs.pop('bar')
self.bars[bar.name] = bar
|
Binds a navigation bar into this extension instance.
|
def get_ajax_url(self):
"""Get ajax url"""
if self.ajax_url:
return self.ajax_url
return reverse('trionyx:model-list-ajax', kwargs=self.kwargs)
|
Get ajax url
|
def replace_markdown_cells(src, dst):
"""
Overwrite markdown cells in notebook object `dst` with corresponding
cells in notebook object `src`.
"""
# It is an error to attempt markdown replacement if src and dst
# have different numbers of cells
if len(src['cells']) != len(dst['cells']):
raise ValueError('notebooks do not have the same number of cells')
# Iterate over cells in src
for n in range(len(src['cells'])):
# It is an error to attempt markdown replacement if any
# corresponding pair of cells have different type
if src['cells'][n]['cell_type'] != dst['cells'][n]['cell_type']:
raise ValueError('cell number %d of different type in src and dst' % n)
# If current src cell is a markdown cell, copy the src cell to
# the dst cell
if src['cells'][n]['cell_type'] == 'markdown':
dst['cells'][n]['source'] = src['cells'][n]['source']
|
Overwrite markdown cells in notebook object `dst` with corresponding
cells in notebook object `src`.
|
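A usage sketch with nbformat (filenames hypothetical): replace the markdown of an executed notebook with freshly edited prose while keeping its outputs:
import nbformat

src = nbformat.read('edited.ipynb', as_version=4)     # fresh markdown
dst = nbformat.read('executed.ipynb', as_version=4)   # kept outputs
replace_markdown_cells(src, dst)
nbformat.write(dst, 'executed.ipynb')
|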
def drop_scored_calls(self,names):
"""
Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified.
"""
def _remove(calls,names):
d = dict([(k,v) for k,v in calls.items() if k not in names])
return d
if isinstance(names, str):
names = [names]
output = self.copy()
output['scored_calls'] = output['scored_calls'].\
apply(lambda x: _remove(x,names))
return output
|
Take a name or list of scored call names and drop those from the scored calls
Args:
names (list): list of names to drop or a single string name to drop
Returns:
CellDataFrame: The CellDataFrame modified.
|
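A usage sketch, where cdf is assumed to be an existing CellDataFrame with a populated scored_calls column (the call names are hypothetical):
trimmed = cdf.drop_scored_calls('PDL1')            # drop a single call by name
trimmed = cdf.drop_scored_calls(['PDL1', 'CD3'])   # or several at once
# cdf itself is unchanged; the method operates on a copy.
|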
def _G(self, x, p):
"""
analytic solution of the 2d projected mass integral
integral: 2 * pi * x * kappa * dx
:param x:
:param p:
:return:
"""
prefactor = (p + p ** 3) ** -1 * p
if isinstance(x, np.ndarray):
inds0 = np.where(x * p == 1)
inds1 = np.where(x * p < 1)
inds2 = np.where(x * p > 1)
func = np.ones_like(x)
func[inds0] = np.log(0.25 * x[inds0] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds0]) - 1) + \
2 * p ** 2 * (self._u(x[inds0]) * np.arctanh(self._u(x[inds0]) ** -1) +
np.log(0.5 * x[inds0]))
func[inds1] = np.log(0.25 * x[inds1] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds1]) - 1) + \
2 * p ** 2 * (self._u(x[inds1]) * np.arctanh(self._u(x[inds1]) ** -1) +
np.log(0.5 * x[inds1])) + 2 * self._g(x[inds1], p) * np.arctanh(
self._g(x[inds1], p))
func[inds2] = np.log(0.25 * x[inds2] ** 2 * p ** 2) + np.pi * p * (self._u(x[inds2]) - 1) + \
2 * p ** 2 * (self._u(x[inds2]) * np.arctanh(self._u(x[inds2]) ** -1) +
np.log(0.5 * x[inds2])) - 2 * self._f(x[inds2], p) * np.arctan(
self._f(x[inds2], p))
else:
if x * p == 1:
func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) +
np.log(0.5 * x))
elif x * p < 1:
func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) +
np.log(0.5 * x)) + 2 * self._g(x, p) * np.arctanh(self._g(x, p))
else:
func = np.log(0.25 * x ** 2 * p ** 2) + np.pi * p * (self._u(x) - 1) + \
2 * p ** 2 * (self._u(x) * np.arctanh(self._u(x) ** -1) +
np.log(0.5 * x)) - 2 * self._f(x, p) * np.arctan(self._f(x, p))
return func * prefactor
|
analytic solution of the 2d projected mass integral
integral: 2 * pi * x * kappa * dx
:param x:
:param p:
:return:
|
def with_wrapper(self, wrapper=None, name=None):
""" Copy this BarSet, and return a new BarSet with the specified
name and wrapper.
If no name is given, `{self.name}_custom_wrapper` is used.
If no wrapper is given, the new BarSet will have no wrapper.
"""
name = name or '{}_custom_wrapper'.format(self.name)
return self.__class__(self.data, name=name, wrapper=wrapper)
|
Copy this BarSet, and return a new BarSet with the specified
name and wrapper.
If no name is given, `{self.name}_custom_wrapper` is used.
If no wrapper is given, the new BarSet will have no wrapper.
|
def guess_locktime(redeem_script):
'''
str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Returns 0 if there's not a parseable constant before OP_CLTV
'''
try:
script_array = redeem_script.split()
loc = script_array.index('OP_CHECKLOCKTIMEVERIFY')
return int(script_array[loc - 1], 16)
except ValueError:
return 0
|
str -> int
If OP_CLTV is used, guess an appropriate lock_time
Otherwise return 0 (no lock time)
Returns 0 if there's not a parseable constant before OP_CLTV
|
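A self-contained usage sketch (the script fragments are hypothetical):
guess_locktime('000fd1e9 OP_CHECKLOCKTIMEVERIFY OP_DROP')  # 1036777 == int('000fd1e9', 16)
guess_locktime('OP_DUP OP_HASH160')                        # 0: no OP_CLTV in the script
|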
def _update_states(self,
final_states: RnnStateStorage,
restoration_indices: torch.LongTensor) -> None:
"""
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
Parameters
----------
final_states : ``RnnStateStorage``, required.
The hidden states returned as output from the RNN.
restoration_indices : ``torch.LongTensor``, required.
The indices that invert the sorting used in ``sort_and_run_forward``
to order the states with respect to the lengths of the sequences in
the batch.
"""
# TODO(Mark): seems weird to sort here, but append zeros in the subclasses.
# which way around is best?
new_unsorted_states = [state.index_select(1, restoration_indices)
for state in final_states]
if self._states is None:
# We don't already have states, so just set the
# ones we receive to be the current state.
self._states = tuple(state.data for state in new_unsorted_states)
else:
# Now we've sorted the states back so that they correspond to the original
# indices, we need to figure out what states we need to update, because if we
# didn't use a state for a particular row, we want to preserve its state.
# Thankfully, the rows which are all zero in the state correspond exactly
# to those which aren't used, so we create masks of shape (new_batch_size,),
# denoting which states were used in the RNN computation.
current_state_batch_size = self._states[0].size(1)
new_state_batch_size = final_states[0].size(1)
# Masks for the unused states of shape (1, new_batch_size, 1)
used_new_rows_mask = [(state[0, :, :].sum(-1)
!= 0.0).float().view(1, new_state_batch_size, 1)
for state in new_unsorted_states]
new_states = []
if current_state_batch_size > new_state_batch_size:
# The new state is smaller than the old one,
# so just update the indices which we used.
for old_state, new_state, used_mask in zip(self._states,
new_unsorted_states,
used_new_rows_mask):
# zero out all rows in the previous state
# which _were_ used in the current state.
masked_old_state = old_state[:, :new_state_batch_size, :] * (1 - used_mask)
# The old state is larger, so update the relevant parts of it.
old_state[:, :new_state_batch_size, :] = new_state + masked_old_state
new_states.append(old_state.detach())
else:
# The states are the same size, so we just have to
# deal with the possibility that some rows weren't used.
for old_state, new_state, used_mask in zip(self._states,
new_unsorted_states,
used_new_rows_mask):
# zero out all rows which _were_ used in the current state.
masked_old_state = old_state * (1 - used_mask)
# The states are the same size; blend the preserved old rows into the new state.
new_state += masked_old_state
new_states.append(new_state.detach())
# It looks like there should be another case handled here - when
# the current_state_batch_size < new_state_batch_size. However,
# this never happens, because the states themselves are mutated
# by appending zeros when calling _get_initial_states, meaning that
# the new states are either of equal size, or smaller, in the case
# that there are some unused elements (zero-length) for the RNN computation.
self._states = tuple(new_states)
|
After the RNN has run forward, the states need to be updated.
This method just sets the state to the updated new state, performing
several pieces of book-keeping along the way - namely, unsorting the
states and ensuring that the states of completely padded sequences are
not updated. Finally, it also detaches the state variable from the
computational graph, such that the graph can be garbage collected after
each batch iteration.
Parameters
----------
final_states : ``RnnStateStorage``, required.
The hidden states returned as output from the RNN.
restoration_indices : ``torch.LongTensor``, required.
The indices that invert the sorting used in ``sort_and_run_forward``
to order the states with respect to the lengths of the sequences in
the batch.
|
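A minimal sketch of the row-masking trick used above: rows of the new state that sum to zero were never used, so the matching rows of the old state survive the blend.
import torch

old = torch.ones(1, 3, 4)   # (num_layers, batch, dim), previous state
new = torch.zeros(1, 3, 4)
new[:, 0, :] = 2.0          # only row 0 participated in this batch

used = (new[0, :, :].sum(-1) != 0.0).float().view(1, 3, 1)
blended = new + old * (1 - used)
# row 0 -> 2.0 (taken from new), rows 1 and 2 -> 1.0 (preserved from old)
|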
def formatted_message(self):
"""Method that will return the formatted message for the event.
This formatting is done with Jinja and the template text is stored in
the ``body`` attribute. The template is supplied the following
variables, as well as the built in Flask ones:
- ``event``: This is the event instance that this method belongs to.
- ``meta``: This is a dictionary of cached values that have been stored
when the event was created based upon the event's DSL.
- ``original``: This is a dump of the instance before the instance was
updated.
- ``updated``: This is a dump of the instance after it was updated.
- ``version``: This is the version of the event DSL.
This property is cached because Jinja rendering is slower than raw
Python string formatting.
"""
return render_template_string(
self.body,
event=self,
meta=self.meta,
original=self.original,
updated=self.updated,
version=self.version,
)
|
Method that will return the formatted message for the event.
This formatting is done with Jinja and the template text is stored in
the ``body`` attribute. The template is supplied the following
variables, as well as the built in Flask ones:
- ``event``: This is the event instance that this method belongs to.
- ``meta``: This is a dictionary of cached values that have been stored
when the event was created based upon the event's DSL.
- ``original``: This is a dump of the instance before the instance was
updated.
- ``updated``: This is a dump of the instance after it was updated.
- ``version``: This is the version of the event DSL.
This property is cached because Jinja rendering is slower than raw
Python string formatting.
|
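A standalone sketch of the same Jinja call outside the event class (the template text is hypothetical); render_template_string needs an application context:
from flask import Flask, render_template_string

app = Flask(__name__)
with app.app_context():
    body = 'value changed from {{ original.x }} to {{ updated.x }}'
    print(render_template_string(body, original={'x': 1}, updated={'x': 2}))
    # -> value changed from 1 to 2
|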
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\\_ - The id of the element to be found.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
|
Finds an element by id.
:Args:
- id\\_ - The id of the element to be found.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
::
element = driver.find_element_by_id('foo')
|