| text (string, length 12-1.05M) | repo_name (string, length 5-86) | path (string, length 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (list, length 1-23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
from __future__ import division
from collections import defaultdict
from rdkit.Chem import HybridizationType
from rdkit.Chem.rdMolDescriptors import CalcFractionCSP3
from ._base import Descriptor
__all__ = ("CarbonTypes", "HybridizationRatio", "FractionCSP3")
class CarbonTypesBase(Descriptor):
__slots__ = ()
explicit_hydrogens = False
kekulize = True
class CarbonTypesCache(CarbonTypesBase):
__slots__ = ()
def parameters(self):
return ()
_hybridization = {
HybridizationType.SP: 1,
HybridizationType.SP2: 2,
HybridizationType.SP3: 3,
HybridizationType.SP3D: 3,
HybridizationType.SP3D2: 3,
}
def calculate(self):
r = defaultdict(lambda: defaultdict(int))
for a in self.mol.GetAtoms():
if a.GetAtomicNum() != 6:
continue
carbon = sum(other.GetAtomicNum() == 6 for other in a.GetNeighbors())
SP = self._hybridization.get(a.GetHybridization())
r[SP][carbon] += 1
return r
class CarbonTypes(CarbonTypesBase):
r"""carbon types descriptor.
:type nCarbon: int
:param nCarbon: count `n`-carbon bonded carbon
:type SP: int
:param SP: count :math:`{\rm SP}n` carbon
"""
since = "1.0.0"
__slots__ = ("_nCarbon", "_SP")
def description(self):
return "SP{} carbon bound to {} other carbon{}".format(
self._SP if self._SP != 1 else "",
self._nCarbon,
"s" if self._nCarbon > 1 else "",
)
@classmethod
def preset(cls, version):
return map(
lambda args: cls(*args),
[(1, 1), (2, 1), (1, 2), (2, 2), (3, 2), (1, 3), (2, 3), (3, 3), (4, 3)],
)
def __str__(self):
return "C{}SP{}".format(self._nCarbon, self._SP)
def parameters(self):
return self._nCarbon, self._SP
def __init__(self, nCarbon=1, SP=3):
assert SP in [1, 2, 3]
self._nCarbon = nCarbon
self._SP = SP
def dependencies(self):
return {"CT": CarbonTypesCache()}
def calculate(self, CT):
return CT[self._SP][self._nCarbon]
rtype = int
class HybridizationRatio(CarbonTypesBase):
r"""hybridization ratio descriptor.
.. math::
{\rm HybRatio} = \frac{N_{\rm SP3}}{N_{\rm SP2} + N_{\rm SP3}}
:returns: NaN when :math:`N_{\rm SP2} + N_{\rm SP3} = 0`.
"""
def description(self):
return "hybridization ratio"
since = "1.0.0"
__slots__ = ()
@classmethod
def preset(cls, version):
yield cls()
def __str__(self):
return "HybRatio"
def parameters(self):
return ()
def dependencies(self):
return {"CT": CarbonTypesCache()}
def calculate(self, CT):
Nsp3 = sum(CT[3].values())
Nsp2 = sum(CT[2].values())
if Nsp3 == Nsp2 == 0:
self.fail(ValueError("there are no sp3 and sp2 carbons"))
return Nsp3 / (Nsp2 + Nsp3)
rtype = float
class FractionCSP3(Descriptor):
r"""the fraction of C atoms that are SP3 hybridized."""
__slots__ = ()
since = "1.1.0"
@classmethod
def preset(cls, version):
yield cls()
def description(self):
return "the fraction of C atoms that are SP3 hybridized"
def __str__(self):
return "FCSP3"
def parameters(self):
return ()
def calculate(self):
return CalcFractionCSP3(self.mol)
rtype = float
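# --- Illustrative usage sketch (not part of the original module). It assumes
# mordred's Calculator API and RDKit's SMILES parser; the molecule and the
# chosen descriptor parameters are examples only:
#
#     from rdkit import Chem
#     from mordred import Calculator
#     from mordred.CarbonTypes import CarbonTypes, HybridizationRatio, FractionCSP3
#
#     calc = Calculator([CarbonTypes(nCarbon=2, SP=3), HybridizationRatio(), FractionCSP3()])
#     mol = Chem.MolFromSmiles("CC(C)Cc1ccc(cc1)C(C)C(=O)O")  # ibuprofen
#     print(calc(mol))  # C2SP3, HybRatio and FCSP3 values for this molecule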
| mordred-descriptor/mordred | mordred/CarbonTypes.py | Python | bsd-3-clause | 3,502 | ["RDKit"] | e36ade5a9ecd94b88102c6b938c46aff02634664b746029252640633cfc2bcce |
#!/usr/bin/env python
"""
Program to automatically print out CFS pager feeds.
Copyright 2010 - 2015 Michael Farrell <http://micolous.id.au/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from .scrapers import get_scraper
from .browsersupport import get_browser
from .mappingsupport import get_map
from .plugins import get_plugin
# configparser3
from configparser import SafeConfigParser, NoOptionError
from argparse import ArgumentParser, FileType
import re
def run(fh=None):
print """\
pagerprinter v0.1.3+
Copyright 2010 - 2015 Michael Farrell <http://micolous.id.au/>
"""
# parse config
c = SafeConfigParser()
c.read_dict({
'pagerprinter': {
'update-freq': '30',
'backend': 'sacfs',
'browser': 'firefox',
'browser-exec': 'firefox',
'browser-wait': '20',
'trigger': 'RESPOND',
'trigger-end': 'MAP',
'mapper': 'google',
'print-copies': '1',
'unit': 'all',
'home': '141 King William Street, Adelaide SA 5000',
},
})
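# An illustrative user configuration overriding some of the defaults above
# (values are examples only; 'printer' is optional, see the NoOptionError
# handling further down):
#
#     [pagerprinter]
#     backend = sacfs
#     unit = 999
#     browser = firefox
#     home = 1 Example Street, Adelaide SA 5000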
if fh is not None:
c.readfp(fh)
# get a scraper instance
scraper = get_scraper(
c.get('pagerprinter', 'backend')
)(
c.getint('pagerprinter', 'update-freq')
)
# get a browser helper instance
browser = get_browser(
c.get('pagerprinter', 'browser')
)(
c.get('pagerprinter', 'browser-exec'),
c.getint('pagerprinter', 'browser-wait')
)
trigger = c.get('pagerprinter', 'trigger').lower().strip()
trigger_end = c.get('pagerprinter', 'trigger-end').lower().strip()
my_unit = c.get('pagerprinter', 'unit').lower().strip()
try:
printer = c.get('pagerprinter', 'printer')
except NoOptionError:
printer = None
print_copies = c.getint('pagerprinter', 'print-copies')
if print_copies < 1:
print "ERROR: print-copies is set to less than one. You probably don't want this."
return
mapper = c.get('pagerprinter', 'mapper')
plugins = []
if c.has_option('pagerprinter', 'plugins'):
plugins = [
get_plugin(x.strip())
for x
in c.get('pagerprinter', 'plugins').lower().split(',')
]
for plugin in plugins:
plugin.configure(c)
mapper = get_map(mapper)
# special case: all units.
# may result in dupe printouts
if my_unit == 'all':
my_unit = ''
# home
home = c.get('pagerprinter', 'home')
# now, let's set up a handler for these events.
def page_handler(good_parse, msg, date=None, unit=None):
if good_parse:
print "Good parse! %s - %s" % (repr(msg), unit)
# filter for unit
if my_unit in unit.lower():
# this is for me!!!
print "- This is a message for my unit!"
# check for trigger
lmsg = msg.lower()
if trigger in lmsg:
# trigger found
# split by trigger and find address nicely.
addr = lmsg.split(trigger)[1]
if trigger_end in lmsg:
addr = addr.split(trigger_end)[0]
# Remove the @ symbols in the message, and the ASE device number (#nnn/nnn)
addr = re.sub(r'#\d{3}/\d{3}|@|\s:\s', '', addr)
# now split that up into parts, discarding the first
# which is a description of the event
addr_p = addr.split(',')[-2:]
# iterate over a copy of the list, as we modify addr_p inside the loop
for i, part in enumerate(list(addr_p)):
if 'alarm level' in part:
del addr_p[i]
break
# reassemble the address
addr = ','.join(addr_p)
del addr_p
# we have an address. feed it to the mapping engine
url = mapper.get_url(home, addr)
print "- Address: %s" % addr
print "- URL for directions: %s" % url
# sending to browser
browser.print_url(url, printer, print_copies)
# now, send to plugins
for plugin in plugins:
try:
plugin.execute(msg, unit, addr, date, printer, print_copies)
except Exception, e:
print "Exception caught in plugin %s" % type(plugin)
print e
else:
print "- WARNING: End trigger not found! Skipping..."
else:
print "- Trigger not found. Skipping..."
else:
print "- This isn't for my unit. Skipping..."
else:
print "ERROR: THIS IS A BUG!!!"
print "Couldn't handle the following message, please file a bug report."
print repr(msg)
print "updating forever"
scraper.update_forever(page_handler)
def main():
parser = ArgumentParser()
parser.add_argument(
'--config', '-c', type=FileType('rb'),
help='Configuration file to use'
)
options = parser.parse_args()
run(options.config)
if __name__ == '__main__':
main()
| micolous/cfsprinter | src/pagerprinter/pagerprinter.py | Python | gpl-3.0 | 5,037 | ["ASE"] | eab89b31bc8bff6a5a149fef137b36f28c6d7b4d8c5dba1259e3cc09ee400c2f |
#!/usr/bin/env python3
import abc
import torch
from ..distributions import MultivariateNormal
from ..lazy import CholLazyTensor, TriangularLazyTensor
from ._variational_distribution import _VariationalDistribution
class _NaturalVariationalDistribution(_VariationalDistribution, abc.ABC):
r"""Any :obj:`~gpytorch.variational._VariationalDistribution` which calculates
natural gradients with respect to its parameters.
"""
pass
class NaturalVariationalDistribution(_NaturalVariationalDistribution):
r"""A multivariate normal :obj:`~gpytorch.variational._VariationalDistribution`,
parameterized by **natural** parameters.
.. note::
The :obj:`~gpytorch.variational.NaturalVariationalDistribution` can only
be used with :obj:`gpytorch.optim.NGD`, or other optimizers
that follow exactly the gradient direction. Failure to do so will cause
the natural matrix :math:`\mathbf \Theta_\text{mat}` to stop being
positive definite, and a :obj:`~RuntimeError` will be raised.
.. seealso::
The `natural gradient descent tutorial
<examples/04_Variational_and_Approximate_GPs/Natural_Gradient_Descent.ipynb>`_
for use instructions.
The :obj:`~gpytorch.variational.TrilNaturalVariationalDistribution` for
a more numerically stable parameterization, at the cost of needing more
iterations to make variational regression converge.
:param int num_inducing_points: Size of the variational distribution. This implies that the variational mean
should be this size, and the variational covariance matrix should have this many rows and columns.
:param batch_shape: Specifies an optional batch size
for the variational parameters. This is useful for example when doing additive variational inference.
:type batch_shape: :obj:`torch.Size`, optional
:param float mean_init_std: (Default: 1e-3) Standard deviation of gaussian noise to add to the mean initialization.
"""
def __init__(self, num_inducing_points, batch_shape=torch.Size([]), mean_init_std=1e-3, **kwargs):
super().__init__(num_inducing_points=num_inducing_points, batch_shape=batch_shape, mean_init_std=mean_init_std)
scaled_mean_init = torch.zeros(num_inducing_points)
neg_prec_init = torch.eye(num_inducing_points, num_inducing_points).mul(-0.5)
scaled_mean_init = scaled_mean_init.repeat(*batch_shape, 1)
neg_prec_init = neg_prec_init.repeat(*batch_shape, 1, 1)
# eta1 and eta2 parameterization of the variational distribution
self.register_parameter(name="natural_vec", parameter=torch.nn.Parameter(scaled_mean_init))
self.register_parameter(name="natural_mat", parameter=torch.nn.Parameter(neg_prec_init))
def forward(self):
mean, chol_covar = _NaturalToMuVarSqrt.apply(self.natural_vec, self.natural_mat)
res = MultivariateNormal(mean, CholLazyTensor(TriangularLazyTensor(chol_covar)))
return res
def initialize_variational_distribution(self, prior_dist):
prior_prec = prior_dist.covariance_matrix.inverse()
prior_mean = prior_dist.mean
noise = torch.randn_like(prior_mean).mul_(self.mean_init_std)
self.natural_vec.data.copy_((prior_prec @ prior_mean).add_(noise))
self.natural_mat.data.copy_(prior_prec.mul(-0.5))
def _triangular_inverse(A, upper=False):
eye = torch.eye(A.size(-1), dtype=A.dtype, device=A.device)
return eye.triangular_solve(A, upper=upper).solution
def _phi_for_cholesky_(A):
"Modifies A to be the phi function used in differentiating through Cholesky"
A.tril_().diagonal(offset=0, dim1=-2, dim2=-1).mul_(0.5)
return A
def _cholesky_backward(dout_dL, L, L_inverse):
# c.f. https://github.com/pytorch/pytorch/blob/25ba802ce4cbdeaebcad4a03cec8502f0de9b7b3/
# tools/autograd/templates/Functions.cpp
A = L.transpose(-1, -2) @ dout_dL
phi = _phi_for_cholesky_(A)
grad_input = (L_inverse.transpose(-1, -2) @ phi) @ L_inverse
# Symmetrize gradient
return grad_input.add(grad_input.transpose(-1, -2)).mul_(0.5)
class _NaturalToMuVarSqrt(torch.autograd.Function):
@staticmethod
def _forward(nat_mean, nat_covar):
try:
L_inv = torch.cholesky(-2.0 * nat_covar, upper=False)
except RuntimeError as e:
if str(e).startswith("cholesky"):
raise RuntimeError(
"Non-negative-definite natural covariance. You probably "
"updated it using an optimizer other than gpytorch.optim.NGD (such as Adam). "
"This is not supported."
)
else:
raise e
L = _triangular_inverse(L_inv, upper=False)
S = L.transpose(-1, -2) @ L
mu = (S @ nat_mean.unsqueeze(-1)).squeeze(-1)
# Two choleskys are annoying, but we don't have good support for a
# LazyTensor of form L.T @ L
return mu, torch.cholesky(S, upper=False)
@staticmethod
def forward(ctx, nat_mean, nat_covar):
mu, L = _NaturalToMuVarSqrt._forward(nat_mean, nat_covar)
ctx.save_for_backward(mu, L)
return mu, L
@staticmethod
def _backward(dout_dmu, dout_dL, mu, L, C):
"""Calculate dout/d(eta1, eta2), which are:
eta1 = mu
eta2 = mu*mu^T + LL^T = mu*mu^T + Sigma
Thus:
dout/deta1 = dout/dmu + dout/dL dL/deta1
dout/deta2 = dout/dL dL/deta1
For L = chol(eta2 - eta1*eta1^T).
dout/dSigma = _cholesky_backward(dout/dL, L)
dout/deta2 = dout/dSigma
dSigma/deta1 = -2* (dout/dSigma) mu
"""
dout_dSigma = _cholesky_backward(dout_dL, L, C)
dout_deta1 = dout_dmu - 2 * (dout_dSigma @ mu.unsqueeze(-1)).squeeze(-1)
return dout_deta1, dout_dSigma
@staticmethod
def backward(ctx, dout_dmu, dout_dL):
"Calculates the natural gradient with respect to nat_mean, nat_covar"
mu, L = ctx.saved_tensors
C = _triangular_inverse(L, upper=False)
return _NaturalToMuVarSqrt._backward(dout_dmu, dout_dL, mu, L, C)
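# --- Illustrative usage sketch (not part of the original module). It assumes the
# standard gpytorch ApproximateGP / VariationalStrategy setup and the NGD optimizer
# mentioned in the class docstring; `model`, `inducing_points` and `train_y` are
# placeholders:
#
#     variational_distribution = NaturalVariationalDistribution(inducing_points.size(-2))
#     variational_strategy = gpytorch.variational.VariationalStrategy(
#         model, inducing_points, variational_distribution, learn_inducing_locations=True)
#     variational_ngd_optimizer = gpytorch.optim.NGD(
#         model.variational_parameters(), num_data=train_y.size(0), lr=0.1)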
| jrg365/gpytorch | gpytorch/variational/natural_variational_distribution.py | Python | mit | 6,153 | ["Gaussian"] | 15b75665fd1af360a42a34e7d45712cf4bece1aebf73f60a5fa925a95cd3f531 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2005 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Peter Landgren
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2012,2017 Paul Franklin
# Copyright (C) 2014 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Paragraph/Font style editor
"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".")
import re
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gtk, Gdk
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.plug.docgen import (StyleSheet, FONT_SERIF, FONT_SANS_SERIF,
PARA_ALIGN_RIGHT, PARA_ALIGN_CENTER, PARA_ALIGN_LEFT,
PARA_ALIGN_JUSTIFY, ParagraphStyle, TableStyle, TableCellStyle,
GraphicsStyle)
from ...listmodel import ListModel
from ...managedwindow import ManagedWindow
from ...glade import Glade
from ...dialog import ErrorDialog
from ...display import display_help
from gramps.gen.const import URL_MANUAL_PAGE
WIKI_HELP_PAGE = URL_MANUAL_PAGE + "_-_Settings"
#------------------------------------------------------------------------
#
# StyleListDisplay class
#
#------------------------------------------------------------------------
class StyleListDisplay(ManagedWindow):
"""
Shows the available paragraph/font styles. Allows the user to select,
add, edit, and delete styles from a StyleSheetList.
"""
def __init__(self, stylesheetlist, uistate, track, callback=None):
"""
Create a StyleListDisplay object that displays the styles in the
StyleSheetList.
stylesheetlist - styles for editing: a :class:`.StyleSheetList` instance
callback - task called when an object has been added.
"""
ManagedWindow.__init__(self, uistate, track, self.__class__, modal=True)
# the self.window.run() below makes Gtk make it modal, so any change
# to the previous line's "modal" would require that line to be changed
self.callback = callback
self.sheetlist = stylesheetlist
self.top = Glade(toplevel='styles')
self.set_window(self.top.toplevel, self.top.get_object('title'),
_('Document Styles'))
self.setup_configs('interface.stylelistdisplay', 400, 300)
self.show()
self.top.connect_signals({
"on_ok_clicked" : self.on_ok_clicked,
"on_add_clicked" : self.on_add_clicked,
"on_delete_clicked" : self.on_delete_clicked,
"on_button_press" : self.on_button_press,
"on_edit_clicked" : self.on_edit_clicked,
"on_cancel_clicked" : self.__cancel,
"on_help_btn_clicked" : lambda x: display_help(
WIKI_HELP_PAGE, _('manual|Document_Styles_dialog')),
"on_cancel_style_clicked" : dummy_callback,
"on_save_style_clicked" : dummy_callback,
"on_help_btn_style_clicked" : dummy_callback,
})
self.list = ListModel(self.top.get_object("list"),
[(_('Style'), -1, 10)], )
self.redraw()
# the self.window.run() makes Gtk make it modal, so any change to that
# line would require the ManagedWindow.__init__ to be changed also
self.window.run()
if self.opened:
self.close()
def build_menu_names(self, obj): # meaningless while it's modal
"""Override :class:`.ManagedWindow` method."""
return (_('Document Styles'), ' ')
def __cancel(self, obj):
pass
def redraw(self):
"""Redraws the list of styles that are currently available"""
self.list.model.clear()
self.list.add([_("default")])
index = 1
for style in sorted(self.sheetlist.get_style_names()):
if style == "default":
continue
self.list.add([style])
index += 1
def on_add_clicked(self, obj):
"""Called when the ADD button is clicked. Invokes the StyleEditor to
create a new style"""
style = self.sheetlist.get_style_sheet("default")
StyleEditor(_("New Style"), style, self)
def on_ok_clicked(self, obj):
"""Called when the OK button is clicked; Calls the callback task,
then saves the stylesheet."""
if self.callback is not None:
self.callback()
try:
self.sheetlist.save()
except IOError as msg:
ErrorDialog(_("Error saving stylesheet"), str(msg),
parent=self.window)
except:
log.error("Failed to save stylesheet", exc_info=True)
def on_button_press(self, obj, event):
if event.type == Gdk.EventType._2BUTTON_PRESS and event.button == 1:
self.on_edit_clicked(obj)
def on_edit_clicked(self, obj):
"""
Called when the EDIT button is clicked.
Calls the StyleEditor to edit the selected style.
"""
store, node = self.list.selection.get_selected()
if not node:
ErrorDialog(_("Missing information"), _("Select a style"),
parent=self.window)
return
name = str(self.list.model.get_value(node, 0))
if name == _('default'): # the default style cannot be edited
return
style = self.sheetlist.get_style_sheet(name)
StyleEditor(name, style, self)
def on_delete_clicked(self, obj):
"""Deletes the selected style."""
store, node = self.list.selection.get_selected()
if not node:
ErrorDialog(_("Missing information"), _("Select a style"),
parent=self.window)
return
name = str(self.list.model.get_value(node, 0))
if name == _('default'): # the default style cannot be removed
return
self.sheetlist.delete_style_sheet(name)
self.redraw()
#------------------------------------------------------------------------
#
# StyleEditor class
#
#------------------------------------------------------------------------
class StyleEditor(ManagedWindow):
"""
Edits the current style definition.
Presents a dialog allowing the values in the style to be altered.
"""
def __init__(self, name, style, parent):
"""
Create the StyleEditor.
name - name of the style that is to be edited
style - style object to be edited: a :class:`.StyleSheet` instance
parent - StyleListDisplay object that called the editor
"""
ManagedWindow.__init__(self, parent.uistate, parent.track,
self.__class__, modal=True)
# the self.window.run() below makes Gtk make it modal, so any change
# to the previous line's "modal" would require that line to be changed
self.current_style = None
self.current_name = None
self.style = StyleSheet(style)
self.parent = parent
self.top = Glade(
toplevel='editor',
also_load=[
"adjustment1", "adjustment2", "adjustment3", "adjustment4",
"adjustment5", "adjustment6", "adjustment7", "adjustment8",
"adjustment9", "adjustment10", "adjustment11"])
self.set_window(self.top.toplevel, self.top.get_object('title'),
_('Style editor'))
self.setup_configs('interface.styleeditor', 550, 610)
self.show()
self.top.connect_signals({
"on_save_style_clicked" : self.on_save_style_clicked,
"on_cancel_style_clicked" : self.__cancel,
"on_help_btn_style_clicked" : lambda x: display_help(
WIKI_HELP_PAGE, _('manual|Style_editor_dialog')),
"on_cancel_clicked" : dummy_callback,
"on_ok_clicked" : dummy_callback,
"on_add_clicked" : dummy_callback,
"on_delete_clicked" : dummy_callback,
"on_button_press" : dummy_callback,
"on_edit_clicked" : dummy_callback,
"on_help_btn_clicked" : dummy_callback,
})
self.pname = self.top.get_object('pname')
self.pdescription = self.top.get_object('pdescription')
self.notebook = self.top.get_object('notebook1')
self.vbox = self.top.get_object('column_widths')
self.line_style = self.top.get_object('line_style')
line_styles = Gtk.ListStore(int, str)
line_styles.append([0, "Solid"])
line_styles.append([1, "Dashed"])
line_styles.append([2, "Dotted"])
self.line_style.set_model(line_styles)
renderer_text = Gtk.CellRendererText()
self.line_style.pack_start(renderer_text, True)
self.line_style.add_attribute(renderer_text, "text", 1)
self.top.get_object("label6").set_text(_("point size|pt"))
titles = [(_('Style'), 0, 130)]
self.plist = ListModel(self.top.get_object("ptree"), titles,
self.change_display)
for widget_name in ('color', 'bgcolor', 'line_color', 'fill_color'):
color = self.top.get_object(widget_name)
label = self.top.get_object(widget_name + '_code')
color.connect('notify::color', self.color_changed, label)
self.top.get_object("style_name").set_text(name)
def _alphanumeric_sort(iterable):
""" sort the given iterable in the way that humans expect """
convert = lambda text: int(text) if text.isdigit() else text
sort_key = lambda k: [convert(c) for c in re.split('([0-9]+)', k)]
return sorted(iterable, key=sort_key)
names = _alphanumeric_sort(self.style.get_paragraph_style_names())
for p_name in names:
self.plist.add([p_name], self.style.get_paragraph_style(p_name))
names = _alphanumeric_sort(self.style.get_table_style_names())
for t_name in names:
self.plist.add([t_name], self.style.get_table_style(t_name))
names = _alphanumeric_sort(self.style.get_cell_style_names())
for c_name in names:
self.plist.add([c_name], self.style.get_cell_style(c_name))
names = _alphanumeric_sort(self.style.get_draw_style_names())
for d_name in names:
self.plist.add([d_name], self.style.get_draw_style(d_name))
self.plist.select_row(0)
# the self.window.run() makes Gtk make it modal, so any change to that
# line would require the ManagedWindow.__init__ to be changed also
self.window.run()
if self.opened:
self.close()
def build_menu_names(self, obj): # meaningless while it's modal
"""Override :class:`.ManagedWindow` method."""
return (_('Style editor'), None)
def __cancel(self, obj):
pass
def show_pages(self, show_pages):
"""
Make the given pages visible.
"""
for page_num in range(self.notebook.get_n_pages()):
page = self.notebook.get_nth_page(page_num)
if page_num in show_pages:
page.show()
else:
page.hide()
def draw(self):
"""
Updates the display with the selected style.
"""
if isinstance(self.current_style, ParagraphStyle):
self.show_pages([0, 1, 2])
self.draw_paragraph()
elif isinstance(self.current_style, TableStyle):
self.show_pages([0, 3])
self.draw_table()
elif isinstance(self.current_style, TableCellStyle):
self.show_pages([0, 4])
self.draw_cell()
elif isinstance(self.current_style, GraphicsStyle):
self.show_pages([0, 5])
self.draw_graphics()
def draw_graphics(self):
"""
Updates the display with the selected graphics style.
"""
g = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = g.get_description()
descr = descr or _("No description available")
p_style = g.get_paragraph_style()
if p_style:
para_note = _("(Embedded style '%s' must be edited separately)")
descr += '\n\n' + para_note % p_style
self.pdescription.set_text(descr)
self.top.get_object("line_style").set_active(g.get_line_style())
self.top.get_object("line_width").set_value(g.get_line_width())
self.line_color = rgb2color(g.get_color())
self.top.get_object("line_color").set_color(self.line_color)
self.fill_color = rgb2color(g.get_fill_color())
self.top.get_object("fill_color").set_color(self.fill_color)
self.top.get_object("shadow").set_active(g.get_shadow())
self.top.get_object("shadow_space").set_value(g.get_shadow_space())
def draw_cell(self):
"""
Updates the display with the selected cell style.
"""
c = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = c.get_description()
self.pdescription.set_text(descr or _("No description available"))
self.top.get_object("cell_lborder").set_active(c.get_left_border())
self.top.get_object("cell_rborder").set_active(c.get_right_border())
self.top.get_object("cell_tborder").set_active(c.get_top_border())
self.top.get_object("cell_bborder").set_active(c.get_bottom_border())
self.top.get_object("cell_padding").set_value(c.get_padding())
def draw_table(self):
"""
Updates the display with the selected table style.
"""
t = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = t.get_description()
self.pdescription.set_text(descr or _("No description available"))
self.top.get_object("table_width").set_value(t.get_width())
self.column = []
for widget in self.vbox.get_children():
self.vbox.remove(widget)
for i in range(t.get_columns()):
hbox = Gtk.Box()
label = Gtk.Label(label=_('Column %d:') % (i + 1))
hbox.pack_start(label, False, False, 6)
spin = Gtk.SpinButton()
spin.set_range(0, 100)
spin.set_increments(1, 10)
spin.set_numeric(True)
spin.set_value(t.get_column_width(i))
self.column.append(spin)
hbox.pack_start(spin, False, False, 6)
hbox.pack_start(Gtk.Label('%'), False, False, 6)
hbox.show_all()
self.vbox.pack_start(hbox, False, False, 3)
def draw_paragraph(self):
"""
Updates the display with the selected paragraph style.
"""
p = self.current_style
self.pname.set_text( '<span size="larger" weight="bold">%s</span>' %
self.current_name)
self.pname.set_use_markup(True)
descr = p.get_description()
self.pdescription.set_text(descr or _("No description available") )
font = p.get_font()
self.top.get_object("size").set_value(font.get_size())
if font.get_type_face() == FONT_SERIF:
self.top.get_object("roman").set_active(1)
else:
self.top.get_object("swiss").set_active(1)
self.top.get_object("bold").set_active(font.get_bold())
self.top.get_object("italic").set_active(font.get_italic())
self.top.get_object("underline").set_active(font.get_underline())
if p.get_alignment() == PARA_ALIGN_LEFT:
self.top.get_object("lalign").set_active(1)
elif p.get_alignment() == PARA_ALIGN_RIGHT:
self.top.get_object("ralign").set_active(1)
elif p.get_alignment() == PARA_ALIGN_CENTER:
self.top.get_object("calign").set_active(1)
else:
self.top.get_object("jalign").set_active(1)
self.top.get_object("rmargin").set_value(p.get_right_margin())
self.top.get_object("lmargin").set_value(p.get_left_margin())
self.top.get_object("pad").set_value(p.get_padding())
self.top.get_object("tmargin").set_value(p.get_top_margin())
self.top.get_object("bmargin").set_value(p.get_bottom_margin())
self.top.get_object("indent").set_value(p.get_first_indent())
self.top.get_object("tborder").set_active(p.get_top_border())
self.top.get_object("lborder").set_active(p.get_left_border())
self.top.get_object("rborder").set_active(p.get_right_border())
self.top.get_object("bborder").set_active(p.get_bottom_border())
color = rgb2color(font.get_color())
self.top.get_object("color").set_color(color)
bg_color = rgb2color(p.get_background_color())
self.top.get_object("bgcolor").set_color(bg_color)
def color_changed(self, color, name, label):
"""
Called to set the color code when a color is changed.
"""
rgb = color2rgb(color.get_color())
label.set_text("#%02X%02X%02X" % rgb)
def save(self):
"""
Saves the current style displayed on the dialog.
"""
if isinstance(self.current_style, ParagraphStyle):
self.save_paragraph()
elif isinstance(self.current_style, TableStyle):
self.save_table()
elif isinstance(self.current_style, TableCellStyle):
self.save_cell()
elif isinstance(self.current_style, GraphicsStyle):
self.save_graphics()
def save_graphics(self):
"""
Saves the current graphics style displayed on the dialog.
"""
g = self.current_style
g.set_line_style(self.top.get_object("line_style").get_active())
g.set_line_width(self.top.get_object("line_width").get_value())
line_color = self.top.get_object("line_color").get_color()
g.set_color(color2rgb(line_color))
fill_color = self.top.get_object("fill_color").get_color()
g.set_fill_color(color2rgb(fill_color))
shadow = self.top.get_object("shadow").get_active()
shadow_space = self.top.get_object("shadow_space").get_value()
g.set_shadow(shadow, shadow_space)
self.style.add_draw_style(self.current_name, self.current_style)
def save_cell(self):
"""
Saves the current cell style displayed on the dialog.
"""
c = self.current_style
c.set_left_border(self.top.get_object("cell_lborder").get_active())
c.set_right_border(self.top.get_object("cell_rborder").get_active())
c.set_top_border(self.top.get_object("cell_tborder").get_active())
c.set_bottom_border(self.top.get_object("cell_bborder").get_active())
c.set_padding(self.top.get_object("cell_padding").get_value())
self.style.add_cell_style(self.current_name, self.current_style)
def save_table(self):
"""
Saves the current table style displayed on the dialog.
"""
t = self.current_style
t.set_width(self.top.get_object("table_width").get_value_as_int())
for i in range(t.get_columns()):
t.set_column_width(i, self.column[i].get_value_as_int())
self.style.add_table_style(self.current_name, self.current_style)
def save_paragraph(self):
"""
Saves the current paragraph style displayed on the dialog.
"""
p = self.current_style
font = p.get_font()
font.set_size(self.top.get_object("size").get_value_as_int())
if self.top.get_object("roman").get_active():
font.set_type_face(FONT_SERIF)
else:
font.set_type_face(FONT_SANS_SERIF)
font.set_bold(self.top.get_object("bold").get_active())
font.set_italic(self.top.get_object("italic").get_active())
font.set_underline(self.top.get_object("underline").get_active())
if self.top.get_object("lalign").get_active():
p.set_alignment(PARA_ALIGN_LEFT)
elif self.top.get_object("ralign").get_active():
p.set_alignment(PARA_ALIGN_RIGHT)
elif self.top.get_object("calign").get_active():
p.set_alignment(PARA_ALIGN_CENTER)
else:
p.set_alignment(PARA_ALIGN_JUSTIFY)
p.set_right_margin(self.top.get_object("rmargin").get_value())
p.set_left_margin(self.top.get_object("lmargin").get_value())
p.set_top_margin(self.top.get_object("tmargin").get_value())
p.set_bottom_margin(self.top.get_object("bmargin").get_value())
p.set_padding(self.top.get_object("pad").get_value())
p.set_first_indent(self.top.get_object("indent").get_value())
p.set_top_border(self.top.get_object("tborder").get_active())
p.set_left_border(self.top.get_object("lborder").get_active())
p.set_right_border(self.top.get_object("rborder").get_active())
p.set_bottom_border(self.top.get_object("bborder").get_active())
color = self.top.get_object("color").get_color()
font.set_color(color2rgb(color))
bg_color = self.top.get_object("bgcolor").get_color()
p.set_background_color(color2rgb(bg_color))
self.style.add_paragraph_style(self.current_name, self.current_style)
def on_save_style_clicked(self, obj):
"""
Saves the current style sheet and causes the parent to be updated with
the changes.
"""
name = str(self.top.get_object("style_name").get_text())
self.save()
self.style.set_name(name)
self.parent.sheetlist.set_style_sheet(name, self.style)
self.parent.redraw()
def change_display(self, obj):
"""
Called when the paragraph selection has been changed. Saves the
old paragraph, then draws the newly selected paragraph.
"""
# Don't save until current_name is defined
# If it's defined, save under the current paragraph name
if self.current_name:
self.save()
# Then change to new paragraph
objs = self.plist.get_selected_objects()
store, node = self.plist.get_selected()
self.current_name = store.get_value(node, 0)
self.current_style = objs[0]
self.draw()
def rgb2color(rgb):
"""
Convert a tuple containing RGB values into a Gdk Color.
"""
return Gdk.Color(rgb[0] << 8, rgb[1] << 8, rgb[2] << 8)
def color2rgb(color):
"""
Convert a Gdk Color into a tuple containing RGB values.
"""
return (color.red >> 8, color.green >> 8, color.blue >> 8)
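# Example round trip (illustrative): rgb2color((255, 128, 0)) gives
# Gdk.Color(65280, 32768, 0), and color2rgb of that color returns (255, 128, 0).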
def dummy_callback(obj):
"""Dummy callback to satisfy gtkbuilder on connect of signals.
There are two widgets in the glade file, although only one is needed,
the signals of the other must be connected too
"""
pass
| dermoth/gramps | gramps/gui/plug/report/_styleeditor.py | Python | gpl-2.0 | 24,387 | ["Brian"] | bafd103e81250cd76b3eb41530a602a81beef3fb57d3ade1c5f978987043e9e2 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = '''
---
module: runit
author: "James Sumners (@jsumners)"
version_added: "2.3"
short_description: Manage runit services.
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
required: true
description:
- Name of the service to manage.
state:
required: false
choices: [ started, stopped, restarted, killed, reloaded, once ]
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always bounce the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
enabled:
required: false
choices: [ "yes", "no" ]
description:
- Whether the service is enabled or not. If disabled, it also implies stopped.
service_dir:
required: false
default: /var/service
description:
- directory runsv watches for services
service_src:
required: false
default: /etc/sv
description:
- directory where services are defined, the source of symlinks to service_dir.
'''
EXAMPLES = '''
# Example action to start sv dnscache, if not running
- sv:
name: dnscache
state: started
# Example action to stop sv dnscache, if running
- sv:
name: dnscache
state: stopped
# Example action to kill sv dnscache, in all cases
- sv:
name: dnscache
state: killed
# Example action to restart sv dnscache, in all cases
- sv:
name: dnscache
state: restarted
# Example action to reload sv dnscache, in all cases
- sv:
name: dnscache
state: reloaded
# Example using alt sv directory location
- sv:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import platform
import shlex
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.basic import *
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
"""
Main class that handles the runit 'sv' interface; it can be subclassed and
overridden in case we want to use a 'derivative' like encore, s6, etc.
"""
#def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = [ ]
self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([ self.service_dir, self.name ])
self.src_full = '/'.join([ self.service_src, self.name ])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.get_status()
else:
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError:
e = get_exception()
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % str(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
self.execute_command([self.svc_cmd,'force-stop',self.src_full])
try:
os.unlink(self.svc_full)
except OSError:
e = get_exception()
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % str(e))
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
m = re.search(r'\(pid (\d+)\)', out)
if m:
self.pid = m.group(1)
m = re.search(r' (\d+)s', out)
if m:
self.duration = m.group(1)
if re.search('run:', out):
self.state = 'started'
elif re.search('down:', out):
self.state = 'stopped'
else:
self.state = 'unknown'
return
def started(self):
return self.start()
def start(self):
return self.execute_command([self.svc_cmd, 'start', self.svc_full])
def stopped(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, 'once', self.svc_full])
def reloaded(self):
return self.reload()
def reload(self):
return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
def restarted(self):
return self.restart()
def restart(self):
return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
def killed(self):
return self.kill()
def kill(self):
return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception:
e = get_exception()
self.module.fail_json(msg="failed to execute: %s" % str(e))
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
# ===========================================
# Main control flow
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
enabled = dict(required=False, type='bool'),
dist = dict(required=False, default='runit'),
service_dir = dict(required=False, default='/var/service'),
service_src = dict(required=False, default='/etc/sv'),
),
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
state = module.params['state']
enabled = module.params['enabled']
sv = Sv(module)
changed = False
orig_state = sv.report()
if enabled is not None and enabled != sv.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
sv.enable()
else:
sv.disable()
except (OSError, IOError):
e = get_exception()
module.fail_json(msg="Could not change service link: %s" % str(e))
if state is not None and state != sv.state:
changed = True
if not module.check_mode:
getattr(sv,state)()
module.exit_json(changed=changed, sv=sv.report())
if __name__ == '__main__':
main()
| andreaso/ansible | lib/ansible/modules/system/runit.py | Python | gpl-3.0 | 9,044 | ["Brian"] | a10b0b94803d070adc09bb07705934c38f55c3858cb448b5a3f6292e6cef3aa1 |
"""
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
MATLAB |reg| [*]_ analogs and similar arguments.
.. |reg| unicode:: 0xAE
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
autoscale - turn axis autoscaling on or off, and apply it
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interactive mode off
ion - turn interactive mode on
isinteractive - return True if interactive mode is on
imread - load image file into array
imsave - save array as an image file
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
locator_params - adjust parameters used in locating axis ticks
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
margins - set margins used in autoscaling
pause - pause for a specified interval
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (nrows, ncols, plot_number)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
tick_params - control the appearance of ticks and tick labels
ticklabel_format - control the format of tick labels
title - add a title to the current axes
tricontour - make a contour plot on a triangular grid
tricontourf - make a filled contour plot on a triangular grid
tripcolor - make a pseudocolor plot on a triangular grid
triplot - plot a triangular grid
xcorr - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
detrend - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
diff - the n-th difference of an array
eig - the eigenvalues and eigenvectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
fliplr - flip a matrix left/right (reverse the column order)
flipud - flip a matrix up/down (reverse the row order)
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
rot90 - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
amax - the maximum along dimension m
amin - the minimum along dimension m
corrcoef - correlation coefficient
cov - covariance matrix
mean - the mean along dimension m
median - the median along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
cohere - the coherence using average periodogram
csd - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
psd - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - Deprecated--please use loadtxt.
loadtxt - load ASCII data into array.
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - Deprecated--please use savetxt.
savetxt - save an array to an ASCII file.
trapz - trapezoidal integration
__end
.. [*] MATLAB is a registered trademark of The MathWorks, Inc.
"""
from __future__ import print_function
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, \
silent_list, iterable, dedent
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates # Do we need this at all?
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
## We are still importing too many things from mlab; more cleanup is needed.
from matplotlib.mlab import griddata, stineman_interp, slopes, \
inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from matplotlib.mlab import window_hanning, window_none, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, \
prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, \
get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, identity, \
base_repr, binary_repr, log2, ispower2, \
rec_append_fields, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
def load(*args, **kwargs):
raise NotImplementedError(load.__doc__)
load.__doc__ = """\
pylab no longer provides a load function, though the old pylab
function is still available as matplotlib.mlab.load (you can refer
to it in pylab as "mlab.load"). However, for plain text files, we
recommend numpy.loadtxt, which was inspired by the old pylab.load
but now has more features. For loading numpy arrays, we recommend
numpy.load, and its analog numpy.save, which are available in
pylab as np.load and np.save.
"""
def save(*args, **kwargs):
raise NotImplementedError(save.__doc__)
save.__doc__ = """\
pylab no longer provides a save function, though the old pylab
function is still available as matplotlib.mlab.save (you can still
refer to it in pylab as "mlab.save"). However, for plain text
files, we recommend numpy.savetxt. For saving numpy arrays,
we recommend numpy.save, and its analog numpy.load, which are
available in pylab as np.save and np.load."""
# don't let numpy's datetime hide stdlib
import datetime
if sys.version_info > (2, 6, 0):
bytes = __builtins__['bytes']
| lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/pylab.py | Python | mit | 11,780 | ["Gaussian"] | 62d0d331cba85beb2444d7befbfdae04ee2377c3c736200d8a95a7988fcf5333 |
import sys
import os.path
import argparse
import sqlite3
import ast
import locale
import codecs
TERM_ENCODING = locale.getdefaultlocale()[1]
VERSION = '0.4dev'
def execute_sql(conn, sql, params):
cur = conn.cursor()
try:
cur.execute(sql, params)
conn.commit()
except sqlite3.OperationalError, e:
if ': tags' in e.message:
conn.execute('CREATE TABLE tags (path text, tag text,'
'value INTEGER, UNIQUE(path, tag) ON CONFLICT REPLACE)')
cur.execute(sql, params)
conn.commit()
else:
raise
return cur
def add_tags(conn, path, *tags, **valued_tags):
sql = 'INSERT INTO tags (path, tag, value) VALUES (?, ?, ?)'
for tag in tags:
execute_sql(conn, sql, (path, tag, None))
for tag, value in valued_tags.items():
execute_sql(conn, sql, (path, tag, value))
def remove_tags(conn, path, *tags):
if not tags:
execute_sql(conn, 'DELETE FROM tags WHERE path=?', [path])
else:
execute_sql(conn,
'DELETE FROM tags WHERE path=? and tag IN (%s)' % ','.join('?'*len(tags)), (path,) + tags)
def set_tags(conn, path, *tags, **valued_tags):
remove_tags(conn, path)
add_tags(conn, path, *tags, **valued_tags)
def get_tags(conn, path):
result = execute_sql(conn, 'SELECT * FROM tags WHERE path=?', [path])
return dict((r[1], r[2]) for r in result)
class ExprGenerator(ast.NodeVisitor):
def __init__(self):
self.expr = ''
self.params = []
def visit_Name(self, node):
tag_name = node.id
if tag_name == 'anytag':
self.expr += 'EXISTS (select tag from tags where path = _p)'
else:
self.expr += 'EXISTS (select tag from tags where path = _p and tag = ?)'
self.params.append(tag_name)
def visit_BoolOp(self, node):
op_name = {ast.And:' and ', ast.Or:' or '}[node.op.__class__]
self.expr += '('
self.visit(node.values[0])
self.expr += ')'
for v in node.values[1:]:
self.expr += op_name
self.expr += '('
self.visit(v)
self.expr += ')'
def visit_UnaryOp(self, node):
op_name = {ast.Not:'not '}[node.op.__class__]
self.expr += op_name
self.expr += '('
self.visit(node.operand)
self.expr += ')'
def visit_Compare(self, node):
op_names = {
ast.Gt: '>',
ast.GtE: '>=',
ast.Lt: '<',
ast.LtE: '<=',
ast.Eq: '=',
}
left = node.left
self.expr += 'EXISTS (select tag from tags where path = _p'
tag_added = [False]
def add_tag(r):
if not tag_added[0]:
self.expr += ' and tag = ?'
self.params.append(r.id)
tag_added[0] = True
for op, right in zip(node.ops, node.comparators):
op_name = op_names[op.__class__]
if left.__class__ is ast.Num:
assert right.__class__ is ast.Name, 'Right side of compare must be a name'
add_tag(right)
self.expr += ' and ? %s value' % op_name
self.params.append(left.n)
elif left.__class__ is ast.Name:
assert right.__class__ is ast.Num, 'Right side of compare must be a number'
add_tag(left)
self.expr += ' and value %s ?' % op_name
self.params.append(right.n)
else:
assert False, 'Invalid compare'
left = right
self.expr += ')'
def generate_sql_expr(query):
node = ast.parse(query)
g = ExprGenerator()
g.generic_visit(node)
return g.expr, g.params
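# Illustrative example (assumed behaviour of the generator above, not from the
# original source): generate_sql_expr('rock and rating > 3') yields roughly
#   (EXISTS (select tag from tags where path = _p and tag = ?)) and
#   (EXISTS (select tag from tags where path = _p and tag = ? and value > ?))
# with params ['rock', 'rating', 3].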
def find(conn, query, root=None):
expr, params = generate_sql_expr(query)
result = execute_sql(conn, 'SELECT DISTINCT path as _p FROM tags WHERE ' + expr, params)
return [r[0] for r in result]
###########################################
# CLI interface
def get_mpd_client(client=[]):
if not client:
import mpd
addr = os.environ.get('MPD_HOST', 'localhost:6600')
host, _, port = addr.partition(':')
c = mpd.MPDClient()
c.connect(host, port)
client.append(c)
return client[0]
def get_sources(args):
if args.file:
return [args.file.decode(TERM_ENCODING)]
elif args.filelist:
if args.filelist == '-':
f = sys.stdin
else:
f = open(args.filelist)
return (l.rstrip('\r\n') for l in f)
elif args.playlist:
return (r['file'].decode('utf-8') for r in get_mpd_client().playlistinfo())
else:
if args.filter:
return []
else:
return [get_mpd_client().currentsong()['file'].decode('utf-8')]
def process_playlist_actions(sources, args):
if args.use_as_playlist:
c = get_mpd_client()
c.command_list_ok_begin()
if not args.add_to_playlist:
c.clear()
for r in sources:
c.add(r.encode('utf-8'))
c.command_list_end()
def filter_sources(sources, args, conn):
if args.filter:
if sources:
matched = set(find(conn(), args.filter))
if args.remove:
result = (r for r in sources if r not in matched)
else:
result = (r for r in sources if r in matched)
else:
result = find(conn(), args.filter)
return result
else:
return sources
def show_all_tags(conn):
conn = conn()
for r in execute_sql(conn, 'SELECT DISTINCT tag FROM tags', []):
print r[0]
def show_with_tags(sources, conn):
conn = conn()
for r in sources:
tags = get_tags(conn, r)
tags_str = ' '.join(k if v is None else ('%s=%s' % (k,v)) for k, v in tags.items())
print u'{}\t{}'.format(tags_str, r)
def show_without_tags(sources):
for r in sources:
print r
def parse_tags_with_values(args):
tags, valued_tags = [], {}
for r in args:
if '=' in r:
tag, _, value = r.partition('=')
valued_tags[tag] = value
else:
tags.append(r)
return tags, valued_tags
def process_tag_actions(sources, args, conn):
conn = conn()
if args.clear:
for r in sources:
remove_tags(conn, r)
if args.delete:
for r in sources:
remove_tags(conn, r, *args.delete)
if args.set:
tags, vtags = parse_tags_with_values(args.set)
for r in sources:
set_tags(conn, r, *tags, **vtags)
if args.add:
tags, vtags = parse_tags_with_values(args.add)
for r in sources:
add_tags(conn, r, *tags, **vtags)
def run():
parser = argparse.ArgumentParser()
parser.add_argument('--db', dest='db',
help="Specify alternative tag db location")
source = parser.add_mutually_exclusive_group()
source.add_argument('-i', dest='file')
source.add_argument('-l', dest='filelist', nargs='?', const='-')
source.add_argument('-p', dest='playlist', action='store_true')
parser.add_argument('-f', dest='filter')
parser.add_argument('-r', dest='remove', action='store_true')
parser.add_argument('-S', dest='set', nargs='+')
parser.add_argument('-A', dest='add', nargs='+')
parser.add_argument('-D', dest='delete', nargs='+')
parser.add_argument('-C', dest='clear', action='store_true')
parser.add_argument('-T', dest='alltags', action='store_true')
parser.add_argument('-P', dest='use_as_playlist', action='store_true')
parser.add_argument('-n', dest='only_filenames', action='store_true')
parser.add_argument('-a', dest='add_to_playlist', action='store_true')
parser.add_argument('--version', action='version', version=VERSION)
parser.set_defaults(db=os.path.join(os.getenv('XDG_DATA_HOME',
os.path.expanduser('~/.local/share')), 'mpd_tag', 'tags.sqlite'))
args = parser.parse_args()
dirname = os.path.dirname(args.db)
if not os.path.exists(dirname):
os.makedirs(dirname)
def conn(cn=[]):
if not cn:
cn.append(sqlite3.connect(args.db))
return cn[0]
sys.stdout = codecs.getwriter(TERM_ENCODING)(sys.stdout)
sys.stdin = codecs.getreader(TERM_ENCODING)(sys.stdin)
if args.alltags:
show_all_tags(conn)
else:
sources = get_sources(args)
sources = filter_sources(sources, args, conn)
process_tag_actions(sources, args, conn)
process_playlist_actions(sources, args)
if args.only_filenames:
show_without_tags(sources)
else:
show_with_tags(sources, conn)
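# Editor's note (illustration only, not part of the original script): typical
# invocations, given the options defined in run() above.
#
#   mpd_tag.py -S rock year=1987                  # tag the currently playing song
#   mpd_tag.py -f "rock and not year < 1990" -P   # build a playlist from a query
#   mpd_tag.py -T                                 # list every tag in the database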
|
baverman/mpd-tag
|
mpd_tag.py
|
Python
|
mit
| 8,783
|
[
"VisIt"
] |
768a721783a9f85f0bdc3b8667232989fca8601051a8f75bc6bc201617393cde
|
# -*- coding: utf-8 -*-
"""
E2E tests for the LMS.
"""
from unittest import skip
from .helpers import UniqueCourseTest, load_data_str
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.find_courses import FindCoursesPage
from ..pages.lms.course_about import CourseAboutPage
from ..pages.lms.course_info import CourseInfoPage
from ..pages.lms.tab_nav import TabNavPage
from ..pages.lms.course_nav import CourseNavPage
from ..pages.lms.progress import ProgressPage
from ..pages.lms.dashboard import DashboardPage
from ..pages.lms.video import VideoPage
from ..pages.xblock.acid import AcidView
from ..fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
class RegistrationTest(UniqueCourseTest):
"""
Test the registration process.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(RegistrationTest, self).setUp()
self.find_courses_page = FindCoursesPage(self.browser)
self.course_about_page = CourseAboutPage(self.browser, self.course_id)
# Create a course to register for
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_register(self):
# Visit the main page with the list of courses
self.find_courses_page.visit()
# Expect that the fixture course exists
course_ids = self.find_courses_page.course_id_list
self.assertIn(self.course_id, course_ids)
# Go to the course about page and click the register button
self.course_about_page.visit()
register_page = self.course_about_page.register()
# Fill in registration info and submit
username = "test_" + self.unique_id[0:6]
register_page.provide_info(
username + "@example.com", "test", username, "Test User"
)
dashboard = register_page.submit()
# We should end up at the dashboard
# Check that we're registered for the course
course_names = dashboard.available_courses
self.assertIn(self.course_info['display_name'], course_names)
class LanguageTest(UniqueCourseTest):
"""
Tests that the change language functionality on the dashboard works
"""
def setUp(self):
"""
Initialize dashboard page
"""
super(LanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.test_new_lang = 'eo'
# This string is unicode for "ÇÜRRÉNT ÇØÜRSÉS", which should appear in our Dummy Esperanto page
# We store the string this way because Selenium seems to try and read in strings from
# the HTML in this format. Ideally we could just store the raw ÇÜRRÉNT ÇØÜRSÉS string here
self.current_courses_text = u'\xc7\xdcRR\xc9NT \xc7\xd6\xdcRS\xc9S'
self.username = "test"
self.password = "testpass"
self.email = "test@example.com"
def test_change_lang(self):
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.dashboard_page.visit()
# Change language to Dummy Esperanto
self.dashboard_page.change_language(self.test_new_lang)
changed_text = self.dashboard_page.current_courses_text
# We should see the dummy-language text on the page
self.assertIn(self.current_courses_text, changed_text)
def test_language_persists(self):
auto_auth_page = AutoAuthPage(self.browser, username=self.username, password=self.password, email=self.email, course_id=self.course_id)
auto_auth_page.visit()
self.dashboard_page.visit()
# Change language to Dummy Esperanto
self.dashboard_page.change_language(self.test_new_lang)
# destroy session
self.browser.delete_all_cookies()
# log back in
auto_auth_page.visit()
self.dashboard_page.visit()
changed_text = self.dashboard_page.current_courses_text
# We should see the dummy-language text on the page
self.assertIn(self.current_courses_text, changed_text)
class HighLevelTabTest(UniqueCourseTest):
"""
Tests that verify each of the high-level tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(HighLevelTabTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.video = VideoPage(self.browser)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
course_fix.add_handout('demoPDF.pdf')
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2'),
XBlockFixtureDesc('sequential', 'Test Subsection 3'),
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_info(self):
"""
Navigate to the course info page.
"""
# Navigate to the course info page from the progress page
self.progress_page.visit()
self.tab_nav.go_to_tab('Course Info')
# Expect just one update
self.assertEqual(self.course_info_page.num_updates, 1)
# Expect a link to the demo handout pdf
handout_links = self.course_info_page.handout_links
self.assertEqual(len(handout_links), 1)
self.assertIn('demoPDF.pdf', handout_links[0])
def test_progress(self):
"""
Navigate to the progress page.
"""
# Navigate to the progress page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
# We haven't answered any problems yet, so assume scores are zero
# Only problems should have scores, so there should be 2 scores.
CHAPTER = 'Test Section'
SECTION = 'Test Subsection'
EXPECTED_SCORES = [(0, 3), (0, 1)]
actual_scores = self.progress_page.scores(CHAPTER, SECTION)
self.assertEqual(actual_scores, EXPECTED_SCORES)
def test_static_tab(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
def test_courseware_nav(self):
"""
Navigate to a particular unit in the courseware.
"""
# Navigate to the courseware page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
# Check that the courseware navigation appears correctly
EXPECTED_SECTIONS = {
'Test Section': ['Test Subsection'],
'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
}
actual_sections = self.course_nav.sections
for section, subsections in EXPECTED_SECTIONS.iteritems():
self.assertIn(section, actual_sections)
self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])
# Navigate to a particular section
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Check the sequence items
EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']
actual_items = self.course_nav.sequence_items
self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
for expected in EXPECTED_ITEMS:
self.assertIn(expected, actual_items)
class VideoTest(UniqueCourseTest):
"""
Navigate to a video in the courseware and play it.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(VideoTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.video = VideoPage(self.browser)
# Install a course fixture with a video component
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('video', 'Video')
)))).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
@skip("BLD-563: Video Player Stuck on Pause")
def test_video_player(self):
"""
Play a video in the courseware.
"""
# Navigate to a video
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
# The video should start off paused
# Since the video hasn't loaded yet, its elapsed time is 0
self.assertFalse(self.video.is_playing)
self.assertEqual(self.video.elapsed_time, 0)
# Play the video
self.video.play()
# Now we should be playing
self.assertTrue(self.video.is_playing)
# The EmptyPromise below is commented out; it will be moved to its page object once this test is working and stable
# Also, there should be no Promise check in any test; this should be done in the Page Object
# Wait for the video to load the duration
# EmptyPromise(
# lambda: self.video.duration > 0,
# 'video has duration', timeout=20
# ).fulfill()
# Pause the video
self.video.pause()
# Expect that the elapsed time and duration are reasonable
# Again, we can't expect the video to actually play because of
# latency through the ssh tunnel
self.assertGreaterEqual(self.video.elapsed_time, 0)
self.assertGreaterEqual(self.video.duration, self.video.elapsed_time)
class XBlockAcidBase(UniqueCourseTest):
"""
Base class for tests that verify that XBlock integration is working correctly
"""
__test__ = False
def setUp(self):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(XBlockAcidBase, self).setUp()
self.setup_fixtures()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
def validate_acid_block_view(self, acid_block):
"""
Verify that the LMS view for the Acid Block is correct
"""
self.assertTrue(acid_block.init_fn_passed)
self.assertTrue(acid_block.resource_url_passed)
self.assertTrue(acid_block.scope_passed('user_state'))
self.assertTrue(acid_block.scope_passed('user_state_summary'))
self.assertTrue(acid_block.scope_passed('preferences'))
self.assertTrue(acid_block.scope_passed('user_info'))
def test_acid_block(self):
"""
Verify that all expected acid block tests pass in the lms.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Courseware')
acid_block = AcidView(self.browser, '.xblock-student_view[data-block-type=acid]')
self.validate_acid_block_view(acid_block)
class XBlockAcidNoChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with no children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid', 'Acid Block')
)
)
)
).install()
class XBlockAcidChildTest(XBlockAcidBase):
"""
Tests of an AcidBlock with children
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('acid_parent', 'Acid Parent Block').add_children(
XBlockFixtureDesc('acid', 'First Acid Child', metadata={'name': 'first'}),
XBlockFixtureDesc('acid', 'Second Acid Child', metadata={'name': 'second'}),
XBlockFixtureDesc('html', 'Html Child', data="<html>Contents</html>"),
)
)
)
)
).install()
def validate_acid_block_view(self, acid_block):
super(XBlockAcidChildTest, self).validate_acid_block_view(acid_block)
self.assertTrue(acid_block.child_tests_passed)
@skip('This will fail until we fix support of children in pure XBlocks')
def test_acid_block(self):
super(XBlockAcidChildTest, self).test_acid_block()
|
yokose-ks/edx-platform
|
common/test/acceptance/tests/test_lms.py
|
Python
|
agpl-3.0
| 15,062
|
[
"VisIt"
] |
fdd0bcaee02e6d641d8a63225e44cd9e04e2a874b41922c6f66a60e49953dd26
|
#!/usr/bin/env python
## category RNA-seq
## desc Finds regions of unusual deletions (CLIP-seq)
## experimental
"""
Finds regions of unusual deletions (CLIP-seq)
Given a set of BAM files, we search for areas with an unusually high number
of deletions. For CLIP-Seq, this can be an indicator of the location
of protein-RNA interaction.
Output is either a BED file or a FASTA format file containing these hotspots.
Only unique regions are returned across all files.
See: Zhang and Darnell, Nature Biotechnology (2011)
doi:10.1038/nbt.1873
pmid:21633356
"""
import os
import sys
from ngsutils.bam import bam_pileup_iter
import pysam
def usage():
print __doc__
print """
Usage: bamutils cims {opts} in.bam {in.bam...}
Options:
-fasta ref.fa Output in FASTA format (requires reference genome.fa)
[default: BED output]
-flanking N The number of flanking bases on either side to report
(FASTA output only) [default: 12]
-cutoff N Cut-off % for deletions - if the % of reads that
include a deletion at a position is higher than this
number, the fragment is reported (0->1.0)
[default: 0.1]
-ns Don't take the strand of the read into account
-window N The maximum length of a deletion window
[default: 20]
"""
sys.exit(1)
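# Editor's note (illustration only, not part of ngsutils): a typical run, given
# the option handling in __main__ below.  The reference FASTA needs a .fai
# index and each BAM needs a .bai index for the files to be accepted.
#
#   bamutils cims -fasta genome.fa -flanking 20 -cutoff 0.2 clip1.bam > hotspots.fa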
class BEDEmitter(object):
def __init__(self, out=None):
self.num = 1
if out:
self.out = out
else:
self.out = sys.stdout
def emit(self, chrom, start, end, strand):
if not strand:
strand = '+'
self.out.write('%s\t%s\t%s\tregion_%s\t%s\t%s\n' % (chrom, start, end, self.num, 0, strand))
self.num += 1
def close(self):
if self.out != sys.stdout:
self.out.close()
class FASTAEmitter(object):
def __init__(self, ref_fname, flanking=12, out=None):
self.num = 1
self.ref = pysam.Fastafile(ref_fname)
assert flanking > 0
self.flanking = flanking
if out:
self.out = out
else:
self.out = sys.stdout
def close(self):
self.ref.close()
if self.out != sys.stdout:
self.out.close()
def emit(self, chrom, start, end, strand):
seq = self.ref.fetch(chrom, start - self.flanking, end + self.flanking)
seq = '%s%s%s' % (seq[:self.flanking].upper(), seq[self.flanking:end - start + self.flanking].lower(), seq[-self.flanking:].upper())
if strand == '-':
rc = []
for base in seq[::-1]:
if base == 'A':
rc.append('T')
elif base == 'T':
rc.append('A')
elif base == 'C':
rc.append('G')
elif base == 'G':
rc.append('C')
elif base == 'a':
rc.append('t')
elif base == 't':
rc.append('a')
elif base == 'c':
rc.append('g')
elif base == 'g':
rc.append('c')
seq = ''.join(rc)
self.out.write('>%s:%s%s%s\n%s\n' % (chrom, start - self.flanking, strand, end + self.flanking, seq))
class RegionManager(object):
def __init__(self, emitter, strand='', max_window=20):
self.emitter = emitter
self.strand = strand
self.max_window = max_window
self.last_chrom = None
self.start = 0
self.end = 0
self.del_reads = set()
self.total_reads = set()
def emit(self):
if self.last_chrom:
self.emitter.emit(self.last_chrom, self.start, self.end, self.strand)
def reset(self, new_chrom, new_pos):
self.last_chrom = new_chrom
self.start = new_pos
self.end = new_pos
self.del_reads = set()
self.total_reads = set()
def add(self, chrom, pos, strand, del_reads, total_reads):
if self.strand and strand != self.strand:
# ignore this if the strand doesn't match
return
if chrom != self.last_chrom:
self.emit()
self.reset(chrom, pos)
elif pos - self.start >= self.max_window:
self.emit()
self.reset(chrom, pos)
self.end = pos
self.del_reads |= del_reads
self.total_reads |= total_reads
def close(self):
self.emit()
def is_read_del_at_pos(read, pos, ppos=0):
last_op = None
idx = 0
for op, length in read.cigar:
if op in [0, 1]:
idx += length
if pos < idx:
if op == 2 and last_op != 3:
return True
last_op = op
return False
#TODO: Check this...
def is_read_match_at_pos(read, pos):
idx = 0
for op, length in read.cigar:
if op in [0, 1]:
idx += length
if pos < idx:
if op == 2 or op == 0:
return True
return False
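# Editor's sketch (not part of ngsutils): pysam exposes CIGAR data as
# (operation, length) tuples, where op 0 = M (aligned), 1 = I (insertion),
# 2 = D (deletion) and 3 = N (reference skip); the two helpers above walk
# those tuples.  The fake read below is only a stand-in to show how they can
# be exercised without a real BAM file.
def _example_cigar_checks():
    from collections import namedtuple
    FakeRead = namedtuple('FakeRead', 'cigar')
    read = FakeRead(cigar=[(0, 10), (2, 2), (0, 10)])  # 10M 2D 10M
    return is_read_del_at_pos(read, 9), is_read_match_at_pos(read, 9)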
def bam_cims_finder(bam_fnames, output='bed', ref_fname=None, flanking=12, cutoff=0.1, stranded=True, window_size=20):
for bam_fname in bam_fnames:
sys.stderr.write('%s\n' % bam_fname)
bam = pysam.Samfile(bam_fname, "rb")
if output == 'fasta':
emitter = FASTAEmitter(ref_fname, flanking)
else:
emitter = BEDEmitter()
if stranded:
strands = ['+', '-']
else:
strands = ['']
for strand in strands:
manager = RegionManager(emitter, strand, window_size)
for pileup in bam_pileup_iter(bam, mask=1540):
chrom = bam.getrname(pileup.tid)
deletions = 0.0
total = 0.0
del_reads = set()
total_reads = set()
for pileupread in pileup.pileups:
if not strand or (strand == '+' and not pileupread.alignment.is_reverse) or (strand == '-' and pileupread.alignment.is_reverse):
if is_read_match_at_pos(pileupread.alignment, pileupread.qpos):
total += 1
total_reads.add(pileupread.alignment.qname)
if is_read_del_at_pos(pileupread.alignment, pileupread.qpos):
deletions += 1
del_reads.add(pileupread.alignment.qname)
# print ""
# print chrom
# print pileup.pos
# print pileupread.alignment.qname
# print pileupread.alignment.pos
# print pileupread.alignment.cigar
# print pileupread.qpos
if total > 0:
pct = deletions / total
if pct > cutoff:
manager.add(chrom, pileup.pos, strand, del_reads, total_reads)
manager.close()
bam.close()
emitter.close()
if __name__ == '__main__':
bams = []
ref = None
output = 'bed'
cutoff = 0.1
flanking = 12
stranded = True
window = 20
last = None
for arg in sys.argv[1:]:
if last == '-flanking':
flanking = int(arg)
last = None
elif last == '-cutoff':
cutoff = float(arg)
last = None
elif last == '-window':
window = float(arg)
last = None
elif last == '-fasta' and not ref and os.path.exists(arg) and os.path.exists('%s.fai' % arg):
output = 'fasta'
ref = arg
last = None
elif arg == '-h':
usage()
elif arg in ['-flanking', '-fasta', '-cutoff', '-window']:
last = arg
elif arg == '-ns':
stranded = False
elif os.path.exists(arg) and os.path.exists('%s.bai' % arg):
bams.append(arg)
else:
print "Unknown option or missing index: %s" % arg
usage()
if not bams:
usage()
else:
bam_cims_finder(bams, output, ref, flanking, cutoff, stranded, window)
|
ngsutils/ngsutils
|
ngsutils/bam/cims.py
|
Python
|
bsd-3-clause
| 8,325
|
[
"pysam"
] |
8d92591c4ceb0369ace73beaf46cef313a0bf6e947a52b95bf636c4f353bc59f
|
from __future__ import division, print_function
import abc
import numpy as np
from menpo.transform import Scale, AlignmentSimilarity
from menpo.model.pdm import PDM, OrthoPDM
from menpo.transform.modeldriven import ModelDrivenTransform, OrthoMDTransform
from menpo.fit.regression.trainer import (
NonParametricRegressorTrainer, ParametricRegressorTrainer,
SemiParametricClassifierBasedRegressorTrainer)
from menpo.fit.regression.regressionfunctions import mlr
from menpo.fit.regression.parametricfeatures import weights
from menpo.fitmultilevel.functions import mean_pointcloud
from menpo.fitmultilevel.featurefunctions import compute_features, sparse_hog
from .base import (SupervisedDescentMethodFitter, SupervisedDescentAAMFitter,
SupervisedDescentCLMFitter)
# TODO: document me
class SupervisedDescentTrainer(object):
r"""
"""
__metaclass__ = abc.ABCMeta
def __init__(self, regression_type=mlr, regression_features=None,
feature_type=None, n_levels=3, downscale=2,
scaled_levels=True, noise_std=0.04, rotation=False,
n_perturbations=10, interpolator='scipy', **kwargs):
self.regression_type = regression_type
self.regression_features = regression_features
self.feature_type = feature_type
self.n_levels = n_levels
self.downscale = downscale
self.scaled_levels = scaled_levels
self.noise_std = noise_std
self.rotation = rotation
self.n_perturbations = n_perturbations
self.interpolator = interpolator
def train(self, images, group=None, label='all', **kwargs):
r"""
"""
print('- Computing reference shape')
self.reference_shape = self._compute_reference_shape(images, group,
label)
print('- Normalizing object size')
self._rescale_reference_shape()
images = [i.rescale_to_reference_shape(self.reference_shape,
group=group, label=label,
interpolator=self.interpolator)
for i in images]
print('- Generating multilevel scale space')
if self.scaled_levels:
# Gaussian pyramid
generator = [i.gaussian_pyramid(n_levels=self.n_levels,
downscale=self.downscale)
for i in images]
else:
# Smoothing pyramid
generator = [i.smoothing_pyramid(n_levels=self.n_levels,
downscale=self.downscale)
for i in images]
print('- Generating multilevel feature space')
images = []
for _ in np.arange(self.n_levels):
images.append([compute_features(g.next(), self.feature_type)
for g in generator])
images.reverse()
print('- Extracting ground truth shapes')
gt_shapes = [[i.landmarks[group][label].lms for i in img]
for img in images]
print('- Building regressors')
regressors = []
# for each level
for j, (level_images, level_gt_shapes) in enumerate(zip(images,
gt_shapes)):
print(' - Level {}'.format(j))
trainer = self._set_regressor_trainer(j)
if j == 0:
regressor = trainer.train(level_images, level_gt_shapes,
**kwargs)
else:
regressor = trainer.train(level_images, level_gt_shapes,
level_shapes, **kwargs)
print(' - Generating next level data')
level_shapes = trainer.perturb_shapes(gt_shapes[0])
regressors.append(regressor)
count = 0
total = len(regressors) * len(images[0]) * len(level_shapes[0])
for k, r in enumerate(regressors):
test_images = images[k]
test_gt_shapes = gt_shapes[k]
fittings = []
for (i, gt_s, level_s) in zip(test_images, test_gt_shapes,
level_shapes):
fitting_sublist = []
for ls in level_s:
fitting = r.fit(i, ls)
fitting.gt_shape = gt_s
fitting_sublist.append(fitting)
count += 1
fittings.append(fitting_sublist)
print(' - {} % '.format(round(100*(count+1)/total)),
end='\r')
if self.scaled_levels:
level_shapes = [[Scale(self.downscale,
n_dims=self.reference_shape.n_dims
).apply(f.final_shape)
for f in fitting_sublist]
for fitting_sublist in fittings]
else:
level_shapes = [[f.final_shape for f in fitting_sublist]
for fitting_sublist in fittings]
mean_error = np.mean(np.array([f.final_error
for fitting_sublist in fittings
for f in fitting_sublist]))
print(' - Mean error = {}'.format(mean_error))
return self._build_supervised_descent_fitter(regressors)
@abc.abstractmethod
def _compute_reference_shape(self, images, group, label):
r"""
"""
pass
def _rescale_reference_shape(self):
r"""
"""
pass
@abc.abstractmethod
def _set_regressor_trainer(self, **kwargs):
r"""
"""
pass
@abc.abstractmethod
def _build_supervised_descent_fitter(self, regressors):
pass
#TODO: Document me
class SupervisedDescentMethodTrainer(SupervisedDescentTrainer):
r"""
"""
def __init__(self, regression_type=mlr, regression_features=sparse_hog,
patch_shape=(16, 16), feature_type=None, n_levels=3,
downscale=1.5, scaled_levels=True, noise_std=0.04,
rotation=False, n_perturbations=10, diagonal_range=None,
interpolator='scipy'):
super(SupervisedDescentMethodTrainer, self).__init__(
regression_type=regression_type,
regression_features=regression_features,
feature_type=feature_type, n_levels=n_levels,
downscale=downscale, scaled_levels=scaled_levels,
noise_std=noise_std, rotation=rotation,
n_perturbations=n_perturbations, interpolator=interpolator)
self.patch_shape = patch_shape
self.diagonal_range = diagonal_range
def _compute_reference_shape(self, images, group, label):
shapes = [i.landmarks[group][label].lms for i in images]
return mean_pointcloud(shapes)
def _rescale_reference_shape(self):
if self.diagonal_range:
x, y = self.reference_shape.range()
scale = self.diagonal_range / np.sqrt(x**2 + y**2)
Scale(scale, self.reference_shape.n_dims
).apply_inplace(self.reference_shape)
def _set_regressor_trainer(self, level):
return NonParametricRegressorTrainer(
self.reference_shape, regression_type=self.regression_type,
regression_features=self.regression_features,
patch_shape=self.patch_shape, noise_std=self.noise_std,
rotation=self.rotation, n_perturbations=self.n_perturbations)
def _build_supervised_descent_fitter(self, regressors):
return SupervisedDescentMethodFitter(
regressors, self.feature_type, self.reference_shape,
self.downscale, self.scaled_levels, self.interpolator)
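# Editor's sketch (not part of menpo): a minimal, hypothetical use of the
# non-parametric trainer above.  `training_images` is assumed to be a list of
# menpo images that already carry landmark annotations.
def _example_train_sdm(training_images):
    trainer = SupervisedDescentMethodTrainer(n_levels=3, downscale=1.5,
                                             n_perturbations=10)
    # train() returns a SupervisedDescentMethodFitter ready for fitting
    return trainer.train(training_images)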
#TODO: Document me
class SupervisedDescentAAMTrainer(SupervisedDescentTrainer):
r"""
"""
def __init__(self, aam, regression_type=mlr, regression_features=weights,
noise_std=0.04, rotation=False, n_perturbations=10,
update='compositional', md_transform=OrthoMDTransform,
global_transform=AlignmentSimilarity, n_shape=None,
n_appearance=None):
super(SupervisedDescentAAMTrainer, self).__init__(
regression_type=regression_type,
regression_features=regression_features,
feature_type=aam.feature_type, n_levels=aam.n_levels,
downscale=aam.downscale, scaled_levels=aam.scaled_levels,
noise_std=noise_std, rotation=rotation,
n_perturbations=n_perturbations, interpolator=aam.interpolator)
self.aam = aam
self.update = update
self.md_transform = md_transform
self.global_transform = global_transform
if n_shape is not None:
if type(n_shape) is int:
for sm in self.aam.shape_models:
sm.n_active_components = n_shape
elif len(n_shape) is 1 and self.aam.n_levels > 1:
for sm in self.aam.shape_models:
sm.n_active_components = n_shape[0]
elif len(n_shape) is self.aam.n_levels:
for sm, n in zip(self.aam.shape_models, n_shape):
sm.n_active_components = n
else:
raise ValueError('n_shape can be integer, integer list '
'containing 1 or {} elements or '
'None'.format(self.aam.n_levels))
if n_appearance is not None:
if type(n_appearance) is int:
for am in self.aam.appearance_models:
am.n_active_components = n_appearance
elif len(n_appearance) is 1 and self.aam.n_levels > 1:
for am in self.aam.appearance_models:
am.n_active_components = n_appearance[0]
elif len(n_appearance) is self.aam.n_levels:
for am, n in zip(self.aam.appearance_models, n_appearance):
am.n_active_components = n
else:
raise ValueError('n_appearance can be integer, integer list '
'containing 1 or {} elements or '
'None'.format(self.aam.n_levels))
def _compute_reference_shape(self, images, group, label):
return self.aam.reference_shape
def _normalize_object_size(self, images, group, label):
return [i.rescale_to_reference_shape(self.reference_shape,
group=group, label=label,
interpolator=self.interpolator)
for i in images]
def _set_regressor_trainer(self, level):
am = self.aam.appearance_models[level]
sm = self.aam.shape_models[level]
if self.md_transform is not ModelDrivenTransform:
md_transform = self.md_transform(
sm, self.aam.transform, self.global_transform,
source=am.mean.landmarks['source'].lms)
else:
md_transform = self.md_transform(
sm, self.aam.transform,
source=am.mean.landmarks['source'].lms)
return ParametricRegressorTrainer(
am, md_transform, self.reference_shape,
regression_type=self.regression_type,
regression_features=self.regression_features, update=self.update,
noise_std=self.noise_std, rotation=self.rotation,
n_perturbations=self.n_perturbations,
interpolator=self.interpolator)
def _build_supervised_descent_fitter(self, regressors):
return SupervisedDescentAAMFitter(self.aam, regressors)
#TODO: Document me
#TODO: Finish me
class SupervisedDescentCLMTrainer(SupervisedDescentTrainer):
r"""
"""
def __init__(self, clm, regression_type=mlr, regression_features=weights,
noise_std=0.04, rotation=False, n_perturbations=10,
pdm_transform=OrthoPDM,
global_transform=AlignmentSimilarity, n_shape=None):
super(SupervisedDescentCLMTrainer, self).__init__(
regression_type=regression_type,
regression_features=regression_features,
feature_type=clm.feature_type, n_levels=clm.n_levels,
downscale=clm.downscale, scaled_levels=clm.scaled_levels,
noise_std=noise_std, rotation=rotation,
n_perturbations=n_perturbations, interpolator=clm.interpolator)
self.clm = clm
self.patch_shape = clm.patch_shape
self.pdm_transform = pdm_transform
self.global_transform = global_transform
if n_shape is not None:
if type(n_shape) is int:
for sm in self.clm.shape_models:
sm.n_active_components = n_shape
elif len(n_shape) is 1 and self.clm.n_levels > 1:
for sm in self.clm.shape_models:
sm.n_active_components = n_shape[0]
elif len(n_shape) is self.clm.n_levels:
for sm, n in zip(self.clm.shape_models, n_shape):
sm.n_active_components = n
else:
raise ValueError('n_shape can be integer, integer list '
'containing 1 or {} elements or '
'None'.format(self.clm.n_levels))
def _compute_reference_shape(self, images, group, label):
return self.clm.reference_shape
#TODO: Finish me
def _set_regressor_trainer(self, level):
clfs = self.clm.classifiers[level]
sm = self.clm.shape_models[level]
if self.pdm_transform is not PDM:
pdm_transform = self.pdm_transform(sm, self.global_transform)
else:
pdm_transform = self.pdm_transform(sm)
return SemiParametricClassifierBasedRegressorTrainer(
clfs, pdm_transform, self.reference_shape,
regression_type=self.regression_type,
patch_shape=self.patch_shape,
noise_std=self.noise_std, rotation=self.rotation,
n_perturbations=self.n_perturbations)
def _build_supervised_descent_fitter(self, regressors):
return SupervisedDescentCLMFitter(self.clm, regressors)
|
jabooth/menpo-archive
|
menpo/fitmultilevel/sdm/trainer.py
|
Python
|
bsd-3-clause
| 14,535
|
[
"Gaussian"
] |
44ba0f9e25b9aea8b886f48980cd526d5bc0a74bfdc4ea7a58baad7d5865fb98
|
"""
IO for ADF files.
"""
from pymatgen.core.structure import Molecule
from monty.json import MSONable
import re
import os
from monty.itertools import chunks
from monty.io import reverse_readline
__author__ = 'Xin Chen, chenxin13@mails.tsinghua.edu.cn'
def is_numeric(s):
"""
Return True if the string ``s`` is a numeric string.
Parameters
----------
s : str
A string.
Returns
-------
res : bool
If True, ``s`` is a numeric string and can be converted to an int or a
float. Otherwise False will be returned.
"""
try:
float(s)
except ValueError:
return False
else:
return True
def iterlines(s):
r"""
A generator form of s.split('\n') for reducing memory overhead.
Parameters
----------
s : str
A multi-line string.
Yields
------
line : str
A string.
"""
prevnl = -1
while True:
nextnl = s.find('\n', prevnl + 1)
if nextnl < 0:
yield s[(prevnl+1):]
break
else:
yield s[(prevnl+1):nextnl]
prevnl = nextnl
class AdfInputError(Exception):
"""
The default error class for ADF.
"""
pass
class AdfOutputError(Exception):
"""
The default error class for errors raised by ``AdfOutput``.
"""
pass
class AdfKey(MSONable):
"""
The basic input unit for ADF. A key is a string of characters that does not
contain a delimiter (blank, comma or equal sign). A key may have multiple
subkeys and a set of options.
"""
block_keys = {"SCF", "GEOMETRY", "XC", "UNITS", "ATOMS", "CHARGE", "BASIS",
"SYMMETRY", "RELATIVISTIC", "OCCUPATIONS", "SAVE", "A1FIT",
"INTEGRATION", "UNRESTRICTED", "ZLMFIT", "TITLE",
"EXACTDENSITY", "TOTALENERGY", "ANALYTICALFREQ"}
sub_keys = {"AtomDepQuality"}
# Full blocks are blocks that must have an 'END'.
_full_blocks = {"GEOMETRY", "SCF", "UNITS", "BASIS", "ANALYTICALFREQ"}
def __init__(self, name, options=None, subkeys=None):
"""
Initialization method.
Parameters
----------
name : str
The name of this key.
options : Sized
The options for this key. Each element can be a primitive object or
a tuple/list with two elements: the first is the name and the second
is a primitive object.
subkeys : Sized
The subkeys for this key.
Raises
------
ValueError
If elements in ``subkeys`` are not ``AdfKey`` objects.
"""
self.name = name
self.options = options if options is not None else []
self.subkeys = subkeys if subkeys is not None else []
if len(self.subkeys) > 0:
for k in subkeys:
if not isinstance(k, AdfKey):
raise ValueError("Not all subkeys are ``AdfKey`` objects!")
self._sized_op = None
if len(self.options) > 0:
self._sized_op = isinstance(self.options[0], (list, tuple))
def _options_string(self):
"""
Return the option string.
"""
if len(self.options) > 0:
s = ""
for op in self.options:
if self._sized_op:
s += "{:s}={:s} ".format(*map(str, op))
else:
s += "{:s} ".format(str(op))
return s.strip()
else:
return ""
def is_block_key(self):
"""
Return True if this key is a block key.
"""
return bool(self.name.upper() in self.block_keys)
@property
def key(self):
"""
Return the name of this key. If this is a block key, the name will be
converted to upper case.
"""
if self.is_block_key():
return self.name.upper()
else:
return self.name
def __str__(self):
"""
Return the string representation of this ``AdfKey``.
Notes
-----
If this key is 'Atoms' and the coordinates are in Cartesian form, a
different string format will be used.
"""
s = "{:s}".format(self.key)
if len(self.options) > 0:
s += " {:s}".format(self._options_string())
s += "\n"
if len(self.subkeys) > 0:
if self.key.lower() == 'atoms':
for subkey in self.subkeys:
s += "{:2s} {: 14.8f} {: 14.8f} {: 14.8f}\n".format(
subkey.name, *subkey.options)
else:
for subkey in self.subkeys:
s += str(subkey)
if self.is_block_key():
s += "END\n"
else:
s += "subend\n"
elif self.key.upper() in self._full_blocks:
s += "END\n"
return s
def __eq__(self, other):
if not isinstance(other, AdfKey):
return False
else:
return str(self) == str(other)
def has_subkey(self, subkey):
"""
Return True if this AdfKey contains the given subkey.
Parameters
----------
subkey : str or AdfKey
A key name or an AdfKey object.
Returns
-------
has : bool
True if this key contains the given key. Otherwise False.
"""
if isinstance(subkey, str):
key = subkey
elif isinstance(subkey, AdfKey):
key = subkey.key
else:
raise ValueError("The subkey should be an AdfKey or a string!")
if len(self.subkeys) > 0:
if key in map(lambda k: k.key, self.subkeys):
return True
return False
def add_subkey(self, subkey):
"""
Add a new subkey to this key.
Parameters
----------
subkey : AdfKey
A new subkey.
Notes
-----
Duplicate check will not be performed if this is an 'Atoms' block.
"""
if self.key.lower() == 'atoms' or not self.has_subkey(subkey):
self.subkeys.append(subkey)
def remove_subkey(self, subkey):
"""
Remove the given subkey, if existed, from this AdfKey.
Parameters
----------
subkey : str or AdfKey
The subkey to remove.
"""
if len(self.subkeys) > 0:
key = subkey if isinstance(subkey, str) else subkey.key
for i in range(len(self.subkeys)):
if self.subkeys[i].key == key:
self.subkeys.pop(i)
break
def add_option(self, option):
"""
Add a new option to this key.
Parameters
----------
option : Sized or str or int or float
A new option to add. This must have the same format as the existing
options.
Raises
------
TypeError
If the format of the given ``option`` is different.
"""
if len(self.options) == 0:
self.options.append(option)
else:
sized_op = isinstance(option, (list, tuple))
if self._sized_op != sized_op:
raise TypeError("Option type is mismatched!")
self.options.append(option)
def remove_option(self, option):
"""
Remove an option.
Parameters
----------
option : str or int
The name (str) or index (int) of the option to remove.
Raises
------
TypeError
If the option has a wrong type.
"""
if len(self.options) > 0:
if self._sized_op:
if not isinstance(option, str):
raise TypeError("``option`` should be a name string!")
for i in range(len(self.options)):
if self.options[i][0] == option:
self.options.pop(i)
break
else:
if not isinstance(option, int):
raise TypeError("``option`` should be an integer index!")
self.options.pop(option)
def has_option(self, option):
"""
Return True if the option is included in this key.
Parameters
----------
option : str
The option.
Returns
-------
has : bool
True if the option can be found. Otherwise False will be returned.
"""
if len(self.options) == 0:
return False
for op in self.options:
if (self._sized_op and op[0] == option) or (op == option):
return True
return False
def as_dict(self):
"""
A JSON serializable dict representation of self.
"""
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"name": self.name, "options": self.options}
if len(self.subkeys) > 0:
subkeys = []
for subkey in self.subkeys:
subkeys.append(subkey.as_dict())
d.update({"subkeys": subkeys})
return d
def to_json(self):
"""
Return a json string representation of the MSONable AdfKey object.
"""
return super().to_json()
@classmethod
def from_dict(cls, d):
"""
Construct a MSONable AdfKey object from the JSON dict.
Parameters
----------
d : dict
A dict of saved attributes.
Returns
-------
adfkey : AdfKey
An AdfKey object recovered from the JSON dict ``d``.
"""
key = d.get("name")
options = d.get("options", None)
subkey_list = d.get("subkeys", [])
if len(subkey_list) > 0:
subkeys = list(map(lambda k: AdfKey.from_dict(k), subkey_list))
else:
subkeys = None
return cls(key, options, subkeys)
@staticmethod
def from_string(string):
"""
Construct an AdfKey object from the string.
Parameters
----------
string : str
A string.
Returns
-------
adfkey : AdfKey
An AdfKey object recovered from the string.
Raises
------
ValueError
Currently nested subkeys are not supported. If ``subend`` was found
a ValueError would be raised.
Notes
-----
Only the first block key will be returned.
"""
def is_float(s):
if '.' in s or 'E' in s or 'e' in s:
return True
else:
return False
if string.find("\n") == -1:
el = string.split()
if len(el) > 1:
if string.find("=") != -1:
options = list(map(lambda s: s.split("="), el[1:]))
else:
options = el[1:]
for i, op in enumerate(options):
if isinstance(op, list) and is_numeric(op[1]):
op[1] = float(op[1]) if is_float(op[1]) else int(op[1])
elif is_numeric(op):
options[i] = float(op) if is_float(op) else int(op)
else:
options = None
return AdfKey(el[0], options)
if string.find('subend') != -1:
raise ValueError("Nested subkeys are not supported!")
key = None
for line in iterlines(string):
if line == "":
continue
el = line.strip().split()
if len(el) == 0:
continue
if el[0].upper() in AdfKey.block_keys:
if key is None:
key = AdfKey.from_string(line)
else:
return key
elif el[0].upper() == 'END':
return key
elif key is not None:
key.add_subkey(AdfKey.from_string(line))
else:
raise Exception("IncompleteKey: 'END' is missing!")
class AdfTask(MSONable):
"""
Basic task for ADF. All settings in this class are independent of molecules.
Notes
-----
Unlike other quantum chemistry packages (NWChem, Gaussian, ...), ADF does
not support calculating force/gradient.
"""
operations = {"energy": "Evaluate the single point energy.",
"optimize": "Minimize the energy by varying the molecular "
"structure.",
"frequencies": "Compute second derivatives and print out an "
"analysis of molecular vibrations.",
"freq": "Same as frequencies.",
"numerical_frequencies": "Compute molecular frequencies using"
" numerical method."}
def __init__(self, operation="energy", basis_set=None, xc=None,
title="ADF_RUN", units=None, geo_subkeys=None, scf=None,
other_directives=None):
"""
Initialization method.
Parameters
----------
operation : str
The target operation.
basis_set : AdfKey
The basis set definitions for this task. Defaults to 'DZ/Large'.
xc : AdfKey
The exchange-correlation functionals. Defaults to PBE.
title : str
The title of this ADF task.
units : AdfKey
The units. Defaults to Angstroms/Degree.
geo_subkeys : Sized
The subkeys for the block key 'GEOMETRY'.
scf : AdfKey
The scf options.
other_directives : Sized
User-defined directives.
"""
if operation not in self.operations.keys():
raise AdfInputError("Invalid ADF task {:s}".format(operation))
self.operation = operation
self.title = title
self.basis_set = basis_set if basis_set is not None else \
self.get_default_basis_set()
self.xc = xc if xc is not None else self.get_default_xc()
self.units = units if units is not None else self.get_default_units()
self.scf = scf if scf is not None else self.get_default_scf()
self.other_directives = other_directives \
if other_directives is not None else []
self._setup_task(geo_subkeys)
@staticmethod
def get_default_basis_set():
"""
Returns: Default basis set
"""
return AdfKey.from_string("Basis\ntype DZ\ncore small\nEND")
@staticmethod
def get_default_scf():
"""
Returns: ADF using default SCF.
"""
return AdfKey.from_string("SCF\niterations 300\nEND")
@staticmethod
def get_default_geo():
"""
Returns: ADFKey using default geometry.
"""
return AdfKey.from_string("GEOMETRY SinglePoint\nEND")
@staticmethod
def get_default_xc():
"""
Returns: ADFKey using default XC.
"""
return AdfKey.from_string("XC\nGGA PBE\nEND")
@staticmethod
def get_default_units():
"""
Returns: Default units.
"""
return AdfKey.from_string("Units\nlength angstrom\nangle degree\nEnd")
def _setup_task(self, geo_subkeys):
"""
Setup the block 'Geometry' given subkeys and the task.
Parameters
----------
geo_subkeys : Sized
User-defined subkeys for the block 'Geometry'.
Notes
-----
Most of the run types of ADF are specified in the Geometry block except
the 'AnalyticalFreq'.
"""
self.geo = AdfKey("Geometry", subkeys=geo_subkeys)
if self.operation.lower() == "energy":
self.geo.add_option("SinglePoint")
if self.geo.has_subkey("Frequencies"):
self.geo.remove_subkey("Frequencies")
elif self.operation.lower() == "optimize":
self.geo.add_option("GeometryOptimization")
if self.geo.has_subkey("Frequencies"):
self.geo.remove_subkey("Frequencies")
elif self.operation.lower() == "numerical_frequencies":
self.geo.add_subkey(AdfKey("Frequencies"))
else:
self.other_directives.append(AdfKey("AnalyticalFreq"))
if self.geo.has_subkey("Frequencies"):
self.geo.remove_subkey("Frequencies")
def __str__(self):
s = """TITLE {title}\n
{units}
{xc}
{basis_set}
{scf}
{geo}""".format(
title=self.title, units=str(self.units), xc=str(self.xc),
basis_set=str(self.basis_set), scf=str(self.scf), geo=str(self.geo)
)
s += "\n"
for block_key in self.other_directives:
if not isinstance(block_key, AdfKey):
raise ValueError("{} is not an AdfKey!".format(str(block_key)))
s += str(block_key) + "\n"
return s
def as_dict(self):
"""
A JSON serializable dict representation of self.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"operation": self.operation, "title": self.title,
"xc": self.xc.as_dict(), "basis_set": self.basis_set.as_dict(),
"units": self.units.as_dict(), "scf": self.scf.as_dict(),
"geo": self.geo.as_dict(),
"others": [k.as_dict() for k in self.other_directives]}
def to_json(self):
"""
Return a json string representation of the MSONable AdfTask object.
"""
return super().to_json()
@classmethod
def from_dict(cls, d):
"""
Construct a MSONable AdfTask object from the JSON dict.
Parameters
----------
d : dict
A dict of saved attributes.
Returns
-------
task : AdfTask
An AdfTask object recovered from the JSON dict ``d``.
"""
def _from_dict(_d):
return AdfKey.from_dict(_d) if _d is not None else None
operation = d.get("operation")
title = d.get("title")
basis_set = _from_dict(d.get("basis_set"))
xc = _from_dict(d.get("xc"))
units = _from_dict(d.get("units"))
scf = _from_dict(d.get("scf"))
others = [AdfKey.from_dict(o) for o in d.get("others", [])]
geo = _from_dict(d.get("geo"))
return cls(operation, basis_set, xc, title, units, geo.subkeys, scf,
others)
class AdfInput:
"""
A basic ADF input file writer.
"""
def __init__(self, task):
"""
Initialization method.
Parameters
----------
task : AdfTask
An ADF task.
"""
self.task = task
def write_file(self, molecule, inpfile):
"""
Write an ADF input file.
Parameters
----------
molecule : Molecule
The molecule for this task.
inpfile : str
The name where the input file will be saved.
"""
mol_blocks = []
atom_block = AdfKey("Atoms", options=["cartesian"])
for site in molecule:
atom_block.add_subkey(AdfKey(str(site.specie), list(site.coords)))
mol_blocks.append(atom_block)
if molecule.charge != 0:
netq = molecule.charge
ab = molecule.spin_multiplicity - 1
charge_block = AdfKey("Charge", [netq, ab])
mol_blocks.append(charge_block)
if ab != 0:
unres_block = AdfKey("Unrestricted")
mol_blocks.append(unres_block)
with open(inpfile, "w+") as f:
for block in mol_blocks:
f.write(str(block) + "\n")
f.write(str(self.task) + "\n")
f.write("END INPUT")
class AdfOutput:
"""
A basic ADF output file parser.
Attributes
----------
is_failed : bool
True if the ADF job is terminated without success. Otherwise False.
is_internal_crash : bool
True if the job is terminated with internal crash. Please read 'TAPE13'
of the ADF manual for more detail.
error : str
The error description.
run_type : str
The RunType of this ADF job. Possible options are: 'SinglePoint',
'GeometryOptimization', 'AnalyticalFreq' and 'NumericalFreq'.
final_energy : float
The final molecule energy (a.u).
final_structure : Molecule
The final structure of the molecule.
energies : Sized
The energy of each cycle.
structures : Sized
The structure of each cycle, if geometry optimization is performed.
frequencies : array_like
The frequencies of the molecule.
normal_modes : array_like
The normal modes of the molecule.
freq_type : str
Either 'Analytical' or 'Numerical'.
"""
def __init__(self, filename):
"""
Initialization method.
Parameters
----------
filename : str
The ADF output file to parse.
"""
self.filename = filename
self._parse()
def _parse(self):
"""
Parse the ADF outputs. There are two files: one is 'logfile', the other
is the ADF output file. The final energy and structures are parsed from
the 'logfile'. Frequencies and normal modes are parsed from the ADF
output file.
"""
workdir = os.path.dirname(self.filename)
logfile = os.path.join(workdir, "logfile")
if not os.path.isfile(logfile):
raise IOError("The ADF logfile can not be accessed!")
self.is_failed = False
self.error = None
self.final_energy = None
self.final_structure = None
self.energies = []
self.structures = []
self.frequencies = []
self.normal_modes = None
self.freq_type = None
self.run_type = None
self.is_internal_crash = False
self._parse_logfile(logfile)
if not self.is_failed and self.run_type != "SinglePoint":
self._parse_adf_output()
@staticmethod
def _sites_to_mol(sites):
"""
Return a ``Molecule`` object given a list of sites.
Parameters
----------
sites : list
A list of sites.
Returns
-------
mol : Molecule
A ``Molecule`` object.
"""
return Molecule([site[0] for site in sites],
[site[1] for site in sites])
def _parse_logfile(self, logfile):
"""
Parse the formatted logfile.
"""
cycle_patt = re.compile(r"Coordinates\sin\sGeometry\sCycle\s(\d+)")
coord_patt = re.compile(r"\s+([0-9]+)\.([A-Za-z]+)"+3*r"\s+([-\.0-9]+)")
energy_patt = re.compile(r"<.*>\s<.*>\s+current\senergy\s+([-\.0-9]+)\s"
"Hartree")
final_energy_patt = re.compile(
r"<.*>\s<.*>\s+Bond\sEnergy\s+([-\.0-9]+)\sa\.u\.")
error_patt = re.compile(r"<.*>\s<.*>\s+ERROR\sDETECTED:\s(.*)")
runtype_patt = re.compile(r"<.*>\s<.*>\s+RunType\s+:\s(.*)")
end_patt = re.compile(r"<.*>\s<.*>\s+END")
parse_cycle = False
sites = []
last_cycle = -1
parse_final = False
# Stop parsing the logfile if this job did not terminate successfully.
# The last non-empty line of the logfile must match the end pattern.
# Otherwise the job has some internal failure. The TAPE13 part of the
# ADF manual has a detailed explanation.
with open(logfile, "r") as f:
for line in reverse_readline(f):
if line == "":
continue
if end_patt.search(line) is None:
self.is_internal_crash = True
self.error = "Internal crash. TAPE13 is generated!"
self.is_failed = True
return
else:
break
with open(logfile, "r") as f:
for line in f:
m = error_patt.search(line)
if m:
self.is_failed = True
self.error = m.group(1)
break
if self.run_type is None:
m = runtype_patt.search(line)
if m:
if m.group(1) == 'FREQUENCIES':
self.freq_type = "Numerical"
self.run_type = "NumericalFreq"
elif m.group(1) == 'GEOMETRY OPTIMIZATION':
self.run_type = "GeometryOptimization"
elif m.group(1) == 'CREATE':
self.run_type = None
elif m.group(1) == 'SINGLE POINT':
self.run_type = 'SinglePoint'
else:
raise AdfOutputError("Undefined Runtype!")
elif self.run_type == 'SinglePoint':
m = coord_patt.search(line)
if m:
sites.append([m.groups()[1],
list(map(float, m.groups()[2:]))])
else:
m = final_energy_patt.search(line)
if m:
self.final_energy = float(m.group(1))
self.final_structure = self._sites_to_mol(sites)
elif self.run_type == 'GeometryOptimization':
m = cycle_patt.search(line)
if m:
cycle = int(m.group(1))
if cycle <= 0:
raise AdfOutputError("Wrong cycle {}".format(cycle))
if cycle > last_cycle:
parse_cycle = True
last_cycle = cycle
else:
parse_final = True
elif parse_cycle:
m = coord_patt.search(line)
if m:
sites.append([m.groups()[1],
list(map(float, m.groups()[2:]))])
else:
m = energy_patt.search(line)
if m:
self.energies.append(float(m.group(1)))
mol = self._sites_to_mol(sites)
self.structures.append(mol)
parse_cycle = False
sites = []
elif parse_final:
m = final_energy_patt.search(line)
if m:
self.final_energy = float(m.group(1))
elif self.run_type == "NumericalFreq":
break
if not self.is_failed:
if self.run_type == "GeometryOptimization":
if len(self.structures) > 0:
self.final_structure = self.structures[-1]
if self.final_energy is None:
raise AdfOutputError("The final energy can not be read!")
elif self.run_type == "SinglePoint":
if self.final_structure is None:
raise AdfOutputError("The final structure is missing!")
if self.final_energy is None:
raise AdfOutputError("The final energy can not be read!")
def _parse_adf_output(self):
"""
Parse the standard ADF output file.
"""
numerical_freq_patt = re.compile(
r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sI\sE\sS\s+\*")
analytic_freq_patt = re.compile(
r"\s+\*\s+F\sR\sE\sQ\sU\sE\sN\sC\sY\s+A\sN\sA\sL\sY\sS\sI\sS\s+\*")
freq_on_patt = re.compile(r"Vibrations\sand\sNormal\sModes\s+\*+.*\*+")
freq_off_patt = re.compile(r"List\sof\sAll\sFrequencies:")
mode_patt = re.compile(r"\s+(\d+)\.([A-Za-z]+)\s+(.*)")
coord_patt = re.compile(r"\s+(\d+)\s+([A-Za-z]+)" + 6 * r"\s+([0-9\.-]+)")
coord_on_patt = re.compile(r"\s+\*\s+R\sU\sN\s+T\sY\sP\sE\s:\sFREQUENCIES\s+\*")
parse_freq = False
parse_mode = False
nnext = 0
nstrike = 0
sites = []
self.frequencies = []
self.normal_modes = []
if self.final_structure is None:
find_structure = True
parse_coord = False
natoms = 0
else:
find_structure = False
parse_coord = False
natoms = self.final_structure.num_sites
with open(self.filename, "r") as f:
for line in f:
if self.run_type == "NumericalFreq" and find_structure:
if not parse_coord:
m = coord_on_patt.search(line)
if m:
parse_coord = True
else:
m = coord_patt.search(line)
if m:
sites.append(
[m.group(2), list(map(float, m.groups()[2:5]))])
nstrike += 1
elif nstrike > 0:
find_structure = False
self.final_structure = self._sites_to_mol(sites)
natoms = self.final_structure.num_sites
elif self.freq_type is None:
if numerical_freq_patt.search(line):
self.freq_type = "Numerical"
elif analytic_freq_patt.search(line):
self.freq_type = "Analytical"
self.run_type = "AnalyticalFreq"
elif freq_on_patt.search(line):
parse_freq = True
elif parse_freq:
if freq_off_patt.search(line):
break
el = line.strip().split()
if 1 <= len(el) <= 3 and line.find(".") != -1:
nnext = len(el)
parse_mode = True
parse_freq = False
self.frequencies.extend(map(float, el))
for i in range(nnext):
self.normal_modes.append([])
elif parse_mode:
m = mode_patt.search(line)
if m:
v = list(chunks(map(float, m.group(3).split()), 3))
if len(v) != nnext:
raise AdfOutputError("Odd Error!")
for i, k in enumerate(range(-nnext, 0, 1)):
self.normal_modes[k].extend(v[i])
if int(m.group(1)) == natoms:
parse_freq = True
parse_mode = False
if isinstance(self.final_structure, list):
self.final_structure = self._sites_to_mol(self.final_structure)
if self.freq_type is not None:
if len(self.frequencies) != len(self.normal_modes):
raise AdfOutputError("The number of normal modes is wrong!")
if len(self.normal_modes[0]) != natoms * 3:
raise AdfOutputError("The dimensions of the modes are wrong!")
|
gVallverdu/pymatgen
|
pymatgen/io/adf.py
|
Python
|
mit
| 31,771
|
[
"ADF",
"Gaussian",
"NWChem",
"pymatgen"
] |
babd8eb46b751c4c2bd4d9412072357ea05432f3f70eb73cd5018c096d0a478a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# lsresowners - simple list of resource owners for a resource with access
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""List all CNs in the list of administrators for a given resource if user is
an owner.
"""
import os
import shared.returnvalues as returnvalues
from shared.findtype import is_owner
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.listhandling import list_items_in_pickled_list
def signature():
"""Signature of the main function"""
defaults = {'unique_resource_name': REJECT_UNSET}
return ['list', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
unique_resource_name = accepted['unique_resource_name'][-1]
if not is_owner(client_id, unique_resource_name,
configuration.resource_home, logger):
output_objects.append({'object_type': 'error_text', 'text'
: 'You must be an owner of %s to get the list of owners!'
% unique_resource_name})
return (output_objects, returnvalues.CLIENT_ERROR)
# is_owner incorporates unique_resource_name verification - no need to
# specifically check for illegal directory traversal
base_dir = os.path.abspath(os.path.join(configuration.resource_home,
unique_resource_name)) + os.sep
owners_file = os.path.join(base_dir, 'owners')
(status, msg) = list_items_in_pickled_list(owners_file, logger)
if not status:
output_objects.append({'object_type': 'error_text', 'text'
: 'Could not get list of owners, reason: %s'
% msg})
return (output_objects, returnvalues.SYSTEM_ERROR)
output_objects.append({'object_type': 'list', 'list': msg})
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/lsresowners.py
|
Python
|
gpl-2.0
| 3,190
|
[
"Brian"
] |
49769199ac604e8ba0a9a03e20cc32d8ba64a2ce0ca4b93e7217815b1f9246c0
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing an enumerated list of possible values.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
#-------------------------------------------------------------------------
#
# set up logging
#
#-------------------------------------------------------------------------
import logging
#-------------------------------------------------------------------------
#
# EnumeratedListOption class
#
#-------------------------------------------------------------------------
class EnumeratedListOption(Option):
"""
This class describes an option that provides a finite number of values.
Each possible value is assigned a value and a description.
"""
__signals__ = { 'options-changed' : None }
def __init__(self, label, value):
"""
:param label: A friendly label to be applied to this option.
Example: "Paper Size"
:type label: string
:param value: An initial value for this option.
Example: 5
:type value: int
:return: nothing
"""
Option.__init__(self, label, value)
self.__items = []
def add_item(self, value, description):
"""
Add an item to the list of possible values.
:param value: The value that corresponds to this item.
Example: 5
:type value: int
:param description: A description of this value.
Example: "8.5 x 11"
:type description: string
:return: nothing
"""
self.__items.append((value, description))
self.emit('options-changed')
def set_items(self, items):
"""
Add a list of items to the list of possible values.
:param items: A list of tuples containing value, description pairs.
Example: [ (5,"8.5 x 11"), (6,"11 x 17")]
:type items: array
:return: nothing
"""
self.__items = items
self.emit('options-changed')
def get_items(self):
"""
Get all the possible values for this option.
:return: an array of tuples containing (value,description) pairs.
"""
return self.__items
def clear(self):
"""
Clear all possible values from this option.
:return: nothing.
"""
self.__items = []
self.emit('options-changed')
def set_value(self, value):
"""
Set the value of this option.
:param value: A value for this option.
Example: True
:type value: The type will depend on the type of option.
:return: nothing
"""
if value in (v for v, d in self.__items):
Option.set_value(self, value)
else:
logging.warning(_("Value '%(val)s' not found for option '%(opt)s'") %
{'val' : str(value), 'opt' : self.get_label()})
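# A minimal usage sketch (illustrative only, not part of the original Gramps
# module).  Report options are normally constructed by the option-menu
# machinery, but the class can be exercised on its own roughly like this:
#
#     size = EnumeratedListOption(_("Paper Size"), 5)
#     size.add_item(5, "8.5 x 11")
#     size.add_item(6, "11 x 17")
#     size.set_value(6)     # accepted: 6 is a known item
#     size.set_value(99)    # ignored, logs a "Value not found" warning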
|
pmghalvorsen/gramps_branch
|
gramps/gen/plug/menu/_enumeratedlist.py
|
Python
|
gpl-2.0
| 4,029
|
[
"Brian"
] |
e029e68b33dd13ca4187bbfc5d4bb599eefcbce7ddffe8ae3d554ca10c03191d
|
# Copyright (C) 2007-2011 Franco M. Luque
# URL: <http://www.cs.famaf.unc.edu.ar/~francolq/>
# For license information, see LICENSE.txt
# util.py: Some utilities, mainly for serialization (pickling) of objects.
import os
import pickle
import sys
import nltk
obj_basedir = 'lq-nlp-commons'
def write_file(filename, content):
f = open(filename, 'w')
f.write(content)
f.close()
def read_file(filename):
f = open(filename)
content = f.read()
f.close()
return content
# XXX: works with lists, although it could be done with sets.
def powerset(s):
if len(s) == 0:
return [[]]
else:
e = s[0]
p = powerset(s[1:])
return p + map(lambda x: x+[e], p)
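# e.g. powerset([1, 2]) returns [[], [2], [1], [2, 1]] (a list of lists, in the order built above).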
# check whether a bracketing has no crossing spans
def tree_consistent(b):
"""FIXME: move this to the bracketing package.
"""
def crosses((a,b),(c,d)):
return (a < c and c < b and b < d) or (c < a and a < d and d < b)
for i in range(len(b)):
for j in range(i+1,len(b)):
if crosses(b[i], b[j]):
return False
return True
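# e.g. tree_consistent([(0, 3), (1, 2)]) is True (nested spans), while
# tree_consistent([(0, 2), (1, 3)]) is False (the spans cross).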
def get_obj_basedir():
try:
return nltk.data.find(obj_basedir)
except LookupError:
os.mkdir(os.path.join(nltk.data.path[0], obj_basedir))
return nltk.data.find(obj_basedir)
# Saves an object to a file, to be loaded later with load_obj.
def save_obj(object, filename):
path = os.path.join(get_obj_basedir(), filename)
    f = open(path, 'wb')
pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)
f.close()
# Loads an object that was saved to a file with save_obj.
def load_obj(filename):
path = os.path.join(get_obj_basedir(), filename)
try:
        f = open(path, 'rb')
object = pickle.load(f)
f.close()
except IOError:
object = None
return object
# Loads a list of objects saved to a file using ObjectSaver.
def load_objs(filename):
path = os.path.join(get_obj_basedir(), filename)
try:
        f = open(path, 'rb')
objects = []
try:
while True:
objects += [pickle.load(f)]
except EOFError: # It will always be thrown
f.close()
except IOError:
objects = None
return objects
class ObjectSaver:
    # If the file exists, open it, read it, and start writing at the end.
def __init__(self, filename):
path = os.path.join(get_obj_basedir(), filename)
self.f = open(path, 'a+')
self.orig_objs = []
try:
while True:
self.orig_objs += [pickle.load(self.f)]
except EOFError: # It will always be thrown
pass
def save_obj(self, object):
pickle.dump(object, self.f, pickle.HIGHEST_PROTOCOL)
def flush(self):
self.f.flush()
def close(self):
self.f.close()
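# Illustrative use of ObjectSaver (compute_missing is a hypothetical caller-side
# helper, not defined in this module): persist results incrementally so an
# interrupted run can be resumed from what was already written.
#
#     saver = ObjectSaver('results.pickle')
#     done = saver.orig_objs              # objects written by previous runs
#     for result in compute_missing(done):
#         saver.save_obj(result)
#         saver.flush()
#     saver.close()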
class Progress:
"""
    Helper class to output to stdout a fancy indicator of the progress of something.
See model.Model for an example of usage.
>>> p = Progress('Parsed', 0, 200)
Parsed 0 of 200
>>> p.next()
1 of 200
>>> p.next()
2 of 200
"""
def __init__(self, prefix, n_init, n_max):
m = len(str(n_max))
o = "%"+str(m)+"d of "+str(n_max)
self.i = 0
print prefix, o % self.i,
sys.stdout.flush()
self.o = ("\b"*(2*m+5)) + o
def next(self):
self.i += 1
print self.o % self.i,
sys.stdout.flush()
# Recipe 364469: "Safe" Eval (Python) by Michael Spencer
# ActiveState Code (http://code.activestate.com/recipes/364469/)
import compiler
class Unsafe_Source_Error(Exception):
def __init__(self,error,descr = None,node = None):
self.error = error
self.descr = descr
self.node = node
self.lineno = getattr(node,"lineno",None)
def __repr__(self):
return "Line %d. %s: %s" % (self.lineno, self.error, self.descr)
__str__ = __repr__
class SafeEval(object):
def visit(self, node,**kw):
cls = node.__class__
meth = getattr(self,'visit'+cls.__name__,self.default)
return meth(node, **kw)
def default(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
visitExpression = default
def visitConst(self, node, **kw):
return node.value
def visitDict(self,node,**kw):
return dict([(self.visit(k),self.visit(v)) for k,v in node.items])
def visitTuple(self,node, **kw):
return tuple(self.visit(i) for i in node.nodes)
def visitList(self,node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnarySub(self, node, **kw):
return -self.visit(node.getChildNodes()[0])
class SafeEvalWithErrors(SafeEval):
def default(self, node, **kw):
raise Unsafe_Source_Error("Unsupported source construct",
node.__class__,node)
def visitName(self,node, **kw):
raise Unsafe_Source_Error("Strings must be quoted",
node.name, node)
# Add more specific errors if desired
def safe_eval(source, fail_on_error = True):
walker = fail_on_error and SafeEvalWithErrors() or SafeEval()
try:
ast = compiler.parse(source,"eval")
except SyntaxError, err:
raise
try:
return walker.visit(ast)
except Unsafe_Source_Error, err:
raise
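# Illustrative usage of the recipe above (not part of the original module):
#
#     >>> safe_eval("[('NP', 0, 3), ('VP', 3, 5)]")
#     [('NP', 0, 3), ('VP', 3, 5)]
#
# Anything beyond literal constants, tuples, lists, dicts and negated numbers
# (e.g. an unquoted name or a function call) raises Unsafe_Source_Error when
# fail_on_error is True.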
|
davidswelt/dmvccm
|
lq-nlp-commons/util.py
|
Python
|
gpl-3.0
| 5,462
|
[
"VisIt"
] |
a7af99a335557ccfc992f09eb3bc24d4ca332894eb2a2f2cb1c6ed3d44cecb38
|
##
# Copyright 2009-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing DIRAC, implemented as an easyblock
"""
import os
import re
import shutil
import tempfile
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakemake import CMakeMake
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.run import run_cmd
class EB_DIRAC(CMakeMake):
"""Support for building/installing DIRAC."""
def configure_step(self):
"""Custom configuration procedure for DIRAC."""
        # make very sure the install directory isn't there yet, since it may cause problems if it is used (forced rebuild)
if os.path.exists(self.installdir):
self.log.warning("Found existing install directory %s, removing it to avoid problems", self.installdir)
try:
shutil.rmtree(self.installdir)
except OSError as err:
raise EasyBuildError("Failed to remove existing install directory %s: %s", self.installdir, err)
self.cfg['separate_build_dir'] = True
self.cfg.update('configopts', "-DENABLE_MPI=ON -DCMAKE_BUILD_TYPE=release")
# complete configuration with configure_method of parent
super(EB_DIRAC, self).configure_step()
def test_step(self):
"""Custom built-in test procedure for DIRAC."""
if self.cfg['runtest']:
if not build_option('mpi_tests'):
self.log.info("Skipping testing of DIRAC since MPI testing is disabled")
return
# set up test environment
# see http://diracprogram.org/doc/release-14/installation/testing.html
env.setvar('DIRAC_TMPDIR', tempfile.mkdtemp(prefix='dirac-test-'))
env.setvar('DIRAC_MPI_COMMAND', self.toolchain.mpi_cmd_for('', self.cfg['parallel']))
# run tests (may take a while, especially if some tests take a while to time out)
self.log.info("Running tests may take a while, especially if some tests timeout (default timeout is 1500s)")
cmd = "make test"
out, ec = run_cmd(cmd, simple=False, log_all=False, log_ok=False)
# check that majority of tests pass
# some may fail due to timeout, but that's acceptable
# cfr. https://groups.google.com/forum/#!msg/dirac-users/zEd5-xflBnY/OQ1pSbuX810J
# over 90% of tests should pass
passed_regex = re.compile('^(9|10)[0-9.]+% tests passed', re.M)
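            # e.g. matches a CTest summary line such as "98% tests passed, 3 tests failed out of 165"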
if not passed_regex.search(out) and not self.dry_run:
raise EasyBuildError("Too many failed tests; '%s' not found in test output: %s",
passed_regex.pattern, out)
# extract test results
test_result_regex = re.compile(r'^\s*[0-9]+/[0-9]+ Test \s*#[0-9]+: .*', re.M)
test_results = test_result_regex.findall(out)
if test_results:
self.log.info("Found %d test results: %s", len(test_results), test_results)
elif self.dry_run:
# dummy test result
test_results = ["1/1 Test #1: dft_alda_xcfun ............................. Passed 72.29 sec"]
else:
raise EasyBuildError("Couldn't find *any* test results?")
test_count_regex = re.compile(r'^\s*[0-9]+/([0-9]+)')
res = test_count_regex.search(test_results[0])
if res:
test_count = int(res.group(1))
elif self.dry_run:
# a single dummy test result
test_count = 1
else:
raise EasyBuildError("Failed to determine total test count from %s using regex '%s'",
test_results[0], test_count_regex.pattern)
if len(test_results) != test_count:
raise EasyBuildError("Expected to find %s test results, but found %s", test_count, len(test_results))
# check test results, only 'Passed' or 'Timeout' are acceptable outcomes
faulty_tests = []
for test_result in test_results:
if ' Passed ' not in test_result:
self.log.warning("Found failed test: %s", test_result)
if '***Timeout' not in test_result:
faulty_tests.append(test_result)
if faulty_tests:
raise EasyBuildError("Found tests failing due to something else than timeout: %s", faulty_tests)
def sanity_check_step(self):
"""Custom sanity check for DIRAC."""
custom_paths = {
'files': ['bin/pam-dirac'],
'dirs': ['share/dirac'],
}
super(EB_DIRAC, self).sanity_check_step(custom_paths=custom_paths)
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/d/dirac.py
|
Python
|
gpl-2.0
| 5,945
|
[
"DIRAC"
] |
29bc5602596cc7e468a76b2a3a1ef33fc04ec7eab6434a40d7a7d32eb71b9c63
|
# This file contains all the possible names used in toon name generation.
# Each name has a unique id and a category:
# 0 - boyFirst
# 1 - girlFirst
# 2 - neutralFirst
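# A hedged parsing sketch (not part of the original file): each entry below is
# "<id>*<category>*<name>", one per line, and lines beginning with '#' are
# retired names.  Something along these lines would turn the table into
# (id, category, name) tuples:
#
#     entries = [line.split('*', 2) for line in PETNAMES.splitlines()
#                if not line.startswith('#')]
#     entries = [(int(i), int(c), name) for i, c, name in entries]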
PETNAMES = '''0*0*Achilles
1*0*Adolfo
2*0*Adonis
3*0*Aesop
4*0*Agamemnon
5*0*Ahab
6*0*Ahmed
7*0*Ajax
8*0*Alastair
9*0*Alberto
10*0*Alexander
11*0*Alfonso
12*0*Alonzo
13*0*Amadeus
14*0*Ambrose
15*0*Andre
16*0*Angelo
17*0*Angus
18*0*Antoine
19*0*Anton
20*0*Antonio
21*0*Archibald
22*0*Archimedes
23*0*Arnold
24*0*Atilla
25*0*Attaboy
26*0*Avi
27*0*Barney
28*0*Bart
29*0*Bartleby
30*0*Basil
31*0*Baxter
32*0*Beardsley
33*0*Beauchamp
34*0*Beauregard
35*0*Ben
36*0*Benny
37*0*Bernie
38*0*Billygoat
39*0*Bluebeard
40*0*Bo
41*0*Bogart
42*0*Bogie
43*0*Bowser
44*0*Brando
45*0*Bruce
46*0*Bruno
47*0*Brutus
48*0*Bubba
49*0*Buck
50*0*Buster
51*0*Butch
52*0*Cagney
53*0*Carlton
54*0*Casanova
55*0*Casper
56*0*Cecil
57*0*Cedric
58*0*Charlie
59*0*Chekhov
60*0*Chico
61*0*Chief
62*0*Chip
63*0*Chuck
64*0*Chuckie
65*0*Chucko
66*0*Clancy
67*0*Clark
68*0*Claude
69*0*Clayton
70*0*Cliff
71*0*Clive
72*0*Clyde
73*0*Cody
74*0*Colonel
75*0*Confucius
76*0*Conrad
77*0*Cooper
78*0*Cowboy
79*0*Cyril
80*0*Dagwood
81*0*Dante
82*0*Deano
83*0*Donahue
84*0*Donatello
85*0*Drysdale
86*0*Duke
87*0*Dweezil
88*0*Eddie
89*0*Edgar
90*0*Edison
91*0*Eggman
92*0*Einstein
93*0*Elmer
94*0*Elwood
95*0*Emerson
96*0*Emmet
97*0*Errol
98*0*Ethan
99*0*Fafnir
100*0*Farnsworth
101*0*Felix
102*0*Fenimore
103*0*Fenwick
104*0*Ferdinand
105*0*Fergus
106*0*Fido
107*0*Figaro
108*0*Filbert
109*0*Fitzgerald
110*0*Floyd
111*0*Frampton
112*0*Fred
113*0*Gabriel
114*0*Garfunkel
115*0*General
116*0*Genghis
117*0*Gershwin
118*0*Gladiator
119*0*Gladstone
120*0*Gorilla
121*0*Grampa
122*0*Gramps
123*0*Groucho
124*0*Gulliver
125*0*Gunther
126*0*Gus
127*0*Hamlet
128*0*Harold
129*0*Harpo
130*0*Harvey
131*0*Haseltine
132*0*Heathcliff
133*0*Hector
134*0*Hemingway
135*0*Hendrix
136*0*Herbert
137*0*Hercules
138*0*Herman
139*0*Hermes
140*0*Higgins
141*0*Hobart
142*0*Homestar
143*0*Horace
144*0*Horatio
145*0*Horton
146*0*Houdini
147*0*Humphrey
148*0*Ichabod
149*0*Iggy
150*0*Ignatius
151*0*Ignatz
152*0*Igor
153*0*Ike
154*0*Iron Ike
155*0*Ironside
156*0*Irving
157*0*Irwin
158*0*Isaac
159*0*Ishmael
160*0*Ivan
161*0*Ivanhoe
162*0*Jack
163*0*Jake
164*0*Jasper
165*0*Jaymo
166*0*Jerry
167*0*Jethro
168*0*Jimmy
169*0*Jonah
170*0*Karloff
171*0*Kasper
172*0*Keanu
173*0*Kilroy
174*0*King
175*0*Kingfish
176*0*Kirby
177*0*Kissinger
178*0*Lambert
179*0*Lancelot
180*0*Lazarus
181*0*Leonardo
182*0*Lindbergh
183*0*Linus
184*0*Lionel
185*0*Luigi
#186*0*M
187*0*Macarthur
188*0*Macbeth
189*0*Mack
190*0*Malcolm
191*0*Mandrake
192*0*Marcel
193*0*Marco
194*0*Mario
195*0*Marmalade
196*0*Matador
197*0*Maurice
198*0*Maximilian
199*0*Maxwell
200*0*Maynard
201*0*Melville
202*0*Merlin
203*0*Michelangelo
204*0*Milo
205*0*Moe
206*0*Morissey
207*0*Mortimer
208*0*Mozart
209*0*Mugsy
210*0*Muldoon
211*0*Nabokov
212*0*Napoleon
213*0*Narcissus
214*0*Nelson
215*0*Nero
216*0*Nimoy
217*0*Nimrod
218*0*Norbert
219*0*Norman
220*0*Norris
221*0*Obadiah
222*0*Odysseus
223*0*Ogden
224*0*Olaf
225*0*Omar
226*0*Opie
227*0*Opus
228*0*Oswald
229*0*Othello
230*0*Otto
231*0*Pablo
232*0*Paco
233*0*Paddington
234*0*Pancho
235*0*Paolo
236*0*Pappy
237*0*Pavarotti
238*0*Pavlov
239*0*Peabody
240*0*Petey
241*0*Picasso
242*0*Pierre
243*0*Pinkerton
244*0*Pippin
245*0*Poindexter
246*0*Pop
247*0*Popperton
248*0*Poseidon
249*0*Prince
250*0*Puccini
251*0*Puck
252*0*Pugsly
253*0*Pushkin
254*0*Quigley
255*0*Quimby
256*0*Quincy
257*0*Reggie
258*0*Regis
259*0*Reinhold
260*0*Rembrandt
261*0*Renoir
262*0*Reuben
263*0*Rex
264*0*Rhett
265*0*Ricardo
266*0*Richter
267*0*Rocky
268*0*Roderick
269*0*Romeo
270*0*Romulus
271*0*Roosevelt
272*0*Rover
273*0*Rudolph
274*0*Rudy
275*0*Rufus
#276*0*Rumpelstiltskin
277*0*Rupert
278*0*Salvador
279*0*Sandeep
280*0*Schubert
281*0*Scooby
282*0*Scotty
283*0*Scruffy
284*0*Sebastian
285*0*Seymour
286*0*Shakespeare
287*0*Shatner
288*0*Siegfried
289*0*Sigmund
290*0*Simon
291*0*Sinbad
292*0*Skyler
293*0*Snowman
294*0*Snyder
295*0*Socrates
296*0*Sorcerer
297*0*Spalding
298*0*Spanky
299*0*Spencer
300*0*Spinoza
301*0*Spock
302*0*Stallion
303*0*Steinbeck
304*0*Stradivarius
305*0*T Bone
#306*0*Tarzan
307*0*Tchaikovsky
308*0*Terminator
309*0*Tesla
310*0*Thaxter
311*0*Theo
312*0*Theodore
313*0*Thessalus
314*0*Throckmorton
315*0*Tiberius
316*0*Tiger
317*0*Tobias
318*0*Tolkein
319*0*Toreador
320*0*Travis
321*0*Travolta
322*0*Triton
323*0*Ulysses
324*0*Umberto
325*0*Victor
326*0*Vincent
327*0*Vinnie
328*0*Virgil
329*0*Vlad
330*0*Vladimir
331*0*Voltaire
332*0*Vonnegut
333*0*Wainwright
334*0*Waldo
335*0*Waldorf
336*0*Walter
337*0*Warhol
338*0*Watson
339*0*Wayne
340*0*Wilbur
341*0*Wilfred
342*0*Wilhelm
343*0*Winston
344*0*Wiseguy
345*0*Wolfgang
346*0*Woodbury
347*0*Woodrow
348*0*Wyatt
349*0*Xanthus
350*0*Xavier
351*0*Zeus
352*0*Zorro
353*1*Abby
354*1*Adorabelle
355*1*Aggie
356*1*Akiko
357*1*Allessandria
358*1*Amaryllis
359*1*Ambrosia
360*1*Andrea
361*1*Anita
362*1*Annette
363*1*Annie
364*1*Antionette
365*1*April
366*1*Arabella
367*1*Ariel
368*1*Ashley
369*1*Athena
370*1*Babette
371*1*Babs
372*1*Babydoll
373*1*Ballerina
374*1*Beatrice
375*1*Bertie
376*1*Beulah
377*1*Beverly
378*1*Bianca
379*1*Blossom
380*1*Bozette
381*1*Brittany
#382*1*Brunhilda
383*1*Buttercup
384*1*Callalilly
385*1*Candy
386*1*Carmen
387*1*Carnation
388*1*Cassandra
389*1*Cece
390*1*Celeste
391*1*Chanel
392*1*Chantilly
393*1*Charlotte
394*1*Chelsea
395*1*Cher
396*1*Cherry
397*1*Chickie
398*1*Chloe
399*1*Cindy
400*1*Cinnamon
401*1*Cissy
402*1*Claire
403*1*Clementine
404*1*Cleopatra
405*1*Coco
406*1*Cowgirl
407*1*Daffodil
408*1*Dagmar
409*1*Daisy
410*1*Daphne
411*1*Dee Dee
412*1*Delilah
413*1*Dixie
414*1*Dolly
415*1*Dot
416*1*Dottie
417*1*Duchess
418*1*Edith
419*1*Electra
420*1*Elsie
421*1*Elvira
422*1*Emerald
423*1*Europa
424*1*Fannie
425*1*Fatima
426*1*Felicity
427*1*Fifi
428*1*Flopsy
429*1*Flower
430*1*Funnygirl
431*1*Gabby
432*1*Garbo
433*1*Geranium
434*1*Gidget
435*1*Gigi
436*1*Ginger
437*1*Goldie
#438*1*Goldilocks
439*1*Granny
#440*1*Griselda
441*1*Guinevere
442*1*Gwen
443*1*Hannah
444*1*Heidi
445*1*Henrietta
446*1*Hera
447*1*Hermione
448*1*Hildegard
449*1*Honey
450*1*Honeysuckle
451*1*Hortense
452*1*Iris
453*1*Ivy
454*1*Jade
455*1*Jaquelin
456*1*Jasmine
457*1*Jewel
458*1*Jo
459*1*Josephine
460*1*Joy
461*1*Juliet
462*1*June
463*1*Juniper
464*1*Kandy
465*1*Kate
466*1*Katrina
467*1*Koko
468*1*Kornelia
469*1*Krystal
470*1*Lacey
471*1*Lady
472*1*Lambchop
473*1*Lapis Lazuli
474*1*Lassie
475*1*Laverne
476*1*Leonora
477*1*Libby
478*1*Libra
479*1*Lilac
480*1*Lily
481*1*Lipstick
482*1*Lisa
483*1*Lottie
484*1*Lovebird
485*1*Lovesong
486*1*Lucretia
487*1*Lucy
488*1*Lulu
489*1*Luna
490*1*MK
491*1*Mabel
492*1*Magenta
493*1*Maggie
494*1*Mamacita
495*1*Mantissa
496*1*Maxine
497*1*Mayflower
498*1*Medusa
499*1*Melody
500*1*Mercedes
501*1*Meriwether
502*1*Mermaid
503*1*Mildred
504*1*Minerva
505*1*Missy
506*1*Mitzi
507*1*Mocha
508*1*Molly
509*1*Mona Lisa
510*1*Moneypenny
511*1*Monique
512*1*Mopsy
#513*1*Morticia
514*1*Nadia
515*1*Nancy
516*1*Nannygoat
517*1*Naomi
518*1*Nellybelle
519*1*Nightingale
520*1*Nikita
521*1*Odelia
522*1*Olga
523*1*Olive
524*1*Olympia
525*1*Oona
526*1*Opal
527*1*Ophelia
528*1*Organdy
529*1*Pandora
530*1*Patrikia
531*1*Penelope
532*1*Penny
533*1*Petinka
534*1*Petunia
535*1*Phoebe
536*1*Phoenicia
537*1*Pixie
538*1*Poinsettia
539*1*Pookie
540*1*Precious
541*1*Prettipaws
542*1*Primrose
543*1*Princess
544*1*Pudgette
545*1*Queenie
546*1*Raphael
547*1*Rapunzel
548*1*Raquel
549*1*Rosabelle
550*1*Rosebud
551*1*Roxanne
552*1*Roxy
553*1*Ruby
554*1*Sable
555*1*Sabrina
556*1*Sadie
557*1*Saffron
558*1*Sage
559*1*Sapphire
560*1*Sassafras
561*1*Savannah
562*1*Scarlett
563*1*Scheherazade
564*1*Serenade
565*1*Sienna
566*1*Silky
567*1*Sissy
568*1*Snoogie
569*1*Snookie
570*1*Sonja
571*1*Sophia
572*1*Sophie
573*1*Sorceress
574*1*Summer
575*1*Sunbonnet
576*1*Sunny
577*1*Sunshine
578*1*Tabitha
579*1*Tasha
580*1*Titania
581*1*Tootsie
582*1*Topaz
583*1*Trixie
584*1*Truffles
585*1*Tulip
586*1*Twiggy
587*1*Velma
588*1*Venice
589*1*Venus
590*1*Veronica
591*1*Vicki
592*1*Victoria
593*1*Viola
594*1*Violet
595*1*Wallflower
596*1*Winnie
597*1*Winona
598*1*Yum Yum
599*1*Zelda
600*1*Zsa Zsa
601*2*Abner
602*2*Abracadabra
603*2*Acacia
604*2*Ace
605*2*Admiral
606*2*Aglet
607*2*Akimbo
608*2*Alabaster
609*2*Alcatraz
610*2*Alchemy
611*2*Alfalfa
612*2*Alien
613*2*Allegro
614*2*Alto
#615*2*Amaretto
616*2*Amazon
617*2*Ambassador
618*2*Amethyst
619*2*Amnesia
620*2*Ampersand
621*2*Anaconda
622*2*Anchovy
623*2*Andretti
624*2*Andromeda
625*2*Angstrom
626*2*Animal
627*2*Antergy
628*2*Anubus
629*2*Apogee
630*2*Apollo
631*2*Applesauce
632*2*Apprentice
633*2*Apricot
634*2*Aquarius
635*2*Aramis
636*2*Archer
637*2*Arf
638*2*Argus
639*2*Argyle
640*2*Aristotle
641*2*Arizona
642*2*Arrow
643*2*Arsenio
644*2*Asimov
645*2*Asparagus
646*2*Asphalt
647*2*Astro
648*2*Atom
649*2*Augie
650*2*August
651*2*Avalanche
652*2*Avalon
653*2*Avenger
654*2*Avis
655*2*Axel
656*2*Axle
657*2*Azalea
658*2*Aztec
659*2*Azure
660*2*Babbit
661*2*Babble
662*2*Babbler
663*2*Baby
664*2*Babykins
665*2*Baccarat
666*2*Backgammon
667*2*Badger
668*2*Baggy
669*2*Bailey
670*2*Baja
671*2*Balboa
672*2*Baldo
673*2*Baldric
674*2*Baldy
675*2*Ballyhoo
676*2*Bambino
677*2*Bamboo
678*2*Bamboozle
679*2*Bamboozler
680*2*Banana
681*2*Bandit
682*2*Bandito
683*2*Bangle
684*2*Banjo
685*2*Banshee
686*2*Banzai
687*2*Barbarian
688*2*Bargraph
689*2*Barky
690*2*Barnacle
691*2*Barracuda
692*2*Bashful
693*2*Bazooka
694*2*Beaker
695*2*Bean
696*2*Beaner
697*2*Beanstalk
698*2*Beany
699*2*Bear
700*2*Beauty
701*2*Beaver
702*2*Beep
703*2*Beeper
704*2*Beepy
705*2*Beethoven
706*2*Beezneez
707*2*Begonia
708*2*Bellyflop
709*2*Benson
710*2*Bentley
711*2*Beppo
712*2*Bermuda
713*2*Betatest
714*2*Bewitched
715*2*Big Red
716*2*Big Shot
717*2*Bigbelly
718*2*Bigfoot
719*2*Bijou
720*2*Bindle
721*2*Bing
722*2*Bingo
723*2*Binky
724*2*Biscuit
725*2*Bitsy
726*2*Bizzy
727*2*Blackberry
728*2*Blackbird
729*2*Blackfoot
730*2*Blackie
731*2*Blackjack
732*2*Blacktop
733*2*Blanket
734*2*Blaze
735*2*Bleary
736*2*Blimp
737*2*Blimpo
738*2*Blimpy
739*2*Blip Blop
740*2*Blizzard
741*2*Blockhead
742*2*Blondie
743*2*Blooper
744*2*Blorange
745*2*Blot
746*2*Blotto
747*2*Bluebell
748*2*Blueberry
749*2*Bluegrass
750*2*Bluenose
751*2*Blueprint
752*2*Blunder
753*2*Blurp
754*2*Boa
755*2*Bobbin
756*2*Bobo
757*2*Bobtail
758*2*Bodyguard
759*2*Boggs
760*2*Bojangles
761*2*Bolt
762*2*Bombshell
763*2*Bonbon
764*2*Bones
765*2*Bongo
766*2*Bonkers
767*2*Bono
768*2*Bonsai
769*2*Boo Boo
770*2*Boober
771*2*Boogeyman
772*2*Boom Boom
773*2*Boomer
774*2*Boots
775*2*Borax
776*2*Borg
777*2*Bosco
778*2*Bottlecap
779*2*Bottlenose
780*2*Boulder
781*2*Bounce Bounce
782*2*Bouncer
783*2*Bouncy
784*2*Bowtie
785*2*Bowzer
786*2*Boyd
787*2*Bozo
788*2*Brahms
789*2*Brainchild
790*2*Bratwurst
791*2*Bravo
792*2*Briar
793*2*Brie
794*2*Broadway
795*2*Broccoli
796*2*Bronco
797*2*Brooklyn
798*2*Brouhaha
799*2*Brownie
800*2*Bruiser
801*2*Bubblegum
802*2*Bubbles
803*2*Bubo
804*2*Buccaneer
805*2*Buckaroo
806*2*Buckeye
807*2*Buckingham
808*2*Buckle
809*2*Buckshot
810*2*Buckskin
811*2*Buckwheat
812*2*Budapest
813*2*Buddy
814*2*Buffalo
815*2*Buffoon
816*2*Bug
817*2*Bugaboo
818*2*Bugeye
819*2*Bugsy
820*2*Bullet
821*2*Bullwinkle
822*2*Bumblebee
823*2*Bumbler
824*2*Bumper
825*2*Bumpkin
826*2*Bumpus
827*2*Bumpy
828*2*Bungee
829*2*Bunko
830*2*Bunyan
831*2*Burbank
832*2*Butterball
833*2*Butterscotch
834*2*Buzz
835*2*Buzzard
836*2*Buzzy
837*2*Byte
838*2*Caboodle
839*2*Caboose
840*2*Cadbury
841*2*Cadet
842*2*Caesar
843*2*Calculus
844*2*Calico
845*2*Callisto
846*2*Calypso
847*2*Calzone
848*2*Cambridge
849*2*Camelot
850*2*Camembert
851*2*Cameo
852*2*Campbell
853*2*Canoe
854*2*Cap
855*2*Cappy
856*2*Capricorn
857*2*Captain
858*2*Caramba
859*2*Caramel
860*2*Cargo
861*2*Carlos
862*2*Carpool
863*2*Caruso
864*2*Casablanca
865*2*Casbah
866*2*Casey
867*2*Cashmere
868*2*Cassidy
869*2*Castaway
870*2*Catamaran
871*2*Caviar
872*2*Cayenne
873*2*Cerebro
874*2*Cha Cha
875*2*Challenger
876*2*Chamberlain
877*2*Chamomile
878*2*Champ
879*2*Chaos
880*2*Charade
881*2*Charcoal
882*2*Charger
883*2*Charmed
884*2*Chaser
885*2*Chasm
886*2*Checkers
887*2*Cheesecake
888*2*Cheesy
889*2*Cheetah
890*2*Chester
891*2*Chewie
892*2*Chicory
893*2*Chiffon
894*2*Chigger
895*2*Chili
896*2*Chili Bean
897*2*Chipmunk
898*2*Chipper
899*2*Chips
900*2*Chocolate
901*2*Choo Choo
902*2*Chopin
903*2*Chopper
904*2*Chops
905*2*Chopsticks
906*2*Chowder
907*2*Chowderhead
908*2*Chromium
909*2*Chubby
910*2*Chuckles
911*2*Chutzpah
912*2*Cinder
913*2*Citrus
914*2*Clipper
915*2*Cloud
916*2*Cloudy
917*2*Clover
918*2*Clue
919*2*Clueless
920*2*Clunky
921*2*Coach
922*2*Cobol
923*2*Cobra
924*2*Cocoa
925*2*Coconut
926*2*Cola
927*2*Colorado
928*2*Comet
929*2*Comma
930*2*Confetti
931*2*Connecticut
932*2*Cookie
933*2*Cooter
934*2*Copper
935*2*Coral
936*2*Corduroy
937*2*Corky
938*2*Cornball
939*2*Corncob
940*2*Cornelia
941*2*Cornmo
942*2*Corny
943*2*Corona
944*2*Coronet
945*2*Cosmo
946*2*Cottontail
947*2*Couscous
948*2*Coyote
949*2*Crackers
950*2*Cranberry
951*2*Crash
952*2*Crawdad
953*2*Creamy
954*2*Creeper
955*2*Creepy
956*2*Crescent
957*2*Cricket
958*2*Critter
959*2*Croissant
960*2*Crouton
961*2*Crufty
962*2*Cruiser
963*2*Crumb
964*2*Crumbly
965*2*Cruncher
966*2*Crunchy
967*2*Cucamonga
968*2*Cucaracha
969*2*Cuckoo
970*2*Cucumber
971*2*Cuddles
972*2*Cupcake
973*2*Cupid
974*2*Curmudgeon
975*2*Curry
976*2*Cutie
977*2*Cutie Pie
978*2*Cypress
979*2*DJ
980*2*Dakota
981*2*Dali
982*2*Damascus
983*2*Dancer
984*2*Dandelion
985*2*Danger
986*2*Database
987*2*Dazzle
988*2*Dazzler
989*2*Dazzy
990*2*December
991*2*Delaware
992*2*Delphi
993*2*Detonator
994*2*Detour
995*2*Detroit
996*2*Deuce
997*2*Dewfall
998*2*Diamond
999*2*Dice
1000*2*Diego
1001*2*Digby
1002*2*Digger
1003*2*Digit
1004*2*Dimples
1005*2*Dingo
1006*2*Dingus
1007*2*Dinky
1008*2*Divot
1009*2*Dizzy
1010*2*Doc
1011*2*Dodger
1012*2*Dodo
1013*2*Dolby
1014*2*Donut
1015*2*Doodad
1016*2*Doodah
1017*2*Doodle
1018*2*Draco
1019*2*Dracula
1020*2*Drawers
1021*2*Dreadnought
1022*2*Dream
1023*2*Dreamer
1024*2*Dreamweaver
1025*2*Dreamy
1026*2*Drippy
1027*2*Droopy
1028*2*Drummer
1029*2*Dumpling
1030*2*Durango
1031*2*Dustmop
1032*2*Dynamite
1033*2*Dynamo
1034*2*Eagle
1035*2*Ebony
1036*2*Eccentric
1037*2*Echo
1038*2*Eclipse
1039*2*Ecstatic
1040*2*Edge
1041*2*Egad
1042*2*Egghead
1043*2*Eggnog
1044*2*Eggtoss
1045*2*Egypt
1046*2*Elderberry
1047*2*Electron
1048*2*Elf
1049*2*Elijah
1050*2*Elvis
1051*2*Email
1052*2*Ember
1053*2*Encore
1054*2*Enoch
1055*2*Epcot
1056*2*Epic
1057*2*Epilog
1058*2*Equinox
1059*2*Ergo
1060*2*Escapade
1061*2*Eskimo
1062*2*Espresso
1063*2*Euclid
1064*2*Euphoria
1065*2*Euphrates
1066*2*Excalibur
1067*2*Exeter
1068*2*Eyespy
1069*2*Faberge
1070*2*Fable
1071*2*Facepuff
1072*2*Fahrenheit
1073*2*Fairbanks
1074*2*Fairfax
1075*2*Faith
1076*2*Faithful
1077*2*Fajita
1078*2*Falafel
1079*2*Falcon
1080*2*Fancy
1081*2*Fandango
1082*2*Fang
1083*2*Fangtastic
1084*2*Fantasia
1085*2*Fats
1086*2*Fatso
1087*2*Fatty
1088*2*Faust
1089*2*Fax
1090*2*Fearless
1091*2*Feather
1092*2*Feedback
1093*2*Fellini
1094*2*Ferrari
1095*2*Fester
1096*2*Fetch
1097*2*Fiddler
1098*2*Fiddlesticks
1099*2*Fiesta
1100*2*Filibuster
1101*2*Fingerprint
1102*2*Fingers
1103*2*Fink
1104*2*Fishbone
1105*2*Fishface
1106*2*Fishhook
1107*2*Fizz
1108*2*Fizzle
1109*2*Fizzy
1110*2*Flabby
1111*2*Flagpole
1112*2*Flame
1113*2*Flannel
1114*2*Flapjack
1115*2*Flash
1116*2*Flatfoot
1117*2*Flicker
1118*2*Fling
1119*2*Flip
1120*2*Flipper
1121*2*Flop
1122*2*Flopper
1123*2*Floppy
1124*2*Florida
1125*2*Fluffster
1126*2*Fluffy
1127*2*Flurry
1128*2*Flute
1129*2*Fog
1130*2*Fogarty
1131*2*Fondue
1132*2*Fortran
1133*2*Fox
1134*2*Foxy
1135*2*Frankfurter
1136*2*Freckles
1137*2*Freebie
1138*2*Freezerburn
1139*2*French Toast
1140*2*Friday
1141*2*Frogface
1142*2*Frogmar
1143*2*Frost
1144*2*Frosty
1145*2*Fruitcake
1146*2*Frump
1147*2*Frumpson
1148*2*Frumpy
1149*2*Fudd
1150*2*Fuddles
1151*2*Fudge
1152*2*Fugitive
1153*2*Fuji
1154*2*Fungus
1155*2*Funky
1156*2*Furball
1157*2*Furface
1158*2*Fusebox
1159*2*Fuzzball
1160*2*Fuzzy
1161*2*Fuzzyface
1162*2*Fuzzykins
1163*2*Gabardine
1164*2*Galaxy
1165*2*Gallop
1166*2*Gambit
1167*2*Gambler
1168*2*Gangway
1169*2*Garlic
1170*2*Garlicbreath
1171*2*Garnet
1172*2*Garth
1173*2*Gavotte
1174*2*Gecko
1175*2*Geewhiz
1176*2*Gem
1177*2*Gemini
1178*2*Gerbil
1179*2*Gewgaw
1180*2*Ghost
1181*2*Giddyup
1182*2*Giggles
1183*2*Gingerbread
1184*2*Gingersnap
1185*2*Glick
1186*2*Gnash
1187*2*Gnasher
1188*2*Gnocchi
1189*2*Gnome
1190*2*Gobbledegook
1191*2*Gobbler
1192*2*Goblet
1193*2*Goblin
1194*2*Gobo
1195*2*Gogo
1196*2*Goiter
1197*2*Goliath
1198*2*Gomer
1199*2*Goober
1200*2*Gooch
1201*2*Gooey
1202*2*Googol
1203*2*Goose
1204*2*Gooseberry
1205*2*Goosebump
1206*2*Gopher
1207*2*Gouda
1208*2*Governor
1209*2*Gracie
1210*2*Graffiti
1211*2*Graham
1212*2*Granite
1213*2*Granola
1214*2*Gravlax
1215*2*Gremlin
1216*2*Grep
1217*2*Grok
1218*2*Grue
1219*2*Grumpus
1220*2*Grumpy
1221*2*Grungy
1222*2*Guffaw
1223*2*Gumbo
1224*2*Gumdrop
1225*2*Gump
1226*2*Gumpus
1227*2*Gumshoe
1228*2*Gypsy
1229*2*Gyro
1230*2*Haggis
1231*2*Haha
1232*2*Hairball
1233*2*Half Pint
1234*2*Halibut
1235*2*Halifax
1236*2*Halloweenie
1237*2*Halo
1238*2*Halogen
1239*2*Hambone
1240*2*Hamburger
1241*2*Hammer
1242*2*Hammerhead
1243*2*Hammerstein
1244*2*Hammy
1245*2*Hamster
1246*2*Happy
1247*2*Harlequin
1248*2*Harley
1249*2*Harmonica
1250*2*Harmony
1251*2*Harvard
1252*2*Havoc
1253*2*Hawk
1254*2*Hawkeye
1255*2*Hayseed
1256*2*Haywire
1257*2*Hazel
1258*2*Heartbreaker
1259*2*Heathrow
1260*2*Heckler
1261*2*Hedgehog
1262*2*Heehee
1263*2*Hemlock
1264*2*Herringbone
1265*2*Hiccup
1266*2*Hifi
1267*2*Hip Hip
1268*2*Hippo
1269*2*Hippodrome
1270*2*Hoagie
1271*2*Hobo
1272*2*Hoho
1273*2*Holmes
1274*2*Honeybee
1275*2*Hooligan
1276*2*Hoops
1277*2*Hoosier
1278*2*Horoscope
1279*2*Hot Dog
1280*2*Hotfoot
1281*2*Hotshot
1282*2*Houston
1283*2*Howard
1284*2*Huckleberry
1285*2*Huffy
1286*2*Huggy
1287*2*Hugo
1288*2*Humdinger
1289*2*Humdrum
1290*2*Hurricane
1291*2*Hydraulic
1292*2*Hypnos
1293*2*IOU
1294*2*Iapyx
1295*2*Ibex
1296*2*Icarus
1297*2*Icky
1298*2*Icon
1299*2*Icy
1300*2*Idiom
1301*2*Idlewild
1302*2*Igloo
1303*2*Iguana
1304*2*Illogical
1305*2*Illusion
1306*2*Imagine
1307*2*Imp
1308*2*Impy
1309*2*Imus
1310*2*Indigo
1311*2*Indy
1312*2*Inferno
1313*2*Infinity
1314*2*Infrared
1315*2*Ingot
1316*2*Inkblot
1317*2*Inkwell
1318*2*Innie
1319*2*Input
1320*2*Insomnia
1321*2*Intro
1322*2*Iodine
1323*2*Iota
1324*2*Itchy
1325*2*Ivory
1326*2*Izzat
1327*2*Izzy
1328*2*Jabber
1329*2*Jabberwock
1330*2*Jackaroo
1331*2*Jackhammer
1332*2*Jackpot
1333*2*Jackrabbit
1334*2*Jacuzzi
1335*2*Jag
1336*2*Jaguar
1337*2*Jalapeno
1338*2*Jambalaya
1339*2*Jamboree
1340*2*January
1341*2*Jargon
1342*2*Java
1343*2*Jaws
1344*2*Jaybird
1345*2*Jester
1346*2*Jet
1347*2*Jicko
1348*2*Jiffy
1349*2*Jigsaw
1350*2*Jimjam
1351*2*Jingle
1352*2*Jinx
1353*2*Jitterbug
1354*2*Jocko
1355*2*Jojo
1356*2*Joker
1357*2*Jokester
1358*2*Joliet
1359*2*Joplin
1360*2*Jordie
1361*2*Jove
1362*2*Joyride
1363*2*Jubilee
1364*2*Jughead
1365*2*Jujitsu
1366*2*Jukebox
1367*2*July
1368*2*Jumbo
1369*2*Jumpy
1370*2*Junior
1371*2*Juno
1372*2*Kabob
1373*2*Kabuki
1374*2*Kafka
1375*2*Kahuna
1376*2*Kalamazoo
1377*2*Kaleidoscope
1378*2*Kalmuk
1379*2*Kansas
1380*2*Kappa
1381*2*Karamazov
1382*2*Karate
1383*2*Karma
1384*2*Katmandu
1385*2*Katsumi
1386*2*Kayak
1387*2*Keepsake
1388*2*Kellogg
1389*2*Kelvin
1390*2*Ketchup
1391*2*Kewpie
1392*2*Keyboard
1393*2*Keyring
1394*2*Khaki
1395*2*Kibbles
1396*2*Kiddo
1397*2*Kielbasa
1398*2*Kilimanjaro
1399*2*Kilowatt
1400*2*Kimono
1401*2*Kinetic
1402*2*Kipling
1403*2*Kismet
1404*2*Kissyface
1405*2*Kitten
1406*2*Klinger
1407*2*Klondike
1408*2*Kludge
1409*2*Klute
1410*2*Klutz
1411*2*Klutzy
1412*2*Knickerbocker
1413*2*Knievel
1414*2*Knish
1415*2*Knock Knock
1416*2*Knockwurst
1417*2*Knoop
1418*2*Knucklebone
1419*2*Knuckles
1420*2*Koala
1421*2*Kodiak
1422*2*Kong
1423*2*Kookaburra
1424*2*Kooky
1425*2*Kool Beanz
1426*2*Kornball
1427*2*Kosmix
1428*2*Krakatoa
1429*2*Kramer
1430*2*Krispy
1431*2*Krooner
#1432*2*Krypton
1433*2*Lab Rat
1434*2*Labyrinth
1435*2*Lacquer
1436*2*Laddie
1437*2*Ladybug
1438*2*Lambada
1439*2*Lamborghini
1440*2*Lampoon
1441*2*Lamster
1442*2*Landmark
1443*2*Landshark
1444*2*Lanky
1445*2*Lapper
1446*2*Laptop
1447*2*Lasagna
1448*2*Laser
1449*2*Latex
1450*2*Lava
1451*2*Lava Lamp
1452*2*Lavender
1453*2*Layaway
1454*2*Leafy
1455*2*Leaky
1456*2*Lefty
1457*2*Legend
1458*2*Lemming
1459*2*Lemonade
1460*2*Lentil
1461*2*Lettuce
1462*2*Lexy
1463*2*Lickety Split
1464*2*Lickums
1465*2*Licky
1466*2*Licorice
1467*2*Lightning
1468*2*Lima
1469*2*Limbo
1470*2*Limey
1471*2*Linguini
1472*2*Link
1473*2*Lintball
1474*2*Lionheart
1475*2*Lithgow
1476*2*Litmus
1477*2*Littlefoot
1478*2*Liverpool
1479*2*Liverwurst
1480*2*Lizard
1481*2*Lobo
1482*2*Lofty
1483*2*Logan
1484*2*Logical
1485*2*Lollipop
1486*2*London
1487*2*Longfellow
1488*2*Longshot
1489*2*Loofah
1490*2*Looney
1491*2*Loopy
1492*2*Lotus
1493*2*Luau
1494*2*Lucky
1495*2*Ludwig
1496*2*Lullaby
1497*2*Lumpy
1498*2*Lunatic
1499*2*Lynx
1500*2*Lyrical
1501*2*Macaroni
1502*2*Macaroon
#1503*2*Machete
1504*2*Macho
1505*2*Macintosh
1506*2*Mackerel
1507*2*Mad Max
1508*2*Madison
1509*2*Maestro
1510*2*Mage
1511*2*Magic
1512*2*Magma
1513*2*Magnet
1514*2*Magnolia
1515*2*Magnus
1516*2*Magoo
1517*2*Magpie
1518*2*Mahogany
1519*2*Majestic
1520*2*Malibu
1521*2*Mambo
1522*2*Mango
1523*2*Manhattan
1524*2*Manitoba
1525*2*Mantra
1526*2*Maple Syrup
1527*2*Maraschino
1528*2*Marathon
1529*2*Mariachi
1530*2*Marquee
1531*2*Marshmallow
1532*2*Martian
1533*2*Marzipan
1534*2*Mascot
1535*2*Matchmaker
1536*2*Matzoh
1537*2*Maverick
1538*2*Max
1539*2*Maybe
1540*2*Mayhem
1541*2*Mazy
1542*2*Meanie
1543*2*Meatball
1544*2*Meatloaf
1545*2*Melange
1546*2*Melbourne
1547*2*Mellifluent
1548*2*Melrose
1549*2*Memo
1550*2*Memphis
1551*2*Menthol
1552*2*Meow Meow
1553*2*Meringue
1554*2*Mesopotamia
1555*2*Mesquite
1556*2*Meta
1557*2*Metric
1558*2*Metro
1559*2*Mezzo
1560*2*Miami
1561*2*Microfilm
1562*2*Microwave
1563*2*Midas
1564*2*Midget
1565*2*Midnight
1566*2*Mikado
1567*2*Milestone
1568*2*Milkshake
1569*2*Minty
1570*2*Minuet
1571*2*Minus
1572*2*Mischief
1573*2*Misery
1574*2*Mist
1575*2*Misty
1576*2*Mittens
1577*2*Mo
1578*2*Mobius
1579*2*Modesta
1580*2*Mohair
1581*2*Mohawk
1582*2*Mojo
1583*2*Molasses
1584*2*Mole
1585*2*Molecule
1586*2*Monday
1587*2*Mongoose
1588*2*Monkey
1589*2*Monogram
1590*2*Montgomery
1591*2*Monty
1592*2*Moocher
1593*2*Moochie
1594*2*Moonbeam
1595*2*Moondancer
1596*2*Moondoggie
1597*2*Moonmist
1598*2*Moose
1599*2*Mooshoo
1600*2*Moptop
1601*2*Mork
1602*2*Morocco
1603*2*Mosaic
1604*2*Moscow
1605*2*Motley
1606*2*Moustache
1607*2*Moxie
1608*2*Mudpie
1609*2*Muffin
1610*2*Mulberry
1611*2*Mumbles
1612*2*Mumford
1613*2*Mumpy
1614*2*Munchkin
1615*2*Murphy
1616*2*Mushmouth
1617*2*Mushroom
1618*2*Mustang
1619*2*Mustard
1620*2*Mutt
1621*2*Muttzie
1622*2*Mylar
1623*2*Nacho
1624*2*Nameless
1625*2*Nardek
1626*2*Nashville
1627*2*Naugahyde
1628*2*Navel
1629*2*Naxos
1630*2*Nectarine
1631*2*Ned
1632*2*Needle
1633*2*Needler
1634*2*Needles
1635*2*Nehru
1636*2*Neon
1637*2*Neptune
1638*2*Network
1639*2*Neuron
1640*2*Neutron
1641*2*Nevada
1642*2*Newt
1643*2*Newton
1644*2*Niagara
1645*2*Nibbler
1646*2*Nibbles
1647*2*Nibbly
1648*2*Niccolo
1649*2*Nickel
1650*2*Nifty
1651*2*Niftykins
1652*2*Nightmare
1653*2*Nim
1654*2*Nimbus
1655*2*Nitro
1656*2*Nix
1657*2*Nixy
1658*2*Noisemaker
1659*2*Nomad
1660*2*Noname
1661*2*Noodles
1662*2*Nooly
1663*2*Norbie
#1664*2*Nosferatu
1665*2*Nostrildamus
1666*2*Nosy
1667*2*November
1668*2*Nugget
1669*2*Numbers
1670*2*Nutmeg
1671*2*Oasis
1672*2*Oatcake
1673*2*Oatmeal
1674*2*Oberon
1675*2*Oblong
1676*2*Oboe
1677*2*Obsession
1678*2*Octagon
1679*2*October
1680*2*Oddjob
1681*2*Odzanends
1682*2*Offbeat
1683*2*Ogee
1684*2*Ohio
1685*2*Oink Oink
1686*2*Oinker
1687*2*Okeedoke
1688*2*Okra
1689*2*Oleander
1690*2*Omega
1691*2*Omelet
1692*2*Onion
1693*2*Onionbreath
1694*2*Onionhead
1695*2*Ono
1696*2*Onomatopoeia
1697*2*Onyx
1698*2*Oozy
1699*2*Opaque
1700*2*Opossum
1701*2*Orangeade
1702*2*Orbit
1703*2*Orchid
1704*2*Oregano
1705*2*Original
1706*2*Oriole
1707*2*Orlando
1708*2*Oroonoko
1709*2*Orpheus
1710*2*Orville
1711*2*Oscar
1712*2*Osmosis
1713*2*Ostrich
1714*2*Outie
1715*2*Outlaw
1716*2*Outlet
1717*2*Outrageous
1718*2*Owl
1719*2*Ox
1720*2*Oyster
1721*2*Ozzie
1722*2*PJ
1723*2*Pacemaker
1724*2*Pachyderm
1725*2*Padlock
1726*2*Pagoda
1727*2*Paisley
1728*2*Paladin
1729*2*Palomino
1730*2*Panache
1731*2*Panda
1732*2*Pandemonium
1733*2*Panfried
1734*2*Pantaloon
1735*2*Panther
1736*2*Paprika
1737*2*Papyrus
1738*2*Parachute
1739*2*Paradise
1740*2*Parakeet
1741*2*Parallax
1742*2*Paris
1743*2*Parmesan
1744*2*Parsley
1745*2*Parsnip
1746*2*Pascal
1747*2*Pasternak
1748*2*Patches
1749*2*Patchouli
1750*2*Patchwork
1751*2*Patience
1752*2*Pauper
1753*2*Paws
1754*2*Peachy
1755*2*Peanut
1756*2*Pearl
1757*2*Pebbles
1758*2*Pecan
1759*2*Peck Peck
1760*2*Peepers
1761*2*Peewee
1762*2*Pegasus
1763*2*Pele
1764*2*Pendragon
1765*2*Penguin
1766*2*Peoria
1767*2*Pepper
1768*2*Peppermint
1769*2*Pepperoni
1770*2*Peppy
1771*2*Percival
1772*2*Periwinkle
1773*2*Peroxide
1774*2*Persephone
1775*2*Perseus
1776*2*Persnickety
1777*2*Pesto
1778*2*Pesty
1779*2*Petra
1780*2*Petros
1781*2*Pettibones
1782*2*Phantom
1783*2*Philadelphia
1784*2*Phoenix
1785*2*Phynotaprox
1786*2*Piano
1787*2*Piccolo
1788*2*Pickles
1789*2*Pickwick
1790*2*Pico
1791*2*Piecewise
#1792*2*Piglet
1793*2*Pigpen
1794*2*Pilaf
1795*2*Pimento
1796*2*Ping
1797*2*Ping Pong
1798*2*Pip
1799*2*Pipsqueak
1800*2*Pistachio
1801*2*Piston
1802*2*Pitabread
1803*2*Pixel
1804*2*Pizza
1805*2*Pizzazz
1806*2*Plato
1807*2*Plumb Bob
1808*2*Plumpy
1809*2*Plunko
1810*2*Pluto
1811*2*Pocket
1812*2*Poco
1813*2*Poe
1814*2*Pointer
1815*2*Pointy
1816*2*Pokey
1817*2*Polaris
1818*2*Polifax
1819*2*Pollywog
1820*2*Poltergeist
1821*2*Pom Pom
1822*2*Poofball
1823*2*Poofy
1824*2*Popcorn
1825*2*Poppy
1826*2*Porcupine
1827*2*Porkchop
1828*2*Portobello
1829*2*Postcard
1830*2*Potbelly
1831*2*Potpie
1832*2*Potzy
1833*2*Pouncer
1834*2*Powder
1835*2*Prancer
1836*2*Preston
1837*2*Pretzel
1838*2*Priscilla
1839*2*Prissy
1840*2*Procyon
1841*2*Prodigy
1842*2*Proton
1843*2*Providence
1844*2*Prowler
1845*2*Proxy
1846*2*Pudding
1847*2*Puddles
1848*2*Pudgy
1849*2*Puff
1850*2*Puffball
1851*2*Puffin
1852*2*Puffy
1853*2*Pugnose
1854*2*Pumpernickel
1855*2*Pumpkin
1856*2*Punch
1857*2*Punky
1858*2*Puree
1859*2*Purrfect
1860*2*Pygmy
1861*2*Pyrex
1862*2*Python
1863*2*Quack Quack
1864*2*Quagmire
1865*2*Quartz
1866*2*Quasar
1867*2*Quasi
1868*2*Queasy
1869*2*Quenby
1870*2*Quesadilla
1871*2*Quester
1872*2*Quetzal
1873*2*Quibbler
1874*2*Quicksand
1875*2*Quicksilver
1876*2*Quinn
1877*2*Quippy
1878*2*Quiqui
1879*2*Quirky
1880*2*Quixote
1881*2*Quizzical
1882*2*Quizzix
1883*2*Rabbit
1884*2*Raccoon
1885*2*Racecar
1886*2*Rachel
1887*2*Radar
1888*2*Radcliffe
1889*2*Radish
1890*2*Radium
1891*2*Radix
1892*2*Radman
1893*2*Raffle
1894*2*Ragamuffin
1895*2*Ragdoll
1896*2*Ragmop
1897*2*Rags
1898*2*Ragtime
1899*2*Ragweed
1900*2*Rainbow
1901*2*Rainstorm
1902*2*Rainwater
1903*2*Raisin
1904*2*Rambler
1905*2*Ramrod
1906*2*Ranger
1907*2*Rascal
1908*2*Raspberry
1909*2*Rathbone
1910*2*Rattler
1911*2*Raven
1912*2*Ravioli
1913*2*Rawhide
1914*2*Raymond
1915*2*Razzmatazz
1916*2*Reactor
1917*2*Recall
1918*2*Recycler
1919*2*Redwood
1920*2*Relay
1921*2*Relic
1922*2*Relish
1923*2*Renegade
1924*2*Repeat
1925*2*Rescuer
1926*2*Retread
1927*2*Rewind
1928*2*Rhapsody
1929*2*Rhinestone
1930*2*Rhino
1931*2*Rhymer
1932*2*Rhythm
1933*2*Ribbons
1934*2*Rickrack
1935*2*Ricochet
1936*2*Riddler
1937*2*Riddles
1938*2*Riffraff
1939*2*Rigatoni
1940*2*Righty
1941*2*Ringleader
1942*2*Ringo
1943*2*Riot
1944*2*Ripley
1945*2*Ripple
1946*2*Ripples
1947*2*Risky
1948*2*Ritz
1949*2*Ritzy
1950*2*Roamer
1951*2*Robin
1952*2*Rocco
1953*2*Rocket
1954*2*Rockhead
1955*2*Rockwell
1956*2*Rococo
1957*2*Rogue
1958*2*Rolex
1959*2*Rollo
1960*2*Roly Poly
1961*2*Ronno
1962*2*Rookie
1963*2*Rooster
1964*2*Roscoe
1965*2*Rosebug
1966*2*Rothchild
1967*2*Rowf
1968*2*Rowser
1969*2*Ruffles
1970*2*Ruggelah
1971*2*Ruggles
1972*2*Rumba
1973*2*Runaway
1974*2*Runt
1975*2*Rushmore
1976*2*Rusty
#1977*2*S
1978*2*Sabotage
1979*2*Safari
1980*2*Saga
1981*2*Saggy
1982*2*Sagittarius
1983*2*Sahara
1984*2*Salamander
1985*2*Salazar
1986*2*Salinger
1987*2*Salisbury
1988*2*Salsa
1989*2*Salty
1990*2*Sam
1991*2*Samba
1992*2*Sammy
1993*2*Samson
1994*2*Samurai
1995*2*Sandman
1996*2*Sandy
1997*2*Santana
1998*2*Santiago
1999*2*Sasha
2000*2*Sashimi
2001*2*Sasquatch
2002*2*Sassy
2003*2*Satchel
2004*2*Satire
2005*2*Saturn
2006*2*Saucer
2007*2*Saucy
2008*2*Sausage
2009*2*Savage
2010*2*Saxophone
2011*2*Scallop
2012*2*Scamp
2013*2*Scamper
2014*2*Scandal
2015*2*Scarecrow
2016*2*Scary
2017*2*Schlemmer
2018*2*Schmooze
2019*2*Schnook
2020*2*Schnookie
2021*2*Scone
2022*2*Scoops
2023*2*Scoot
2024*2*Scooter
2025*2*Scorpio
2026*2*Scorpion
2027*2*Scoundrel
2028*2*Scout
2029*2*Scrabble
2030*2*Scrambler
2031*2*Scrappy
2032*2*Scrooge
2033*2*Scrumptious
2034*2*Scupper
2035*2*Seagull
2036*2*Seismic
2037*2*Seltzer
2038*2*Seneca
2039*2*September
2040*2*Sepulveda
2041*2*Serengeti
2042*2*Shackles
2043*2*Shamrock
2044*2*Shangri La
2045*2*Sharky
2046*2*Shazam
2047*2*Sheba
2048*2*Shelby
2049*2*Sheldrake
2050*2*Shelley
2051*2*Shelton
2052*2*Shenanigan
2053*2*Shep
2054*2*Sherbet
2055*2*Sherlock
2056*2*Sherwood
2057*2*Shiny
2058*2*Shmunday
2059*2*Shoeless
2060*2*Shogun
2061*2*Shortcake
2062*2*Shortstack
2063*2*Shortstop
2064*2*Shorty
2065*2*Showboat
2066*2*Showoff
2067*2*Shredder
2068*2*Shrimpy
2069*2*Shylock
2070*2*Sideshow
2071*2*Sideways
2072*2*Sidney
2073*2*Silhouette
2074*2*Silverspoon
2075*2*Siren
2076*2*Skeet
2077*2*Skeeter
2078*2*Skelton
2079*2*Skidder
2080*2*Skidoo
2081*2*Skidsy
2082*2*Skinky
2083*2*Skipper
2084*2*Skippy
2085*2*Skunk
2086*2*Skunkmuffin
2087*2*Skunky
2088*2*Sky
2089*2*Skyrocket
2090*2*Slappy
2091*2*Slate
2092*2*Slick
2093*2*Slicker
2094*2*Slippers
2095*2*Sloth
2096*2*Slothful
2097*2*Slugger
2098*2*Sly
2099*2*Smarty
2100*2*Smartypants
2101*2*Smash
2102*2*Smasher
2103*2*Smilestone
2104*2*Smocks
2105*2*Smoke
2106*2*Smoky
2107*2*Smoocher
2108*2*Smoothie
2109*2*Smores
2110*2*Smug
2111*2*Snaggletooth
2112*2*Snapdragon
2113*2*Snappy
2114*2*Snaps
2115*2*Snarly
2116*2*Sneaker
2117*2*Sneakers
2118*2*Sneezer
2119*2*Sneezy
2120*2*Snickers
2121*2*Sniffer
2122*2*Sniffler
2123*2*Sniffles
2124*2*Sniffy
2125*2*Snooker
2126*2*Snookums
2127*2*Snooper
2128*2*Snoots
2129*2*Snooty
2130*2*Snowball
2131*2*Snowberry
2132*2*Snowbunny
2133*2*Snowcap
2134*2*Snowflake
2135*2*Snowpea
2136*2*Snowshoe
2137*2*Snowy
2138*2*Snuffles
2139*2*Snuffy
2140*2*Snugglepot
2141*2*Snuggles
2142*2*Socks
2143*2*Sodapop
2144*2*Soho
2145*2*Sojourner
2146*2*Solo
2147*2*Sonar
2148*2*Sorbet
2149*2*Souffle
2150*2*Soupbowl
2151*2*Soupy
2152*2*Sourball
2153*2*Sourdough
2154*2*Southpaw
2155*2*Spacey
2156*2*Spades
2157*2*Spaghetti
2158*2*Spam
2159*2*Sparkler
2160*2*Sparkplug
2161*2*Sparky
2162*2*Sparrow
2163*2*Sparx
2164*2*Speck
2165*2*Speckles
2166*2*Spectro
2167*2*Spectrum
2168*2*Speedy
2169*2*Spex
2170*2*Sphinx
2171*2*Spicy
2172*2*Spider
2173*2*Spiffy
2174*2*Spike
2175*2*Spiky
2176*2*Spinach
2177*2*Spinner
2178*2*Spiral
2179*2*Spirit
2180*2*Spiro
2181*2*Spitfire
2182*2*Splash
2183*2*Splashy
2184*2*Spoiler
2185*2*Spoof
2186*2*Spooky
2187*2*Sport
2188*2*Sporty
2189*2*Spot
2190*2*Spots
2191*2*Spotty
2192*2*Spring
2193*2*Springbok
2194*2*Springy
2195*2*Sprinkle
2196*2*Sprinkles
2197*2*Spud
2198*2*Spunky
2199*2*Sputnik
2200*2*Spy
2201*2*Squash
2202*2*Squeak
2203*2*Squeaky
2204*2*Squid
2205*2*Squiddly
2206*2*Squidface
2207*2*Squiggle
2208*2*Squiggly
2209*2*Squishy
2210*2*Stalactite
2211*2*Stalagmite
2212*2*Starbuck
2213*2*Stardust
2214*2*Starfire
2215*2*Stargazer
2216*2*Starlight
2217*2*Steamy
2218*2*Steely
2219*2*Stereopsis
2220*2*Sterling
2221*2*Stethoscope
2222*2*Stetson
2223*2*Stewart
2224*2*Sticky
2225*2*Stinger
2226*2*Stingy
2227*2*Stinkwell
2228*2*Stinky
2229*2*Stony
2230*2*Storm
2231*2*Stormy
2232*2*Stowaway
2233*2*Strange
2234*2*Strawberry
2235*2*Strep
2236*2*Stretch
2237*2*Stretchy
2238*2*Stripe
2239*2*Striper
2240*2*Stripes
2241*2*Strudel
2242*2*Stubby
2243*2*Stumpy
2244*2*Sudsy
2245*2*Suede
2246*2*Sugar
2247*2*Sugarbaby
2248*2*Sulfur
2249*2*Sultan
2250*2*Sumo
2251*2*Sundance
2252*2*Sundown
2253*2*Sunflower
2254*2*Sunset
2255*2*Superdoodle
2256*2*Surprise
2257*2*Sushi
2258*2*Swabbie
2259*2*Swampy
2260*2*Sweathog
2261*2*Sweetheart
2262*2*Sweetie Pie
2263*2*Sweetness
2264*2*Sweets
2265*2*Swift
2266*2*Swifty
2267*2*Swizzle
2268*2*Sylvester
2269*2*Synergy
2270*2*Syrup
2271*2*TNT
2272*2*Tablespoon
2273*2*Tabloid
2274*2*Taboo
2275*2*Tacky
2276*2*Taco
2277*2*Tacos
2278*2*Tadpole
2279*2*Taffeta
2280*2*Taffy
2281*2*Tagalong
2282*2*Tags
2283*2*Tahiti
2284*2*Taj
2285*2*Takeout
2286*2*Talisman
2287*2*Tallahassee
2288*2*Tallulah
2289*2*Talon
2290*2*Tamale
2291*2*Tambourine
2292*2*Tandem
2293*2*Tanglewood
2294*2*Tango
2295*2*Tank
2296*2*Tanker
2297*2*Tanner
2298*2*Tantrum
2299*2*Tapestry
2300*2*Tapioca
2301*2*Tapper
2302*2*Tarantula
2303*2*Tardy
2304*2*Target
2305*2*Tarkington
2306*2*Tartan
2307*2*Tasty
2308*2*Tatsu
2309*2*Tatters
2310*2*Tattletale
2311*2*Tattoo
2312*2*Taurus
2313*2*Tawny
2314*2*Taxbreak
2315*2*Taxcut
2316*2*Taxi
2317*2*Taxicab
2318*2*Teacup
2319*2*Teaky
2320*2*Teapot
2321*2*Teaspoon
2322*2*Technicolor
2323*2*Teddy
2324*2*Teehee
2325*2*Teevee
2326*2*Telegram
2327*2*Telepath
2328*2*Telescope
2329*2*Telex
2330*2*Tempest
2331*2*Templeton
2332*2*Tempo
2333*2*Tenderfoot
2334*2*Tennessee
2335*2*Tennisball
2336*2*Tennyson
2337*2*Terence
2338*2*Teriyaki
2339*2*Terror
2340*2*Texas
2341*2*Textbook
2342*2*Thackeray
2343*2*Thaddeus
2344*2*Tharp
2345*2*Tharpo
2346*2*Theoretical
2347*2*Theory
2348*2*Thermos
2349*2*Thickie
2350*2*Thimble
2351*2*Thistle
2352*2*Thorny
2353*2*Thriller
2354*2*Thud
2355*2*Thumbtack
2356*2*Thunder
2357*2*Thunderbird
2358*2*Thurgood
2359*2*Thursday
2360*2*Thyme
2361*2*Tickets
2362*2*Tidbit
2363*2*Tilly
2364*2*Tilted
2365*2*Timbuktu
2366*2*Tinkles
2367*2*Tinsel
2368*2*Tintin
2369*2*Tiny
2370*2*Tipperary
2371*2*Tipsy
2372*2*Titanium
2373*2*Tizzy
2374*2*Toad
2375*2*Toadstool
2376*2*Toady
2377*2*Toaster
2378*2*Toby
2379*2*Toco
2380*2*Toffee
2381*2*Tofu
2382*2*Tokyo
2383*2*Toledo
2384*2*Tomato
2385*2*Tomorrow
2386*2*Tonic
2387*2*Toodleoo
2388*2*Toodles
2389*2*Toot
2390*2*Tooter
2391*2*Toothsome
2392*2*Toothy
2393*2*Tootles
2394*2*Toots
2395*2*Topeka
2396*2*Topper
2397*2*Topsy
2398*2*Tornado
2399*2*Torpedo
2400*2*Tortellini
2401*2*Tortoni
2402*2*Totem
2403*2*Toto
2404*2*Totsy
2405*2*Toucan
2406*2*Toupee
2407*2*Toy
2408*2*Toybox
2409*2*Tracer
2410*2*Trailblazer
2411*2*Tramp
2412*2*Trampoline
2413*2*Trancer
2414*2*Trapper
2415*2*Traveller
2416*2*Treasure
2417*2*Treetop
2418*2*Trekker
2419*2*Trickster
2420*2*Trickstick
2421*2*Tricky
2422*2*Trident
2423*2*Trilogy
2424*2*Trinidad
2425*2*Trinket
2426*2*Trio
2427*2*Tripper
2428*2*Troll
2429*2*Tropix
2430*2*Trouble
2431*2*Trout
2432*2*Trumpet
2433*2*Trusty
2434*2*Tsunami
2435*2*Tuba
2436*2*Tubbo
2437*2*Tubby
2438*2*Tuesday
2439*2*Tuffy
2440*2*Tugboat
2441*2*Tumblebumble
2442*2*Tumbler
2443*2*Tumbleweed
2444*2*Tunafish
2445*2*Tundra
2446*2*Turbo
2447*2*Turkey
2448*2*Turnip
2449*2*Turtleneck
2450*2*Tutu
2451*2*Twaddler
2452*2*Twain
2453*2*Tweezer
2454*2*Twerp
2455*2*Twiggs
2456*2*Twilight
2457*2*Twinkle
2458*2*Twirler
2459*2*Twister
2460*2*Twittery
2461*2*Tycoon
2462*2*Typhoon
2463*2*UFO
2464*2*Ubu
2465*2*Ugly
2466*2*Ukelele
2467*2*Ultimate
2468*2*Ultra
2469*2*Ultrasonic
2470*2*Ultraviolet
2471*2*Umber
2472*2*Umbrella
2473*2*Umpire
#2474*2*Underdog
2475*2*Underfoot
2476*2*Underwood
2477*2*Unicorn
2478*2*Unique
2479*2*Upbeat
2480*2*Upshot
2481*2*Upside
2482*2*Upstart
2483*2*Uptight
2484*2*Urchin
2485*2*Ursula
2486*2*Utoo
2487*2*Utopia
2488*2*Vacuum
2489*2*Vagabond
2490*2*Valentine
2491*2*Valerie
2492*2*Valiant
2493*2*Vamp
2494*2*Vanderbilt
2495*2*Vanilla
2496*2*Vanity
2497*2*Vaudeville
2498*2*Vegas
2499*2*Velvet
2500*2*Venezuela
2501*2*Vermicelli
2502*2*Vermont
2503*2*Vern
2504*2*Vernon
2505*2*Vertigo
2506*2*Vexy
2507*2*Vibes
2508*2*Victrola
2509*2*Video
2510*2*Viking
2511*2*Vinaigrette
2512*2*Vintage
2513*2*Viper
2514*2*Virtuosity
2515*2*Vivian
2516*2*Voodoo
2517*2*Vulcan
2518*2*Vulture
2519*2*Wabble
2520*2*Wabbler
2521*2*Wacky
2522*2*Waddle
2523*2*Waddler
2524*2*Waddles
2525*2*Wafer
2526*2*Waffle
2527*2*Waffler
2528*2*Waffles
2529*2*Wag
2530*2*Waggles
2531*2*Wags
2532*2*Wagtail
2533*2*Wahoo
2534*2*Waikiki
2535*2*Wallaby
2536*2*Wallaroo
2537*2*Walnut
2538*2*Walnuts
2539*2*Walrus
2540*2*Waltzy
2541*2*Wanderer
2542*2*Warthog
2543*2*Warty
2544*2*Wasco
2545*2*Waterberry
2546*2*Watergate
2547*2*Wavy
2548*2*Waxy
2549*2*Weasel
2550*2*Weaver
2551*2*Webster
2552*2*Wedgewood
2553*2*Wedgie
2554*2*Wednesday
2555*2*Weekend
2556*2*Weepy
2557*2*Weezer
2558*2*Weezie
2559*2*Welcome
2560*2*Wellington
2561*2*Wembly
2562*2*Wendy
2563*2*Wesley
2564*2*Whatchamacallit
2565*2*Whatever
2566*2*Wheatcake
2567*2*Wheedler
2568*2*Whim
2569*2*Whimsy
2570*2*Whipple
2571*2*Whirlwind
2572*2*Whisker
2573*2*Whiskers
2574*2*Whisper
2575*2*Whistler
2576*2*Whistlestop
2577*2*Whittaker
2578*2*Whiz
2579*2*Whizzer
2580*2*Wholesale
2581*2*Whoopdedoo
2582*2*Whoopie
2583*2*Wiggle
2584*2*Wiggler
2585*2*Wiggles
2586*2*Wiggly
2587*2*Wildberry
2588*2*Wildcat
2589*2*Wildflower
2590*2*Wildwood
2591*2*Willy
2592*2*Wily
2593*2*Wimbledon
2594*2*Wimpster
2595*2*Wimpy
2596*2*Windjammer
2597*2*Wing
2598*2*Wink
2599*2*Winker
2600*2*Winkle
2601*2*Winkles
2602*2*Winky
2603*2*Winx
2604*2*Wiretap
2605*2*Wisecrack
2606*2*Wispy
2607*2*Wisteria
2608*2*Wizard
2609*2*Wizkid
2610*2*Wobble
2611*2*Wobbler
2612*2*Wobbles
2613*2*Wobbly
2614*2*Wolverine
2615*2*Wonder
2616*2*Wonderbunny
2617*2*Wonton
2618*2*Wooble
2619*2*Woobles
2620*2*Woobly
2621*2*Woodchuck
2622*2*Woodstock
2623*2*Woodwind
2624*2*Woof Woof
2625*2*Woofer
2626*2*Woolly
2627*2*Woolworth
2628*2*Wordsworth
2629*2*Worm
2630*2*Wormy
2631*2*Wrex
2632*2*Wriggler
2633*2*Wriggles
2634*2*Wriggly
2635*2*Wrinkle
2636*2*Wrinkler
2637*2*Wrinkles
2638*2*Wuggums
2639*2*Wumpus
2640*2*Wuzzie
2641*2*Wyoming
#2642*2*X
2643*2*Xanadu
2644*2*Xenobia
2645*2*Xifto
2646*2*Xinx
2647*2*Xinxu
2648*2*Xippy
2649*2*Xowie
2650*2*Xoxo
2651*2*Xoxxy
2652*2*Xpresso
2653*2*Yahoo
2654*2*Yammie
2655*2*Yancy
2656*2*Yappy
2657*2*Yardstick
2658*2*Yasu
2659*2*Yeasty
2660*2*Yellowstone
2661*2*Yelper
2662*2*Yertle
2663*2*Yesterday
2664*2*Yeti
2665*2*Yippie
2666*2*Yodeler
2667*2*Yoga
2668*2*Yoko
2669*2*Yonder
2670*2*Yonkers
2671*2*Yorty
2672*2*Yosemite
2673*2*Yukon
2674*2*Yummy
2675*2*Yutu
2676*2*Yvonne
2677*2*Zack
2678*2*Zadok
2679*2*Zaftig
2680*2*Zaire
2681*2*Zapata
2682*2*Zappa
2683*2*Zazen
2684*2*Zebu
2685*2*Zebulon
2686*2*Zen
2687*2*Zenith
2688*2*Zenzen
2689*2*Zepellin
2690*2*Zephyr
2691*2*Zeppo
2692*2*Zero
2693*2*Zesty
2694*2*Zigzag
2695*2*Zilch
2696*2*Zillion
2697*2*Zing
2698*2*Zinger
2699*2*Zingy
2700*2*Zipcode
2701*2*Zircon
2702*2*Zodiac
2703*2*Zoltan
2704*2*Zonk
2705*2*Zoo
2706*2*Zooble
2707*2*Zoom
2708*2*Zoomer
2709*2*Zoomy
2710*2*Zowie
2711*2*Zucchini
2712*2*Zylon'''
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/pets/PetNamesEnglish.py
|
Python
|
apache-2.0
| 38,924
|
[
"Bowtie",
"ESPResSo",
"Galaxy",
"Jaguar",
"MOE",
"MOOSE",
"NEURON"
] |
67b8bd3d3d09e8522b5bfbfb9635fb5f98d8f2d2d952d6820d7eb872f7f97478
|
"""
Save per-interface and per-feed configuration information.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os
from xml.dom import minidom, XMLNS_NAMESPACE
from zeroinstall.support import basedir
from zeroinstall.injector import model
from zeroinstall.injector.namespaces import config_site, config_prog, XMLNS_IFACE
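# Helper: write the DOM to a temporary file in the destination directory and
# then rename it into place, so an interrupted save never leaves a truncated
# config file behind.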
def _atomic_save(doc, parent, uri):
import tempfile
tmp_fd, tmp_name = tempfile.mkstemp(dir = parent)
try:
tmp_file = os.fdopen(tmp_fd, 'w')
doc.writexml(tmp_file, addindent = " ", newl = '\n')
tmp_file.close()
path = os.path.join(parent, model._pretty_escape(uri))
os.rename(tmp_name, path)
except:
os.unlink(tmp_name)
raise
def save_feed(feed):
"""Save information about a feed. Currently, this is the last_checked time and any user-set stability ratings.
@since: 0.49"""
feeds = basedir.save_config_path(config_site, config_prog, 'feeds')
impl = minidom.getDOMImplementation()
doc = impl.createDocument(XMLNS_IFACE, 'feed-preferences', None)
root = doc.documentElement
root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns', XMLNS_IFACE)
if feed.last_checked:
root.setAttribute('last-checked', str(feed.last_checked))
impls = feed.implementations.values()
impls.sort()
for impl in impls:
if impl.user_stability:
node = doc.createElementNS(XMLNS_IFACE, 'implementation')
root.appendChild(node)
node.setAttribute('user-stability', str(impl.user_stability))
node.setAttribute('id', impl.id)
_atomic_save(doc, feeds, feed.url)
def save_interface(interface):
user_overrides = basedir.save_config_path(config_site, config_prog, 'interfaces')
impl = minidom.getDOMImplementation()
doc = impl.createDocument(XMLNS_IFACE, 'interface-preferences', None)
root = doc.documentElement
root.setAttributeNS(XMLNS_NAMESPACE, 'xmlns', XMLNS_IFACE)
root.setAttribute('uri', interface.uri)
if interface.stability_policy:
root.setAttribute('stability-policy', str(interface.stability_policy))
for feed in interface.extra_feeds:
if feed.user_override:
elem = doc.createElementNS(XMLNS_IFACE, 'feed')
root.appendChild(elem)
elem.setAttribute('src', feed.uri)
if feed.arch:
elem.setAttribute('arch', feed.arch)
_atomic_save(doc, user_overrides, interface.uri)
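# Note (descriptive, not from the original source): with the usual XDG defaults
# these files typically end up under ~/.config/0install.net/injector/feeds/ and
# ~/.config/0install.net/injector/interfaces/, one file per escaped URI.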
|
timdiels/zeroinstall
|
zeroinstall/injector/writer.py
|
Python
|
lgpl-2.1
| 2,305
|
[
"VisIt"
] |
93c93c9df4853d6945576bcf7205cee99b0e1e0164afc650842c49f938b7ee10
|
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Hidetsugu's Second Rite")
def hidetsugus_second_rite(card, abilities):
def hidetsugus_second_rite():
return AbilityNotImplemented
return hidetsugus_second_rite,
@card("Kitsune Dawnblade")
def kitsune_dawnblade(card, abilities):
def kitsune_dawnblade():
return AbilityNotImplemented
def kitsune_dawnblade():
return AbilityNotImplemented
return kitsune_dawnblade, kitsune_dawnblade,
@card("Measure of Wickedness")
def measure_of_wickedness(card, abilities):
def measure_of_wickedness():
return AbilityNotImplemented
def measure_of_wickedness():
return AbilityNotImplemented
return measure_of_wickedness, measure_of_wickedness,
@card("Pithing Needle")
def pithing_needle(card, abilities):
def pithing_needle():
return AbilityNotImplemented
def pithing_needle():
return AbilityNotImplemented
return pithing_needle, pithing_needle,
@card("Kuon, Ogre Ascendant")
def kuon_ogre_ascendant(card, abilities):
def kuon_ogre_ascendant():
return AbilityNotImplemented
def kuon_ogre_ascendant():
return AbilityNotImplemented
return kuon_ogre_ascendant, kuon_ogre_ascendant,
@card("Kaho, Minamo Historian")
def kaho_minamo_historian(card, abilities):
def kaho_minamo_historian():
return AbilityNotImplemented
def kaho_minamo_historian():
return AbilityNotImplemented
return kaho_minamo_historian, kaho_minamo_historian,
@card("Inner-Chamber Guard")
def innerchamber_guard(card, abilities):
def innerchamber_guard():
return AbilityNotImplemented
return innerchamber_guard,
@card("Nikko-Onna")
def nikkoonna(card, abilities):
def nikkoonna():
return AbilityNotImplemented
def nikkoonna():
return AbilityNotImplemented
return nikkoonna, nikkoonna,
@card("Oppressive Will")
def oppressive_will(card, abilities):
def oppressive_will():
return AbilityNotImplemented
return oppressive_will,
@card("Soramaro, First to Dream")
def soramaro_first_to_dream(card, abilities):
def soramaro_first_to_dream():
return AbilityNotImplemented
def soramaro_first_to_dream():
return AbilityNotImplemented
def soramaro_first_to_dream():
return AbilityNotImplemented
return soramaro_first_to_dream, soramaro_first_to_dream, soramaro_first_to_dream,
@card("Kashi-Tribe Elite")
def kashitribe_elite(card, abilities):
def kashitribe_elite():
return AbilityNotImplemented
def kashitribe_elite():
return AbilityNotImplemented
return kashitribe_elite, kashitribe_elite,
@card("Arashi, the Sky Asunder")
def arashi_the_sky_asunder(card, abilities):
def arashi_the_sky_asunder():
return AbilityNotImplemented
def arashi_the_sky_asunder():
return AbilityNotImplemented
return arashi_the_sky_asunder, arashi_the_sky_asunder,
@card("Rune-Tail's Essence")
def runetails_essence(card, abilities):
def runetails_essence():
return AbilityNotImplemented
def runetails_essence():
return AbilityNotImplemented
return runetails_essence, runetails_essence,
@card("Captive Flame")
def captive_flame(card, abilities):
def captive_flame():
return AbilityNotImplemented
return captive_flame,
@card("Shinen of Life's Roar")
def shinen_of_lifes_roar(card, abilities):
def shinen_of_lifes_roar():
return AbilityNotImplemented
def shinen_of_lifes_roar():
return AbilityNotImplemented
return shinen_of_lifes_roar, shinen_of_lifes_roar,
@card("Oboro Envoy")
def oboro_envoy(card, abilities):
def oboro_envoy():
return AbilityNotImplemented
def oboro_envoy():
return AbilityNotImplemented
return oboro_envoy, oboro_envoy,
@card("Shifting Borders")
def shifting_borders(card, abilities):
def shifting_borders():
return AbilityNotImplemented
def shifting_borders():
return AbilityNotImplemented
return shifting_borders, shifting_borders,
@card("Presence of the Wise")
def presence_of_the_wise(card, abilities):
def presence_of_the_wise():
return AbilityNotImplemented
return presence_of_the_wise,
@card("Curtain of Light")
def curtain_of_light(card, abilities):
def curtain_of_light():
return AbilityNotImplemented
def curtain_of_light():
return AbilityNotImplemented
def curtain_of_light():
return AbilityNotImplemented
return curtain_of_light, curtain_of_light, curtain_of_light,
@card("Endless Swarm")
def endless_swarm(card, abilities):
def endless_swarm():
return AbilityNotImplemented
def endless_swarm():
return AbilityNotImplemented
return endless_swarm, endless_swarm,
@card("Sokenzan Spellblade")
def sokenzan_spellblade(card, abilities):
def sokenzan_spellblade():
return AbilityNotImplemented
def sokenzan_spellblade():
return AbilityNotImplemented
return sokenzan_spellblade, sokenzan_spellblade,
@card("Hand of Cruelty")
def hand_of_cruelty(card, abilities):
def hand_of_cruelty():
return AbilityNotImplemented
def hand_of_cruelty():
return AbilityNotImplemented
return hand_of_cruelty, hand_of_cruelty,
@card("Neverending Torment")
def neverending_torment(card, abilities):
def neverending_torment():
return AbilityNotImplemented
def neverending_torment():
return AbilityNotImplemented
return neverending_torment, neverending_torment,
@card("Sekki, Seasons' Guide")
def sekki_seasons_guide(card, abilities):
def sekki_seasons_guide():
return AbilityNotImplemented
def sekki_seasons_guide():
return AbilityNotImplemented
def sekki_seasons_guide():
return AbilityNotImplemented
return sekki_seasons_guide, sekki_seasons_guide, sekki_seasons_guide,
@card("Kagemaro's Clutch")
def kagemaros_clutch(card, abilities):
def kagemaros_clutch():
return AbilityNotImplemented
def kagemaros_clutch():
return AbilityNotImplemented
return kagemaros_clutch, kagemaros_clutch,
@card("Shinen of Fear's Chill")
def shinen_of_fears_chill(card, abilities):
def shinen_of_fears_chill():
return AbilityNotImplemented
def shinen_of_fears_chill():
return AbilityNotImplemented
return shinen_of_fears_chill, shinen_of_fears_chill,
@card("Adamaro, First to Desire")
def adamaro_first_to_desire(card, abilities):
def adamaro_first_to_desire():
return AbilityNotImplemented
return adamaro_first_to_desire,
@card("Celestial Kirin")
def celestial_kirin(card, abilities):
def celestial_kirin():
return AbilityNotImplemented
def celestial_kirin():
return AbilityNotImplemented
return celestial_kirin, celestial_kirin,
@card("Ideas Unbound")
def ideas_unbound(card, abilities):
def ideas_unbound():
return AbilityNotImplemented
return ideas_unbound,
@card("Godo's Irregulars")
def godos_irregulars(card, abilities):
def godos_irregulars():
return AbilityNotImplemented
return godos_irregulars,
@card("Kitsune Loreweaver")
def kitsune_loreweaver(card, abilities):
def kitsune_loreweaver():
return AbilityNotImplemented
return kitsune_loreweaver,
@card("Promise of Bunrei")
def promise_of_bunrei(card, abilities):
def promise_of_bunrei():
return AbilityNotImplemented
return promise_of_bunrei,
@card("Nightsoil Kami")
def nightsoil_kami(card, abilities):
def nightsoil_kami():
return AbilityNotImplemented
return nightsoil_kami,
@card("Kagemaro, First to Suffer")
def kagemaro_first_to_suffer(card, abilities):
def kagemaro_first_to_suffer():
return AbilityNotImplemented
def kagemaro_first_to_suffer():
return AbilityNotImplemented
return kagemaro_first_to_suffer, kagemaro_first_to_suffer,
@card("Homura, Human Ascendant")
def homura_human_ascendant(card, abilities):
def homura_human_ascendant():
return AbilityNotImplemented
def homura_human_ascendant():
return AbilityNotImplemented
def homura_human_ascendant():
return AbilityNotImplemented
return homura_human_ascendant, homura_human_ascendant, homura_human_ascendant,
@card("Erayo's Essence")
def erayos_essence(card, abilities):
def erayos_essence():
return AbilityNotImplemented
def erayos_essence():
return AbilityNotImplemented
return erayos_essence, erayos_essence,
@card("Eternal Dominion")
def eternal_dominion(card, abilities):
def eternal_dominion():
return AbilityNotImplemented
def eternal_dominion():
return AbilityNotImplemented
return eternal_dominion, eternal_dominion,
@card("Kami of the Tended Garden")
def kami_of_the_tended_garden(card, abilities):
def kami_of_the_tended_garden():
return AbilityNotImplemented
def kami_of_the_tended_garden():
return AbilityNotImplemented
return kami_of_the_tended_garden, kami_of_the_tended_garden,
@card("Oboro, Palace in the Clouds")
def oboro_palace_in_the_clouds(card, abilities):
def oboro_palace_in_the_clouds():
return AbilityNotImplemented
def oboro_palace_in_the_clouds():
return AbilityNotImplemented
return oboro_palace_in_the_clouds, oboro_palace_in_the_clouds,
@card("Rally the Horde")
def rally_the_horde(card, abilities):
def rally_the_horde():
return AbilityNotImplemented
return rally_the_horde,
@card("Sasaya, Orochi Ascendant")
def sasaya_orochi_ascendant(card, abilities):
def sasaya_orochi_ascendant():
return AbilityNotImplemented
def sasaya_orochi_ascendant():
return AbilityNotImplemented
return sasaya_orochi_ascendant, sasaya_orochi_ascendant,
@card("Minamo Scrollkeeper")
def minamo_scrollkeeper(card, abilities):
def minamo_scrollkeeper():
return AbilityNotImplemented
def minamo_scrollkeeper():
return AbilityNotImplemented
return minamo_scrollkeeper, minamo_scrollkeeper,
@card("Footsteps of the Goryo")
def footsteps_of_the_goryo(card, abilities):
def footsteps_of_the_goryo():
return AbilityNotImplemented
return footsteps_of_the_goryo,
@card("Cut the Earthly Bond")
def cut_the_earthly_bond(card, abilities):
def cut_the_earthly_bond():
return AbilityNotImplemented
return cut_the_earthly_bond,
@card("Dosan's Oldest Chant")
def dosans_oldest_chant(card, abilities):
def dosans_oldest_chant():
return AbilityNotImplemented
def dosans_oldest_chant():
return AbilityNotImplemented
return dosans_oldest_chant, dosans_oldest_chant,
@card("Ghost-Lit Warder")
def ghostlit_warder(card, abilities):
def ghostlit_warder():
return AbilityNotImplemented
def ghostlit_warder():
return AbilityNotImplemented
return ghostlit_warder, ghostlit_warder,
@card("Fiddlehead Kami")
def fiddlehead_kami(card, abilities):
def fiddlehead_kami():
return AbilityNotImplemented
return fiddlehead_kami,
@card("Dense Canopy")
def dense_canopy(card, abilities):
def dense_canopy():
return AbilityNotImplemented
return dense_canopy,
@card("Elder Pine of Jukai")
def elder_pine_of_jukai(card, abilities):
def elder_pine_of_jukai():
return AbilityNotImplemented
def elder_pine_of_jukai():
return AbilityNotImplemented
return elder_pine_of_jukai, elder_pine_of_jukai,
@card("Iname as One")
def iname_as_one(card, abilities):
def iname_as_one():
return AbilityNotImplemented
def iname_as_one():
return AbilityNotImplemented
return iname_as_one, iname_as_one,
@card("Rune-Tail, Kitsune Ascendant")
def runetail_kitsune_ascendant(card, abilities):
def runetail_kitsune_ascendant():
return AbilityNotImplemented
def runetail_kitsune_ascendant():
return AbilityNotImplemented
return runetail_kitsune_ascendant, runetail_kitsune_ascendant,
@card("Gnat Miser")
def gnat_miser(card, abilities):
def gnat_miser():
return AbilityNotImplemented
return gnat_miser,
@card("Akki Underling")
def akki_underling(card, abilities):
def akki_underling():
return AbilityNotImplemented
return akki_underling,
@card("Kami of Empty Graves")
def kami_of_empty_graves(card, abilities):
def kami_of_empty_graves():
return AbilityNotImplemented
return kami_of_empty_graves,
@card("Kami of the Crescent Moon")
def kami_of_the_crescent_moon(card, abilities):
def kami_of_the_crescent_moon():
return AbilityNotImplemented
return kami_of_the_crescent_moon,
@card("Twincast")
def twincast(card, abilities):
def twincast():
return AbilityNotImplemented
return twincast,
@card("Deathknell Kami")
def deathknell_kami(card, abilities):
def deathknell_kami():
return AbilityNotImplemented
def deathknell_kami():
return AbilityNotImplemented
def deathknell_kami():
return AbilityNotImplemented
return deathknell_kami, deathknell_kami, deathknell_kami,
@card("Sakashima the Impostor")
def sakashima_the_impostor(card, abilities):
def sakashima_the_impostor():
return AbilityNotImplemented
return sakashima_the_impostor,
@card("Promised Kannushi")
def promised_kannushi(card, abilities):
def promised_kannushi():
return AbilityNotImplemented
return promised_kannushi,
@card("Homura's Essence")
def homuras_essence(card, abilities):
def homuras_essence():
return AbilityNotImplemented
def homuras_essence():
return AbilityNotImplemented
return homuras_essence, homuras_essence,
@card("Sokenzan Renegade")
def sokenzan_renegade(card, abilities):
def sokenzan_renegade():
return AbilityNotImplemented
def sokenzan_renegade():
return AbilityNotImplemented
return sokenzan_renegade, sokenzan_renegade,
@card("Overwhelming Intellect")
def overwhelming_intellect(card, abilities):
def overwhelming_intellect():
return AbilityNotImplemented
return overwhelming_intellect,
@card("Descendant of Kiyomaro")
def descendant_of_kiyomaro(card, abilities):
def descendant_of_kiyomaro():
return AbilityNotImplemented
return descendant_of_kiyomaro,
@card("Dreamcatcher")
def dreamcatcher(card, abilities):
def dreamcatcher():
return AbilityNotImplemented
return dreamcatcher,
@card("Kemuri-Onna")
def kemurionna(card, abilities):
def kemurionna():
return AbilityNotImplemented
def kemurionna():
return AbilityNotImplemented
return kemurionna, kemurionna,
@card("Oni of Wild Places")
def oni_of_wild_places(card, abilities):
def oni_of_wild_places():
return AbilityNotImplemented
def oni_of_wild_places():
return AbilityNotImplemented
return oni_of_wild_places, oni_of_wild_places,
@card("One with Nothing")
def one_with_nothing(card, abilities):
def one_with_nothing():
return AbilityNotImplemented
return one_with_nothing,
@card("Soratami Cloud Chariot")
def soratami_cloud_chariot(card, abilities):
def soratami_cloud_chariot():
return AbilityNotImplemented
def soratami_cloud_chariot():
return AbilityNotImplemented
return soratami_cloud_chariot, soratami_cloud_chariot,
@card("Spiraling Embers")
def spiraling_embers(card, abilities):
def spiraling_embers():
return AbilityNotImplemented
return spiraling_embers,
@card("Akuta, Born of Ash")
def akuta_born_of_ash(card, abilities):
def akuta_born_of_ash():
return AbilityNotImplemented
def akuta_born_of_ash():
return AbilityNotImplemented
return akuta_born_of_ash, akuta_born_of_ash,
@card("Feral Lightning")
def feral_lightning(card, abilities):
def feral_lightning():
return AbilityNotImplemented
return feral_lightning,
@card("Trusted Advisor")
def trusted_advisor(card, abilities):
def trusted_advisor():
return AbilityNotImplemented
def trusted_advisor():
return AbilityNotImplemented
return trusted_advisor, trusted_advisor,
@card("Deathmask Nezumi")
def deathmask_nezumi(card, abilities):
def deathmask_nezumi():
return AbilityNotImplemented
return deathmask_nezumi,
@card("Path of Anger's Flame")
def path_of_angers_flame(card, abilities):
def path_of_angers_flame():
return AbilityNotImplemented
return path_of_angers_flame,
@card("Rushing-Tide Zubera")
def rushingtide_zubera(card, abilities):
def rushingtide_zubera():
return AbilityNotImplemented
return rushingtide_zubera,
@card("Descendant of Masumaro")
def descendant_of_masumaro(card, abilities):
def descendant_of_masumaro():
return AbilityNotImplemented
return descendant_of_masumaro,
@card("Kitsune Bonesetter")
def kitsune_bonesetter(card, abilities):
def kitsune_bonesetter():
return AbilityNotImplemented
return kitsune_bonesetter,
@card("Ashes of the Fallen")
def ashes_of_the_fallen(card, abilities):
def ashes_of_the_fallen():
return AbilityNotImplemented
def ashes_of_the_fallen():
return AbilityNotImplemented
return ashes_of_the_fallen, ashes_of_the_fallen,
@card("Ghost-Lit Nourisher")
def ghostlit_nourisher(card, abilities):
def ghostlit_nourisher():
return AbilityNotImplemented
def ghostlit_nourisher():
return AbilityNotImplemented
return ghostlit_nourisher, ghostlit_nourisher,
@card("Eiganjo Free-Riders")
def eiganjo_freeriders(card, abilities):
def eiganjo_freeriders():
return AbilityNotImplemented
def eiganjo_freeriders():
return AbilityNotImplemented
return eiganjo_freeriders, eiganjo_freeriders,
@card("Kiyomaro, First to Stand")
def kiyomaro_first_to_stand(card, abilities):
def kiyomaro_first_to_stand():
return AbilityNotImplemented
def kiyomaro_first_to_stand():
return AbilityNotImplemented
def kiyomaro_first_to_stand():
return AbilityNotImplemented
return kiyomaro_first_to_stand, kiyomaro_first_to_stand, kiyomaro_first_to_stand,
@card("Evermind")
def evermind(card, abilities):
def evermind():
return AbilityNotImplemented
def evermind():
return AbilityNotImplemented
def evermind():
return AbilityNotImplemented
def evermind():
return AbilityNotImplemented
return evermind, evermind, evermind, evermind,
@card("Bounteous Kirin")
def bounteous_kirin(card, abilities):
def bounteous_kirin():
return AbilityNotImplemented
def bounteous_kirin():
return AbilityNotImplemented
return bounteous_kirin, bounteous_kirin,
@card("Enduring Ideal")
def enduring_ideal(card, abilities):
def enduring_ideal():
return AbilityNotImplemented
def enduring_ideal():
return AbilityNotImplemented
return enduring_ideal, enduring_ideal,
@card("Ivory Crane Netsuke")
def ivory_crane_netsuke(card, abilities):
def ivory_crane_netsuke():
return AbilityNotImplemented
return ivory_crane_netsuke,
@card("Blood Clock")
def blood_clock(card, abilities):
def blood_clock():
return AbilityNotImplemented
return blood_clock,
@card("Infernal Kirin")
def infernal_kirin(card, abilities):
def infernal_kirin():
return AbilityNotImplemented
def infernal_kirin():
return AbilityNotImplemented
return infernal_kirin, infernal_kirin,
@card("Michiko Konda, Truth Seeker")
def michiko_konda_truth_seeker(card, abilities):
def michiko_konda_truth_seeker():
return AbilityNotImplemented
return michiko_konda_truth_seeker,
@card("Manriki-Gusari")
def manrikigusari(card, abilities):
def manrikigusari():
return AbilityNotImplemented
def manrikigusari():
return AbilityNotImplemented
return manrikigusari, manrikigusari,
@card("Masumaro, First to Live")
def masumaro_first_to_live(card, abilities):
def masumaro_first_to_live():
return AbilityNotImplemented
return masumaro_first_to_live,
@card("Cowed by Wisdom")
def cowed_by_wisdom(card, abilities):
def cowed_by_wisdom():
return AbilityNotImplemented
def cowed_by_wisdom():
return AbilityNotImplemented
return cowed_by_wisdom, cowed_by_wisdom,
@card("Choice of Damnations")
def choice_of_damnations(card, abilities):
def choice_of_damnations():
return AbilityNotImplemented
return choice_of_damnations,
@card("Skull Collector")
def skull_collector(card, abilities):
def skull_collector():
return AbilityNotImplemented
def skull_collector():
return AbilityNotImplemented
return skull_collector, skull_collector,
@card("Meishin, the Mind Cage")
def meishin_the_mind_cage(card, abilities):
def meishin_the_mind_cage():
return AbilityNotImplemented
return meishin_the_mind_cage,
@card("Shinen of Stars' Light")
def shinen_of_stars_light(card, abilities):
def shinen_of_stars_light():
return AbilityNotImplemented
def shinen_of_stars_light():
return AbilityNotImplemented
return shinen_of_stars_light, shinen_of_stars_light,
@card("Hand of Honor")
def hand_of_honor(card, abilities):
def hand_of_honor():
return AbilityNotImplemented
def hand_of_honor():
return AbilityNotImplemented
return hand_of_honor, hand_of_honor,
@card("Pure Intentions")
def pure_intentions(card, abilities):
def pure_intentions():
return AbilityNotImplemented
def pure_intentions():
return AbilityNotImplemented
return pure_intentions, pure_intentions,
@card("Murmurs from Beyond")
def murmurs_from_beyond(card, abilities):
def murmurs_from_beyond():
return AbilityNotImplemented
return murmurs_from_beyond,
@card("Hail of Arrows")
def hail_of_arrows(card, abilities):
def hail_of_arrows():
return AbilityNotImplemented
return hail_of_arrows,
@card("Seek the Horizon")
def seek_the_horizon(card, abilities):
def seek_the_horizon():
return AbilityNotImplemented
return seek_the_horizon,
@card("Gaze of Adamaro")
def gaze_of_adamaro(card, abilities):
def gaze_of_adamaro():
return AbilityNotImplemented
return gaze_of_adamaro,
@card("Reki, the History of Kamigawa")
def reki_the_history_of_kamigawa(card, abilities):
def reki_the_history_of_kamigawa():
return AbilityNotImplemented
return reki_the_history_of_kamigawa,
@card("Kuon's Essence")
def kuons_essence(card, abilities):
def kuons_essence():
return AbilityNotImplemented
def kuons_essence():
return AbilityNotImplemented
return kuons_essence, kuons_essence,
@card("Charge Across the Araba")
def charge_across_the_araba(card, abilities):
def charge_across_the_araba():
return AbilityNotImplemented
return charge_across_the_araba,
@card("Death Denied")
def death_denied(card, abilities):
def death_denied():
return AbilityNotImplemented
return death_denied,
@card("Ghost-Lit Raider")
def ghostlit_raider(card, abilities):
def ghostlit_raider():
return AbilityNotImplemented
def ghostlit_raider():
return AbilityNotImplemented
return ghostlit_raider, ghostlit_raider,
@card("Thoughts of Ruin")
def thoughts_of_ruin(card, abilities):
def thoughts_of_ruin():
return AbilityNotImplemented
return thoughts_of_ruin,
@card("Kataki, War's Wage")
def kataki_wars_wage(card, abilities):
def kataki_wars_wage():
return AbilityNotImplemented
return kataki_wars_wage,
@card("O-Naginata")
def onaginata(card, abilities):
def onaginata():
return AbilityNotImplemented
def onaginata():
return AbilityNotImplemented
def onaginata():
return AbilityNotImplemented
return onaginata, onaginata, onaginata,
@card("Briarknit Kami")
def briarknit_kami(card, abilities):
def briarknit_kami():
return AbilityNotImplemented
return briarknit_kami,
@card("Glitterfang")
def glitterfang(card, abilities):
def glitterfang():
return AbilityNotImplemented
def glitterfang():
return AbilityNotImplemented
return glitterfang, glitterfang,
@card("Reverence")
def reverence(card, abilities):
def reverence():
return AbilityNotImplemented
return reverence,
@card("Plow Through Reito")
def plow_through_reito(card, abilities):
def plow_through_reito():
return AbilityNotImplemented
return plow_through_reito,
@card("Undying Flames")
def undying_flames(card, abilities):
def undying_flames():
return AbilityNotImplemented
def undying_flames():
return AbilityNotImplemented
return undying_flames, undying_flames,
@card("Sunder from Within")
def sunder_from_within(card, abilities):
def sunder_from_within():
return AbilityNotImplemented
return sunder_from_within,
@card("Barrel Down Sokenzan")
def barrel_down_sokenzan(card, abilities):
def barrel_down_sokenzan():
return AbilityNotImplemented
return barrel_down_sokenzan,
@card("Shape Stealer")
def shape_stealer(card, abilities):
def shape_stealer():
return AbilityNotImplemented
return shape_stealer,
@card("Razorjaw Oni")
def razorjaw_oni(card, abilities):
def razorjaw_oni():
return AbilityNotImplemented
return razorjaw_oni,
@card("Sink into Takenuma")
def sink_into_takenuma(card, abilities):
def sink_into_takenuma():
return AbilityNotImplemented
return sink_into_takenuma,
@card("Ghost-Lit Redeemer")
def ghostlit_redeemer(card, abilities):
def ghostlit_redeemer():
return AbilityNotImplemented
def ghostlit_redeemer():
return AbilityNotImplemented
return ghostlit_redeemer, ghostlit_redeemer,
@card("Inner Fire")
def inner_fire(card, abilities):
def inner_fire():
return AbilityNotImplemented
return inner_fire,
@card("Moonwing Moth")
def moonwing_moth(card, abilities):
def moonwing_moth():
return AbilityNotImplemented
def moonwing_moth():
return AbilityNotImplemented
return moonwing_moth, moonwing_moth,
@card("Kiku's Shadow")
def kikus_shadow(card, abilities):
def kikus_shadow():
return AbilityNotImplemented
return kikus_shadow,
@card("Locust Miser")
def locust_miser(card, abilities):
def locust_miser():
return AbilityNotImplemented
return locust_miser,
@card("Sakura-Tribe Scout")
def sakuratribe_scout(card, abilities):
def sakuratribe_scout():
return AbilityNotImplemented
return sakuratribe_scout,
@card("Rending Vines")
def rending_vines(card, abilities):
def rending_vines():
return AbilityNotImplemented
def rending_vines():
return AbilityNotImplemented
return rending_vines, rending_vines,
@card("Stampeding Serow")
def stampeding_serow(card, abilities):
def stampeding_serow():
return AbilityNotImplemented
def stampeding_serow():
return AbilityNotImplemented
return stampeding_serow, stampeding_serow,
@card("Skyfire Kirin")
def skyfire_kirin(card, abilities):
def skyfire_kirin():
return AbilityNotImplemented
def skyfire_kirin():
return AbilityNotImplemented
return skyfire_kirin, skyfire_kirin,
@card("Cloudhoof Kirin")
def cloudhoof_kirin(card, abilities):
def cloudhoof_kirin():
return AbilityNotImplemented
def cloudhoof_kirin():
return AbilityNotImplemented
return cloudhoof_kirin, cloudhoof_kirin,
@card("Spiritual Visit")
def spiritual_visit(card, abilities):
def spiritual_visit():
return AbilityNotImplemented
def spiritual_visit():
return AbilityNotImplemented
return spiritual_visit, spiritual_visit,
@card("Into the Fray")
def into_the_fray(card, abilities):
def into_the_fray():
return AbilityNotImplemented
def into_the_fray():
return AbilityNotImplemented
return into_the_fray, into_the_fray,
@card("Ghost-Lit Stalker")
def ghostlit_stalker(card, abilities):
def ghostlit_stalker():
return AbilityNotImplemented
def ghostlit_stalker():
return AbilityNotImplemented
return ghostlit_stalker, ghostlit_stalker,
@card("Ronin Cavekeeper")
def ronin_cavekeeper(card, abilities):
def ronin_cavekeeper():
return AbilityNotImplemented
return ronin_cavekeeper,
@card("Araba Mothrider")
def araba_mothrider(card, abilities):
def araba_mothrider():
return AbilityNotImplemented
def araba_mothrider():
return AbilityNotImplemented
return araba_mothrider, araba_mothrider,
@card("Okina Nightwatch")
def okina_nightwatch(card, abilities):
def okina_nightwatch():
return AbilityNotImplemented
return okina_nightwatch,
@card("Ayumi, the Last Visitor")
def ayumi_the_last_visitor(card, abilities):
def ayumi_the_last_visitor():
return AbilityNotImplemented
return ayumi_the_last_visitor,
@card("Haru-Onna")
def haruonna(card, abilities):
def haruonna():
return AbilityNotImplemented
def haruonna():
return AbilityNotImplemented
return haruonna, haruonna,
@card("Molting Skin")
def molting_skin(card, abilities):
def molting_skin():
return AbilityNotImplemented
return molting_skin,
@card("Freed from the Real")
def freed_from_the_real(card, abilities):
def freed_from_the_real():
return AbilityNotImplemented
def freed_from_the_real():
return AbilityNotImplemented
def freed_from_the_real():
return AbilityNotImplemented
return freed_from_the_real, freed_from_the_real, freed_from_the_real,
@card("Inner Calm, Outer Strength")
def inner_calm_outer_strength(card, abilities):
def inner_calm_outer_strength():
return AbilityNotImplemented
return inner_calm_outer_strength,
@card("Raving Oni-Slave")
def raving_onislave(card, abilities):
def raving_onislave():
return AbilityNotImplemented
return raving_onislave,
@card("Seed the Land")
def seed_the_land(card, abilities):
def seed_the_land():
return AbilityNotImplemented
return seed_the_land,
@card("Descendant of Soramaro")
def descendant_of_soramaro(card, abilities):
def descendant_of_soramaro():
return AbilityNotImplemented
return descendant_of_soramaro,
@card("Maga, Traitor to Mortals")
def maga_traitor_to_mortals(card, abilities):
def maga_traitor_to_mortals():
return AbilityNotImplemented
def maga_traitor_to_mortals():
return AbilityNotImplemented
return maga_traitor_to_mortals, maga_traitor_to_mortals,
@card("Kiri-Onna")
def kirionna(card, abilities):
def kirionna():
return AbilityNotImplemented
def kirionna():
return AbilityNotImplemented
return kirionna, kirionna,
@card("Iizuka the Ruthless")
def iizuka_the_ruthless(card, abilities):
def iizuka_the_ruthless():
return AbilityNotImplemented
def iizuka_the_ruthless():
return AbilityNotImplemented
return iizuka_the_ruthless, iizuka_the_ruthless,
@card("Yuki-Onna")
def yukionna(card, abilities):
def yukionna():
return AbilityNotImplemented
def yukionna():
return AbilityNotImplemented
return yukionna, yukionna,
@card("Death of a Thousand Stings")
def death_of_a_thousand_stings(card, abilities):
def death_of_a_thousand_stings():
return AbilityNotImplemented
def death_of_a_thousand_stings():
return AbilityNotImplemented
return death_of_a_thousand_stings, death_of_a_thousand_stings,
@card("Pain's Reward")
def pains_reward(card, abilities):
def pains_reward():
return AbilityNotImplemented
return pains_reward,
@card("Scroll of Origins")
def scroll_of_origins(card, abilities):
def scroll_of_origins():
return AbilityNotImplemented
return scroll_of_origins,
@card("Jiwari, the Earth Aflame")
def jiwari_the_earth_aflame(card, abilities):
def jiwari_the_earth_aflame():
return AbilityNotImplemented
def jiwari_the_earth_aflame():
return AbilityNotImplemented
return jiwari_the_earth_aflame, jiwari_the_earth_aflame,
@card("Oboro Breezecaller")
def oboro_breezecaller(card, abilities):
def oboro_breezecaller():
return AbilityNotImplemented
def oboro_breezecaller():
return AbilityNotImplemented
return oboro_breezecaller, oboro_breezecaller,
@card("Sasaya's Essence")
def sasayas_essence(card, abilities):
def sasayas_essence():
return AbilityNotImplemented
def sasayas_essence():
return AbilityNotImplemented
return sasayas_essence, sasayas_essence,
@card("Tomb of Urami")
def tomb_of_urami(card, abilities):
def tomb_of_urami():
return AbilityNotImplemented
def tomb_of_urami():
return AbilityNotImplemented
return tomb_of_urami, tomb_of_urami,
@card("Shinen of Flight's Wings")
def shinen_of_flights_wings(card, abilities):
def shinen_of_flights_wings():
return AbilityNotImplemented
def shinen_of_flights_wings():
return AbilityNotImplemented
return shinen_of_flights_wings, shinen_of_flights_wings,
@card("Torii Watchward")
def torii_watchward(card, abilities):
def torii_watchward():
return AbilityNotImplemented
def torii_watchward():
return AbilityNotImplemented
return torii_watchward, torii_watchward,
@card("Akki Drillmaster")
def akki_drillmaster(card, abilities):
def akki_drillmaster():
return AbilityNotImplemented
return akki_drillmaster,
@card("Secretkeeper")
def secretkeeper(card, abilities):
def secretkeeper():
return AbilityNotImplemented
return secretkeeper,
@card("Ebony Owl Netsuke")
def ebony_owl_netsuke(card, abilities):
def ebony_owl_netsuke():
return AbilityNotImplemented
return ebony_owl_netsuke,
@card("Moonbow Illusionist")
def moonbow_illusionist(card, abilities):
def moonbow_illusionist():
return AbilityNotImplemented
def moonbow_illusionist():
return AbilityNotImplemented
return moonbow_illusionist, moonbow_illusionist,
@card("Kuro's Taken")
def kuros_taken(card, abilities):
def kuros_taken():
return AbilityNotImplemented
def kuros_taken():
return AbilityNotImplemented
return kuros_taken, kuros_taken,
@card("Burning-Eye Zubera")
def burningeye_zubera(card, abilities):
def burningeye_zubera():
return AbilityNotImplemented
return burningeye_zubera,
@card("AEther Shockwave")
def aether_shockwave(card, abilities):
def aether_shockwave():
return AbilityNotImplemented
return aether_shockwave,
@card("Exile into Darkness")
def exile_into_darkness(card, abilities):
def exile_into_darkness():
return AbilityNotImplemented
def exile_into_darkness():
return AbilityNotImplemented
return exile_into_darkness, exile_into_darkness,
@card("Wine of Blood and Iron")
def wine_of_blood_and_iron(card, abilities):
def wine_of_blood_and_iron():
return AbilityNotImplemented
return wine_of_blood_and_iron,
@card("Matsu-Tribe Birdstalker")
def matsutribe_birdstalker(card, abilities):
def matsutribe_birdstalker():
return AbilityNotImplemented
def matsutribe_birdstalker():
return AbilityNotImplemented
return matsutribe_birdstalker, matsutribe_birdstalker,
@card("Shinen of Fury's Fire")
def shinen_of_furys_fire(card, abilities):
def shinen_of_furys_fire():
return AbilityNotImplemented
def shinen_of_furys_fire():
return AbilityNotImplemented
return shinen_of_furys_fire, shinen_of_furys_fire,
@card("Miren, the Moaning Well")
def miren_the_moaning_well(card, abilities):
def miren_the_moaning_well():
return AbilityNotImplemented
def miren_the_moaning_well():
return AbilityNotImplemented
return miren_the_moaning_well, miren_the_moaning_well,
@card("Erayo, Soratami Ascendant")
def erayo_soratami_ascendant(card, abilities):
def erayo_soratami_ascendant():
return AbilityNotImplemented
def erayo_soratami_ascendant():
return AbilityNotImplemented
def erayo_soratami_ascendant():
return AbilityNotImplemented
return erayo_soratami_ascendant, erayo_soratami_ascendant, erayo_soratami_ascendant,
@card("Mikokoro, Center of the Sea")
def mikokoro_center_of_the_sea(card, abilities):
def mikokoro_center_of_the_sea():
return AbilityNotImplemented
def mikokoro_center_of_the_sea():
return AbilityNotImplemented
return mikokoro_center_of_the_sea, mikokoro_center_of_the_sea,
| Julian/cardboard | cardboard/cards/sets/saviors_of_kamigawa.py | Python | mit | 37,555 | ["VisIt"] | ef2043c4b93cc50649e65aace12b2a9ec70459cb5b1913d27d29cce7872361c7 |
#!/usr/bin/env python
#
# Perform PRS calculations given and MD trajectory and a final state
# co-ordinate file
#
# Script distributed under GNU GPL 3.0
#
# Author: David Penkler
# Date: 17-11-2016
import sys, argparse
import numpy
import mdtraj as md
from math import log10, floor, sqrt
from lib import sdrms
from lib.cli import CLI
from lib.utils import Logger
from lib.trajectory import load_trajectory
def round_sig(x, sig=2):
return round(x,sig-int(floor(log10(x)))-1)
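# Editor's note (illustrative): round_sig keeps `sig` significant figures,
# e.g. round_sig(0.0123456, sig=3) -> 0.0123. It assumes x > 0, since log10 of
# zero or a negative value would raise an error.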
def trajectory_to_array(traj, totalframes, totalres):
trajectory = numpy.zeros((totalframes, totalres*3))
for row, frame in enumerate(traj):
top = frame.topology
col = 0
for atom_index, atom in enumerate(top.atoms):
if atom.name == "CA":
trajectory[row,col:col+3] = frame.xyz[0,atom_index]*10
col += 3
return trajectory
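# Editor's note: trajectory_to_array flattens the trajectory into an
# (n_frames, 3 * n_residues) matrix holding only C-alpha coordinates; MDTraj
# reports positions in nanometres, so the factor of 10 converts them to
# Angstroms.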
def align_frame(reference_frame, alternative_frame, aln=False):
totalres = reference_frame.shape[0]
if aln:
return sdrms.superpose3D(alternative_frame.reshape(totalres, 3), reference_frame, refmask=mask, targetmask=mask)[0].reshape(1, totalres*3)[0]
else:
return sdrms.superpose3D(alternative_frame.reshape(totalres, 3), reference_frame)[0].reshape(1, totalres*3)[0]
def calc_rmsd(reference_frame, alternative_frame, aln=False):
if aln:
return sdrms.superpose3D(alternative_frame, reference_frame, refmask=mask, targetmask=mask)[1]
else:
return sdrms.superpose3D(alternative_frame, reference_frame)[1]
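# Editor's note: both helpers delegate to lib.sdrms.superpose3D, which
# (presumably via a least-squares fit) returns the superposed coordinates and
# the RMSD. When --aln is given, a residue `mask` restricts the fit; note that
# `mask` is not defined anywhere in this listing and must come from elsewhere.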
def main(args):
if not args.final:
log.error("a final co-ordinate file must be supplied via the --final argument\n")
sys.exit(1)
initial = md.load_frame(args.trajectory, 0, top=args.topology)
if not args.initial:
args.initial = "initial.xyz"
log.info("Generating initial co-ordinate file: %s\n" % args.initial)
initial[0].save(args.initial)
log.info("Loading trajectory...\n")
if args.num_frames:
traj, totalframes = load_trajectory(args.trajectory, args.topology, args.step, True)
totalframes = args.num_frames
else:
traj, totalframes = load_trajectory(args.trajectory, args.topology, args.step, False)
totalres = initial.n_residues
log.info('- Total number of frames = %d\n- Number of residues = %d\n' % (totalframes, totalres))
trajectory = trajectory_to_array(traj, totalframes, totalres)
log.info('- Final trajectory matrix size: %s\n' % str(trajectory.shape))
del traj
log.info("Aligning trajectory frames...\n")
aligned_mat = numpy.zeros((totalframes,3*totalres))
frame_0 = trajectory[0].reshape(totalres, 3)
for frame in range(0, totalframes):
aligned_mat[frame] = align_frame(frame_0, trajectory[frame], args.aln)
del trajectory
log.info("- Calculating average structure...\n")
average_structure_1 = numpy.mean(aligned_mat, axis=0).reshape(totalres, 3)
log.info("- Aligning to average structure...\n")
for i in range(0, 10):
for frame in range(0, totalframes):
aligned_mat[frame] = align_frame(average_structure_1, aligned_mat[frame], args.aln)
average_structure_2 = numpy.average(aligned_mat, axis=0).reshape(totalres, 3)
rmsd = calc_rmsd(average_structure_1, average_structure_2, args.aln)
log.info(' - %s Angstroms from previous structure\n' % str(rmsd))
average_structure_1 = average_structure_2
del average_structure_2
if rmsd <= 0.000001:
for frame in range(0, totalframes):
aligned_mat[frame] = align_frame(average_structure_1, aligned_mat[frame], args.aln)
break
log.info("Calculating difference between frame atoms and average atoms...\n")
meanstructure = average_structure_1.reshape(totalres*3)
del average_structure_1
log.info('- Calculating R_mat\n')
R_mat = numpy.zeros((totalframes, totalres*3))
for frame in range(0, totalframes):
R_mat[frame,:] = (aligned_mat[frame,:]) - meanstructure
log.info('- Transposing\n')
RT_mat = numpy.transpose(R_mat)
RT_mat = numpy.mat(RT_mat)
R_mat = numpy.mat(R_mat)
log.info('- Calculating corr_mat\n')
corr_mat = (RT_mat * R_mat)/ (totalframes-1)
numpy.savetxt("corr_mat.txt", corr_mat)
del aligned_mat
del meanstructure
del R_mat
del RT_mat
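# Editor's note: corr_mat is the 3N x 3N covariance matrix of C-alpha
# displacements about the mean structure, R^T R / (M - 1) for M frames; in the
# linear-response picture used below it maps an applied force vector onto the
# expected displacement response.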
log.info('Reading initial and final PDB co-ordinates...\n')
initial = numpy.zeros((totalres, 3))
final = numpy.zeros((totalres, 3))
with open(args.initial, 'r') as initial_lines:
with open(args.final, 'r') as final_lines:
res_index = 0
for line_index, initial_line in enumerate(initial_lines):
final_line = final_lines.readline()
if line_index >= 2 and res_index < totalres:
initial_res = initial_line.strip().split()
if initial_res[0] == "CA":
final_res = final_line.strip().split()
initial[res_index,] = initial_res[1:]
final[res_index,] = final_res[1:]
res_index += 1
log.info('Calculating experimental difference between initial and final co-ordinates...\n')
if args.aln:
log.info("- Using NTD alignment restrictions\n")
final_alg = sdrms.superpose3D(final, initial, refmask=mask, targetmask=mask)[0]
else:
final_alg = sdrms.superpose3D(final, initial)[0]
diffE = (final_alg-initial).reshape(totalres*3, 1)
del final
del final_alg
log.info('Implementing perturbations sequentially...\n')
perturbations = int(args.perturbations)
diffP = numpy.zeros((totalres, totalres*3, perturbations))
initial_trans = initial.reshape(1, totalres*3)
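# Editor's note: for each trial s and residue i, a random force with x/y/z
# components in [-1, 1) (rounded to 5 significant figures) is placed in the
# three entries of delF belonging to residue i; the predicted response is
# numpy.dot(delF, corr_mat), which is added to the initial structure,
# superposed back onto it, and stored as a displacement relative to it.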
for s in range(0, perturbations):
for i in range(0, totalres):
delF = numpy.zeros((totalres*3))
f = 2 * numpy.random.random((3, 1)) - 1
j = (i + 1) * 3
delF[j-3] = round_sig(abs(f[0,0]), 5)* -1 if f[0,0]< 0 else round_sig(abs(f[0,0]), 5)
delF[j-2] = round_sig(abs(f[1,0]), 5)* -1 if f[1,0]< 0 else round_sig(abs(f[1,0]), 5)
delF[j-1] = round_sig(abs(f[2,0]), 5)* -1 if f[2,0]< 0 else round_sig(abs(f[2,0]), 5)
diffP[i,:,s] = numpy.dot((delF), (corr_mat))
diffP[i,:,s] = diffP[i,:,s] + initial_trans[0]
if args.aln:
diffP[i,:,s] = ((sdrms.superpose3D(diffP[i,:,s].reshape(totalres, 3), initial, refmask=mask, targetmask=mask)[0].reshape(1, totalres*3))[0]) - initial_trans[0]
else:
diffP[i,:,s] = ((sdrms.superpose3D(diffP[i,:,s].reshape(totalres, 3), initial)[0].reshape(1, totalres*3))[0]) - initial_trans[0]
del delF
del initial_trans
del initial
del corr_mat
log.info("Calculating Pearson's correlations coefficient...\n")
DTarget = numpy.zeros(totalres)
DIFF = numpy.zeros((totalres, totalres, perturbations))
RHO = numpy.zeros((totalres, perturbations))
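# Editor's note: DTarget[i] is the magnitude of residue i's experimental
# displacement (initial -> final); DIFF[k, i, j] is the magnitude of residue
# k's predicted displacement when residue i is perturbed in trial j; RHO
# correlates each predicted profile with DTarget, and maxRHO keeps the best
# |correlation| per perturbed residue -- the value written to the output CSV.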
for i in range(0, totalres):
DTarget[i] = sqrt(diffE[3*(i+1)-3]**2 + diffE[3*(i+1)-2]**2 + diffE[3*(i+1)-1]**2)
for j in range(0, perturbations):
for i in range(0, totalres):
for k in range(0, totalres):
DIFF[k,i,j] = sqrt((diffP[i, 3*(k+1)-3, j]**2) + (diffP[i, 3*(k+1)-2, j]**2) + (diffP[i, 3*(k+1)-1, j]**2))
del diffP
for i in range(0, perturbations):
for j in range(0, totalres):
RHO[j,i] = numpy.corrcoef(numpy.transpose(DIFF[:,j,i]), DTarget)[0,1]
del DIFF
del DTarget
maxRHO = numpy.zeros(totalres)
for i in range(0, totalres):
maxRHO[i] = numpy.amax(abs(RHO[i,:]))
numpy.savetxt("%s.csv" % args.prefix, maxRHO, delimiter=",", header=args.prefix)
del maxRHO
log = Logger()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("trajectory", help="Trajectory file")
parser.add_argument("--topology", help="Topology PDB file (required if trajectory does not contain topology information)")
parser.add_argument("--step", help="Size of step when iterating through trajectory frames", default=1, type=int)
parser.add_argument("--initial", help="Initial state co-ordinate file (default: generated from first frame of trajectory)", default=None)
parser.add_argument("--final", help="Final state co-ordinate file (must be provided)")
parser.add_argument("--perturbations", help="Number of perturbations (default: 250)", type=int, default=250)
parser.add_argument("--num-frames", help="The number of frames in the trajectory (provides improved performance for large trajectories that cannot be loaded into memory)", type=int, default=None)
parser.add_argument("--aln", help="Restrict N-Terminal alignment", action="store_true")
parser.add_argument("--prefix", help="Prefix for CSV output file (default: result)", default="result")
CLI(parser, main, log)
| RUBi-ZA/MD-TASK | prs.py | Python | gpl-3.0 | 9,080 | ["MDTraj"] | 42ca1a0f147e25af1c5b6a76ce2c7a4da3428483a6c8b027809c9b849905f27f |
"""
:copyright: (c) 2014 Building Energy Inc
:license: see LICENSE for more details.
"""
import random
from django.contrib.webdesign import lorem_ipsum
import datetime
import base64
from decimal import getcontext, Decimal
getcontext().prec = 7
from localflavor.us.us_states import STATE_CHOICES
from seed.test_helpers.factory.lib.chomsky import generate_chomsky
class DjangoFunctionalFactory:
@classmethod
def rand_int(cls, start=0, end=100):
return random.randint(start, end)
@classmethod
def rand_float(cls, start=0, end=100):
return random.uniform(start, end)
@classmethod
def rand_str(cls, length=None):
# from http://stackoverflow.com/questions/785058/random-strings-in-python-2-6-is-this-ok
if not length:
length = cls.rand_int(end=10)
nbits = length * 6 + 1
bits = random.getrandbits(nbits)
uc = u"%0x" % bits
newlen = int(len(uc) / 2) * 2
ba = bytearray.fromhex(uc[:newlen])
return base64.urlsafe_b64encode(str(ba))[:length]
@classmethod
def rand_text(cls, num_paragraphs=None):
if not num_paragraphs:
num_paragraphs = cls.rand_int(start=1, end=5)
return lorem_ipsum.paragraphs(num_paragraphs, common=False)
@classmethod
def rand_phone(cls):
area = cls.rand_int(start=100, end=999)
first = cls.rand_int(start=100, end=999)
last = cls.rand_int(start=1000, end=9999)
return "%s-%s-%s" % (area, first, last)
@classmethod
def rand_street_address(cls):
s = "%s %s %s" % (cls.rand_int(end=10000), cls.rand_plant_name(), cls.rand_street_suffix())
return s[:63]
@classmethod
def rand_city(cls):
return "%s%s" % (cls.rand_plant_name(), cls.rand_city_suffix())
@classmethod
def rand_bool(cls):
return cls.rand_int(0, 1) == 0
@classmethod
def rand_name(cls):
return RANDOM_NAME_SOURCE[cls.rand_int(0, len(RANDOM_NAME_SOURCE) - 1)]
@classmethod
def rand_plant_name(cls):
return RANDOM_PLANT_NAME_SOURCE[cls.rand_int(0, len(RANDOM_PLANT_NAME_SOURCE) - 1)]
@classmethod
def rand_street_suffix(cls):
return RANDOM_STREET_SUFFIX_SOURCE[cls.rand_int(0, len(RANDOM_STREET_SUFFIX_SOURCE) - 1)]
@classmethod
def rand_city_suffix(cls):
return RANDOM_CITY_SUFFIX_SOURCE[cls.rand_int(0, len(RANDOM_CITY_SUFFIX_SOURCE) - 1)]
@classmethod
def rand_date(cls, start_year=1900, end_year=2011):
return datetime.date(year=cls.rand_int(start_year, end_year), month=cls.rand_int(1, 12), day=cls.rand_int(1, 28))
@classmethod
def rand_currency(cls, start=0, end=100):
return Decimal(cls.rand_int(start=start * 100, end=end * 100)) / 100
@classmethod
def rand_email(cls):
return "%s@%s" % (cls.rand_name().lower(), cls.rand_domain())
@classmethod
def rand_domain(cls):
return RANDOM_EMAIL_DOMAINS[cls.rand_int(0, len(RANDOM_EMAIL_DOMAINS) - 1)]
@classmethod
def rand_us_state(cls):
return STATE_CHOICES[cls.rand_int(0, len(STATE_CHOICES) - 1)][0]
@classmethod
def valid_test_cc_number(cls):
return "4242424242424242"
@classmethod
def invalid_test_cc_number(cls):
return "4242424242424241"
@classmethod
def test_cc_number(cls, valid=True):
if valid:
return cls.valid_test_cc_number()
else:
return cls.invalid_test_cc_number()
@classmethod
def random_conversation(cls, paragraphs=3):
return generate_chomsky(paragraphs)
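# Editor's sketch (illustrative only; example values below are made up):
#   DjangoFunctionalFactory.rand_int(1, 6)         # random int in [1, 6]
#   DjangoFunctionalFactory.rand_phone()           # e.g. "555-867-5309"
#   DjangoFunctionalFactory.rand_street_address()  # e.g. "42 Magnolia Ave."
#   DjangoFunctionalFactory.rand_currency(5, 10)   # Decimal between 5 and 10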
RANDOM_NAME_SOURCE = ["Atricia", "Linda", "Barbara", "Elizabeth", "Jennifer",
"Maria", "Susan", "Margaret", "Dorothy", "Lisa", "Nancy", "Karen", "Betty",
"Helen", "Sandra", "Donna", "Carol", "Ruth", "Sharon", "Michelle", "Laura",
"Sarah", "Kimberly", "Deborah", "Jessica", "Shirley", "Cynthia", "Angela",
"Melissa", "Brenda", "Amy", "Anna", "Rebecca", "Virginia", "Kathleen",
"Pamela", "Martha", "Debra", "Amanda", "Stephanie", "Carolyn", "Christine",
"Marie", "Janet", "Catherine", "Frances", "Ann", "Joyce", "Diane", "Alice",
"Julie", "Heather", "Teresa", "Doris", "Gloria", "Evelyn", "Jean", "Cheryl",
"Mildred", "Katherine", "Joan", "Ashley", "Judith", "Rose", "Janice", "Kelly",
"Nicole", "Judy", "Christina", "Kathy", "Theresa", "Beverly", "Denise",
"Tammy", "Irene", "Jane", "Lori", "Rachel", "Marilyn", "Andrea", "Kathryn",
"Louise", "Sara", "Anne", "Jacquelin", "Wanda", "Bonnie", "Julia", "Ruby",
"Lois", "Tina", "Phyllis", "Norma", "Paula", "Diana", "Annie", "Lillian",
"Emily", "Robin", "Peggy", "Crystal", "Gladys", "Rita", "Dawn", "Connie",
"Florence", "Tracy", "Edna", "Tiffany", "Carmen", "Rosa", "Cindy", "Grace",
"Wendy", "Victoria", "Edith", "Kim", "Sherry", "Sylvia", "Josephine", "Thelma",
"Shannon", "Sheila", "Ethel", "Ellen", "Elaine", "Marjorie", "Carrie",
"Charlotte", "Monica", "Esther", "Pauline", "Emma", "Juanita", "Anita",
"Rhonda", "Hazel", "Amber", "Eva", "Debbie", "April", "Leslie", "Clara",
"Lucille", "Jamie", "Joanne", "Eleanor", "Valerie", "Danielle", "Megan",
"Alicia", "Suzanne", "Michele", "Gail", "Bertha", "Darlene", "Veronica",
"Jill", "Erin", "Geraldine", "Lauren", "Cathy", "Joann", "Lorraine", "Lynn",
"Sally", "Regina", "Erica", "Beatrice", "Dolores", "Bernice", "Audrey",
"Yvonne", "Annette", "June", "Samantha", "Marion", "Dana", "Stacy", "Ana",
"Renee", "Ida", "Vivian", "Roberta", "Holly", "Brittany", "Melanie",
"Loretta", "Yolanda", "Jeanette", "Laurie", "Katie", "Kristen", "Vanessa",
"Alma", "Sue", "Elsie", "Beth", "Jeanne", "Vicki", "Carla", "Tara",
"Rosemary", "Eileen", "Terri", "Gertrude", "Lucy", "Tonya", "Ella",
"Stacey", "Wilma", "Gina", "Kristin", "Jessie", "Natalie", "Agnes", "Vera",
"Willie", "Charlene", "Bessie", "Delores", "Melinda", "Pearl", "Arlene",
"Maureen", "Colleen", "Allison", "Tamara", "Joy", "Georgia", "Constance",
"Lillie", "Claudia", "Jackie", "Marcia", "Tanya", "Nellie", "Minnie",
"Marlene", "Heidi", "Glenda", "Lydia", "Viola", "Courtney", "Marian",
"Stella", "Caroline", "Dora", "Jo", "Vickie", "Mattie", "Terry", "Maxine",
"Irma", "Mabel", "Marsha", "Myrtle", "Lena", "Christy", "Deanna", "Patsy",
"Hilda", "Gwendolyn", "Jennie", "Nora", "Margie", "Nina", "Cassandra",
"Leah", "Penny", "Kay", "Priscilla", "Naomi", "Carole", "Brandy", "Olga",
"Billie", "Dianne", "Tracey", "Leona", "Jenny", "Felicia", "Sonia", "Miriam",
"Velma", "Becky", "Bobbie", "Violet", "Kristina", "Toni", "Misty", "Mae",
"Shelly", "Daisy", "Ramona", "Sherri", "Erika", "Katrina", "Claire",
"Lindsey", "Lindsay", "Geneva", "Guadalupe", "Belinda", "Margarita", "Sheryl",
"Cora", "Faye", "Ada", "Natasha", "Sabrina", "Isabel", "Marguerit", "Hattie",
"Harriet", "Molly", "Cecilia", "Kristi", "Brandi", "Blanche", "Sandy", "Rosie",
"Joanna", "Iris", "Eunice", "Angie", "Inez", "Lynda", "Madeline", "Amelia",
"Alberta", "Genevieve", "Monique", "Jodi", "Janie", "Maggie", "Kayla", "Sonya",
"Jan", "Lee", "Kristine", "Candace", "Fannie", "Maryann", "Opal", "Alison",
"Yvette", "Melody", "Luz", "Susie", "Olivia", "Flora", "Shelley", "Kristy",
"Mamie", "Lula", "Lola", "Verna", "Beulah", "Antoinett", "Candice", "Juana",
"Jeannette", "Pam", "Kelli", "Hannah", "Whitney", "Bridget", "Karla", "Celia",
"Latoya", "Patty", "Shelia", "Gayle", "Della", "Vicky", "Lynne", "Sheri",
"Marianne", "Kara", "Jacquelyn", "Erma", "Blanca", "Myra", "Leticia", "Pat",
"Krista", "Roxanne", "Angelica", "Johnnie", "Robyn", "Francis", "Adrienne",
"Rosalie", "Alexandra", "Brooke", "Bethany", "Sadie", "Bernadett", "Traci",
"Jody", "Kendra", "Jasmine", "Nichole", "Rachael", "Chelsea", "Mable",
"Ernestine", "Muriel", "Marcella", "Elena", "Krystal", "Angelina", "Nadine",
"Kari", "Estelle", "Dianna", "Paulette", "Lora", "Mona", "Doreen", "Rosemarie",
"Angel", "Desiree", "Antonia", "Hope", "Ginger", "Janis", "Betsy", "Christie",
"Freda", "Mercedes", "Meredith", "Lynette", "Teri", "Cristina", "Eula",
"Leigh", "Meghan", "Sophia", "Eloise", "James", "John", "Robert",
"Michael", "William", "David", "Richard", "Charles", "Joseph", "Thomas",
"Christoph", "Daniel", "Paul", "Mark", "Donald", "George", "Kenneth", "Steven",
"Edward", "Brian", "Ronald", "Anthony", "Kevin", "Jason", "Matthew", "Gary",
"Timothy", "Jose", "Larry", "Jeffrey", "Frank", "Scott", "Eric", "Stephen",
"Andrew", "Raymond", "Gregory", "Joshua", "Jerry", "Dennis", "Walter",
"Patrick", "Peter", "Harold", "Douglas", "Henry", "Carl", "Arthur", "Ryan",
"Roger", "Joe", "Juan", "Jack", "Albert", "Jonathan", "Justin", "Terry",
"Gerald", "Keith", "Samuel", "Willie", "Ralph", "Lawrence", "Nicholas", "Roy",
"Benjamin", "Bruce", "Brandon", "Adam", "Harry", "Fred", "Wayne", "Billy",
"Steve", "Louis", "Jeremy", "Aaron", "Randy", "Howard", "Eugene", "Carlos",
"Russell", "Bobby", "Victor", "Martin", "Ernest", "Phillip", "Todd", "Jesse",
"Craig", "Alan", "Shawn", "Clarence", "Sean", "Philip", "Chris", "Johnny",
"Earl", "Jimmy", "Antonio", "Danny", "Bryan", "Tony", "Luis", "Mike",
"Stanley", "Leonard", "Nathan", "Dale", "Manuel", "Rodney", "Curtis", "Norman",
"Allen", "Marvin", "Vincent", "Glenn", "Jeffery", "Travis", "Jeff", "Chad",
"Jacob", "Lee", "Melvin", "Alfred", "Kyle", "Francis", "Bradley", "Jesus",
"Herbert", "Frederick", "Ray", "Joel", "Edwin", "Don", "Eddie", "Ricky",
"Troy", "Randall", "Barry", "Alexander", "Bernard", "Mario", "Leroy",
"Francisco", "Marcus", "Micheal", "Theodore", "Clifford", "Miguel", "Oscar",
"Jay", "Jim", "Tom", "Calvin", "Alex", "Jon", "Ronnie", "Bill", "Lloyd",
"Tommy", "Leon", "Derek", "Warren", "Darrell", "Jerome", "Floyd", "Leo",
"Alvin", "Tim", "Wesley", "Gordon", "Dean", "Greg", "Jorge", "Dustin", "Pedro",
"Derrick", "Dan", "Lewis", "Zachary", "Corey", "Herman", "Maurice", "Vernon",
"Roberto", "Clyde", "Glen", "Hector", "Shane", "Ricardo", "Sam", "Rick",
"Lester", "Brent", "Ramon", "Charlie", "Tyler", "Gilbert", "Gene", "Marc",
"Reginald", "Ruben", "Brett", "Angel", "Nathaniel", "Rafael", "Leslie",
"Edgar", "Milton", "Raul", "Ben", "Chester", "Cecil", "Duane", "Franklin",
"Andre", "Elmer", "Brad", "Gabriel", "Ron", "Mitchell", "Roland", "Arnold",
"Harvey", "Jared", "Adrian", "Karl", "Cory", "Claude", "Erik", "Darryl",
"Jamie", "Neil", "Jessie", "Christian", "Javier", "Fernando", "Clinton", "Ted",
"Mathew", "Tyrone", "Darren", "Lonnie", "Lance", "Cody", "Julio", "Kelly",
"Kurt", "Allan", "Nelson", "Guy", "Clayton", "Hugh", "Max", "Dwayne", "Dwight",
"Armando", "Felix", "Jimmie", "Everett", "Jordan", "Ian", "Wallace", "Ken",
"Bob", "Jaime", "Casey", "Alfredo", "Alberto", "Dave", "Ivan", "Johnnie",
"Sidney", "Byron", "Julian", "Isaac", "Morris", "Clifton", "Willard", "Daryl",
"Ross", "Virgil", "Andy", "Marshall", "Salvador", "Perry", "Kirk", "Sergio",
"Marion", "Tracy", "Seth", "Kent", "Terrance", "Rene", "Eduardo", "Terrence",
"Enrique", "Freddie", "Wade", "Austin", "Stuart", "Fredrick", "Arturo",
"Alejandro", "Jackie", "Joey", "Nick", "Luther", "Wendell", "Jeremiah", "Evan",
"Julius", "Dana", "Donnie", "Otis", "Shannon", "Trevor", "Oliver", "Luke",
"Homer", "Gerard", "Doug", "Kenny", "Hubert", "Angelo", "Shaun", "Lyle",
"Matt", "Lynn", "Alfonso", "Orlando", "Rex", "Carlton", "Ernesto", "Cameron",
"Neal", "Pablo", "Lorenzo", "Omar", "Wilbur", "Blake", "Grant", "Horace",
"Roderick", "Kerry", "Abraham", "Willis", "Rickey", "Jean", "Ira", "Andres",
"Cesar", "Johnathan", "Malcolm", "Rudolph", "Damon", "Kelvin", "Rudy",
"Preston", "Alton", "Archie", "Marco", "Wm", "Pete", "Randolph", "Garry",
"Geoffrey", "Jonathon", "Felipe", "Bennie", "Gerardo", "Ed", "Dominic",
"Robin", "Loren", "Delbert", "Colin", "Guillermo", "Earnest", "Lucas", "Benny",
"Noel", "Spencer", "Rodolfo", "Myron", "Edmund", "Garrett", "Salvatore",
"Cedric", "Lowell", "Gregg", "Sherman", "Wilson", "Devin", "Sylvester", "Kim",
"Roosevelt", "Israel", "Jermaine", "Forrest", "Wilbert", "Leland", "Simon",
"Guadalupe", "Clark", "Irving", "Carroll", "Bryant", "Owen", "Rufus",
"Woodrow", "Sammy", "Kristophe", "Mack", "Levi", "Marcos", "Gustavo", "Jake",
"Lionel", "Marty", "Taylor", "Ellis", "Dallas", "Gilberto", "Clint", "Nicolas",
"Laurence", "Ismael", "Orville", "Drew", "Jody", "Ervin", "Dewey", "Al",
"Wilfred", "Josh", "Hugo", "Ignacio", "Caleb", "Tomas", "Sheldon", "Erick",
"Frankie", "Stewart", "Doyle", "Darrel", "Rogelio", "Terence", "Santiago",
"Alonzo", "Elias", "Bert", "Elbert", "Ramiro", "Conrad", "Pat", "Noah",
"Grady", "Phil", "Cornelius", "Lamar", "Rolando", "Clay", "Percy", "Dexter",
"Bradford", "Merle", "Darin", "Amos", "Terrell", "Moses", "Irvin", "Saul",
"Roman", "Darnell", "Randal", "Tommie", "Timmy", "Darrin", "Winston",
"Brendan", "Toby", "Van", "Abel", "Dominick", "Boyd", "Courtney", "Jan",
"Emilio", "Elijah", "Cary", "Domingo", "Santos", "Aubrey", "Emmett", "Marlon",
"Emanuel", "Jerald", "Edmond", "Emil", "Dewayne", "Will", "Otto", "Teddy",
"Reynaldo", "Bret", "Morgan", "Jess", "Trent", "Humberto", "Emmanuel",
"Stephan", "Louie", "Vicente", "Lamont", "Stacy", "Garland", "Miles", "Micah",
"Efrain", "Billie", "Logan", "Heath", "Rodger", "Harley", "Demetrius",
"Ethan", "Eldon", "Rocky", "Pierre", "Junior", "Freddy", "Eli", "Bryce",
"Antoine", "Robbie", "Kendall", "Royce", "Sterling", "Mickey", "Chase",
"Grover", "Elton", "Cleveland", "Dylan", "Chuck", "Damian", "Reuben", "Stan",
"August", "Leonardo", "Jasper", "Russel", "Erwin", "Benito", "Hans", "Monte",
"Blaine", "Ernie", "Curt", "Quentin", "Agustin", "Murray", "Jamal", "Devon",
"Adolfo", "Harrison", "Tyson", "Burton", "Brady", "Elliott", "Wilfredo",
"Bart", "Jarrod", "Vance", "Denis", "Damien", "Joaquin", "Harlan", "Desmond",
"Elliot", "Darwin", "Ashley", "Gregorio", "Buddy", "Xavier", "Kermit",
"Roscoe", "Esteban", "Anton", "Solomon", "Scotty", "Norbert", "Elvin",
"Williams", "Nolan", "Carey", "Rod", "Quinton", "Hal", "Brain", "Rob",
"Elwood", "Kendrick", "Darius", "Moises", "Son", "Marlin", "Fidel",
"Thaddeus", "Cliff", "Marcel", "Ali", "Jackson", "Raphael", "Bryon", "Armand",
"Alvaro", "Jeffry", "Dane", "Joesph", "Thurman", "Ned", "Sammie", "Rusty",
"Michel", "Monty", "Rory", "Fabian", "Reggie", "Mason", "Graham", "Kris",
"Isaiah", "Vaughn", "Gus", "Avery", "Loyd", "Diego", "Alexis", "Adolph",
"Norris", "Millard", "Rocco", "Gonzalo", "Derick", "Rodrigo", "Gerry",
"Stacey", "Carmen", "Wiley", "Rigoberto", "Alphonso", "Ty", "Shelby",
"Rickie", "Noe", "Vern", "Bobbie", "Reed", "Jefferson", "Elvis", "Bernardo",
"Mauricio", "Hiram", "Donovan", "Basil", "Riley", "Ollie", "Nickolas",
"Maynard", "Scot", "Vince", "Quincy", "Eddy", "Sebastian", "Federico",
"Ulysses", "Heriberto", "Donnell", "Cole", "Denny", "Davis", "Gavin", "Emery",
"Ward", "Romeo", "Jayson", "Dion", "Dante", "Clement", "Coy", "Odell",
"Maxwell", "Jarvis", "Bruno", "Issac", "Mary", "Dudley", "Brock", "Sanford",
"Colby", "Carmelo", "Barney", "Nestor", "Hollis", "Stefan", "Donny", "Art",
"Linwood", "Beau", "Weldon", "Galen", "Isidro", "Truman", "Delmar",
"Johnathon", "Silas", "Frederic", "Dick", "Kirby", "Irwin", "Cruz", "Merlin",
"Merrill", "Charley", "Marcelino", "Lane", "Harris", "Cleo", "Carlo",
"Trenton", "Kurtis", "Hunter", "Aurelio", "Winfred", "Vito", "Collin",
"Denver", "Carter", "Leonel", "Emory", "Pasquale", "Mohammad", "Mariano",
"Danial", "Blair", "Landon", "Dirk", "Branden", "Adan", "Numbers", "Clair",
"Buford", "German", "Bernie", "Wilmer", "Joan", "Emerson", "Zachery",
"Fletcher", "Jacques", "Errol", "Dalton", "Monroe", "Josue", "Dominique",
"Edwardo", "Booker", "Wilford", "Sonny", "Shelton", "Carson", "Theron",
"Raymundo", "Daren", "Tristan", "Houston", "Robby", "Lincoln", "Jame",
"Genaro", "Gale", "Bennett", "Octavio", "Cornell", "Laverne", "Hung", "Arron",
"Antony", "Herschel", "Alva", "Giovanni", "Garth", "Cyrus", "Cyril", "Ronny",
"Stevie", "Lon", "Freeman", "Erin", "Duncan", "Kennith", "Carmine",
"Augustine", "Young", "Erich", "Chadwick", "Wilburn", "Russ", "Reid", "Myles",
"Anderson", "Morton", "Jonas", "Forest", "Mitchel", "Mervin", "Zane", "Rich",
"Jamel", "Lazaro", "Alphonse", "Randell", "Major", "Johnie", "Jarrett",
"Brooks", "Ariel", "Abdul", "Dusty", "Luciano", "Lindsey", "Tracey", "Seymour",
"Scottie", "Eugenio", "Mohammed", "Sandy", "Valentin", "Chance", "Arnulfo",
"Lucien", "Ferdinand", "Thad", "Ezra", "Sydney", "Aldo", "Rubin", "Royal",
"Mitch", "Earle", "Abe", "Wyatt", "Marquis", "Lanny", "Kareem", "Jamar",
"Boris", "Isiah", "Emile", "Elmo", "Aron", "Leopoldo", "Everette", "Josef",
"Gail", "Eloy", "Dorian", "Rodrick", "Reinaldo", "Lucio", "Jerrod", "Weston",
"Hershel", "Barton", "Parker", "Lemuel", "Lavern", "Burt", "Jules", "Gil",
"Eliseo", "Ahmad", "Nigel", "Efren", "Antwan", "Alden", "Margarito", "Coleman",
"Refugio", "Dino", "Osvaldo", "Les", "Deandre", "Normand", "Kieth", "Ivory",
"Andrea", "Trey", "Norberto", "Napoleon", "Jerold", "Fritz", "Rosendo",
"Milford", "Sang", "Deon", "Christope", "Alfonzo", "Lyman", "Josiah", "Brant",
"Wilton", "Rico", "Jamaal", "Dewitt", "Carol", "Brenton", "Yong", "Olin",
"Foster", "Faustino", "Claudio", "Judson", "Gino", "Edgardo", "Berry", "Alec",
"Tanner", "Jarred", "Donn", "Trinidad", "Tad", "Shirley", "Prince", "Porfirio",
"Odis", "Maria", "Lenard", "Chauncey", "Chang", "Tod", "Mel", "Marcelo", "Kory",
"Augustus", "Keven", "Hilario", "Bud", "Sal", "Rosario", "Orval", "Mauro",
"Dannie", "Zachariah", "Olen", "Anibal", "Milo", "Jed", "Frances", "Thanh",
"Dillon", "Amado", "Newton", "Connie", "Lenny", "Tory", "Richie", "Lupe",
"Horacio", "Brice", "Mohamed", "Delmer", "Dario", "Reyes", "Dee", "Mac",
"Jonah", "Jerrold", "Robt", "Hank", "Sung", "Rupert", "Rolland", "Kenton",
"Damion", "Chi", "Antone", "Waldo", "Fredric", "Bradly", "Quinn", "Kip", "Burl",
"Walker", "Tyree", "Jefferey", "Ahmed", "Willy", "Stanford", "Oren", "Noble",
"Moshe", "Mikel", "Enoch", "Brendon", "Quintin", "Jamison", "Florencio",
"Darrick", "Tobias", "Minh", "Hassan", "Giuseppe", "Demarcus", "Cletus",
"Tyrell", "Lyndon", "Keenan", "Werner", "Theo", "Geraldo", "Lou", "Columbus",
"Chet", "Bertram", "Markus", "Huey", "Hilton", "Dwain", "Donte", "Tyron",
"Omer", "Isaias", "Hipolito", "Fermin", "Chung", "Adalberto", "Valentine",
"Jamey", "Bo", "Barrett", "Whitney", "Teodoro", "Mckinley", "Maximo",
"Garfield", "Sol", "Raleigh", "Lawerence", "Abram", "Rashad", "King", "Emmitt",
"Daron", "Chong", "Samual", "Paris", "Otha", "Miquel", "Lacy", "Eusebio",
"Dong", "Domenic", "Darron", "Buster", "Antonia", "Wilber", "Renato", "Jc",
"Hoyt", "Haywood", "Ezekiel", "Chas", "Florentin", "Elroy", "Clemente",
"Arden", "Neville", "Kelley", "Edison", "Deshawn", "Carrol", "Shayne",
"Nathanial", "Jordon", "Danilo", "Claud", "Val", "Sherwood", "Raymon",
"Rayford", "Cristobal", "Ambrose", "Titus", "Hyman", "Felton", "Ezequiel",
"Erasmo", "Stanton", "Lonny", "Len", "Ike", "Milan", "Lino", "Jarod", "Herb",
"Andreas", "Walton", "Rhett", "Palmer", "Jude", "Douglass", "Cordell",
"Oswaldo", "Ellsworth", "Virgilio", "Toney", "Nathanael", "Del", "Britt",
"Benedict", "Mose", "Hong", "Leigh", "Johnson", "Isreal", "Gayle", "Garret",
"Fausto", "Asa", "Arlen", "Zack", "Warner", "Modesto", "Francesco", "Manual",
"Jae", "Gaylord", "Gaston", "Filiberto", "Deangelo", "Michale", "Granville",
"Wes", "Malik", "Zackary", "Tuan", "Nicky", "Eldridge", "Cristophe", "Cortez",
"Antione", "Malcom", "Long", "Korey", "Jospeh", "Colton", "Waylon", "Von",
"Hosea", "Shad", "Santo", "Rudolf", "Rolf", "Rey", "Renaldo", "Marcellus",
"Lucius", "Lesley", "Kristofer", "Boyce", "Benton", "Man", "Kasey", "Jewell",
"Hayden", "Harland", "Arnoldo", "Rueben", "Leandro", "Kraig", "Jerrell",
"Jeromy", "Hobert", "Cedrick", "Arlie", "Winford", "Wally", "Patricia",
"Luigi", "Keneth", "Jacinto", "Graig", "Franklyn", "Edmundo", "Sid", "Porter",
"Leif", "Lauren", "Jeramy", "Elisha", "Buck", "Willian", "Vincenzo", "Shon",
"Michal", "Lynwood", "Lindsay", "Jewel", "Jere", "Hai", "Elden", "Dorsey",
"Darell", "Broderick", "Alonso"]
RANDOM_PLANT_NAME_SOURCE = ["Abelia", "Acacia", "Acer", "Acevedo", "Afra", "Akina",
"Alaleh", "Alani", "Alder", "Almond", "Althea ", "Alyssum", "Amaranta", "Amaryllis",
"Anita", "Apricot", "Arousa", "Arusa", "Ash", "Aspen ", "Aster", "Astera", "Avishan",
"Ayame", "Ayla", "Azalea", "Azargol", "Azargoon", "Azarin", "Azhand", "Babuk",
"Bahar", "Baharak", "Banafsheh", "Barnacle", "Basil", "Bay", "Beech", "Begonia",
"Belladonna", "Birch", "Blackberry", "Blossom", "Bluebell ", "Booker", "Botan",
"Bramble", "Bryony", "Bud", "Burke ", "Buttercup", "Cactus", "Caltha", "Camelai",
"Camellia", "Carnation", "Cedar", "Cherise", "Cherry", "Cinnamon", "Cliantha",
"Clover", "Cosmos", "Cyclamen", "Cypress", "Daffodil", "Dahlia", "Daisy", "Dandelion",
"Daphne", "Dianthe", "Dianthus", "Enola ", "Eranthe", "Fern", "Fiorenza", "Fleur",
"Fern", "Fiorenza", "Fleur", "Flora", "Freesia", "Fuchsia", "Gardenia", "Garland",
"Gazania", "Geranium", "Ginger", "Gooseberry", "Gul", "Hawthorne", "Hazel", "Holly",
"Hollyhock", "Honeysuckle", "Hyacinth", "Iris ", "Ivy", "Jacaranda", "Jasmine",
"Jessamine", "Juniper", "Kalei", "Lantana", "Laurel", "Leilani", "Licorice ",
"Lilac", "Lily ", "Lobelia", "Lotus", "Magnolia", "Mallow ", "Mandrake", "Maple",
"Marguerite", "Marigold", "Mayflower", "Miki", "Mimosa", "Mulberry", "Myrtle ",
"Nihal", "Olive", "Pansy ", "Patience", "Peach", "Peony", "Peppermint", "Periwinkle",
"Persimmon", "Petunia", "Pimpernel", "Poppy", "Posey", "Primrose", "Pumpkin",
"Quince", "Rose", "Rosemary", "Saffron", "Sage", "Shamrock", "Snapdragon",
"Snowdrop", "Sorrel", "Sunflower", "Sweet Pea", "Tansy ", "Thistle", "Tiger-lily",
"Truffle", "Tulip", "Verbena ", "Violet", "Willow", "Yasaman", "Yasmin", "Yasminah",
"Yew", "Zara"]
RANDOM_STREET_SUFFIX_SOURCE = ["St.", "Ave.", "Blvd.", "Ln.", "Ct.", "Pl.", "Way"]
RANDOM_EMAIL_DOMAINS = ["example.com", "example.net", "example.org"]
# "gmail.com", "yahoo.com", "hotmail.com", "live.com",
# "comcast.net", "qwest.com",
RANDOM_CITY_SUFFIX_SOURCE = ["ville", "berg", "ton", "y", "", "land"]
|
buildingenergy/buildingenergy-platform
|
seed/test_helpers/factory/helpers.py
|
Python
|
apache-2.0
| 21,921
|
[
"Amber",
"Brian",
"COLUMBUS",
"CRYSTAL",
"Dalton",
"Desmond",
"FLEUR"
] |
902ae5e00919a02254fe01b1c14ccace5ead2beb5b06bb2dfe6f4ec3c1576161
|
#!/usr/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Pipeline
========
Utility functions to help setting up Yank configurations.
"""
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import collections
import copy
import inspect
import itertools
import json
import logging
import os
import re
import sys
import mdtraj
import mpiplus
import numpy as np
import openmmtools as mmtools
import openmoltools as moltools
import yaml
from pdbfixer import PDBFixer
from simtk import openmm, unit
from simtk.openmm.app import PDBFile
from . import utils
logger = logging.getLogger(__name__)
# ==============================================================================
# Utility functions
# ==============================================================================
def compute_squared_distances(molecule1_positions, molecule2_positions):
"""Compute the squared distances between the atoms of two molecules.
All the positions must be expressed in the same unit of measure.
Parameters
----------
molecule1_positions : numpy.ndarray
An Nx3 array, where N is the number of atoms, containing the positions of
the atoms of molecule1.
molecule2_positions : numpy.ndarray
An Mx3 array, where M is the number of atoms, containing the positions of
the atoms of molecule2.
Returns
-------
squared_distances : numpy.ndarray
An NxM array of squared distances. distances_squared[i][j] is the squared
distance between atom i of molecule1 and atom j of molecule 2.
"""
squared_distances = np.array([((molecule2_positions - atom_pos)**2).sum(1)
for atom_pos in molecule1_positions])
return squared_distances
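# Illustrative sketch (hypothetical coordinates, not part of the pipeline): a single
# atom at the origin against a single atom at (3, 4, 0) gives 3**2 + 4**2 = 25.
#   >>> compute_squared_distances(np.array([[0., 0., 0.]]), np.array([[3., 4., 0.]]))
#   array([[25.]])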
def compute_min_dist(mol_positions, *args):
"""Compute the minimum distance between a molecule and a set of other molecules.
All the positions must be expressed in the same unit of measure.
Parameters
----------
mol_positions : numpy.ndarray
An Nx3 array, where N is the number of atoms, containing the positions of
the atoms of the molecule for which we want to compute the minimum distance
from the others
Other parameters
----------------
args
A series of numpy.ndarrays containing the positions of the atoms of the other
molecules
Returns
-------
min_dist : float
The minimum distance between ``mol_positions`` and the other set of positions
"""
for argmol_positions in args:
# Compute squared distances. Each row is an array of distances
# from a mol_positions atom to all argmol_positions atoms.
distances2 = compute_squared_distances(mol_positions, argmol_positions)
# Find closest atoms and their distance
min_idx = np.unravel_index(distances2.argmin(), distances2.shape)
try:
min_dist = min(min_dist, np.sqrt(distances2[min_idx]))
except UnboundLocalError:
min_dist = np.sqrt(distances2[min_idx])
return min_dist
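# Illustrative sketch (hypothetical coordinates): the closest approach between the
# single-atom molecule at the origin and the two fixed sets below is 5.0.
#   >>> mol = np.array([[0., 0., 0.]])
#   >>> other1 = np.array([[3., 4., 0.]])   # closest atom, distance 5
#   >>> other2 = np.array([[0., 0., 12.]])  # distance 12
#   >>> compute_min_dist(mol, other1, other2)
#   5.0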
def compute_min_max_dist(mol_positions, *args):
"""Compute minimum and maximum distances between a molecule and a set of
other molecules.
All the positions must be expressed in the same unit of measure.
Parameters
----------
mol_positions : numpy.ndarray
An Nx3 array, where N is the number of atoms, containing the positions of
the atoms of the molecule for which we want to compute the minimum distance
from the others
Other parameters
----------------
args
A series of numpy.ndarrays containing the positions of the atoms of the other
molecules
Returns
-------
min_dist : float
The minimum distance between mol_positions and the atoms of the other positions
max_dist : float
The maximum distance between mol_positions and the atoms of the other positions
Examples
--------
>>> mol1_pos = np.array([[-1, -1, -1], [1, 1, 1]], np.float)
>>> mol2_pos = np.array([[2, 2, 2], [2, 4, 5]], np.float) # determine min dist
>>> mol3_pos = np.array([[3, 3, 3], [3, 4, 5]], np.float) # determine max dist
>>> min_dist, max_dist = compute_min_max_dist(mol1_pos, mol2_pos, mol3_pos)
>>> min_dist == np.linalg.norm(mol1_pos[1] - mol2_pos[0])
True
>>> max_dist == np.linalg.norm(mol1_pos[1] - mol3_pos[1])
True
"""
min_dist = None
for argmol_positions in args:
# Compute squared distances of all atoms. Each row is an array of distances
# from an atom in argmol_positions to all the atoms in mol_positions.
distances2 = compute_squared_distances(argmol_positions, mol_positions)
# Find distances of each arg_pos atom to mol_positions
distances2 = np.amin(distances2, axis=1)
# Find closest and distant atom
if min_dist is None:
min_dist = np.sqrt(distances2.min())
max_dist = np.sqrt(distances2.max())
else:
min_dist = min(min_dist, np.sqrt(distances2.min()))
max_dist = max(max_dist, np.sqrt(distances2.max()))
return min_dist, max_dist
def compute_radius_of_gyration(positions):
"""
Compute the radius of gyration of the specified coordinate set.
Parameters
----------
positions : simtk.unit.Quantity with units compatible with angstrom
The coordinate set (natoms x 3) for which the radius of gyration is to be computed.
Returns
-------
radius_of_gyration : simtk.unit.Quantity with units compatible with angstrom
The radius of gyration
"""
unit = positions.unit
# Get dimensionless receptor positions.
x = positions / unit
# Get dimensionless restrained atom coordinate.
xref = x.mean(0)
xref = np.reshape(xref, (1,3)) # (1,3) array
# Compute distances from restrained atom.
natoms = x.shape[0]
# Distances[i] is the distance from the centroid to particle i
distances = np.sqrt(((x - np.tile(xref, (natoms, 1)))**2).sum(1))
# Compute std dev of distances from restrained atom.
radius_of_gyration = distances.std() * unit
return radius_of_gyration
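# Illustrative sketch (hypothetical coordinates): for two atoms placed symmetrically
# around their centroid every atom-centroid distance is identical, so the standard
# deviation computed above (and hence the returned value) is zero.
#   >>> pos = unit.Quantity(np.array([[0., 0., 0.], [2., 0., 0.]]), unit.angstrom)
#   >>> compute_radius_of_gyration(pos)
#   Quantity(value=0.0, unit=angstrom)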
def compute_net_charge(system, atom_indices):
"""Compute the total net charge of a subset of atoms in the system.
Parameters
----------
system : simtk.openmm.System
The system object containing the atoms of interest.
atom_indices : list of int
Indices of the atoms of interest.
Returns
-------
net_charge : int
Total net charge as the sum of the partial charges of the atoms.
"""
atom_indices = set(atom_indices) # convert to set to speed up searching
net_charge = 0.0 * unit.elementary_charge
for force_index in range(system.getNumForces()):
force = system.getForce(force_index)
if isinstance(force, openmm.NonbondedForce):
for particle_index in range(force.getNumParticles()):
if particle_index in atom_indices:
net_charge += force.getParticleParameters(particle_index)[0]
atom_indices.remove(particle_index)
assert len(atom_indices) == 0
net_charge = int(round(net_charge / unit.elementary_charge))
return net_charge
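# Illustrative sketch (hypothetical two-particle system, not taken from the pipeline):
#   >>> sys_example = openmm.System()
#   >>> nb = openmm.NonbondedForce()
#   >>> for q in (1.0, -1.0):
#   ...     _ = sys_example.addParticle(22.99)
#   ...     _ = nb.addParticle(q, 0.3, 0.1)  # charge (e), sigma (nm), epsilon (kJ/mol)
#   >>> _ = sys_example.addForce(nb)
#   >>> compute_net_charge(sys_example, [0, 1])
#   0
#   >>> compute_net_charge(sys_example, [0])
#   1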
def find_alchemical_counterions(system, topography, region_name):
"""Return the atom indices of the ligand or solute counter ions.
In periodic systems, the solvation box needs to be neutral, and
if the decoupled molecule is charged, it will cause trouble. This
can be used to find a set of ions in the system that neutralize
the molecule, so that the solvation box will remain neutral all
the time.
Parameters
----------
system : simtk.openmm.System
The system object containing the atoms of interest.
topography : yank.Topography
The topography object holding the indices of the ions and the
ligand (for binding free energy) or solute (for transfer free
energy).
region_name : str
The region name in the topography (e.g. "ligand_atoms") for
which to find counter ions.
Returns
-------
counterions_indices : list of int
The list of atom indices in the system of the counter ions
neutralizing the region.
Raises
------
ValueError
If the topography region has no atoms, or if it impossible
to neutralize the region with the ions in the system.
"""
# Check whether we need to find counterions of ligand or solute.
atom_indices = getattr(topography, region_name)
if len(atom_indices) == 0:
raise ValueError("Cannot find counterions for region {}. "
"The region has no atoms.")
# If the net charge of alchemical atoms is 0, we don't need counterions.
mol_net_charge = compute_net_charge(system, atom_indices)
logger.debug('{} net charge: {}'.format(region_name, mol_net_charge))
if mol_net_charge == 0:
return []
# Find net charge of all ions in the system.
ions_net_charges = [(ion_id, compute_net_charge(system, [ion_id]))
for ion_id in topography.ions_atoms]
topology = topography.topology
ions_names_charges = [(topology.atom(ion_id).residue.name, ion_net_charge)
for ion_id, ion_net_charge in ions_net_charges]
logger.debug('Ions net charges: {}'.format(ions_names_charges))
# Find minimal subset of counterions whose charges sums to -mol_net_charge.
for n_ions in range(1, len(ions_net_charges) + 1):
for ion_subset in itertools.combinations(ions_net_charges, n_ions):
counterions_indices, counterions_charges = zip(*ion_subset)
if sum(counterions_charges) == -mol_net_charge:
return counterions_indices
# We couldn't find any subset of counterions neutralizing the region.
raise ValueError('Impossible to find a solution for region {}. '
'Net charge: {}, system ions: {}.'.format(
region_name, mol_net_charge, ions_names_charges))
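# Illustrative sketch of the subset search above with plain numbers (hypothetical ion
# indices and charges): for a region with net charge +2 and ions carrying charges
# [+1, -1, -1, +1], the first combination whose charges sum to -2 is the pair of -1
# ions, so their indices (11, 12) would be returned.
#   >>> ions = [(10, +1), (11, -1), (12, -1), (13, +1)]
#   >>> next(idx for n in range(1, len(ions) + 1)
#   ...      for idx, q in (zip(*s) for s in itertools.combinations(ions, n))
#   ...      if sum(q) == -2)
#   (11, 12)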
# See Amber manual Table 4.1 http://ambermd.org/doc12/Amber15.pdf
_OPENMM_TO_TLEAP_PBRADII = {'HCT': 'mbondi', 'OBC1': 'mbondi2', 'OBC2': 'mbondi2',
'GBn': 'bondi', 'GBn2': 'mbondi3'}
def get_leap_recommended_pbradii(implicit_solvent):
"""Return the recommended PBradii setting for LeAP.
Parameters
----------
implicit_solvent : str
The implicit solvent model.
Returns
-------
pbradii : str or object
The LeAP recommended PBradii for the model.
Raises
------
ValueError
If the implicit solvent model is not supported by OpenMM.
Examples
--------
>>> get_leap_recommended_pbradii('OBC2')
'mbondi2'
>>> from simtk.openmm.app import HCT
>>> get_leap_recommended_pbradii(HCT)
'mbondi'
"""
try:
return _OPENMM_TO_TLEAP_PBRADII[str(implicit_solvent)]
except KeyError:
raise ValueError('Implicit solvent {} is not supported.'.format(implicit_solvent))
_NONPERIODIC_NONBONDED_METHODS = [openmm.app.NoCutoff, openmm.app.CutoffNonPeriodic]
def create_system(parameters_file, box_vectors, create_system_args, system_options):
"""Create and return an OpenMM system.
Parameters
----------
parameters_file : simtk.openmm.app.AmberPrmtopFile or GromacsTopFile
The file used to create they system.
box_vectors : list of Vec3
The default box vectors of the system will be set to this value.
create_system_args : dict of str
The kwargs accepted by the ``createSystem()`` function of the ``parameters_file``.
system_options : dict
The kwargs to forward to ``createSystem()``.
Returns
-------
system : simtk.openmm.System
The system created.
"""
# Prepare createSystem() options
# OpenMM adopts camel case convention so we need to change the options format.
# Then we filter system options according to specific createSystem() args
system_options = {utils.underscore_to_camelcase(key): value
for key, value in system_options.items()}
system_options = {arg: system_options[arg] for arg in create_system_args
if arg in system_options}
# Determine if this will be an explicit or implicit solvent simulation.
if box_vectors is not None:
is_periodic = True
else:
is_periodic = False
# Adjust nonbondedMethod
# TODO: Ensure that selected method is appropriate.
if 'nonbondedMethod' not in system_options:
if is_periodic:
system_options['nonbondedMethod'] = openmm.app.CutoffPeriodic
else:
system_options['nonbondedMethod'] = openmm.app.NoCutoff
# Check for solvent configuration inconsistencies
# TODO: Check to make sure both files agree on explicit/implicit.
err_msg = ''
nonbonded_method = system_options['nonbondedMethod']
if is_periodic:
if 'implicitSolvent' in system_options and system_options['implicitSolvent'] is not None:
err_msg = 'Found periodic box in positions file and implicitSolvent specified.'
if nonbonded_method in _NONPERIODIC_NONBONDED_METHODS:
err_msg = ('Found periodic box in positions file but '
'nonbondedMethod is {}'.format(nonbonded_method))
else:
if nonbonded_method not in _NONPERIODIC_NONBONDED_METHODS:
err_msg = ('nonbondedMethod {} is periodic but could not '
'find periodic box in positions file.'.format(nonbonded_method))
if len(err_msg) != 0:
logger.error(err_msg)
raise RuntimeError(err_msg)
# Create system and update box vectors (if needed)
system = parameters_file.createSystem(removeCMMotion=False, **system_options)
if is_periodic:
system.setDefaultPeriodicBoxVectors(*box_vectors)
return system
def read_system_files(positions_file_path, parameters_file_path, system_options,
gromacs_include_dir=None, charmm_parameter_files=None):
"""Create a Yank arguments for a phase from system files.
Parameters
----------
positions_file_path : str
Path to system position file (e.g. 'complex.inpcrd/.gro/.pdb').
parameters_file_path : str
Path to system parameters file (e.g. 'complex.prmtop/.top/.xml/.psf').
system_options : dict
``system_options[phase]`` is a dictionary containing options to
pass to ``createSystem()``. If the parameters file is an OpenMM
system in XML format, this will be ignored.
gromacs_include_dir : str, optional
Path to directory in which to look for other files included
from the gromacs top file.
charmm_parameter_files : str, optional
Path to additional parameter files
Returns
-------
system : simtk.openmm.System
The OpenMM System built from the given files.
topology : openmm.app.Topology
The OpenMM Topology built from the given files.
sampler_state : openmmtools.states.SamplerState
The sampler state containing the positions of the atoms.
"""
# Load system files
parameters_file_extension = os.path.splitext(parameters_file_path)[1]
# Read OpenMM XML and PDB files.
if parameters_file_extension == '.xml':
logger.debug("xml: {}".format(parameters_file_path))
logger.debug("pdb: {}".format(positions_file_path))
positions_file = openmm.app.PDBFile(positions_file_path)
parameters_file = positions_file # Needed for topology.
with open(parameters_file_path, 'r') as f:
serialized_system = f.read()
system = openmm.XmlSerializer.deserialize(serialized_system)
box_vectors = positions_file.topology.getPeriodicBoxVectors()
if box_vectors is None:
box_vectors = system.getDefaultPeriodicBoxVectors()
# Read Amber prmtop and inpcrd files.
elif parameters_file_extension == '.prmtop':
logger.debug("prmtop: {}".format(parameters_file_path))
logger.debug("inpcrd: {}".format(positions_file_path))
parameters_file = openmm.app.AmberPrmtopFile(parameters_file_path)
positions_file = openmm.app.AmberInpcrdFile(positions_file_path)
box_vectors = positions_file.boxVectors
create_system_args = set(inspect.getargspec(openmm.app.AmberPrmtopFile.createSystem).args)
system = create_system(parameters_file, box_vectors, create_system_args, system_options)
# Read Gromacs top and gro files.
elif parameters_file_extension == '.top':
logger.debug("top: {}".format(parameters_file_path))
logger.debug("gro: {}".format(positions_file_path))
positions_file = openmm.app.GromacsGroFile(positions_file_path)
# gro files must contain box vectors, so we must determine whether system
# is non-periodic or not from provided nonbonded options
# WARNING: This uses the private API for GromacsGroFile, and may break.
if ('nonbonded_method' in system_options and
system_options['nonbonded_method'] in _NONPERIODIC_NONBONDED_METHODS):
logger.info('nonbonded_method = {}, so removing periodic box vectors '
'from gro file'.format(system_options['nonbonded_method']))
for frame, box_vectors in enumerate(positions_file._periodicBoxVectors):
positions_file._periodicBoxVectors[frame] = None
box_vectors = positions_file.getPeriodicBoxVectors()
parameters_file = openmm.app.GromacsTopFile(parameters_file_path,
periodicBoxVectors=box_vectors,
includeDir=gromacs_include_dir)
create_system_args = set(inspect.getargspec(openmm.app.GromacsTopFile.createSystem).args)
system = create_system(parameters_file, box_vectors, create_system_args, system_options)
# Read CHARMM format psf and pdb files
elif parameters_file_extension == '.psf':
logger.debug("psf: {}".format(parameters_file_path))
logger.debug("pdb: {}".format(positions_file_path))
parameters_file = openmm.app.CharmmPsfFile(parameters_file_path)
positions_file = openmm.app.PDBFile(positions_file_path)
params = openmm.app.CharmmParameterSet(*charmm_parameter_files)
box_vectors = positions_file.topology.getPeriodicBoxVectors()
if box_vectors is None:
box_vectors = system.getDefaultPeriodicBoxVectors()
parameters_file.setBox(box_vectors[0][0], box_vectors[1][1], box_vectors[2][2])
create_system_args = set(inspect.getargspec(openmm.app.CharmmPsfFile.createSystem).args)
system_options['params'] = params
system = create_system(parameters_file, box_vectors, create_system_args, system_options)
# Unsupported file format.
else:
raise ValueError('Unsupported format for parameter file {}'.format(parameters_file_extension))
# Store numpy positions and create SamplerState.
positions = positions_file.getPositions(asNumpy=True)
sampler_state = mmtools.states.SamplerState(positions=positions, box_vectors=box_vectors)
# Check to make sure number of atoms match between prmtop and inpcrd.
n_atoms_system = system.getNumParticles()
n_atoms_positions = positions.shape[0]
if n_atoms_system != n_atoms_positions:
err_msg = "Atom number mismatch: {} has {} atoms; {} has {} atoms.".format(
parameters_file_path, n_atoms_system, positions_file_path, n_atoms_positions)
logger.error(err_msg)
raise RuntimeError(err_msg)
return system, parameters_file.topology, sampler_state
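# Illustrative usage (hypothetical Amber file names and options; the snake_case keys
# are converted to the camelCase createSystem() arguments by create_system()):
#   >>> system, topology, sampler_state = read_system_files(
#   ...     'complex.inpcrd', 'complex.prmtop',
#   ...     system_options={'nonbonded_method': openmm.app.PME,
#   ...                     'nonbonded_cutoff': 1.0 * unit.nanometer})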
# =============================================================================
# SETUP PIPELINE UTILITY FUNCTIONS
# =============================================================================
# Map the OpenMM-style name for a solvent to the tleap
# name compatible with the solvateBox command.
_OPENMM_LEAP_SOLVENT_MODELS_MAP = {
'tip3p': 'TIP3PBOX',
'tip3pfb': 'TIP3PFBOX',
'tip4pew': 'TIP4PEWBOX',
'tip5p': 'TIP5PBOX',
'spce': 'SPCBOX',
}
# Map the OpenMM-style name for solvent to the tleap
# name for a list of files which would enable the
# solvent model to work. Serves as error checking,
# but is not foolproof.
_OPENMM_LEAP_SOLVENT_FILES_MAP = {
'tip3p': 'leaprc.water.tip3p',
'tip3pfb': 'leaprc.water.tip3p',
'tip4pew': 'leaprc.water.tip4pew',
'tip5p': 'leaprc.water.tip4pew', # Enables the EP atom type
'spce': 'leaprc.water.spce',
}
def remove_overlap(mol_positions, *args, **kwargs):
"""Remove any eventual overlap between a molecule and a set of others.
The method both randomly shifts and rotates the molecule (when overlapping atoms
are detected) until it does not clash with any other given molecule anymore. All
the others are kept fixed.
All the positions must be expressed in the same unit of measure.
Parameters
----------
mol_positions : numpy.ndarray
An Nx3 array, where N is the number of atoms, containing the positions of
the atoms of the molecule that we want to not clash with the others.
min_distance : float
The minimum distance accepted to consider the molecule not clashing with
the others. Must be in the same unit of measure of the positions.
sigma : float
The maximum displacement for a single step. Must be in the same unit of
measure of the positions.
Other parameters
----------------
args
A series of numpy.ndarrays containing the positions of the atoms of the
molecules that are kept fixed.
Returns
-------
x : numpy.ndarray
Positions of the atoms of the given molecule that no longer clash with the others.
"""
x = np.copy(mol_positions)
sigma = kwargs.get('sigma', 1.0)
min_distance = kwargs.get('min_distance', 1.0)
# Try until we have a non-overlapping conformation w.r.t. all fixed molecules
while compute_min_dist(x, *args) <= min_distance:
# Compute center of geometry
x0 = x.mean(0)
# Randomize orientation of ligand.
Rq = mmtools.mcmc.MCRotationMove.generate_random_rotation_matrix()
x = ((Rq * np.matrix(x - x0).T).T + x0).A
# Choose a random displacement vector and translate
x += sigma * np.random.randn(3)
return x
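# Illustrative sketch (hypothetical coordinates): the single-atom molecule below starts
# on top of the fixed atom, so it is randomly displaced until it is farther than
# min_distance away.
#   >>> fixed = np.array([[0., 0., 0.]])
#   >>> moved = remove_overlap(np.array([[0., 0., 0.]]), fixed,
#   ...                        min_distance=1.0, sigma=2.0)
#   >>> compute_min_dist(moved, fixed) > 1.0
#   True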
def pack_transformation(mol1_pos, mol2_pos, min_distance, max_distance):
"""Compute an affine transformation that solve clashes and fit mol2 in the box.
The method randomly shifts and rotates mol2 until all its atoms are within
min_distance and max_distance from mol1. The position of mol1 is kept fixed.
Every 200 failed iterations, the algorithm increases max_distance by 50%. It
raises an exception after 1000 iterations.
All the positions must be expressed in the same unit of measure.
Parameters
----------
mol1_pos : numpy.ndarray
An Nx3 array, where N is the number of atoms, containing the positions of
the atoms of the molecule that will be kept fixed.
mol2_pos : numpy.ndarray
An Nx3 array, where N is the number of atoms, containing the positions of
the atoms of the molecule that will be eventually moved.
min_distance : float
The minimum distance accepted to consider mol2 not clashing with mol1. It
must be in the same unit of measure of the positions.
max_distance : float
The maximum distance from mol1 to consider mol2 within the box. It must
be in the same unit of measure of the positions.
Returns
-------
transformation : numpy.ndarray
A 4x4 ndarray representing the affine transformation that translates and
rotates mol2.
"""
translation = None # we'll use this to check if we made changes to mol2_pos
transformation = np.identity(4)
# Compute center of geometry
x0 = mol2_pos.mean(0)
# Try until we have a non-overlapping conformation w.r.t. all fixed molecules
i = 0
min_dist, max_dist = compute_min_max_dist(mol1_pos, mol2_pos)
while min_dist < min_distance or max_distance <= max_dist:
# Select random atom of fixed molecule and use it to propose new x0 position
mol1_atom_idx = np.random.random_integers(0, len(mol1_pos) - 1)
translation = mol1_pos[mol1_atom_idx] + max_distance * np.random.randn(3) - x0
# Generate random rotation matrix
Rq = mmtools.mcmc.MCRotationMove.generate_random_rotation_matrix()
# Apply random transformation and test
x = ((Rq * np.matrix(mol2_pos - x0).T).T + x0).A + translation
min_dist, max_dist = compute_min_max_dist(mol1_pos, x)
# Check n iterations
i += 1
if i % 200 == 0:
max_distance *= 1.5
if i >= 1000:
err_msg = 'Cannot fit mol2 into solvation box!'
logger.error(err_msg)
raise RuntimeError(err_msg)
# Generate 4x4 affine transformation in molecule reference frame
if translation is not None:
transl_to_origin, transl_to_x0, rot_transl_matrix = (np.identity(4) for _ in range(3))
transl_to_origin[:3, 3] = -x0 # translate the molecule from x0 to origin
rot_transl_matrix[:3, :3] = Rq # rotate molecule in origin
rot_transl_matrix[:3, 3] = translation # translate molecule
transl_to_x0[:3, 3] = x0 # translate the molecule from origin to x0
transformation = transl_to_x0.dot(rot_transl_matrix.dot(transl_to_origin))
return transformation
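# Illustrative sketch of how the returned 4x4 affine matrix can be applied to mol2
# (hypothetical Nx3 arrays mol1_pos and mol2_pos, using homogeneous coordinates):
#   >>> transformation = pack_transformation(mol1_pos, mol2_pos,
#   ...                                      min_distance=1.0, max_distance=10.0)
#   >>> homogeneous = np.append(mol2_pos, np.ones((len(mol2_pos), 1)), axis=1)
#   >>> packed_mol2_pos = homogeneous.dot(transformation.T)[:, :3]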
def pull_close(fixed_mol_pos, translated_mol_pos, min_bound, max_bound):
"""Heuristic algorithm to quickly translate the ligand close to the receptor.
The distance of the ligand from the receptor here is defined as the shortest
Euclidean distance between an atom of the ligand and one of the receptor.
The molecules' positions will not be modified if the ligand is already at a
distance in the interval [min_bound, max_bound].
Parameters
----------
fixed_mol_pos : numpy.array
The positions of the molecule to keep fixed as a Nx3 array.
translated_mol_pos : numpy.array
The positions of the molecule to translate as a Nx3 array.
min_bound : float
Minimum distance from the receptor to the ligand. This should be high
enough for the ligand to not overlap the receptor atoms at the beginning
of the simulation.
max_bound : float
Maximum distance from the receptor to the ligand. This should be short
enough to make the ligand and the receptor interact since the beginning
of the simulation.
Returns
-------
translation : numpy.array
A 1x3 array containing the translation vector to apply to translated_mol_pos
to move the molecule at a distance between min_bound and max_bound from
fixed_mol_pos.
"""
goal_distance = (min_bound + max_bound) / 2
trans_pos = copy.deepcopy(translated_mol_pos) # positions that we can modify
# Find translation
final_translation = np.zeros(3)
while True:
# Compute squared distances between all atoms
# Each row is an array of distances from a translated atom to all fixed atoms
# We don't need to apply square root to everything
distances2 = np.array([((fixed_mol_pos - pos)**2).sum(1) for pos in trans_pos])
# Find closest atoms and their distance
min_idx = np.unravel_index(distances2.argmin(), distances2.shape)
min_dist = np.sqrt(distances2[min_idx])
# If closest atom is between boundaries translate ligand
if min_bound <= min_dist <= max_bound:
break
# Compute unit vector that connects receptor and ligand atom
if min_dist != 0:
direction = fixed_mol_pos[min_idx[1]] - trans_pos[min_idx[0]]
else: # any deterministic direction
direction = np.array([1, 1, 1])
direction = direction / np.sqrt((direction**2).sum()) # normalize
if max_bound < min_dist: # the atom is far away
translation = (min_dist - goal_distance) * direction
trans_pos += translation
final_translation += translation
elif min_dist < min_bound: # the two molecules overlap
max_dist = np.sqrt(distances2.max())
translation = (max_dist + goal_distance) * direction
trans_pos += translation
final_translation += translation
return final_translation
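# Illustrative sketch (hypothetical coordinates): the ligand atom starts 10 units from
# the receptor atom, so the returned translation pulls it to the midpoint of
# [min_bound, max_bound], i.e. to a distance of 3.
#   >>> receptor = np.array([[0., 0., 0.]])
#   >>> ligand = np.array([[10., 0., 0.]])
#   >>> pull_close(receptor, ligand, min_bound=2.0, max_bound=4.0)
#   array([-7.,  0.,  0.])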
def strip_protons(input_file_path, output_file_path):
"""
Remove all hydrogens from PDB file and save the result.
The input and output files cannot be the same file.
Parameters
----------
input_file_path : str
Full file path to the file to read, including extensions
output_file_path : str
Full file path to the file to save, including extensions
"""
output_file = open(output_file_path, 'w')
with open(input_file_path, 'r') as input_file:
for line in input_file:
if not (line[:6] == 'ATOM  ' and (line[12] == 'H' or line[13] == 'H')):
output_file.write(line)
output_file.close()
# For mutate_protein
_three_letter_code = {
'A': 'ALA',
'C': 'CYS',
'D': 'ASP',
'E': 'GLU',
'F': 'PHE',
'G': 'GLY',
'H': 'HIS',
'I': 'ILE',
'K': 'LYS',
'L': 'LEU',
'M': 'MET',
'N': 'ASN',
'P': 'PRO',
'Q': 'GLN',
'R': 'ARG',
'S': 'SER',
'T': 'THR',
'V': 'VAL',
'W': 'TRP',
'Y': 'TYR'
}
_one_letter_code = dict()
for one_letter in _three_letter_code.keys():
three_letter = _three_letter_code[one_letter]
_one_letter_code[three_letter] = one_letter
def decompose_mutation(mutation):
match = re.match(r'(\D)(\d+)(\D)', mutation)
try:
original_residue_name = _three_letter_code[match.group(1)]
residue_index = int(match.group(2))
mutated_residue_name = _three_letter_code[match.group(3)]
except AttributeError:
error_string = 'Mutation "{}" could not be parsed! '.format(mutation)
error_string += 'Should be of form {single letter}{integer}{another single letter}'
raise ValueError(error_string)
return original_residue_name, residue_index, mutated_residue_name
def generate_pdbfixer_mutation_code(original_residue_name, residue_index, mutated_residue_name):
return '{0:s}-{1:d}-{2:s}'.format(original_residue_name, residue_index, mutated_residue_name)
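# Illustrative sketch: the two helpers above turn a one-letter mutation string into the
# dash-separated code expected by PDBFixer.
#   >>> decompose_mutation('T315I')
#   ('THR', 315, 'ILE')
#   >>> generate_pdbfixer_mutation_code('THR', 315, 'ILE')
#   'THR-315-ILE'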
def process_tool_directive(directives, option, dispatch, allowed_values, yields_value=False):
"""Process a directive.
Parameters
----------
option : str
The name of the option to be processed.
Will remove this option from `directives` once processed.
dispatch : function
The function to call.
allowed_values : list
If not None, the value of directives[option] will be checked against this list
yields_value : boolean, default False
Tells this function to expect a return from the dispatch function and give it back as needed
"""
if option in directives:
value = directives[option]
# Validate options
if allowed_values is not None:
if value not in allowed_values:
raise ValueError("'{}' must be one of {}".format(option, allowed_values))
# Dispatch
output = dispatch(value)
# Delete the key once we've processed it
del directives[option]
if yields_value:
return output
return
def apply_pdbfixer(input_file_path, output_file_path, directives):
"""
Apply PDBFixer to make changes to the specified molecule.
Single mutants are supported in the form "T315I"
Double mutants are supported in the form "L858R/T790M"
The string "WT" still pushes the molecule through PDBFixer, but makes no mutations.
This is useful for testing.
Original PDB file numbering scheme is used.
Currently, only PDB files are supported.
pdbfixer is used to make the mutations
Parameters
----------
input_file_path : str
Full file path to the file to read, including extensions
output_file_path : str
Full file path to the file to save, including extensions
directives : dict
Dict containing directives for PDBFixer.
"""
DEFAULT_PH = 7.4 # default pH
# Make a copy since we will delete from the dictionary to validate
directives = copy.deepcopy(directives)
# Create a PDBFixer object
fixer = PDBFixer(input_file_path)
fixer.missingResidues = {}
# Dispatch functions
# These won't be documented individually because they are so short
def dispatch_pH(value):
pH = DEFAULT_PH
try:
pH = float(value)
logger.info('pdbfixer: Will use user-specified pH {}'.format(pH))
except:
raise ValueError("'ph' must be a floating-point number: found '{}'".format(value))
return pH
pH = process_tool_directive(directives, 'ph', dispatch_pH, None, yields_value=True)
def add_missing_residues(value):
if value == 'yes':
fixer.findMissingResidues()
logger.info('pdbfixer: Will add missing residues specified in SEQRES')
def apply_mutations(value):
# Extract chain id
chain_id = None
if 'chain_id' in value:
chain_id = value['chain_id']
if chain_id == 'none':
chain_id = None
# Extract mutations
mutations = value['mutations']
# Convert mutations to PDBFixer format
if mutations != 'WT':
pdbfixer_mutations = [generate_pdbfixer_mutation_code(*decompose_mutation(mutation))
for mutation in mutations.split('/')]
logger.info('pdbfixer: Will make mutations {} to chain_id {}.'.format(pdbfixer_mutations, chain_id))
fixer.applyMutations(pdbfixer_mutations, chain_id)
else:
logger.info('pdbfixer: No mutations will be applied since "WT" specified.')
def replace_nonstandard_residues(value):
if value == 'yes':
logger.info('pdbfixer: Will replace nonstandard residues.')
fixer.findNonstandardResidues()
fixer.replaceNonstandardResidues()
def remove_heterogens(value):
if value == 'water':
logger.info('pdbfixer: Will remove heterogens, retaining water.')
fixer.removeHeterogens(keepWater=True)
elif value == 'all':
logger.info('pdbfixer: Will remove heterogens, discarding water.')
fixer.removeHeterogens(keepWater=False)
def add_missing_atoms(value):
fixer.findMissingAtoms()
if value not in ('all', 'heavy'):
fixer.missingAtoms = {}
fixer.missingTerminals = {}
logger.info('pdbfixer: Will add missing atoms: {}.'.format(value))
fixer.addMissingAtoms()
if value in ('all', 'hydrogens'):
logger.info('pdbfixer: Will add hydrogens in default protonation state for pH {}.'.format(pH))
fixer.addMissingHydrogens(pH)
# Set default atom addition method
if 'add_missing_atoms' not in directives:
directives['add_missing_atoms'] = 'heavy'
# Dispatch directives
process_tool_directive(directives, 'add_missing_residues', add_missing_residues, [True, False])
process_tool_directive(directives, 'apply_mutations', apply_mutations, None)
process_tool_directive(directives, 'replace_nonstandard_residues', replace_nonstandard_residues, [True, False])
process_tool_directive(directives, 'remove_heterogens', remove_heterogens, ['all', 'water', 'none'])
process_tool_directive(directives, 'add_missing_atoms', add_missing_atoms, ['all', 'heavy', 'hydrogens', 'none'])
# Check that there were no extra options
if len(directives) > 0:
raise ValueError("The 'pdbfixer:' block contained some nodes that it didn't know how to process: {}".format(directives))
# Write the final structure
PDBFile.writeFile(fixer.topology, fixer.positions, open(output_file_path, 'w'))
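# Illustrative usage (hypothetical file paths and directives; the keys mirror the ones
# dispatched above):
#   >>> directives = {'apply_mutations': {'chain_id': 'A', 'mutations': 'T315I'},
#   ...               'remove_heterogens': 'water',
#   ...               'add_missing_atoms': 'all',
#   ...               'ph': 7.0}
#   >>> apply_pdbfixer('input.pdb', 'output.pdb', directives)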
def apply_modeller(input_file_path, output_file_path, directives):
"""
Apply Salilab Modeller to make changes to the specified molecule.
Single mutants are supported in the form "T315I"
Double mutants are not currently supported.
The string "WT" makes no mutations.
Original PDB file numbering scheme is used.
Currently, only PDB files are supported.
modeller is used to make the mutations. You must have a license file installed for this to work
Parameters
----------
input_file_path : str
Full file path to the file to read, including extensions
output_file_path : str
Full file path to the file to save, including extensions
directives : dict
Dict containing directives for modeller.
"""
if not utils.is_modeller_installed():
raise ImportError('Modeller and license must be installed to use this feature.')
import modeller
directives = copy.deepcopy(directives)
# Silence unnecessary output to the log files
modeller.log.none()
# Create modeller environment and point it to the PDB file
env = modeller.environ()
atom_files_directory = os.path.dirname(input_file_path)
atom_file_name = os.path.basename(input_file_path)
# Read in topology and parameter files
env.libs.topology.read(file='$(LIB)/top_heav.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')
env.io.atom_files_directory = [atom_files_directory]
alignment = modeller.alignment(env)
model = modeller.model(env, file=atom_file_name)
model_original_numbering = modeller.model(env, file=atom_file_name)
alignment.append_model(model, atom_files=atom_file_name, align_codes=atom_file_name)
def apply_mutations_modeller(value):
# Extract chain id
chain_id = None
if 'chain_id' in value:
chain_id = value['chain_id']
if chain_id == 'none':
chain_id = 0
# Extract mutations
mutations = value['mutations']
# Convert mutations to PDBFixer format
if mutations != 'WT':
modeller_mutations = [generate_pdbfixer_mutation_code(*decompose_mutation(mutation))
for mutation in mutations.split('/')]
if len(modeller_mutations) > 1:
raise ValueError('{} is a double mutant and not supported by Modeller currently.'.format(mutations))
else:
logger.info('modeller: Will make mutations {} to chain_id {}.'.format(modeller_mutations, chain_id))
sel = modeller.selection(model.chains[chain_id].residues[modeller_mutations[0].split('-')[1]])
sel.mutate(residue_type=modeller_mutations[0].split('-')[2])
alignment.append_model(model, align_codes=modeller_mutations[0])
model.clear_topology()
model.generate_topology(alignment[modeller_mutations[0]])
model.transfer_xyz(alignment)
model.build(initialize_xyz=False, build_method='INTERNAL_COORDINATES')
else:
logger.info('modeller: No mutations will be applied since "WT" specified.')
alignment.append_model(model, align_codes='WT')
process_tool_directive(directives, 'apply_mutations', apply_mutations_modeller, None)
# Check that there were no extra options
if len(directives) > 0:
raise ValueError("The 'modeller:' block contained some nodes that it didn't know how to process: {}".format(directives))
# Write the final model
model.res_num_from(model_original_numbering, alignment)
model.write(file=output_file_path)
def read_csv_lines(file_path, lines):
"""Return a list of CSV records.
The function takes care of ignoring comments and blank lines.
Parameters
----------
file_path : str
The path to the CSV file.
lines : 'all' or int
The index of the line to read or 'all' to return
the list of all lines.
Returns
-------
records : str or list of str
The CSV record if lines is an integer, or a list of CSV
records if it is 'all'.
"""
# Read all lines ignoring blank lines and comments.
with open(file_path, 'r') as f:
all_records = [line for line in f
if bool(line) and not line.strip().startswith('#')]
if lines == 'all':
return all_records
return all_records[lines]
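# Illustrative sketch (hypothetical file contents): for a file 'ligands.csv' holding
#     # header comment
#     mol0,c1ccccc1
#     mol1,CCO
# read_csv_lines('ligands.csv', lines=1) returns 'mol1,CCO\n', while lines='all'
# returns both data records with the comment line filtered out.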
# ==============================================================================
# SETUP DATABASE
# ==============================================================================
class SetupDatabase:
"""Provide utility functions to set up systems and molecules.
The object allows access to molecules, systems and solvents by 'id' and takes
care of parametrizing molecules and creating the AMBER prmtop and inpcrd files
describing systems.
Parameters
----------
setup_dir : str
Path to the main setup directory. Changing this means changing the database.
molecules : dict, Optional. Default: None
YAML description of the molecules.
Dictionary should be of form {molecule_id : molecule YAML description}
solvents : dict, Optional. Default: None
YAML description of the solvents.
Dictionary should be of form {solvent_id : solvent YAML description}
systems : dict, Optional. Default: None
YAML description of the systems.
Dictionary should be of form {system_id : system YAML description}
"""
SYSTEMS_DIR = 'systems' #: Stock system's sub-directory name
MOLECULES_DIR = 'molecules' #: Stock Molecules sub-directory name
CLASH_THRESHOLD = 1.5 #: distance in Angstroms to consider two atoms clashing
def __init__(self, setup_dir, molecules=None, solvents=None, systems=None):
"""Initialize the database."""
self.setup_dir = setup_dir
self.molecules = molecules
self.solvents = solvents
self.systems = systems
# Private attributes
self._pos_cache = {} # cache positions of molecules
self._processed_mols = set() # keep track of parametrized molecules
def get_molecule_dir(self, molecule_id):
"""Return the directory where the parameter files are stored.
Parameters
----------
molecule_id : str
The ID of the molecule.
Returns
-------
str
The path to the molecule directory.
"""
return os.path.join(self.setup_dir, self.MOLECULES_DIR, molecule_id)
def get_system_files_paths(self, system_id):
"""Return the paths to the systems files.
Parameters
----------
system_id : str
The ID of the system.
Returns
-------
system_files_paths : list of namedtuple
Elements of the list contain the paths to the system files for
each phase. Each namedtuple contains the fields position_path (e.g.
inpcrd, gro, or pdb) and parameters_path (e.g. prmtop, top, or xml).
"""
Paths = collections.namedtuple('Paths', ['position_path', 'parameters_path'])
system_dir = os.path.join(self.setup_dir, self.SYSTEMS_DIR, system_id)
if 'receptor' in self.systems[system_id]:
system_files_paths = [
Paths(position_path=os.path.join(system_dir, 'complex.inpcrd'),
parameters_path=os.path.join(system_dir, 'complex.prmtop')),
Paths(position_path=os.path.join(system_dir, 'solvent.inpcrd'),
parameters_path=os.path.join(system_dir, 'solvent.prmtop'))
]
elif 'solute' in self.systems[system_id]:
system_files_paths = [
Paths(position_path=os.path.join(system_dir, 'solvent1.inpcrd'),
parameters_path=os.path.join(system_dir, 'solvent1.prmtop')),
Paths(position_path=os.path.join(system_dir, 'solvent2.inpcrd'),
parameters_path=os.path.join(system_dir, 'solvent2.prmtop'))
]
else:
parameter_file_extensions = {'prmtop', 'top', 'xml', 'psf'}
system_files_paths = []
for phase_path_name in ['phase1_path', 'phase2_path']:
file_paths = self.systems[system_id][phase_path_name]
assert len(file_paths) == 2
# Make sure that the position file is first.
first_file_extension = os.path.splitext(file_paths[0])[1][1:]
if first_file_extension in parameter_file_extensions:
file_paths = list(reversed(file_paths))
# Append Paths object.
system_files_paths.append(Paths(position_path=file_paths[0],
parameters_path=file_paths[1]))
return system_files_paths
def is_molecule_setup(self, molecule_id):
"""Check whether the molecule has been processed previously.
The molecule must be set up if it needs to be parametrized by antechamber
(and the gaff.mol2 and frcmod files do not exist), if the molecule must be
generated by OpenEye, or if it needs to be extracted from a multi-molecule file.
An example to clarify the difference between the two return values: a protein
in a single-frame pdb does not have to be processed (since it does not go through
antechamber) thus the function will return ``is_setup=True`` and ``is_processed=False``.
Parameters
----------
molecule_id : str
The id of the molecule.
Returns
-------
is_setup : bool
True if the molecule's parameter files have been specified by the user
or if they have been generated by SetupDatabase.
is_processed : bool
True if parameter files have been generated previously by SetupDatabase
(i.e. if the parameter files were not manually specified by the user).
"""
# The only way to check if we processed the molecule in the current run is
# through self._processed_mols as 'parameters' will be changed after setup
if molecule_id in self._processed_mols:
return True, True
# Some convenience variables
molecule_descr = self.molecules[molecule_id]
molecule_dir = self.get_molecule_dir(molecule_id)
molecule_id_path = os.path.join(molecule_dir, molecule_id)
try:
extension = os.path.splitext(molecule_descr['filepath'])[1]
except KeyError:
extension = None
# The following checks must be performed in reverse order w.r.t. how they
# are executed in _setup_molecules()
files_to_check = {}
# If the molecule must go through antechamber we search for its output
if 'antechamber' in molecule_descr:
files_to_check = [('filepath', molecule_id_path + '.gaff.mol2'),
(['leap', 'parameters'], molecule_id_path + '.frcmod')]
# If the molecule must be generated by OpenEye, a mol2 should have been created
elif extension is None or extension == '.smiles' or extension == '.csv':
files_to_check = [('filepath', molecule_id_path + '.mol2')]
# If we have to strip the protons off a PDB, a new PDB should have been created
elif 'strip_protons' in molecule_descr and molecule_descr['strip_protons']:
files_to_check = [('filepath', molecule_id_path + '.pdb')]
# If we have to make mutations, a new PDB should be created
elif 'pdbfixer' in molecule_descr:
files_to_check = [('filepath', molecule_id_path + '.pdb')]
# If we have to make mutations using modeller, a new PDB should be created
elif 'modeller' in molecule_descr:
files_to_check = [('filepath', molecule_id_path + '.pdb')]
# If a single structure must be extracted we search for output
elif 'select' in molecule_descr:
files_to_check = [('filepath', molecule_id_path + extension)]
# Check if this needed to be processed at all
if not files_to_check:
return True, False
# Check if all output files exist
all_file_exist = True
for descr_key, file_path in files_to_check:
all_file_exist &= os.path.isfile(file_path) and os.path.getsize(file_path) > 0
if all_file_exist: # Make sure internal description is correct
try:
molecule_descr[descr_key] = file_path
except TypeError: # nested key, list are unhashable
molecule_descr[descr_key[0]][descr_key[1]].append(file_path)
# Compute and update small molecule net charge
if all_file_exist:
extension = os.path.splitext(molecule_descr['filepath'])[1]
# TODO what if this is a peptide? This should be computed in get_system()
if extension == '.mol2':
molecule_descr['net_charge'] = utils.Mol2File(molecule_descr['filepath']).net_charge
return all_file_exist, all_file_exist
def is_system_setup(self, system_id):
"""Check whether the system has been already processed.
Parameters
----------
system_id : str
The ID of the system.
Returns
-------
is_setup : bool
True if the system is ready to be used for an experiment. Either because
the system has directly provided the system files, or because it already
went through the setup pipeline.
is_processed : bool
True if the system has already gone through the setup pipeline.
"""
if 'ligand' in self.systems[system_id] or 'solute' in self.systems[system_id]:
system_files_paths = self.get_system_files_paths(system_id)
is_setup = (os.path.exists(system_files_paths[0].position_path) and
os.path.exists(system_files_paths[0].parameters_path) and
os.path.exists(system_files_paths[1].position_path) and
os.path.exists(system_files_paths[1].parameters_path))
return is_setup, is_setup
else:
return True, False
def get_system(self, system_id):
"""Make sure that the system files are set up and return the system folder.
If necessary, create the prmtop and inpcrd files from the given components.
The system files are generated with tleap. If no molecule specifies a general
force field, leaprc.ff14SB is loaded.
Parameters
----------
system_id : str
The ID of the system.
Returns
-------
system_files_paths : list of namedtuple
Elements of the list contain the paths to the system files for
each phase. Each namedtuple contains the fields position_path (e.g.
inpcrd, gro, or pdb) and parameters_path (e.g. prmtop, top, or xml).
"""
# Check if system has been already processed
system_files_paths = self.get_system_files_paths(system_id)
if self.is_system_setup(system_id)[0]:
return system_files_paths
system_descr = self.systems[system_id]
log_message = 'Setting up the systems for {} and {} using solvent {}'
if 'receptor' in system_descr: # binding free energy calculation
receptor_id = system_descr['receptor']
ligand_id = system_descr['ligand']
solvent_id = system_descr['solvent']
system_parameters = system_descr['leap']['parameters']
logger.info(log_message.format(receptor_id, ligand_id, solvent_id))
# solvent phase
logger.debug('Setting up solvent phase')
self._setup_system(system_files_paths[1].position_path, False,
0, system_parameters, solvent_id, ligand_id)
try:
alchemical_charge = int(round(self.molecules[ligand_id]['net_charge']))
except KeyError:
alchemical_charge = 0
# complex phase
logger.debug('Setting up complex phase')
self._setup_system(system_files_paths[0].position_path,
system_descr['pack'], alchemical_charge,
system_parameters, solvent_id, receptor_id,
ligand_id)
else: # partition/solvation free energy calculation
solute_id = system_descr['solute']
solvent1_id = system_descr['solvent1']
solvent2_id = system_descr['solvent2']
system_parameters = system_descr['leap']['parameters']
logger.info(log_message.format(solute_id, solvent1_id, solvent2_id))
# solvent1 phase
logger.debug('Setting up solvent1 phase')
self._setup_system(system_files_paths[0].position_path, False,
0, system_parameters, solvent1_id, solute_id)
# solvent2 phase
logger.debug('Setting up solvent2 phase')
self._setup_system(system_files_paths[1].position_path, False,
0, system_parameters, solvent2_id, solute_id)
return system_files_paths
def setup_all_systems(self):
"""Setup all molecules and systems in the database.
The method supports parallelization through MPI.
"""
# Find all molecules that need to be set up.
molecules_to_setup = []
for molecule_id in self.molecules:
if not self.is_molecule_setup(molecule_id)[0]:
molecules_to_setup.append(molecule_id)
molecules_to_setup.sort()
# Parallelize generation of all molecules among nodes.
mpiplus.distribute(self._setup_molecules,
distributed_args=molecules_to_setup,
send_results_to=None, group_size=1, sync_nodes=True)
# Find all systems that need to be set up.
systems_to_setup = []
for system_id in self.systems:
if not self.is_system_setup(system_id)[0]:
systems_to_setup.append(system_id)
systems_to_setup.sort()
# Parallelize generation of all systems among nodes.
mpiplus.distribute(self.get_system,
distributed_args=systems_to_setup,
send_results_to=None, group_size=1, sync_nodes=True)
def _generate_molecule(self, molecule_id):
"""Generate molecule using the OpenEye toolkit from name or smiles.
The molecule is charged with OpenEye's recommended AM1BCC charge
selection scheme and it is saved into the OpenEye molecules cache.
Parameters
----------
molecule_id : str
The id of the molecule as given in the YAML script
Returns
-------
molecule : OEMol
The generated molecule.
"""
mol_descr = self.molecules[molecule_id] # molecule description
try:
if 'name' in mol_descr:
molecule = moltools.openeye.iupac_to_oemol(mol_descr['name'])
elif 'smiles' in mol_descr:
molecule = moltools.openeye.smiles_to_oemol(mol_descr['smiles'])
molecule = moltools.openeye.get_charges(molecule, keep_confs=1)
except ImportError as e:
error_msg = ('requested molecule generation from name or smiles but '
'could not find OpenEye toolkit: ' + str(e))
logger.error(error_msg)
raise RuntimeError(error_msg)
return molecule
def _generate_residue_name(self, molecule_id):
"""Generates a residue name for a molecule.
The function guarantees that the same residue name is never generated twice.
Purely numeric residue names mess up the pipeline, so we generate
residue names of the form YXX, where Y is a letter and X are digits
(e.g. A01, A02, ..., Z99).
The order of the generated residue name is not guaranteed.
WARNING: The algorithm may fail when new molecules are added to
self.molecules after construction. This is not the case right now,
but it's good to keep in mind.
Parameters
----------
molecule_id : str
The molecule identifier.
Returns
-------
residue_name : str
A three-character residue name.
"""
# We need to associate a unique number to this molecule, and do
# so in such a way that distributing molecule setups over multiple
# MPI process still ends up in unique residue names for each molecule.
molecule_ids = sorted(self.molecules.keys())
n_molecule = molecule_ids.index(molecule_id)
assert n_molecule < 2600
# Build 3-character identifier.
character = chr(n_molecule // 100 + 65)
digits = str(n_molecule % 100)
residue_name = character + digits.zfill(2)
return residue_name
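# Illustrative sketch of the naming scheme above (hypothetical index into the sorted
# molecule ids): index 0 maps to 'A00', index 125 to 'B25', and index 2599 to 'Z99'.
#   >>> n_molecule = 125
#   >>> chr(n_molecule // 100 + 65) + str(n_molecule % 100).zfill(2)
#   'B25'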
def _setup_molecules(self, *args):
"""Set up the files needed to generate the system for all the molecules.
If OpenEye tools are installed, this generates the molecules when the source is
not a file. If two (or more) molecules generated by OpenEye have overlapping
atoms, the molecules are randomly shifted and rotated until the clash is resolved.
With the OpenEye toolkit installed, we also perform a sanity check to verify that
the molecules from files do not have overlapping atoms. An Exception is raised if
this is not the case.
If the Schrodinger suite is installed, this can enumerate tautomeric and protonation
states with epik when requested.
This also parametrizes the molecule with antechamber when requested.
Other parameters
----------------
args
All the molecules ids that compose the system. These molecules are the only
ones considered when trying to resolve the overlapping atoms.
"""
for mol_id in args:
net_charge = None # used by antechamber
mol_descr = self.molecules[mol_id]
# Have we already processed this molecule? Do we have to do it at all?
# We don't want to create the output folder if we don't need to
if self.is_molecule_setup(mol_id)[0]:
continue
# Create output directory if it doesn't exist
mol_dir = self.get_molecule_dir(mol_id)
if not os.path.exists(mol_dir):
os.makedirs(mol_dir)
try:
extension = os.path.splitext(mol_descr['filepath'])[1]
except KeyError:
extension = None
# Extract single model if this is a multi-model file
if 'select' in mol_descr:
model_idx = mol_descr['select']
single_file_path = os.path.join(mol_dir, mol_id + extension)
if extension == '.pdb':
# Create single-model PDB file
pdb_file = PDBFile(mol_descr['filepath'])
with open(single_file_path, 'w') as f:
PDBFile.writeHeader(pdb_file.topology, file=f)
PDBFile.writeModel(pdb_file.topology, pdb_file.getPositions(frame=model_idx), file=f)
# We might as well already cache the positions
self._pos_cache[mol_id] = pdb_file.getPositions(asNumpy=True, frame=model_idx) / unit.angstrom
elif extension == '.smiles' or extension == '.csv':
# Extract the correct line and save it in a new file.
smiles_line = read_csv_lines(mol_descr['filepath'], lines=model_idx)
with open(single_file_path, 'w') as f:
f.write(smiles_line)
elif extension == '.mol2' or extension == '.sdf':
if not utils.is_openeye_installed(oetools=('oechem',)):
raise RuntimeError('Cannot support {} files selection without OpenEye'.format(
extension[1:]))
oe_molecule = utils.load_oe_molecules(mol_descr['filepath'], molecule_idx=model_idx)
if extension == '.mol2':
mol_names = list(utils.Mol2File(mol_descr['filepath']).resnames)
utils.write_oe_molecule(oe_molecule, single_file_path, mol2_resname=mol_names[model_idx])
else:
utils.write_oe_molecule(oe_molecule, single_file_path)
else:
raise RuntimeError('Model selection is not supported for {} files'.format(extension[1:]))
# Save new file path
mol_descr['filepath'] = single_file_path
# Strip off protons if required
if 'strip_protons' in mol_descr and mol_descr['strip_protons']:
if extension != '.pdb':
raise RuntimeError('Cannot strip protons from {} files.'.format(extension[1:]))
output_file_path = os.path.join(mol_dir, mol_id + '.pdb')
strip_protons(mol_descr['filepath'], output_file_path)
mol_descr['filepath'] = output_file_path
# Apply PDBFixer if requested
if 'pdbfixer' in mol_descr:
if extension not in ['.pdb', '.PDB']:
raise RuntimeError('Cannot apply PDBFixer to {} files; a .pdb file is required.'.format(extension[1:]))
output_file_path = os.path.join(mol_dir, mol_id + '.pdb')
apply_pdbfixer(mol_descr['filepath'], output_file_path, mol_descr['pdbfixer'])
mol_descr['filepath'] = output_file_path
# Apply modeller if requested
if 'modeller' in mol_descr:
if extension not in ['.pdb', '.PDB']:
raise RuntimeError('Cannot apply modeller to {} files; a .pdb file is required.'.format(extension[1:]))
output_file_path = os.path.join(mol_dir, mol_id + '.pdb')
apply_modeller(mol_descr['filepath'], output_file_path, mol_descr['modeller'])
mol_descr['filepath'] = output_file_path
# Generate missing molecules with OpenEye. At the end of parametrization
# we update the 'filepath' key also for OpenEye-generated molecules so
# we don't need to keep track of the molecules we have already generated
if extension is None or extension == '.smiles' or extension == '.csv':
if not utils.is_openeye_installed(oetools=('oechem', 'oeiupac', 'oequacpac', 'oeomega')):
if extension is None:
raise RuntimeError('Cannot generate molecule {} without OpenEye licensed with '
'OEChem, OEIUPAC, OEOmega, and OEQuacPack.'.format(mol_id))
else:
raise RuntimeError('Cannot support {} files without OpenEye licensed with '
'OEChem, OEIUPAC, OEOmega, and OEQuacPack.'.format(extension[1:]))
                # Retrieve the first SMILES string (possibly extracted
                # while handling the 'select' keyword above)
if extension is not None:
# Get first record in CSV file.
first_line = read_csv_lines(mol_descr['filepath'], lines=0)
# Automatically detect if delimiter is comma or semicolon
for delimiter in ',;':
logger.debug("Attempt to parse smiles file with delimiter '{}'".format(delimiter))
line_fields = first_line.split(delimiter)
# If there is only one column, take that, otherwise take second
if len(line_fields) > 1:
smiles_str = line_fields[1].strip()
else:
smiles_str = line_fields[0].strip()
# try to generate the smiles and try new delimiter if it fails
mol_descr['smiles'] = smiles_str
try:
oe_molecule = self._generate_molecule(mol_id)
break
except (ValueError, RuntimeError):
oe_molecule = None
# Raise an error if no delimiter worked
if oe_molecule is None:
raise RuntimeError('Cannot detect SMILES file format.')
else:
# Generate molecule from mol_descr['smiles']
oe_molecule = self._generate_molecule(mol_id)
# Cache atom positions
self._pos_cache[mol_id] = utils.get_oe_mol_positions(oe_molecule)
# Write OpenEye generated molecules in mol2 files
# We update the 'filepath' key in the molecule description
mol_descr['filepath'] = os.path.join(mol_dir, mol_id + '.mol2')
# Generate a residue name for the SMILES molecule.
residue_name = self._generate_residue_name(mol_id)
moltools.openeye.molecule_to_mol2(oe_molecule, mol_descr['filepath'],
residue_name=residue_name)
# Enumerate protonation states with epik
if 'epik' in mol_descr:
epik_base_path = os.path.join(mol_dir, mol_id + '-epik.')
epik_mae_file = epik_base_path + 'mae'
epik_mol2_file = epik_base_path + 'mol2'
epik_sdf_file = epik_base_path + 'sdf'
# Run epik and convert from maestro to both mol2 and sdf
                # so that neither the penalties nor the residue name are lost
epik_kwargs = mol_descr['epik']
moltools.schrodinger.run_epik(mol_descr['filepath'], epik_mae_file, **epik_kwargs)
moltools.schrodinger.run_structconvert(epik_mae_file, epik_sdf_file)
moltools.schrodinger.run_structconvert(epik_mae_file, epik_mol2_file)
# Save new net charge from the i_epik_Tot_Q property
net_charge = int(moltools.schrodinger.run_proplister(epik_sdf_file)[0]['i_epik_Tot_Q'])
# Keep filepath consistent
mol_descr['filepath'] = epik_mol2_file
# Antechamber does not support sdf files so we need to convert them
extension = os.path.splitext(mol_descr['filepath'])[1]
if extension == '.sdf':
if not utils.is_openeye_installed(oetools=('oechem',)):
raise RuntimeError('Cannot support sdf files without OpenEye OEChem')
mol2_file_path = os.path.join(mol_dir, mol_id + '.mol2')
oe_molecule = utils.load_oe_molecules(mol_descr['filepath'], molecule_idx=0)
# Generate a residue name for the sdf molecule.
residue_name = self._generate_residue_name(mol_id)
moltools.openeye.molecule_to_mol2(oe_molecule, mol2_file_path,
residue_name=residue_name)
# Update filepath information
mol_descr['filepath'] = mol2_file_path
# Parametrize the molecule with antechamber
if 'antechamber' in mol_descr:
# Generate charges with OpenEye if requested
if 'openeye' in mol_descr:
if not utils.is_openeye_installed(oetools=('oechem', 'oequacpac', 'oeomega')):
err_msg = ('Cannot find OpenEye toolkit with OEChem and OEQuacPac to compute charges '
'for molecule {}').format(mol_id)
logger.error(err_msg)
raise RuntimeError(err_msg)
mol2_file_path = os.path.join(mol_dir, mol_id + '.mol2')
oe_molecule = utils.load_oe_molecules(mol_descr['filepath'], molecule_idx=0)
# Setting keep_confs = None keeps the original conformation
oe_molecule = moltools.openeye.get_charges(oe_molecule, keep_confs=None)
residue_name = utils.Mol2File(mol_descr['filepath']).resname
moltools.openeye.molecule_to_mol2(oe_molecule, mol2_file_path,
residue_name=residue_name)
utils.Mol2File(mol2_file_path).round_charge() # normalize charges
# We don't need Epik's or input net charge as antechamber will
# infer the net charge from the sum of the OpenEye charges.
net_charge = None
mol_descr['filepath'] = mol2_file_path
                # Check if the user specified a net_charge, but don't overwrite Epik's protonation state.
                elif net_charge is None:
                    net_charge = mol_descr['antechamber'].get('net_charge', None)
# Generate parameters
charge_method = mol_descr['antechamber']['charge_method']
input_mol_path = os.path.abspath(mol_descr['filepath'])
                # Use gaff2 if specified in the leap parameters, otherwise default to gaff
gaff = 'gaff2' if 'leaprc.gaff2' in mol_descr['leap']['parameters'] else 'gaff'
with moltools.utils.temporary_cd(mol_dir):
moltools.amber.run_antechamber(mol_id, input_mol_path,
charge_method=charge_method,
net_charge=net_charge,
gaff_version=gaff)
# Save new parameters paths
mol_descr['filepath'] = os.path.join(mol_dir, mol_id + '.gaff.mol2')
mol_descr['leap']['parameters'].append(os.path.join(mol_dir, mol_id + '.frcmod'))
# Normalize charges if not done before
if 'openeye' not in mol_descr:
utils.Mol2File(mol_descr['filepath']).round_charge()
# Determine small molecule net charge
extension = os.path.splitext(mol_descr['filepath'])[1]
if extension == '.mol2':
# TODO what if this is a peptide? this should be computed in get_system()
mol_descr['net_charge'] = utils.Mol2File(mol_descr['filepath']).net_charge
# Keep track of processed molecule
self._processed_mols.add(mol_id)
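    # Illustrative sketch (hypothetical values, not part of the original code): a
    # molecule description as consumed by _setup_molecules(). A SMILES molecule
    # parametrized with antechamber/GAFF using OpenEye AM1-BCC charges could be
    # described as
    #
    #     self.molecules['benzene'] = {
    #         'smiles': 'c1ccccc1',
    #         'openeye': {'quacpac': 'am1-bcc'},
    #         'antechamber': {'charge_method': None},
    #         'leap': {'parameters': ['leaprc.gaff2']},
    #     }
    #
    # After setup, 'filepath' points to the generated .gaff.mol2 file and the
    # leap 'parameters' list additionally contains the generated .frcmod file.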
def _setup_system(self, system_file_path, pack, alchemical_charge,
system_parameters, solvent_id, *molecule_ids, **kwargs):
"""Setup a system and create its prmtop/inpcrd files.
IMPORTANT: This function does not check if it's about to overwrite
files. Use get_system() for safe setup.
Parameters
----------
system_file_path : str
The path to either the prmtop or inpcrd output file. The other one
will be saved in the same folder with the same base name.
pack : bool
True to automatically solve atom clashes and reduce box dimension.
alchemical_charge : int
Number of counterions to alchemically modify during the simulation.
system_parameters : list of str
Contain the parameters file that must be loaded in tleap for the
system in addition to the molecule-specific ones.
solvent_id : str
The ID of the solvent.
ignore_ionic_strength : bool, optional
If True, no ions will be added to reach the ionic strength (default
is False).
save_amber_files : bool, optional
If False, prmtop and inpcrd files are not saved (default is True).
Other Parameters
----------------
*molecule_ids : list-like of str
List the IDs of the molecules to pack together in the system.
"""
# Get kwargs
ignore_ionic_strength = kwargs.pop('ignore_ionic_strength', False)
save_amber_files = kwargs.pop('save_amber_files', True)
assert len(kwargs) == 0
# Make sure molecules are set up
self._setup_molecules(*molecule_ids)
solvent = self.solvents[solvent_id]
# Start error tracking variables
# Water
known_solvent_files = [file for file in _OPENMM_LEAP_SOLVENT_FILES_MAP.values()]
loaded_water_files = [] # Detected loaded water files
def extend_list_of_waters(leap_parameters):
"""Extend the loaded_water_files list given the leap_parameters"""
loaded_water_files.extend([water for water
in leap_parameters
if water in known_solvent_files])
# Create tleap script
tleap = utils.TLeap()
# Load all parameters
# --------------------
tleap.new_section('Load parameters')
for mol_id in molecule_ids:
molecule_parameters = self.molecules[mol_id]['leap']['parameters']
# Track loaded water models
extend_list_of_waters(molecule_parameters)
tleap.load_parameters(*molecule_parameters)
extend_list_of_waters(system_parameters)
tleap.load_parameters(*system_parameters)
solvent_leap = solvent['leap']['parameters']
extend_list_of_waters(solvent_leap)
tleap.load_parameters(*solvent_leap)
# Load molecules and create complexes
# ------------------------------------
tleap.new_section('Load molecules')
for mol_id in molecule_ids:
tleap.load_unit(unit_name=mol_id, file_path=self.molecules[mol_id]['filepath'])
if len(molecule_ids) > 1:
            # Check that molecules don't have clashing atoms. Also, if the ligand
            # is too far away from the other molecule, we want to pull it closer.
# TODO this check should be available even without OpenEye
if pack and utils.is_openeye_installed(oetools=('oechem',)):
# Load atom positions of all molecules
positions = [0 for _ in molecule_ids]
for i, mol_id in enumerate(molecule_ids):
if mol_id not in self._pos_cache:
self._pos_cache[mol_id] = utils.get_oe_mol_positions(
utils.load_oe_molecules(self.molecules[mol_id]['filepath'], molecule_idx=0))
positions[i] = self._pos_cache[mol_id]
# Find and apply the transformation to fix clashing
# TODO this doesn't work with more than 2 molecule_ids
try:
max_dist = solvent['clearance'].value_in_unit(unit.angstrom) / 1.5
except KeyError:
max_dist = 10.0
transformation = pack_transformation(positions[0], positions[1],
self.CLASH_THRESHOLD, max_dist)
if (transformation != np.identity(4)).any():
logger.warning('Changing starting positions for {}.'.format(molecule_ids[1]))
tleap.new_section('Fix clashing atoms')
tleap.transform(molecule_ids[1], transformation)
# Create complex
tleap.new_section('Create complex')
tleap.combine('complex', *molecule_ids)
unit_to_solvate = 'complex'
else:
unit_to_solvate = molecule_ids[0]
# Configure solvent
# ------------------
if solvent['nonbonded_method'] == openmm.app.NoCutoff:
try:
implicit_solvent = solvent['implicit_solvent']
except KeyError: # vacuum
pass
else: # implicit solvent
tleap.new_section('Set GB radii to recommended values for OBC')
tleap.add_commands('set default PBRadii {}'.format(
get_leap_recommended_pbradii(implicit_solvent)))
else: # explicit solvent
tleap.new_section('Solvate systems')
# Solvate unit. Solvent models different than tip3p need parameter modifications.
solvent_model = solvent['solvent_model']
            # Check that the solvent model has loaded the appropriate leap parameters.
            # A mismatch does not guarantee a failure, but it is a good indicator of one.
if _OPENMM_LEAP_SOLVENT_FILES_MAP[solvent_model] not in loaded_water_files:
solvent_warning = ("WARNING: The solvent_model {} may not work for loaded "
"leaprc.water.X files.\n We expected {} to make your "
"solvent model work, but did not find it.\n "
"This is okay for tip4pew leaprc file with tip3p solvent_model, "
"but not the other way around.\nThis does "
"not mean the system will not build, but it may throw "
"an error.".format(solvent_model,
_OPENMM_LEAP_SOLVENT_FILES_MAP[solvent_model]))
logger.warning(solvent_warning)
leap_solvent_model = _OPENMM_LEAP_SOLVENT_MODELS_MAP[solvent_model]
clearance = solvent['clearance']
tleap.solvate(unit_name=unit_to_solvate, solvent_model=leap_solvent_model, clearance=clearance)
# First, determine how many ions we need to add for the ionic strength.
if not ignore_ionic_strength and solvent['ionic_strength'] != 0.0*unit.molar:
# Currently we support only monovalent ions.
for ion_name in [solvent['positive_ion'], solvent['negative_ion']]:
assert '2' not in ion_name and '3' not in ion_name
logger.debug('Estimating number of water molecules in the box.')
n_waters = self._get_number_box_waters(pack, alchemical_charge, system_parameters,
solvent_id, *molecule_ids)
logger.debug('Estimated number of water molecules: {}'.format(n_waters))
# Water molarity at room temperature: 998.23g/L / 18.01528g/mol ~= 55.41M
n_ions_ionic_strength = int(np.round(n_waters * solvent['ionic_strength'] / (55.41*unit.molar)))
logging.debug('Adding {} ions in {} water molecules to reach ionic strength '
'of {}'.format(n_ions_ionic_strength, n_waters, solvent['ionic_strength']))
else:
n_ions_ionic_strength = 0
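            # Worked example (hypothetical numbers) for the ionic-strength branch above:
            # with ~2000 water molecules and an ionic strength of 0.1 M, the formula gives
            # round(2000 * 0.1 / 55.41) = 4, i.e. 4 extra cation/anion pairs are added on
            # top of the neutralizing counterions.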
# Add alchemically modified ions that we don't already add for ionic strength.
if abs(alchemical_charge) > n_ions_ionic_strength:
n_alchemical_ions = abs(alchemical_charge) - n_ions_ionic_strength
try:
if alchemical_charge > 0:
ion = solvent['negative_ion']
else:
ion = solvent['positive_ion']
except KeyError:
err_msg = ('Found charged ligand but no indications for ions in '
'solvent {}').format(solvent_id)
logger.error(err_msg)
raise RuntimeError(err_msg)
tleap.add_ions(unit_name=unit_to_solvate, ion=ion,
num_ions=n_alchemical_ions, replace_solvent=True)
logging.debug('Adding {} {} ion to neutralize ligand charge of {}'
''.format(n_alchemical_ions, ion, alchemical_charge))
# Neutralizing solvation box
if 'positive_ion' in solvent:
tleap.add_ions(unit_name=unit_to_solvate, ion=solvent['positive_ion'],
replace_solvent=True)
if 'negative_ion' in solvent:
tleap.add_ions(unit_name=unit_to_solvate, ion=solvent['negative_ion'],
replace_solvent=True)
# Ions for the ionic strength must be added AFTER neutralization.
if n_ions_ionic_strength != 0:
tleap.add_ions(unit_name=unit_to_solvate, ion=solvent['positive_ion'],
num_ions=n_ions_ionic_strength, replace_solvent=True)
tleap.add_ions(unit_name=unit_to_solvate, ion=solvent['negative_ion'],
num_ions=n_ions_ionic_strength, replace_solvent=True)
# Check charge
tleap.new_section('Check charge')
tleap.add_commands('check ' + unit_to_solvate)
# Save output files
# ------------------
system_dir = os.path.dirname(system_file_path)
base_file_path = os.path.basename(system_file_path).split('.')[0]
base_file_path = os.path.join(system_dir, base_file_path)
# Create output directory
if not os.path.exists(system_dir):
os.makedirs(system_dir)
# Save prmtop, inpcrd and reference pdb files
tleap.new_section('Save prmtop and inpcrd files')
if save_amber_files:
tleap.save_unit(unit_to_solvate, system_file_path)
tleap.save_unit(unit_to_solvate, base_file_path + '.pdb')
# Save tleap script for reference
tleap.export_script(base_file_path + '.leap.in')
# Run tleap and log warnings
# Handle common errors we know of
try:
warnings = tleap.run()
except RuntimeError as e:
error = RuntimeError('Solvent {}: {}'.format(solvent_id, str(e)))
error.with_traceback(sys.exc_info()[2])
raise error
for warning in warnings:
logger.warning('TLeap: ' + warning)
def _get_number_box_waters(self, *args):
"""Build a system in a temporary directory and count the number of waters."""
with mmtools.utils.temporary_directory() as tmp_dir:
system_file_path = os.path.join(tmp_dir, 'temp_system.prmtop')
self._setup_system(system_file_path, *args, ignore_ionic_strength=True,
save_amber_files=False)
# Count number of waters of created system.
system_file_path = os.path.join(tmp_dir, 'temp_system.pdb')
system_traj = mdtraj.load(system_file_path)
n_waters = sum([1 for res in system_traj.topology.residues if res.is_water])
return n_waters
# ==============================================================================
# FUNCTIONS FOR ALCHEMICAL PATH OPTIMIZATION
# ==============================================================================
class _DCDTrajectoryFile(mdtraj.formats.dcd.DCDTrajectoryFile):
"""Convenience class extending MDTraj DCD trajectory file.
    This handles units and allows reading/writing SamplerStates instead
    of positions, cell_lengths, and cell_angles.
"""
def read(self, *args, **kwargs):
positions, cell_lengths, cell_angles = super().read(*args, **kwargs)
# Add standard DCD units.
return positions*unit.angstrom, cell_lengths*unit.angstrom, cell_angles*unit.degree
def read_as_sampler_states(self, *args, **kwargs):
positions, cell_lengths, cell_angles = self.read(*args, **kwargs)
sampler_states = []
for i in range(len(positions)):
box_vectors = mdtraj.utils.lengths_and_angles_to_box_vectors(
*(cell_lengths[i]/unit.nanometer),
*(cell_angles[i]/unit.degree)
) * unit.nanometer
sampler_state = mmtools.states.SamplerState(positions[i], box_vectors=box_vectors)
sampler_states.append(sampler_state)
return sampler_states
def write(self, xyz, cell_lengths=None, cell_angles=None, **kwargs):
# Convert to standard DCD units.
super().write(xyz/unit.angstrom, cell_lengths/unit.angstrom, cell_angles/unit.degree)
def write_sampler_state(self, sampler_state):
a, b, c, alpha, beta, gamma = mdtraj.utils.box_vectors_to_lengths_and_angles(
*(np.array(sampler_state.box_vectors / unit.angstrom)))
super().write(sampler_state.positions / unit.angstrom,
(a, b, c), (alpha, beta, gamma))
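# Illustrative sketch (hypothetical helper, not part of the original code): a
# round-trip through _DCDTrajectoryFile, which takes care of the unit conversions.
# It assumes periodic sampler states (i.e. box vectors are set).
def _example_sampler_state_roundtrip(sampler_states, file_path='example-states.dcd'):
    dcd_file = _DCDTrajectoryFile(file_path, 'w', force_overwrite=True)
    try:
        for sampler_state in sampler_states:
            dcd_file.write_sampler_state(sampler_state)
    finally:
        dcd_file.close()
    dcd_file = _DCDTrajectoryFile(file_path, 'r')
    try:
        return dcd_file.read_as_sampler_states()
    finally:
        dcd_file.close()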
def read_trailblaze_checkpoint_coordinates(checkpoint_dir_path, redistributed=True):
"""Read positions and box vectors stored as checkpoint by the trailblaze algorithm.
Parameters
----------
checkpoint_dir_path : str
The path to the directory containing the checkpoint information.
redistributed : bool, optional
        If True, the function will check whether the states were redistributed,
        and will return the set of coordinates that is most representative
        of the redistributed protocol.
Returns
-------
    sampler_states : List[openmmtools.states.SamplerState]
        ``sampler_states[i]`` contains positions and box vectors for the
intermediate state i generated by the trailblaze algorithm.
Raises
------
FileNotFoundError
If no file with the coordinates was found.
"""
positions_file_path = os.path.join(checkpoint_dir_path, 'coordinates.dcd')
states_map_file_path = os.path.join(checkpoint_dir_path, 'states_map.json')
    # Open the file if it exists.
try:
trajectory_file = _DCDTrajectoryFile(positions_file_path, 'r')
except OSError as e:
raise FileNotFoundError(str(e))
# Read info.
try:
sampler_states = trajectory_file.read_as_sampler_states()
finally:
trajectory_file.close()
# If the protocol was redistributed, use the states map to create
# a new set of sampler states that can be used as starting conditions
# for the redistributed protocol.
if redistributed:
try:
with open(states_map_file_path, 'r') as f:
states_map = json.load(f)
sampler_states = [sampler_states[i] for i in states_map]
except FileNotFoundError:
pass
return sampler_states
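# Illustrative sketch (hypothetical usage, not part of the original code): read back
# the coordinates saved by a previous trailblaze run to check how many intermediate
# states were generated. The directory name below is a made-up example.
def _example_inspect_trailblaze_checkpoint(checkpoint_dir_path='trailblaze-checkpoint'):
    sampler_states = read_trailblaze_checkpoint_coordinates(checkpoint_dir_path)
    logger.debug('Found %d intermediate states in %s', len(sampler_states), checkpoint_dir_path)
    return sampler_states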
def _resume_thermodynamic_trailblazing(checkpoint_dir_path, initial_protocol):
"""Resume a previously-run trailblaze execution.
Parameters
----------
checkpoint_dir_path : str
The path to the directory used to store the trailblaze information.
initial_protocol : Dict[str, List[float]]
The initial protocol containing only the first state of the path.
If no checkpoint file for the protocol is found, the file will
be initialized using this state.
Returns
-------
resumed_protocol : Dict[str, List[float]]
The resumed optimal protocol.
trajectory_file : _DCDTrajectoryFile
        A DCD file open for appending.
sampler_state : SamplerState or None
The last saved SamplerState or None if no frame was saved yet.
"""
    # We save the protocol in a YAML file and the positions in a DCD file.
protocol_file_path = os.path.join(checkpoint_dir_path, 'protocol.yaml')
stds_file_path = os.path.join(checkpoint_dir_path, 'states_stds.json')
positions_file_path = os.path.join(checkpoint_dir_path, 'coordinates.dcd')
# Create the directory, if it doesn't exist.
os.makedirs(checkpoint_dir_path, exist_ok=True)
# Load protocol and stds checkpoint file.
try:
# Parse the previously calculated optimal_protocol dict.
with open(protocol_file_path, 'r') as file_stream:
resumed_protocol = yaml.load(file_stream, Loader=yaml.FullLoader)
# Load the energy difference stds.
with open(stds_file_path, 'r') as f:
states_stds = json.load(f)
except FileNotFoundError:
resumed_protocol = initial_protocol
states_stds = [[], []]
    # Check whether positions information already exists.
try:
# We want the coordinates of the states that were sampled
# during the search not the states after redistribution.
sampler_states = read_trailblaze_checkpoint_coordinates(
checkpoint_dir_path, redistributed=False)
except FileNotFoundError:
len_trajectory = 0
else:
len_trajectory = len(sampler_states)
        # Raise an error if the algorithm was interrupted *while*
        # writing to disk. We store only the states that we simulated,
        # so there should be one less here.
for state_values in resumed_protocol.values():
if len_trajectory < len(state_values) - 1:
err_msg = ("The trailblaze algorithm was interrupted while "
"writing the checkpoint file and it is now unable "
"to resume. Please delete the files "
f"in {checkpoint_dir_path} and restart.")
raise RuntimeError(err_msg)
# When this is resumed, but the trajectory is already completed,
# the frame of the final end state has been already written, but
# we don't want to add it twice at the end of the trailblaze function.
len_trajectory = len(state_values) - 1
    # Whether the file exists or not, MDTraj doesn't support appending
# files so we open a new one and rewrite the configurations we
# have generated in the previous run.
trajectory_file = _DCDTrajectoryFile(positions_file_path, 'w',
force_overwrite=True)
if len_trajectory > 0:
for i in range(len_trajectory):
trajectory_file.write_sampler_state(sampler_states[i])
# Make sure the next search starts from the last saved position
# unless the previous calculation was interrupted before the
# first position could be saved.
sampler_state = sampler_states[-1]
else:
sampler_state = None
return resumed_protocol, states_stds, trajectory_file, sampler_state
def _cache_trailblaze_data(checkpoint_dir_path, optimal_protocol, states_stds,
trajectory_file, sampler_state):
"""Store on disk current state of the trailblaze run."""
# Determine the file paths of the stored data.
protocol_file_path = os.path.join(checkpoint_dir_path, 'protocol.yaml')
stds_file_path = os.path.join(checkpoint_dir_path, 'states_stds.json')
# Update protocol.
with open(protocol_file_path, 'w') as file_stream:
yaml.dump(optimal_protocol, file_stream)
# Update the stds between states.
with open(stds_file_path, 'w') as f:
json.dump(states_stds, f)
# Append positions of the state that we just simulated.
trajectory_file.write_sampler_state(sampler_state)
def _redistribute_trailblaze_states(old_protocol, states_stds, thermodynamic_distance):
"""Redistribute the states using a bidirectional estimate of the thermodynamic length.
Parameters
----------
old_protocol : Dict[str, List[float]]
The unidirectional optimal protocol.
states_stds : List[List[float]]
states_stds[j][i] is the standard deviation of the potential
difference between states i-1 and i computed in the j direction.
thermodynamic_distance : float
The distance between each pair of states.
Returns
-------
new_protocol : Dict[str, List[float]]
The new estimate of the optimal protocol.
states_map : List[int]
states_map[i] is the index of the state in the old protocol that
        is closest to the i-th state in the new protocol. This makes it possible to
map coordinates generated during trailblazing to the redistributed
protocol.
"""
# The parameter names in a fixed order.
parameter_names = [par_name for par_name in old_protocol]
    # Initialize the new protocol from the first state of the old protocol.
new_protocol = {par_name: [values[0]] for par_name, values in old_protocol.items()}
# The first state of the new protocol always maps to the first state of the old one.
states_map = [0]
def _get_old_protocol_state(state_idx):
"""Return a representation of the thermo state as a list of parameter values."""
return np.array([old_protocol[par_name][state_idx] for par_name in parameter_names])
def _add_state_to_new_protocol(state):
for parameter_name, new_state_value in zip(parameter_names, state.tolist()):
new_protocol[parameter_name].append(new_state_value)
# The thermodynamic length at 0 is 0.0.
states_stds[0] = [0.0] + states_stds[0]
states_stds[1] = [0.0] + states_stds[1]
# We don't have the energy difference std in the
# direction opposite to the search direction so we
# pad the list.
states_stds[1].append(states_stds[0][-1])
states_stds = np.array(states_stds)
# Compute a bidirectional estimate of the thermodynamic length.
old_protocol_thermo_length = np.cumsum(np.mean(states_stds, axis=0))
# Trailblaze again interpolating the thermodynamic length function.
current_state_idx = 0
current_state = _get_old_protocol_state(0)
last_state = _get_old_protocol_state(-1)
new_protocol_cum_thermo_length = 0.0
while (current_state != last_state).any():
# Find first state for which the accumulated standard
# deviation is greater than the thermo length threshold.
try:
while old_protocol_thermo_length[current_state_idx+1] - new_protocol_cum_thermo_length <= thermodynamic_distance:
current_state_idx += 1
except IndexError:
# If we got to the end, we just add the last state
# to the protocol and stop the while loop.
_add_state_to_new_protocol(last_state)
break
# Update current state.
current_state = _get_old_protocol_state(current_state_idx)
# The thermodynamic length from the last redistributed state to current state.
pair_thermo_length = old_protocol_thermo_length[current_state_idx] - new_protocol_cum_thermo_length
        # Now interpolate between the current state and the next one to find
        # the exact state for which the thermo length equals the threshold.
next_state = _get_old_protocol_state(current_state_idx+1)
differential = thermodynamic_distance - pair_thermo_length
differential /= old_protocol_thermo_length[current_state_idx+1] - old_protocol_thermo_length[current_state_idx]
new_state = current_state + differential * (next_state - current_state)
# Update cumulative thermo length.
new_protocol_cum_thermo_length += thermodynamic_distance
# Update states map.
closest_state_idx = current_state_idx if differential <= 0.5 else current_state_idx+1
states_map.append(closest_state_idx)
# Update redistributed protocol.
_add_state_to_new_protocol(new_state)
# The last state of the new protocol always maps to the last state of the old one.
states_map.append(len(old_protocol_thermo_length)-1)
return new_protocol, states_map
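# Worked example (hypothetical numbers) for _redistribute_trailblaze_states: with
# forward stds [0.8, 1.2] and reverse stds [1.0], the padded lists become
# [0.0, 0.8, 1.2] and [0.0, 1.0, 1.2], so the bidirectional estimate of the
# cumulative thermodynamic length along the old protocol is
# cumsum(mean) = [0.0, 0.9, 2.1] kT. New states are then placed every
# `thermodynamic_distance` kT along this curve by linear interpolation between
# neighboring states of the old protocol.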
def run_thermodynamic_trailblazing(
thermodynamic_state, sampler_state, mcmc_move, state_parameters,
parameter_setters=None, thermodynamic_distance=1.0,
distance_tolerance=0.05, n_samples_per_state=100,
reversed_direction=False, bidirectional_redistribution=True,
bidirectional_search_thermo_dist='auto',
global_parameter_functions=None, function_variables=tuple(),
checkpoint_dir_path=None
):
"""
Find an alchemical path by placing alchemical states at a fixed distance.
The distance between two states is estimated by collecting ``n_samples_per_state``
configurations through the MCMCMove in one of the two alchemical states,
and computing the standard deviation of the difference of potential energies
between the two states at those configurations.
The states of the protocol are chosen so that each pair has a distance
(in thermodynamic length) of ``thermodynamic_distance +- distance_tolerance``.
The thermodynamic length estimate (in kT) is based on the standard deviation
of the difference in potential energy between the two states.
The function is capable of resuming when interrupted if ``checkpoint_dir_path``
is specified. This will create two files called 'protocol.yaml' and
'coordinates.dcd' storing the protocol and initial positions and box
vectors for each state that are generated while running the algorithm.
    It is also possible to discretize a path specified through mathematical
    expressions via the arguments ``global_parameter_functions`` and
    ``function_variables``.
Parameters
----------
thermodynamic_state : openmmtools.states.CompoundThermodynamicState
The state of the alchemically modified system.
sampler_state : openmmtools.states.SamplerState
The sampler states including initial positions and box vectors.
mcmc_move : openmmtools.mcmc.MCMCMove
The MCMCMove to use for propagation.
state_parameters : List[Tuple[str, List[float]]]
Each element of this list is a tuple containing first the name
of the parameter to be modified (e.g. ``lambda_electrostatics``,
``lambda_sterics``) and a list specifying the initial and final
values for the path.
parameter_setters : Dict[str, Callable], optional
If the parameter cannot be set in the ``thermodynamic_state``
with a simple call to ``setattr``, you can pass a dictionary
mapping the parameter name to a function
``setter(thermodynamic_state, parameter_name, value)``. This
is useful for example to set global parameter function variables
with ``openmmtools.states.GlobalParameterState.set_function_variable``.
thermodynamic_distance : float, optional
The target distance (in thermodynamic length) between each pair of
states in kT. Default is 1.0 (kT).
distance_tolerance : float, optional
The tolerance on the found standard deviation. Default is 0.05 (kT).
n_samples_per_state : int, optional
How many samples to collect to estimate the overlap between two
states. Default is 100.
reversed_direction : bool, optional
If ``True``, the algorithm starts from the final state and traverses
the path from the end to the beginning. The returned path
discretization will still be ordered from the beginning to the
end following the order in ``state_parameters``. Default is ``False``.
bidirectional_redistribution : bool, optional
If ``True``, the states will be redistributed using the standard
deviation of the potential difference between states in both
directions. Default is ``True``.
bidirectional_search_thermo_dist : float or 'auto', optional
If ``bidirectional_redistribution`` is ``True``, the thermodynamic
distance between the sampled states used to collect data along
the path can be different than the thermodynamic distance after
        redistribution. The default ('auto') caps the thermodynamic
        distance used for trailblazing at 1 kT. Keeping this value small
        lowers the chance of obtaining very large stds in the opposite direction
        due to rare, dominating events in sections of the path where the overlap
        decreases quickly, which in turn may result in unreasonably long
        protocols.
global_parameter_functions : Dict[str, Union[str, openmmtools.states.GlobalParameterFunction]], optional
Map a parameter name to a mathematical expression as a string
or a ``openmmtools.states.GlobalParameterFunction`` object.
function_variables : List[str], optional
A list of function variables entering the mathematical
expressions.
checkpoint_dir_path : str, optional
The path to the directory used to store the trailblaze information.
If this is given and the files exist, the algorithm will use this
information to resume in case it was previously interrupted. If
``None``, no information is stored and it won't be possible to
resume. Default is ``None``.
Returns
-------
optimal_protocol : Dict[str, List[float]]
The estimated protocol. Each dictionary key is one of the
        parameters in ``state_parameters``, and its value is the
        list of values that it takes in each state of the path.
"""
def _state_parameter_setter(state, parameter_name, value):
"""Helper function to set state parameters."""
setattr(state, parameter_name, value)
def _function_variable_setter(state, parameter_name, value):
"""Helper function to set global parameter function variables."""
state.set_function_variable(parameter_name, value)
# Make sure that the state parameters to optimize have a clear order.
assert (isinstance(state_parameters, list) or isinstance(state_parameters, tuple))
# Determine the thermo distance to achieve during the search.
if not bidirectional_redistribution:
search_thermo_dist = thermodynamic_distance
else:
if bidirectional_search_thermo_dist == 'auto':
search_thermo_dist = min(1.0, thermodynamic_distance)
else:
search_thermo_dist = bidirectional_search_thermo_dist
# Create unordered helper variable.
state_parameter_dict = {x[0]: x[1] for x in state_parameters}
# Do not modify original thermodynamic_state.
thermodynamic_state = copy.deepcopy(thermodynamic_state)
# Handle mutable default arguments.
if parameter_setters is None:
parameter_setters = {}
if global_parameter_functions is None:
global_parameter_functions = {}
# Make sure that the same parameter was not listed both as
# a function and a parameter to iterate.
for parameter_name, _ in state_parameters:
if parameter_name in global_parameter_functions:
raise ValueError(f"Cannot specify {parameter_name} in "
"'state_parameters' and 'global_parameter_functions'")
# Make sure all function variables are in state_parameters.
for function_variable in function_variables:
if function_variable not in state_parameter_dict:
raise ValueError(f"The variable '{function_variable}' must be given in 'state_parameters'")
# Use special setters for the function variables.
for function_variable in function_variables:
        if function_variable not in parameter_setters:
parameter_setters[function_variable] = _function_variable_setter
# Create default setters for all other state parameters to later avoid if/else or try/except.
for parameter_name, _ in state_parameters:
if parameter_name not in parameter_setters:
parameter_setters[parameter_name] = _state_parameter_setter
# Initialize all function variables in the thermodynamic state.
for function_variable in function_variables:
value = state_parameter_dict[function_variable][0]
thermodynamic_state.set_function_variable(function_variable, value)
# Assign global parameter functions.
for parameter_name, global_parameter_function in global_parameter_functions.items():
# If the user doesn't pass an instance of function class, create one.
if isinstance(global_parameter_function, str):
global_parameter_function = mmtools.states.GlobalParameterFunction(global_parameter_function)
setattr(thermodynamic_state, parameter_name, global_parameter_function)
# Reverse the direction of the algorithm if requested.
if reversed_direction:
state_parameters = [(par_name, end_states.__class__(reversed(end_states)))
for par_name, end_states in reversed(state_parameters)]
# Initialize protocol with the starting value.
optimal_protocol = {par: [values[0]] for par, values in state_parameters}
# Keep track of potential std between states in both directions
# of the path so that we can redistribute the states later.
# At the end of the protocol this will have the same length
# of the protocol minus one. The inner lists are for the forward
# and reversed direction stds respectively.
states_stds = [[], []]
# Check to see whether a trailblazing algorithm is already in progress,
# and if so, restore to the previously checkpointed state.
if checkpoint_dir_path is not None:
optimal_protocol, states_stds, trajectory_file, resumed_sampler_state = _resume_thermodynamic_trailblazing(
checkpoint_dir_path, optimal_protocol)
# Start from the last saved conformation.
if resumed_sampler_state is not None:
sampler_state = resumed_sampler_state
# We keep track of the previous state in the optimal protocol
# that we'll use to compute the stds in the opposite direction.
if len(states_stds[0]) == 0:
previous_thermo_state = None
else:
previous_thermo_state = copy.deepcopy(thermodynamic_state)
# Make sure that thermodynamic_state is in the last explored
# state, whether the algorithm was resumed or not.
for state_parameter in optimal_protocol:
parameter_setters[state_parameter](thermodynamic_state, state_parameter,
optimal_protocol[state_parameter][-1])
if previous_thermo_state is not None:
parameter_setters[state_parameter](previous_thermo_state, state_parameter,
optimal_protocol[state_parameter][-2])
# We change only one parameter at a time.
for state_parameter, values in state_parameters:
logger.debug('Determining alchemical path for parameter {}'.format(state_parameter))
# Is this a search from 0 to 1 or from 1 to 0?
search_direction = np.sign(values[1] - values[0])
# If the parameter doesn't change, continue to the next one.
if search_direction == 0:
continue
# Gather data until we get to the last value.
while optimal_protocol[state_parameter][-1] != values[-1]:
# Simulate current thermodynamic state to collect samples.
sampler_states = []
simulated_energies = np.zeros(n_samples_per_state)
for i in range(n_samples_per_state):
mcmc_move.apply(thermodynamic_state, sampler_state)
sampler_states.append(copy.deepcopy(sampler_state))
# Keep track of the thermo state we use for the reweighting.
reweighted_thermo_state = None
# Find first state that doesn't overlap with simulated one
# with std(du) within search_thermo_dist +- distance_tolerance.
# We stop anyway if we reach the last value of the protocol.
std_energy = 0.0
current_parameter_value = optimal_protocol[state_parameter][-1]
while (abs(std_energy - search_thermo_dist) > distance_tolerance and
not (current_parameter_value == values[1] and std_energy < search_thermo_dist)):
# Determine next parameter value to compute.
if np.isclose(std_energy, 0.0):
                    # This is the first iteration or the two states overlap significantly
                    # (e.g. small molecule in vacuum). Just advance by 1/20 of the parameter range.
old_parameter_value = current_parameter_value
current_parameter_value += (values[1] - values[0]) / 20.0
else:
# Assume std_energy(parameter_value) is linear to determine next value to try.
derivative_std_energy = ((std_energy - old_std_energy) /
(current_parameter_value - old_parameter_value))
old_parameter_value = current_parameter_value
current_parameter_value += (search_thermo_dist - std_energy) / derivative_std_energy
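                # Worked example (hypothetical numbers): if the previous attempt at
                # lambda=0.95 gave std_du=0.6 kT and the current attempt at lambda=0.90
                # gives std_du=1.4 kT while targeting 1.0 kT, the estimated slope is
                # (1.4 - 0.6) / (0.90 - 0.95) = -16 kT per unit lambda, so the next
                # guess is 0.90 + (1.0 - 1.4) / (-16) = 0.925.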
# Keep current_parameter_value inside bound interval.
if search_direction * current_parameter_value > values[1]:
current_parameter_value = values[1]
assert search_direction * (optimal_protocol[state_parameter][-1] - current_parameter_value) < 0
# Determine the thermo states at which we need to compute the energies.
                # If this is the first attempt, also compute the reduced potentials at
                # the simulated state and at the previous state to estimate the standard
                # deviation in the opposite direction.
if reweighted_thermo_state is None:
# First attempt.
reweighted_thermo_state = copy.deepcopy(thermodynamic_state)
computed_thermo_states = [reweighted_thermo_state, thermodynamic_state]
if previous_thermo_state is not None:
computed_thermo_states.append(previous_thermo_state)
else:
computed_thermo_states = [reweighted_thermo_state]
# Set the reweighted state to the current parameter value.
parameter_setters[state_parameter](reweighted_thermo_state, state_parameter, current_parameter_value)
# Compute all energies.
energies = np.empty(shape=(len(computed_thermo_states), n_samples_per_state))
for i, sampler_state in enumerate(sampler_states):
energies[:,i] = mmtools.states.reduced_potential_at_states(
sampler_state, computed_thermo_states, mmtools.cache.global_context_cache)
# Cache the simulated energies for the next iteration.
if len(computed_thermo_states) > 1:
simulated_energies = energies[1]
# Compute the energy difference std in the direction: simulated state -> previous state.
if len(computed_thermo_states) > 2:
denergies = energies[2] - simulated_energies
states_stds[1].append(float(np.std(denergies, ddof=1)))
# Compute the energy difference std between the currently simulated and the reweighted states.
old_std_energy = std_energy
denergies = energies[0] - simulated_energies
std_energy = np.std(denergies, ddof=1)
logger.debug('trailblazing: state_parameter {}, simulated_value {}, current_parameter_value {}, '
'std_du {}'.format(state_parameter, optimal_protocol[state_parameter][-1],
current_parameter_value, std_energy))
# Store energy difference std in the direction: simulated state -> reweighted state.
states_stds[0].append(float(std_energy))
# Update variables for next iteration.
previous_thermo_state = copy.deepcopy(thermodynamic_state)
thermodynamic_state = reweighted_thermo_state
# Update the optimal protocol with the new value of this parameter.
# The other parameters remain fixed.
for par_name in optimal_protocol:
                # Make sure we append a Python float to the list.
                # Lists of numpy types sometimes give problems.
if par_name == state_parameter:
protocol_value = float(current_parameter_value)
else:
protocol_value = float(optimal_protocol[par_name][-1])
optimal_protocol[par_name].append(protocol_value)
# Save the updated checkpoint file to disk.
if checkpoint_dir_path is not None:
_cache_trailblaze_data(checkpoint_dir_path, optimal_protocol, states_stds,
trajectory_file, sampler_state)
if checkpoint_dir_path is not None:
        # We haven't simulated the last state, so we reuse the positions of the second-to-last state for it.
trajectory_file.write_sampler_state(sampler_state)
trajectory_file.close()
# Redistribute the states using the standard deviation estimates in both directions.
if bidirectional_redistribution:
optimal_protocol, states_map = _redistribute_trailblaze_states(
optimal_protocol, states_stds, thermodynamic_distance)
# Save the states map for reading the coordinates correctly.
if checkpoint_dir_path is not None:
states_map_file_path = os.path.join(checkpoint_dir_path, 'states_map.json')
if bidirectional_redistribution:
with open(states_map_file_path, 'w') as f:
json.dump(states_map, f)
elif os.path.isfile(states_map_file_path):
# If there's an old file because the path was previously redistributed,
# we delete it so that read_trailblaze_checkpoint_coordinates will
            # return the coordinates associated with the most recently-generated path.
os.remove(states_map_file_path)
# If we have traversed the path in the reversed direction, re-invert
# the order of the discretized path.
if reversed_direction:
for par_name, values in optimal_protocol.items():
optimal_protocol[par_name] = values.__class__(reversed(values))
# If we used global parameter functions, the optimal_protocol at this
# point is a discretization of the function_variables, not the original
# parameters so we convert it back.
len_protocol = len(next(iter(optimal_protocol.values())))
function_variables_protocol = {var: optimal_protocol.pop(var) for var in function_variables}
original_parameters_protocol = {par: [] for par in global_parameter_functions}
# Rebuild the optimal discretization for the original parameters.
for state_idx in range(len_protocol):
# Set the function variable value so that the function is computed.
for function_variable in function_variables:
value = function_variables_protocol[function_variable][state_idx]
thermodynamic_state.set_function_variable(function_variable, value)
# Recover original parameters.
for parameter_name in original_parameters_protocol:
value = getattr(thermodynamic_state, parameter_name)
original_parameters_protocol[parameter_name].append(value)
# Update the total protocol.
optimal_protocol.update(original_parameters_protocol)
logger.debug('Alchemical path found: {}'.format(optimal_protocol))
return optimal_protocol
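# Illustrative sketch (hypothetical usage, not part of the original code): discretize
# a decoupling path that first turns off electrostatics and then sterics, spacing
# states roughly 1 kT apart and checkpointing to disk so that the search can resume.
def _example_trailblaze_decoupling(thermodynamic_state, sampler_state):
    mcmc_move = mmtools.mcmc.LangevinDynamicsMove(n_steps=500)
    return run_thermodynamic_trailblazing(
        thermodynamic_state, sampler_state, mcmc_move,
        state_parameters=[('lambda_electrostatics', [1.0, 0.0]),
                          ('lambda_sterics', [1.0, 0.0])],
        thermodynamic_distance=1.0,
        checkpoint_dir_path='trailblaze-checkpoint',
    )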
if __name__ == '__main__':
import doctest
doctest.testmod()
|
choderalab/yank
|
Yank/pipeline.py
|
Python
|
mit
| 116,266
|
[
"Amber",
"CHARMM",
"Gromacs",
"MDTraj",
"NetCDF",
"OpenMM"
] |
ab34eaadcf4e3dccc17851771253c3560e6c3e405ecdce54e713cbd5ac4977ac
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Classes and tools for storing and handling parsed data"""
import logging
from collections import namedtuple
import numpy
from cclib.method import Electrons
from cclib.method import orbitals
Attribute = namedtuple('Attribute', ['type', 'json_key', 'attribute_path'])
class ccData:
"""Stores data extracted by cclib parsers
Description of cclib attributes:
aonames -- atomic orbital names (list of strings)
aooverlaps -- atomic orbital overlap matrix (array[2])
atombasis -- indices of atomic orbitals on each atom (list of lists)
atomcharges -- atomic partial charges (dict of arrays[1])
atomcoords -- atom coordinates (array[3], angstroms)
atommasses -- atom masses (array[1], daltons)
atomnos -- atomic numbers (array[1])
atomspins -- atomic spin densities (dict of arrays[1])
ccenergies -- molecular energies with Coupled-Cluster corrections (array[2], eV)
charge -- net charge of the system (integer)
coreelectrons -- number of core electrons in atom pseudopotentials (array[1])
        dispersionenergies -- molecular dispersion energy corrections (array[1], eV)
enthalpy -- sum of electronic and thermal enthalpies (float, hartree/particle)
entropy -- entropy (float, hartree/(particle*kelvin))
etenergies -- energies of electronic transitions (array[1], 1/cm)
etoscs -- oscillator strengths of electronic transitions (array[1])
etdips -- electric transition dipoles of electronic transitions (array[2], ebohr)
etveldips -- velocity-gauge electric transition dipoles of electronic transitions (array[2], ebohr)
etmagdips -- magnetic transition dipoles of electronic transitions (array[2], ebohr)
etrotats -- rotatory strengths of electronic transitions (array[1], ??)
etsecs -- singly-excited configurations for electronic transitions (list of lists)
etsyms -- symmetries of electronic transitions (list of string)
freeenergy -- sum of electronic and thermal free energies (float, hartree/particle)
fonames -- fragment orbital names (list of strings)
fooverlaps -- fragment orbital overlap matrix (array[2])
fragnames -- names of fragments (list of strings)
frags -- indices of atoms in a fragment (list of lists)
gbasis -- coefficients and exponents of Gaussian basis functions (PyQuante format)
geotargets -- targets for convergence of geometry optimization (array[1])
        geovalues -- current values for convergence of geometry optimization (array[1])
grads -- current values of forces (gradients) in geometry optimization (array[3])
hessian -- elements of the force constant matrix (array[1])
homos -- molecular orbital indices of HOMO(s) (array[1])
metadata -- various metadata about the package and computation (dict)
mocoeffs -- molecular orbital coefficients (list of arrays[2])
moenergies -- molecular orbital energies (list of arrays[1], eV)
moments -- molecular multipole moments (list of arrays[], a.u.)
mosyms -- orbital symmetries (list of lists)
mpenergies -- molecular electronic energies with Møller-Plesset corrections (array[2], eV)
mult -- multiplicity of the system (integer)
natom -- number of atoms (integer)
nbasis -- number of basis functions (integer)
nmo -- number of molecular orbitals (integer)
nmrtensors -- Nuclear magnetic resonance chemical shielding tensors (dict of dicts of array[2])
nocoeffs -- natural orbital coefficients (array[2])
nooccnos -- natural orbital occupation numbers (array[1])
nsocoeffs -- natural spin orbital coefficients (list of array[2])
nsooccnos -- natural spin orbital occupation numbers (list of array[1])
optdone -- flags whether an optimization has converged (Boolean)
optstatus -- optimization status for each set of atomic coordinates (array[1])
polarizabilities -- (dipole) polarizabilities, static or dynamic (list of arrays[2])
pressure -- pressure used for Thermochemistry (float, atm)
scancoords -- geometries of each scan step (array[3], angstroms)
scanenergies -- energies of potential energy surface (list)
scannames -- names of variables scanned (list of strings)
scanparm -- values of parameters in potential energy surface (list of tuples)
scfenergies -- molecular electronic energies after SCF (Hartree-Fock, DFT) (array[1], eV)
scftargets -- targets for convergence of the SCF (array[2])
scfvalues -- current values for convergence of the SCF (list of arrays[2])
temperature -- temperature used for Thermochemistry (float, kelvin)
time -- time in molecular dynamics and other trajectories (array[1], fs)
transprop -- all absorption and emission spectra (dictionary {name:(etenergies, etoscs)})
WARNING: this attribute is not standardized and is liable to change in cclib 2.0
vibanharms -- vibrational anharmonicity constants (array[2], 1/cm)
vibdisps -- cartesian displacement vectors (array[3], delta angstrom)
vibfreqs -- vibrational frequencies (array[1], 1/cm)
vibfconsts -- force constants of vibrations (array[1], mDyne/angstrom)
vibirs -- IR intensities (array[1], km/mol)
vibramans -- Raman activities (array[1], A^4/Da)
vibrmasses -- reduced masses of vibrations (array[1], daltons)
vibsyms -- symmetries of vibrations (list of strings)
zpve -- zero-point vibrational energy correction (float, hartree/particle)
(1) The term 'array' refers to a numpy array
(2) The number of dimensions of an array is given in square brackets
(3) Python indexes arrays/lists starting at zero, so if homos==[10], then
the 11th molecular orbital is the HOMO
"""
# The expected types for all supported attributes.
# The json_key is the key name used for attributes in the CJSON/JSON format
    # 'TBD' (To Be Decided) marks the key names of attributes which haven't been included in the CJSON format yet
_attributes = {
"aonames": Attribute(list, 'names', 'atoms:orbitals'),
"aooverlaps": Attribute(numpy.ndarray, 'overlaps', 'properties:orbitals'),
"atombasis": Attribute(list, 'indices', 'atoms:orbitals'),
"atomcharges": Attribute(dict, 'partial charges', 'properties'),
"atomcoords": Attribute(numpy.ndarray, 'coords', 'atoms:coords:3d'),
"atommasses": Attribute(numpy.ndarray, 'mass', 'atoms'),
"atomnos": Attribute(numpy.ndarray, 'number', 'atoms:elements'),
"atomspins": Attribute(dict, 'spins', 'atoms'),
"ccenergies": Attribute(numpy.ndarray, 'coupled cluster', 'properties:energy'),
"charge": Attribute(int, 'charge', 'properties'),
"coreelectrons": Attribute(numpy.ndarray, 'core electrons', 'atoms'),
"dispersionenergies":Attribute(numpy.ndarray, 'dispersion correction', 'properties:energy'),
"enthalpy": Attribute(float, 'enthalpy', 'properties'),
"entropy": Attribute(float, 'entropy', 'properties'),
"etenergies": Attribute(numpy.ndarray, 'electronic transitions', 'transitions'),
"etoscs": Attribute(numpy.ndarray, 'oscillator strength', 'transitions'),
"etdips": Attribute(numpy.ndarray, 'electic transition dipoles', 'transitions'),
"etveldips": Attribute(numpy.ndarray, 'velocity-gauge electric transition dipoles', 'transitions'),
"etmagdips": Attribute(numpy.ndarray, 'magnetic transition dipoles', 'transitions'),
"etrotats": Attribute(numpy.ndarray, 'rotatory strength', 'transitions'),
"etsecs": Attribute(list, 'one excited config', 'transitions'),
"etsyms": Attribute(list, 'symmetry', 'transitions'),
"freeenergy": Attribute(float, 'free energy', 'properties:energy'),
"fonames": Attribute(list, 'orbital names', 'fragments'),
"fooverlaps": Attribute(numpy.ndarray, 'orbital overlap', 'fragments'),
"fragnames": Attribute(list, 'fragment names', 'fragments'),
"frags": Attribute(list, 'atom indices', 'fragments'),
"gbasis": Attribute(list, 'basis functions', 'atoms:orbitals'),
"geotargets": Attribute(numpy.ndarray, 'geometric targets', 'optimization'),
"geovalues": Attribute(numpy.ndarray, 'geometric values', 'optimization'),
"grads": Attribute(numpy.ndarray, 'TBD', 'N/A'),
"hessian": Attribute(numpy.ndarray, 'hessian matrix', 'vibrations'),
"homos": Attribute(numpy.ndarray, 'homos', 'properties:orbitals'),
"metadata": Attribute(dict, 'TBD', 'N/A'),
"mocoeffs": Attribute(list, 'coeffs', 'properties:orbitals'),
"moenergies": Attribute(list, 'energies', 'properties:orbitals'),
"moments": Attribute(list, 'total dipole moment', 'properties'),
"mosyms": Attribute(list, 'molecular orbital symmetry', 'properties:orbitals'),
"mpenergies": Attribute(numpy.ndarray, 'moller plesset', 'properties:energy'),
"mult": Attribute(int, 'multiplicity', 'properties'),
"natom": Attribute(int, 'number of atoms', 'properties'),
"nbasis": Attribute(int, 'basis number', 'properties:orbitals'),
"nmo": Attribute(int, 'MO number', 'properties:orbitals'),
"nmrtensors": Attribute(dict, 'NMR chemical shielding tensors', 'properties:nmr'),
"nocoeffs": Attribute(numpy.ndarray, 'TBD', 'N/A'),
"nooccnos": Attribute(numpy.ndarray, 'TBD', 'N/A'),
"nsocoeffs": Attribute(list, 'TBD', 'N/A'),
"nsooccnos": Attribute(list, 'TBD', 'N/A'),
"optdone": Attribute(list, 'done', 'optimization'),
"optstatus": Attribute(numpy.ndarray, 'status', 'optimization'),
"polarizabilities": Attribute(list, 'polarizabilities', 'N/A'),
"pressure": Attribute(float, 'pressure', 'properties'),
"scancoords": Attribute(numpy.ndarray, 'step geometry', 'optimization:scan'),
"scanenergies": Attribute(list, 'PES energies', 'optimization:scan'),
"scannames": Attribute(list, 'variable names', 'optimization:scan'),
"scanparm": Attribute(list, 'PES parameter values', 'optimization:scan'),
"scfenergies": Attribute(numpy.ndarray, 'scf energies', 'optimization:scf'),
"scftargets": Attribute(numpy.ndarray, 'targets', 'optimization:scf'),
"scfvalues": Attribute(list, 'values', 'optimization:scf'),
"temperature": Attribute(float, 'temperature', 'properties'),
"time": Attribute(numpy.ndarray, 'time', 'N/A'),
"transprop": Attribute(dict, 'electronic transitions', 'transitions'),
"vibanharms": Attribute(numpy.ndarray, 'anharmonicity constants', 'vibrations'),
"vibdisps": Attribute(numpy.ndarray, 'displacement', 'vibrations'),
"vibfreqs": Attribute(numpy.ndarray, 'frequencies', 'vibrations'),
"vibfconsts": Attribute(numpy.ndarray, 'force constants', 'vibrations'),
"vibirs": Attribute(numpy.ndarray, 'IR', 'vibrations:intensities'),
"vibramans": Attribute(numpy.ndarray, 'raman', 'vibrations:intensities'),
"vibrmasses": Attribute(numpy.ndarray, 'reduced masses', 'vibrations'),
"vibsyms": Attribute(list, 'vibration symmetry', 'vibrations'),
"zpve": Attribute(float, 'zero-point correction', 'properties:energies')
}
# The name of all attributes can be generated from the dictionary above.
_attrlist = sorted(_attributes.keys())
# Arrays are double precision by default, but these will be integer arrays.
_intarrays = ['atomnos', 'coreelectrons', 'homos', 'optstatus']
# Attributes that should be lists of arrays (double precision).
_listsofarrays = ['mocoeffs', 'moenergies', 'moments', 'polarizabilities', 'scfvalues']
# Attributes that should be dictionaries of arrays (double precision).
_dictsofarrays = ["atomcharges", "atomspins"]
# Possible statuses for optimization steps.
# OPT_UNKNOWN is the default and means optimization is in progress.
# OPT_NEW is set for every new optimization (e.g. PES, IRCs, etc.)
# OPT_DONE is set for the last step of an optimisation that converged.
    # OPT_UNCONVERGED is set for every unconverged step (i.e. it should be mutually exclusive with OPT_DONE)
# bit value notation allows coding for multiple states: OPT_NEW and OPT_UNCONVERGED or OPT_NEW and OPT_DONE.
OPT_UNKNOWN = 0b000
OPT_NEW = 0b001
OPT_UNCONVERGED = 0b010
OPT_DONE = 0b100
def __init__(self, attributes={}):
"""Initialize the cclibData object.
Normally called in the parse() method of a Logfile subclass.
Inputs:
attributes - optional dictionary of attributes to load as data
"""
if attributes:
self.setattributes(attributes)
def listify(self):
"""Converts all attributes that are arrays or lists/dicts of arrays to lists."""
attrlist = [k for k in self._attrlist if hasattr(self, k)]
for k in attrlist:
v = self._attributes[k].type
if v == numpy.ndarray:
setattr(self, k, getattr(self, k).tolist())
elif v == list and k in self._listsofarrays:
setattr(self, k, [x.tolist() for x in getattr(self, k)])
elif v == dict and k in self._dictsofarrays:
items = getattr(self, k).items()
pairs = [(key, val.tolist()) for key, val in items]
setattr(self, k, dict(pairs))
def arrayify(self):
"""Converts appropriate attributes to arrays or lists/dicts of arrays."""
attrlist = [k for k in self._attrlist if hasattr(self, k)]
for k in attrlist:
v = self._attributes[k].type
precision = 'd'
if k in self._intarrays:
precision = 'i'
if v == numpy.ndarray:
setattr(self, k, numpy.array(getattr(self, k), precision))
elif v == list and k in self._listsofarrays:
setattr(self, k, [numpy.array(x, precision) for x in getattr(self, k)])
elif v == dict and k in self._dictsofarrays:
items = getattr(self, k).items()
pairs = [(key, numpy.array(val, precision)) for key, val in items]
setattr(self, k, dict(pairs))
def getattributes(self, tolists=False):
"""Returns a dictionary of existing data attributes.
Inputs:
tolists - flag to convert attributes to lists where applicable
"""
if tolists:
self.listify()
attributes = {}
for attr in self._attrlist:
if hasattr(self, attr):
attributes[attr] = getattr(self, attr)
if tolists:
self.arrayify()
return attributes
def setattributes(self, attributes):
"""Sets data attributes given in a dictionary.
Inputs:
attributes - dictionary of attributes to set
Outputs:
            invalid - list of attribute names that were not set, which
                means they are not specified in self._attrlist
"""
if type(attributes) is not dict:
raise TypeError("attributes must be in a dictionary")
valid = [a for a in attributes if a in self._attrlist]
invalid = [a for a in attributes if a not in self._attrlist]
for attr in valid:
setattr(self, attr, attributes[attr])
self.arrayify()
self.typecheck()
return invalid
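    # Illustrative sketch (hypothetical values, not part of the original code):
    # building a ccData instance directly from a dictionary of attributes.
    # setattributes() stores the known keys, arrayify() coerces them to the
    # declared types, and typecheck() verifies the result, e.g.
    #
    #     data = ccData(attributes={'atomnos': [8, 1, 1], 'charge': 0, 'mult': 1})
    #     assert data.atomnos.dtype.kind == 'i' and data.mult == 1
    #
    # Unknown keys would be reported in the list returned by setattributes().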
def typecheck(self):
"""Check the types of all attributes.
If an attribute does not match the expected type, then attempt to
convert; if that fails, only then raise a TypeError.
"""
self.arrayify()
for attr in [a for a in self._attrlist if hasattr(self, a)]:
val = getattr(self, attr)
if type(val) == self._attributes[attr].type:
continue
try:
val = self._attributes[attr].type(val)
except ValueError:
args = (attr, type(val), self._attributes[attr].type)
raise TypeError("attribute %s is %s instead of %s and could not be converted" % args)
def check_values(self, logger=logging):
"""Perform custom checks on the values of attributes."""
if hasattr(self, "etenergies") and any(e < 0 for e in self.etenergies):
negative_values = [e for e in self.etenergies if e < 0]
msg = ("At least one excitation energy is negative. "
"\nNegative values: %s\nFull etenergies: %s"
% (negative_values, self.etenergies))
logger.error(msg)
def write(self, filename=None, indices=None, *args, **kwargs):
"""Write parsed attributes to a file.
Possible extensions:
.cjson or .json - output a chemical JSON file
.cml - output a chemical markup language (CML) file
.xyz - output a Cartesian XYZ file of the last coordinates available
"""
from cclib.io import ccwrite
outputstr = ccwrite(self, outputdest=filename, indices=indices,
*args, **kwargs)
return outputstr
def writejson(self, filename=None, indices=None):
"""Write parsed attributes to a JSON file."""
return self.write(filename=filename, indices=indices,
outputtype='cjson')
def writecml(self, filename=None, indices=None):
"""Write parsed attributes to a CML file."""
return self.write(filename=filename, indices=indices,
outputtype='cml')
def writexyz(self, filename=None, indices=None):
"""Write parsed attributes to an XML file."""
return self.write(filename=filename, indices=indices,
outputtype='xyz')
@property
def converged_geometries(self):
"""
Return all converged geometries.
An array containing only the converged geometries, e.g.:
- For PES or IRCs, return all geometries for which optstatus matches OPT_DONE
- The converged geometry for simple optimisations
- The input geometry for single points
"""
if hasattr(self, 'optstatus'):
converged_indexes = [x for x, y in enumerate(self.optstatus) if y & self.OPT_DONE > 0]
return self.atomcoords[converged_indexes]
else:
return self.atomcoords
@property
def new_geometries(self):
"""
Return all starting geometries.
An array containing only the starting geometries, e.g.:
- For PES or IRCs, return all geometries for which optstatus matches OPT_NEW
- The input geometry for simple optimisations or single points
"""
if hasattr(self, 'optstatus'):
new_indexes = [x for x, y in enumerate(self.optstatus) if y & self.OPT_NEW > 0]
return self.atomcoords[new_indexes]
else:
return self.atomcoords
@property
def unknown_geometries(self):
"""
Return all OPT_UNKNOWN geometries.
An array containing only the geometries whose status is unknown, e.g.:
- For PES or IRCs, return all geometries for which optstatus matches OPT_UNKNOWN
- The input geometry for simple optimisations or single points
"""
if hasattr(self, 'optstatus'):
unknown_indexes = [x for x, y in enumerate(self.optstatus) if y == self.OPT_UNKNOWN]
return self.atomcoords[unknown_indexes]
else:
return self.atomcoords
@property
def unconverged_geometries(self):
"""
Return all unconverged geometries.
An array containing only the unconverged geometries, e.g.:
- For PES or IRCs, return all geometries for which optstatus matches OPT_UNCONVERGED
- The input geometry for simple optimisations or single points
"""
if hasattr(self, 'optstatus'):
unconverged_indexes = [x for x, y in enumerate(self.optstatus) if y & self.OPT_UNCONVERGED > 0]
return self.atomcoords[unconverged_indexes]
else:
return self.atomcoords
@property
def nelectrons(self):
return Electrons(self).count()
@property
def closed_shell(self):
return orbitals.Orbitals(self).closed_shell()
class ccData_optdone_bool(ccData):
"""This is the version of ccData where optdone is a Boolean."""
def __init__(self, *args, **kwargs):
super(ccData_optdone_bool, self).__init__(*args, **kwargs)
self._attributes["optdone"] = Attribute(bool, 'done', 'optimization')
def setattributes(self, *args, **kwargs):
invalid = super(ccData_optdone_bool, self).setattributes(*args, **kwargs)
# Reduce optdone to a Boolean, because it will be parsed as a list. If this list has any element,
# it means that there was an optimized structure and optdone should be True.
if hasattr(self, 'optdone'):
self.optdone = len(self.optdone) > 0
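# Illustrative usage sketch: attributes round-trip through plain dictionaries, so
# a data object can be rebuilt and written out. `data` is assumed to come from a
# parser (e.g. cclib.io.ccread); the output file name is hypothetical.
#     attrs = data.getattributes(tolists=True)
#     rebuilt = ccData(attributes=attrs)
#     rebuilt.writexyz("last_geometry.xyz")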
|
berquist/cclib
|
cclib/parser/data.py
|
Python
|
bsd-3-clause
| 23,991
|
[
"Gaussian",
"cclib"
] |
8b762136df15b0e133b07cc1a1069dc99f02fa4703cc9a1c0647204ca64c55e2
|
MIME_TYPE_DEFAULT = 'text/html'
MIME_TYPE_DICT = {
'.3gp': 'video/3gpp',
'.aab': 'application/x-authoware-bin',
'.aam': 'application/x-authoware-map',
'.aas': 'application/x-authoware-seg',
'.ai': 'application/postscript',
'.aif': 'audio/x-aiff',
'.aifc': 'audio/x-aiff',
'.aiff': 'audio/x-aiff',
'.als': 'audio/X-Alpha5',
'.amc': 'application/x-mpeg',
'.ani': 'application/octet-stream',
'.asc': 'text/plain',
'.asd': 'application/astound',
'.asf': 'video/x-ms-asf',
'.asn': 'application/astound',
'.asp': 'application/x-asap',
'.asx': 'video/x-ms-asf',
'.au': 'audio/basic',
'.avb': 'application/octet-stream',
'.avi': 'video/x-msvideo',
'.awb': 'audio/amr-wb',
'.bcpio': 'application/x-bcpio',
'.bin': 'application/octet-stream',
'.bld': 'application/bld',
'.bld2': 'application/bld2',
'.bmp': 'application/x-MS-bmp',
'.bpk': 'application/octet-stream',
'.bz2': 'application/x-bzip2',
'.cal': 'image/x-cals',
'.ccn': 'application/x-cnc',
'.cco': 'application/x-cocoa',
'.cdf': 'application/x-netcdf',
'.cgi': 'magnus-internal/cgi',
'.chat': 'application/x-chat',
'.class': 'application/octet-stream',
'.clp': 'application/x-msclip',
'.cmx': 'application/x-cmx',
'.co': 'application/x-cult3d-object',
'.cod': 'image/cis-cod',
'.cpio': 'application/x-cpio',
'.cpt': 'application/mac-compactpro',
'.crd': 'application/x-mscardfile',
'.csh': 'application/x-csh',
'.csm': 'chemical/x-csml',
'.csml': 'chemical/x-csml',
'.css': 'text/css',
'.cur': 'application/octet-stream',
'.dcm': 'x-lml/x-evm',
'.dcr': 'application/x-director',
'.dcx': 'image/x-dcx',
'.dhtml': 'text/html',
'.dir': 'application/x-director',
'.dll': 'application/octet-stream',
'.dmg': 'application/octet-stream',
'.dms': 'application/octet-stream',
'.doc': 'application/msword',
'.dot': 'application/x-dot',
'.dvi': 'application/x-dvi',
'.dwf': 'drawing/x-dwf',
'.dwg': 'application/x-autocad',
'.dxf': 'application/x-autocad',
'.dxr': 'application/x-director',
'.ebk': 'application/x-expandedbook',
'.emb': 'chemical/x-embl-dl-nucleotide',
'.embl': 'chemical/x-embl-dl-nucleotide',
'.eps': 'application/postscript',
'.eri': 'image/x-eri',
'.es': 'audio/echospeech',
'.esl': 'audio/echospeech',
'.etc': 'application/x-earthtime',
'.etx': 'text/x-setext',
'.evm': 'x-lml/x-evm',
'.evy': 'application/x-envoy',
'.exe': 'application/octet-stream',
'.fh4': 'image/x-freehand',
'.fh5': 'image/x-freehand',
'.fhc': 'image/x-freehand',
'.fif': 'image/fif',
'.fm': 'application/x-maker',
'.fpx': 'image/x-fpx',
'.fvi': 'video/isivideo',
'.gau': 'chemical/x-gaussian-input',
'.gca': 'application/x-gca-compressed',
'.gdb': 'x-lml/x-gdb',
'.gif': 'image/gif',
'.gps': 'application/x-gps',
'.gtar': 'application/x-gtar',
'.gz': 'application/x-gzip',
'.hdf': 'application/x-hdf',
'.hdm': 'text/x-hdml',
'.hdml': 'text/x-hdml',
'.hlp': 'application/winhlp',
'.hqx': 'application/mac-binhex40',
'.htm': 'text/html',
'.html': 'text/html',
'.hts': 'text/html',
'.ice': 'x-conference/x-cooltalk',
'.ico': 'application/octet-stream',
'.ief': 'image/ief',
'.ifm': 'image/gif',
'.ifs': 'image/ifs',
'.imy': 'audio/melody',
'.ins': 'application/x-NET-Install',
'.ips': 'application/x-ipscript',
'.ipx': 'application/x-ipix',
'.it': 'audio/x-mod',
'.itz': 'audio/x-mod',
'.ivr': 'i-world/i-vrml',
'.j2k': 'image/j2k',
'.jad': 'text/vnd.sun.j2me.app-descriptor',
'.jam': 'application/x-jam',
'.jar': 'application/java-archive',
'.jnlp': 'application/x-java-jnlp-file',
'.jpe': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.jpg': 'image/jpeg',
'.jpz': 'image/jpeg',
'.js': 'application/x-javascript',
'.jwc': 'application/jwc',
'.kjx': 'application/x-kjx',
'.lak': 'x-lml/x-lak',
'.latex': 'application/x-latex',
'.lcc': 'application/fastman',
'.lcl': 'application/x-digitalloca',
'.lcr': 'application/x-digitalloca',
'.lgh': 'application/lgh',
'.lha': 'application/octet-stream',
'.lml': 'x-lml/x-lml',
'.lmlpack': 'x-lml/x-lmlpack',
'.lsf': 'video/x-ms-asf',
'.lsx': 'video/x-ms-asf',
'.lzh': 'application/x-lzh',
'.m13': 'application/x-msmediaview',
'.m14': 'application/x-msmediaview',
'.m15': 'audio/x-mod',
'.m3u': 'audio/x-mpegurl',
'.m3url': 'audio/x-mpegurl',
'.ma1': 'audio/ma1',
'.ma2': 'audio/ma2',
'.ma3': 'audio/ma3',
'.ma5': 'audio/ma5',
'.man': 'application/x-troff-man',
'.map': 'magnus-internal/imagemap',
'.mbd': 'application/mbedlet',
'.mct': 'application/x-mascot',
'.mdb': 'application/x-msaccess',
'.mdz': 'audio/x-mod',
'.me': 'application/x-troff-me',
'.mel': 'text/x-vmel',
'.mi': 'application/x-mif',
'.mid': 'audio/midi',
'.midi': 'audio/midi',
'.mif': 'application/x-mif',
'.mil': 'image/x-cals',
'.mio': 'audio/x-mio',
'.mmf': 'application/x-skt-lbs',
'.mng': 'video/x-mng',
'.mny': 'application/x-msmoney',
'.moc': 'application/x-mocha',
'.mocha': 'application/x-mocha',
'.mod': 'audio/x-mod',
'.mof': 'application/x-yumekara',
'.mol': 'chemical/x-mdl-molfile',
'.mop': 'chemical/x-mopac-input',
'.mov': 'video/quicktime',
'.movie': 'video/x-sgi-movie',
'.mp2': 'audio/x-mpeg',
'.mp3': 'audio/x-mpeg',
'.mp4': 'video/mp4',
'.mpc': 'application/vnd.mpohun.certificate',
'.mpe': 'video/mpeg',
'.mpeg': 'video/mpeg',
'.mpg': 'video/mpeg',
'.mpg4': 'video/mp4',
'.mpga': 'audio/mpeg',
'.mpn': 'application/vnd.mophun.application',
'.mpp': 'application/vnd.ms-project',
'.mps': 'application/x-mapserver',
'.mrl': 'text/x-mrml',
'.mrm': 'application/x-mrm',
'.ms': 'application/x-troff-ms',
'.mts': 'application/metastream',
'.mtx': 'application/metastream',
'.mtz': 'application/metastream',
'.mzv': 'application/metastream',
'.nar': 'application/zip',
'.nbmp': 'image/nbmp',
'.nc': 'application/x-netcdf',
'.ndb': 'x-lml/x-ndb',
'.ndwn': 'application/ndwn',
'.nif': 'application/x-nif',
'.nmz': 'application/x-scream',
'.nokia': 'op-logo image/vnd.nok-oplogo-color',
'.npx': 'application/x-netfpx',
'.nsnd': 'audio/nsnd',
'.nva': 'application/x-neva1',
'.oda': 'application/oda',
'.oom': 'application/x-AtlasMate-Plugin',
'.pac': 'audio/x-pac',
'.pae': 'audio/x-epac',
'.pan': 'application/x-pan',
'.pbm': 'image/x-portable-bitmap',
'.pcx': 'image/x-pcx',
'.pda': 'image/x-pda',
'.pdb': 'chemical/x-pdb',
'.pdf': 'application/pdf',
'.pfr': 'application/font-tdpfr',
'.pgm': 'image/x-portable-graymap',
'.pict': 'image/x-pict',
'.pm': 'application/x-perl',
'.pmd': 'application/x-pmd',
'.png': 'image/png',
'.pnm': 'image/x-portable-anymap',
'.pnz': 'image/png',
'.pot': 'application/vnd.ms-powerpoint',
'.ppm': 'image/x-portable-pixmap',
'.pps': 'application/vnd.ms-powerpoint',
'.ppt': 'application/vnd.ms-powerpoint',
'.pqf': 'application/x-cprplayer',
'.pqi': 'application/cprplayer',
'.prc': 'application/x-prc',
'.proxy': 'application/x-ns-proxy-autoconfig',
'.ps': 'application/postscript',
'.ptlk': 'application/listenup',
'.pub': 'application/x-mspublisher',
'.pvx': 'video/x-pv-pvx',
'.qcp': 'audio/vnd.qcelp',
'.qt': 'video/quicktime',
'.qti': 'image/x-quicktime',
'.qtif': 'image/x-quicktime',
'.r3t': 'text/vnd.rn-realtext3d',
'.ra': 'audio/x-pn-realaudio',
'.ram': 'audio/x-pn-realaudio',
'.rar': 'application/x-rar-compressed',
'.ras': 'image/x-cmu-raster',
'.rdf': 'application/rdf+xml',
'.rf': 'image/vnd.rn-realflash',
'.rgb': 'image/x-rgb',
'.rlf': 'application/x-richlink',
'.rm': 'audio/x-pn-realaudio',
'.rmf': 'audio/x-rmf',
'.rmm': 'audio/x-pn-realaudio',
'.rmvb': 'audio/x-pn-realaudio',
'.rnx': 'application/vnd.rn-realplayer',
'.roff': 'application/x-troff',
'.rp': 'image/vnd.rn-realpix',
'.rpm': 'audio/x-pn-realaudio-plugin',
'.rt': 'text/vnd.rn-realtext',
'.rte': 'x-lml/x-gps',
'.rtf': 'application/rtf',
'.rtg': 'application/metastream',
'.rtx': 'text/richtext',
'.rv': 'video/vnd.rn-realvideo',
'.rwc': 'application/x-rogerwilco',
'.s3m': 'audio/x-mod',
'.s3z': 'audio/x-mod',
'.sca': 'application/x-supercard',
'.scd': 'application/x-msschedule',
'.sdf': 'application/e-score',
'.sea': 'application/x-stuffit',
'.sgm': 'text/x-sgml',
'.sgml': 'text/x-sgml',
'.sh': 'application/x-sh',
'.shar': 'application/x-shar',
'.shtml': 'magnus-internal/parsed-html',
'.shw': 'application/presentations',
'.si6': 'image/si6',
'.si7': 'image/vnd.stiwap.sis',
'.si9': 'image/vnd.lgtwap.sis',
'.sis': 'application/vnd.symbian.install',
'.sit': 'application/x-stuffit',
'.skd': 'application/x-Koan',
'.skm': 'application/x-Koan',
'.skp': 'application/x-Koan',
'.skt': 'application/x-Koan',
'.slc': 'application/x-salsa',
'.smd': 'audio/x-smd',
'.smi': 'application/smil',
'.smil': 'application/smil',
'.smp': 'application/studiom',
'.smz': 'audio/x-smd',
'.snd': 'audio/basic',
'.spc': 'text/x-speech',
'.spl': 'application/futuresplash',
'.spr': 'application/x-sprite',
'.sprite': 'application/x-sprite',
'.spt': 'application/x-spt',
'.src': 'application/x-wais-source',
'.stk': 'application/hyperstudio',
'.stm': 'audio/x-mod',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc': 'application/x-sv4crc',
'.svf': 'image/vnd',
'.svg': 'image/svg+xml',
'.svh': 'image/svh',
'.svr': 'x-world/x-svr',
'.swf': 'application/x-shockwave-flash',
'.swfl': 'application/x-shockwave-flash',
'.t': 'application/x-troff',
'.tad': 'application/octet-stream',
'.talk': 'text/x-speech',
'.tar': 'application/x-tar',
'.taz': 'application/x-tar',
'.tbp': 'application/x-timbuktu',
'.tbt': 'application/x-timbuktu',
'.tcl': 'application/x-tcl',
'.tex': 'application/x-tex',
'.texi': 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tgz': 'application/x-tar',
'.thm': 'application/vnd.eri.thm',
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.tki': 'application/x-tkined',
'.tkined': 'application/x-tkined',
'.toc': 'application/toc',
'.toy': 'image/toy',
'.tr': 'application/x-troff',
'.trk': 'x-lml/x-gps',
'.trm': 'application/x-msterminal',
'.tsi': 'audio/tsplayer',
'.tsp': 'application/dsptype',
'.tsv': 'text/tab-separated-values',
'.ttf': 'application/octet-stream',
'.ttz': 'application/t-time',
'.txt': 'text/plain',
'.ult': 'audio/x-mod',
'.ustar': 'application/x-ustar',
'.uu': 'application/x-uuencode',
'.uue': 'application/x-uuencode',
'.vcd': 'application/x-cdlink',
'.vcf': 'text/x-vcard',
'.vdo': 'video/vdo',
'.vib': 'audio/vib',
'.viv': 'video/vivo',
'.vivo': 'video/vivo',
'.vmd': 'application/vocaltec-media-desc',
'.vmf': 'application/vocaltec-media-file',
'.vmi': 'application/x-dreamcast-vms-info',
'.vms': 'application/x-dreamcast-vms',
'.vox': 'audio/voxware',
'.vqe': 'audio/x-twinvq-plugin',
'.vqf': 'audio/x-twinvq',
'.vql': 'audio/x-twinvq',
'.vre': 'x-world/x-vream',
'.vrml': 'x-world/x-vrml',
'.vrt': 'x-world/x-vrt',
'.vrw': 'x-world/x-vream',
'.vts': 'workbook/formulaone',
'.wav': 'audio/x-wav',
'.wax': 'audio/x-ms-wax',
'.wbmp': 'image/vnd.wap.wbmp',
'.web': 'application/vnd.xara',
'.wi': 'image/wavelet',
'.wis': 'application/x-InstallShield',
'.wm': 'video/x-ms-wm',
'.wma': 'audio/x-ms-wma',
'.wmd': 'application/x-ms-wmd',
'.wmf': 'application/x-msmetafile',
'.wml': 'text/vnd.wap.wml',
'.wmlc': 'application/vnd.wap.wmlc',
'.wmls': 'text/vnd.wap.wmlscript',
'.wmlsc': 'application/vnd.wap.wmlscriptc',
'.wmlscript': 'text/vnd.wap.wmlscript',
'.wmv': 'video/x-ms-wmv',
'.wmx': 'video/x-ms-wmx',
'.wmz': 'application/x-ms-wmz',
'.woff': 'application/font-woff',
'.woff2': 'application/font-woff',
'.wpng': 'image/x-up-wpng',
'.wpt': 'x-lml/x-gps',
'.wri': 'application/x-mswrite',
'.wrl': 'x-world/x-vrml',
'.wrz': 'x-world/x-vrml',
'.ws': 'text/vnd.wap.wmlscript',
'.wsc': 'application/vnd.wap.wmlscriptc',
'.wv': 'video/wavelet',
'.wvx': 'video/x-ms-wvx',
'.wxl': 'application/x-wxl',
'.x': 'gzip application/x-gzip',
'.xar': 'application/vnd.xara',
'.xbm': 'image/x-xbitmap',
'.xdm': 'application/x-xdma',
'.xdma': 'application/x-xdma',
'.xdw': 'application/vnd.fujixerox.docuworks',
'.xht': 'application/xhtml+xml',
'.xhtm': 'application/xhtml+xml',
'.xhtml': 'application/xhtml+xml',
'.xla': 'application/vnd.ms-excel',
'.xlc': 'application/vnd.ms-excel',
'.xll': 'application/x-excel',
'.xlm': 'application/vnd.ms-excel',
'.xls': 'application/vnd.ms-excel',
'.xlt': 'application/vnd.ms-excel',
'.xlw': 'application/vnd.ms-excel',
'.xm': 'audio/x-mod',
'.xml': 'text/xml',
'.xmz': 'audio/x-mod',
'.xpi': 'application/x-xpinstall',
'.xpm': 'image/x-xpixmap',
'.xsit': 'text/xml',
'.xsl': 'text/xml',
'.xul': 'text/xul',
'.xwd': 'image/x-xwindowdump',
'.xyz': 'chemical/x-pdb',
'.yz1': 'application/x-yz1',
'.z': 'application/x-compress',
'.zac': 'application/x-zaurus-zac',
'.zip': 'application/zip',
}
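# Illustrative lookup sketch (the helper name is hypothetical): resolve a MIME
# type from a file extension, falling back to MIME_TYPE_DEFAULT for unknown types.
#     import os
#     def guess_mime_type(path):
#         ext = os.path.splitext(path)[1].lower()
#         return MIME_TYPE_DICT.get(ext, MIME_TYPE_DEFAULT)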
|
ycroft/XPHENIX
|
source/ant/controller/mime_type.py
|
Python
|
gpl-3.0
| 18,867
|
[
"Gaussian",
"MOPAC",
"NetCDF",
"VMD"
] |
8ef0738df7d329eb56afce806fd7719e47dea103a58a009e2eee7ee5526628b6
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5.QtWidgets import QTextEdit, QMenu, QFileDialog, QSizePolicy
import mooseutils
class TerminalTextEdit(QTextEdit):
"""
A read-only text edit that replaces terminal codes with appropriate html codes.
Also uses fixed font.
"""
def __init__(self, **kwds):
super(TerminalTextEdit, self).__init__(**kwds)
self.setStyleSheet("TerminalTextEdit { background: black; color: white; }")
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setReadOnly(True)
def contextMenuEvent(self, event):
"""
User requested a context menu.
Input:
event: The QEvent()
"""
menu = QMenu()
save_action = menu.addAction("Save")
clear_action = menu.addAction("Clear")
action = menu.exec_(event.globalPos())
if action == save_action:
self.save()
elif action == clear_action:
self.clear()
def save(self):
"""
Save the contents into a file.
"""
fname, other = QFileDialog.getSaveFileName(self, "Choose output", "", "Output file (*.log *.txt)")
if fname:
try:
with open(fname, "w") as f:
f.write(self.toPlainText())
mooseutils.mooseMessage("Saved content to %s" % fname)
except Exception as e:
mooseutils.mooseError("Failed to save file: %s" % e, dialog=True)
def clear(self):
"""
Clear the output
"""
self.setHtml("")
def setFontSize(self, size):
self.setStyleSheet("TerminalTextEdit { background: black; color: white; font: %spx}" % size)
if __name__ == "__main__":
from PyQt5.QtWidgets import QApplication
import sys
qapp = QApplication(sys.argv)
w = TerminalTextEdit()
w.append('<span style="color:red;">foo</span>')
w.show()
w.setEnabled(True)
sys.exit(qapp.exec_())
|
nuclear-wizard/moose
|
python/peacock/Execute/TerminalTextEdit.py
|
Python
|
lgpl-2.1
| 2,291
|
[
"MOOSE"
] |
30b25b632db412a8f67185a853b0591bd75c0005d5659a1878367d8befece529
|
# from matplotlib.colors import LogNorm
# import matplotlib.pyplot as plt
# import numpy as np
import pandas as pd
from root_pandas import read_root
import sys
# from itertools import islice
# MY LIBS
import mie_utils as my
file_name = sys.argv[1]
crystal_name = sys.argv[2]
run_number = sys.argv[3]
particle_name = sys.argv[4]
particle_energy = sys.argv[5]
gonio_low = sys.argv[6]
chunksize = 2000000
evts = read_root(file_name, chunksize=chunksize) # iterator iterating the chunks
print "Opened root file "
print file_name
# CRYSTAL DATAFRAME COLUMNS
# 'Time', 'Date', 'Event_run', 'Event_evtnum', 'Event_nuclear',
# 'Event_nuclearRaw', 'GonioPos_x', 'GonioPos_y', 'GonioPos_z',
# 'MultiHits_thetaIn_x', 'MultiHits_thetaIn_y',
# 'MultiHits_thetaInErr_x', 'MultiHits_thetaInErr_y',
# 'MultiHits_d0_x', 'MultiHits_d0_y', 'MultiHits_d0Err_x',
# 'MultiHits_d0Err_y', 'Tracks_thetaIn_x', 'Tracks_thetaIn_y',
# 'Tracks_thetaOut_x', 'Tracks_thetaOut_y', 'Tracks_thetaInErr_x',
# 'Tracks_thetaInErr_y', 'Tracks_thetaOutErr_x',
# 'Tracks_thetaOutErr_y', 'Tracks_d0_x', 'Tracks_d0_y',
# 'Tracks_d0Err_x', 'Tracks_d0Err_y', 'Tracks_chi2_x',
# 'Tracks_chi2_y', 'SingleTrack', 'MultiHit'
chunks = 0
init_scan = -1
for df in evts:
df.query('GonioPos_x > {}'.format(float(gonio_low)*1e-5),inplace=True)
df.to_hdf(file_name+".hdf","simpleEvent", format="table", \
fletcher32=True, mode="a", complevel=9,append=True, \
data_columns=['SingleTrack','Tracks_thetaIn_x', 'Tracks_d0_x', 'Tracks_d0_y'])
chunks = chunks + chunksize
print("Written " + str(chunks) + " rows")
init_scan_column = df.loc[:,"GonioPos_x"] # TODO mysterious bug
init_scan = init_scan_column.mean()
# TODO Document this convoluted code
# parameters_table = pd.DataFrame({"parameter_name": ["init_scan"], "value": [init_scan]})
# parameters_table.set_index("parameter_name",inplace=True)
#
# parameters_table.to_csv("crystal_analysis_parameters.csv",sep='\t')
my.save_in_csv("crystal_analysis_parameters.csv",
root_file_name=file_name,
crystal_name=crystal_name,
run_number=run_number,
particle_name=particle_name,
particle_energy=particle_energy)
print "Finished creating HDF file"
|
f-forcher/crystal-channeling-analysis
|
save_as_hdf_goniocut.py
|
Python
|
gpl-3.0
| 2,407
|
[
"CRYSTAL"
] |
7f1fd30181f5a271791e938b8764d29cba88a11e39ccd8cf50098b90bf702b3c
|
"""
Sample Mayavi customization file.
This code is not to be executed as `mayavi2 -x user_mayavi.py` or
`python user_mayavi.py`.
Put this file in ~/.mayavi2/user_mayavi.py and rerun mayavi2 to see what
it does -- the worker view may not show up by default so you will have
to go to View->Other and in the Show View dialog, activate the "Custom
Mayavi2 View".
The added modules should show up in the menus (Look for UserOutline in
the Modules)
____
This module demonstrates how to extend Mayavi. It extends the modules
provided by mayavi by adding these to the Mayavi registry. Note that
the registry imports customize which in turn imports this file.
It also defines an Envisage plugin that is added to the default list of
plugins to extend the running mayavi application. This plugin is
returned by the `get_plugins()` function.
This file must be placed inside the `~/.mayavi2` directory and called
`user_mayavi.py`. Please note that `~/.mayavi2` is placed in `sys.path`
(if the directory exists) so make sure that you choose your module names
carefully (so as not to override any common module names).
The file may also be placed anywhere on sys.path and called
`site_mayavi.py` for global system level customizations.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2006-2008, Enthought, Inc.
# License: BSD Style.
from mayavi.core.registry import registry
from mayavi.core.pipeline_info import PipelineInfo
from mayavi.core.metadata import ModuleMetadata
# Metadata for the new module we want to add -- notice that we use a
# factory function here for convenience, we could also use a class but
# the reasons for doing this are documented below.
user_outline = ModuleMetadata(
id = "UserOutlineModule",
menu_name = "&UserOutline",
factory = 'user_mayavi.user_outline',
desc = "Draw a cornered outline for given input",
tooltip = "Draw a cornered outline for given input",
help = "Draw a cornered outline for given input",
input_info = PipelineInfo(datasets=['any'],
attribute_types=['any'],
attributes=['any'])
)
# Register the module with the mayavi registry.
registry.modules.append(user_outline)
#######
# The all important function that returns the plugin we wish to add to
# the default mayavi application.
def get_plugins():
# We simply return a list containing the WorkerPlugin defined below.
return [WorkerPlugin()]
######################################################################
# That's it, basically. The rest of the code should really be in another
# module but is in the same module for convenience here. There are
# problems with doing any significant non-core module imports in this
# module as documented below.
######################################################################
######################################################################
# THE CODE BELOW SHOULD REALLY BE IN SEPARATE MODULES.
#
# The following can very well be in a separate module but I've kept it
# here to make this a compact demo of how to customize things.
######################################################################
######################################################################
# A new module to expose to mayavi.
#
# WARNING: Do not do other mayavi imports right here like for example:
# 'from mayavi.modules.outline import Outline' etc. This is
# because the user_mayavi is imported at a time when many of the imports
# are not complete and this will cause hard-to-debug circular import
# problems. The registry is given only metadata mostly in the form of
# strings and this will cause no problem. Therefore to define new
# modules, we strongly recommend that the modules be defined in another
# module or be defined in a factory function as done below.
def user_outline():
"""A Factory function that creates a new module to add to the
pipeline. Note that the function performs its mayavi imports inside the
body, which avoids circular imports.
"""
print("User Outline")
from mayavi.modules.outline import Outline
o = Outline(outline_mode='cornered', name='UserOutline')
return o
######################################################################
# This code simulates something the user would like to do. In this case
# we just want to create some data, view it with mayavi and modify the
# data. We want to add this as a view to the standard mayavi. The code
# below is simply traits code with a few extra things to be able to grab
# the running mayavi instance and script it. The object we create we
# offer as an envisage service offer -- this instantiates the worker.
# The WorkerPlugin exposes the service offer and shows the view of this
# worker.
import numpy
from traits.api import HasTraits, Range, Button, Instance, List
from traitsui.api import Item, View
######################################################################
# `Worker` class
######################################################################
class Worker(HasTraits):
"""This class basically allows you to create a data set, view it
and modify the dataset. This is a rather crude example but
demonstrates how things can be done.
"""
# Set by envisage when this is contributed as a ServiceOffer.
window = Instance('pyface.workbench.api.WorkbenchWindow')
create_data = Button('Create data')
reset_data = Button('Reset data')
view_data = Button('View data')
scale = Range(0.0, 1.0)
source = Instance('mayavi.core.source.Source')
# Our UI view.
view = View(Item('create_data', show_label=False),
Item('view_data', show_label=False),
Item('reset_data', show_label=False),
Item('scale'),
resizable=True
)
def get_mayavi(self):
from mayavi.plugins.script import Script
return self.window.get_service(Script)
def _make_data(self):
dims = [64, 64, 64]
np = dims[0]*dims[1]*dims[2]
x, y, z = numpy.ogrid[-5:5:dims[0]*1j,-5:5:dims[1]*1j,-5:5:dims[2]*1j]
x = x.astype('f')
y = y.astype('f')
z = z.astype('f')
s = (numpy.sin(x*y*z)/(x*y*z))
s = s.transpose().copy() # This makes the data contiguous.
return s
def _create_data_fired(self):
mayavi = self.get_mayavi()
from mayavi.sources.array_source import ArraySource
s = self._make_data()
src = ArraySource(transpose_input_array=False, scalar_data=s)
self.source = src
mayavi.add_source(src)
def _reset_data_fired(self):
self.source.scalar_data = self._make_data()
def _view_data_fired(self):
mayavi = self.get_mayavi()
from mayavi.modules.outline import Outline
from mayavi.modules.image_plane_widget import ImagePlaneWidget
# Visualize the data.
o = Outline()
mayavi.add_module(o)
ipw = ImagePlaneWidget()
mayavi.add_module(ipw)
ipw.module_manager.scalar_lut_manager.show_scalar_bar = True
ipw_y = ImagePlaneWidget()
mayavi.add_module(ipw_y)
ipw_y.ipw.plane_orientation = 'y_axes'
def _scale_changed(self, value):
src = self.source
data = src.scalar_data
data += value*0.01
numpy.mod(data, 1.0, data)
src.update()
######################################################################
# The following code is the small amount of envisage code that brings
# the users code (above) and Envisage/Mayavi UI together.
from envisage.api import Plugin, ServiceOffer
######################################################################
# `WorkerPlugin` class
######################################################################
class WorkerPlugin(Plugin):
# Extension point Ids.
SERVICE_OFFERS = 'envisage.ui.workbench.service_offers'
VIEWS = 'envisage.ui.workbench.views'
# Services we contribute.
service_offers = List(contributes_to=SERVICE_OFFERS)
# Views.
views = List(contributes_to=VIEWS)
######################################################################
# Private methods.
def _service_offers_default(self):
""" Trait initializer. """
worker_service_offer = ServiceOffer(
protocol = 'user_mayavi.Worker',
factory = 'user_mayavi.Worker'
)
return [worker_service_offer]
def _views_default(self):
""" Trait initializer. """
return [self._worker_view_factory]
def _worker_view_factory(self, window, **traits):
""" Factory method for the current selection of the engine. """
from pyface.workbench.traits_ui_view import \
TraitsUIView
worker = window.get_service(Worker)
tui_worker_view = TraitsUIView(obj=worker,
view='view',
id='user_mayavi.Worker.view',
name='Custom Mayavi2 View',
window=window,
position='left',
**traits
)
return tui_worker_view
# END OF CODE THAT SHOULD REALLY BE IN SEPARATE MODULES.
######################################################################
if __name__ == '__main__':
import sys
print("*"*80)
print("ERROR: This script isn't supposed to be executed.")
print(__doc__)
print("*"*80)
from traits.util.home_directory import get_home_directory
print("Your .mayavi2 directory should be in %s"%get_home_directory())
print("*"*80)
sys.exit(1)
|
dmsurti/mayavi
|
examples/mayavi/user_mayavi.py
|
Python
|
bsd-3-clause
| 9,815
|
[
"Mayavi"
] |
22abdcd9f3b9f4bd889b6f60da1acecd6b4c7c8b5bf20b245a23d5608b97ef4c
|
########################################################################
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2012/12/11 18:04:25
########################################################################
""" :mod: SubprocessTests
=======================
.. module: SubprocessTests
:synopsis: unittest for Subprocess module
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unittest for Subprocess module
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from os.path import dirname, join
# imports
import time
import pytest
from subprocess import Popen
# SUT
from DIRAC.Core.Utilities.Subprocess import systemCall, shellCall, pythonCall, getChildrenPIDs
# Mark this entire module as slow
pytestmark = pytest.mark.slow
cmd = ["sleep", "2"]
def pyfunc(_name):
time.sleep(2)
@pytest.mark.parametrize("timeout, expected", [
(False, True),
(3, True),
(1, False)
])
def test_calls(timeout, expected):
ret = systemCall(timeout, cmdSeq=cmd)
assert ret['OK'] == expected
ret = shellCall(timeout, cmdSeq=" ".join(cmd))
assert ret['OK'] == expected
ret = pythonCall(timeout, pyfunc, 'something')
assert ret['OK'] == expected
def test_getChildrenPIDs():
import os
os.system("echo $PWD")
mainProcess = Popen(['python', join(dirname(__file__), 'ProcessesCreator.py')])
time.sleep(1)
res = getChildrenPIDs(mainProcess.pid)
assert len(res) == 3
for p in res:
assert isinstance(p, int)
mainProcess.wait()
|
yujikato/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_Subprocess.py
|
Python
|
gpl-3.0
| 1,565
|
[
"DIRAC"
] |
ce0dfef92e278b2c564c1adb48c6b8f6761a9ed6592739981c76623e3c6672a7
|
import collections
import copy
import warnings
import numpy
import six
from chainer import cuda
from chainer import link as link_module
from chainer import serializer as serializer_module
from chainer import variable
def _sum_sqnorm(arr):
sq_sum = collections.defaultdict(float)
for x in arr:
with cuda.get_device_from_array(x) as dev:
x = x.ravel()
s = x.dot(x)
sq_sum[int(dev)] += s
return sum([float(i) for i in six.itervalues(sq_sum)])
def exponential_decay_noise(xp, shape, dtype, hook, opt):
"""Time-dependent annealed Gaussian noise function from the paper:
`Adding Gradient Noise Improves Learning for Very Deep Networks
<https://arxiv.org/pdf/1511.06807>`_.
"""
std = numpy.sqrt(hook.eta / numpy.power(1 + opt.t, 0.55))
return xp.random.normal(0, std, shape).astype(dtype)
class Hyperparameter(object):
"""Set of hyperparameter entries of an optimizer.
This is a utility class to provide a set of hyperparameter entries for
update rules and an optimizer. Each entry can be set as an attribute of a
hyperparameter object.
A hyperparameter object can hold a reference to its parent hyperparameter
object. When an attribute does not exist in the child hyperparameter, it
automatically refers to the parent. We typically set the hyperparameter of
the gradient method as the parent of the hyperparameter of each update
rule. It enables us to centralize the management of hyperparameters (e.g.
we can change the learning rate of all update rules just by modifying the
hyperparameter of the central optimizer object), while users can freely
customize the hyperparameter of each update rule if needed.
Args:
parent (Hyperparameter): Parent hyperparameter.
"""
def __init__(self, parent=None):
self._parent = parent
def __getattr__(self, name):
if '_parent' not in self.__dict__:
raise AttributeError('_parent is not set up yet')
return getattr(self._parent, name)
def __repr__(self):
d = self.get_dict()
keys = sorted(d.keys())
values_repr = ', '.join('%s=%s' % (k, d[k]) for k in keys)
return 'Hyperparameter(%s)' % values_repr
@property
def parent(self):
"""Parent hyperparmaeter object."""
return self._parent
def get_dict(self):
"""Converts the hyperparameter into a dictionary.
Returns:
Dictionary containing all entries that can be referred by this
hyperparameter object.
"""
d = {} if self._parent is None else self._parent.get_dict()
for k, v in six.iteritems(self.__dict__):
if k != '_parent':
d[k] = v
return d
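# Illustrative sketch: a child hyperparameter falls back to its parent until the
# attribute is overridden locally (the attribute names are made up for the example).
#     parent = Hyperparameter(); parent.lr = 0.01
#     child = Hyperparameter(parent)
#     child.lr        # -> 0.01, inherited through __getattr__
#     child.lr = 0.1  # now shadows the parent value for this child only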
class UpdateRule(object):
"""Base class of all update rules.
Update rule is an object that implements how to update one parameter
variable using the gradient of a loss function. This class provides the
interface and the common features of any update rules.
An update rule can be set to a :class:`~chainer.Variable` object that
represents a parameter array of a model. An :class:`~chainer.Optimizer`
instance defines which parameters to update, and the update rule instance
of each parameter defines how to update it.
Hook functions can be set to any update rule instance. The hook function is
called just before any updates in the order of registrations.
An implementation of update rule should override :meth:`update_core` or
its device-dependent variants (i.e., :meth:`update_core_cpu` and
:meth:`update_core_gpu`).
The state (e.g. a moving average of the gradient) of the update rule is
stored into the state dictionary. An implementation of update rule using
state should also override :meth:`init_state` to initialize the state at
the first update. The values of the state dictionary are automatically
copied to the appropriate device before the update based on the data and
grad arrays.
Args:
parent_hyperparam (Hyperparameter): Hyperparameter that provides the
default values.
Attributes:
enabled (bool): Flag to configure if this update rule is active. If the
update rule is not active (i.e., ``enabled = False``), the
:meth:`update` method does not update the parameter.
hyperparam (Hyperparameter): Hyperparameter of the update rule.
t (int): Number of updates made by this update rule.
"""
def __init__(self, parent_hyperparam=None):
self._hooks = collections.OrderedDict()
self._state = None
self.enabled = True
self.hyperparam = Hyperparameter(parent_hyperparam)
self.t = 0
@property
def state(self):
"""State dictionary."""
return self._state
def add_hook(self, hook, name=None):
"""Adds a hook function.
The hook function is called before any updates.
Args:
hook (callable): Hook function to be added. It takes two
arguments: the update rule object and the parameter variable.
name (str): Name of the hook function. The name attribute of the
hook function is used by default.
"""
if not callable(hook):
raise TypeError('hook function must be callable')
if name is None:
name = getattr(hook, 'name', getattr(hook, '__name__', None))
if name is None:
raise ValueError(
'the name of the hook function is not specified')
if name in self._hooks:
raise ValueError('hook "{}" already exists'.format(name))
self._hooks[name] = hook
def remove_hook(self, name):
"""Removes the specified hook function.
Args:
name (str): Name of the hook function to be removed. The hook
function registered with this name will be removed.
"""
del self._hooks[name]
def update(self, param):
"""Invokes hook functions and updates the parameter.
Args:
param (~chainer.Variable): Variable to be updated.
"""
if not self.enabled:
return
self.t += 1
self._prepare(param)
for hook in six.itervalues(self._hooks):
hook(self, param)
self.update_core(param)
def update_core(self, param):
"""Updates the parameter.
Implementation of UpdateRule should override this method or both of
:meth:`_update_core_cpu` and :meth:`_update_core_gpu`.
Args:
param (~chainer.Variable): Variable to be updated.
"""
with cuda.get_device_from_array(param.data) as dev:
if int(dev) == -1:
self.update_core_cpu(param)
else:
self.update_core_gpu(param)
def update_core_cpu(self, param):
"""Updates the parameter on CPU.
See :meth:`update_core` for details.
Args:
param (~chainer.Variable): Variable to be updated.
"""
raise NotImplementedError
def update_core_gpu(self, param):
"""Updates the parameter on GPU.
See :meth:`update_core` for details.
Args:
param (~chainer.Variable): Variable to be updated.
"""
raise NotImplementedError
def init_state(self, param):
"""Initializes the state.
Any implementations that use the state should override this method.
This method is called at the first update.
Args:
param (~chainer.Variable): Parameter variable. It can be used to
extract the shape and the data type of the parameter.
"""
pass
def serialize(self, serializer):
"""Serializes the update rule state.
Be careful that this method only saves/loads the state of the update
rule. The parameters of the target link is not saved/loaded by this
method, and so you need to serialize the target link separately if you
want to fully recover the training state including parameters.
Args:
serializer (~chainer.AbstractSerializer): Serializer object.
"""
if self.state is None:
if isinstance(serializer, serializer_module.Deserializer):
# try to initialize the state to retrieve state entries
self._state = {}
self_copy = copy.copy(self)
arr = numpy.empty(1, dtype=numpy.float32)
self_copy.init_state(variable.Variable(arr, grad=arr))
for key in self._state:
self._state[key] = serializer(key, None)
else:
for key in self._state:
self._state[key] = serializer(key, self._state[key])
def _prepare(self, param):
with cuda.get_device(param.data) as device:
state = self.state
if state is None:
state = self._state = {}
self.init_state(param)
for name, value in six.iteritems(state):
if not isinstance(value, (numpy.ndarray, cuda.ndarray)):
continue
value_device = cuda.get_device(value)
if value_device.id != device.id:
if device.id >= 0:
state[name] = cuda.to_gpu(value)
else:
state[name] = cuda.to_cpu(value)
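# Illustrative sketch: a minimal custom rule only needs update_core_cpu (plus
# update_core_gpu and init_state if it runs on GPU or keeps state). The class
# below is made up for the example and is not part of chainer itself.
#     class SimpleSGDRule(UpdateRule):
#         def update_core_cpu(self, param):
#             param.data -= 0.01 * param.grad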
class Optimizer(object):
"""Base class of all numerical optimizers.
This class provides basic features for all optimization methods. It
optimizes parameters of a *target link*. The target link is registered via
the :meth:`setup` method, and then the :meth:`update` method updates its
parameters based on a given loss function.
Each optimizer implementation must be defined as a child class of
Optimizer. It must override :meth:`update` method.
If the optimizer is based on single gradient computation (like
most first-order methods), then it should inherit :class:`GradientMethod`,
which adds some features dedicated for the first order methods, including
the support of :class:`~chainer.UpdateRule`.
Optimizer instance also supports *hook functions*. Hook function is
registered by the :meth:`add_hook` method. Each hook function is called
in registration order in advance of the actual parameter update. If the
hook function has an attribute ``call_for_each_param`` and its value is
``True``, the hook function is used as a hook function of all update rules
(i.e., it is invoked for every parameter by passing the corresponding
update rule and the parameter).
Attributes:
target: Target link object. It is set by the :meth:`setup` method.
t: Number of update steps. It must be incremented by the
:meth:`update` method.
epoch: Current epoch. It is incremented by the :meth:`new_epoch`
method.
"""
def setup(self, link):
"""Sets a target link and initializes the optimizer states.
Given link is set to the :attr:`target` attribute. It also prepares the
optimizer state dictionaries corresponding to all parameters in the
link hierarchy. The existing states are discarded.
Args:
link (~chainer.Link): Target link object.
"""
if not isinstance(link, link_module.Link):
raise TypeError('optimization target must be a link')
self.target = link
self.t = 0
self.epoch = 0
self._hooks = collections.OrderedDict()
def update(self, lossfun=None, *args, **kwds):
"""Updates the parameters.
This method updates the parameters of the target link. The behavior of
this method is different for the cases either ``lossfun`` is given or
not.
If ``lossfun`` is given, this method typically clears the gradients,
calls the loss function with given extra arguments, and calls the
:meth:`~chainer.Variable.backward` method of its output to compute the
gradients. The actual implementation might call ``lossfun`` more than
once.
If ``lossfun`` is not given, then this method assumes that the
gradients of all parameters are already computed. An implementation
that requires multiple gradient computations might raise an error on
this case.
In both cases, this method invokes the update procedure for all
parameters.
Args:
lossfun (function): Loss function. It accepts arbitrary arguments
and returns one :class:`~chainer.Variable` object that
represents the loss (or objective) value. This argument can be
omitted for single gradient-based methods. In this case, this
method assumes gradient arrays computed.
args, kwds: Arguments for the loss function.
"""
raise NotImplementedError
def new_epoch(self):
"""Starts a new epoch.
This method increments the :attr:`epoch` count. Note that if the
optimizer depends on the epoch count, then the user should call this method
appropriately at the beginning of each epoch.
"""
self.epoch += 1
def add_hook(self, hook, name=None):
"""Registers a hook function.
Hook function is typically called right after the gradient computation,
though the timing depends on the optimization method.
Args:
hook (function): Hook function. If ``hook.call_for_each_param`` is
true, this hook function is called for each parameter by
passing the update rule and the parameter. Otherwise, this hook
function is called only once each iteration by passing the
optimizer.
name (str): Name of the registration. If omitted, ``hook.name`` is
used by default.
"""
if not callable(hook):
raise TypeError('hook function is not callable')
if not hasattr(self, '_hooks'):
raise RuntimeError('call `setup` method before `add_hook` method')
if name is None:
name = hook.name
if name in self._hooks:
raise KeyError('hook %s already exists' % name)
self._hooks[name] = hook
def remove_hook(self, name):
"""Removes a hook function.
Args:
name (str): Registered name of the hook function to remove.
"""
del self._hooks[name]
def call_hooks(self):
"""Invokes hook functions in registration order."""
for hook in six.itervalues(self._hooks):
self._call_hook(hook)
def _call_hook(self, hook):
if getattr(hook, 'call_for_each_param', False):
for param in self.target.params():
hook(param.update_rule, param)
else:
hook(self)
def serialize(self, serializer):
"""Serializes or deserializes the optimizer.
It only saves or loads the following things:
- Optimizer states
- Global states (:attr:`t` and :attr:`epoch`)
**It does not save or load the parameters of the target link.** They
should be separately saved or loaded.
Args:
serializer (~chainer.AbstractSerializer): Serializer or
deserializer object.
"""
self.t = serializer('t', self.t)
self.epoch = serializer('epoch', self.epoch)
for name, param in self.target.namedparams():
rule = getattr(param, 'update_rule', None)
if rule is not None:
rule.serialize(serializer[name])
class GradientMethod(Optimizer):
"""Base class of all single gradient-based optimizers.
This is an extension of the :class:`Optimizer` class. Typical gradient
methods that just require the gradient at the current parameter vector on
an update can be implemented as its child class.
This class uses :class:`~chainer.UpdateRule` to manage the update rule of
each parameter. A child class of GradientMethod should override
:meth:`create_update_rule` to create the default update rule of each
parameter.
This class also provides :attr:`hyperparam`, which is the hyperparameter
used as the default configuration of each update rule. All built-in
gradient method implementations also provide proxy properties that act
as aliases to the attributes of :attr:`hyperparam`. It is recommended to
provide such an alias to each attribute. It can be done by only adding one
line for each attribute using :class:`HyperparameterProxy`.
Attributes:
hyperparam (Hyperparameter): The hyperparameter of the gradient
method. It is used as the default configuration of each update
rule (i.e., the hyperparameter of each update rule refers this
hyperparameter as its parent).
"""
def __init__(self):
super(GradientMethod, self).__init__()
self.hyperparam = Hyperparameter()
def setup(self, link):
super(GradientMethod, self).setup(link)
for param in link.params():
param.update_rule = self.create_update_rule()
def reallocate_cleared_grads(self):
"""Reallocate gradients cleared by :meth:`~chainer.Variable.cleargrad`.
This method allocates arrays for all gradients which have :obj:`None`.
This method is called before and after every optimizer hook.
If an inheriting optimizer does not require this allocation,
the optimizer can override this method with a blank function.
"""
for name, param in self.target.namedparams(False):
if param.grad is None:
with cuda.get_device_from_array(param.data):
xp = cuda.get_array_module(param.data)
param.grad = xp.zeros_like(param.data)
def call_hooks(self):
"""Invokes hook functions in registration order."""
for hook in six.itervalues(self._hooks):
self._call_hook(hook)
self.reallocate_cleared_grads()
def update(self, lossfun=None, *args, **kwds):
"""Updates parameters based on a loss function or computed gradients.
This method runs in two ways.
- If ``lossfun`` is given, then it is used as a loss function to
compute gradients.
- Otherwise, this method assumes that the gradients are already
computed.
In both cases, the computed gradients are used to update parameters.
The actual update routines are defined by the update rule of each
parameter.
"""
if lossfun is not None:
use_cleargrads = getattr(self, '_use_cleargrads', True)
loss = lossfun(*args, **kwds)
if use_cleargrads:
self.target.cleargrads()
else:
self.target.zerograds()
loss.backward()
del loss
self.reallocate_cleared_grads()
self.call_hooks()
self.t += 1
for param in self.target.params():
param.update()
def use_cleargrads(self, use=True):
"""Enables or disables use of :func:`~chainer.Link.cleargrads` in `update`.
Args:
use (bool): If ``True``, this function enables use of
`cleargrads`. If ``False``, disables use of `cleargrads`
(`zerograds` is used).
.. deprecated:: v2.0
Note that :meth:`update` calls :meth:`~Link.cleargrads` by default.
:meth:`~Link.cleargrads` is more efficient than
:meth:`~Link.zerograds`, so one does not have to call
:meth:`use_cleargrads`. This method remains for backward
compatibility.
"""
warnings.warn(
'GradientMethod.use_cleargrads is deprecated.',
DeprecationWarning)
self._use_cleargrads = use
def create_update_rule(self):
"""Creates a new update rule object.
This method creates an update rule object. It is called by
:meth:`setup` to set up an update rule of each parameter.
Each implementation of the gradient method should override this method
to provide the default update rule implementation.
Return:
UpdateRule: Update rule object.
"""
raise NotImplementedError
class HyperparameterProxy(object):
"""Property that acts as an alias to an attribute of the hyperparameter.
This class is used to define a property of an implementation of
:class:`GradientMethod` that acts as an alias to an attribute of the
hyperparameter.
Args:
attr_name (str): Name of the attribute of the hyperparameter.
"""
def __init__(self, attr_name):
self._attr_name = attr_name
self.__doc__ = 'Alias to ``self.hyperparam.{}``'.format(attr_name)
def __get__(self, obj, type=None):
if obj is None:
return self
return getattr(obj.hyperparam, self._attr_name)
def __set__(self, obj, value):
setattr(obj.hyperparam, self._attr_name, value)
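# Illustrative sketch: a gradient method can expose an attribute of its
# hyperparameter as a plain property with one class-level line (the class name
# below is made up for the example).
#     class MySGD(GradientMethod):
#         lr = HyperparameterProxy('lr')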
class WeightDecay(object):
"""Optimizer/UpdateRule hook function for weight decay regularization.
This hook function adds a scaled parameter to the corresponding gradient.
It can be used as a regularization.
Args:
rate (float): Coefficient for the weight decay.
Attributes:
rate (float): Coefficient for the weight decay.
"""
name = 'WeightDecay'
call_for_each_param = True
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
p, g = param.data, param.grad
with cuda.get_device(p) as dev:
if int(dev) == -1:
g += self.rate * p
else:
kernel = cuda.elementwise(
'T p, T decay', 'T g', 'g += decay * p', 'weight_decay')
kernel(p, self.rate, g)
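# Illustrative usage sketch: weight decay is registered as an optimizer hook and,
# because call_for_each_param is True, runs once per parameter before each update.
# The optimizer and model names are assumptions for the example.
#     optimizer = chainer.optimizers.SGD(lr=0.01)
#     optimizer.setup(model)
#     optimizer.add_hook(WeightDecay(rate=0.0005))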
class Lasso(object):
"""Optimizer/UpdateRule hook function for Lasso regularization.
This hook function adds a scaled parameter to the sign of each weight.
It can be used as a regularization.
Args:
rate (float): Coefficient for the weight decay.
Attributes:
rate (float): Coefficient for the weight decay.
"""
name = 'Lasso'
call_for_each_param = True
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
p, g = param.data, param.grad
xp = cuda.get_array_module(p)
with cuda.get_device_from_array(p) as dev:
sign = xp.sign(p)
if int(dev) == -1:
g += self.rate * sign
else:
kernel = cuda.elementwise(
'T s, T decay', 'T g', 'g += decay * s', 'lasso')
kernel(sign, self.rate, g)
class GradientClipping(object):
"""Optimizer hook function for gradient clipping.
This hook function scales all gradient arrays to fit to the defined L2 norm
threshold.
Args:
threshold (float): L2 norm threshold.
Attributes:
threshold (float): L2 norm threshold of gradient norm.
"""
name = 'GradientClipping'
def __init__(self, threshold):
self.threshold = threshold
def __call__(self, opt):
norm = numpy.sqrt(_sum_sqnorm(
[p.grad for p in opt.target.params(False)]))
rate = self.threshold / norm
if rate < 1:
for param in opt.target.params(False):
grad = param.grad
with cuda.get_device_from_array(grad):
grad *= rate
class GradientNoise(object):
"""Optimizer/UpdateRule hook function for adding gradient noise.
This hook function simply adds noise generated by the ``noise_func``
to the gradient. By default it adds time-dependent annealed Gaussian
noise to the gradient at every training step:
.. math::
g_t \\leftarrow g_t + N(0, \\sigma_t^2)
where
.. math::
\\sigma_t^2 = \\frac{\\eta}{(1+t)^\\gamma}
with :math:`\\eta` selected from {0.01, 0.3, 1.0} and
:math:`\\gamma = 0.55`.
Args:
eta (float): Parameter that defines the scale of the noise, which for
the default noise function is recommended to be either 0.01, 0.3
or 1.0.
noise_func (function): Noise generating function which by default
is given by `Adding Gradient Noise Improves Learning for Very Deep\
Networks <https://arxiv.org/pdf/1511.06807>`_.
"""
name = 'GradientNoise'
call_for_each_param = True
def __init__(self, eta, noise_func=exponential_decay_noise):
self.eta = eta
self.noise_func = noise_func
def __call__(self, rule, param):
g = param.grad
xp = cuda.get_array_module(g)
with cuda.get_device_from_array(g) as dev:
noise = self.noise_func(xp, g.shape, g.dtype, self, rule)
if int(dev) == -1:
g += noise
else:
kernel = cuda.elementwise(
'T noise', 'T g', 'g += noise', 'gradient_noise')
kernel(noise, g)
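# Illustrative usage sketch: the annealed-noise hook is registered the same way,
# e.g. optimizer.add_hook(GradientNoise(eta=0.3)), with eta taken from the values
# suggested in the docstring (0.01, 0.3 or 1.0).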
class GradientHardClipping(object):
"""Optimizer/UpdateRule hook function for gradient clipping.
This hook function clips all gradient arrays to be within a lower and upper
bound.
Args:
lower_bound (float): The lower bound of the gradient value.
upper_bound (float): The upper bound of the gradient value.
Attributes:
lower_bound (float): The lower bound of the gradient value.
upper_bound (float): The upper bound of the gradient value.
"""
name = 'GradientHardClipping'
call_for_each_param = True
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def __call__(self, rule, param):
grad = param.grad
xp = cuda.get_array_module(grad)
with cuda.get_device_from_array(grad):
xp.clip(grad, self.lower_bound, self.upper_bound, out=grad)
|
ysekky/chainer
|
chainer/optimizer.py
|
Python
|
mit
| 26,435
|
[
"Gaussian"
] |
302aff72da7906a77dbe5770f33963ab77865008137562b5f0e5187d549cce2f
|
""" demixed Principal Component Analysis
"""
# Author: Wieland Brendel <wieland.brendel@neuro.fchampalimaud.org>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from collections import OrderedDict
from itertools import combinations, chain
from scipy.sparse.linalg import svds
from scipy.linalg import pinv
from sklearn.base import BaseEstimator
from sklearn.utils.extmath import randomized_svd
import numexpr as ne
from .utils import shuffle2D, classification, denoise_mask
class dPCA(BaseEstimator):
""" demixed Principal component analysis (dPCA)
dPCA is a linear dimensionality reduction technique that automatically discovers
and highlights the essential features of complex population activities. The
population activity is decomposed into a few demixed components that capture most
of the variance in the data and that highlight the dynamic tuning of the population
to various task parameters, such as stimuli, decisions, rewards, etc.
Parameters
----------
labels : int or string
Labels of feature axis.
If int the corresponding number of labels are selected from the alphabet 'abcde...'
join : None or dict
Parameter combinations to join
If a data set has parametrized by time t and stimulus s, then dPCA will split
the data into marginalizations corresponding to 't', 's' and 'ts'. At times,
we want to join different marginalizations (like 's' and 'ts'), e.g. if
we are only interested in the time-modulated stimulus components. In this case,
we would pass {'ts' : ['s','ts']}.
regularizer : None, float, 'auto'
Regularization parameter. If None or 0, then no regularization is applied.
For float, the regularization weight is regularizer*var(data). If 'auto', the
optimal regularization parameter is found during fitting (might take some time).
n_components : None, int or dict
Number of components to keep.
If n_components is int, then the same number of components are kept in every
marginalization. Otherwise, the dict allows to set the number of components
in each marginalization (e.g. {'t' : 10, 'ts' : 5}). Defaults to 10.
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
n_iter : int (default: 0)
Number of iterations for randomized SVD solver (sklearn).
Attributes
----------
explained_variance_ratio_ : dict with arrays, [n_components]
Dictionary in which each key refers to one marginalization and the \
value is a vector with the percentage of variance explained by each of \
the marginal components.
Notes
-----
Implements the dPCA model from:
D Kobak*, W Brendel*, C Constantinidis, C Feierstein, A Kepecs, Z Mainen, \
R Romo, X-L Qi, N Uchida, C Machens
Demixed principal component analysis of population activity in higher \
cortical areas reveals independent representation of task parameters,
Examples
--------
>>> import numpy as np
>>> from dPCA import dPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> dpca = dPCA(n_components=2)
>>> dpca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(dpca.explained_variance_ratio_)
[ 0.99244... 0.00755...]
"""
def __init__(self, labels=None, join=None, n_components=10, regularizer=None, copy=True, n_iter=0):
# create labels from alphabet if not provided
if isinstance(labels,str):
self.labels = labels
elif isinstance(labels,int):
alphabet = 'abcdefghijklmnopqrstuvwxyz'
            self.labels = alphabet[:labels]
else:
raise TypeError('Wrong type for labels. Please either set labels to the number of variables or provide the axis labels as a single string of characters (like "ts" for time and stimulus)')
self._join = join
        self.regularizer = 0 if regularizer is None else regularizer
self.opt_regularizer_flag = regularizer == 'auto'
self.n_components = n_components
self.copy = copy
self.marginalizations = self._get_parameter_combinations()
self.n_iter = n_iter
# set debug mode, 0 = no reports, 1 = warnings, 2 = warnings & progress, >2 = everything
self.debug = 2
if regularizer == 'auto':
print("""You chose to determine the regularization parameter automatically. This can
take substantial time and grows linearly with the number of crossvalidation
folds. The latter can be set by changing self.n_trials (default = 3). Similarly,
                    use self.protect to set the list of axes that are not supposed to get shuffled
(e.g. upon splitting the data into test- and training, time-points should always
be drawn from the same trial, i.e. self.protect = ['t']). This can significantly
speed up the code.""")
self.n_trials = 3
self.protect = None
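    # Illustrative usage sketch (not part of the original source). Assuming
    # trial-averaged data X of shape (n_neurons, n_stimuli, n_time) and
    # trial-by-trial data trialX of shape (n_trials, n_neurons, n_stimuli, n_time),
    # a typical call might look like:
    #
    #   dpca = dPCA(labels='st', regularizer='auto', n_components=10)
    #   dpca.protect = ['t']                 # never shuffle/split along the time axis
    #   Z = dpca.fit_transform(X, trialX)    # dict with keys 's', 't' and 'st'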
def fit(self, X, trialX=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X,trialX=trialX)
return self
def fit_transform(self, X, trialX=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
Returns
-------
X_new : dict with arrays with the same shape as X
Dictionary in which each key refers to one marginalization and the value is the
latent component.
"""
self._fit(X,trialX=trialX)
return self.transform(X)
def _get_parameter_combinations(self,join=True):
''' Returns all parameter combinations, e.g. for labels = 'xyz'
{'x' : (0,), 'y' : (1,), 'z' : (2,), 'xy' : (0,1), 'xz' : (0,2), 'yz' : (1,2), 'xyz' : (0,1,2)}
            If join == True, parameter combinations are condensed according to self._join. Otherwise all
combinations are returned.
'''
        # subsets = () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2)
subsets = list(chain.from_iterable(combinations(list(range(len(self.labels))), r) for r in range(len(self.labels))))
# delete empty set & add (0,1,2)
del subsets[0]
subsets.append(list(range(len(self.labels))))
# create dictionary
pcombs = OrderedDict()
for subset in subsets:
key = ''.join([self.labels[i] for i in subset])
pcombs[key] = set(subset)
# condense dict if not None
if isinstance(self._join,dict) and join:
for key, combs in self._join.items():
tmp = [pcombs[comb] for comb in combs]
for comb in combs:
del pcombs[comb]
pcombs[key] = tmp
return pcombs
def _marginalize(self,X,save_memory=False):
""" Marginalize the data matrix
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
save_memory : bool, set to True if memory really is an issue (though optimization is not perfect yet)
Returns
-------
mXs : dictionary, with values corresponding to the marginalized data (and the key refers to the marginalization)
"""
def mmean(X,axes,expand=False):
            ''' Takes the mean along several axes (given as a list). If expand is True, the averaged
                dimensions are filled with new axes to retain the dimensionality.
'''
Z = X.copy()
for ax in np.sort(axes)[::-1]:
Z = np.mean(Z,ax)
if expand == True:
Z = np.expand_dims(Z,ax)
return Z
def dense_marg(Y,mYs):
''' The original marginalizations as returned by "get_marginalizations" are sparse in the sense that
                marginalized axes are kept as singleton (newaxis) dimensions. This function blows them up
                to the original size of the data set (needed for optimization).
'''
tmp = np.zeros_like(Y)
for key in list(mYs.keys()):
mYs[key] = (tmp + mYs[key]).reshape((Y.shape[0],-1))
return mYs
Xres = X.copy() # residual of data
# center data
Xres -= np.mean(Xres.reshape((Xres.shape[0],-1)),-1).reshape((Xres.shape[0],) + (len(Xres.shape)-1)*(1,))
# init dict with marginals
Xmargs = OrderedDict()
# get parameter combinations
pcombs = self._get_parameter_combinations(join=False)
# subtract the mean
S = list(pcombs.values())[-1] # full set of indices
if save_memory:
for key, phi in pcombs.items():
S_without_phi = list(S - phi)
# compute marginalization and save
Xmargs[key] = mmean(Xres,np.array(S_without_phi)+1,expand=True)
# subtract the marginalization from the data
Xres -= Xmargs[key]
else:
# efficient precomputation of means
pre_mean = {}
for key, phi in pcombs.items():
if len(key) == 1:
pre_mean[key] = mmean(Xres,np.array(list(phi))+1,expand=True)
else:
pre_mean[key] = mmean(pre_mean[key[:-1]],np.array([list(phi)[-1]])+1,expand=True)
# compute marginalizations
for key, phi in pcombs.items():
key_without_phi = ''.join(filter(lambda ch: ch not in key, self.labels))
# self.labels.translate(None, key)
# build local dictionary for numexpr
X = pre_mean[key_without_phi] if len(key_without_phi) > 0 else Xres
if len(key) > 1:
subsets = list(chain.from_iterable(combinations(key, r) for r in range(1,len(key))))
subsets = [''.join(subset) for subset in subsets]
local_dict = {subset : Xmargs[subset] for subset in subsets}
local_dict['X'] = X
Xmargs[key] = ne.evaluate('X - ' + ' - '.join(subsets),local_dict=local_dict)
else:
Xmargs[key] = X
# condense dict if not None
if isinstance(self._join,dict):
for key, combs in self._join.items():
Xshape = np.ones(len(self.labels)+1,dtype='int')
for comb in combs:
sh = np.array(Xmargs[comb].shape)
Xshape[(sh-1).nonzero()] = sh[(sh-1).nonzero()]
tmp = np.zeros(Xshape)
for comb in combs:
tmp += Xmargs[comb]
del Xmargs[comb]
Xmargs[key] = tmp
Xmargs = dense_marg(X,Xmargs)
return Xmargs
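    # Minimal sketch of what _marginalize returns (our reading of the code, not
    # part of the original source): for labels='st' and data X of shape
    # (n_neurons, n_stim, n_time), the result is an OrderedDict with keys
    # 's', 't' and 'st', each holding an array of shape (n_neurons, n_stim*n_time)
    # that contains only the variance attributable to that parameter combination:
    #
    #   mXs = dpca._marginalize(X)
    #   mXs['t'].shape    # -> (n_neurons, n_stim * n_time)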
def _optimize_regularization(self,X,trialX,center=True,lams='auto'):
""" Optimization routine to find optimal regularization parameter.
TO DO: Routine is pretty dumb right now (go through predetermined
list and find minimum). There are several ways to speed it up.
"""
# center data
if center:
X = X - np.mean(X.reshape((X.shape[0],-1)),1).reshape((X.shape[0],)\
+ len(self.labels)*(1,))
# compute variance of data
varX = np.sum(X**2)
# test different inits and regularization parameters
if lams == 'auto':
N = 45
lams = np.logspace(0,N,num=N, base=1.4, endpoint=False)*1e-7
# compute crossvalidated score over n_trials repetitions
scores = self.crossval_score(lams,X,trialX,mean=False)
# take mean over total scores
totalscore = np.mean(np.sum(np.dstack([scores[key] for key in list(scores.keys())]),-1),0)
# Raise warning if optimal lambda lies at boundaries
if np.argmin(totalscore) == 0 or np.argmin(totalscore) == len(totalscore) - 1:
if self.debug > 0:
print("Warning: Optimal regularization parameter lies at the \
boundary of the search interval. Please provide \
different search list (key: lams).")
# set minimum as new lambda
self.regularizer = lams[np.argmin(totalscore)]
if self.debug > 1:
print('Optimized regularization, optimal lambda = ', self.regularizer)
print('Regularization will be fixed; to compute the optimal \
parameter again on the next fit, please \
set opt_regularizer_flag to True.')
self.opt_regularizer_flag = False
def crossval_score(self,lams,X,trialX,mean=True):
""" Calculates crossvalidation scores for a given set of regularization
parameters. To this end it takes one parameter off the list,
computes the model on a training set and then validates the
reconstruction performance on a validation set.
Parameters
----------
lams: 1D array of floats
Array of regularization parameters to test.
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the
            axes correspond to different parameters).
trialX: array-like, shape (n_trials, n_samples, n_features_1, n_features_2, ...)
Trial-by-trial data. Shape is similar to X but with an additional axis at the beginning
with different trials. If different combinations of features have different number
            of trials, then set n_trials to the maximum number of trials and fill unoccupied data
points with NaN.
mean: bool (default: True)
Set True if the crossvalidation score should be averaged over
all marginalizations, otherwise False.
Returns
-------
mXs : dictionary, with values corresponding to the marginalized
data (and the key refers to the marginalization)
"""
# placeholder for scores
scores = np.zeros((self.n_trials,len(lams))) if mean else {key : np.zeros((self.n_trials,len(lams))) for key in list(self.marginalizations.keys())}
# compute number of samples in each condition
N_samples = self._get_n_samples(trialX,protect=self.protect)
for trial in range(self.n_trials):
print("Starting trial ", trial + 1, "/", self.n_trials)
# perform split into training and test trials
trainX, validX = self.train_test_split(X,trialX,N_samples=N_samples)
# compute marginalization of test and validation data
trainmXs, validmXs = self._marginalize(trainX), self._marginalize(validX)
# compute crossvalidation score for every regularization parameter
for k, lam in enumerate(lams):
# fit dpca model
self.regularizer = lam
self._fit(trainX,mXs=trainmXs,optimize=False)
# compute crossvalidation score
if mean:
scores[trial,k] = self._score(validX,validmXs)
else:
tmp = self._score(validX,validmXs,mean=False)
for key in list(self.marginalizations.keys()):
scores[key][trial,k] = tmp[key]
return scores
def _score(self,X,mXs,mean=True):
""" Scoring for crossvalidation. Predicts one observable (e.g. one neuron) of X at a time, using all other dimensions:
                \sum_\phi ||X[n] - F_\phi D_\phi^{-n} X^{-n}||^2
            where \phi refers to the marginalization and X^{-n} (D_\phi^{-n}) are all rows of X (D) except the n-th row.
"""
n_features = X.shape[0]
X = X.reshape((n_features,-1))
error = {key: 0 for key in list(mXs.keys())}
PDY = {key : np.dot(self.P[key],np.dot(self.D[key].T,X)) for key in list(mXs.keys())}
trPD = {key : np.sum(self.P[key]*self.D[key],1) for key in list(mXs.keys())}
for key in list(mXs.keys()):
error[key] = np.sum((mXs[key] - PDY[key] + trPD[key][:,None]*X)**2)
return error if not mean else np.sum(list(error.values()))
def _randomized_dpca(self,X,mXs,pinvX=None):
""" Solves the dPCA minimization problem analytically by using a randomized SVD solver from sklearn.
Returns
-------
P : dict mapping strings to array-like,
            Holds encoding matrices for each term in variance decompositions (used in inverse_transform
to map from low-dimensional representation back to original data space).
D : dict mapping strings to array-like,
            Holds decoding matrices for each term in variance decompositions (used to transform data
to low-dimensional space).
"""
n_features = X.shape[0]
rX = X.reshape((n_features,-1))
pinvX = pinv(rX) if pinvX is None else pinvX
P, D = {}, {}
for key in list(mXs.keys()):
mX = mXs[key].reshape((n_features,-1)) # called X_phi in paper
C = np.dot(mX,pinvX)
if isinstance(self.n_components,dict):
U,s,V = randomized_svd(np.dot(C,rX),n_components=self.n_components[key],n_iter=self.n_iter,random_state=np.random.randint(10e5))
else:
U,s,V = randomized_svd(np.dot(C,rX),n_components=self.n_components,n_iter=self.n_iter,random_state=np.random.randint(10e5))
P[key] = U
D[key] = np.dot(U.T,C).T
return P, D
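    # Shape sketch (our reading of the code, not from the original source): for
    # every marginalization key, P[key] and D[key] are (n_features, n_components)
    # arrays; transform() projects with D (Z = D.T @ X) and inverse_transform()
    # maps back with P (X_hat = P @ Z), so reconstruct() applies the low-rank
    # map P @ D.T to the data.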
def _add_regularization(self,Y,mYs,lam,SVD=None,pre_reg=False):
""" Prepares the data matrix and its marginalizations for the randomized_dpca solver (see paper)."""
n_features = Y.shape[0]
if not pre_reg:
regY = np.hstack([Y.reshape((n_features,-1)),lam*np.eye(n_features)])
else:
regY = Y
            regY[:,-n_features:] = lam*np.eye(n_features)
if not pre_reg:
regmYs = OrderedDict()
for key in list(mYs.keys()):
regmYs[key] = np.hstack([mYs[key],np.zeros((n_features,n_features))])
else:
regmYs = mYs
if SVD is not None:
U,s,V = SVD
M = ((s**2 + lam**2)**-1)[:,None]*U.T
pregY = np.dot(np.vstack([V.T*s[None,:],lam*U]),M)
else:
pregY = np.dot(regY.reshape((n_features,-1)).T,np.linalg.inv(np.dot(Y.reshape((n_features,-1)),Y.reshape((n_features,-1)).T) + lam**2*np.eye(n_features)))
return regY, regmYs, pregY
def _fit(self, X, trialX=None, mXs=None, center=True, SVD=None, optimize=True):
""" Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
trialX: array-like, shape (n_trials, n_samples, n_features_1, n_features_2, ...)
Trial-by-trial data. Shape is similar to X but with an additional axis at the beginning
with different trials. If different combinations of features have different number
            of trials, then set n_trials to the maximum number of trials and fill unoccupied data
points with NaN.
mXs: dict with values in the shape of X
Marginalized data, should be the result of dpca._marginalize
center: bool
Centers data if center = True
SVD: list of arrays
Singular-value decomposition of the data. Don't provide!
optimize: bool
Flag to turn automatic optimization of regularization parameter on or off. Needed
internally.
"""
def flat2d(A):
''' Flattens all but the first axis of an ndarray, returns view. '''
return A.reshape((A.shape[0],-1))
# X = check_array(X)
n_features = X.shape[0]
# center data
if center:
X = X - np.mean(flat2d(X),1).reshape((n_features,) + len(self.labels)*(1,))
# marginalize data
if mXs is None:
mXs = self._marginalize(X)
# compute optimal regularization
if self.opt_regularizer_flag and optimize:
if self.debug > 0:
print("Start optimizing regularization.")
if trialX is None:
raise ValueError('To optimize the regularization parameter, the trial-by-trial data trialX needs to be provided.')
self._optimize_regularization(X,trialX)
# add regularization
if self.regularizer > 0:
regX, regmXs, pregX = self._add_regularization(X,mXs,self.regularizer*np.sum(X**2),SVD=SVD)
else:
regX, regmXs, pregX = X, mXs, pinv(X.reshape((n_features,-1)))
# compute closed-form solution
self.P, self.D = self._randomized_dpca(regX,regmXs,pinvX=pregX)
def _zero_mean(self,X):
""" Subtracts the mean from each observable """
return X - np.mean(X.reshape((X.shape[0],-1)),1).reshape((X.shape[0],) + (len(X.shape)-1)*(1,))
def _roll_back(self,X,axes,invert=False):
''' Rolls all axis in list crossval_protect to the end (or inverts if invert=True) '''
rX = X
axes = np.sort(axes)
if invert:
for ax in reversed(axes):
rX = np.rollaxis(rX,-1,start=ax)
else:
for ax in axes:
rX = np.rollaxis(rX,ax,start=len(X.shape))
return rX
def _get_n_samples(self,trialX,protect=None):
""" Computes the number of samples for each parameter combinations (except along protect) """
n_unprotect = len(trialX.shape) - len(protect) - 1 if protect is not None else len(trialX.shape) - 1
n_protect = len(protect) if protect is not None else 0
return trialX.shape[0] - np.sum(np.isnan(trialX[(np.s_[:],) + (np.s_[:],)*n_unprotect + (0,)*n_protect]),0)
def _check_protected(self,X,protect):
''' Checks if protect == None or, alternatively, if all protected axis are at the end '''
if protect is None:
protected = True
else:
# convert label in index
protect = [self.labels.index(ax) for ax in protect]
if set(protect) == set(np.arange(len(self.labels)-len(protect),len(self.labels))):
protected = True
else:
protected = False
print('Not all protected axis are at the end! While the algorithm will still work, the performance of the shuffling algorithm will substantially decrease due to unavoidable copies.')
return protected
def train_test_split(self,X,trialX,N_samples=None,sample_ax=0):
""" Splits data in training and validation trial. To this end, we select one data-point in each observable for every
combination of parameters (except along protected axis) for the validation set and average the remaining trial-by-trial
data to get the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
trialX: array-like, shape (n_trials, n_samples, n_features_1, n_features_2, ...)
Trial-by-trial data. Shape is similar to X but with an additional axis at the beginning
with different trials. If different combinations of features have different number
            of trials, then set n_trials to the maximum number of trials and fill unoccupied data
points with NaN.
N_samples: array-like with the same shape as X (except for protected axis).
Number of trials in each condition. If None, computed from trial data.
Returns
-------
trainX: array-like, same shape as X
Training data
blindX: array-like, same shape as X
Validation data
"""
def flat2d(A):
''' Flattens all but the first axis of an ndarray, returns view. '''
return A.reshape((A.shape[0],-1))
protect = self.protect
n_samples = trialX.shape[-1] # number of samples
n_unprotect = len(X.shape) - len(protect) if protect is not None else len(X.shape)
n_protect = len(protect) if protect is not None else 0
if sample_ax != 0:
            raise NotImplementedError('The sample axis needs to come first.')
# test if all protected axes lie at the end
protected = self._check_protected(trialX,protect)
# reorder matrix to protect certain axis (for speedup)
        if not protected:
# turn crossval_protect into index listX
axes = [self.labels.index(ax) + 2 for ax in protect]
# reorder matrix
trialX = self._roll_back(trialX,axes)
X = np.squeeze(self._roll_back(X[None,...],axes))
# compute number of samples in each condition
if N_samples is None:
N_samples = self._get_n_samples(trialX,protect=self.protect)
# get random indices
idx = (np.random.rand(*N_samples.shape)*N_samples).astype(int)
# select values
blindX = np.empty(trialX.shape[1:])
# iterate over multi_index
it = np.nditer(np.empty(N_samples.shape), flags=['multi_index'])
while not it.finished:
blindX[it.multi_index + (np.s_[:],)*n_protect] = trialX[(idx[it.multi_index],) + it.multi_index + (np.s_[:],)*n_protect]
it.iternext()
# compute trainX
trainX = (X*(N_samples/(N_samples-1))[(np.s_[:],)*n_unprotect + (None,)*n_protect] - blindX/(N_samples-1)[(np.s_[:],)*n_unprotect + (None,)*n_protect])
# inverse rolled axis in blindX
        if not protected:
blindX = self._roll_back(blindX[...,None],axes,invert=True)[...,0]
trainX = self._roll_back(trainX[...,None],axes,invert=True)[...,0]
# remean datasets (both equally)
trainX -= np.mean(flat2d(trainX),1)[(np.s_[:],) + (None,)*(len(X.shape)-1)]
blindX -= np.mean(flat2d(blindX),1)[(np.s_[:],) + (None,)*(len(X.shape)-1)]
return trainX, blindX

def shuffle_labels(self,trialX):
""" Shuffles *inplace* labels between conditions in trial-by-trial data, respecting the number of trials per condition.
Parameters
----------
trialX: array-like, shape (n_trials, n_samples, n_features_1, n_features_2, ...)
Trial-by-trial data. Shape is similar to X but with an additional axis at the beginning
with different trials. If different combinations of features have different number
            of trials, then set n_trials to the maximum number of trials and fill unoccupied data
points with NaN.
"""
# import shuffling algorithm from cython source
protect = self.protect
# test if all protected axes lie at the end
protected = self._check_protected(trialX,protect)
# reorder matrix to protect certain axis (for speedup)
        if not protected:
# turn crossval_protect into index list
axes = [self.labels.index(ax) + 2 for ax in protect]
# reorder matrix
trialX = self._roll_back(trialX,axes)
# reshape all non-protect axis into one vector
original_shape = trialX.shape
trialX = trialX.reshape((-1,) + trialX.shape[-len(protect):])
# reshape all protected axis into one
original_shape_protected = trialX.shape
trialX = trialX.reshape((trialX.shape[0],-1))
# shuffle within non-protected axis
shuffle2D(trialX)
# inverse reshaping of protected axis
trialX = trialX.reshape(original_shape_protected)
# inverse reshaping & sample axis
trialX = trialX.reshape(original_shape)
#trialX = np.rollaxis(trialX,0,len(original_shape))
# inverse rolled axis in trialX
        if not protected:
trialX = self._roll_back(trialX,axes,invert=True)
return trialX
def significance_analysis(self,X,trialX,n_shuffles=100,n_splits=100,n_consecutive=1,axis=None,full=False):
'''
Cross-validated significance analysis of dPCA model. Here the generalization from the training
to test data is tested by a simple classification measure in which one tries to predict the
label of a validation test point from the training data. The performance is tested for n_splits
test and training separations. The classification performance is then compared against
the performance on data with randomly shuffled labels. Only if the performance is higher
        than the maximum in the shuffled data do we regard the component as significant.
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
trialX: array-like, shape (n_trials, n_samples, n_features_1, n_features_2, ...)
Trial-by-trial data. Shape is similar to X but with an additional axis at the beginning
with different trials. If different combinations of features have different number
            of trials, then set n_trials to the maximum number of trials and fill unoccupied data
points with NaN.
n_shuffles: integer
Number of label shuffles over which the maximum is taken (default = 100, which
            is equivalent to p < 0.01)
n_splits: integer
Number of train-test splits per shuffle, from which the average performance is
deduced.
n_consecutive: integer
            Sometimes individual data points are deemed significant purely by chance. To reduce
such noise one can demand that at least n consecutive data points are rated as significant.
axis: None or True (default = None)
Determines whether the significance is calculated over the last axis. More precisely,
one is often interested in determining the significance of a component over time. In this
case, set axis to True and make sure the last axis is time.
full: Boolean (default = False)
Whether or not all scores are returned. If False, only the significance matrix is returned.
Returns
-------
masks: Dictionary
Dictionary with keys corresponding to the marginalizations and with values that are
binary nparrays that capture the significance of the demixed components.
true_score: Dictionary (only returned when full = True)
Dictionary with the scores of the data.
scores: Dictionary (only returned when full = True)
Dictionary with the scores of the shuffled data.
'''
assert axis in [None, True]
def compute_mean_score(X,trialX,n_splits):
K = 1 if axis is None else X.shape[-1]
if type(self.n_components) == int:
scores = {key : np.empty((self.n_components, n_splits, K)) for key in keys}
else:
scores = {key : np.empty((self.n_components[key], n_splits, K)) for key in keys}
for shuffle in range(n_splits):
print('.', end=' ')
# do train-validation split
trainX, validX = self.train_test_split(X,trialX)
# fit a dPCA model to training data & transform validation data
trainZ = self.fit_transform(trainX)
validZ = self.transform(validX)
# reshape data to match Cython input
for key in keys:
ncomps = self.n_components if type(self.n_components) == int else self.n_components[key]
# mean over all axis not in key
axset = self.marginalizations[key]
axset = axset if type(axset) == set else set.union(*axset)
axes = set(range(len(X.shape)-1)) - axset
for ax in list(axes)[::-1]:
trainZ[key] = np.mean(trainZ[key],axis=ax+1)
validZ[key] = np.mean(validZ[key],axis=ax+1)
# reshape
if len(X.shape)-2 in axset and axis is not None:
trainZ[key] = trainZ[key].reshape((ncomps,-1,K))
validZ[key] = validZ[key].reshape((ncomps,-1,K))
else:
trainZ[key] = trainZ[key].reshape((ncomps,-1,1))
validZ[key] = validZ[key].reshape((ncomps,-1,1))
# compute classification score
for key in keys:
ncomps = self.n_components if type(self.n_components) == int else self.n_components[key]
for comp in range(ncomps):
scores[key][comp, shuffle] = classification(trainZ[key][comp],validZ[key][comp])
for key in keys:
scores[key] = np.nanmean(scores[key], axis=1)
return scores
if self.opt_regularizer_flag:
print("Regularization not optimized yet; start optimization now.")
self._optimize_regularization(X,trialX)
keys = list(self.marginalizations.keys())
keys.remove(self.labels[-1])
# shuffling is in-place, so we need to copy the data
trialX = trialX.copy()
# compute score of original data
print("Compute score of data: ", end=' ')
true_score = compute_mean_score(X,trialX,n_splits)
print("Finished.")
# data collection
scores = {key : [] for key in keys}
# iterate over shuffles
for it in range(n_shuffles):
print("\rCompute score of shuffled data: ", str(it), "/", str(n_shuffles), end=' ')
# shuffle labels
self.shuffle_labels(trialX)
# mean trial-by-trial data
X = np.nanmean(trialX,axis=0)
score = compute_mean_score(X,trialX,n_splits)
for key in keys:
scores[key].append(score[key])
# binary mask, if data score is above maximum shuffled score make true
masks = {}
for key in keys:
maxscore = np.amax(np.dstack(scores[key]),-1)
masks[key] = true_score[key] > maxscore
if n_consecutive > 1:
for key in keys:
mask = masks[key]
for k in range(mask.shape[0]):
masks[key][k,:] = denoise_mask(masks[key][k].astype(np.int32),n_consecutive)
if full:
return masks, true_score, scores
else:
return masks
def transform(self, X, marginalization=None):
"""Apply the dimensionality reduction on X.
        X is projected onto the first principal components previously extracted
from a training set.
Parameters
----------
X: array-like, shape (n_samples, n_features_1, n_features_2, ...)
            Training data, where n_samples is the number of samples
            and n_features_j is the number of the j-features (where the axes correspond
to different parameters).
marginalization : str or None
Marginalization subspace upon which to project, if None return dict
with projections on all marginalizations
Returns
-------
X_new : dict with arrays of the same shape as X
Dictionary in which each key refers to one marginalization and the value is the
            latent component. If a specific marginalization is given, only that array is returned.
"""
X = self._zero_mean(X)
total_variance = np.sum((X - np.mean(X))**2)
def marginal_variances(marginal):
''' Computes the relative variance explained of each component
within a marginalization
'''
D, Xr = self.D[marginal], X.reshape((X.shape[0],-1))
return [np.sum(np.dot(D[:,k], Xr)**2) / total_variance for k in range(D.shape[1])]
if marginalization is not None:
D, Xr = self.D[marginalization], X.reshape((X.shape[0],-1))
X_transformed = np.dot(D.T, Xr).reshape((D.shape[1],) + X.shape[1:])
self.explained_variance_ratio_ = {marginalization : marginal_variances(marginalization)}
else:
X_transformed = {}
self.explained_variance_ratio_ = {}
for key in list(self.marginalizations.keys()):
X_transformed[key] = np.dot(self.D[key].T, X.reshape((X.shape[0],-1))).reshape((self.D[key].shape[1],) + X.shape[1:])
self.explained_variance_ratio_[key] = marginal_variances(key)
return X_transformed
def inverse_transform(self, X, marginalization):
""" Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
X = self._zero_mean(X)
X_transformed = np.dot(self.P[marginalization],X.reshape((X.shape[0],-1))).reshape((self.P[marginalization].shape[0],) + X.shape[1:])
return X_transformed
def reconstruct(self, X, marginalization):
""" Transform data first into reduced space before projecting
it back into data space. Equivalent to inverse_transform(transform(X)).
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
"""
return self.inverse_transform(self.transform(X,marginalization),marginalization)
|
machenslab/dPCA
|
python/dPCA/dPCA.py
|
Python
|
mit
| 40,570
|
[
"NEURON"
] |
feb9d0408371729a58d519b13483f80e0788df70e1230e56f84514359be1e64c
|
from bs_utils.utils import *
import re
BAM_MATCH = 0
BAM_INS = 1
BAM_DEL = 2
BAM_SOFTCLIP = 4
CIGAR_OPS = {'M' : BAM_MATCH, 'I' : BAM_INS, 'D' : BAM_DEL, 'S' : BAM_SOFTCLIP}
def N_MIS(r,g):
mismatches = 0
if len(r)==len(g):
for i in xrange(len(r)):
if r[i] != g[i] and r[i] != "N" and g[i] != "N" and not(r[i] == 'T' and g[i] == 'C'):
mismatches += 1
#
#
#
return mismatches
#
#----------------------------------------------------------------
"""
Example:
========
Read : ACCGCGTTGATCGAGTACGTACGTGGGTC
Adapter : ....................ACGTGGGTCCCG
========
no_mismatch : the maximum number of mismatches allowed
Algorithm: (allowing 1 mismatch)
========
-Step 1:
ACCGCGTTGATCGAGTACGTACGTGGGTC
||XX
ACGTGGGTCCCG
-Step 2:
ACCGCGTTGATCGAGTACGTACGTGGGTC
X||X
.ACGTGGGTCCCG
-Step 3:
ACCGCGTTGATCGAGTACGTACGTGGGTC
XX
..ACGTGGGTCCCG
-Step ...
-Step N:
ACCGCGTTGATCGAGTACGTACGTGGGTC
|||||||||
....................ACGTGGGTCCCG
Success & return!
========
"""
# Remove the adapter from 3' end
def RemoveAdapter ( read, adapter, no_mismatch, rm_back=0) :
lr = len(read)
la = len(adapter)
if la == 0 :
return read
# Check the empty adapter, namely, the reads start with the 2nd base of adapter,
# not including the 'A' base in front of the adapter.
if adapter[2:] == read[0:(la-1)] :
return ""
#
for i in xrange( lr - no_mismatch ) :
read_pos = i
adapter_pos = 0
count_no_mis = 0
while (adapter_pos < la) and (read_pos < lr) :
if (read[read_pos] == adapter[adapter_pos]) :
read_pos = read_pos + 1
adapter_pos = adapter_pos + 1
else :
count_no_mis = count_no_mis + 1
if count_no_mis > no_mismatch :
break
else :
read_pos = read_pos + 1
adapter_pos = adapter_pos + 1
#
#
# while_end
# Cut the extra bases before the adapter
# --C|CG G-- => --CNN+A+<adapter>
# --G GC|C-- --GGC
if adapter_pos == la or read_pos == lr :
if i <= rm_back :
return ''
else :
return read[:(i-rm_back)]
#
#
# for_end
return read
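#
# Illustrative call (not part of the original source), using the example from the
# comment block above with one mismatch allowed:
#   RemoveAdapter("ACCGCGTTGATCGAGTACGTACGTGGGTC", "ACGTGGGTCCCG", 1)
# would be expected to return "ACCGCGTTGATCGAGTACGT", i.e. the read with the
# 3' adapter fragment trimmed off.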
def Remove_5end_Adapter ( read, adapter, no_mismatch) :
lr = len(read)
la = len(adapter)
if la == 0 :
return read
#
for i in xrange (la - no_mismatch) :
read_pos = 0
adapter_pos = i
count_no_mis = 0
while (adapter_pos < la) and (read_pos < lr) :
if (read[read_pos] == adapter[adapter_pos]) :
adapter_pos = adapter_pos + 1
read_pos = read_pos + 1
else :
count_no_mis = count_no_mis + 1
if count_no_mis > no_mismatch :
break
else :
read_pos = read_pos + 1
adapter_pos = adapter_pos + 1
#
#
# while_end
if adapter_pos == la :
return read[(la-i):]
#
return read
#
def next_nuc(seq, pos, n):
""" Returns the nucleotide that is n places from pos in seq. Skips gap symbols.
"""
i = pos + 1
while i < len(seq):
if seq[i] != '-':
n -= 1
if n == 0: break
i += 1
if i < len(seq) :
return seq[i]
else :
return 'N'
#
#
def methy_seq(read, genome):
H = ['A', 'C', 'T']
m_seq = []
xx = "-"
for i in xrange(len(read)):
if i>=len(genome) or genome[i] == '-':
xx = "-"
elif read[i] != 'C' and read[i] != 'T':
xx = "-"
elif read[i] == "T" and genome[i] == "C": #(unmethylated):
nn1 = next_nuc(genome, i, 1)
if nn1 == "G":
xx = "x"
elif nn1 in H :
nn2 = next_nuc(genome, i, 2)
if nn2 == "G":
xx = "y"
elif nn2 in H :
xx = "z"
#
#
elif read[i] == "C" and genome[i] == "C": #(methylated):
nn1 = next_nuc(genome, i, 1)
if nn1 == "G":
xx = "X"
elif nn1 in H :
nn2 = next_nuc(genome, i, 2)
#
if nn2 == "G":
xx = "Y"
elif nn2 in H:
xx = "Z"
#
#
else:
xx = "-"
#
m_seq.append(xx)
#
return ''.join(m_seq)
#
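# Sketch of the per-base methylation codes produced by methy_seq (our reading of
# the code, not from the original source): lowercase = unmethylated (read 'T'
# over a genomic 'C'), uppercase = methylated (read 'C' over a genomic 'C');
# x/X = CpG context, y/Y = CHG context, z/Z = CHH context, '-' = no cytosine call.
# For example:
#   methy_seq("TG", "CG")   ->  "x-"   (unmethylated CpG; 'G' is not a C/T call)
#   methy_seq("CG", "CG")   ->  "X-"   (methylated CpG)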
def mcounts(mseq, mlst, ulst):
out_mlst=[mlst[0]+mseq.count("X"), mlst[1]+mseq.count("Y"), mlst[2]+mseq.count("Z")]
out_ulst=[ulst[0]+mseq.count("x"), ulst[1]+mseq.count("y"), ulst[2]+mseq.count("z")]
return out_mlst, out_ulst
#
def process_aligner_output(filename, pair_end = False):
#m = re.search(r'-('+'|'.join(supported_aligners) +')-TMP', filename)
m = re.search(r'-('+'|'.join(supported_aligners) +')-.*TMP', filename)
if m is None:
error('The temporary folder path should contain the name of one of the supported aligners: ' + filename)
#
format = m.group(1)
try :
input = open(filename)
except IOError:
print "[Error] Cannot open file %s" % filename
exit(-1)
#
QNAME, FLAG, RNAME, POS, MAPQ, CIGAR, RNEXT, PNEXT, TLEN, SEQ, QUAL = range(11)
def parse_SAM(line):
# fix error when reading file with lots of \x00 # date on 2016-12-09
line = line.replace('\x00', '').strip()
buf = line.split("\t")
if len(buf) < 11 :
sys.stderr.write("[warning] SAM input without enough columns\n")
return None, None, None, None, None, None
#
flag = int(buf[FLAG])
# skip reads that are not mapped
# skip reads that have probability of being non-unique higher than 1/10
if flag & 0x4 : # or int(buf[MAPQ]) < 10:
return None, None, None, None, None, None
# print "format = ", format
if format == BOWTIE:
mismatches = int([buf[i][5:] for i in xrange(11, len(buf)) if buf[i][:5] == 'NM:i:'][0]) # get the edit distance
# --- bug fixed ------
elif format == BOWTIE2:
if re.search(r'(.)*-e2e-TMP(.*)', filename) is None : # local model
mismatches = 1-int([buf[i][5:] for i in xrange(11, len(buf)) if buf[i][:5] == 'AS:i:'][0])
# print "====local=====\n"
## bowtie2 use AS tag (score) to evaluate the mapping. The higher, the better.
else : # end-to-end model
# print "end-to-end\n"
mismatches = int([buf[i][5:] for i in xrange(11, len(buf)) if buf[i][:5] == 'XM:i:'][0])
# --- Weilong ---------
elif format == SOAP:
            mismatches = 1 - int(buf[MAPQ])
# mismatches = 1/float(buf[MAPQ])
## downstream might round (0,1) to 0, so use integer instead
## fixed by Weilong
elif format == RMAP:
# chr16 75728107 75728147 read45 9 -
# chr16 67934919 67934959 read45 9 -
mismatches = buf[4]
#
return (buf[QNAME], # read ID
buf[RNAME], # reference ID
int(buf[POS]) - 1, # position, 0 based (SAM is 1 based)
mismatches, # number of mismatches
parse_cigar(buf[CIGAR]), # the parsed cigar string
flag & 0x40 # true if it is the first mate in a pair, false if it is the second mate
)
#
SOAP_QNAME, SOAP_SEQ, SOAP_QUAL, SOAP_NHITS, SOAP_AB, SOAP_LEN, SOAP_STRAND, SOAP_CHR, SOAP_LOCATION, SOAP_MISMATCHES = range(10)
def parse_SOAP(line):
buf = line.split()
return (buf[SOAP_QNAME],
buf[SOAP_CHR],
int(buf[SOAP_LOCATION]) - 1,
int(buf[SOAP_MISMATCHES]),
buf[SOAP_AB],
buf[SOAP_STRAND],
parse_cigar(buf[SOAP_LEN]+'M')
)
#
# chr16 75728107 75728147 read45 9 -
RMAP_CHR, RMAP_START, RMAP_END, RMAP_QNAME, RMAP_MISMATCH, RMAP_STRAND = range(6)
def parse_RMAP(line):
buf = line.split()
return ( buf[RMAP_QNAME],
buf[RMAP_CHR],
int(buf[RMAP_START]), # to check -1 or not
int(buf[RMAP_END]) - int(buf[RMAP_START]) + 1,
int(buf[RMAP_MISMATCH]),
buf[RMAP_STRAND]
)
#
if format == BOWTIE or format == BOWTIE2:
if pair_end:
for line in input:
header1, chr1, location1, no_mismatch1, cigar1, _ = parse_SAM(line)
header2, _, location2, no_mismatch2, cigar2, mate_no2 = parse_SAM(input.next())
#
if header1 and header2:
# flip the location info if the second mate comes first in the alignment file
if mate_no2:
location1, location2 = location2, location1
cigar1, cigar2 = cigar2, cigar1
#
yield header1, chr1, no_mismatch1 + no_mismatch2, location1, cigar1, location2, cigar2
#
#
else:
for line in input:
header, chr, location, no_mismatch, cigar, _ = parse_SAM(line)
if header is not None:
yield header, chr, location, no_mismatch, cigar
#
#
#
elif format == SOAP:
if pair_end:
for line in input:
header1, chr1, location1, no_mismatch1, mate1, strand1, cigar1 = parse_SOAP(line)
header2, _ , location2, no_mismatch2, _, strand2, cigar2 = parse_SOAP(input.next())
#
if mate1 == 'b':
location1, location2 = location2, location1
strand1, strand2 = strand2, strand1
                    cigar1, cigar2 = cigar2, cigar1
#
if header1 and header2 and strand1 == '+' and strand2 == '-':
yield header1, chr1, no_mismatch1 + no_mismatch2, location1, cigar1, location2, cigar2
#
#
#
else:
for line in input:
header, chr, location, no_mismatch, _, strand, cigar = parse_SOAP(line)
if header and strand == '+':
yield header, chr, location, no_mismatch, cigar
#
#
#
elif format == RMAP :
if pair_end :
todo = 0
# to do
else :
for line in input:
header, chr, location, read_len, no_mismatch, strand = parse_RMAP(line)
cigar = str(read_len) + "M"
yield header, chr, location, no_mismatch, cigar
#
#
#
input.close()
#
def parse_cigar(cigar_string):
i = 0
prev_i = 0
cigar = []
while i < len(cigar_string):
if cigar_string[i] in CIGAR_OPS:
cigar.append((CIGAR_OPS[cigar_string[i]], int(cigar_string[prev_i:i])))
prev_i = i + 1
i += 1
return cigar
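# Quick sketch of parse_cigar (not part of the original source):
#   parse_cigar("3S10M2I4D")
# returns [(BAM_SOFTCLIP, 3), (BAM_MATCH, 10), (BAM_INS, 2), (BAM_DEL, 4)],
# i.e. a list of (operation, length) pairs using the BAM_* codes defined above.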
def get_read_start_end_and_genome_length(cigar):
r_start = cigar[0][1] if cigar[0][0] == BAM_SOFTCLIP else 0
r_end = r_start
g_len = 0
for edit_op, count in cigar:
if edit_op == BAM_MATCH:
r_end += count
g_len += count
elif edit_op == BAM_INS:
r_end += count
elif edit_op == BAM_DEL:
g_len += count
#
#
return r_start, r_end, g_len # return the start and end in the read and the length of the genomic sequence
# r_start : start position on the read
# r_end : end position on the read
# g_len : length of the mapped region on genome
#
def cigar_to_alignment(cigar, read_seq, genome_seq):
""" Reconstruct the pairwise alignment based on the CIGAR string and the two sequences
"""
# reconstruct the alignment
r_pos = cigar[0][1] if cigar[0][0] == BAM_SOFTCLIP else 0
g_pos = 0
r_aln = ''
g_aln = ''
for edit_op, count in cigar:
if edit_op == BAM_MATCH:
r_aln += read_seq[r_pos : r_pos + count]
g_aln += genome_seq[g_pos : g_pos + count]
r_pos += count
g_pos += count
elif edit_op == BAM_DEL:
r_aln += '-'*count
g_aln += genome_seq[g_pos : g_pos + count]
g_pos += count
elif edit_op == BAM_INS:
r_aln += read_seq[r_pos : r_pos + count]
g_aln += '-'*count
r_pos += count
#
#
return r_aln, g_aln
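# Example alignment reconstruction (not from the original source): with
#   cigar = parse_cigar("4M1I3M")   ->  [(BAM_MATCH, 4), (BAM_INS, 1), (BAM_MATCH, 3)]
#   cigar_to_alignment(cigar, "ACGTAGGG", "ACGTGGG")
# the read alignment would be "ACGTAGGG" and the genome alignment "ACGT-GGG",
# i.e. the base inserted in the read is paired with a gap in the genome.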
#
# return sequence is [start, end), not include 'end'
def get_genomic_sequence(genome, start, end, strand = '+'):
if strand != '+' and strand != '-' :
print "[Bug] get_genomic_sequence input should be \'+\' or \'-\'."
exit(-1)
if start > 1:
prev = genome[start-2:start]
elif start == 1:
prev = 'N'+genome[0]
else:
prev = 'NN'
#
if end < len(genome) - 1:
next = genome[end: end + 2]
elif end == len(genome) - 1:
next = genome[end] + 'N'
else:
next = 'NN'
#
origin_genome = genome[start:end]
#
if strand == '-':
# reverse complement everything if strand is '-'
revc = reverse_compl_seq('%s%s%s' % (prev, origin_genome, next))
prev, origin_genome, next = revc[:2], revc[2:-2], revc[-2:]
#
return origin_genome, next, '%s_%s_%s' % (prev, origin_genome, next)
# next : next two nucleotides
#
|
BSSeeker/BSseeker2
|
bs_align/bs_align_utils.py
|
Python
|
mit
| 13,972
|
[
"Bowtie"
] |
bc8bdd0e938b2846bb8241a9aee9e34a92ed180fd5d2637f838ccfb21358c849
|
# -*- coding: utf-8 -*-
import mds.math
import mds.ordered_set
class Convention(object):
@staticmethod
def convention_names(
dataset):
result = []
for attribute_name in ["Conventions", "conventions"]:
if hasattr(dataset, attribute_name):
names = getattr(dataset, attribute_name)
# http://www.unidata.ucar.edu/software/netcdf/conventions.html:
# The value of the `Conventions' attribute may be a
# single text string containing a list of the convention
# names separated by blank space (recommended) or commas
# (if a convention name contains blanks)
separator = "," if names.find(",") != -1 else None
result = [name.strip() for name in names.split(separator)]
break
return result
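    # Illustrative behaviour (our reading of the code, not from the original
    # source): a dataset attribute Conventions = "CF-1.6 COARDS" yields
    # ['CF-1.6', 'COARDS'], and so does "CF-1.6, COARDS", since the separator
    # is chosen based on whether a comma is present in the attribute value.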
@staticmethod
def extent_of_variable(
variable):
assert not variable is None
assert len(variable.shape) == 1
assert len(variable) > 0
first, last = variable[0], variable[-1]
return min(first, last), max(first, last)
def __init__(self,
dataset,
filter_out_nd_coordinates):
self.dataset = dataset
self.filter_out_nd_coordinates = filter_out_nd_coordinates
def coordinate_variable_names(self,
variable_name):
assert variable_name in self.dataset.variables, variable_name
result = mds.ordered_set.OrderedSet()
variable = self.dataset.variables[variable_name]
dimension_names = variable.dimensions
for dimension_name in dimension_names:
if dimension_name in self.dataset.variables:
if self.is_coordinate_variable(dimension_name):
result.add(dimension_name)
return result
def is_space_dimension_variable(self,
dimension_name):
result = False
if dimension_name in self.dataset.variables and \
self.is_coordinate_variable(dimension_name):
variable = self.dataset.variables[dimension_name]
result = self.is_x_dimension_variable(variable) or \
self.is_y_dimension_variable(variable)
return result
def space_dimension_names(self,
variable_name):
assert variable_name in self.dataset.variables, variable_name
variable = self.dataset.variables[variable_name]
result = mds.ordered_set.OrderedSet()
for dimension_name in variable.dimensions:
if self.is_space_dimension_variable(dimension_name):
result.add(dimension_name)
return result
def time_dimension_names(self,
variable_name):
assert variable_name in self.dataset.variables, variable_name
variable = self.dataset.variables[variable_name]
result = mds.ordered_set.OrderedSet()
for dimension_name in variable.dimensions:
if self.is_time_dimension_variable(dimension_name):
result.add(dimension_name)
return result
def data_variable_names(self):
result = mds.ordered_set.OrderedSet()
for variable_name in self.dataset.variables:
if self.is_data_variable(variable_name):
result.add(variable_name)
return result
def spatial_data_variable_names(self):
result = [variable_name for variable_name in
self.data_variable_names() if
self.is_spatial_variable(variable_name)]
return mds.ordered_set.OrderedSet(result)
def temporal_data_variable_names(self):
result = [variable_name for variable_name in
self.data_variable_names() if
self.is_temporal_variable(variable_name)]
return mds.ordered_set.OrderedSet(result)
def is_spatial_variable(self,
variable_name):
assert variable_name in self.dataset.variables, variable_name
# A spatial variable has two dimensions representing the x and y
# dimensions.
space_dimension_names = []
variable = self.dataset.variables[variable_name]
for dimension_name in variable.dimensions:
if dimension_name in self.dataset.variables and \
self.is_space_dimension_variable(dimension_name):
space_dimension_names.append(dimension_name)
return len(space_dimension_names) == 2
def is_temporal_variable(self,
variable_name):
assert variable_name in self.dataset.variables, variable_name
variable = self.dataset.variables[variable_name]
return any([self.is_time_dimension_variable(dimension_name) for
dimension_name in variable.dimensions if dimension_name in
self.dataset.variables])
def extent(self,
variable_name):
assert self.is_spatial_variable(variable_name), variable_name
coordinate_variable_names = self.coordinate_variable_names(
variable_name)
coordinate_variables = [self.dataset.variables[name] for name in
coordinate_variable_names]
x_variable = None
y_variable = None
for variable in coordinate_variables:
if x_variable is None and self.is_x_dimension_variable(variable):
x_variable = variable
if y_variable is None and self.is_y_dimension_variable(variable):
y_variable = variable
x_min, x_max = self.extent_of_variable(x_variable)
y_min, y_max = self.extent_of_variable(y_variable)
return [x_min, y_min, x_max, y_max]
def variable_attribute_values(self,
attribute_name):
result = []
for variable in self.dataset.variables.values():
if hasattr(variable, attribute_name):
result.append(getattr(variable, attribute_name))
return result
def spatial_dimension_slices(self,
variable_name,
extent):
space_dimension_names = list(self.space_dimension_names(
variable_name))
assert len(space_dimension_names) == 2, "{}: {}".format(
variable_name, space_dimension_names)
assert all(dimension_name in self.dataset.variables for dimension_name
in space_dimension_names)
spatial_dimension_variables = [self.dataset.variables[dimension_name]
for dimension_name in space_dimension_names]
if self.is_x_dimension_variable(spatial_dimension_variables[0]):
assert self.is_y_dimension_variable(spatial_dimension_variables[1])
x_index = 0
y_index = 1
else:
assert self.is_x_dimension_variable(spatial_dimension_variables[1])
assert self.is_y_dimension_variable(spatial_dimension_variables[0])
x_index = 1
y_index = 0
result = {}
result[space_dimension_names[x_index]] = mds.math.values_to_slice(
spatial_dimension_variables[x_index][:], extent[0], extent[2])
result[space_dimension_names[y_index]] = mds.math.values_to_slice(
spatial_dimension_variables[y_index][:], extent[1], extent[3])
return result
def compatible_data_variable_names(self,
variable_name):
"""
Return OrderedSet with names of data variables that are compatible with
data variable *variable_name*.
If variable *variable_name* is a spatial data variable, compatible
variables are those data variables that are not spatial or that are
spatial and share the same spatial dimensions.
If variable *variable_name* is not a spatial data variable, compatible
variables are all data variables.
The result includes *variable_name*.
"""
result = mds.ordered_set.OrderedSet()
data_variable_names = self.data_variable_names()
assert variable_name in data_variable_names, variable_name
if not self.is_spatial_variable(variable_name):
result = data_variable_names
else:
space_dimension_names = self.space_dimension_names(
variable_name)
for data_variable_name in data_variable_names:
if not self.is_spatial_variable(data_variable_name):
result.add(data_variable_name)
else:
if set(self.space_dimension_names(data_variable_name)) \
== set(space_dimension_names):
result.add(data_variable_name)
return result
def depends_on_nd_coordinate_variable(self,
variable_name):
# Return True if one of the coordinate variables is nD.
coordinate_variable_names = self.coordinate_variable_names(
variable_name)
result = False
for coordinate_variable_name in coordinate_variable_names:
coordinate_variable = self.dataset.variables[
coordinate_variable_name]
if len(coordinate_variable.shape) > 1:
result = True
break
return result
|
jfrygeo/solutions-geoprocessing-toolbox
|
suitability/toolboxes/scripts/MultidimensionSupplementalTools/MultidimensionSupplementalTools/Scripts/mds/netcdf/convention/convention.py
|
Python
|
apache-2.0
| 9,216
|
[
"NetCDF"
] |
267f93361ff2e89ed511d35f2d05234d6a6db93d3f1697ca8025723aa4d6d228
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************************************
**espressopp.interaction.PotentialUniqueDist**
********************************************************
This is an abstract class; it is only meant to be inherited from.
.. function:: espressopp.interaction.PotentialUniqueDist.computeEnergy(\*args)
:param \*args:
:type \*args:
:rtype:
.. function:: espressopp.interaction.PotentialUniqueDist.computeForce(\*args)
:param \*args:
:type \*args:
:rtype:
"""
from espressopp import pmi
from espressopp import toReal3DFromVector
from _espressopp import interaction_PotentialUniqueDist
# Python base class for PotentialUniqueDists
class PotentialUniqueDistLocal(object):
def computeEnergy(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
return self.cxxclass.computeEnergy(self, arg0)
return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args))
def computeForce(self, *args):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, float) or isinstance(arg0, int):
newargs = [arg0, 0, 0]
return self.cxxclass.computeForce(self, toReal3DFromVector(*newargs))[0]
return self.cxxclass.computeForce(self, toReal3DFromVector(*args))
def _setShift(self, shift="auto"):
if (shift == "auto"):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setAutoShift(self)
else:
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.shift.fset(self, shift)
def _getShift(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.shift.fget(self)
shift = property(_getShift, _setShift)
if pmi.isController:
class PotentialUniqueDist(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
localcall = [ 'computeForce', 'computeEnergy' ],
pmiproperty = ['cutoff', 'shift']
)
# class PythonPotentialUniqueDistLocal(PotentialUniqueDist_PythonPotentialUniqueDist):
# def getCutoffSqr(self):
# pass
# def computeForce(self, *args):
# """Override this method to compute the force for a given distance.
# It should at least be able to handle a Real3D distance input.
# """
# pass
# def computeEnergy(self, *args):
# """Override this method to compute the energy at a given distance.
# It should at least be able to handle a Real3D distance input.
# """
# pass
|
capoe/espressopp.soap
|
src/interaction/PotentialUniqueDist.py
|
Python
|
gpl-3.0
| 4,000
|
[
"ESPResSo"
] |
73360e5ef89890df541a9a594d43133b17902b83ecf5b768d807fb6dd08ebb1c
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api, dependency_resolution
from ansible.galaxy.dependency_resolution.dataclasses import Candidate, Requirement
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
class RequirementCandidates():
def __init__(self):
self.candidates = []
def func_wrapper(self, func):
def run(*args, **kwargs):
self.candidates = func(*args, **kwargs)
return self.candidates
return run
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
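# Rough shape of the fake responses built above (not part of the original tests):
# for the v2 API the payload looks like
#   {"count": 2, "next": None, "previous": None,
#    "results": [{"href": "...", "version": "1.0.0"}, ...]}
# while v3 wraps the same entries in {"meta": ..., "data": ..., "links": ...}.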
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse='function')
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == collection_artifact[0]
assert actual.ver == u'0.1.0'
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == to_text(version)
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_artifact_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# a collection artifact should always contain a valid version
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
expected = (
'^Collection metadata file at `.*` is expected to have a valid SemVer '
'version value but got {empty_unicode_string!r}$'.
format(empty_unicode_string=u'')
)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
# version may be falsy or an arbitrary string for collections in development
manifest_path = os.path.join(collection_artifact[0], b'galaxy.yml')
metadata = {
'authors': ['Ansible'],
'readme': 'README.md',
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {},
}
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(yaml.safe_dump(metadata)))
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_dir_path_as_unknown(collection_artifact[0], concrete_artifact_cm)
# While the folder name suggests a different collection, we treat galaxy.yml as the source of truth here.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.src == collection_artifact[0]
assert actual.ver == u'*'
def test_build_requirement_from_tar(collection_artifact):
tmp_path = os.path.join(os.path.split(collection_artifact[1])[0], b'temp')
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(tmp_path, validate_certs=False)
actual = Requirement.from_requirement_dict({'name': to_text(collection_artifact[1])}, concrete_artifact_cm)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.src == to_text(collection_artifact[1])
assert actual.ver == u'0.1.0'
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(test_file)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
with pytest.raises(KeyError, match='namespace'):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
Requirement.from_requirement_dict({'name': to_text(tar_path)}, concrete_artifact_cm)
def test_build_requirement_from_name(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_version_metadata = MagicMock(
namespace='namespace', name='collection',
version='2.1.10', artifact_sha256='', dependencies={}
)
monkeypatch.setattr(api.GalaxyAPI, 'get_collection_version_metadata', mock_version_metadata)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
collections = ['namespace.collection']
requirements_file = None
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', collections[0]])
requirements = cli._require_one_of_collections_requirements(
collections, requirements_file, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.ver == u'2.1.10'
assert actual.src == galaxy_server
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:2.0.1-beta.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:2.0.1-beta.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, True, False, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1-beta.1'
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch, tmp_path_factory):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.0.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_version_list = MagicMock()
mock_version_list.return_value = []
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_version_list)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>1.0.1'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(
requirements, [broken_server, galaxy_server], concrete_artifact_cm, None, True, False, False
)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'1.0.3'
assert mock_version_list.call_count == 1
assert mock_version_list.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.return_value = []
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n* namespace.collection:* (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch, tmp_path_factory):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>1.0.1'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server, galaxy_server], concrete_artifact_cm, None, False, False, False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:==2.0.0'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:==2.0.0'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.0'
assert [c.ver for c in matches.candidates] == [u'2.0.0']
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:>=2.0.1,<2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:>=2.0.1,<2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.1'
assert [c.ver for c in matches.candidates] == [u'2.0.1']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch, tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
multi_api_proxy = collection.galaxy_api_proxy.MultiGalaxyAPIProxy([galaxy_server], concrete_artifact_cm)
dep_provider = dependency_resolution.providers.CollectionDependencyProvider(apis=multi_api_proxy, concrete_artifacts_manager=concrete_artifact_cm)
matches = RequirementCandidates()
mock_find_matches = MagicMock(side_effect=matches.func_wrapper(dep_provider.find_matches), autospec=True)
monkeypatch.setattr(dependency_resolution.providers.CollectionDependencyProvider, 'find_matches', mock_find_matches)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.2'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.2'], None, artifacts_manager=concrete_artifact_cm
)['collections']
actual = collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)['namespace.collection']
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.src == galaxy_server
assert actual.ver == u'2.0.5'
# should be ordered latest to earliest
assert [c.ver for c in matches.candidates] == [u'2.0.5', u'2.0.4', u'2.0.3', u'2.0.1', u'2.0.0']
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.5', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection:!=2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['namespace.collection:!=2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=2.0.5 (direct request)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_dep_candidate_with_conflict(monkeypatch, tmp_path_factory, galaxy_server):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_get_info_return = [
api.CollectionVersionMetadata('parent', 'collection', '2.0.5', None, None, {'namespace.collection': '!=1.0.0'}),
api.CollectionVersionMetadata('namespace', 'collection', '1.0.0', None, None, {}),
]
mock_get_info = MagicMock(side_effect=mock_get_info_return)
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(side_effect=[['2.0.5'], ['1.0.0']])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'parent.collection:2.0.5'])
requirements = cli._require_one_of_collections_requirements(
['parent.collection:2.0.5'], None, artifacts_manager=concrete_artifact_cm
)['collections']
expected = "Failed to resolve the requested dependencies map. Could not satisfy the following requirements:\n"
expected += "* namespace.collection:!=1.0.0 (dependency of parent.collection:2.0.5)"
with pytest.raises(AnsibleError, match=re.escape(expected)):
collection._resolve_depenency_map(requirements, [galaxy_server], concrete_artifact_cm, None, False, True, False)
def test_install_installed_collection(monkeypatch, tmp_path_factory, galaxy_server):
mock_installed_collections = MagicMock(return_value=[Candidate('namespace.collection', '1.2.3', None, 'dir')])
monkeypatch.setattr(collection, 'find_existing_collections', mock_installed_collections)
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections'))
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(test_dir, validate_certs=False)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '1.2.3', None, None, {})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
mock_get_versions = MagicMock(return_value=['1.2.3', '1.3.0'])
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
cli = GalaxyCLI(args=['ansible-galaxy', 'collection', 'install', 'namespace.collection'])
cli.run()
expected = "Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`."
assert mock_display.mock_calls[1][1][0] == expected
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
output_path = os.path.join(os.path.split(collection_tar)[0])
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
candidate = Candidate('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')
collection.install(candidate, to_text(output_path), concrete_artifact_cm)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
shutil.rmtree(collection_path)
collections_dir = ('%s' % os.path.sep).join(to_text(collection_path).split('%s' % os.path.sep)[:-2])
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(concrete_artifact_cm, 'get_galaxy_artifact_path', mock_download)
req = Requirement('ansible_namespace.collection', '0.1.0', 'https://downloadme.com', 'galaxy')
collection.install(req, to_text(collections_dir), concrete_artifact_cm)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection:0.1.0 was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0].src == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][0].type == 'galaxy'
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
assert os.path.isdir(collection_path)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 1
assert display_msgs[0] == 'Nothing to do. All requested collections are already installed. If you want to reinstall them, consider using `--force`.'
for msg in display_msgs:
assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
for file in [b'MANIFEST.json', b'galaxy.yml']:
b_path = os.path.join(collection_path, file)
if os.path.isfile(b_path):
os.unlink(b_path)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
concrete_artifact_cm = collection.concrete_artifact_manager.ConcreteArtifactsManager(temp_path, validate_certs=False)
requirements = [Requirement('ansible_namespace.collection', '0.1.0', to_text(collection_tar), 'file')]
collection.install_collections(requirements, to_text(temp_path), [], False, False, False, False, False, False, concrete_artifact_cm)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
assert display_msgs[3] == "ansible_namespace.collection:0.1.0 was installed successfully"
|
s-hertel/ansible
|
test/units/galaxy/test_collection_install.py
|
Python
|
gpl-3.0
| 43,229
|
[
"Galaxy"
] |
20f7d78462c72319aa4942702803aafc3efc5c8a308072941171f9dd870e9f92
|
#
# QAPI types generator
#
# Copyright IBM, Corp. 2011
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
#
# This work is licensed under the terms of the GNU GPLv2.
# See the COPYING.LIB file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import sys
import os
import getopt
import errno
def generate_fwd_struct(name, members):
return mcgen('''
typedef struct %(name)s %(name)s;
typedef struct %(name)sList
{
%(name)s *value;
struct %(name)sList *next;
} %(name)sList;
''',
name=name)
def generate_struct(structname, fieldname, members):
ret = mcgen('''
struct %(name)s
{
''',
name=structname)
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
bool has_%(c_name)s;
''',
c_name=c_var(argname))
if structured:
push_indent()
ret += generate_struct("", argname, argentry)
pop_indent()
else:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(argentry), c_name=c_var(argname))
if len(fieldname):
fieldname = " " + fieldname
ret += mcgen('''
}%(field)s;
''',
field=fieldname)
return ret
def generate_enum_lookup(name, values):
ret = mcgen('''
const char *%(name)s_lookup[] = {
''',
name=name)
i = 0
for value in values:
ret += mcgen('''
"%(value)s",
''',
value=value.lower())
ret += mcgen('''
NULL,
};
''')
return ret
def generate_enum(name, values):
lookup_decl = mcgen('''
extern const char *%(name)s_lookup[];
''',
name=name)
enum_decl = mcgen('''
typedef enum %(name)s
{
''',
name=name)
# append automatically generated _MAX value
enum_values = values + [ 'MAX' ]
i = 0
for value in enum_values:
enum_decl += mcgen('''
%(abbrev)s_%(value)s = %(i)d,
''',
abbrev=de_camel_case(name).upper(),
value=c_fun(value).upper(),
i=i)
i += 1
enum_decl += mcgen('''
} %(name)s;
''',
name=name)
return lookup_decl + enum_decl
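# Illustrative sketch (hypothetical schema enum, not from the original source): for
# generate_enum('MigrationStatus', ['none', 'setup']) the emitted C looks roughly like
#
#   extern const char *MigrationStatus_lookup[];
#   typedef enum MigrationStatus
#   {
#       MIGRATION_STATUS_NONE = 0,
#       MIGRATION_STATUS_SETUP = 1,
#       MIGRATION_STATUS_MAX = 2,
#   } MigrationStatus;
#
# while generate_enum_lookup() above emits the matching definition for the .c file:
#
#   const char *MigrationStatus_lookup[] = {
#       "none",
#       "setup",
#       NULL,
#   };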
def generate_union(name, typeinfo):
ret = mcgen('''
struct %(name)s
{
%(name)sKind kind;
union {
void *data;
''',
name=name)
for key in typeinfo:
ret += mcgen('''
%(c_type)s %(c_name)s;
''',
c_type=c_type(typeinfo[key]),
c_name=c_fun(key))
ret += mcgen('''
};
};
''')
return ret
def generate_type_cleanup_decl(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj);
''',
c_type=c_type(name),type=name)
return ret
def generate_type_cleanup(name):
ret = mcgen('''
void qapi_free_%(type)s(%(c_type)s obj)
{
QapiDeallocVisitor *md;
Visitor *v;
if (!obj) {
return;
}
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
visit_type_%(type)s(v, &obj, NULL, NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_type=c_type(name),type=name)
return ret
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:o:",
["source", "header", "prefix=", "output-dir="])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
c_file = 'qapi-types.c'
h_file = 'qapi-types.h'
do_c = False
do_h = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
fdef = maybe_open(do_c, c_file, 'w')
fdecl = maybe_open(do_h, h_file, 'w')
fdef.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* deallocation functions for schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
* Michael Roth <mdroth@linux.vnet.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qapi/qapi-dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''', prefix=prefix))
fdecl.write(mcgen('''
/* AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI types
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "qapi/qapi-types-core.h"
''',
guard=guardname(h_file)))
exprs = parse_schema(sys.stdin)
exprs = filter(lambda expr: not expr.has_key('gen'), exprs)
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_fwd_struct(expr['type'], expr['data'])
elif expr.has_key('enum'):
ret += generate_enum(expr['enum'], expr['data'])
fdef.write(generate_enum_lookup(expr['enum'], expr['data']))
elif expr.has_key('union'):
ret += generate_fwd_struct(expr['union'], expr['data']) + "\n"
ret += generate_enum('%sKind' % expr['union'], expr['data'].keys())
fdef.write(generate_enum_lookup('%sKind' % expr['union'], expr['data'].keys()))
else:
continue
fdecl.write(ret)
for expr in exprs:
ret = "\n"
if expr.has_key('type'):
ret += generate_struct(expr['type'], "", expr['data']) + "\n"
ret += generate_type_cleanup_decl(expr['type'] + "List")
fdef.write(generate_type_cleanup(expr['type'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['type'])
fdef.write(generate_type_cleanup(expr['type']) + "\n")
elif expr.has_key('union'):
ret += generate_union(expr['union'], expr['data'])
ret += generate_type_cleanup_decl(expr['union'] + "List")
fdef.write(generate_type_cleanup(expr['union'] + "List") + "\n")
ret += generate_type_cleanup_decl(expr['union'])
fdef.write(generate_type_cleanup(expr['union']) + "\n")
else:
continue
fdecl.write(ret)
fdecl.write('''
#endif
''')
fdecl.flush()
fdecl.close()
fdef.flush()
fdef.close()
|
huikang/vCSIMx86_qemu
|
scripts/qapi-types.py
|
Python
|
gpl-2.0
| 6,861
|
[
"VisIt"
] |
e18cff55adbf9e1c072942cafddac401477f9ba7fd44b75013b566f452369b58
|
# -*- coding: utf-8 -*-
# Copyright 2017 Zachary Marv (马子昂)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DEX Tree
#
# This script is used to implement the tree node and tree structure.
from _settings import *
from collections import Counter
import hashlib
import csv
import redis
import zlib
import rputil
# tag_rules
labeled_libs = list()
no_lib = list()
with open(FILE_RULE, 'r') as file_rules:
csv_rules_reader = csv.reader(file_rules, delimiter=',', quotechar='|')
for row in csv_rules_reader:
if row[1] == "no":
no_lib.append(row)
else:
labeled_libs.append(row)
class TreeNode(object):
"""
Tree Node Structure
{
sha256 : 02b018f5b94c5fbc773ab425a15b8bbb // In fact sha256 is the non-hex one
weight : 1023 // How many APIs in this Node
pn : Lcom/facebook/internal // Current package name
parent : <TreeNode> // Parent node
children: dict("pn": <TreeNode>) // Children nodes
match : list( tuple(package_name, match_weight) ) // match lib list
}
"""
def __init__(self, n_weight=-1, n_pn="", n_parent=None):
self.sha256 = ""
self.weight = n_weight
self.pn = n_pn
self.parent = n_parent
self.children = dict()
self.match = list()
self.permissions = set()
self.db = redis.StrictRedis(host=DB_HOST, port=DB_PORT, db=DB_ID, password=DB_PSWD)
self.api_id_list = []
def insert(self, package_name, weight, sha256, permission_list, api_id_list):
# no matter how deep the package is, add permissions here.
for permission in permission_list:
self.permissions.add(permission)
# no matter how deep the package is, add api_id_list
# self.api_id_list = self.api_id_list + api_id_list
current_depth = 0 if self.pn == "" else self.pn.count('/') + 1
target_depth = package_name.count('/') + 1
if current_depth == target_depth:
self.sha256 = sha256
self.api_id_list = api_id_list
return "F: %s" % package_name
target_package_name = '/'.join(package_name.split('/')[:current_depth + 1])
if target_package_name in self.children:
self.children[target_package_name].weight += weight
return self.children[target_package_name].insert(package_name, weight, sha256, permission_list, api_id_list)
else:
self.children[target_package_name] = TreeNode(n_weight=weight, n_pn=target_package_name, n_parent=self)
return self.children[target_package_name].insert(package_name, weight, sha256, permission_list, api_id_list)
def brand(self, package_name, standard_package):
current_depth = 0 if self.pn == "" else self.pn.count('/') + 1
target_depth = package_name.count('/') + 1
if current_depth == target_depth:
yes_or_no = raw_input("Warning: Brand %s as a new library? (Y/n)" % self.pn)
if yes_or_no == 'Y' or yes_or_no == 'y':
try:
self.db.hincrby(name=DB_FEATURE_CNT, key=self.sha256, amount=10000000)
self.db.hset(name=DB_FEATURE_WEIGHT, key=self.sha256, value=self.weight)
self.db.hset(name=DB_UN_OB_PN, key=self.sha256, value=standard_package)
self.db.hset(name=DB_FEATURE_CNT, key=self.sha256, value=100000000)
except:
return "Error in database."
return "Success."
else:
return "Did nothing. Bye~"
else:
target_package_name = '/'.join(package_name.split('/')[:current_depth + 1])
if target_package_name in self.children:
return self.children[target_package_name].brand(package_name, standard_package)
else:
return "Package Not found in this APK."
class Tree(object):
"""
Tree
"""
def __init__(self):
self.root = TreeNode()
self.db = None
self.feature = None
self.db = redis.StrictRedis(host=DB_HOST, port=DB_PORT, db=DB_ID, password=DB_PSWD)
self.db_rep = redis.StrictRedis(host=DB_HOST, port=DB_PORT, db=DB_ID_REP, password=DB_PSWD)
def insert(self, package_name, weight, sha256, permission_list, api_id_list):
self.root.insert(package_name, weight, sha256, permission_list, api_id_list)
def brand(self, package_name, standard_package):
return self.root.brand(package_name, standard_package)
def pre_order_res(self, visit, res):
self._pre_order_res(node=self.root, visit=visit, res=res)
def _pre_order_res(self, node, visit, res):
ret = visit(node, res)
if ret < 0:
return
else:
for child_pn in node.children:
self._pre_order_res(node.children[child_pn], visit, res)
def pre_order_res_ret(self, visit, res, ret):
self._pre_order_res_ret(node=self.root, visit=visit, res=res, ret=ret)
def _pre_order_res_ret(self, node, visit, res, ret):
retu = visit(node, res, ret)
if retu < 0:
return
else:
for child_pn in node.children:
self._pre_order_res_ret(node.children[child_pn], visit, res, ret)
def pre_order(self, visit):
self._pre_order(self.root, visit)
def _pre_order(self, node, visit):
ret = visit(node)
if ret < 0:
return
else:
for child_pn in node.children:
self._pre_order(node.children[child_pn], visit)
def post_order(self, visit):
self._post_order(self.root, visit)
def _post_order(self, node, visit):
for child_pn in node.children:
self._post_order(node.children[child_pn], visit)
visit(node)
@staticmethod
def _cal_sha256(node):
# Ignore Leaf Node
if len(node.children) == 0 and node.sha256 != "":
return
# Everything seems Okay.
cur_sha256 = hashlib.sha256()
sha256_list = list()
for child in node.children:
sha256_list.append(node.children[child].sha256)
sha256_list.sort()
for sha256_item in sha256_list:
cur_sha256.update(sha256_item)
node.sha256 = cur_sha256.hexdigest()
# node.pn is available here for inspection, e.g. Lcom/tencent/mm/sdk/modelpay
def cal_sha256(self):
"""
Calculate sha256 for every package
:return:
"""
self.post_order(visit=self._cal_sha256)
def _match(self, node):
a, c, u = None, None, None
pipe = self.db.pipeline()
pipe.hget(name=DB_UN_OB_PN, key=node.sha256)
pipe.hget(name=DB_FEATURE_CNT, key=node.sha256)
pipe.hget(name=DB_UN_OB_CNT, key=node.sha256)
pipe_res = pipe.execute()
a, c, u = pipe_res
# if could not find this package in database, search its children.
if a is None:
return 1
# Potential Name is not convincing enough.
if u < 8 or float(u) / float(c) < 0.3:
return 2
flag_not_deeper = False
for lib in labeled_libs:
# if the potential package name is the same as full lib path
# do not search its children
if lib[0] == a:
node.match.append([lib, node.weight, int(c)])
continue
# If they have the same length but not equal to each other, just continue
if len(lib[0]) == len(a):
continue
# if the potential package name is part of full lib path, search its children
# e.g. a is Lcom/google, we could find it as a part of Lcom/google/android/gms, so search its children for
# more details
if len(a) < len(lib[0]) and a == lib[0][:len(a)] and lib[0][len(a)] == '/':
continue
# If the lib path is part of potential package name, add some count into parent's match list.
if len(a) > len(lib[0]) and lib[0] == a[:len(lib[0])] and a[len(lib[0])] == '/':
depth_diff = a.count('/') - lib[0].count('/')
cursor = node
for i in range(depth_diff):
# cursor should not be the root, so cursor's parent should not be None.
if cursor.parent.parent is not None:
cursor = cursor.parent
else:
# root's parent is None
# This situation exists
# For Example: If it takes Lcom/a/b as Lcom/google/android/gms/ads/mediation/customevent,
# It will find its ancestor until root or None.
return 4
flag = False
for matc in cursor.match:
# if matc[0][0] == lib[0]:
if matc[0] == lib:
flag = True
if matc[1] != cursor.weight:
matc[1] += node.weight
if not flag:
cursor.match.append([lib, node.weight, c])
flag_not_deeper = True
continue
"""
One degree deeper!
深入探测一层
There's a situation that a package is a library and the child of a package is also a library.
库是存在相互嵌套的。
As we all know that Lcom/unity3d is definitely a Game Engine library. There could be some sub-package
like Lcom/unity3d/player, Lcom/unity3d/plugin, Lcom/unity3d/sdk, etc. So we take Lcom/unity3d as the
root package of this library.
比如,Lcom/unity3d 显然是Unity3D这个游戏引擎,在游戏引擎下可能会有player, plugin, sdk等次级包(文件夹),所以我们很
显然地把Lcom/unity3d作为游戏引擎的根包。
However, Lcom/unity3d/ads is an Advertisement library.
但是,Lcom/unity3d/ads是Unity3D公司推出的广告库
If we do not search one degree deeper, we could only find the game engine other than the ads library.
Likewise, we could not find Landroid/support/v4 anymore if we take Landroid/support as a library.
如果我们不继续搜索的话,那么对于一个应用,我们只能检测到Unity3D这个引擎,无法检测到Unity3D Ads这个广告库。
Implementation:
实现:
if lib[0] == a, we continue search his children.
if lib[0] == a 这个后面从return变成了continue,我们会继续搜索它的子节点
if we already found his child, we will not search deeper.
在后面的代码中,如果已经知道的就是子节点,那么就不会继续深层的搜了。
In my original code, I found a bug that the match degree is larger than the total amount of weight.
This is impossible. After debugging, I found that if I add the match value multiple times, the match
weight could overflow.
在我原来有bug的代码中,我发现匹配的similarity有大于1的情况,即com/facebook这个库的similarity大于了1。这是因为match
被我加总了数次
For example:
There's a library Lcom/google/android/gson, weight is 189
we found Lcom/google/android/gson, so add the weight 189
we found Lcom/google/android/gson/internal, so add the weight 24
we found Lcom/google/android/gson/stream, so add the weight 43
In this case, the weight of package gson overflows.
举例来看:
对于Lcom/google/android/gson这个包来说,它的API数量是189
搜索中找到 Lcom/google/android/gson, weight加上189
搜索中找到 Lcom/google/android/gson/internal, weight加上24
搜索中找到 Lcom/google/android/gson/stream, weight加上 43
这样显然就溢出了。
Because we only search 1 degree deeper, the match situation of Lcom/google/android/gson is only true or
false. In this case, we just need to check if the weight has overflowed before add weight. as the code:
if matc[1] != cursor.weight:
matc[1] += node.weight
因为我们可以多搜一层,所以判断是否溢出很简单。因为对于上层的库来说,也就只有两种情况,那就是匹配到和没匹配到。所以只需要
检测一下是否已经超出就行了。
"""
if flag_not_deeper:
return -1
# Never find a good match, search its children.
return 5
def match(self):
self.pre_order(visit=self._match)
def _find_untagged(self, node, res):
# If there's already some matches here, do not search its children. non-sense.
a, c, u = None, None, None
if len(node.match) != 0:
return -1
pipe = self.db.pipeline()
pipe.hget(name=DB_UN_OB_PN, key=node.sha256)
pipe.hget(name=DB_FEATURE_CNT, key=node.sha256)
pipe.hget(name=DB_UN_OB_CNT, key=node.sha256)
pipe_res = pipe.execute()
a, c, u = pipe_res
if a is None:
return 1
# If the package name is already in no_lib list, ignore it and search its children.
for non_lib in no_lib:
if non_lib[0] == a:
return 1
# Potential Name is not convincing enough. search its children
if float(u) / float(c) < 0.5 or node.weight < 50 or int(c) < 20:
return 2
# JSON support
utg_lib_obj = dict() # untagged library object
utg_lib_obj["Package"] = node.pn
utg_lib_obj["Standard Package"] = a
utg_lib_obj["Library"] = "Unknown"
utg_lib_obj["Popularity"] = int(c)
utg_lib_obj["Weight"] = node.weight
res.append(utg_lib_obj)
# OLD Print
# print("----")
# print("Package: %s" % node.pn)
# print("Match Package: %s" % u)
# print("Library: Unknown.")
# print("Popularity: %s" % c)
# print("API count: %s" % node.weight)
def find_untagged(self, res):
self.pre_order_res(visit=self._find_untagged, res=res)
@staticmethod
def _get_lib(node, res):
for matc in node.match:
if float(matc[1]) / float(node.weight) < 0.1 and matc[0][0] != node.pn:
continue
# JSON
lib_obj = dict()
lib_obj["Package"] = node.pn # cpn
lib_obj["Library"] = matc[0][1] # lib
lib_obj["Standard Package"] = matc[0][0] # pn
lib_obj["Type"] = matc[0][2] # tp
lib_obj["Website"] = matc[0][3] # ch
lib_obj["Match Ratio"] = "%d/%d" % (matc[1], node.weight) # no similarity in V1
lib_obj["Popularity"] = matc[2] # dn
lib_obj["Permission"] = sorted(list(node.permissions))
res.append(lib_obj)
# Old Print
# print("----")
# print("Package: %s" % node.pn)
# print("Library: %s" % matc[0][1])
# print("Standard Package: %s" % matc[0][0])
# print("Type: %s" % matc[0][2])
# print("Website: %s" % matc[0][3])
# print("Similarity: %d/%d" % (matc[1], node.weight))
# print("Popularity: %d" % matc[2])
# permission_out = ""
# for permission in sorted(list(node.permissions)):
# permission_out += (permission + ",")
# if len(permission_out) > 0:
# permission_out = permission_out[:-1]
# print("Permissions:" + permission_out)
return 0
def get_lib(self, res):
self.pre_order_res(visit=self._get_lib, res=res)
@staticmethod
def _get_repackage_main(node, res, ret):
if node.pn in res:
return -1
if len(node.children) == 0:
# Collect the leaf node's API ids once (extending and then += again would double-count every id).
ret.extend(node.api_id_list)
return 0
def get_repackage_main(self, res, hex_sha256):
# res is a list of libraries. Result.
pn_list = list()
for item in res:
pn_list.append(item["Package"])
ret = list()
self.pre_order_res_ret(visit=self._get_repackage_main, res=pn_list, ret=ret)
ret_length = len(ret)
kvd = dict(Counter(ret))
feature_str = rputil.Util.dict2str(kvd)
zstr = zlib.compress(feature_str, 1)
self.db_rep.hset(name="apk_feature", key=hex_sha256, value=zstr)
self.db_rep.zadd("apk_weight", ret_length, hex_sha256 )
|
pkumza/LibRadar
|
LibRadar/dex_tree.py
|
Python
|
apache-2.0
| 17,703
|
[
"VisIt"
] |
e98a2f69929b929010d52ee5668966d91e76878339551f535aa07ac9aa3218b1
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from typing import Dict, List
import numpy as np
from psi4 import core
from psi4.driver import constants
from psi4.driver.p4util.exceptions import *
def least_squares_fit_polynomial(xvals, fvals, localization_point, no_factorials=True, weighted=True, polynomial_order=4):
"""Performs and unweighted least squares fit of a polynomial, with specified order
to an array of input function values (fvals) evaluated at given locations (xvals).
See https://doi.org/10.1063/1.4862157, particularly eqn (7) for details. """
xpts = np.array(xvals) - localization_point
if weighted:
R = 1.0
p_nu = 1
epsilon = 1e-3
zvals = np.square(xpts/R)
weights = np.exp(-zvals) / (zvals**p_nu + epsilon**p_nu)
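# Damping weights of eqn (7): with z = (x/R)^2, w = exp(-z) / (z**p_nu + epsilon**p_nu), so
# points far from the localization point contribute very little to the fit.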
else:
weights = None
fit = np.polynomial.polynomial.polyfit(xpts, fvals, polynomial_order, w=weights)
# Remove the 1/n! coefficients
if no_factorials:
scalefac = 1.0
for n in range(2,polynomial_order+1):
scalefac *= n
fit[n] *= scalefac
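# After this rescaling, fit[n] approximates the n-th derivative of the function at the
# localization point rather than the Taylor coefficient f^(n)/n! returned by polyfit.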
return fit
def anharmonicity(rvals: List, energies: List, plot_fit: str = '', mol = None) -> Dict:
"""Generates spectroscopic constants for a diatomic molecules.
Fits a diatomic potential energy curve using a weighted least squares approach
(c.f. https://doi.org/10.1063/1.4862157, particularly eqn. 7), locates the minimum
energy point, and then applies second order vibrational perturbation theory to obtain spectroscopic
constants. Any number of points greater than 4 may be provided, and they should bracket the minimum.
The data need not be evenly spaced, and can be provided in any order. The data are weighted such that
those closest to the minimum have highest impact.
A dictionary with the following keys, which correspond to spectroscopic constants, is returned:
:param rvals: The bond lengths (in Angstrom) for which energies are
provided, of length at least 5 and equal to the length of the energies array
:param energies: The energies (Eh) computed at the bond lengths in the rvals list
:param plot_fit: A string describing where to save a plot of the harmonic and anharmonic fits, the
inputted data points, re, r0 and the first few energy levels, if matplotlib
is available. Set to 'screen' to generate an interactive plot on the screen instead. If a filename is
provided, the image type is determined by the extension; see matplotlib for supported file types.
:returns: (*dict*) Keys: "re", "r0", "we", "wexe", "nu", "ZPVE(harmonic)", "ZPVE(anharmonic)", "Be", "B0", "ae", "De"
corresponding to the spectroscopic constants in cm-1
"""
angstrom_to_bohr = 1.0 / constants.bohr2angstroms
angstrom_to_meter = 1.0e-10 # 1 Angstrom = 1e-10 m
# Make sure the input is valid
if len(rvals) != len(energies):
raise ValidationError("The number of energies must match the number of distances")
npoints = len(rvals)
if npoints < 5:
raise ValidationError("At least 5 data points must be provided to compute anharmonicity")
core.print_out("\n\nPerforming a fit to %d data points\n" % npoints)
# Sort radii and values first from lowest to highest radius
indices = np.argsort(rvals)
rvals = np.array(rvals)[indices]
energies = np.array(energies)[indices]
# Make sure the molecule the user provided is the active one
molecule = mol or core.get_active_molecule()
molecule.update_geometry()
natoms = molecule.natom()
if natoms != 2:
raise Exception("The current molecule must be a diatomic for this code to work!")
m1 = molecule.mass(0)
m2 = molecule.mass(1)
# Find rval of the minimum of energies, check number of points left and right
min_index = np.argmin(energies)
if min_index < 3 :
core.print_out("\nWarning: fewer than 3 points provided with a r < r(min(E))!\n")
if min_index >= len(energies) - 3:
core.print_out("\nWarning: fewer than 3 points provided with a r > r(min(E))!\n")
# Optimize the geometry, refitting the surface around each new geometry
core.print_out("\nOptimizing geometry based on current surface:\n\n")
re = rvals[min_index]
maxit = 30
thres = 1.0e-9
for i in range(maxit):
derivs = least_squares_fit_polynomial(rvals,energies,localization_point=re)
e,g,H = derivs[0:3]
core.print_out(" E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
if abs(g) < thres:
break
re -= g/H
if i == maxit-1:
raise ConvergenceError("diatomic geometry optimization", maxit)
core.print_out(" Final E = %20.14f, x = %14.7f, grad = %20.14f\n" % (e, re, g))
if re < min(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a lower range of r values.")
if re > max(rvals):
raise Exception("Minimum energy point is outside range of points provided. Use a higher range of r values.")
# Convert to convenient units, and compute spectroscopic constants
d0,d1,d2,d3,d4 = derivs*constants.hartree2aJ
core.print_out("\nEquilibrium Energy %20.14f Hartrees\n" % e)
core.print_out("Gradient %20.14f\n" % g)
core.print_out("Quadratic Force Constant %14.7f MDYNE/A\n" % d2)
core.print_out("Cubic Force Constant %14.7f MDYNE/A**2\n" % d3)
core.print_out("Quartic Force Constant %14.7f MDYNE/A**3\n" % d4)
hbar = constants.h / (2.0 * np.pi)
mu = ((m1*m2)/(m1+m2))*constants.amu2kg
we = 5.3088375e-11 * np.sqrt(d2/mu)
wexe = (1.2415491e-6)*(we/d2)**2 * ((5.0*d3*d3)/(3.0*d2)-d4)
# Rotational constant: Be
I = ((m1*m2)/(m1+m2)) * constants.amu2kg * (re * angstrom_to_meter)**2
B = constants.h / (8.0 * np.pi**2 * constants.c * I)
# alpha_e and quartic centrifugal distortion constant
ae = -(6.0 * B**2 / we) * ((1.05052209e-3*we*d3)/(np.sqrt(B * d2**3))+1.0)
de = 4.0*B**3 / we**2
# B0 and r0 (plus re check using Be)
B0 = B - ae / 2.0
r0 = np.sqrt(constants.h / (8.0 * np.pi**2 * mu * constants.c * B0))
recheck = np.sqrt(constants.h / (8.0 * np.pi**2 * mu * constants.c * B))
r0 /= angstrom_to_meter
recheck /= angstrom_to_meter
# Fundamental frequency nu
nu = we - 2.0 * wexe
zpve_nu = 0.5 * we - 0.25 * wexe
zpve_we = 0.5 * we
# Generate pretty pictures, if requested
if(plot_fit):
try:
import matplotlib.pyplot as plt
except ImportError:
msg = "\n\tPlot not generated; matplotlib is not installed on this machine.\n\n"
print(msg)
core.print_out(msg)
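# NOTE: the plotting code below assumes the matplotlib import above succeeded;
# if it did not, the later calls to plt will stop execution with a NameError.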
# Correct the derivatives for the missing factorial prefactors
dvals = np.zeros(5)
dvals[0:5] = derivs[0:5]
dvals[2] /= 2
dvals[3] /= 6
dvals[4] /= 24
# Default plot range, before considering energy levels
minE = np.min(energies)
maxE = np.max(energies)
minR = np.min(rvals)
maxR = np.max(rvals)
# Plot vibrational energy levels
we_au = we / constants.hartree2wavenumbers
wexe_au = wexe / constants.hartree2wavenumbers
coefs2 = [ dvals[2], dvals[1], dvals[0] ]
coefs4 = [ dvals[4], dvals[3], dvals[2], dvals[1], dvals[0] ]
for n in range(3):
Eharm = we_au*(n+0.5)
Evpt2 = Eharm - wexe_au*(n+0.5)**2
coefs2[-1] = -Eharm
coefs4[-1] = -Evpt2
roots2 = np.roots(coefs2)
roots4 = np.roots(coefs4)
xvals2 = roots2 + re
xvals4 = np.choose(np.where(np.isreal(roots4)), roots4)[0].real + re
Eharm += dvals[0]
Evpt2 += dvals[0]
plt.plot(xvals2, [Eharm, Eharm], 'b', linewidth=1)
plt.plot(xvals4, [Evpt2, Evpt2], 'g', linewidth=1)
maxE = Eharm
maxR = np.max([xvals2,xvals4])
minR = np.min([xvals2,xvals4])
# Find ranges for the plot
dE = maxE - minE
minE -= 0.2*dE
maxE += 0.4*dE
dR = maxR - minR
minR -= 0.2*dR
maxR += 0.2*dR
# Generate the fitted PES
xpts = np.linspace(minR, maxR, 1000)
xrel = xpts - re
xpows = xrel[:, None] ** range(5)
fit2 = np.einsum('xd,d', xpows[:,0:3], dvals[0:3])
fit4 = np.einsum('xd,d', xpows, dvals)
# Make / display the plot
plt.plot(xpts, fit2, 'b', linewidth=2.5, label='Harmonic (quadratic) fit')
plt.plot(xpts, fit4, 'g', linewidth=2.5, label='Anharmonic (quartic) fit')
plt.plot([re, re], [minE, maxE], 'b--', linewidth=0.5)
plt.plot([r0, r0], [minE, maxE], 'g--', linewidth=0.5)
plt.scatter(rvals, energies, c='Black', linewidth=3, label='Input Data')
plt.legend()
plt.xlabel('Bond length (Angstroms)')
plt.ylabel('Energy (Eh)')
plt.xlim(minR, maxR)
plt.ylim(minE, maxE)
if plot_fit == 'screen':
plt.show()
else:
plt.savefig(plot_fit)
core.print_out("\n\tPES fit saved to %s.\n\n" % plot_fit)
core.print_out("\nre = %10.6f A check: %10.6f\n" % (re, recheck))
core.print_out("r0 = %10.6f A\n" % r0)
core.print_out("E at re = %17.10f Eh\n" % e)
core.print_out("we = %10.4f cm-1\n" % we)
core.print_out("wexe = %10.4f cm-1\n" % wexe)
core.print_out("nu = %10.4f cm-1\n" % nu)
core.print_out("ZPVE(we) = %10.4f cm-1\n" % zpve_we)
core.print_out("ZPVE(nu) = %10.4f cm-1\n" % zpve_nu)
core.print_out("Be = %10.4f cm-1\n" % B)
core.print_out("B0 = %10.4f cm-1\n" % B0)
core.print_out("ae = %10.4f cm-1\n" % ae)
core.print_out("De = %10.7f cm-1\n" % de)
results = {
"re" : re,
"r0" : r0,
"we" : we,
"wexe" : wexe,
"nu" : nu,
"E(re)" : e,
"ZPVE(harmonic)" : zpve_we,
"ZPVE(anharmonic)" : zpve_nu,
"Be" : B,
"B0" : B0,
"ae" : ae,
"De" : de
}
return results
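# Hedged usage sketch (not part of the original file): a minimal way the routine above
# might be driven from Psi4, assuming an SCF/cc-pVDZ single point is adequate and that
# the H2 distances below (in Angstrom) bracket the minimum; all values are illustrative.
#
#   import psi4
#   rvals = [0.65, 0.70, 0.74, 0.78, 0.85]
#   energies = []
#   for r in rvals:
#       h2 = psi4.geometry("H\nH 1 {}".format(r))
#       energies.append(psi4.energy("scf/cc-pvdz", molecule=h2))
#   phys = anharmonicity(rvals, energies, mol=h2)
#   print(phys["we"], phys["wexe"], phys["r0"])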
|
susilehtola/psi4
|
psi4/driver/diatomic.py
|
Python
|
lgpl-3.0
| 11,347
|
[
"Psi4"
] |
1d83271db4cf5c5c6967b7331becc68df056db30cec2574a871bffad4ca4c801
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import annotations
import sys
from typing import List
from ORCA.utils.Path import cPath
from ORCA.vars.Helpers import GetEnvVar
from ORCA.utils.Platform import OS_GetSystemUserPath
from ORCA.vars.QueryDict import TypedQueryDict
import argparse
__all__ = ['cParameter','cParserAction']
"""
Functions to parse and set given command line parameter
"""
class cParserAction(argparse.Action):
"""
A Action object for the argparse parser
"""
def __init__(self, option_strings, oParameter:cParameter, *args, **kwargs):
self.oParameter:cParameter = oParameter
self.HandleValue(kwargs.get("dest"),kwargs.get("default"))
super(cParserAction, self).__init__(option_strings=option_strings,*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
self.HandleValue(self.dest,values)
setattr(namespace, self.dest, values)
def HandleValue(self,uName:str,uValue:str):
"""
Manages a given command line parameter:
identifies boolean parameters and expands home folder paths across operating systems
:param str uName: the name of the parameter
:param str uValue: the value of the parameter
:return:
"""
if uName.startswith("oPath"):
uTmp = uValue
if uTmp.startswith("~"):
uTmp = OS_GetSystemUserPath() + uTmp[1:]
self.oParameter[uName]=cPath(uTmp)
elif uName.startswith("b"):
self.oParameter[uName] = (uValue!="")
else:
self.oParameter[uName]=uValue
class cParameter(TypedQueryDict):
"""
Initializes and holds the given command line parameters
"""
def __init__(self):
super(cParameter, self).__init__()
oParser:argparse.ArgumentParser = argparse.ArgumentParser(description='ORCA Open Remote Control Application')
self.AddParameter(oParser=oParser)
aArgs=self.RemoveOtherArguments(oParser=oParser)
oParser.parse_args(aArgs)
def AddParameter(self,oParser:argparse.ArgumentParser) -> None:
"""
Adds all valid command line arguments to the parser object
:param oParser:
:return:
"""
oParser.add_argument('--debugpath', default=GetEnvVar('DEBUGPATH'), action=cParserAction, oParameter=self, dest="oPathDebug", help='Changes the path for ORCA files (can be passed as DEBUGPATH environment var)')
oParser.add_argument('--logpath', default=GetEnvVar('ORCALOGPATH'), action=cParserAction, oParameter=self, dest="oPathLog", help='Changes the path for ORCA/Kivy log files (can be passed as ORCALOGPATH environment var)')
oParser.add_argument('--tmppath', default=GetEnvVar('ORCATMPPATH'), action=cParserAction, oParameter=self, dest="oPathTmp", help='Changes the path for ORCA temp folder (can be passed as ORCATMPPATH environment var)')
oParser.add_argument('--smallscreen', default=GetEnvVar('ORCASMALL'), action=cParserAction, oParameter=self, dest="bSmallScreen", help='If set ORCA switches to small screen (can be passed as ORCASMALL environment var)')
# noinspection PyMethodMayBeStatic
def RemoveOtherArguments(self,oParser) -> List:
""" Removes all ORCA command line arguments from the command line, so it does not clash with kivy """
aRet:List = []
for uArg in sys.argv[1:]:
# noinspection PyProtectedMember
for uOption in oParser._option_string_actions:
if uOption.lstrip("-")==uArg.split("=")[0].lstrip("-"):
aRet.append(uArg)
break
return aRet
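# Hedged usage sketch (not part of the original file): cParameter is typically created once
# during start-up, before Kivy consumes sys.argv; the keys follow the dest names declared in
# AddParameter, and dict-style access is assumed from the TypedQueryDict base class.
#
#   oParameter = cParameter()
#   oPathDebug = oParameter.get("oPathDebug")   # a cPath built from --debugpath / DEBUGPATH
#   if oParameter.get("bSmallScreen"):
#       pass  # switch ORCA to its small-screen layout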
|
thica/ORCA-Remote
|
src/ORCA/Parameter.py
|
Python
|
gpl-3.0
| 4,672
|
[
"ORCA"
] |
3e5a0d3462f1a94c73321cf25db1bcef24996b8f1ba4d54137091325345d73d1
|
# -*- coding: utf-8 -*-
"""URLs for geomat project"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.storage import staticfiles_storage
from django.views import defaults as default_views
from django.views.generic import RedirectView
from geomat.stein.views import gallery_view
from rest_framework.documentation import include_docs_urls
urlpatterns = [
url(r'^$', gallery_view, name="home"),
# Redirect users from outdated 'preview/' to '/'
url(r'^preview/', RedirectView.as_view(pattern_name='home')),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# REST framework
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/', include("geomat.stein.apiurls", namespace="api")),
# Let's fix this stupid issue with Google Chrome and make a redirect from '/favicon.ico' to our
# 'common/images/favicon.ico' file!
# Google Chrome ignores the favicon file defined in HTML and always looks for it in '/favicon.ico'
url(r'^favicon.ico$',
RedirectView.as_view(
url=staticfiles_storage.url('common/images/favicon.ico'),
permanent=False),
name="favicon"),
url(r'^gallery/', gallery_view, name="gallery_listview"),
# Do some redirects to the correct URL, as some people manage to get typos in
url(r'^galery/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^galerie/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^gallerie/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^garllry/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^garllery/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^galary/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^gallary/', RedirectView.as_view(pattern_name='gallery_listview')),
url(r'^gallarie/', RedirectView.as_view(pattern_name='gallery_listview')),
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development; just visit
# these urls in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$',
default_views.bad_request,
kwargs={'exception': Exception("Bad Request!")}),
url(r'^403/$',
default_views.permission_denied,
kwargs={'exception': Exception("Permissin Denied")}),
url(r'^404/$',
default_views.page_not_found,
kwargs={'exception': Exception("Page not Found")}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
# Check for the rosetta module in local installation
if 'rosetta' in settings.INSTALLED_APPS:
urlpatterns += [
url(r'^rosetta/', include('rosetta.urls')),
]
|
GeoMatDigital/django-geomat
|
config/urls.py
|
Python
|
bsd-3-clause
| 3,181
|
[
"VisIt"
] |
bfa5e1b6e4fa9ddbc2ebdfe40cce0794553253755d1d709391cb728210274296
|
from collections.abc import Sized, Iterable
from . import ast
class Selection(Sized, Iterable):
def __init__(self, nodes):
self.selection = nodes
def __iter__(self):
return iter(self.selection)
def __len__(self):
return len(self.selection)
def __getitem__(self, item):
return self.selection[item]
class NodeFinder(ast.Visitor):
def __init__(self, target, modifier):
self.target = target
self.modifier = modifier
self.finished = False
self.results = []
def visit(self, node):
if self.finished:
return
if node.__class__.__name__ == self.target:
self.results.append(node)
if self.modifier == '*':
self.generic_visit(node)
elif self.modifier == 'first':
self.finished = True
else:
self.generic_visit(node)
def find(selector, selection):
components = selector.split()
if not components:
return Selection([])
next_node_name = components.pop()
if ':' in next_node_name:
next_node_name, modifier = next_node_name.split(':')
else:
modifier = ''
if isinstance(selection, ast.Node):
selection = (selection,)
results = []
for node in selection:
finder = NodeFinder(next_node_name, modifier)
finder.visit(node)
results.extend(finder.results)
results = Selection(results)
if components:
return find(' '.join(components), results)
return results
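# Hedged usage sketch (not part of the original file): selectors are whitespace-separated
# chains of node class names, optionally suffixed with ':first' or ':*'. The node names
# used here ('FunctionDecl', 'Identifier') are hypothetical and depend on the classes
# defined in tango's ast module.
#
#   module_node = ...  # an ast.Node produced by the tango front end
#   first_fn = find('FunctionDecl:first', module_node)   # Selection with at most one node
#   ids = find('FunctionDecl Identifier', module_node)   # identifiers nested in functions
#   for node in ids:
#       print(node)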
|
kyouko-taiga/tango
|
tango/utils.py
|
Python
|
apache-2.0
| 1,562
|
[
"VisIt"
] |
1c80a0486cae2a4b03c144b4bc7a843a08be5d38f40004d6f1e3fb2bc3dc8ccf
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Write ESPResSo trajectories in the H5MD format. See :ref:`Writing H5MD-files`.
"""
import espressomd
from espressomd.io.writer import h5md # pylint: disable=import-error
from espressomd import polymer
from espressomd import interactions
system = espressomd.System(box_l=[100.0, 100.0, 100.0])
system.time_step = 0.01
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
system.cell_system.skin = 0.4
fene = interactions.FeneBond(k=10, d_r_max=2)
system.bonded_inter.add(fene)
positions = polymer.linear_polymer_positions(n_polymers=5,
beads_per_chain=50,
bond_length=1.0,
seed=1234)
for polymer in positions:
monomers = system.part.add(pos=polymer)
previous_part = None
for part in monomers:
if previous_part:
part.add_bond((fene, previous_part))
previous_part = part
h5_units = h5md.UnitSystem(time='ps', mass='u', length='nm', charge='e')
h5_file = h5md.H5md(file_path="sample.h5", unit_system=h5_units)
for i in range(2):
h5_file.write()
system.integrator.run(steps=10)
h5_file.flush()
h5_file.close()
|
fweik/espresso
|
samples/h5md.py
|
Python
|
gpl-3.0
| 1,917
|
[
"ESPResSo"
] |
31f0f930338821f0c6fb06d19e7b5bedc11238f278c6662c36454fac1037a47e
|
#!/usr/bin/env python
# encoding: utf-8
'''
Word transformation.
Transforms words from singular to plural, class names to table names,
modularized class names to ones without, and class names to foreign keys.
Inflection is language specific and the default `inflection` handles English
language inflection. You can access inflection handlers for other languages
by importing the appropriate inflector. For example
``from miwa.inflection.sv import inflection`` to use Swedish inflection.
:Author: Rasmus Andersson http://hunch.se/
:var inflections: locale-to-Inflection-object map
:var inflection: English inflection
'''
import re, logging
inflections = {}
log = logging.getLogger(__name__)
class Inflector(object):
'''Language inflector.
:ivar locales: languages this inflection handles
:ivar plurals: plural rules
:ivar singulars: singular rules
:ivar uncountables: list of uncountable words
'''
def __init__(self, *locales):
'''Create an inflection handler.
Locale codes should comply with:
* `RFC 1766 <http://www.faqs.org/rfcs/rfc1766.html>`__, preferably as
* `ISO 639-1 <http://en.wikipedia.org/wiki/ISO_639-1>`__ (two-letter code).
A list of language codes can be found here:
http://www.loc.gov/standards/iso639-2/php/code_list.php
:param locales: Language codes representing languages this `Inflector` handles.
:type locales: string
'''
self.plurals = []
self.singulars = []
self.uncountables = []
self.locales = locales
for locale in locales:
inflections[locale] = self
def plural(self, rule, replacement):
'''Specifies a new pluralization rule and its replacement.
:type rule: RegExp pattern
:type replacement: string
:rtype: None
'''
self.plurals[0:0] = [tuple([rule, replacement])]
def singular(self, rule, replacement):
'''Specifies a new singularization rule and its replacement.
:type rule: RegExp pattern
:type replacement: string
:rtype: None
'''
self.singulars[0:0] = [tuple([rule, replacement])]
def regular(self, plural_find, plural_replace, singular_find, singular_replace=''):
'''Specifies a new regular inflection.
:param plural_find: regular expression pattern (which will be compiled)
:type plural_find: string
:param plural_replace: replacement (may contain back-references to
regexp groups from ``plural_find``)
:type plural_replace: string
:param singular_find: regular expression pattern (which will be compiled)
:type singular_find: string
:param singular_replace: replacement (may contain back-references to
regexp groups from ``singular_find``)
:type singular_replace: string
:rtype: None
'''
self.plural(re.compile(plural_find, re.I), plural_replace)
self.singular(re.compile(singular_find, re.I), singular_replace)
def irregular(self, singular, plural, first_letter_is_the_same=True):
'''Specifies a new irregular that applies to both pluralization and
singularization at the same time.
Examples:
inf.irregular('octopus', 'octopi')
inf.irregular('person', 'people')
:type singular: string
:type plural: string
:type first_letter_is_the_same: bool
:rtype: None
'''
if first_letter_is_the_same:
self.plural(re.compile(ur"(%s)%s$" % (singular[0], singular[1:]), re.IGNORECASE), ur'\1' + plural[1:])
self.singular(re.compile(ur"(%s)%s$" % (plural[0], plural[1:]), re.IGNORECASE), ur'\1' + singular[1:])
else:
self.plural(re.compile(ur"%s$" % singular), plural)
self.plural(re.compile(ur"%s$" % singular.capitalize()), plural.capitalize())
self.singular(re.compile(ur"%s$" % plural.capitalize()), singular.capitalize())
self.singular(re.compile(ur"%s$" % plural), singular)
def uncountable(self, *words):
'''Add uncountable words that shouldn't be inflected.
Examples:
uncountable "money"
uncountable "money", "information"
uncountable %w( money information rice )
:param words: strings
:rtype: None
'''
self.uncountables[0:0] = [w.lower() for w in words]
def clear(self):
'''Clears any loaded inflections.
:rtype: None
'''
self.plurals = []
self.singulars = []
self.uncountables = []
def pluralize(self, word):
'''Returns the plural form of the word in the string.
Examples
"post".pluralize -> "posts"
"octopus".pluralize -> "octopi"
"sheep".pluralize -> "sheep"
"words".pluralize -> "words"
"the blue mailman".pluralize -> "the blue mailmen"
"CamelOctopus".pluralize -> "CamelOctopi"
:rtype: unicode
'''
word = unicode(word)
if word.lower() in self.uncountables:
return word
else:
for (rule, replacement) in self.plurals:
#log.debug("pluralize(): rule: %r, replacement: %r", rule.pattern, replacement)
m = rule.subn(replacement, word)
if m[1] > 0:
return m[0]
return word
def singularize(self, word):
'''The reverse of pluralize, returns the singular form of a word in a string.
Examples
"posts".singularize #=> "post"
"octopi".singularize #=> "octopus"
"sheep".singluarize #=> "sheep"
"word".singluarize #=> "word"
"the blue mailmen".singularize #=> "the blue mailman"
"CamelOctopi".singularize #=> "CamelOctopus"
:param word: a possibly plural word which should be converted to singular form.
:rtype: unicode
'''
word = unicode(word)
if word.lower() in self.uncountables:
return word
else:
for (rule, replacement) in self.singulars:
m = rule.subn(replacement, word)
if m[1] > 0:
return m[0]
return word
def camelize(self, lower_case_and_underscored_word, first_letter_uppercase=True):
'''By default, camelize converts strings to UpperCamelCase. If the
``first_letter_uppercase`` argument is set to False, `camelize` produces
lowerCamelCase.
`camelize` will also convert ``/`` to ``.`` which is useful for converting
paths to namespaces
Examples
"active_record".camelize -> "ActiveRecord"
"active_record".camelize(False) -> "activeRecord"
"active_record/errors".camelize -> "ActiveRecord.Errors"
"active_record/errors".camelize(0) -> "activeRecord.Errors"
:rtype: unicode
'''
if first_letter_uppercase:
p2 = re.compile(ur"(^|_)(.)")
lower_case_and_underscored_word = Inflector.camelize.re1.sub(
lambda m: '.' + m.group(1).upper(), lower_case_and_underscored_word)
return Inflector.camelize.re2.sub(lambda m: m.group(2).upper(),
lower_case_and_underscored_word)
else:
return lower_case_and_underscored_word[0] + self.camelize(lower_case_and_underscored_word)[1:]
camelize.re1 = re.compile(ur"/(.?)")
camelize.re2 = re.compile(ur"(^|_)(.)")
def underscore(self, camel_cased_word):
'''The reverse of `camelize`. Makes an underscored form from the expression
in the string.
Changes '.' to '/' to convert namespaces to paths.
Examples
"ActiveRecord".underscore -> "active_record"
"ActiveRecord::Errors".underscore -> active_record/errors
:rtype: unicode
'''
return Inflector.underscore.re2.sub(ur'\1_\2',
Inflector.underscore.re1.sub(ur'\1_\2',
camel_cased_word.replace(u'.',u'/'))).replace(u'-',u'_').lower()
underscore.re1 = re.compile(ur'([A-Z]+)([A-Z][a-z])')
underscore.re2 = re.compile(ur'([a-z\d])([A-Z])')
def humanize(self, lower_case_and_underscored_word):
'''Capitalizes the first word and turns underscores into spaces and strips _id.
Like titleize, this is meant for creating pretty output.
Examples
"employee_salary" -> "Employee salary"
"author_id" -> "Author"
:rtype: unicode
'''
if len(lower_case_and_underscored_word) >= 3 and lower_case_and_underscored_word[-3:] == '_id':
lower_case_and_underscored_word = lower_case_and_underscored_word[:-3]
return lower_case_and_underscored_word.replace('_',' ').capitalize()
def demodulize(self, class_name_in_module):
'''Removes the module part from the expression in the string
Examples
"ActiveRecord.CoreExtensions.String.Inflectors".demodulize #=> "Inflectors"
"Inflectors".demodulize #=> "Inflectors"
:rtype: unicode
'''
p = class_name_in_module.rfind('.')
if p != -1:
return class_name_in_module[p+1:]
return class_name_in_module
def tableize(self, class_name):
'''Create the name of a table the way Rails maps model names to table names. This method
uses the pluralize method on the last word in the string.
Examples
"RawScaledScorer".tableize -> "raw_scaled_scorers"
"egg_and_ham".tableize -> "egg_and_hams"
"fancyCategory".tableize -> "fancy_categories"
:rtype: unicode
'''
return self.pluralize(self.underscore(class_name))
def classify(self, table_name):
'''Create a class name from a table name.
Examples
"egg_and_hams".classify -> "EggAndHam"
"post".classify -> "Post"
:rtype: unicode
'''
return self.camelize(self.singularize(self.demodulize(table_name)))
def foreignKey(self, class_name, separate_class_name_and_id_with_underscore=True):
'''Creates a foreign key name from a class name.
+separate_class_name_and_id_with_underscore+ sets whether
the method should put '_' between the name and 'id'.
Examples
"Message".foreignKey -> "message_id"
"Message".foreignKey(false) -> "messageid"
"Admin::Post".foreignKey -> "post_id"
:rtype: unicode
'''
return self.underscore(self.demodulize(class_name)) +\
(separate_class_name_and_id_with_underscore and "_id" or "id")
def ordinalize(self, number):
'''Ordinalize turns a number into an ordinal string used to denote the
position in an ordered sequence such as 1st, 2nd, 3rd, 4th.
Examples
ordinalize(1) -> "1st"
ordinalize(2) -> "2nd"
ordinalize(1002) -> "1002nd"
ordinalize(1003) -> "1003rd"
:rtype: unicode
'''
i = int(number)
if i % 100 in [11,12,13]:
return u'%dth' % i
else:
x = i % 10
if x == 1:
return u'%sst' % i
elif x == 2:
return u'%dnd' % i
elif x == 3:
return u'%drd' % i
else:
return u'%dth' % i
# Load default inflections (English)
from smisk.inflection.en import inflection
if __name__ == '__main__':
from smisk.test.inflection import test
test()
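# Hedged usage sketch (not part of the original module), assuming the English rule set in
# smisk.inflection.en registers the irregulars and uncountables quoted in the docstrings above.
#
#   from smisk.inflection import inflection
#   inflection.pluralize(u'octopus')         # -> u'octopi'
#   inflection.singularize(u'sheep')         # -> u'sheep' (uncountable)
#   inflection.tableize(u'RawScaledScorer')  # -> u'raw_scaled_scorers'
#   inflection.ordinalize(1003)              # -> u'1003rd'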
|
rsms/smisk
|
lib/smisk/inflection/__init__.py
|
Python
|
mit
| 11,020
|
[
"Octopus"
] |
6c62df1a71937736779868e24deffcb2fdc2face4eced7e8d0c87512ec61203d
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, cint, nowdate, add_days, get_link_to_form
from frappe import _
from six import string_types
from frappe.model.utils import get_fetch_values
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
from frappe.desk.notifications import clear_doctype_notifications
from frappe.contacts.doctype.address.address import get_company_address
from erpnext.controllers.selling_controller import SellingController
from frappe.automation.doctype.auto_repeat.auto_repeat import get_next_schedule_date
from erpnext.selling.doctype.customer.customer import check_credit_limit
from erpnext.stock.doctype.item.item import get_item_defaults
from erpnext.setup.doctype.item_group.item_group import get_item_group_defaults
from erpnext.manufacturing.doctype.production_plan.production_plan import get_items_for_material_requests
from erpnext.accounts.doctype.sales_invoice.sales_invoice import validate_inter_company_party, update_linked_doc,\
unlink_inter_company_doc
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
def __init__(self, *args, **kwargs):
super(SalesOrder, self).__init__(*args, **kwargs)
def validate(self):
super(SalesOrder, self).validate()
self.validate_delivery_date()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.validate_for_items()
self.validate_warehouse()
self.validate_drop_ship()
self.validate_serial_no_based_delivery()
validate_inter_company_party(self.doctype, self.customer, self.company, self.inter_company_order_reference)
if self.coupon_code:
from erpnext.accounts.doctype.pricing_rule.utils import validate_coupon_code
validate_coupon_code(self.coupon_code)
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.validate_with_previous_doc()
self.set_status()
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and not self.skip_delivery_note:
for d in self.get("items"):
if d.delivery_date and getdate(self.po_date) > getdate(d.delivery_date):
frappe.throw(_("Row #{0}: Expected Delivery Date cannot be before Purchase Order Date")
.format(d.idx))
if self.po_no and self.customer and not self.skip_delivery_note:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0] and not cint(frappe.db.get_single_value("Selling Settings",
"allow_against_multiple_purchase_orders")):
frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no))
def validate_for_items(self):
for d in self.get('items'):
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code, d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
def product_bundle_has_stock_item(self, product_bundle):
"""Returns true if product bundle has stock item"""
ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
return ret
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s",
(d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}")
.format(d.prevdoc_docname, self.order_type))
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.skip_delivery_note:
delivery_date_list = [d.delivery_date for d in self.get("items") if d.delivery_date]
max_delivery_date = max(delivery_date_list) if delivery_date_list else None
if not self.delivery_date:
self.delivery_date = max_delivery_date
if self.delivery_date:
for d in self.get("items"):
if not d.delivery_date:
d.delivery_date = self.delivery_date
if getdate(self.transaction_date) > getdate(d.delivery_date):
frappe.msgprint(_("Expected Delivery Date should be after Sales Order Date"),
indicator='orange', title=_('Warning'))
if getdate(self.delivery_date) != getdate(max_delivery_date):
self.delivery_date = max_delivery_date
else:
frappe.throw(_("Please enter Delivery Date"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project))
def validate_warehouse(self):
super(SalesOrder, self).validate_warehouse()
for d in self.get("items"):
if (frappe.get_cached_value("Item", d.item_code, "is_stock_item") == 1 or
(self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
and not d.warehouse and not cint(d.delivered_by_supplier):
frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
WarehouseRequired)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def validate_drop_ship(self):
for d in self.get('items'):
if d.delivered_by_supplier and not d.supplier:
frappe.throw(_("Row #{0}: Set Supplier for item {1}").format(d.idx, d.item_code))
def on_submit(self):
self.check_credit_limit()
self.update_reserved_qty()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
self.update_project()
self.update_prevdoc_status('submit')
self.update_blanket_order()
update_linked_doc(self.doctype, self.name, self.inter_company_order_reference)
if self.coupon_code:
from erpnext.accounts.doctype.pricing_rule.utils import update_coupon_code_count
update_coupon_code_count(self.coupon_code,'used')
def on_cancel(self):
super(SalesOrder, self).on_cancel()
# Cannot cancel closed SO
if self.status == 'Closed':
frappe.throw(_("Closed order cannot be cancelled. Unclose to cancel."))
self.check_nextdoc_docstatus()
self.update_reserved_qty()
self.update_project()
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
self.update_blanket_order()
unlink_inter_company_doc(self.doctype, self.name, self.inter_company_order_reference)
if self.coupon_code:
from erpnext.accounts.doctype.pricing_rule.utils import update_coupon_code_count
update_coupon_code_count(self.coupon_code,'cancelled')
def update_project(self):
if frappe.db.get_single_value('Selling Settings', 'sales_update_frequency') != "Each Transaction":
return
if self.project:
project = frappe.get_doc("Project", self.project)
project.update_sales_amount()
project.db_update()
def check_credit_limit(self):
# if bypass credit limit check is set to true (1) at sales order level,
# then we need not check the credit limit, and vice versa
if not cint(frappe.db.get_value("Customer Credit Limit",
{'parent': self.customer, 'parenttype': 'Customer', 'company': self.company},
"bypass_credit_limit_check")):
check_credit_limit(self.customer, self.company)
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""
select t1.name
from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
submit_dn = [get_link_to_form("Delivery Note", dn) for dn in submit_dn]
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order")
.format(", ".join(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
submit_rv = [get_link_to_form("Sales Invoice", si) for si in submit_rv]
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order")
.format(", ".join(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""
select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order = %s and t1.docstatus = 1""", self.name)
if submit_ms:
submit_ms = [get_link_to_form("Maintenance Schedule", ms) for ms in submit_ms]
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order")
.format(", ".join(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""
select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
submit_mv = [get_link_to_form("Maintenance Visit", mv) for mv in submit_mv]
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order")
.format(", ".join(submit_mv)))
# check work order
pro_order = frappe.db.sql_list("""
select name
from `tabWork Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
pro_order = [get_link_to_form("Work Order", po) for po in pro_order]
frappe.throw(_("Work Order {0} must be cancelled before cancelling this Sales Order")
.format(", ".join(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def update_status(self, status):
self.check_modified_date()
self.set_status(update=True, status=status)
self.update_reserved_qty()
self.notify_update()
clear_doctype_notifications(self)
def update_reserved_qty(self, so_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
item_wh_list = []
def _valid_for_reserve(item_code, warehouse):
if item_code and warehouse and [item_code, warehouse] not in item_wh_list \
and frappe.get_cached_value("Item", item_code, "is_stock_item"):
item_wh_list.append([item_code, warehouse])
for d in self.get("items"):
if (not so_item_rows or d.name in so_item_rows) and not d.delivered_by_supplier:
if self.has_product_bundle(d.item_code):
for p in self.get("packed_items"):
if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
_valid_for_reserve(p.item_code, p.warehouse)
else:
_valid_for_reserve(d.item_code, d.warehouse)
for item_code, warehouse in item_wh_list:
update_bin_qty(item_code, warehouse, {
"reserved_qty": get_reserved_qty(item_code, warehouse)
})
def on_update(self):
pass
def before_update_after_submit(self):
self.validate_po()
self.validate_drop_ship()
self.validate_supplier_after_submit()
self.validate_delivery_date()
def validate_supplier_after_submit(self):
"""Check that supplier is the same after submit if PO is already made"""
exc_list = []
for item in self.items:
if item.supplier:
supplier = frappe.db.get_value("Sales Order Item", {"parent": self.name, "item_code": item.item_code},
"supplier")
if item.ordered_qty > 0.0 and item.supplier != supplier:
exc_list.append(_("Row #{0}: Not allowed to change Supplier as Purchase Order already exists").format(item.idx))
if exc_list:
frappe.throw('\n'.join(exc_list))
def update_delivery_status(self):
"""Update delivery status from Purchase Order for drop shipping"""
tot_qty, delivered_qty = 0.0, 0.0
for item in self.items:
if item.delivered_by_supplier:
item_delivered_qty = frappe.db.sql("""select sum(qty)
from `tabPurchase Order Item` poi, `tabPurchase Order` po
where poi.sales_order_item = %s
and poi.item_code = %s
and poi.parent = po.name
and po.docstatus = 1
and po.status = 'Delivered'""", (item.name, item.item_code))
item_delivered_qty = item_delivered_qty[0][0] if item_delivered_qty else 0
item.db_set("delivered_qty", flt(item_delivered_qty), update_modified=False)
delivered_qty += item.delivered_qty
tot_qty += item.qty
if tot_qty != 0:
self.db_set("per_delivered", flt(delivered_qty/tot_qty) * 100,
update_modified=False)
def set_indicator(self):
"""Set indicator for portal"""
if self.per_billed < 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Not Paid and Not Delivered")
elif self.per_billed == 100 and self.per_delivered < 100:
self.indicator_color = "orange"
self.indicator_title = _("Paid and Not Delivered")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def get_work_order_items(self, for_raw_material_request=0):
'''Returns items with a BOM that do not already have a linked Work Order'''
items = []
for table in [self.items, self.packed_items]:
for i in table:
bom = get_default_bom_item(i.item_code)
stock_qty = i.qty if i.doctype == 'Packed Item' else i.stock_qty
if not for_raw_material_request:
total_work_order_qty = flt(frappe.db.sql('''select sum(qty) from `tabWork Order`
where production_item=%s and sales_order=%s and sales_order_item = %s and docstatus<2''', (i.item_code, self.name, i.name))[0][0])
pending_qty = stock_qty - total_work_order_qty
else:
pending_qty = stock_qty
if pending_qty:
if bom:
items.append(dict(
name= i.name,
item_code= i.item_code,
description= i.description,
bom = bom,
warehouse = i.warehouse,
pending_qty = pending_qty,
required_qty = pending_qty if for_raw_material_request else 0,
sales_order_item = i.name
))
else:
items.append(dict(
name= i.name,
item_code= i.item_code,
description= i.description,
bom = '',
warehouse = i.warehouse,
pending_qty = pending_qty,
required_qty = pending_qty if for_raw_material_request else 0,
sales_order_item = i.name
))
return items
def on_recurring(self, reference_doc, auto_repeat_doc):
def _get_delivery_date(ref_doc_delivery_date, red_doc_transaction_date, transaction_date):
delivery_date = get_next_schedule_date(ref_doc_delivery_date,
auto_repeat_doc.frequency, cint(auto_repeat_doc.repeat_on_day))
if delivery_date <= transaction_date:
delivery_date_diff = frappe.utils.date_diff(ref_doc_delivery_date, red_doc_transaction_date)
delivery_date = frappe.utils.add_days(transaction_date, delivery_date_diff)
return delivery_date
self.set("delivery_date", _get_delivery_date(reference_doc.delivery_date,
reference_doc.transaction_date, self.transaction_date ))
for d in self.get("items"):
reference_delivery_date = frappe.db.get_value("Sales Order Item",
{"parent": reference_doc.name, "item_code": d.item_code, "idx": d.idx}, "delivery_date")
d.set("delivery_date", _get_delivery_date(reference_delivery_date,
reference_doc.transaction_date, self.transaction_date))
def validate_serial_no_based_delivery(self):
reserved_items = []
normal_items = []
for item in self.items:
if item.ensure_delivery_based_on_produced_serial_no:
if item.item_code in normal_items:
frappe.throw(_("Cannot ensure delivery by Serial No as \
Item {0} is added with and without Ensure Delivery by \
Serial No.").format(item.item_code))
if item.item_code not in reserved_items:
if not frappe.get_cached_value("Item", item.item_code, "has_serial_no"):
frappe.throw(_("Item {0} has no Serial No. Only serilialized items \
can have delivery based on Serial No").format(item.item_code))
if not frappe.db.exists("BOM", {"item": item.item_code, "is_active": 1}):
frappe.throw(_("No active BOM found for item {0}. Delivery by \
Serial No cannot be ensured").format(item.item_code))
reserved_items.append(item.item_code)
else:
normal_items.append(item.item_code)
if not item.ensure_delivery_based_on_produced_serial_no and \
item.item_code in reserved_items:
frappe.throw(_("Cannot ensure delivery by Serial No as \
Item {0} is added with and without Ensure Delivery by \
Serial No.").format(item.item_code))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Orders'),
})
return list_context
@frappe.whitelist()
def close_or_unclose_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status == "Closed":
if so.status not in ("Cancelled", "Closed") and (so.per_delivered < 100 or so.per_billed < 100):
so.update_status(status)
else:
if so.status == "Closed":
so.update_status('Draft')
so.update_blanket_order()
frappe.local.message_log = []
def get_requested_item_qty(sales_order):
return frappe._dict(frappe.db.sql("""
select sales_order_item, sum(stock_qty)
from `tabMaterial Request Item`
where docstatus = 1
and sales_order = %s
group by sales_order_item
""", sales_order))
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
requested_item_qty = get_requested_item_qty(source_name)
def postprocess(source, doc):
doc.material_request_type = "Purchase"
def update_item(source, target, source_parent):
# qty is for packed items, because packed items don't have stock_qty field
qty = source.get("stock_qty") or source.get("qty")
target.project = source_parent.project
target.qty = qty - requested_item_qty.get(source.name, 0)
target.conversion_factor = 1
target.stock_qty = qty - requested_item_qty.get(source.name, 0)
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
"Packed Item": {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order",
"uom": "stock_uom"
},
"postprocess": update_item
},
"Sales Order Item": {
"doctype": "Material Request Item",
"field_map": {
"name": "sales_order_item",
"parent": "sales_order",
"stock_uom": "uom",
"stock_qty": "qty"
},
"condition": lambda doc: not frappe.db.exists('Product Bundle', doc.item_code) and doc.stock_qty > requested_item_qty.get(doc.name, 0),
"postprocess": update_item
}
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_project(source_name, target_doc=None):
def postprocess(source, doc):
doc.project_type = "External"
doc.project_name = source.name
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Project",
"validation": {
"docstatus": ["=", 1]
},
"field_map":{
"name" : "sales_order",
"base_grand_total" : "estimated_costing",
}
},
}, target_doc, postprocess)
return doc
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None, skip_item_mapping=False):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("set_po_nos")
target.run_method("calculate_taxes_and_totals")
if source.company_address:
target.update({'company_address': source.company_address})
else:
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Delivery Note", 'company_address', target.company_address))
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
item = get_item_defaults(target.item_code, source_parent.company)
item_group = get_item_group_defaults(target.item_code, source_parent.company)
if item:
target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center") \
or item.get("buying_cost_center") \
or item_group.get("buying_cost_center")
mapper = {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}
if not skip_item_mapping:
mapper["Sales Order Item"] = {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1
}
target_doc = get_mapped_doc("Sales Order", source_name, mapper, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None, ignore_permissions=False):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Entries in Sales Invoice Advance
if target.get("allocate_advances_automatically"):
target.set_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.flags.ignore_permissions = True
target.run_method("set_missing_values")
target.run_method("set_po_nos")
target.run_method("calculate_taxes_and_totals")
if source.company_address:
target.update({'company_address': source.company_address})
else:
# set company address
target.update(get_company_address(target.company))
if target.company_address:
target.update(get_fetch_values("Sales Invoice", 'company_address', target.company_address))
# set the redeem loyalty points if provided via shopping cart
if source.loyalty_points and source.order_type == "Shopping Cart":
target.redeem_loyalty_points = 1
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty - source.returned_qty
if source_parent.project:
target.cost_center = frappe.db.get_value("Project", source_parent.project, "cost_center")
if target.item_code:
item = get_item_defaults(target.item_code, source_parent.company)
item_group = get_item_group_defaults(target.item_code, source_parent.company)
cost_center = item.get("selling_cost_center") \
or item_group.get("selling_cost_center")
if cost_center:
target.cost_center = cost_center
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"field_map": {
"party_account_currency": "party_account_currency",
"payment_terms_template": "payment_terms_template"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.qty and (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount))
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess, ignore_permissions=ignore_permissions)
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.sales_order=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "sales_order"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Sales Order", filters)
data = frappe.db.sql("""
select
distinct `tabSales Order`.name, `tabSales Order`.customer_name, `tabSales Order`.status,
`tabSales Order`.delivery_status, `tabSales Order`.billing_status,
`tabSales Order Item`.delivery_date
from
`tabSales Order`, `tabSales Order Item`
where `tabSales Order`.name = `tabSales Order Item`.parent
and `tabSales Order`.skip_delivery_note = 0
and (ifnull(`tabSales Order Item`.delivery_date, '0000-00-00')!= '0000-00-00') \
and (`tabSales Order Item`.delivery_date between %(start)s and %(end)s)
and `tabSales Order`.docstatus < 2
{conditions}
""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
def make_purchase_order(source_name, for_supplier=None, selected_items=[], target_doc=None):
if isinstance(selected_items, string_types):
selected_items = json.loads(selected_items)
def set_missing_values(source, target):
target.supplier = supplier
target.apply_discount_on = ""
target.additional_discount_percentage = 0.0
target.discount_amount = 0.0
target.inter_company_order_reference = ""
default_price_list = frappe.get_value("Supplier", supplier, "default_price_list")
if default_price_list:
target.buying_price_list = default_price_list
if any( item.delivered_by_supplier==1 for item in source.items):
if source.shipping_address_name:
target.shipping_address = source.shipping_address_name
target.shipping_address_display = source.shipping_address
else:
target.shipping_address = source.customer_address
target.shipping_address_display = source.address_display
target.customer_contact_person = source.contact_person
target.customer_contact_display = source.contact_display
target.customer_contact_mobile = source.contact_mobile
target.customer_contact_email = source.contact_email
else:
target.customer = ""
target.customer_name = ""
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.schedule_date = source.delivery_date
target.qty = flt(source.qty) - flt(source.ordered_qty)
target.stock_qty = (flt(source.qty) - flt(source.ordered_qty)) * flt(source.conversion_factor)
target.project = source_parent.project
suppliers =[]
if for_supplier:
suppliers.append(for_supplier)
else:
sales_order = frappe.get_doc("Sales Order", source_name)
for item in sales_order.items:
if item.supplier and item.supplier not in suppliers:
suppliers.append(item.supplier)
if not suppliers:
frappe.throw(_("Please set a Supplier against the Items to be considered in the Purchase Order."))
for supplier in suppliers:
po =frappe.get_list("Purchase Order", filters={"sales_order":source_name, "supplier":supplier, "docstatus": ("<", "2")})
if len(po) == 0:
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Purchase Order",
"field_no_map": [
"address_display",
"contact_display",
"contact_mobile",
"contact_email",
"contact_person",
"taxes_and_charges"
],
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "sales_order_item"],
["parent", "sales_order"],
["stock_uom", "stock_uom"],
["uom", "uom"],
["conversion_factor", "conversion_factor"],
["delivery_date", "schedule_date"]
],
"field_no_map": [
"rate",
"price_list_rate"
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty and doc.supplier == supplier and doc.item_code in selected_items
}
}, target_doc, set_missing_values)
if not for_supplier:
doc.insert()
else:
suppliers =[]
if suppliers:
if not for_supplier:
frappe.db.commit()
return doc
else:
frappe.msgprint(_("PO already created for all sales order items"))
@frappe.whitelist()
def get_supplier(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_group"]
else:
fields = ["name", "supplier_name", "supplier_group"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
and name in (select supplier from `tabSales Order Item` where parent = %(parent)s)
and name not in (select supplier from `tabPurchase Order` po inner join `tabPurchase Order Item` poi
on po.name=poi.parent where po.docstatus<2 and poi.sales_order=%(parent)s)
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': frappe.db.escape(searchfield)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'parent': filters.get('parent')
})
@frappe.whitelist()
def make_work_orders(items, sales_order, company, project=None):
'''Make Work Orders against the given Sales Order for the given `items`'''
items = json.loads(items).get('items')
out = []
for i in items:
if not i.get("bom"):
frappe.throw(_("Please select BOM against item {0}").format(i.get("item_code")))
if not i.get("pending_qty"):
frappe.throw(_("Please select Qty against item {0}").format(i.get("item_code")))
work_order = frappe.get_doc(dict(
doctype='Work Order',
production_item=i['item_code'],
bom_no=i.get('bom'),
qty=i['pending_qty'],
company=company,
sales_order=sales_order,
sales_order_item=i['sales_order_item'],
project=project,
fg_warehouse=i['warehouse'],
description=i['description']
)).insert()
work_order.set_work_order_operations()
work_order.save()
out.append(work_order)
return [p.name for p in out]
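# Hedged usage sketch (not part of the original file): get_work_order_items() on a submitted
# Sales Order yields dicts shaped the way make_work_orders() expects, so a server-side caller
# might chain them as below; the document name is hypothetical.
#
#   so = frappe.get_doc("Sales Order", "SAL-ORD-2021-00001")
#   items = so.get_work_order_items()
#   created = make_work_orders(json.dumps({"items": items}), so.name, so.company, project=so.project)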
@frappe.whitelist()
def update_status(status, name):
so = frappe.get_doc("Sales Order", name)
so.update_status(status)
def get_default_bom_item(item_code):
bom = frappe.get_all('BOM', dict(item=item_code, is_active=True),
order_by='is_default desc')
bom = bom[0].name if bom else None
return bom
@frappe.whitelist()
def make_raw_material_request(items, company, sales_order, project=None):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
if isinstance(items, string_types):
items = frappe._dict(json.loads(items))
for item in items.get('items'):
item["include_exploded_items"] = items.get('include_exploded_items')
item["ignore_existing_ordered_qty"] = items.get('ignore_existing_ordered_qty')
item["include_raw_materials_from_sales_order"] = items.get('include_raw_materials_from_sales_order')
items.update({
'company': company,
'sales_order': sales_order
})
raw_materials = get_items_for_material_requests(items)
if not raw_materials:
frappe.msgprint(_("Material Request not created, as quantity for Raw Materials already available."))
return
material_request = frappe.new_doc('Material Request')
material_request.update(dict(
doctype = 'Material Request',
transaction_date = nowdate(),
company = company,
requested_by = frappe.session.user,
material_request_type = 'Purchase'
))
for item in raw_materials:
item_doc = frappe.get_cached_doc('Item', item.get('item_code'))
schedule_date = add_days(nowdate(), cint(item_doc.lead_time_days))
material_request.append('items', {
'item_code': item.get('item_code'),
'qty': item.get('quantity'),
'schedule_date': schedule_date,
'warehouse': item.get('warehouse'),
'sales_order': sales_order,
'project': project
})
material_request.insert()
material_request.flags.ignore_permissions = 1
material_request.run_method("set_missing_values")
material_request.submit()
return material_request
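# Illustrative payload sketch (assumption, placeholder values only): the `items` argument above
# is expected to be a JSON object carrying the three include/ignore flags plus an "items" list,
# mirroring the keys read at the top of the function:
#   {"include_exploded_items": 1, "ignore_existing_ordered_qty": 0,
#    "include_raw_materials_from_sales_order": 1,
#    "items": [{"item_code": "FG-0001", ...}]}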
@frappe.whitelist()
def make_inter_company_purchase_order(source_name, target_doc=None):
from erpnext.accounts.doctype.sales_invoice.sales_invoice import make_inter_company_transaction
return make_inter_company_transaction("Sales Order", source_name, target_doc)
@frappe.whitelist()
def create_pick_list(source_name, target_doc=None):
def update_item_quantity(source, target, source_parent):
target.qty = flt(source.qty) - flt(source.delivered_qty)
target.stock_qty = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.conversion_factor)
doc = get_mapped_doc('Sales Order', source_name, {
'Sales Order': {
'doctype': 'Pick List',
'validation': {
'docstatus': ['=', 1]
}
},
'Sales Order Item': {
'doctype': 'Pick List Item',
'field_map': {
'parent': 'sales_order',
'name': 'sales_order_item'
},
'postprocess': update_item_quantity,
'condition': lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1
},
}, target_doc)
doc.purpose = 'Delivery against Sales Order'
doc.set_item_locations()
return doc
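# Usage note (descriptive, not upstream documentation): create_pick_list only maps submitted
# Sales Orders (docstatus = 1) and copies the still-undelivered portion of each row, e.g.
# create_pick_list("SO-0001"), where "SO-0001" is a placeholder document name.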
def update_produced_qty_in_so_item(sales_order, sales_order_item):
#for multiple work orders against same sales order item
linked_wo_with_so_item = frappe.db.get_all('Work Order', ['produced_qty'], {
'sales_order_item': sales_order_item,
'sales_order': sales_order,
'docstatus': 1
})
total_produced_qty = 0
for wo in linked_wo_with_so_item:
total_produced_qty += flt(wo.get('produced_qty'))
if not total_produced_qty and frappe.flags.in_patch: return
frappe.db.set_value('Sales Order Item', sales_order_item, 'produced_qty', total_produced_qty)
|
StrellaGroup/erpnext
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
gpl-3.0
| 37,262
|
[
"VisIt"
] |
be47042e4601e6dd73736994545c66eada945b644a6c36e1533e2294318c3ab6
|
# -*- coding: utf-8 -*-
##############################################################################
# 2014 E2OpenPlugins #
# #
# This file is open source software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License version 2 as #
# published by the Free Software Foundation. #
# #
##############################################################################
# Simulate the oe-a boxbranding module (Only functions required by OWIF) #
##############################################################################
from Plugins.Extensions.OpenWebif.__init__ import _
from Components.About import about
from socket import has_ipv6
from Tools.Directories import fileExists, pathExists
import string
import os, hashlib
from os import urandom  # used by get_random() below
from time import time  # used by get_random() below
try:
from Components.About import about
except ImportError:
pass
tpmloaded = 1
try:
from enigma import eTPM
if not hasattr(eTPM, 'getData'):
tpmloaded = 0
except:
tpmloaded = 0
def validate_certificate(cert, key):
buf = decrypt_block(cert[8:], key)
if buf is None:
return None
return buf[36:107] + cert[139:196]
def get_random():
try:
xor = lambda a,b: ''.join(chr(ord(c)^ord(d)) for c,d in zip(a,b*100))
random = urandom(8)
x = str(time())[-8:]
result = xor(random, x)
return result
except:
return None
def bin2long(s):
return reduce( lambda x,y:(x<<8L)+y, map(ord, s))
def long2bin(l):
res = ""
for byte in range(128):
res += chr((l >> (1024 - (byte + 1) * 8)) & 0xff)
return res
def rsa_pub1024(src, mod):
return long2bin(pow(bin2long(src), 65537, bin2long(mod)))
def decrypt_block(src, mod):
if len(src) != 128 and len(src) != 202:
return None
dest = rsa_pub1024(src[:128], mod)
hash = hashlib.sha1(dest[1:107])
if len(src) == 202:
hash.update(src[131:192])
result = hash.digest()
if result == dest[107:127]:
return dest
return None
def tpm_check():
try:
tpm = eTPM()
rootkey = ['\x9f', '|', '\xe4', 'G', '\xc9', '\xb4', '\xf4', '#', '&', '\xce', '\xb3', '\xfe', '\xda', '\xc9', 'U', '`', '\xd8', '\x8c', 's', 'o', '\x90', '\x9b', '\\', 'b', '\xc0', '\x89', '\xd1', '\x8c', '\x9e', 'J', 'T', '\xc5', 'X', '\xa1', '\xb8', '\x13', '5', 'E', '\x02', '\xc9', '\xb2', '\xe6', 't', '\x89', '\xde', '\xcd', '\x9d', '\x11', '\xdd', '\xc7', '\xf4', '\xe4', '\xe4', '\xbc', '\xdb', '\x9c', '\xea', '}', '\xad', '\xda', 't', 'r', '\x9b', '\xdc', '\xbc', '\x18', '3', '\xe7', '\xaf', '|', '\xae', '\x0c', '\xe3', '\xb5', '\x84', '\x8d', '\r', '\x8d', '\x9d', '2', '\xd0', '\xce', '\xd5', 'q', '\t', '\x84', 'c', '\xa8', ')', '\x99', '\xdc', '<', '"', 'x', '\xe8', '\x87', '\x8f', '\x02', ';', 'S', 'm', '\xd5', '\xf0', '\xa3', '_', '\xb7', 'T', '\t', '\xde', '\xa7', '\xf1', '\xc9', '\xae', '\x8a', '\xd7', '\xd2', '\xcf', '\xb2', '.', '\x13', '\xfb', '\xac', 'j', '\xdf', '\xb1', '\x1d', ':', '?']
random = None
result = None
l2r = False
l2k = None
l3k = None
l2c = tpm.getData(eTPM.DT_LEVEL2_CERT)
if l2c is None:
return 0
l2k = validate_certificate(l2c, rootkey)
if l2k is None:
return 0
l3c = tpm.getData(eTPM.DT_LEVEL3_CERT)
if l3c is None:
return 0
l3k = validate_certificate(l3c, l2k)
if l3k is None:
return 0
random = get_random()
if random is None:
return 0
value = tpm.computeSignature(random)
result = decrypt_block(value, l3k)
if result is None:
return 0
if result [80:88] != random:
return 0
return 1
except:
return 0
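# Summary of the check above (descriptive comment only): the hard-coded root public key validates
# the level-2 certificate, the level-2 key validates the level-3 certificate, and the level-3 key
# must verify a signature computed by the TPM over an 8-byte random challenge; tpm_check() returns
# 1 only if the decrypted signature embeds that same challenge, otherwise 0.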
def getAllInfo():
info = {}
brand = "unknown"
model = "unknown"
procmodel = "unknown"
orgdream = 0
if tpmloaded:
orgdream = tpm_check()
if fileExists("/proc/stb/info/hwmodel"):
brand = "DAGS"
f = open("/proc/stb/info/hwmodel",'r')
procmodel = f.readline().strip()
f.close()
if (procmodel.startswith("optimuss") or procmodel.startswith("pingulux")):
brand = "Edision"
model = procmodel.replace("optimmuss", "Optimuss ").replace("plus", " Plus").replace(" os", " OS")
elif (procmodel.startswith("fusion") or procmodel.startswith("purehd") or procmodel.startswith("revo4k") or procmodel.startswith("galaxy4k")):
brand = "Xsarius"
if procmodel == "fusionhd":
model = procmodel.replace("fusionhd", "Fusion HD")
elif procmodel == "fusionhdse":
model = procmodel.replace("fusionhdse", "Fusion HD SE")
elif procmodel == "purehd":
model = procmodel.replace("purehd", "Pure HD")
elif procmodel == "purehdse":
model = procmodel.replace("purehdse", "Pure HD SE")
elif procmodel == "revo4k":
model = procmodel.replace("revo4k", "Revo4K")
elif procmodel == "galaxy4k":
model = procmodel.replace("galaxy4k", "Galaxy4K")
elif fileExists("/proc/stb/info/azmodel"):
brand = "AZBox"
f = open("/proc/stb/info/model",'r') # To-Do: Check if "model" is really correct ...
procmodel = f.readline().strip()
f.close()
model = procmodel.lower()
elif fileExists("/proc/stb/info/gbmodel"):
brand = "GigaBlue"
f = open("/proc/stb/info/gbmodel",'r')
procmodel = f.readline().strip()
f.close()
if procmodel == "GBQUAD PLUS":
model = procmodel.replace("GBQUAD", "Quad").replace("PLUS", " Plus")
elif procmodel == "gb7252":
model = procmodel.replace("gb7252", "UHD Quad 4k")
elif fileExists("/proc/stb/info/vumodel") and not fileExists("/proc/stb/info/boxtype"):
brand = "Vu+"
f = open("/proc/stb/info/vumodel",'r')
procmodel = f.readline().strip()
f.close()
model = procmodel.title().replace("olose", "olo SE").replace("olo2se", "olo2 SE").replace("2", "²")
elif fileExists("/proc/boxtype"):
f = open("/proc/boxtype",'r')
procmodel = f.readline().strip().lower()
f.close()
if procmodel in ("adb2850", "adb2849", "bska", "bsla", "bxzb", "bzzb"):
brand = "Advanced Digital Broadcast"
if procmodel in ("bska", "bxzb"):
model = "ADB 5800S"
elif procmodel in ("bsla", "bzzb"):
model = "ADB 5800SX"
elif procmodel == "adb2849":
model = "ADB 2849ST"
else:
model = "ADB 2850ST"
elif procmodel in ("esi88", "uhd88"):
brand = "Sagemcom"
if procmodel == "uhd88":
model = "UHD 88"
else:
model = "ESI 88"
elif fileExists("/proc/stb/info/boxtype"):
f = open("/proc/stb/info/boxtype",'r')
procmodel = f.readline().strip().lower()
f.close()
if procmodel.startswith("et"):
if procmodel == "et7000mini":
brand = "Galaxy Innovations"
model = "ET-7000 Mini"
elif procmodel == "et11000":
brand = "Galaxy Innovations"
model = "ET-11000"
else:
brand = "Xtrend"
model = procmodel.upper()
elif procmodel.startswith("xpeed"):
brand = "Golden Interstar"
model = procmodel
elif procmodel.startswith("xp"):
brand = "MaxDigital"
model = procmodel.upper()
elif procmodel.startswith("ixuss"):
brand = "Medialink"
model = procmodel.replace(" ", "")
elif procmodel == "formuler4turbo":
brand = "Formuler"
model = "4 Turbo"
elif procmodel.startswith("formuler"):
brand = "Formuler"
model = procmodel.replace("formuler","")
elif procmodel.startswith("mbtwinplus"):
brand = "Miraclebox"
model = "Premium Twin+"
elif procmodel.startswith("alphatriplehd"):
brand = "SAB"
model = "Alpha Triple HD"
elif procmodel in ("7000s", "mbmicro"):
procmodel = "mbmicro"
brand = "Miraclebox"
model = "Premium Micro"
elif procmodel in ("7005s", "mbmicrov2"):
procmodel = "mbmicrov2"
brand = "Miraclebox"
model = "Premium Micro v2"
elif procmodel.startswith("ini"):
if procmodel.endswith("9000ru"):
brand = "Sezam"
model = "Marvel"
elif procmodel.endswith("5000ru"):
brand = "Sezam"
model = "hdx"
elif procmodel.endswith("1000ru"):
brand = "Sezam"
model = "hde"
elif procmodel.endswith("5000sv"):
brand = "Miraclebox"
model = "mbtwin"
elif procmodel.endswith("1000sv"):
brand = "Miraclebox"
model = "mbmini"
elif procmodel.endswith("1000de"):
brand = "Golden Interstar"
model = "Xpeed LX"
elif procmodel.endswith("9000de"):
brand = "Golden Interstar"
model = "Xpeed LX3"
elif procmodel.endswith("1000lx"):
brand = "Golden Interstar"
model = "Xpeed LX"
elif procmodel.endswith("de"):
brand = "Golden Interstar"
elif procmodel.endswith("1000am"):
brand = "Atemio"
model = "5x00"
else:
brand = "Venton"
model = "HDx"
elif procmodel.startswith("unibox-"):
brand = "Venton"
model = "HDe"
elif procmodel == "hd1100":
brand = "Mut@nt"
model = "HD1100"
elif procmodel == "hd1200":
brand = "Mut@nt"
model = "HD1200"
elif procmodel == "hd1265":
brand = "Mut@nt"
model = "HD1265"
elif procmodel == "hd2400":
brand = "Mut@nt"
model = "HD2400"
elif procmodel == "hd51":
brand = "Mut@nt"
model = "HD51"
elif procmodel == "hd11":
brand = "Mut@nt"
model = "HD11"
elif procmodel == "hd500c":
brand = "Mut@nt"
model = "HD500c"
elif procmodel == "hd530c":
brand = "Mut@nt"
model = "HD530c"
elif procmodel == "arivalink200":
brand = "Ferguson"
model = "Ariva @Link 200"
elif procmodel.startswith("spark"):
brand = "Fulan"
if procmodel == "spark7162":
model = "Spark 7162"
else:
model = "Spark"
elif procmodel == "spycat":
brand = "Spycat"
model = "Spycat"
elif procmodel == "spycatmini":
brand = "Spycat"
model = "Spycat Mini"
elif procmodel == "spycatminiplus":
brand = "Spycat"
model = "Spycat Mini+"
elif procmodel == "spycat4kmini":
brand = "Spycat"
model = "spycat 4K Mini"
elif procmodel == "vipercombo":
brand = "Amiko"
model = "ViperCombo"
elif procmodel == "vipert2c":
brand = "Amiko"
model = "ViperT2C"
elif procmodel == "wetekplay":
brand = "WeTeK"
model = "Play"
elif procmodel.startswith("os"):
brand = "Edision"
if procmodel == "osmini":
model = "OS Mini"
elif procmodel == "osminiplus":
model = "OS Mini+"
elif procmodel == "osmega":
model = "OS Mega"
elif procmodel == "osnino":
model = "OS Nino"
else:
model = procmodel
elif procmodel == "h3":
brand = "Zgemma"
model = "H3 series"
elif procmodel == "h4":
brand = "Zgemma"
model = "H4 series"
elif procmodel == "h5":
brand = "Zgemma"
model = "H5 series"
elif procmodel == "h7":
brand = "Zgemma"
model = "H7 series"
elif procmodel == "lc":
brand = "Zgemma"
model = "LC"
elif procmodel == "sh1":
brand = "Zgemma"
model = "Star series"
elif procmodel == "i55":
brand = "Zgemma"
model = "i55"
elif procmodel == "vs1500":
brand = "Vimastec"
model = "vs1500"
elif procmodel == "sf4008":
brand = "Octagon"
model = procmodel
elif fileExists("/proc/stb/info/model"):
f = open("/proc/stb/info/model",'r')
procmodel = f.readline().strip().lower()
f.close()
if procmodel == "tf7700hdpvr":
brand = "Topfield"
model = "TF7700 HDPVR"
elif procmodel == "dsi87":
brand = "Sagemcom"
model = "DSI 87"
elif procmodel.startswith("spark"):
brand = "Fulan"
if procmodel == "spark7162":
model = "Spark 7162"
else:
model = "Spark"
elif (procmodel.startswith("dm") and not procmodel == "dm8000"):
brand = "Dream Multimedia"
model = procmodel.replace("dm", "DM", 1)
# A "dm8000" is only a Dreambox if it passes the tpm verification:
elif procmodel == "dm8000" and orgdream:
brand = "Dream Multimedia"
model = "DM8000"
else:
model = procmodel
if fileExists("/etc/.box"):
distro = "HDMU"
f = open("/etc/.box",'r')
tempmodel = f.readline().strip().lower()
if tempmodel.startswith("ufs") or model.startswith("ufc"):
brand = "Kathrein"
model = tempmodel.upcase()
procmodel = tempmodel
elif tempmodel.startswith("spark"):
brand = "Fulan"
model = tempmodel.title()
procmodel = tempmodel
elif tempmodel.startswith("xcombo"):
brand = "EVO"
model = "enfinityX combo plus"
procmodel = "vg2000"
type = procmodel
if type in ("et9000", "et9100", "et9200", "et9500"):
type = "et9x00"
elif type in ("et5000", "et6000", "et6x00"):
type = "et5x00"
elif type == "et4000":
type = "et4x00"
elif type == "xp1000":
type = "xp1000"
elif type in ("bska", "bxzb"):
type = "nbox_white"
elif type in ("bsla", "bzzb"):
type = "nbox"
elif type == "sagemcom88":
type = "esi88"
elif type in ("tf7700hdpvr", "topf"):
type = "topf"
info['brand'] = brand
info['model'] = model
info['procmodel'] = procmodel
info['type'] = type
remote = "dmm"
if procmodel in ("solo", "duo", "uno", "solo2", "solose", "zero", "solo4k", "uno4k", "ultimo4k"):
remote = "vu_normal"
elif procmodel == "duo2":
remote = "vu_duo2"
elif procmodel == "ultimo":
remote = "vu_ultimo"
elif procmodel in ("uno4kse", "zero4k"):
remote = "vu_normal_02"
elif procmodel == "e3hd":
remote = "e3hd"
elif procmodel in ("et9x00", "et9000", "et9100", "et9200", "et9500"):
remote = "et9x00"
elif procmodel in ("et5x00", "et5000", "et6x00", "et6000"):
remote = "et5x00"
elif procmodel in ("et4x00", "et4000"):
remote = "et4x00"
elif procmodel == "et6500":
remote = "et6500"
elif procmodel in ("et8x00", "et8000", "et8500", "et8500s", "et10000"):
remote = "et8000"
elif procmodel in ("et7x00", "et7000", "et7500"):
remote = "et7x00"
elif procmodel in ("et7000mini", "et11000"):
remote = "et7000mini"
elif procmodel == "gbquad":
remote = "gigablue"
elif procmodel == "gbquadplus":
remote = "gbquadplus"
elif procmodel == "gb7252":
remote = "gb7252"
elif procmodel in ("formuler1", "formuler3", "formuler4", "formuler4turbo"):
remote = "formuler1"
elif procmodel in ("azboxme", "azboxminime", "me", "minime"):
remote = "me"
elif procmodel in ("optimussos1", "optimussos1plus", "optimussos2", "optimussos2plus"):
remote = "optimuss"
elif procmodel in ("premium", "premium+"):
remote = "premium"
elif procmodel in ("elite", "ultra"):
remote = "elite"
elif procmodel in ("ini-1000", "ini-1000ru"):
remote = "ini-1000"
elif procmodel in ("ini-1000sv", "ini-5000sv", "ini-9000de"):
remote = "miraclebox"
elif procmodel in ("mbtwinplus", "mbmicro", "mbmicrov2"):
remote = "miraclebox2"
elif procmodel == "alphatriplehd":
remote = "alphatriplehd"
elif procmodel == "ini-3000":
remote = "ini-3000"
elif procmodel in ("ini-7012", "ini-7000", "ini-5000", "ini-5000ru"):
remote = "ini-7000"
elif procmodel.startswith("spark"):
remote = "spark"
elif procmodel == "xp1000":
remote = "xp1000"
elif procmodel.startswith("xpeedlx"):
remote = "xpeedlx"
elif procmodel in ("adb2850", "adb2849", "bska", "bsla", "bxzb", "bzzb", "esi88", "uhd88", "dsi87", "arivalink200"):
remote = "nbox"
elif procmodel in ("hd1100", "hd1200", "hd1265", "hd1400", "hd51", "hd11", "hd500c", "hd530c"):
remote = "hd1x00"
elif procmodel == "hd2400":
remote = "hd2400"
elif procmodel in ("spycat", "spycatmini", "spycatminiplus", "spycat4kmini"):
remote = "spycat"
elif procmodel.startswith("ixuss"):
remote = procmodel.replace(" ", "")
elif procmodel == "vg2000":
remote = "xcombo"
elif procmodel == "dm8000" and orgdream:
remote = "dmm1"
elif procmodel in ("dm7080", "dm7020hd", "dm7020hdv2", "dm800sev2", "dm500hdv2", "dm520", "dm820", "dm900"):
remote = "dmm2"
elif procmodel == "wetekplay":
remote = procmodel
elif procmodel.startswith("osm"):
remote = "osmini"
elif procmodel.startswith("osn"):
remote = "edision1"
elif procmodel in ("fusionhd"):
remote = procmodel
elif procmodel in ("fusionhdse"):
remote = procmodel
elif procmodel in ("purehd", "purehdse"):
remote = "purehd"
elif procmodel in ("revo4k"):
remote = procmodel
elif procmodel in ("galaxy4k"):
remote = procmodel
elif procmodel in ("sh1", "lc"):
remote = "sh1"
elif procmodel in ("h3", "h4", "h5", "h7"):
remote = "h3"
elif procmodel == "i55":
remote = "i55"
elif procmodel in ("vipercombo", "vipert2c"):
remote = "amiko"
elif procmodel == "sf4008":
remote = "octagon"
elif procmodel in ("vs1100", "vs1500"):
remote = "vs1x00"
info['remote'] = remote
kernelstr = about.getKernelVersionString()
# use the numeric major kernel version so the comparisons below behave as intended
kernel = int(kernelstr[0]) if kernelstr[:1].isdigit() else 0
distro = "unknown"
imagever = "unknown"
imagebuild = ""
driverdate = "unknown"
# Assume OE 1.6
oever = "OE 1.6"
if kernel>2:
oever = "OE 2.0"
if fileExists("/etc/.box"):
distro = "HDMU"
oever = "private"
elif fileExists("/etc/bhversion"):
distro = "Black Hole"
f = open("/etc/bhversion",'r')
imagever = f.readline().strip()
f.close()
if kernel>2:
oever = "OpenVuplus 2.1"
elif fileExists("/etc/vtiversion.info"):
distro = "VTi-Team Image"
f = open("/etc/vtiversion.info",'r')
imagever = f.readline().strip().replace("VTi-Team Image ", "").replace("Release ", "").replace("v.", "")
f.close()
oever = "OE 1.6"
imagelist = imagever.split('.')
imagebuild = imagelist.pop()
imagever = ".".join(imagelist)
if kernel>2:
oever = "OpenVuplus 2.1"
if ((imagever == "5.1") or (imagever[0] > 5)):
oever = "OpenVuplus 2.1"
elif fileExists("/var/grun/grcstype"):
distro = "Graterlia OS"
try:
imagever = about.getImageVersionString()
except: # nosec
pass
# ToDo: If your distro gets detected as OpenPLi, feel free to add a detection for your distro here ...
else:
# OE 2.2 uses apt, not opkg
if not fileExists("/etc/opkg/all-feed.conf"):
oever = "OE 2.2"
else:
try:
f = open("/etc/opkg/all-feed.conf",'r')
oeline = f.readline().strip().lower()
f.close()
distro = oeline.split( )[1].replace("-all","")
except: # nosec
pass
if distro == "openpli":
oever = "PLi-OE"
try:
imagelist = open("/etc/issue").readlines()[-2].split()[1].split('.')
imagever = imagelist.pop(0)
if imagelist:
imagebuild = "".join(imagelist)
else:
# deal with major release versions only
if imagever.isdigit():
imagebuild = "0"
except: # nosec
# just in case
pass
elif distro == "openrsi":
oever = "PLi-OE"
else:
try:
imagever = about.getImageVersionString()
except: # nosec
pass
if (distro == "unknown" and brand == "Vu+" and fileExists("/etc/version")):
# Since OE-A uses boxbranding and bh or vti can be detected, there isn't much else left for Vu+ boxes
distro = "Vu+ original"
f = open("/etc/version",'r')
imagever = f.readline().strip()
f.close()
if kernel>2:
oever = "OpenVuplus 2.1"
# reporting the installed dvb-module version is as close as we get without too much hassle
driverdate = 'unknown'
try:
driverdate = os.popen('/usr/bin/opkg -V0 list_installed *dvb-modules*').readline().split( )[2] # nosec
except:
try:
driverdate = os.popen('/usr/bin/opkg -V0 list_installed *dvb-proxy*').readline().split( )[2] # nosec
except:
try:
driverdate = os.popen('/usr/bin/opkg -V0 list_installed *kernel-core-default-gos*').readline().split( )[2] # nosec
except: # nosec
pass
info['oever'] = oever
info['distro'] = distro
info['imagever'] = imagever
info['imagebuild'] = imagebuild
info['driverdate'] = driverdate
return info
STATIC_INFO_DIC = getAllInfo()
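# The hardware/image probe above runs once at import time; the accessor functions below only
# read from the cached STATIC_INFO_DIC dictionary.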
def getMachineBuild():
return STATIC_INFO_DIC['procmodel']
def getMachineBrand():
return STATIC_INFO_DIC['brand']
def getMachineName():
return STATIC_INFO_DIC['model']
def getMachineProcModel():
return STATIC_INFO_DIC['procmodel']
def getBoxType():
return STATIC_INFO_DIC['type']
def getOEVersion():
return STATIC_INFO_DIC['oever']
def getDriverDate():
return STATIC_INFO_DIC['driverdate']
def getImageVersion():
return STATIC_INFO_DIC['imagever']
def getImageBuild():
return STATIC_INFO_DIC['imagebuild']
def getImageDistro():
return STATIC_INFO_DIC['distro']
class rc_model:
def getRcFolder(self):
return STATIC_INFO_DIC['remote']
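# Illustrative usage sketch (assumption, not part of the module): consumers import the accessors
# above, e.g.
#   from Plugins.Extensions.OpenWebif.controllers.models.owibranding import getBoxType, getImageDistro
#   boxinfo = "%s %s (%s)" % (getMachineBrand(), getMachineName(), getImageDistro())
# The import path is inferred from this file's location and may differ per image.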
|
mickeyreg/e2openplugin-OpenWebif
|
plugin/controllers/models/owibranding.py
|
Python
|
gpl-3.0
| 19,849
|
[
"Galaxy"
] |
bdb283cfa907df12491fe4b6002b02dd169f245b22055ea9942b35e333bbb2d7
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2013 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import sqlite3, os
import gettext
from gettext import gettext as _
gettext.textdomain('remindor-common')
from remindor_common import datetimeutil
#def database_file():
#return os.getenv('HOME') + '/.config/indicator-remindor/indicator-remindor.db'
class Database():
def __init__(self, filename):
self.filename = filename
directory = os.path.dirname(self.filename)
if not os.path.exists(directory):
os.makedirs(directory)
migrate = False
if not os.path.exists(filename):
migrate = True
self.connection = sqlite3.connect(filename)
self.connection.text_factory = str
self.cursor = self.connection.cursor()
self.setup()
if migrate:
self.migrate_from_couch()
if self.old_version < 9:
self.fix_time_from_to()
if self.old_version < 11:
self.fix_time()
def setup(self):
self.execute("""CREATE TABLE IF NOT EXISTS version (id INTEGER PRIMARY KEY AUTOINCREMENT,
version TEXT)""")
self.old_version = self.version()
if self.old_version < 11:
self.version(11)
#00 - id
#01 - label
#02 - notes
#03 - time
#04 - date
#05 - sound
#06 - sound length
#07 - sound loop
#08 - popup notification
#09 - command
#10 - boxcar notification
#11 - popup dialog
#12 - pushbullet device
self.execute("""CREATE TABLE IF NOT EXISTS alarms (id INTEGER PRIMARY KEY AUTOINCREMENT, label TEXT,
notes TEXT, time TEXT, date TEXT, sound TEXT, sound_length INTEGER, sound_loop INTEGER,
notification BOOLEAN, command TEXT, boxcar_notification BOOLEAN DEFAULT 0,
dialog BOOLEAN DEFAULT 0, pushbullet_device TEXT DEFAULT '')""")
if self.old_version < 1:
try:
self.fetchone("SELECT boxcar_notification FROM alarms WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE alarms ADD COLUMN boxcar_notification BOOLEAN DEFAULT 0")
if self.old_version < 2:
try:
self.fetchone("SELECT dialog FROM alarms WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE alarms ADD COLUMN dialog BOOLEAN DEFAULT 0")
if self.old_version < 10:
try:
self.fetchone('SELECT pushbullet_device FROM alarms WHERE 1')
except sqlite3.OperationalError:
self.execute("ALTER TABLE alarms ADD COLUMN pushbullet_device TEXT DEFAULT ''")
#00 - id
#01 - label
#02 - quick label
#03 - time
#04 - date
#05 - command
#06 - sound
#07 - sound length
#08 - sound loop
#09 - sound loop times
#10 - notification (popup)
#11 - time format
#12 - date format
#13 - today color
#14 - future color
#15 - past color
#16 - change indicator icon
#17 - popup dialog
#18 - postpone minutes
#19 - quick minutes
#20 - news notification
#21 - quick reminder popup
#22 - quick reminder dialog
#23 - quick reminder use standard reminder sound
#24 - quick show info on dialog
#25 - hide indicator
#26 - indicator icon index
#27 - quick use slider
#28 - quick time unit
self.has_settings = (self.fetchone("SELECT name FROM sqlite_master WHERE type='table' AND name='settings'") != None)
if self.has_settings:
self.execute("""CREATE TABLE IF NOT EXISTS settings (id INTEGER PRIMARY KEY AUTOINCREMENT,
label TEXT, quick_label TEXT, time TEXT, date TEXT, command TEXT, sound TEXT,
sound_length INTEGER, sound_loop BOOLEAN, sound_loop_times INTEGER, notification BOOLEAN,
time_format INTEGER, date_format INTEGER, today_color TEXT, future_color TEXT,
past_color TEXT, indicator_icon BOOLEAN DEFAULT 1, dialog BOOLEAN DEFAULT 0,
postpone INTEGER DEFAULT 30, quick_minutes INTEGER DEFAULT 30, news BOOLEAN DEFAULT 1,
quick_popup BOOLEAN DEFAULT 1, quick_dialog BOOLEAN DEFAULT 0, quick_sound BOOLEAN DEFAULT 0,
quick_info BOOLEAN DEFAULT 1, hide_indicator BOOLEAN DEFAULT 0,
indicator_icon_index INTEGER DEFAULT 0, quick_slider BOOLEAN DEFAULT 1,
quick_unit INTEGER DEFAULT 0)""")
if self.old_version < 1:
try:
self.fetchone("SELECT indicator_icon FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN indicator_icon BOOLEAN DEFAULT 1")
if self.old_version < 2:
try:
self.fetchone("SELECT dialog FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN dialog BOOLEAN DEFAULT 0")
if self.old_version < 3:
try:
self.fetchone("SELECT postpone FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN postpone INTEGER DEFAULT 30")
try:
self.fetchone("SELECT quick_minutes FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN quick_minutes INTEGER DEFAULT 30")
if self.old_version < 4:
try:
self.fetchone("SELECT news FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN news BOOLEAN DEFAULT 1")
if self.old_version < 5:
try:
self.fetchone("SELECT quick_popup FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN quick_popup BOOLEAN DEFAULT 1")
self.execute("ALTER TABLE settings ADD COLUMN quick_dialog BOOLEAN DEFAULT 0")
self.execute("ALTER TABLE settings ADD COLUMN quick_sound BOOLEAN DEFAULT 0")
self.execute("ALTER TABLE settings ADD COLUMN quick_info BOOLEAN DEFAULT 1")
self.execute("ALTER TABLE settings ADD COLUMN hide_indicator BOOLEAN DEFAULT 0")
if self.old_version < 6:
try:
self.fetchone("SELECT indicator_icon_index FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN indicator_icon_index INTEGER DEFAULT 0")
if self.old_version < 7:
try:
self.fetchone("SELECT quick_slider FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN quick_slider BOOLEAN DEFAULT 1")
if self.old_version < 8:
try:
self.fetchone("SELECT quick_unit FROM settings WHERE 1")
except sqlite3.OperationalError:
self.execute("ALTER TABLE settings ADD COLUMN quick_unit INTEGER DEFAULT 0")
#00 - id
#01 - type
#02 - email
#03 - secret
#04 - default notify
self.execute("""CREATE TABLE IF NOT EXISTS services (id INTEGER PRIMARY KEY AUTOINCREMENT,
type TEXT, email TEXT, secret TEXT, default_notification BOOLEAN)""")
#00 - id
#01 - key
#02 - value
self.execute("""CREATE TABLE IF NOT EXISTS internal (id INTEGER PRIMARY KEY AUTOINCREMENT,
key TEXT, value TEXT)""")
row = self.fetchone('SELECT * FROM settings WHERE 1')
if row == None:
indicator_icon_index = 0
#if os.name == 'nt':
# indicator_icon_index = 3
self.execute("""INSERT INTO settings (label, quick_label, time, date, command,
sound, sound_length, sound_loop, sound_loop_times, notification, time_format,
date_format, today_color, future_color, past_color, indicator_icon, dialog,
postpone, quick_minutes, news, quick_popup, quick_dialog, quick_sound, quick_info,
hide_indicator, indicator_icon_index, quick_slider, quick_unit)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
["", _("Quick Reminder"), _("now"), _("today"), "", "", 10, False, 5, True, 0, 0,
"#00007878ffff", "#04039e9e0000", "#ffffecec1f1f", True, False, 30, 30, True,
True, False, False, True, False, indicator_icon_index, True, 0]
)
def fix_time_from_to(self):
reminder_list = self.all_alarms()
for reminder in reminder_list:
new_time = datetimeutil.fix_time_from_to(reminder.time)
if reminder.time != new_time and new_time != None:
print reminder.time
print new_time
reminder.time = new_time
self.delete_alarm(reminder.id)
self.insert_alarm(reminder)
def fix_time(self):
reminder_list = self.all_alarms()
for reminder in reminder_list:
time = datetimeutil.str_time_simplify(reminder.time)
sql = """UPDATE alarms SET time = ?"""
self.execute(sql, [time])
def migrate_from_couch(self):
try:
from desktopcouch.records.server import CouchDatabase
from desktopcouch.records.record import Record
imported = True
except ImportError:
return
db = 0
try:
db = CouchDatabase('indicator-remindor-alarms', create=False)
except:
return
if not db.view_exists("list", "app"):
map_js = """function(doc) { emit(doc._id, doc) }"""
db.add_view("list", map_js, None, "app")
result = db.execute_view("list", "app")
result_list = list(result)
for row in result_list:
time_s = datetimeutil.format_time(row.value["hour"], row.value["minute"], row.value["am_pm"])
sound_length = 0
sound_loop = False
try:
sound_length = row.value["sound_length"]
except KeyError:
sound_length = 0
try:
sound_loop = row.value["sound_loop"]
except KeyError:
sound_loop = False
self.execute("""INSERT INTO alarms (label, notes, time, date, sound, sound_length,
sound_loop, notification, command) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)""",
(row.value["label"], row.value["notes"], time_s, row.value["date"],
row.value["sound"], sound_length, sound_loop,
row.value["notification"], row.value["command"]))
def delete_settings(self):
self.execute('DROP TABLE settings')
self.execute('DROP TABLE services')
self.execute('DROP TABLE internal')
self.has_settings = False
def version(self, v=None):
if v == None:
row = self.fetchone("SELECT * FROM version WHERE 1")
v = 0
if row is not None:
try:
v = int(row[1])
except:
pass
return v
else:
self.execute("DELETE FROM version WHERE 1")
self.execute("INSERT INTO version (version) VALUES(?)", [str(v)])
def execute(self, sql, data=None):
if data == None:
self.cursor.execute(sql)
else:
self.cursor.execute(sql, data)
sql_list = sql.split(' ')
if sql_list[0] != "SELECT":
self.connection.commit()
def fetchone(self, sql, data=None):
self.execute(sql, data)
return self.cursor.fetchone()
def fetchall(self, sql, data=None):
self.execute(sql, data)
return self.cursor.fetchall()
def close(self):
self.connection.close()
def alarm(self, id):
row = self.fetchone("SELECT * FROM alarms WHERE id = ?", [str(id)])
a = Alarm(-1)
a.set_data_row(row)
return a
def all_alarms(self):
result_list = self.fetchall("SELECT * FROM alarms")
alarm_list = list()
for row in result_list:
a = Alarm(-1)
a.set_data_row(row)
alarm_list.append(a)
return alarm_list
def insert_alarm(self, a):
sql = """INSERT INTO alarms (label, notes, time, date, sound,
sound_length, sound_loop, notification, command, boxcar_notification,
dialog, pushbullet_device) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
data = a.get_data_row()
self.execute(sql, data)
return self.cursor.lastrowid
def delete_alarm(self, id):
self.execute("DELETE FROM alarms WHERE id = ?", [str(id)])
def settings(self):
if not self.has_settings:
raise Exception('No settings to load!')
row = self.fetchone("SELECT * FROM settings WHERE 1")
return Settings(row)
def set_settings(self, s):
if not self.has_settings:
raise Exception('No settings to set!')
self.execute("DELETE FROM settings WHERE 1")
sql = """INSERT INTO settings (label, quick_label, time, date, command,
sound, sound_length, sound_loop, sound_loop_times, notification, time_format,
date_format, today_color, future_color, past_color, indicator_icon, dialog,
postpone, quick_minutes, news, quick_popup, quick_dialog, quick_sound, quick_info,
hide_indicator, indicator_icon_index, quick_slider, quick_unit)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
data = s.get_data_row()
self.execute(sql, data)
return self.cursor.lastrowid
def services(self):
if not self.has_settings:
raise Exception('No services to load!')
rows = self.fetchall("SELECT * FROM services WHERE 1")
return Services(rows)
def service(self, type):
if not self.has_settings:
raise Exception('No services to load!')
s = self.services()
return s.service(type)
def set_service(self, s):
if not self.has_settings:
raise Exception('No services to set!!')
self.remove_service(s.type)
sql = "INSERT INTO services (type, email, secret, default_notification) VALUES(?, ?, ?, ?)"
data = s.get_data_row()
self.execute(sql, data)
return self.cursor.lastrowid
def remove_service(self, type):
if not self.has_settings:
raise Exception('No services to remove!')
self.execute("DELETE FROM services WHERE type = ?", [type])
def get_internal(self, key):
if not self.has_settings:
raise Exception('No internals to load!')
internal = self.fetchone("SELECT * FROM internal WHERE key = ?", [key])
if internal != None:
value = internal[2]
else:
value = None
return value
def set_internal(self, key, value):
if not self.has_settings:
raise Exception('No internals to set!')
self.remove_internal(key)
self.execute("INSERT INTO internal (key, value) VALUES(?, ?)", [key, value])
return self.cursor.lastrowid
def remove_internal(self, key):
self.execute("DELETE FROM internal WHERE key = ?", [key])
class Alarm():
def __init__(self, id):
self.id = id
self.label = ""
self.notes = ""
self.time = "now"
self.date = "today"
self.sound_file = ""
self.sound_length = 0
self.sound_loop = True
self.notification = True
self.command = ""
self.boxcar = False
self.dialog = False
self.pushbullet_device = ''
def set_data_row(self, row):
if row != None:
self.id = row[0]
self.label = row[1]
self.notes = row[2]
self.time = row[3]
self.date = row[4]
self.sound_file = row[5]
self.sound_length = row[6]
self.sound_loop = row[7]
self.notification = row[8]
self.command = row[9]
self.boxcar = row[10]
self.dialog = row[11]
self.pushbullet_device = row[12]
def set_data(self, label, notes, time, date, sound_file, sound_length, sound_loop,
notification, command, boxcar, dialog, pushbullet_device):
self.label = label
self.notes = notes
self.time = time
self.date = date
self.sound_file = sound_file
self.sound_length = sound_length
self.sound_loop = sound_loop
self.notification = notification
self.command = command
self.boxcar = boxcar
self.dialog = dialog
self.pushbullet_device = pushbullet_device
def get_data_row(self):
row = list()
#row[0] = self.id
row.append(self.label)
row.append(self.notes)
row.append(self.time)
row.append(self.date)
row.append(self.sound_file)
row.append(self.sound_length)
row.append(self.sound_loop)
row.append(self.notification)
row.append(self.command)
row.append(self.boxcar)
row.append(self.dialog)
row.append(self.pushbullet_device)
return row
def get_data(self):
return (self.id, self.label, self.notes, self.time, self.date, self.sound_file,
self.sound_length, self.sound_loop, self.notification, self.command,
self.boxcar, self.dialog, self.pushbullet_device)
class Settings():
def __init__(self, row):
self.id = row[0]
self.label = row[1]
self.quick_label = row[2]
self.time = row[3]
self.date = row[4]
self.command = row[5]
self.sound_file = row[6]
self.sound_length = row[7]
self.sound_loop = row[8]
self.sound_loop_times = row[9]
self.notification = row[10]
self.time_format = row[11]
self.date_format = row[12]
self.today_color = row[13]
self.future_color = row[14]
self.past_color = row[15]
self.change_icon = row[16]
self.dialog = row[17]
self.postpone = row[18]
self.quick_minutes = row[19]
self.show_news = row[20]
self.quick_popup = row[21]
self.quick_dialog = row[22]
self.quick_sound = row[23]
self.quick_info = row[24]
self.hide_indicator = row[25]
self.indicator_icon = row[26]
self.quick_slider = row[27]
self.quick_unit = row[28]
def get_data_row(self):
row = list()
row.append(self.label)
row.append(self.quick_label)
row.append(self.time)
row.append(self.date)
row.append(self.command)
row.append(self.sound_file)
row.append(self.sound_length)
row.append(self.sound_loop)
row.append(self.sound_loop_times)
row.append(self.notification)
row.append(self.time_format)
row.append(self.date_format)
row.append(self.today_color)
row.append(self.future_color)
row.append(self.past_color)
row.append(self.change_icon)
row.append(self.dialog)
row.append(self.postpone)
row.append(self.quick_minutes)
row.append(self.show_news)
row.append(self.quick_popup)
row.append(self.quick_dialog)
row.append(self.quick_sound)
row.append(self.quick_info)
row.append(self.hide_indicator)
row.append(self.indicator_icon)
row.append(self.quick_slider)
row.append(self.quick_unit)
return row
class Services():
def __init__(self, rows):
self.dictionary = dict()
for row in rows:
self.dictionary[row[1]] = Service(row[1], row[2], row[3], row[4])
def service(self, type):
try:
return self.dictionary[type]
except KeyError:
return Service(type, "", "", False)
class Service():
def __init__(self, type, email, secret, default_notify):
self.type = type
self.email = email
self.secret = secret
self.default_notify = default_notify
def get_data_row(self):
row = list()
row.append(self.type)
row.append(self.email)
row.append(self.secret)
row.append(self.default_notify)
return row
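# Note (descriptive): Services wraps the service rows into a dict keyed by type, and
# Services.service() returns a blank Service for the requested type (empty email/secret,
# default_notify False) instead of raising KeyError, so callers can read its attributes
# unconditionally.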
|
bhdouglass/remindor-common
|
remindor_common/database.py
|
Python
|
gpl-3.0
| 21,951
|
[
"Brian"
] |
25073b18441c7922defba4c10aadafce643e91135d336cead528c7992948b669
|
from copy import deepcopy
from block import Block
from comment import Comment
from filter import Filter
from helpers.base_types_and_classes import atlas_bases, gg_es_bases, gg_spell_bases, jewellery, good_spell_bases, \
good_armour_bases, gg_phys_bases, good_phys_bases, gg_atlas_bases, other_atlas_bases, melee_only_classes
from helpers.colors import Colors
from helpers.general import add_failsafe, ilvl_swap, small_sizes
from helpers.map import get_maps_by_drop_level
from properties.color import Color
from properties.comparer import Comparer
from properties.sound import Sound
from theme import Theme
def main():
t1_uniques = ['Stibnite Flask', 'Ruby Flask', 'Topaz Flask', 'Sapphire Flask', 'Silver Flask', 'Void Axe',
'Prophecy Wand', 'Jewelled Foil', 'Royal Axe', 'Cutlass', 'Sorcerer Boots', 'Occultist\'s Vestment',
'Crusader Boots', 'Rawhide Boots', 'Ezomyte Tower Shield', 'Deicide Mask', 'Glorious Plate',
'Ezomyte Burgonet', 'Assassin\'s Garb', 'Greatwolf Talisman']
t2_uniques = ['Granite Flask', 'Bismuth Flask', 'Fiend Dagger', 'Imperial Staff', 'Carnal Sceptre', 'Siege Axe',
'Abyssal Axe', 'Sacrificial Garb', 'Ritual Sceptre', 'Judgement Staff', 'Vaal Sceptre',
'Imperial Maul', 'Steelhead', 'Carnal Sceptre', 'Terror Maul', 'Jingling Spirit Shield',
'Savant\'s Robe', 'Gladiator Plate', 'Vaal Regalia', 'Sacrificial Garb', 'Archon Kite Shield',
'Legion Gloves', 'Raven Mask', 'Champion Kite Shield', 'Nightmare Bascinet', 'Clasped Mitts',
'Wyrmscale Doublet', 'Gold Ring', 'Crystal Belt', 'Citrine Amulet', 'Ebony Tower Shield',
'Onyx Amulet', 'Goathide Boots', 'Two-Stone Ring']
t4_uniques = ['Plank Kite Shield']
map_no_sound_tier_difference = 5
special_maps = ['Beach']
shaped_maps_special_tier = 10
t1_currency = ['Exalted Orb', 'Eternal Orb', 'Albino Rhoa Feather', 'Ancient Reliquary Key']
t2_currency = ['Deafening Essence', 'Shrieking Essence', 'Divine Orb', 'Unshaping Orb', 'Essence of Hysteria',
'Essence of Insanity', 'Essence of Horror', 'Essence of Delirium', 'Blessing']
t3_currency = ['Cartographer\'s Sextant', 'Chaos Orb', 'Cartographer\'s Seal', 'Orb of Fusing', 'Orb of Regret',
'Regal Orb', 'Blessed Orb', 'Gemcutter\'s Prism', 'Orb of Scouring', 'Vaal Orb',
'Remnant of Corruption', 'Essence of', 'Cartographer\'s Chisel', 'Orb of Alchemy']
t4_currency = ['Silver Coin', 'Orb of Chance', 'Jeweller\'s Orb', 'Orb of Alteration', 'Chromatic Orb']
t1_divs = ['House of Mirrors', 'The Doctor', 'The Fiend', 'Hunter\'s Reward', 'The Dragon\'s Heart', 'Mawr Blaidd',
'The Last One Standing', 'The Offering', 'The Ethereal', 'The Queen', 'Abandoned Wealth',
'The Brittle Emperor', 'The Immortal', 'The Artist', 'Wealth and Power', 'Pride Before the Fall',
'The Enlightened', 'The King\'s Heart', 'Bowyer\'s Dream', 'The Hunger', 'The Celestial Justicar',
'Spark and the Flame', 'Polymath']
t2_divs = ['Chaotic Disposition', 'The Void', 'The Cartographer', 'The Dapper Prodigy', 'The Vast', 'The Dark Mage',
'Last Hope', 'The Valkyrie', 'The Sephirot', 'The Hoarder', 'The Chains that Bind', 'The Warlord',
'The Aesthete', 'Saint\'s Treasure', 'The Thaumaturgist', 'Heterochromia', 'The Porcupine',
'The Stormcaller', 'The Soul', 'Emperor of Purity', 'The Road to Power', 'The Arena Champion',
'The Gladiator', 'Glimmer of Hope', 'The Tyrant', 'The Union', 'The Risk', 'The Trial',
'Scholar of the Seas', 'Lucky Deck', 'Humility', 'The Penitent', 'The Surveyor',
'Lysah\'s Respite', 'The Inventor', 'The Jester', 'Vinia\'s Token', 'Rats', 'The Wrath', 'Hope',
'Treasure Hunter', 'The Explorer', 'The Body', 'Jack in the Box', 'The Traitor', 'Valley of Steel Boxes',
'Wolven King\'s Bite', 'Wretched', 'The Opulent', 'Might is Right', 'The Fletcher', 'The Forsaken',
'The Formless Sea', 'The Demoness', 'Time-Lost Relic', 'The Wolf', 'Earth Drinker', 'Standoff',
'Merciless Armament']
t4_divs = ['The Flora\'s Gift', 'Her Mask', 'Rain of Chaos', 'Thunderous Skies', 'The Gambler']
shit_divs = ['The Carrion Crow', 'Doedre\'s Madness']
show_jewellery = 0
chromatic_recipe = 2
flasks = 2
crude_bow = True
f = Filter()
t = themes()
f.add(Comment('Section: #001 - Special Stuff\n'))
f.add(Block(theme=t['t2_unique'],
comment='Tabula, we have to put this before everything cause our 6L block will override otherwise',
socket_group='W' * 6,
rarity='Unique',
base_type='Simple Robe'))
f.add(Block(theme=t['lab_and_shaper_orb'], _class='Quest Items', base_type='Shaper\'s Orb'))
f.add(Block(theme=t['lab_and_shaper_orb'], base_type='Offering to the Goddess'))
f.add(Block(theme=t['quest'], _class=['Quest Items', 'Microtransactions', 'Hideout Doodads']))
f.add(Block(theme=t['quest'], _class='Labyrinth', play_alert_sound=Sound(1)))
f.add(Block(theme=t['t1'], item_level=1, rarity='Normal',
base_type=['Glass Shank', 'Driftwood Wand', 'Rusted Sword', 'Crude Bow', 'Driftwood Sceptre'],
play_alert_sound=None,
comment='Make ilvl 1 of starter weapons ugly so people know they forgot their racing filter @TODO: add others'))
f.add(Comment('Section: #002 - GG!!!\n'))
f.add(Block(theme=t['t1'], base_type=['Mirror of Kalandra', 'Fishing Rod']))
f.add(Block(theme=t['t1'], linked_sockets=Comparer(6, '>=')))
f.add(Comment('Section: #003 - Uniques\n'))
f.add(Block(theme=t['t1_unique'], rarity='Unique', base_type=t1_uniques, comment='T1 Uniques'))
f.add(Block(theme=t['t2_unique'], rarity='Unique', _class=['Map'], comment='T2 Unique Maps'))
f.add(Block(theme=t['t2_unique'], rarity='Unique', base_type=t2_uniques, comment='T2 Uniques'))
f.add(Block(theme=t['t4_unique'], rarity='Unique', base_type=t4_uniques, sockets=Comparer(6, '<'),
linked_sockets=Comparer(5, '<'), comment='T4 Uniques'))
f.add(Block(theme=t['t3_unique'], rarity='Unique', comment='Other(T3) Uniques'))
f.add(Comment('Section: #004 - Maps\n'))
maps = get_maps_by_drop_level()
f.add(Block(theme=t['good_map'], _class='Maps', base_type='Shaped',
drop_level=Comparer(shaped_maps_special_tier + 67, '>=')))
f.add(Block(theme=t['good_map'], _class='Maps', base_type=special_maps, comment='Special Maps(usually MF)'))
for drop_level in list(maps.keys())[::-1]:
if drop_level == 68:
drop_level = 58
base_block = Block(_class='Maps', drop_level=Comparer(drop_level, '>='),
item_level=Comparer(drop_level - map_no_sound_tier_difference, '>='))
if drop_level >= 82:
base_block.set_theme(t['good_map'])
else:
base_block.set_theme(t['normal_map'])
if drop_level <= 68:
base_block.set_property('play_alert_sound', None)
f.add(base_block)
no_requirement = deepcopy(base_block)
no_requirement.set_theme(t['low_map'])
no_requirement.set_property('item_level', None)
f.add(no_requirement)
f.add(Comment('Section: #005 - Fragments\n'))
f.add(Block(play_alert_sound=Sound(2), _class='Maps')) # map failsafe
f.add(Block(theme=t['t1'], _class='Map Fragments',
base_type=['Mortal Hope', 'Mortal Ignorance', 'Fragment of the Phoenix',
'Fragment of the Minotaur', 'Fragment of the Chimera', 'Fragment of the Hydra']))
f.add(Block(theme=t['t2_fragment'], _class='Map Fragments',
base_type=['Mortal', 'Sacrifice at Midnight', 'Eber\'s Key', 'Inya\'s Key', 'Volkuur\'s Key',
'Yriel\'s Key', 'Breachstone']))
f.add(Block(theme=t['t3_fragment'], _class='Map Fragments'))
f.add(Comment('Section: #006 - Currency + Essences + Leaguestones\n'))
f.add(Block(theme=t['t1'], base_type=t1_currency))
f.add(Block(theme=t['t2_currency'], base_type=t2_currency))
f.add(Block(theme=t['leaguestone'], _class='Leaguestone'))
f.add(Block(theme=t['t3_currency'], base_type=t3_currency))
f.add(Block(theme=Theme(text_color=Color(231, 180, 120),
background_color=Color(0, 0, 0, 180),
border_color=Color(231, 180, 120),
font_size=45),
base_type='Perandus Coin'))
f.add(Block(theme=t['breach_big'], _class=['Stackable Currency'], base_type=['Splinter of Chayula', 'Splinter of Esh']))
f.add(Block(theme=t['breach'], _class=['Stackable Currency'], base_type=['Splinter']))
f.add(Block(theme=t['t4_currency'], base_type=t4_currency))
f.add(Block(_class=['Currency'], base_type=['Wisdom']))
f.add(Block(_class=['Currency'], base_type=['Portal']))
f.add(Block(theme=t['t5_currency'], _class=['Currency', 'Stackable Currency']))
f.add(Comment('Section: #007 - Divination Cards\n'))
f.add(Block(theme=t['t4_div'], base_type='The Wolf\'s Shadow',
comment='Added here so that "The Wolf" doesn\'t get confused with "The Wolf\'s Shadow"(Thanks Neversink for this tip!)'))
f.add(Block(theme=t['t1_div'], base_type=t1_divs))
f.add(Block(theme=t['t2_div'], base_type=t2_divs))
f.add(Block(theme=t['t4_div'], base_type=t4_divs))
f.add(Block(show=False, base_type=shit_divs))
f.add(Block(theme=t['t3_div'], _class='Divination Card'))
f.add(Comment('Section: #008 - Socket/Link Stuff\n'))
f.add(Block(theme=t['five_link'], play_alert_sound=Sound(1), linked_sockets=5))
f.add(Block(theme=t['six_socket'], sockets=Comparer(6, '>=')))
f.add(Comment('Section: #009 - Gems\n'))
f.add(
Block(theme=t['t1_gems'], _class='Skill Gems', base_type=['Empower', 'Enlighten'], quality=Comparer(10, '>=')))
f.add(Block(theme=t['t1_gems'], _class='Skill Gems', quality=Comparer(19, '>=')))
f.add(Block(theme=t['t2_gems'], _class='Skill Gems', base_type=['Portal', 'Empower', 'Enlighten', 'Vaal Haste']))
f.add(Block(theme=t['t2_gems'], _class='Skill Gems', quality=Comparer(13, '>=')))
f.add(Block(theme=t['t3_gems'], _class='Skill Gems', base_type=['Vaal', 'Detonate Mines', 'Added Chaos Damage']))
f.add(Block(theme=t['t3_gems'], _class='Skill Gems', quality=Comparer(1, '>=')))
f.add(Block(_class='Skill Gems'))
f.add(Comment('Section: #010 - Rare Evaluation\n'))
f.add(Block(theme=t['rare_jewel'], _class='Jewel', rarity='Rare'))
f.add(Block(theme=t['rare_talisman'], base_type=['Talisman'], rarity='Rare'))
f.add(Block(theme=t['talisman'], base_type=['Talisman'], rarity=Comparer('Rare', '<')))
f.add(Block(theme=t['ggg_rare'], item_level=Comparer(84, '>='), base_type=atlas_bases, rarity='Rare'))
f.add(
Block(theme=t['gg_rare'], item_level=Comparer(84, '>='), rarity='Rare', base_type=gg_es_bases + gg_spell_bases))
f.add(Block(theme=t['gg_rare'], item_level=Comparer(84, '>='), rarity='Rare', _class=jewellery))
f.add(Block(theme=t['gg_rare'], item_level=Comparer(84, '>='), rarity='Rare',
base_type=good_spell_bases + good_armour_bases, set_font_size=38))
f.add(Block(theme=t['gg_rare'], item_level=Comparer(84, '>='), rarity='Rare', base_type='Sai', _class='Daggers',
set_font_size=38))
f.add(Block(theme=t['gg_rare'], item_level=Comparer(83, '>='), rarity='Rare', base_type=gg_phys_bases))
f.add(Block(theme=t['gg_rare'], item_level=Comparer(83, '>='), rarity='Rare', base_type=good_phys_bases,
set_font_size=38))
f.add(Block(theme=t['ggg_rare'], rarity='Rare', base_type=gg_atlas_bases))
f.add(
Block(theme=t['ggg_rare'], rarity='Rare', play_alert_sound=None, set_font_size=42, base_type=other_atlas_bases))
f.add(Block(theme=t['good_rare_ilvl'], rarity='Rare', base_type=good_phys_bases, item_level=Comparer(73, '>=')))
f.add(Block(theme=t['good_rare_ilvl'], item_level=Comparer(72, '>='), rarity='Rare', base_type='Sai',
_class='Daggers'))
f.add(Block(theme=t['good_rare_ilvl'], item_level=Comparer(72, '>='), rarity='Rare',
base_type=good_armour_bases + gg_es_bases))
f.add(Block(theme=t['good_rare'], rarity='Rare',
base_type=good_phys_bases + gg_es_bases + good_armour_bases + gg_spell_bases + good_spell_bases))
ilvl_swap(f, Block(theme=t['good_rare_ilvl'], item_level=Comparer(75, '>='), rarity='Rare', _class=jewellery,
set_font_size=40), t['good_rare'], set_font_size=40)
ilvl_swap(f, Block(theme=t['good_rare_ilvl'], set_font_size=35, rarity='Rare', item_level=Comparer(73, '>='),
drop_level=Comparer(59, '>='), _class=['Wands', 'Daggers', 'Sceptres']), t['good_rare'],
set_font_size=35)
f.add(Block(theme=t['shit_rare'], item_level=Comparer(65, '>='), drop_level=Comparer(55, '<='),
_class=melee_only_classes, rarity='Rare'))
ilvl_swap(f, Block(theme=t['good_rare_ilvl'], item_level=Comparer(74, '>='), rarity='Rare', _class='Quivers',
set_font_size=35,
base_type=['Spike-Point Arrow Quiver', 'Broadhead Arrow Quiver', 'Two-Point Arrow Quiver']),
lower_theme=t['good_rare'], set_font_size=35)
ilvl_swap(f, Block(theme=t['good_rare_ilvl'], item_level=Comparer(72, '>='), drop_level=Comparer(44, '>='),
rarity='Rare', _class=['Gloves', 'Boots', 'Helmets'], set_font_size=35),
lower_theme=t['good_rare'], set_font_size=35)
f.add(Block(theme=t['shit_rare'], item_level=Comparer(65, '>='), drop_level=Comparer(15, '<='),
_class=['Gloves', 'Boots', 'Helmets'],
rarity='Rare'))
f.add(Block(theme=t['shit_rare'], item_level=Comparer(65, '>='), drop_level=Comparer(50, '<='), _class='Shields',
rarity='Rare'))
f.add(
Block(theme=t['shit_rare'], item_level=Comparer(65, '>='), drop_level=Comparer(47, '<='), _class='Body Armour',
rarity='Rare'))
f.add(Block(theme=t['good_rare'], set_font_size=45, item_level=Comparer(30, '<'), rarity='Rare', _class='Boots'))
f.add(Block(theme=t['good_rare'], item_level=Comparer(65, '<'), rarity='Rare',
_class=['Boots', 'Helmets', 'Gloves', 'Sceptres', 'Daggers', 'Wands']))
f.add(Block(theme=t['good_rare'], item_level=Comparer(10, '<'), rarity='Rare'))
# nice example of what you can do with PoEFilter
for drop_level in range(5, 56):
f.add(Block(theme=t['good_rare'], item_level=Comparer(drop_level + 10, '<'),
drop_level=Comparer(drop_level, '>'),
rarity='Rare', set_font_size=35))
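# Descriptive note on the loop above: it emits one rule per drop level from 5 to 55, highlighting
# rare items whose item level has not outgrown the base's drop level by more than roughly nine
# levels, i.e. bases that are still "current" for the zone being levelled through.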
f.add(Block(theme=Theme(background_color=Colors.BLACK, border_color=Color(150, 150, 150), font_size=35),
item_level=Comparer(20, '<'), rarity='Rare'))
f.add(Block(theme=Theme(background_color=Color(0, 0, 0, 180), border_color=Color(150, 150, 150), font_size=35),
item_level=Comparer(65, '<'), rarity='Rare'))
f.add(Block(theme=Theme(background_color=Color(0, 0, 0, 225), border_color=Color(150, 150, 150), font_size=26),
rarity='Rare', width=Comparer(2, '>='), height=Comparer(4, '>=')))
small_sizes(f, Block(rarity='Rare',
theme=Theme(background_color=Colors.BLACK, border_color=Color(150, 150, 150), font_size=35)))
f.add(Block(theme=Theme(background_color=Color(0, 0, 0, 180), border_color=Color(150, 150, 150), font_size=35),
rarity='Rare'))
f.add(Comment('Section: #011 - Normal and Magic Items\n'))
# f.add(Block(theme=Theme(background_color=Color(0, 0, 0, 0), border_color=Colors.BLOOD_RED, text_color=Colors.BLOOD_RED), comment='Animate Weapon', rarity='Normal', _class=['One Hand', 'Two Hand', 'Staves', 'Daggers', 'Thrusting', 'Sceptres', 'Claws']))
f.add(Block(comment='Redblade(potentially)', rarity='Magic', _class='Helmets', identified=True,
set_border_color=Color(210, 0, 0)))
f.add(Block(theme=t['gg_atlas_base'], base_type=atlas_bases, item_level=Comparer(84, '>=')))
f.add(Block(theme=t['gg_white_base'], item_level=Comparer(84, '>='), base_type=gg_es_bases + gg_spell_bases))
f.add(Block(theme=t['gg_white_base'], item_level=Comparer(84, '>='), _class=jewellery))
f.add(
Block(theme=t['ok_white_base'], item_level=Comparer(84, '>='), base_type=good_spell_bases + good_armour_bases))
f.add(Block(theme=t['ok_white_base'], item_level=Comparer(84, '>='), base_type='Sai', _class='Daggers'))
f.add(Block(theme=t['gg_white_base'], item_level=Comparer(83, '>='), base_type=gg_phys_bases))
f.add(Block(theme=t['ok_white_base'], item_level=Comparer(83, '>='), base_type=good_phys_bases))
f.add(Block(theme=t['gg_atlas_base'], base_type=gg_atlas_bases))
f.add(Block(theme=t['atlas_base'], base_type=other_atlas_bases))
if crude_bow:
f.add(Block(theme=t['gg_white_base'], base_type=['Crude Bow'], item_level=Comparer(50, '>=')))
if chromatic_recipe >= 1:
small_sizes(f, Block(theme=t['chromatic_item'], socket_group='RGB'))
if chromatic_recipe == 2:
f.add(Block(theme=t['chromatic_item'], set_font_size=30, socket_group='RGB'))
f.add(Block(theme=t['chance_item'], rarity='Normal',
base_type=['Sorcerer Boots', 'Occultist\'s Vestment', 'Sapphire Flask',
'Spike-Point Arrow Quiver', 'Imperial Staff']))
if show_jewellery >= 2:
f.add(Block(theme=t['alch_whites'], item_level=Comparer(67, '>='), rarity='Normal', _class=jewellery,
base_type=['Onyx', 'Ruby', 'Sapphire', 'Topaz', 'Two-Stone', 'Diamond', 'Prismatic', 'Unset',
'Gold',
'Citrine', 'Turquoise', 'Agate', 'Coral Ring', 'Moonstone', 'Leather', 'Heavy', 'Amber',
'Jade', 'Lapis', 'Rustic', 'Iron Ring']))
if show_jewellery >= 1:
f.add(Block(theme=t['magic_jewellery'], item_level=Comparer(67, '>='), rarity='Magic', _class=jewellery))
f.add(Block(theme=t['magic_jewel'], _class='Jewel'))
f.add(Block(item_level=Comparer(75, '>='), base_type=gg_es_bases))
f.add(Comment('Section: #0012 - Flasks\n'))
if flasks >= 1:
f.add(Block(theme=t['high_quality_flask'], quality=Comparer(18, '>='), rarity='Magic', _class='Utility Flasks'))
f.add(
Block(theme=t['high_quality_flask'], quality=Comparer(15, '>='), rarity='Normal', _class='Utility Flasks'))
f.add(
Block(theme=t['high_quality_flask'], quality=Comparer(1, '>='), _class='Utility Flasks', set_font_size=38))
f.add(Block(theme=t['utility_flask'], _class='Utility Flasks', item_level=Comparer(10, '<='), set_font_size=38))
f.add(Block(theme=t['utility_flask'], _class='Utility Flasks', item_level=Comparer(25, '<='), set_font_size=37))
f.add(Block(theme=t['utility_flask'], _class='Utility Flasks', item_level=Comparer(50, '<='), set_font_size=36))
if flasks >= 2:
f.add(Block(theme=t['utility_flask'], _class='Utility Flasks'))
f.add(Block(_class=['Life Flask', 'Mana Flask'], item_level=Comparer(72, '>='), set_font_size=20))
hybrid_flask_ilvl_to_keyword = {
15: 'Small',
25: 'Medium',
35: 'Large',
45: 'Colossal',
55: 'Sacred',
67: 'Hallowed',
}
for ilvl, base_type in sorted(hybrid_flask_ilvl_to_keyword.items()):
f.add(Block(set_font_size=38, item_level=Comparer(ilvl, '<='), _class='Hybrid Flask', base_type=base_type))
normal_flask_ilvl_to_keyword = {
5: 'Small',
8: 'Medium',
12: 'Large',
18: 'Greater',
24: 'Grand',
30: 'Giant',
37: 'Colossal',
42: 'Sacred',
48: 'Hallowed',
55: 'Sanctified',
66: ['Divine', 'Eternal'],
}
for ilvl, base_type in sorted(normal_flask_ilvl_to_keyword.items()):
f.add(Block(set_font_size=38, item_level=Comparer(ilvl, '<='), _class='Flask', base_type=base_type))
if flasks >= 2:
f.add(Block(rarity=Comparer('Magic', '<='), base_type='Flask', set_font_size=30))
f.add(Comment('Section: #0013 - Leveling\n'))
f.add(Block(theme=t['early_survival'], item_level=Comparer(12, '<'),
base_type=['Coral Ring', 'Sapphire Ring', 'Leather Belt']))
f.add(Block(theme=t['early_survival'], item_level=Comparer(12, '<'), set_font_size=38,
base_type=['Padded Vest', 'Light Brigandine', 'Leather Cap', 'Shabby Jerkin', 'Scale Vest',
'Rawhide Boots', 'Rawhide Gloves', 'Leatherscale Boots', 'Wrapped Boots', 'Goathide Gloves',
'Fishscale Gauntlets', 'Wrapped Mitts', 'Battered Helm', 'Scare Mask', 'Goathide Buckler',
'Pine Buckler', 'Rotted Round Shield', 'Spiked Bundle']))
f.add(Block(item_level=Comparer(35, '<'), rarity='Normal', _class=jewellery))
f.add(Block(item_level=Comparer(66, '<='), rarity='Magic', _class=jewellery))
f.add(Block(item_level=Comparer(68, '<'), rarity='Magic', linked_sockets=4, _class='Boots', theme=Theme(
border_color=Color(255, 255, 255), font_size=38
)))
f.add(Block(item_level=Comparer(25, '<'), rarity='Magic', set_font_size=38, _class='Boots'))
f.add(Block(item_level=Comparer(35, '<'), rarity=Comparer('Magic', '<='), linked_sockets=3,
_class=['Sceptres', 'Wands', 'Daggers'],
theme=Theme(border_color=Color(255, 255, 255), font_size=36)))
f.add(Block(item_level=Comparer(25, '<'), rarity=Comparer('Magic', '<='), _class=['Sceptres', 'Wands', 'Daggers'],
set_font_size=36))
f.add(Block(item_level=Comparer(66, '<='), linked_sockets=Comparer(4, '>='),
theme=Theme(border_color=Color(255, 255, 255), font_size=36)))
f.add(Block(item_level=Comparer(25, '<='), linked_sockets=Comparer(3, '>='), _class=['Gloves', 'Boots', 'Helmets'],
set_font_size=36))
f.add(Block(item_level=Comparer(60, '<='), linked_sockets=Comparer(3, '>='), _class=['Gloves', 'Boots', 'Helmets']))
f.add(Block(item_level=Comparer(12, '<'), linked_sockets=Comparer(3, '>='),
theme=Theme(font_size=36, background_color=Color(0, 0, 0, 185))))
f.add(Block(item_level=Comparer(12, '<'), rarity='Normal', width=2, height=Comparer(3, '>='),
_class=['Body Armours', 'Shields'],
theme=Theme(text_color=Color(200, 200, 200, 180), background_color=Color(0, 0, 0, 165))))
f.add(Block(item_level=Comparer(12, '<'), rarity='Normal', width=2, height=Comparer(4, '>='),
theme=Theme(text_color=Color(200, 200, 200, 180), background_color=Color(0, 0, 0, 165))))
small_sizes(f, Block(item_level=Comparer(12, '<='), rarity='Normal',
theme=Theme(text_color=Color(200, 200, 200), background_color=Color(0, 0, 0, 185))))
f.add(Block(item_level=Comparer(12, '<'), rarity='Normal',
theme=Theme(font_size=32, background_color=Color(0, 0, 0, 165))))
leveling_weapon_classes = ['Two Hand', 'Bows', 'One Hand', 'Wand', 'Sceptre', 'Staves', 'Claws']
item_and_drop_levels = [(13, 8), (14, 9), (16, 11), (18, 14), (20, 17), (22, 19), (24, 21), (26, 24), (28, 26),
(30, 28), (32, 30), (34, 32), (36, 34), (38, 37), (40, 39), (42, 41), (48, 46), (50, 48),
(52, 50), (54, 52), (56, 54), (58, 56), (62, 60), (66, 65), (68, 67)]
for i_and_d in item_and_drop_levels:
f.add(Block(item_level=Comparer(i_and_d[0], '<'), drop_level=Comparer(i_and_d[1], '>='),
rarity=Comparer('Magic', '<='),
_class=leveling_weapon_classes, theme=Theme(font_size=32, background_color=Color(0, 0, 0, 165))))
f.add(Block(item_level=Comparer(12, '<'), rarity='Magic', width=2, height=Comparer(3, '>='),
_class=['Body Armours', 'Shields'],
theme=Theme(text_color=Color(156, 156, 235, 150), background_color=Color(0, 0, 0, 165))))
f.add(Block(item_level=Comparer(12, '<'), rarity='Magic', width=2, height=Comparer(4, '>='),
theme=Theme(text_color=Color(156, 156, 235, 150), background_color=Color(0, 0, 0, 165))))
small_sizes(f, Block(item_level=Comparer(12, '<='), rarity='Magic',
theme=Theme(background_color=Color(0, 0, 0, 185))))
f.add(Block(item_level=Comparer(24, '<'), rarity='Magic',
theme=Theme(font_size=32, background_color=Color(0, 0, 0, 165))))
f.add(Block(item_level=Comparer(36, '<'), rarity='Magic',
theme=Theme(font_size=26, background_color=Color(0, 0, 0, 165))))
f.add(Comment('Section: #014 - Failsafe\n'))
add_failsafe(f)
with open('Xai.filter', encoding='utf-8', mode='w') as file:
file.write(str(f))
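# Illustrative sketch (not part of the original script): each rule in main() is a
# Block combining a Theme (colours, font size, alert sound) with match conditions
# such as rarity, item_level or socket colours. A single standalone rule could be
# built the same way; the values below are hypothetical.
#
#     example_rule = Block(theme=Theme(border_color=Color(255, 255, 255), font_size=40),
#                          rarity='Normal', item_level=Comparer(67, '>='), _class='Boots')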
def themes():
t1_highlight = Colors.BLOOD_RED
t1_background = Colors.WHITE
highlight_1 = Color(0, 93, 255)
highlight_2 = Color(53, 255, 177)
break_1 = Color(255, 230, 122)
break_2 = Color(122, 255, 148)
break_3 = Color(0, 52, 112)
breach = Color(65, 20, 80)
gem_colour = highlight_2.darken(0.4)
good_rare = Color(198, 186, 9).darken(0.1)
rare = good_rare.darken(0.2)
return {
'lab_and_shaper_orb': Theme(text_color=highlight_1, background_color=break_1, border_color=highlight_1,
font_size=45, alert_sound=1),
'quest': Theme(text_color=break_2, border_color=break_2, font_size=45, alert_sound=1),
't1': Theme(text_color=t1_highlight, border_color=t1_highlight, background_color=t1_background, font_size=45,
alert_sound=8),
't1_unique': Theme(text_color=t1_highlight, border_color=t1_highlight, background_color=t1_background,
font_size=45,
alert_sound=6),
't2_unique': Theme(text_color=Colors.WHITE, border_color=Colors.WHITE, background_color=Colors.UNIQUE,
alert_sound=5, font_size=45),
't3_unique': Theme(text_color=Colors.BLACK, border_color=highlight_1, background_color=Colors.UNIQUE,
alert_sound=3, font_size=40),
't4_unique': Theme(text_color=break_1, border_color=break_1, background_color=Colors.UNIQUE,
font_size=35),
'low_map': Theme(background_color=break_2.change_opacity(122), text_color=Colors.BLACK,
border_color=Colors.BLACK, font_size=35),
'normal_map': Theme(background_color=break_2, text_color=Colors.BLACK, border_color=Colors.BLACK,
alert_sound=2, font_size=40),
'good_map': Theme(background_color=break_2, text_color=highlight_1, border_color=highlight_1,
alert_sound=9, font_size=45),
't2_fragment': Theme(text_color=Colors.BLACK, background_color=Colors.BLOOD_RED, border_color=Colors.BLACK,
font_size=45, alert_sound=2),
't3_fragment': Theme(text_color=Colors.BLOOD_RED, background_color=Colors.BLACK, border_color=Colors.BLOOD_RED,
font_size=38, alert_sound=2),
't2_currency': Theme(background_color=highlight_2, text_color=break_3, border_color=break_3, alert_sound=5,
font_size=45),
't3_currency': Theme(background_color=break_3, text_color=Colors.WHITE, border_color=Colors.WHITE,
alert_sound=1, font_size=41),
't4_currency': Theme(background_color=highlight_1, text_color=Colors.WHITE, border_color=Colors.WHITE,
font_size=38),
't5_currency': Theme(background_color=highlight_1.change_opacity(0.5), text_color=Colors.WHITE,
border_color=break_2),
't1_div': Theme(text_color=t1_highlight, border_color=highlight_1, background_color=t1_background, font_size=45,
alert_sound=6),
't2_div': Theme(text_color=break_3, border_color=break_3, background_color=Colors.WHITE, font_size=45,
alert_sound=2),
't3_div': Theme(text_color=highlight_1, border_color=highlight_1, background_color=Colors.WHITE, font_size=40,
alert_sound=2),
't4_div': Theme(text_color=highlight_1, border_color=highlight_1,
background_color=Colors.WHITE.change_opacity(0.5)),
't1_gems': Theme(text_color=break_3, border_color=break_3, background_color=t1_background, font_size=45,
alert_sound=6),
't2_gems': Theme(text_color=break_3, border_color=break_3, background_color=Colors.WHITE, font_size=42,
alert_sound=1),
't3_gems': Theme(text_color=gem_colour, border_color=gem_colour, font_size=37),
'five_link': Theme(background_color=highlight_1, border_color=Colors.WHITE, font_size=38, alert_sound=1),
'six_socket': Theme(background_color=break_3, border_color=Colors.WHITE, alert_sound=7, font_size=45),
'leaguestone': Theme(background_color=highlight_2.darken(0.3), text_color=Colors.WHITE,
border_color=Colors.WHITE,
alert_sound=4, font_size=38),
'breach': Theme(background_color=breach, text_color=Colors.BLOOD_RED, border_color=Colors.BLOOD_RED),
'breach_big': Theme(background_color=breach, text_color=Colors.BLOOD_RED, border_color=Colors.BLOOD_RED, font_size=40),
'rare_jewel': Theme(background_color=good_rare, text_color=Colors.WHITE, border_color=Colors.WHITE,
font_size=45),
'ggg_rare': Theme(background_color=good_rare, text_color=break_3, border_color=break_3, font_size=45,
alert_sound=5),
'gg_rare': Theme(background_color=good_rare, text_color=Colors.WHITE, border_color=break_3, font_size=45),
'good_rare': Theme(background_color=good_rare, text_color=Colors.WHITE, border_color=Colors.BLACK,
font_size=40),
'good_rare_ilvl': Theme(background_color=good_rare, text_color=Colors.WHITE, border_color=break_3,
font_size=42),
'shit_rare': Theme(text_color=rare, border_color=Colors.BLOOD_RED, font_size=30),
'rare_talisman': Theme(background_color=good_rare, text_color=Colors.BLOOD_RED, border_color=Colors.BLOOD_RED,
font_size=45, alert_sound=1),
'talisman': Theme(border_color=Colors.BLOOD_RED, font_size=38),
'gg_atlas_base': Theme(background_color=Colors.WHITE, text_color=break_3, border_color=break_3, font_size=45,
alert_sound=5),
'atlas_base': Theme(background_color=Colors.WHITE, text_color=break_3, border_color=break_3, font_size=40),
'gg_white_base': Theme(font_size=45, border_color=break_3),
'ok_white_base': Theme(font_size=40, border_color=highlight_1),
'chromatic_item': Theme(border_color=break_2, background_color=Colors.BLACK.change_opacity(0.5)),
'chance_item': Theme(border_color=gem_colour, font_size=40),
'alch_whites': Theme(border_color=rare),
'magic_jewel': Theme(border_color=break_3, background_color=highlight_1.change_opacity(0.5)),
'magic_jewellery': Theme(font_size=25),
'high_quality_flask': Theme(background_color=Color(75, 75, 75), border_color=Colors.WHITE, font_size=45),
'utility_flask': Theme(background_color=Color(75, 75, 75), border_color=Colors.BLACK),
'early_survival': Theme(border_color=Colors.BLOOD_RED, font_size=40)
}
if __name__ == '__main__':
main()
|
teuneboon/PoEFilter
|
examples/new_xai_filter.py
|
Python
|
mit
| 32,201
|
[
"Amber",
"CRYSTAL"
] |
cb342ab3d8db191411e44f738c4716dfb636589a47a4ccef8b2bb0808f52f809
|
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
from __future__ import print_function
from argparse import ArgumentParser
import collections
import gzip
import os
from Bio import SeqIO
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir, is_gzipped
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.pipeline.run_info import ALLOWED_CONTIG_NAME_CHARS
from bcbio.galaxy import loc
from bcbio.log import logger
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def extract_if_gzipped(filename):
stem, ext = os.path.splitext(filename)
if ext == ".gz":
subprocess.check_call("gzip -cd %s > %s" % (filename, stem), shell=True)
return stem
else:
return filename
def gff3_to_gtf(gff3_file):
dialect = {'field separator': '; ',
'fmt': 'gtf',
'keyval separator': ' ',
'leading semicolon': False,
'multival separator': ',',
'quoted GFF2 values': True,
'order': ['gene_id', 'transcript_id'],
'repeated keys': False,
'trailing semicolon': True}
out_file = os.path.splitext(gff3_file)[0] + ".gtf"
if file_exists(out_file):
return out_file
logger.info("Converting %s to %s." % (gff3_file, out_file))
if _is_from_ncbi(gff3_file):
logger.info("NCBI format detected by the presence of the %s key."
% _is_from_ncbi(gff3_file))
_output_ncbi_gff3(gff3_file, out_file, dialect)
else:
_output_gff3(gff3_file, out_file, dialect)
return out_file
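# Illustrative usage sketch (not part of the original script; the file name is
# hypothetical): gff3_to_gtf() writes <input>.gtf next to the input and picks the
# NCBI-aware conversion path automatically when _is_from_ncbi() finds Dbxref/db_xref keys.
#
#     gtf_path = gff3_to_gtf("ref-annotation.gff3")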
def _output_gff3(gff3_file, out_file, dialect):
db = gffutils.create_db(gff3_file, ":memory:")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
transcript_id = feature["Parent"][0]
gene_id = db[transcript_id]["Parent"][0]
attr = {"transcript_id": transcript_id, "gene_id": gene_id}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print(feature, file=out_handle, end="")
def _output_ncbi_gff3(gff3_file, out_file, dialect):
gene_key = "gene"
id_spec = {"gene": gene_key}
db = gffutils.create_db(gff3_file, ":memory:", id_spec=id_spec)
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
# Gnomon features are often missing a transcript id
# some malformed features are also missing the gene key
try:
transcript_id = feature["transcript_id"]
except KeyError:
try:
transcript_id = feature[gene_key]
except KeyError:
continue
gene_id = feature[gene_key]
try:
biotype = feature["gene_biotype"]
except KeyError:
biotype = "unknown"
attr = {"transcript_id": transcript_id, "gene_id": gene_id,
"gene_biotype": biotype}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print(feature, file=out_handle, end="")
def _is_from_ncbi(gff3_file):
with open(gff3_file) as in_handle:
for line in tz.take(10000, in_handle):
if "Dbxref" in line:
return "Dbxref"
if "db_xref" in line:
return "db_xref"
return None
def _index_w_command(env, dir_name, command, ref_file, pre=None, post=None, ext=None):
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
build_path = os.path.join(os.path.dirname(ref_file), os.pardir)
out_dir = os.path.join(build_path, dir_name)
index_path = os.path.join(out_dir, index_name)
safe_makedir(out_dir)
subprocess.check_call(command.format(ref_file=ref_file,
index_name=index_path), shell=True)
return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
name_dir = os.path.join(genome_dir, name)
safe_makedir(name_dir)
build_dir = os.path.join(name_dir, build)
safe_makedir(build_dir)
seq_dir = os.path.join(build_dir, SEQ_DIR)
safe_makedir(seq_dir)
if gtf:
gtf_dir = os.path.join(build_dir, RNASEQ_DIR)
safe_makedir(gtf_dir)
return build_dir
def install_fasta_file(build_dir, fasta, build):
out_file = os.path.join(build_dir, SEQ_DIR, build + ".fa")
if not file_exists(out_file):
recs = SeqIO.parse(fasta, "fasta")
with open(out_file, "w") as out_handle:
SeqIO.write((_clean_rec_name(rec) for rec in recs), out_handle, "fasta")
return out_file
def _clean_rec_name(rec):
"""Clean illegal characters in input fasta file which cause problems downstream.
"""
out_id = []
for char in list(rec.id):
if char in ALLOWED_CONTIG_NAME_CHARS:
out_id.append(char)
else:
out_id.append("_")
rec.id = "".join(out_id)
rec.description = ""
return rec
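# Illustrative sketch (not part of the original script): assuming '|' is not in
# ALLOWED_CONTIG_NAME_CHARS, a record named "chr1|alt" would be renamed "chr1_alt"
# and its description cleared before being written to the installed FASTA.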
def install_gtf_file(build_dir, gtf, build):
out_file = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
if not file_exists(out_file):
if is_gzipped(gtf):
            with gzip.open(gtf, 'rb') as in_handle:
with open(out_file, 'wb') as out_handle:
shutil.copyfileobj(in_handle, out_handle)
else:
shutil.copyfile(gtf, out_file)
return out_file
def install_srna(species, gtf):
out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
safe_makedir(SRNASEQ_DIR)
if gtf:
if not file_exists(out_file):
shutil.copyfile(gtf, out_file)
try:
from seqcluster import install
except ImportError:
raise ImportError("install seqcluster first, please.")
with chdir(SRNASEQ_DIR):
hairpin, miRNA = install._install_mirbase()
cmd = ("cat %s | awk '{if ($0~/>%s/){name=$0; print name} else if ($0~/^>/){name=0};if (name!=0 && $0!~/^>/){print $0;}}' | sed 's/U/T/g' > hairpin.fa")
do.run(cmd % (hairpin, species), "set precursor.")
cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
do.run(cmd.format(**locals()), "set miRNA.")
shutil.rmtree("mirbase")
return out_file
def append_ercc(gtf_file, fasta_file):
ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
print(append_fa_cmd.format(**locals()))
subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
print(append_gtf_cmd.format(**locals()))
subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
class MyParser(ArgumentParser):
def error(self, message):
self.print_help()
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
print("\nCurrent genomes\n")
print(open(loc.get_loc_file(galaxy_base, "samtools")).read())
sys.exit(0)
if __name__ == "__main__":
description = ("Set up a custom genome for bcbio-nextgen. This will "
"place the genome under name/build in the genomes "
"directory in your bcbio-nextgen installation.")
parser = MyParser(description=description)
parser.add_argument("-c", "--cores", default=1,
help="number of cores to use")
parser.add_argument("-f", "--fasta", required=True,
help="FASTA file of the genome.")
parser.add_argument("--gff3", default=False, action='store_true',
help="File is a GFF3 file.")
parser.add_argument("-g", "--gtf", default=None,
help="GTF file of the transcriptome")
parser.add_argument("-n", "--name", required=True,
help="Name of organism, for example Hsapiens.")
parser.add_argument("-b", "--build", required=True,
help="Build of genome, for example hg19.")
parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
default=["seq"], help="Space separated list of indexes to make")
parser.add_argument("--ercc", action='store_true', default=False,
help="Add ERCC spike-ins.")
parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
parser.add_argument("--buildversion", required=True,
help=("String describing build of genome used. Examples: "
"Ensembl_94, EnsemblMetazoa_94, Flybase_21, etc"))
args = parser.parse_args()
# if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
# raise ValueError("--mirbase and --srna_gtf both need a value.")
os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
cbl = get_cloudbiolinux(REMOTES)
sys.path.insert(0, cbl["dir"])
genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
# monkey patch cloudbiolinux to use this indexing command instead
genomes = getattr(genomemod, 'genomes')
genomes._index_w_command = _index_w_command
genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
args.fasta = os.path.abspath(args.fasta)
if not file_exists(args.fasta):
print("%s does not exist, exiting." % args.fasta)
sys.exit(1)
args.gtf = os.path.abspath(args.gtf) if args.gtf else None
if args.gtf and not file_exists(args.gtf):
print("%s does not exist, exiting." % args.gtf)
sys.exit(1)
args.srna_gtf = os.path.abspath(args.srna_gtf) if args.srna_gtf else None
gtf_file = args.gtf
if args.gff3:
gtf_file = extract_if_gzipped(gtf_file)
gtf_file = gff3_to_gtf(gtf_file)
# always make a sequence dictionary
if "seq" not in args.indexes:
args.indexes.append("seq")
prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
print("Creating directories using %s as the base." % (genome_dir))
build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
os.chdir(build_dir)
print("Genomes will be installed into %s." % (build_dir))
fasta_file = extract_if_gzipped(args.fasta)
fasta_file = install_fasta_file(build_dir, fasta_file, args.build)
print("Installed genome as %s." % (fasta_file))
if args.gtf:
if "bowtie2" not in args.indexes:
args.indexes.append("bowtie2")
gtf_file = install_gtf_file(build_dir, gtf_file, args.build)
print("Installed GTF as %s." % (gtf_file))
if args.ercc:
print("Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file))
append_ercc(gtf_file, fasta_file)
indexed = {}
Env = collections.namedtuple("Env", "system_install, cores")
env = Env(genome_dir, args.cores)
for index in args.indexes:
print("Creating the %s index." % (index))
index_fn = genomes.get_index_fn(index)
if not index_fn:
print("Do not know how to make the index %s, skipping." % (index))
continue
indexed[index] = index_fn(env, fasta_file)
indexed["samtools"] = fasta_file
if args.gtf:
"Preparing transcriptome."
with chdir(os.path.join(build_dir, os.pardir)):
cmd = ("{sys.executable} {prepare_tx} --buildversion {args.buildversion} --cores {args.cores} --genome-dir {genome_dir} "
"--gtf {gtf_file} {args.name} {args.build}")
subprocess.check_call(cmd.format(**locals()), shell=True)
if args.mirbase:
"Preparing smallRNA data."
with chdir(os.path.join(build_dir)):
install_srna(args.mirbase, args.srna_gtf)
base_dir = os.path.normpath(os.path.dirname(fasta_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
print("Dumping genome resources to %s." % resource_file)
resource_dict = {"version": 1}
if args.gtf:
transcripts = ["rnaseq", "transcripts"]
mask = ["rnaseq", "transcripts_mask"]
index = ["rnaseq", "transcriptome_index", "tophat"]
dexseq = ["rnaseq", "dexseq"]
refflat = ["rnaseq", "refflat"]
rRNA_fa = ["rnaseq", "rRNA_fa"]
resource_dict = tz.update_in(resource_dict, transcripts,
lambda x: "../rnaseq/ref-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, mask,
lambda x: "../rnaseq/ref-transcripts-mask.gtf")
resource_dict = tz.update_in(resource_dict, index,
lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
resource_dict = tz.update_in(resource_dict, refflat,
lambda x: "../rnaseq/ref-transcripts.refFlat")
resource_dict = tz.update_in(resource_dict, dexseq,
lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
resource_dict = tz.update_in(resource_dict, rRNA_fa,
lambda x: "../rnaseq/rRNA.fa")
if args.mirbase:
srna_gtf = ["srnaseq", "srna_transcripts"]
srna_mirbase = ["srnaseq", "mirbase_hairpin"]
resource_dict = tz.update_in(resource_dict, srna_gtf,
lambda x: "../srnaseq/srna-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, srna_mirbase,
lambda x: "../srnaseq/hairpin.fa")
    # write out resource dictionary
with file_transaction(resource_file) as tx_resource_file:
with open(tx_resource_file, "w") as out_handle:
out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
print("Updating Galaxy .loc files.")
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
for index, index_file in indexed.items():
if index_file:
loc.update_loc_file(galaxy_base, index, args.build, index_file)
print("Genome installation complete.")
|
a113n/bcbio-nextgen
|
scripts/bcbio_setup_genome.py
|
Python
|
mit
| 15,182
|
[
"Galaxy"
] |
3ee490fe80f04c973d03cc6c7ac922eca6e8bc0dfec8ef526240f0f8e755074e
|
""" A generic client for creating productions
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.API import API
class Production(API):
""" Contains methods to build a production on the client, before it's actually added to the Production System
"""
def __init__(self, **kwargs):
""" Simple constructor
"""
super(Production, self).__init__()
self.prodDescription = {}
self.stepCounter = 1
# Methods running on the client to prepare the production description
def addStep(self, prodStep):
""" Add a step to the production description, by updating the description dictionary
    :param object prodStep: an object of type :mod:`~DIRAC.ProductionSystem.Client.ProductionStep`
"""
stepName = 'Step' + str(self.stepCounter) + '_' + prodStep.Name
self.stepCounter += 1
prodStep.Name = stepName
res = prodStep.getAsDict()
if not res['OK']:
return res
prodStepDict = res['Value']
prodStepDict['name'] = stepName
self.prodDescription[prodStep.Name] = prodStepDict
return S_OK()
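# Illustrative usage sketch (not part of the original module): building a minimal
# production description on the client. ProductionStep and its import path are
# assumed from the docstring above; only Name and getAsDict() are relied upon here.
#
#     from DIRAC.ProductionSystem.Client.ProductionStep import ProductionStep
#     prod = Production()
#     step = ProductionStep()
#     step.Name = 'MCSimulation'
#     prod.addStep(step)
#     # prod.prodDescription now holds {'Step1_MCSimulation': {...}}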
|
petricm/DIRAC
|
ProductionSystem/Client/Production.py
|
Python
|
gpl-3.0
| 1,095
|
[
"DIRAC"
] |
c8a4d1ef65d84093df4eb79f5188e2c45a2486e21e396d40ccf5a72087e601d1
|
# Monte Carlo Localisation
import prob_motion_model as pmm
#import hokuyo
import random
import numpy as np
import matplotlib.pyplot as plt
import time
import socket
import copy
import ttest
import math
from serial.tools import list_ports
import serialWrapper
import packetBuilder
import packetParser
import traceback
# Hokuyo socket parameters
TCP_IP = '192.168.0.10'
TCP_PORT = 10940
BUFFER_SIZE = 8192 #4096
# Number of particles
N = 100
# Dimensions of the playing field
WORLD_X = 3000
WORLD_Y = 2000
# Beacon location: 1(left middle), 2(right lower), 3(right upper)
BEACONS = [(-56,1000),(3062,-56),(3055,2014)]
#BEACONS = [(2994,0),(2994,1996),(0,962)]
# Dummy list before I implement odometry reading and conversion to global coord
rel_motion = [0, 0, 0]
# Dummy sensor noise
#sense_noise = 1.5
MARKER_POSITIONS = [(514,217),(1587,349),(2597,248),
(514,951),(1593,903),(2602,918),
(514,1677),(1613,1677),(2618,1699)]
VID = 1155
PID = 22336
SNR = '336234893534'
class Robot(object):
def __init__(self):
self.x = random.gauss(1587, 50)
self.y = random.gauss(349, 50)
self.orientation = random.gauss(math.pi/2, 0.1)
# Dummy sensor noise
self.sense_noise = 50
self.sense_angle_noise = math.radians(5)
def set(self, new_x, new_y, new_orientation):
if new_x < 0 or new_x > WORLD_X:
pass
else:
self.x = int(new_x)
if new_y < 0 or new_y > WORLD_Y:
pass
else:
self.y = int(new_y)
self.orientation = new_orientation % (2 * math.pi)
def move(self, rel_motion):
new_x, new_y, new_orientation = pmm.prob(self.pose(), rel_motion)
new_robot = Robot()
new_robot.set(new_x, new_y, new_orientation)
return new_robot
def r_motion(self):
"""Return robot current coordinates"""
packet = packetBuilder.BuildPacket(commands.getCurentCoordinates)
recievedPacket = computerPort.sendRequest(packet.bytearray)
old = recievedPacket.reply
time.sleep(0.005)
packet = packetBuilder.BuildPacket(commands.getCurentCoordinates)
recievedPacket = computerPort.sendRequest(packet.bytearray)
new = recievedPacket.reply
return [new[0]-old[0], new[1]-old[1],new[2]-old[2]]
def pose(self):
return self.x, self.y, self.orientation
def weight(self, measurement):
#beacons_sorted = sort_beacons(self.orientation, BEACONS, self.x, self.y)
beacons_sorted = BEACONS
x_glob, y_glob = ttest.point_transform(measurement, self.pose())
#print 'SORTED BEACONS: ', beacons_sorted
prob = 1.0
#print 'measurement in weights: ', measurement
if len(beacons_sorted) != len(measurement):
#print 'beacons not the same length'
return 0.0
for i in xrange(len(beacons_sorted)):
try:
                dist = math.sqrt((self.x - x_glob[i]) ** 2 + (self.y - y_glob[i]) ** 2)
except ValueError:
dist = 0.0
# Angle between current orientation and global beacons, measure counterclockwise
#fi = angle(angle_conv(beacons_sorted[i][1] - self.y, beacons_sorted[i][0] - self.x) - self.orientation)
#print 'calculated distance: ', dist, measurement[i][1]
#print 'calculated angle: ', fi, angle2(measurement[i][0])
try:
prob_trans = self.gaussian_trans(dist, self.sense_noise, measurement[i][1])
#print 'prob_trans: ', prob_trans
#prob_rot = self.gaussian_rot(fi, self.sense_angle_noise, angle2(measurement[i][0]))
#print 'prob_rot: ', prob_rot/10
#prob *= ((self.gaussian_trans(dist, self.sense_noise, measurement[i][1])) * \
# self.gaussian_rot(fi, self.sense_angle_noise, angle2(measurement[i][0])))
prob *= (prob_trans)#*(prob_rot/10))
except IndexError:
print 'INDEX ERROR'
prob *= 1
#print 'Probability:................................. ', prob
return prob
def gaussian_trans(self, mu, sigma, x):
# calculates the probability of x for 1-dim Gaussian with mean mu and var. sigma
#print 'Gaussian mu: ', mu
#print 'Gaussian sigma: ', sigma
#print 'Gaussian x: ', x
# try:
# print 'Gaussian calculation: ', math.exp(- ((mu - (x+29)) ** 2) / (sigma ** 2) / 2.0) / \
# math.sqrt(2.0 * math.pi * (sigma ** 2))
# except OverflowError:
# print 'Gaussian calculation rouding: ', 0
try:
return math.exp(- ((mu - (x+40)) ** 2) / (sigma ** 2) / 2.0) / \
(sigma * math.sqrt(2.0 * math.pi))
except OverflowError:
return 0.0
def gaussian_rot(self, mu, sigma, x):
try:
return math.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / \
(sigma * math.sqrt(2.0 * math.pi))
except OverflowError:
return 0.0
def __str__(self):
return 'Particle pose: x = %i mm, y = %i mm, theta = %.2f deg' \
%(self.x, self.y, np.degrees(self.orientation))
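# Illustrative note (not part of the original script): Robot.weight() scores a
# particle with a 1-D Gaussian on each beacon distance,
#   p = exp(-(d_particle - (d_measured + 40))**2 / (2 * sigma**2)) / (sigma * sqrt(2*pi))
# where sigma is self.sense_noise (50 mm) and the +40 mm offset is presumably a
# fixed sensor/beacon correction; the per-beacon probabilities are multiplied together.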
def angle_conv(y, x):
angle = math.atan2(y, x)
if angle<0:
angle += (2*math.pi)
return angle
def angle(measurement):
if measurement < 0:
measurement += (2*math.pi)
return measurement
def angle2(measurement):
if measurement < 3*math.pi/4:
measurement += (5*math.pi/4)
return measurement
else:
measurement -= (3*math.pi/4)
return measurement
# Finds angle of lidar start arm from global x axis (takes self.orientation)
def angle3(orientation):
start = orientation - 3*math.pi/4
if start < 0:
start += (2*math.pi)
return start
# Finds angle between angle3 and beacon (takes angle3 of orientation, and angle_conv of beacon point)
def angle4(arm, beacon):
angle = beacon - arm
if angle < 0:
angle += (2*math.pi)
return angle
def sort_beacons(orientation,BEACONS,x,y):
arm = angle3(orientation)
beacons = [angle_conv(BEACONS[i][1] - y, BEACONS[i][0] - x) for i in xrange(3)]
order =[angle4(arm,beacon) for beacon in beacons]
beacons_sort = [BEACONS for (order,BEACONS) in sorted(zip(order,BEACONS)) if order <= 3*math.pi/2]
return beacons_sort
def init_xy_plot():
""" setup an XY plot canvas """
plt.ion()
figure = plt.figure(figsize=(6, 4),
dpi=200,
facecolor="w",
edgecolor="k")
ax = figure.add_subplot(111)
lines, = ax.plot([],[],linestyle="none",
marker=".",
markersize=1,
markerfacecolor="blue")
ax.set_xlim(0, 3000)
ax.set_ylim(0, 2000)
ax.grid()
return figure, lines
def update_xy_plot(x, y):
""" re-draw the XY plot with new current_frame """
lines.set_xdata(x)
lines.set_ydata(y)
figure.canvas.draw()
def init_polar_plot():
""" setup a polar plot canvas """
plt.ion()
figure = plt.figure(figsize=(6, 6),
dpi=160,
facecolor="w",
edgecolor="k")
ax = figure.add_subplot(111, polar=True)
lines, = ax.plot([],[],
linestyle="none",
marker=".",
markersize=3,
markerfacecolor="blue")
ax.set_rmax(4000)
ax.set_theta_direction(1) #set to clockwise
ax.set_theta_offset(-np.pi/4) #offset by 90 degree so that 0 degree is at 12 o'clock
#ax.grid()
return figure, lines
def update_polar_plot(angle, dist):
""" re-draw the polar plot with new current_frame """
lines2.set_xdata(angle)
lines2.set_ydata(dist)
figure2.canvas.draw()
def portNumber():
"""Find all ports, and returns one with defined STM values"""
for port in list_ports.comports():
if (port.serial_number == SNR) and (port.pid == PID) and (port.vid == VID):
return port.name
if __name__ == '__main__':
port = '/dev/'+portNumber()
computerPort = serialWrapper.SerialWrapper(port)
commands = packetBuilder.CommandsList()
# Initialize socket connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print 'test'
s.connect((TCP_IP, TCP_PORT))
time.sleep(0.1)
s.send('BM\r')
data = s.recv(BUFFER_SIZE)
time.sleep(0.1)
for i in xrange(3):
s.send('GE0000108000\r')
data = s.recv(BUFFER_SIZE)
time.sleep(0.1)
print 'Ready'
# Initialize the plot
#figure2, lines2 = init_polar_plot()
#figure, lines = init_xy_plot()
# Set Robot randomly
myrobot = Robot()
myrobot.x, myrobot.y, myrobot.orientation = 1587.0, 349.0, math.pi/2
print 'First: ', myrobot
# Set N random particles
p = [Robot() for i in xrange(N)]
#for i in p:
# print 'First particles: ', i
#update_xy_plot([p[i].x for i in xrange(N)], [p[i].y for i in xrange(N)])
#time.sleep(5)
#rel_motion = []
while True:
try:
#for iteration in xrange(30):
#print 'ITERATION:.......', iteration
# Move robot; noise is in prob function
#rel_motion = myrobot.r_motion()
#print rel_motion
#myrobot = myrobot.move(rel_motion)
#print 'Robot after movement: ', myrobot
# Move particles
p2 = [p[i].move(rel_motion) for i in xrange(N)]
p = p2
#for i in p:
# print 'Particels after movement: ', i
s.send('GE0000108000\r')
data_lidar = s.recv(BUFFER_SIZE)
# Lidar sense - returns distance to 3 beacons
lidar, langle, lgraph = ttest.update_di(data_lidar)
#print 'After lidar'
#try:
# update_polar_plot(langle, lgraph)
#except:
# print 'not same length'
# Calculate the weights
#print 'lidar data: ', lidar
w =[p[i].weight(lidar) for i in xrange(N)]
w = np.asarray(w)
w /= w.sum()
#print 'just weights: ', w
#print 'sum of weights: ', np.sum(w)
try:
# Probability random pick - use np.random alg
p3 = np.random.choice(p, N, p = w)
p = list(p3)
mean_val = [(p[i].x, p[i].y, p[i].orientation) for i in xrange(len(p))]
#print 'list particles after random: ', p
# Set myrobot to particle with max w
#index2 = np.nonzero(w == w.max())[0][0]
#myrobot = copy.deepcopy(p[index2])
center = np.mean(mean_val, axis = 0)
myrobot.x, myrobot.y, myrobot.orientation = center[0], center[1], center[2]
except:
print 'error with choice'
pass
# for i in p:
# print 'Final particles: ', i
#update_xy_plot([p[i].x for i in xrange(N)], [p[i].y for i in xrange(N)])
print myrobot
time.sleep(0.05)
except:
traceback.print_exc()
s.send('QT\r')
s.shutdown(2)
s.close()
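# Illustrative summary (not part of the original script) of the MCL loop above:
#   1. propagate each particle with the odometry motion model (Robot.move / pmm.prob)
#   2. weight each particle against the lidar beacon distances (Robot.weight)
#   3. normalise the weights and resample N particles with numpy.random.choice
#   4. take the mean of the resampled particle poses as the pose estimate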
|
redarmy30/Eurobot-2017
|
old year/RESET-master/CommunicationWithRobot/localisation2.py
|
Python
|
mit
| 9,722
|
[
"Gaussian"
] |
353431dfcea443108fe36a2822266f5a71e4bb5daeb607d34db9ac68cfe7dfc7
|
#!/usr/bin/env python
'''
setup board.h for chibios
'''
import argparse, sys, fnmatch, os, dma_resolver, shlex, pickle, re
import shutil
parser = argparse.ArgumentParser("chibios_pins.py")
parser.add_argument(
'-D', '--outdir', type=str, default=None, help='Output directory')
parser.add_argument(
'--bootloader', action='store_true', default=False, help='configure for bootloader')
parser.add_argument(
'hwdef', type=str, default=None, help='hardware definition file')
args = parser.parse_args()
# output variables for each pin
f4f7_vtypes = ['MODER', 'OTYPER', 'OSPEEDR', 'PUPDR', 'ODR', 'AFRL', 'AFRH']
f1_vtypes = ['CRL', 'CRH', 'ODR']
f1_input_sigs = ['RX', 'MISO', 'CTS']
f1_output_sigs = ['TX', 'MOSI', 'SCK', 'RTS', 'CH1', 'CH2', 'CH3', 'CH4']
af_labels = ['USART', 'UART', 'SPI', 'I2C', 'SDIO', 'SDMMC', 'OTG', 'JT', 'TIM', 'CAN']
vtypes = []
# number of pins in each port
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16,
'F': 16,
'G': 16,
'H': 2,
'I': 0,
'J': 0,
'K': 0
}
ports = pincount.keys()
portmap = {}
# dictionary of all config lines, indexed by first word
config = {}
# list of all pins in config file order
allpins = []
# list of configs by type
bytype = {}
# list of configs by label
bylabel = {}
# list of SPI devices
spidev = []
# dictionary of ROMFS files
romfs = {}
# SPI bus list
spi_list = []
# all config lines in order
alllines = []
# allow for extra env vars
env_vars = {}
# build flags for ChibiOS makefiles
build_flags = []
# sensor lists
imu_list = []
compass_list = []
baro_list = []
mcu_type = None
def is_int(str):
'''check if a string is an integer'''
try:
int(str)
except Exception:
return False
return True
def error(str):
'''show an error and exit'''
print("Error: " + str)
sys.exit(1)
def get_mcu_lib(mcu):
'''get library file for the chosen MCU'''
import importlib
try:
return importlib.import_module(mcu)
except ImportError:
error("Unable to find module for MCU %s" % mcu)
def setup_mcu_type_defaults():
'''setup defaults for given mcu type'''
global pincount, ports, portmap, vtypes
lib = get_mcu_lib(mcu_type)
if hasattr(lib, 'pincount'):
pincount = lib.pincount
if mcu_series.startswith("STM32F1"):
vtypes = f1_vtypes
else:
vtypes = f4f7_vtypes
ports = pincount.keys()
# setup default as input pins
for port in ports:
portmap[port] = []
for pin in range(pincount[port]):
portmap[port].append(generic_pin(port, pin, None, 'INPUT', []))
def get_alt_function(mcu, pin, function):
'''return alternative function number for a pin'''
lib = get_mcu_lib(mcu)
if function.endswith('_TXINV') or function.endswith('_RXINV'):
# RXINV and TXINV are special labels for inversion pins, not alt-functions
return None
if hasattr(lib, "AltFunction_map"):
alt_map = lib.AltFunction_map
else:
# just check if Alt Func is available or not
for l in af_labels:
if function.startswith(l):
return 0
return None
if function and function.endswith("_RTS") and (
function.startswith('USART') or function.startswith('UART')):
# we do software RTS
return None
for l in af_labels:
if function.startswith(l):
s = pin + ":" + function
if not s in alt_map:
error("Unknown pin function %s for MCU %s" % (s, mcu))
return alt_map[s]
return None
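# Illustrative sketch (not in the original file): for a hwdef pin line such as
#   PA10 USART1_RX USART1
# (values hypothetical), the key looked up in the MCU library's AltFunction_map is
# "PA10:USART1_RX". U(S)ART *_RTS pins and the *_RXINV/_TXINV inversion labels
# intentionally return None so they are driven as ordinary GPIOs instead.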
def have_type_prefix(ptype):
'''return True if we have a peripheral starting with the given peripheral type'''
for t in bytype.keys():
if t.startswith(ptype):
return True
return False
def get_ADC1_chan(mcu, pin):
'''return ADC1 channel for an analog pin'''
import importlib
try:
lib = importlib.import_module(mcu)
ADC1_map = lib.ADC1_map
except ImportError:
error("Unable to find ADC1_Map for MCU %s" % mcu)
if not pin in ADC1_map:
error("Unable to find ADC1 channel for pin %s" % pin)
return ADC1_map[pin]
class generic_pin(object):
'''class to hold pin definition'''
def __init__(self, port, pin, label, type, extra):
global mcu_series
self.portpin = "P%s%u" % (port, pin)
self.port = port
self.pin = pin
self.label = label
self.type = type
self.extra = extra
self.af = None
if type == 'OUTPUT':
self.sig_dir = 'OUTPUT'
else:
self.sig_dir = 'INPUT'
if mcu_series.startswith("STM32F1") and self.label is not None:
self.f1_pin_setup()
# check that labels and pin types are consistent
for prefix in ['USART', 'UART', 'TIM']:
if label is None or type is None:
continue
if type.startswith(prefix):
a1 = label.split('_')
a2 = type.split('_')
if a1[0] != a2[0]:
error("Peripheral prefix mismatch for %s %s %s" % (self.portpin, label, type))
def f1_pin_setup(self):
for l in af_labels:
if self.label.startswith(l):
if self.label.endswith(tuple(f1_input_sigs)):
self.sig_dir = 'INPUT'
self.extra.append('FLOATING')
elif self.label.endswith(tuple(f1_output_sigs)):
self.sig_dir = 'OUTPUT'
elif l == 'I2C':
self.sig_dir = 'OUTPUT'
elif l == 'OTG':
self.sig_dir = 'OUTPUT'
else:
error("Unknown signal type %s:%s for %s!" % (self.portpin, self.label, mcu_type))
def has_extra(self, v):
'''return true if we have the given extra token'''
return v in self.extra
def extra_prefix(self, prefix):
'''find an extra token starting with the given prefix'''
for e in self.extra:
if e.startswith(prefix):
return e
return None
def extra_value(self, name, type=None, default=None):
'''find an extra value of given type'''
v = self.extra_prefix(name)
if v is None:
return default
if v[len(name)] != '(' or v[-1] != ')':
error("Badly formed value for %s: %s\n" % (name, v))
ret = v[len(name) + 1:-1]
if type is not None:
try:
ret = type(ret)
except Exception:
error("Badly formed value for %s: %s\n" % (name, ret))
return ret
def is_RTS(self):
'''return true if this is a RTS pin'''
if self.label and self.label.endswith("_RTS") and (
self.type.startswith('USART') or self.type.startswith('UART')):
return True
return False
def is_CS(self):
'''return true if this is a CS pin'''
return self.has_extra("CS") or self.type == "CS"
def get_MODER(self):
'''return one of ALTERNATE, OUTPUT, ANALOG, INPUT'''
if self.af is not None:
v = "ALTERNATE"
elif self.type == 'OUTPUT':
v = "OUTPUT"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT"
elif self.is_RTS():
v = "OUTPUT"
else:
v = "INPUT"
return "PIN_MODE_%s(%uU)" % (v, self.pin)
def get_OTYPER(self):
'''return one of PUSHPULL, OPENDRAIN'''
v = 'PUSHPULL'
if self.type.startswith('I2C'):
# default I2C to OPENDRAIN
v = 'OPENDRAIN'
values = ['PUSHPULL', 'OPENDRAIN']
for e in self.extra:
if e in values:
v = e
return "PIN_OTYPE_%s(%uU)" % (v, self.pin)
def get_OSPEEDR(self):
'''return one of SPEED_VERYLOW, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH'''
# on STM32F4 these speeds correspond to 2MHz, 25MHz, 50MHz and 100MHz
values = ['SPEED_VERYLOW', 'SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in values:
v = e
return "PIN_O%s(%uU)" % (v, self.pin)
def get_PUPDR(self):
'''return one of FLOATING, PULLUP, PULLDOWN'''
values = ['FLOATING', 'PULLUP', 'PULLDOWN']
v = 'FLOATING'
if self.is_CS():
v = "PULLUP"
# generate pullups for UARTs
if (self.type.startswith('USART') or
self.type.startswith('UART')) and (
(self.label.endswith('_TX') or
self.label.endswith('_RX') or
self.label.endswith('_CTS') or
self.label.endswith('_RTS'))):
v = "PULLUP"
# generate pullups for SDIO and SDMMC
if (self.type.startswith('SDIO') or
self.type.startswith('SDMMC')) and (
(self.label.endswith('_D0') or
self.label.endswith('_D1') or
self.label.endswith('_D2') or
self.label.endswith('_D3') or
self.label.endswith('_CMD'))):
v = "PULLUP"
for e in self.extra:
if e in values:
v = e
return "PIN_PUPDR_%s(%uU)" % (v, self.pin)
def get_ODR_F1(self):
'''return one of LOW, HIGH'''
values = ['LOW', 'HIGH']
v = 'HIGH'
if self.type == 'OUTPUT':
v = 'LOW'
elif self.label is not None and self.label.startswith('I2C'):
v = 'LOW'
for e in self.extra:
if e in values:
v = e
        # for some controllers the input pull-up/pull-down is selected by ODR
if self.type == "INPUT":
v = 'LOW'
if 'PULLUP' in self.extra:
v = "HIGH"
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_ODR(self):
'''return one of LOW, HIGH'''
if mcu_series.startswith("STM32F1"):
return self.get_ODR_F1()
values = ['LOW', 'HIGH']
v = 'HIGH'
for e in self.extra:
if e in values:
v = e
return "PIN_ODR_%s(%uU)" % (v, self.pin)
def get_AFIO(self):
'''return AFIO'''
af = self.af
if af is None:
af = 0
return "PIN_AFIO_AF(%uU, %uU)" % (self.pin, af)
def get_AFRL(self):
'''return AFIO low 8'''
if self.pin >= 8:
return None
return self.get_AFIO()
def get_AFRH(self):
'''return AFIO high 8'''
if self.pin < 8:
return None
return self.get_AFIO()
def get_CR_F1(self):
'''return CR FLAGS for STM32F1xx'''
#Check Speed
if self.sig_dir != "INPUT" or self.af is not None:
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
elif self.is_CS():
speed_str = "PIN_SPEED_LOW(%uU) |" % (self.pin)
else:
speed_str = ""
if self.af is not None:
if self.label.endswith('_RX'):
                # uart RX is configured as an input, and can be pullup, pulldown or floating
if 'PULLUP' in self.extra or 'PULLDOWN' in self.extra:
v = 'PUD'
else:
v = "NOPULL"
elif self.label.startswith('I2C'):
v = "AF_OD"
else:
v = "AF_PP"
elif self.is_CS():
v = "OUTPUT_PP"
elif self.sig_dir == 'OUTPUT':
if 'OPENDRAIN' in self.extra:
v = 'OUTPUT_OD'
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CR(self):
'''return CR FLAGS'''
if mcu_series.startswith("STM32F1"):
return self.get_CR_F1()
if self.sig_dir != "INPUT":
speed_values = ['SPEED_LOW', 'SPEED_MEDIUM', 'SPEED_HIGH']
v = 'SPEED_MEDIUM'
for e in self.extra:
if e in speed_values:
v = e
speed_str = "PIN_%s(%uU) |" % (v, self.pin)
else:
speed_str = ""
#Check Alternate function
if self.type.startswith('I2C'):
v = "AF_OD"
elif self.sig_dir == 'OUTPUT':
if self.af is not None:
v = "AF_PP"
else:
v = "OUTPUT_PP"
elif self.type.startswith('ADC'):
v = "ANALOG"
elif self.is_CS():
v = "OUTPUT_PP"
elif self.is_RTS():
v = "OUTPUT_PP"
else:
v = "PUD"
if 'FLOATING' in self.extra:
v = "NOPULL"
mode_str = "PIN_MODE_%s(%uU)" % (v, self.pin)
return "%s %s" % (speed_str, mode_str)
def get_CRH(self):
if self.pin < 8:
return None
return self.get_CR()
def get_CRL(self):
if self.pin >= 8:
return None
return self.get_CR()
def __str__(self):
str = ''
if self.af is not None:
str += " AF%u" % self.af
if self.type.startswith('ADC1'):
str += " ADC1_IN%u" % get_ADC1_chan(mcu_type, self.portpin)
if self.extra_value('PWM', type=int):
str += " PWM%u" % self.extra_value('PWM', type=int)
return "P%s%u %s %s%s" % (self.port, self.pin, self.label, self.type,
str)
def get_config(name, column=0, required=True, default=None, type=None, spaces=False):
'''get a value from config dictionary'''
if not name in config:
if required and default is None:
error("missing required value %s in hwdef.dat" % name)
return default
if len(config[name]) < column + 1:
if not required:
return None
error("missing required value %s in hwdef.dat (column %u)" % (name,
column))
if spaces:
ret = ' '.join(config[name][column:])
else:
ret = config[name][column]
if type is not None:
if type == int and ret.startswith('0x'):
try:
ret = int(ret,16)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
else:
try:
ret = type(ret)
except Exception:
error("Badly formed config value %s (got %s)" % (name, ret))
return ret
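# Illustrative usage sketch (not in the original file): with a hwdef entry like
#   FLASH_SIZE_KB 2048
# get_config('FLASH_SIZE_KB', type=int) returns 2048, while
# get_config('FLASH_USE_MAX_KB', type=int, default=0) falls back to 0 when the key
# is missing, and hex strings such as 0x08000000 are accepted when type=int.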
def get_mcu_config(name, required=False):
'''get a value from the mcu dictionary'''
lib = get_mcu_lib(mcu_type)
if not hasattr(lib, 'mcu'):
error("Missing mcu config for %s" % mcu_type)
if not name in lib.mcu:
if required:
error("Missing required mcu config %s for %s" % (name, mcu_type))
return None
return lib.mcu[name]
def enable_can(f):
'''setup for a CAN enabled board'''
f.write('#define HAL_WITH_UAVCAN 1\n')
env_vars['HAL_WITH_UAVCAN'] = '1'
def has_sdcard_spi():
'''check for sdcard connected to spi bus'''
for dev in spidev:
if(dev[0] == 'sdcard'):
return True
return False
def write_mcu_config(f):
'''write MCU config defines'''
f.write('// MCU type (ChibiOS define)\n')
f.write('#define %s_MCUCONF\n' % get_config('MCU'))
mcu_subtype = get_config('MCU', 1)
if mcu_subtype.endswith('xx'):
f.write('#define %s_MCUCONF\n\n' % mcu_subtype[:-2])
f.write('#define %s\n\n' % mcu_subtype)
f.write('// crystal frequency\n')
f.write('#define STM32_HSECLK %sU\n\n' % get_config('OSCILLATOR_HZ'))
f.write('// UART used for stdout (printf)\n')
if get_config('STDOUT_SERIAL', required=False):
f.write('#define HAL_STDOUT_SERIAL %s\n\n' % get_config('STDOUT_SERIAL'))
f.write('// baudrate used for stdout (printf)\n')
f.write('#define HAL_STDOUT_BAUDRATE %u\n\n' % get_config('STDOUT_BAUDRATE', type=int))
if have_type_prefix('SDIO'):
f.write('// SDIO available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
build_flags.append('USE_FATFS=yes')
elif have_type_prefix('SDMMC'):
f.write('// SDMMC available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_SDC TRUE\n')
f.write('#define STM32_SDC_USE_SDMMC1 TRUE\n')
build_flags.append('USE_FATFS=yes')
elif has_sdcard_spi():
f.write('// MMC via SPI available, enable POSIX filesystem support\n')
f.write('#define USE_POSIX\n\n')
f.write('#define HAL_USE_MMC_SPI TRUE\n')
f.write('#define HAL_USE_SDC FALSE\n')
f.write('#define HAL_SDCARD_SPI_HOOK TRUE\n')
build_flags.append('USE_FATFS=yes')
else:
f.write('#define HAL_USE_SDC FALSE\n')
build_flags.append('USE_FATFS=no')
env_vars['DISABLE_SCRIPTING'] = True
if 'OTG1' in bytype:
f.write('#define STM32_USB_USE_OTG1 TRUE\n')
f.write('#define HAL_USE_USB TRUE\n')
f.write('#define HAL_USE_SERIAL_USB TRUE\n')
if 'OTG2' in bytype:
f.write('#define STM32_USB_USE_OTG2 TRUE\n')
if have_type_prefix('CAN') and not 'AP_PERIPH' in env_vars:
enable_can(f)
if get_config('PROCESS_STACK', required=False):
env_vars['PROCESS_STACK'] = get_config('PROCESS_STACK')
else:
env_vars['PROCESS_STACK'] = "0x2000"
if get_config('MAIN_STACK', required=False):
env_vars['MAIN_STACK'] = get_config('MAIN_STACK')
else:
env_vars['MAIN_STACK'] = "0x400"
if get_config('IOMCU_FW', required=False):
env_vars['IOMCU_FW'] = get_config('IOMCU_FW')
else:
env_vars['IOMCU_FW'] = 0
if get_config('PERIPH_FW', required=False):
env_vars['PERIPH_FW'] = get_config('PERIPH_FW')
else:
env_vars['PERIPH_FW'] = 0
# write any custom STM32 defines
for d in alllines:
if d.startswith('STM32_'):
f.write('#define %s\n' % d)
if d.startswith('define '):
f.write('#define %s\n' % d[7:])
flash_size = get_config('FLASH_SIZE_KB', type=int)
f.write('#define BOARD_FLASH_SIZE %u\n' % flash_size)
env_vars['BOARD_FLASH_SIZE'] = flash_size
f.write('#define CRT1_AREAS_NUMBER 1\n')
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
f.write('\n// location of loaded firmware\n')
f.write('#define FLASH_LOAD_ADDRESS 0x%08x\n' % (0x08000000 + flash_reserve_start*1024))
if args.bootloader:
f.write('#define FLASH_BOOTLOADER_LOAD_KB %u\n' % get_config('FLASH_BOOTLOADER_LOAD_KB', type=int))
f.write('\n')
ram_map = get_mcu_config('RAM_MAP', True)
f.write('// memory regions\n')
regions = []
total_memory = 0
for (address, size, flags) in ram_map:
regions.append('{(void*)0x%08x, 0x%08x, 0x%02x }' % (address, size*1024, flags))
total_memory += size
f.write('#define HAL_MEMORY_REGIONS %s\n' % ', '.join(regions))
f.write('#define HAL_MEMORY_TOTAL_KB %u\n' % total_memory)
f.write('#define HAL_RAM0_START 0x%08x\n' % ram_map[0][0])
ram_reserve_start = get_config('RAM_RESERVE_START', default=0, type=int)
if ram_reserve_start > 0:
f.write('#define HAL_RAM_RESERVE_START 0x%08x\n' % ram_reserve_start)
f.write('\n// CPU serial number (12 bytes)\n')
f.write('#define UDID_START 0x%08x\n\n' % get_mcu_config('UDID_START', True))
f.write('\n// APJ board ID (for bootloaders)\n')
f.write('#define APJ_BOARD_ID %s\n' % get_config('APJ_BOARD_ID'))
lib = get_mcu_lib(mcu_type)
build_info = lib.build
if mcu_series.startswith("STM32F1"):
cortex = "cortex-m3"
env_vars['CPU_FLAGS'] = ["-mcpu=%s" % cortex]
build_info['MCU'] = cortex
else:
cortex = "cortex-m4"
env_vars['CPU_FLAGS'] = [ "-mcpu=%s" % cortex, "-mfpu=fpv4-sp-d16", "-mfloat-abi=hard"]
build_info['MCU'] = cortex
if not args.bootloader:
env_vars['CPU_FLAGS'].append('-u_printf_float')
build_info['ENV_UDEFS'] = "-DCHPRINTF_USE_FLOAT=1"
# setup build variables
for v in build_info.keys():
build_flags.append('%s=%s' % (v, build_info[v]))
# setup for bootloader build
if args.bootloader:
f.write('''
#define HAL_BOOTLOADER_BUILD TRUE
#define HAL_USE_ADC FALSE
#define HAL_USE_EXT FALSE
#define HAL_NO_UARTDRIVER
#define HAL_NO_PRINTF
#define HAL_NO_CCM
#define CH_DBG_STATISTICS FALSE
#define CH_CFG_USE_TM FALSE
#define CH_CFG_USE_REGISTRY FALSE
#define CH_CFG_USE_WAITEXIT FALSE
#define CH_CFG_USE_DYNAMIC FALSE
#define CH_CFG_USE_MEMPOOLS FALSE
#define CH_CFG_USE_OBJ_FIFOS FALSE
#define CH_DBG_FILL_THREADS FALSE
#define CH_CFG_USE_SEMAPHORES FALSE
#define CH_CFG_USE_HEAP FALSE
#define CH_CFG_USE_MUTEXES FALSE
#define CH_CFG_USE_CONDVARS FALSE
#define CH_CFG_USE_CONDVARS_TIMEOUT FALSE
#define CH_CFG_USE_EVENTS FALSE
#define CH_CFG_USE_EVENTS_TIMEOUT FALSE
#define CH_CFG_USE_MESSAGES FALSE
#define CH_CFG_USE_MAILBOXES FALSE
#define CH_CFG_USE_FACTORY FALSE
#define CH_CFG_USE_MEMCORE FALSE
#define HAL_USE_I2C FALSE
#define HAL_USE_PWM FALSE
''')
if env_vars.get('ROMFS_UNCOMPRESSED', False):
f.write('#define HAL_ROMFS_UNCOMPRESSED\n')
def write_ldscript(fname):
'''write ldscript.ld for this board'''
flash_size = get_config('FLASH_USE_MAX_KB', type=int, default=0)
if flash_size == 0:
flash_size = get_config('FLASH_SIZE_KB', type=int)
# space to reserve for bootloader and storage at start of flash
flash_reserve_start = get_config(
'FLASH_RESERVE_START_KB', default=16, type=int)
env_vars['FLASH_RESERVE_START_KB'] = str(flash_reserve_start)
# space to reserve for storage at end of flash
flash_reserve_end = get_config('FLASH_RESERVE_END_KB', default=0, type=int)
# ram layout
ram_map = get_mcu_config('RAM_MAP', True)
flash_base = 0x08000000 + flash_reserve_start * 1024
if not args.bootloader:
flash_length = flash_size - (flash_reserve_start + flash_reserve_end)
else:
flash_length = get_config('FLASH_BOOTLOADER_LOAD_KB', type=int)
print("Generating ldscript.ld")
f = open(fname, 'w')
ram0_start = ram_map[0][0]
ram0_len = ram_map[0][1] * 1024
# possibly reserve some memory for app/bootloader comms
ram_reserve_start = get_config('RAM_RESERVE_START', default=0, type=int)
ram0_start += ram_reserve_start
ram0_len -= ram_reserve_start
f.write('''/* generated ldscript.ld */
MEMORY
{
flash : org = 0x%08x, len = %uK
ram0 : org = 0x%08x, len = %u
}
INCLUDE common.ld
''' % (flash_base, flash_length, ram0_start, ram0_len))
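# Illustrative sketch (not in the original file): with hypothetical values
# FLASH_SIZE_KB 1024, FLASH_RESERVE_START_KB 16, FLASH_RESERVE_END_KB 0 and a first
# RAM_MAP region of (0x20000000, 128, flags), the generated ldscript.ld would read
#   flash : org = 0x08004000, len = 1008K
#   ram0  : org = 0x20000000, len = 131072
# (0x08000000 + 16*1024 = 0x08004000; 1024 - 16 - 0 = 1008; 128*1024 = 131072).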
def copy_common_linkerscript(outdir, hwdef):
dirpath = os.path.dirname(hwdef)
shutil.copy(os.path.join(dirpath, "../common/common.ld"),
os.path.join(outdir, "common.ld"))
def write_USB_config(f):
'''write USB config defines'''
if not have_type_prefix('OTG'):
return
f.write('// USB configuration\n')
f.write('#define HAL_USB_VENDOR_ID %s\n' % get_config('USB_VENDOR', default=0x0483)) # default to ST
f.write('#define HAL_USB_PRODUCT_ID %s\n' % get_config('USB_PRODUCT', default=0x5740))
f.write('#define HAL_USB_STRING_MANUFACTURER "%s"\n' % get_config("USB_STRING_MANUFACTURER", default="ArduPilot"))
default_product = "%BOARD%"
if args.bootloader:
default_product += "-BL"
f.write('#define HAL_USB_STRING_PRODUCT "%s"\n' % get_config("USB_STRING_PRODUCT", default=default_product))
f.write('#define HAL_USB_STRING_SERIAL "%s"\n' % get_config("USB_STRING_SERIAL", default="%SERIAL%"))
f.write('\n\n')
def write_SPI_table(f):
'''write SPI device table'''
f.write('\n// SPI device table\n')
devlist = []
for dev in spidev:
if len(dev) != 7:
print("Badly formed SPIDEV line %s" % dev)
name = '"' + dev[0] + '"'
bus = dev[1]
devid = dev[2]
cs = dev[3]
mode = dev[4]
lowspeed = dev[5]
highspeed = dev[6]
if not bus.startswith('SPI') or not bus in spi_list:
error("Bad SPI bus in SPIDEV line %s" % dev)
if not devid.startswith('DEVID') or not is_int(devid[5:]):
error("Bad DEVID in SPIDEV line %s" % dev)
if not cs in bylabel or not bylabel[cs].is_CS():
error("Bad CS pin in SPIDEV line %s" % dev)
if not mode in ['MODE0', 'MODE1', 'MODE2', 'MODE3']:
error("Bad MODE in SPIDEV line %s" % dev)
if not lowspeed.endswith('*MHZ') and not lowspeed.endswith('*KHZ'):
error("Bad lowspeed value %s in SPIDEV line %s" % (lowspeed, dev))
if not highspeed.endswith('*MHZ') and not highspeed.endswith('*KHZ'):
error("Bad highspeed value %s in SPIDEV line %s" % (highspeed,
dev))
cs_pin = bylabel[cs]
pal_line = 'PAL_LINE(GPIO%s,%uU)' % (cs_pin.port, cs_pin.pin)
devidx = len(devlist)
f.write(
'#define HAL_SPI_DEVICE%-2u SPIDesc(%-17s, %2u, %2u, %-19s, SPIDEV_%s, %7s, %7s)\n'
% (devidx, name, spi_list.index(bus), int(devid[5:]), pal_line,
mode, lowspeed, highspeed))
devlist.append('HAL_SPI_DEVICE%u' % devidx)
f.write('#define HAL_SPI_DEVICE_LIST %s\n\n' % ','.join(devlist))
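# Illustrative sketch (not in the original file): a hwdef line such as
#   SPIDEV ms5611 SPI1 DEVID1 BARO_CS MODE3 20*MHZ 20*MHZ
# (device and label names hypothetical) would produce a table entry of the form
#   #define HAL_SPI_DEVICE0 SPIDesc("ms5611", 0, 1, PAL_LINE(GPIOx,yU), SPIDEV_MODE3, 20*MHZ, 20*MHZ)
# where the PAL_LINE port/pin come from the pin carrying the BARO_CS label.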
def write_SPI_config(f):
'''write SPI config defines'''
global spi_list
for t in bytype.keys():
if t.startswith('SPI'):
spi_list.append(t)
spi_list = sorted(spi_list)
if len(spi_list) == 0:
f.write('#define HAL_USE_SPI FALSE\n')
return
devlist = []
for dev in spi_list:
n = int(dev[3:])
devlist.append('HAL_SPI%u_CONFIG' % n)
f.write(
'#define HAL_SPI%u_CONFIG { &SPID%u, %u, STM32_SPI_SPI%u_DMA_STREAMS }\n'
% (n, n, n, n))
f.write('#define HAL_SPI_BUS_LIST %s\n\n' % ','.join(devlist))
write_SPI_table(f)
def parse_spi_device(dev):
'''parse a SPI:xxx device item'''
a = dev.split(':')
if len(a) != 2:
error("Bad SPI device: %s" % dev)
return 'hal.spi->get_device("%s")' % a[1]
def parse_i2c_device(dev):
'''parse a I2C:xxx:xxx device item'''
a = dev.split(':')
if len(a) != 3:
error("Bad I2C device: %s" % dev)
busaddr = int(a[2],base=0)
if a[1] == 'ALL_EXTERNAL':
return ('FOREACH_I2C_EXTERNAL(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
elif a[1] == 'ALL_INTERNAL':
return ('FOREACH_I2C_INTERNAL(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
elif a[1] == 'ALL':
return ('FOREACH_I2C(b)', 'GET_I2C_DEVICE(b,0x%02x)' % (busaddr))
busnum = int(a[1])
return ('', 'GET_I2C_DEVICE(%u,0x%02x)' % (busnum, busaddr))
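# Illustrative sketch (not in the original file), derived directly from the parser above:
#   parse_i2c_device('I2C:ALL_EXTERNAL:0x38') -> ('FOREACH_I2C_EXTERNAL(b)', 'GET_I2C_DEVICE(b,0x38)')
#   parse_i2c_device('I2C:0:0x38')            -> ('', 'GET_I2C_DEVICE(0,0x38)')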
def seen_str(dev):
'''return string representation of device for checking for duplicates'''
return str(dev[:2])
def write_IMU_config(f):
'''write IMU config defines'''
global imu_list
devlist = []
wrapper = ''
seen = set()
for dev in imu_list:
if seen_str(dev) in seen:
error("Duplicate IMU: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
for i in range(1,len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
n = len(devlist)+1
devlist.append('HAL_INS_PROBE%u' % n)
f.write(
'#define HAL_INS_PROBE%u %s ADD_BACKEND(AP_InertialSensor_%s::probe(*this,%s))\n'
% (n, wrapper, driver, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_INS_PROBE_LIST %s\n\n' % ';'.join(devlist))
def write_MAG_config(f):
'''write MAG config defines'''
global compass_list
devlist = []
seen = set()
for dev in compass_list:
if seen_str(dev) in seen:
error("Duplicate MAG: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
probe = 'probe'
wrapper = ''
a = driver.split(':')
driver = a[0]
if len(a) > 1 and a[1].startswith('probe'):
probe = a[1]
for i in range(1,len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
n = len(devlist)+1
devlist.append('HAL_MAG_PROBE%u' % n)
f.write(
'#define HAL_MAG_PROBE%u %s ADD_BACKEND(DRIVER_%s, AP_Compass_%s::%s(%s))\n'
% (n, wrapper, driver, driver, probe, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_MAG_PROBE_LIST %s\n\n' % ';'.join(devlist))
def write_BARO_config(f):
'''write barometer config defines'''
global baro_list
devlist = []
seen = set()
for dev in baro_list:
if seen_str(dev) in seen:
error("Duplicate BARO: %s" % seen_str(dev))
seen.add(seen_str(dev))
driver = dev[0]
probe = 'probe'
wrapper = ''
a = driver.split(':')
driver = a[0]
if len(a) > 1 and a[1].startswith('probe'):
probe = a[1]
for i in range(1,len(dev)):
if dev[i].startswith("SPI:"):
dev[i] = parse_spi_device(dev[i])
elif dev[i].startswith("I2C:"):
(wrapper, dev[i]) = parse_i2c_device(dev[i])
if dev[i].startswith('hal.i2c_mgr'):
dev[i] = 'std::move(%s)' % dev[i]
n = len(devlist)+1
devlist.append('HAL_BARO_PROBE%u' % n)
f.write(
'#define HAL_BARO_PROBE%u %s ADD_BACKEND(AP_Baro_%s::%s(*this,%s))\n'
% (n, wrapper, driver, probe, ','.join(dev[1:])))
if len(devlist) > 0:
f.write('#define HAL_BARO_PROBE_LIST %s\n\n' % ';'.join(devlist))
def get_gpio_bylabel(label):
'''get GPIO(n) setting on a pin label, or -1'''
p = bylabel.get(label)
if p is None:
return -1
return p.extra_value('GPIO', type=int, default=-1)
def get_extra_bylabel(label, name, default=None):
'''get extra setting for a label by name'''
p = bylabel.get(label)
if p is None:
return default
return p.extra_value(name, type=str, default=default)
def write_UART_config(f):
'''write UART config defines'''
if get_config('UART_ORDER', required=False) is None:
return
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
# write out driver declarations for HAL_ChibiOS_Class.cpp
devnames = "ABCDEFGH"
sdev = 0
idx = 0
for dev in uart_list:
if dev == 'EMPTY':
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
else:
f.write(
'#define HAL_UART%s_DRIVER ChibiOS::UARTDriver uart%sDriver(%u)\n'
% (devnames[idx], devnames[idx], sdev))
sdev += 1
idx += 1
for idx in range(len(uart_list), len(devnames)):
f.write('#define HAL_UART%s_DRIVER Empty::UARTDriver uart%sDriver\n' %
(devnames[idx], devnames[idx]))
if 'IOMCU_UART' in config:
f.write('#define HAL_WITH_IO_MCU 1\n')
idx = len(uart_list)
f.write('#define HAL_UART_IOMCU_IDX %u\n' % idx)
f.write(
'#define HAL_UART_IO_DRIVER ChibiOS::UARTDriver uart_io(HAL_UART_IOMCU_IDX)\n'
)
uart_list.append(config['IOMCU_UART'][0])
f.write('#define HAL_HAVE_SERVO_VOLTAGE 1\n') # assume the IO MCU guarantees servo voltage monitoring
else:
f.write('#define HAL_WITH_IO_MCU 0\n')
f.write('\n')
need_uart_driver = False
OTG2_index = None
devlist = []
for dev in uart_list:
if dev.startswith('UART'):
n = int(dev[4:])
elif dev.startswith('USART'):
n = int(dev[5:])
elif dev.startswith('OTG'):
n = int(dev[3:])
elif dev.startswith('EMPTY'):
continue
else:
error("Invalid element %s in UART_ORDER" % dev)
devlist.append('HAL_%s_CONFIG' % dev)
if dev + "_RTS" in bylabel:
p = bylabel[dev + '_RTS']
rts_line = 'PAL_LINE(GPIO%s,%uU)' % (p.port, p.pin)
else:
rts_line = "0"
if dev.startswith('OTG2'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU2, true, false, 0, 0, false, 0, 0}\n'
% dev)
OTG2_index = uart_list.index(dev)
elif dev.startswith('OTG'):
f.write(
'#define HAL_%s_CONFIG {(BaseSequentialStream*) &SDU1, true, false, 0, 0, false, 0, 0}\n'
% dev)
else:
need_uart_driver = True
f.write(
"#define HAL_%s_CONFIG { (BaseSequentialStream*) &SD%u, false, "
% (dev, n))
if mcu_series.startswith("STM32F1") or mcu_series.startswith("STM32F3"):
f.write("%s, " % rts_line)
else:
f.write("STM32_%s_RX_DMA_CONFIG, STM32_%s_TX_DMA_CONFIG, %s, " %
(dev, dev, rts_line))
# add inversion pins, if any
f.write("%d, " % get_gpio_bylabel(dev + "_RXINV"))
f.write("%s, " % get_extra_bylabel(dev + "_RXINV", "POL", "0"))
f.write("%d, " % get_gpio_bylabel(dev + "_TXINV"))
f.write("%s}\n" % get_extra_bylabel(dev + "_TXINV", "POL", "0"))
if OTG2_index is not None:
f.write('#define HAL_OTG2_UART_INDEX %d\n' % OTG2_index)
f.write('''
#if HAL_WITH_UAVCAN
#ifndef HAL_OTG2_PROTOCOL
#define HAL_OTG2_PROTOCOL SerialProtocol_SLCAN
#endif
#define HAL_SERIAL%d_PROTOCOL HAL_OTG2_PROTOCOL
#define HAL_SERIAL%d_BAUD 115200
#endif
''' % (OTG2_index, OTG2_index))
f.write('#define HAL_HAVE_DUAL_USB_CDC 1\n')
f.write('#define HAL_UART_DEVICE_LIST %s\n\n' % ','.join(devlist))
if not need_uart_driver and not args.bootloader:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL HAL_USE_SERIAL_USB
#endif
''')
num_uarts = len(devlist)
if 'IOMCU_UART' in config:
num_uarts -= 1
f.write('#define HAL_UART_NUM_SERIAL_PORTS %u\n' % num_uarts)
def write_UART_config_bootloader(f):
'''write UART config defines'''
if get_config('UART_ORDER', required=False) is None:
return
uart_list = config['UART_ORDER']
f.write('\n// UART configuration\n')
devlist = []
have_uart = False
OTG2_index = None
for u in uart_list:
if u.startswith('OTG2'):
devlist.append('(BaseChannel *)&SDU2')
OTG2_index = uart_list.index(u)
elif u.startswith('OTG'):
devlist.append('(BaseChannel *)&SDU1')
else:
unum = int(u[-1])
devlist.append('(BaseChannel *)&SD%u' % unum)
have_uart = True
f.write('#define BOOTLOADER_DEV_LIST %s\n' % ','.join(devlist))
if OTG2_index is not None:
f.write('#define HAL_OTG2_UART_INDEX %d\n' % OTG2_index)
if not have_uart:
f.write('''
#ifndef HAL_USE_SERIAL
#define HAL_USE_SERIAL FALSE
#endif
''')
def write_I2C_config(f):
'''write I2C config defines'''
if not have_type_prefix('I2C'):
print("No I2C peripherals")
f.write('''
#ifndef HAL_USE_I2C
#define HAL_USE_I2C FALSE
#endif
''')
return
if not 'I2C_ORDER' in config:
print("Missing I2C_ORDER config")
return
i2c_list = config['I2C_ORDER']
f.write('// I2C configuration\n')
if len(i2c_list) == 0:
error("I2C_ORDER invalid")
devlist = []
# write out config structures
for dev in i2c_list:
if not dev.startswith('I2C') or dev[3] not in "1234":
error("Bad I2C_ORDER element %s" % dev)
n = int(dev[3:])
devlist.append('HAL_I2C%u_CONFIG' % n)
f.write('''
#if defined(STM32_I2C_I2C%u_RX_DMA_STREAM) && defined(STM32_I2C_I2C%u_TX_DMA_STREAM)
#define HAL_I2C%u_CONFIG { &I2CD%u, STM32_I2C_I2C%u_RX_DMA_STREAM, STM32_I2C_I2C%u_TX_DMA_STREAM, HAL_GPIO_PIN_I2C%u_SCL, HAL_GPIO_PIN_I2C%u_SDA }
#else
#define HAL_I2C%u_CONFIG { &I2CD%u, SHARED_DMA_NONE, SHARED_DMA_NONE, HAL_GPIO_PIN_I2C%u_SCL, HAL_GPIO_PIN_I2C%u_SDA }
#endif
'''
% (n, n, n, n, n, n, n, n, n, n, n, n))
f.write('\n#define HAL_I2C_DEVICE_LIST %s\n\n' % ','.join(devlist))
def parse_timer(str):
'''parse a timer channel string, e.g. TIM8_CH2N'''
result = re.match(r'TIM([0-9]*)_CH([1234])(N?)', str)
if result:
tim = int(result.group(1))
chan = int(result.group(2))
compl = result.group(3) == 'N'
if tim < 1 or tim > 17:
error("Bad timer number %s in %s" % (tim, str))
return (tim, chan, compl)
else:
error("Bad timer definition %s" % str)
def write_PWM_config(f):
'''write PWM config defines'''
rc_in = None
rc_in_int = None
alarm = None
pwm_out = []
pwm_timers = []
for l in bylabel.keys():
p = bylabel[l]
if p.type.startswith('TIM'):
if p.has_extra('RCIN'):
rc_in = p
elif p.has_extra('RCININT'):
rc_in_int = p
elif p.has_extra('ALARM'):
alarm = p
else:
if p.extra_value('PWM', type=int) is not None:
pwm_out.append(p)
if p.type not in pwm_timers:
pwm_timers.append(p.type)
if not pwm_out and not alarm:
print("No PWM output defined")
f.write('''
#ifndef HAL_USE_PWM
#define HAL_USE_PWM FALSE
#endif
''')
if rc_in is not None:
(n, chan, compl) = parse_timer(rc_in.label)
if compl:
# it is an inverted channel
f.write('#define HAL_RCIN_IS_INVERTED\n')
if chan not in [1, 2]:
error(
"Bad channel number, only channel 1 and 2 supported for RCIN")
f.write('// RC input config\n')
f.write('#define HAL_USE_ICU TRUE\n')
f.write('#define STM32_ICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCIN_ICU_TIMER ICUD%u\n' % n)
f.write('#define RCIN_ICU_CHANNEL ICU_CHANNEL_%u\n' % chan)
f.write('#define STM32_RCIN_DMA_STREAM STM32_TIM_TIM%u_CH%u_DMA_STREAM\n' % (n, chan))
f.write('#define STM32_RCIN_DMA_CHANNEL STM32_TIM_TIM%u_CH%u_DMA_CHAN\n' % (n, chan))
f.write('\n')
if rc_in_int is not None:
(n, chan, compl) = parse_timer(rc_in_int.label)
if compl:
error('Complementary channel is not supported for RCININT %s' % rc_in_int.label)
f.write('// RC input config\n')
f.write('#define HAL_USE_EICU TRUE\n')
f.write('#define STM32_EICU_USE_TIM%u TRUE\n' % n)
f.write('#define RCININT_EICU_TIMER EICUD%u\n' % n)
f.write('#define RCININT_EICU_CHANNEL EICU_CHANNEL_%u\n' % chan)
f.write('\n')
if alarm is not None:
(n, chan, compl) = parse_timer(alarm.label)
if compl:
error("Complementary channel is not supported for ALARM %s" % alarm.label)
f.write('\n')
f.write('// Alarm PWM output config\n')
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
pwm_clock = 1000000
period = 1000
f.write('''#define HAL_PWM_ALARM \\
{ /* pwmGroup */ \\
%u, /* Timer channel */ \\
{ /* PWMConfig */ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ /* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, \\
0, 0 \\
}, \\
&PWMD%u /* PWMDriver* */ \\
}\n''' %
(chan-1, pwm_clock, period, chan_mode[0],
chan_mode[1], chan_mode[2], chan_mode[3], n))
else:
f.write('\n')
f.write('// No Alarm output pin defined\n')
f.write('#undef HAL_PWM_ALARM\n')
f.write('\n')
f.write('// PWM timer config\n')
for t in sorted(pwm_timers):
n = int(t[3:])
f.write('#define STM32_PWM_USE_TIM%u TRUE\n' % n)
f.write('#define STM32_TIM%u_SUPPRESS_ISR\n' % n)
f.write('\n')
f.write('// PWM output config\n')
groups = []
have_complementary = False
for t in sorted(pwm_timers):
group = len(groups) + 1
n = int(t[3:])
chan_list = [255, 255, 255, 255]
chan_mode = [
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED',
'PWM_OUTPUT_DISABLED', 'PWM_OUTPUT_DISABLED'
]
alt_functions = [ 0, 0, 0, 0 ]
pal_lines = [ '0', '0', '0', '0' ]
for p in pwm_out:
if p.type != t:
continue
(n, chan, compl) = parse_timer(p.label)
pwm = p.extra_value('PWM', type=int)
chan_list[chan - 1] = pwm - 1
if compl:
chan_mode[chan - 1] = 'PWM_COMPLEMENTARY_OUTPUT_ACTIVE_HIGH'
have_complementary = True
else:
chan_mode[chan - 1] = 'PWM_OUTPUT_ACTIVE_HIGH'
alt_functions[chan - 1] = p.af
pal_lines[chan - 1] = 'PAL_LINE(GPIO%s, %uU)' % (p.port, p.pin)
groups.append('HAL_PWM_GROUP%u' % group)
if n in [1, 8]:
# only the advanced timers do 8MHz clocks
advanced_timer = 'true'
else:
advanced_timer = 'false'
pwm_clock = 1000000
period = 20000 * pwm_clock / 1000000
f.write('''#if defined(STM32_TIM_TIM%u_UP_DMA_STREAM) && defined(STM32_TIM_TIM%u_UP_DMA_CHAN)
# define HAL_PWM%u_DMA_CONFIG true, STM32_TIM_TIM%u_UP_DMA_STREAM, STM32_TIM_TIM%u_UP_DMA_CHAN
#else
# define HAL_PWM%u_DMA_CONFIG false, 0, 0
#endif\n''' % (n, n, n, n, n, n))
f.write('''#define HAL_PWM_GROUP%u { %s, \\
{%u, %u, %u, %u}, \\
/* Group Initial Config */ \\
{ \\
%u, /* PWM clock frequency. */ \\
%u, /* Initial PWM period 20ms. */ \\
NULL, /* no callback */ \\
{ \\
/* Channel Config */ \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL}, \\
{%s, NULL} \\
}, 0, 0}, &PWMD%u, \\
HAL_PWM%u_DMA_CONFIG, \\
{ %u, %u, %u, %u }, \\
{ %s, %s, %s, %s }}\n''' %
(group, advanced_timer,
chan_list[0], chan_list[1], chan_list[2], chan_list[3],
pwm_clock, period,
chan_mode[0], chan_mode[1], chan_mode[2], chan_mode[3],
n, n,
alt_functions[0], alt_functions[1], alt_functions[2], alt_functions[3],
pal_lines[0], pal_lines[1], pal_lines[2], pal_lines[3]))
f.write('#define HAL_PWM_GROUPS %s\n\n' % ','.join(groups))
if have_complementary:
f.write('#define STM32_PWM_USE_ADVANCED TRUE\n')
def write_ADC_config(f):
'''write ADC config defines'''
f.write('// ADC config\n')
adc_chans = []
for l in bylabel:
p = bylabel[l]
if not p.type.startswith('ADC'):
continue
chan = get_ADC1_chan(mcu_type, p.portpin)
scale = p.extra_value('SCALE', default=None)
if p.label == 'VDD_5V_SENS':
f.write('#define ANALOG_VCC_5V_PIN %u\n' % chan)
f.write('#define HAL_HAVE_BOARD_VOLTAGE 1\n')
if p.label == 'FMU_SERVORAIL_VCC_SENS':
f.write('#define FMU_SERVORAIL_ADC_CHAN %u\n' % chan)
f.write('#define HAL_HAVE_SERVO_VOLTAGE 1\n')
adc_chans.append((chan, scale, p.label, p.portpin))
adc_chans = sorted(adc_chans)
vdd = get_config('STM32_VDD')
if vdd[-1] == 'U':
vdd = vdd[:-1]
vdd = float(vdd) * 0.01
f.write('#define HAL_ANALOG_PINS { \\\n')
for (chan, scale, label, portpin) in adc_chans:
scale_str = '%.2f/4096' % vdd
if scale is not None and scale != '1':
scale_str = scale + '*' + scale_str
f.write('{ %2u, %12s }, /* %s %s */ \\\n' % (chan, scale_str, portpin,
label))
f.write('}\n\n')
def write_GPIO_config(f):
'''write GPIO config defines'''
f.write('// GPIO config\n')
gpios = []
gpioset = set()
for l in bylabel:
p = bylabel[l]
gpio = p.extra_value('GPIO', type=int)
if gpio is None:
continue
if gpio in gpioset:
error("Duplicate GPIO value %u" % gpio)
gpioset.add(gpio)
# see if it is also a PWM pin
pwm = p.extra_value('PWM', type=int, default=0)
port = p.port
pin = p.pin
gpios.append((gpio, pwm, port, pin, p))
gpios = sorted(gpios)
for (gpio, pwm, port, pin, p) in gpios:
f.write('#define HAL_GPIO_LINE_GPIO%u PAL_LINE(GPIO%s, %2uU)\n' % (gpio, port, pin))
f.write('#define HAL_GPIO_PINS { \\\n')
for (gpio, pwm, port, pin, p) in gpios:
f.write('{ %3u, true, %2u, PAL_LINE(GPIO%s, %2uU)}, /* %s */ \\\n' %
(gpio, pwm, port, pin, p))
# and write #defines for use by config code
f.write('}\n\n')
f.write('// full pin define list\n')
last_label = None
for l in sorted(list(set(bylabel.keys()))):
p = bylabel[l]
label = p.label
label = label.replace('-', '_')
if label == last_label:
continue
last_label = label
f.write('#define HAL_GPIO_PIN_%-20s PAL_LINE(GPIO%s,%uU)\n' %
(label, p.port, p.pin))
f.write('\n')
def bootloader_path():
# always embed a bootloader if it is available
this_dir = os.path.realpath(__file__)
rootdir = os.path.relpath(os.path.join(this_dir, "../../../../.."))
hwdef_dirname = os.path.basename(os.path.dirname(args.hwdef))
bootloader_filename = "%s_bl.bin" % (hwdef_dirname,)
bootloader_path = os.path.join(rootdir,
"Tools",
"bootloaders",
bootloader_filename)
if os.path.exists(bootloader_path):
return os.path.realpath(bootloader_path)
return None
def add_bootloader():
'''add the bootloader to ROMFS if one is available'''
bp = bootloader_path()
if bp is not None:
romfs["bootloader.bin"] = bp
def write_ROMFS(outdir):
'''create ROMFS embedded header'''
romfs_list = []
for k in romfs.keys():
romfs_list.append((k, romfs[k]))
env_vars['ROMFS_FILES'] = romfs_list
def setup_apj_IDs():
'''setup the APJ board IDs'''
env_vars['APJ_BOARD_ID'] = get_config('APJ_BOARD_ID')
env_vars['APJ_BOARD_TYPE'] = get_config('APJ_BOARD_TYPE', default=mcu_type)
def write_peripheral_enable(f):
'''write peripheral enable lines'''
f.write('// peripherals enabled\n')
for type in sorted(bytype.keys()):
if type.startswith('USART') or type.startswith('UART'):
dstr = 'STM32_SERIAL_USE_%-6s' % type
f.write('#ifndef %s\n' % dstr)
f.write('#define %s TRUE\n' % dstr)
f.write('#endif\n')
if type.startswith('SPI'):
f.write('#define STM32_SPI_USE_%s TRUE\n' % type)
if type.startswith('OTG'):
f.write('#define STM32_USB_USE_%s TRUE\n' % type)
if type.startswith('I2C'):
f.write('#define STM32_I2C_USE_%s TRUE\n' % type)
def get_dma_exclude(periph_list):
'''return list of DMA devices to exclude from DMA'''
dma_exclude = []
for periph in periph_list:
if periph not in bylabel:
continue
p = bylabel[periph]
if p.has_extra('NODMA'):
dma_exclude.append(periph)
return dma_exclude
def write_hwdef_header(outfilename):
'''write hwdef header file'''
print("Writing hwdef setup in %s" % outfilename)
f = open(outfilename, 'w')
f.write('''/*
generated hardware definitions from hwdef.dat - DO NOT EDIT
*/
#pragma once
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
''')
write_mcu_config(f)
write_USB_config(f)
write_SPI_config(f)
write_ADC_config(f)
write_GPIO_config(f)
write_IMU_config(f)
write_MAG_config(f)
write_BARO_config(f)
write_peripheral_enable(f)
setup_apj_IDs()
dma_resolver.write_dma_header(f, periph_list, mcu_type,
dma_exclude=get_dma_exclude(periph_list),
dma_priority=get_config('DMA_PRIORITY',default='TIM* SPI*', spaces=True),
dma_noshare=get_config('DMA_NOSHARE',default='', spaces=True))
if not args.bootloader:
write_PWM_config(f)
write_I2C_config(f)
write_UART_config(f)
else:
write_UART_config_bootloader(f)
add_bootloader()
if len(romfs) > 0:
f.write('#define HAL_HAVE_AP_ROMFS_EMBEDDED_H 1\n')
if mcu_series.startswith('STM32F1'):
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_OUTPUT_PP(n) (0U << (((n) & 7) * 4))
#define PIN_MODE_OUTPUT_OD(n) (4U << (((n) & 7) * 4))
#define PIN_MODE_AF_PP(n) (8U << (((n) & 7) * 4))
#define PIN_MODE_AF_OD(n) (12U << (((n) & 7) * 4))
#define PIN_MODE_ANALOG(n) (0U << (((n) & 7) * 4))
#define PIN_MODE_NOPULL(n) (4U << (((n) & 7) * 4))
#define PIN_MODE_PUD(n) (8U << (((n) & 7) * 4))
#define PIN_SPEED_MEDIUM(n) (1U << (((n) & 7) * 4))
#define PIN_SPEED_LOW(n) (2U << (((n) & 7) * 4))
#define PIN_SPEED_HIGH(n) (3U << (((n) & 7) * 4))
#define PIN_ODR_HIGH(n) (1U << (((n) & 15)))
#define PIN_ODR_LOW(n) (0U << (((n) & 15)))
#define PIN_PULLUP(n) (1U << (((n) & 15)))
#define PIN_PULLDOWN(n) (0U << (((n) & 15)))
#define PIN_UNDEFINED(n) PIN_INPUT_PUD(n)
''')
else:
f.write('''
/*
* I/O ports initial setup, this configuration is established soon after reset
* in the initialization code.
* Please refer to the STM32 Reference Manual for details.
*/
#define PIN_MODE_INPUT(n) (0U << ((n) * 2U))
#define PIN_MODE_OUTPUT(n) (1U << ((n) * 2U))
#define PIN_MODE_ALTERNATE(n) (2U << ((n) * 2U))
#define PIN_MODE_ANALOG(n) (3U << ((n) * 2U))
#define PIN_ODR_LOW(n) (0U << (n))
#define PIN_ODR_HIGH(n) (1U << (n))
#define PIN_OTYPE_PUSHPULL(n) (0U << (n))
#define PIN_OTYPE_OPENDRAIN(n) (1U << (n))
#define PIN_OSPEED_VERYLOW(n) (0U << ((n) * 2U))
#define PIN_OSPEED_LOW(n) (1U << ((n) * 2U))
#define PIN_OSPEED_MEDIUM(n) (2U << ((n) * 2U))
#define PIN_OSPEED_HIGH(n) (3U << ((n) * 2U))
#define PIN_PUPDR_FLOATING(n) (0U << ((n) * 2U))
#define PIN_PUPDR_PULLUP(n) (1U << ((n) * 2U))
#define PIN_PUPDR_PULLDOWN(n) (2U << ((n) * 2U))
#define PIN_AFIO_AF(n, v) ((v) << (((n) % 8U) * 4U))
''')
for port in sorted(ports):
f.write("/* PORT%s:\n" % port)
for pin in range(pincount[port]):
p = portmap[port][pin]
if p.label is not None:
f.write(" %s\n" % p)
f.write("*/\n\n")
if pincount[port] == 0:
# handle blank ports
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s 0x0\n" % (port,
vtype))
f.write("\n\n\n")
continue
for vtype in vtypes:
f.write("#define VAL_GPIO%s_%-7s (" % (p.port, vtype))
first = True
for pin in range(pincount[port]):
p = portmap[port][pin]
modefunc = getattr(p, "get_" + vtype)
v = modefunc()
if v is None:
continue
if not first:
f.write(" | \\\n ")
f.write(v)
first = False
if first:
# there were no pin definitions, use 0
f.write("0")
f.write(")\n\n")
def build_peripheral_list():
'''build a list of peripherals for DMA resolver to work on'''
peripherals = []
done = set()
prefixes = ['SPI', 'USART', 'UART', 'I2C']
for p in allpins:
type = p.type
if type in done:
continue
for prefix in prefixes:
if type.startswith(prefix):
ptx = type + "_TX"
prx = type + "_RX"
peripherals.append(ptx)
peripherals.append(prx)
if not ptx in bylabel:
bylabel[ptx] = p
if not prx in bylabel:
bylabel[prx] = p
if type.startswith('ADC'):
peripherals.append(type)
if type.startswith('SDIO') or type.startswith('SDMMC'):
if not mcu_series.startswith("STM32H7"):
peripherals.append(type)
if type.startswith('TIM'):
if p.has_extra('RCIN'):
label = p.label
if label[-1] == 'N':
label = label[:-1]
peripherals.append(label)
elif not p.has_extra('ALARM') and not p.has_extra('RCININT'):
# get the TIMn_UP DMA channels for DShot
label = type + '_UP'
if not label in peripherals and not p.has_extra('NODMA'):
peripherals.append(label)
done.add(type)
return peripherals
def write_env_py(filename):
'''write out env.py for environment variables to control the build process'''
# see if board has a defaults.parm file
defaults_filename = os.path.join(os.path.dirname(args.hwdef), 'defaults.parm')
if os.path.exists(defaults_filename) and not args.bootloader:
print("Adding defaults.parm")
env_vars['DEFAULT_PARAMETERS'] = os.path.abspath(defaults_filename)
# CHIBIOS_BUILD_FLAGS is passed to the ChibiOS makefile
env_vars['CHIBIOS_BUILD_FLAGS'] = ' '.join(build_flags)
pickle.dump(env_vars, open(filename, "wb"))
def romfs_add(romfs_filename, filename):
'''add a file to ROMFS'''
romfs[romfs_filename] = filename
def romfs_wildcard(pattern):
'''add a set of files to ROMFS by wildcard'''
base_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '..')
(pattern_dir, pattern) = os.path.split(pattern)
for f in os.listdir(os.path.join(base_path, pattern_dir)):
if fnmatch.fnmatch(f, pattern):
romfs[f] = os.path.join(pattern_dir, f)
def process_line(line):
'''process one line of pin definition file'''
global allpins, imu_list, compass_list, baro_list
a = shlex.split(line)
# keep all config lines for later use
alllines.append(line)
if a[0].startswith('P') and a[0][1] in ports and a[0] in config:
error("Pin %s redefined" % a[0])
config[a[0]] = a[1:]
if a[0] == 'MCU':
global mcu_type, mcu_series
mcu_type = a[2]
mcu_series = a[1]
setup_mcu_type_defaults()
if a[0].startswith('P') and a[0][1] in ports:
# it is a port/pin definition
try:
port = a[0][1]
pin = int(a[0][2:])
label = a[1]
type = a[2]
extra = a[3:]
except Exception:
error("Bad pin line: %s" % a)
return
p = generic_pin(port, pin, label, type, extra)
portmap[port][pin] = p
allpins.append(p)
if not type in bytype:
bytype[type] = []
bytype[type].append(p)
bylabel[label] = p
af = get_alt_function(mcu_type, a[0], label)
if af is not None:
p.af = af
if a[0] == 'SPIDEV':
spidev.append(a[1:])
if a[0] == 'IMU':
imu_list.append(a[1:])
if a[0] == 'COMPASS':
compass_list.append(a[1:])
if a[0] == 'BARO':
baro_list.append(a[1:])
if a[0] == 'ROMFS':
romfs_add(a[1],a[2])
if a[0] == 'ROMFS_WILDCARD':
romfs_wildcard(a[1])
if a[0] == 'undef':
print("Removing %s" % a[1])
config.pop(a[1], '')
bytype.pop(a[1],'')
bylabel.pop(a[1],'')
# also remove all occurrences of defines in previous lines, if any
for line in alllines[:]:
if line.startswith('define') and a[1] == line.split()[1]:
alllines.remove(line)
newpins = []
for pin in allpins:
if pin.type == a[1]:
continue
if pin.label == a[1]:
continue
if pin.portpin == a[1]:
continue
newpins.append(pin)
allpins = newpins
if a[1] == 'IMU':
imu_list = []
if a[1] == 'COMPASS':
compass_list = []
if a[1] == 'BARO':
baro_list = []
if a[0] == 'env':
print("Adding environment %s" % ' '.join(a[1:]))
if len(a[1:]) < 2:
error("Bad env line for %s" % a[0])
env_vars[a[1]] = ' '.join(a[2:])
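# Illustrative hwdef.dat lines handled above (values are hypothetical, not taken from any real board):
#   MCU STM32F7xx STM32F767xx
#   PA10 USART1_RX USART1
#   SPIDEV ms5611 SPI4 DEVID1 MS5611_CS MODE3 20*MHZ 20*MHZ
#   undef USART1_RX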
def process_file(filename):
'''process a hwdef.dat file'''
try:
f = open(filename, "r")
except Exception:
error("Unable to open file %s" % filename)
for line in f.readlines():
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
a = shlex.split(line)
if a[0] == "include" and len(a) > 1:
include_file = a[1]
if include_file[0] != '/':
dir = os.path.dirname(filename)
include_file = os.path.normpath(
os.path.join(dir, include_file))
print("Including %s" % include_file)
process_file(include_file)
else:
process_line(line)
# process input file
process_file(args.hwdef)
outdir = args.outdir
if outdir is None:
outdir = '/tmp'
if not "MCU" in config:
error("Missing MCU type in config")
mcu_type = get_config('MCU', 1)
print("Setup for MCU %s" % mcu_type)
# build a list of peripherals for the DMA resolver
periph_list = build_peripheral_list()
# write out hwdef.h
write_hwdef_header(os.path.join(outdir, "hwdef.h"))
# write out ldscript.ld
write_ldscript(os.path.join(outdir, "ldscript.ld"))
write_ROMFS(outdir)
# copy the shared linker script into the build directory; it must
# exist in the same directory as the ldscript.ld file we generate.
copy_common_linkerscript(outdir, args.hwdef)
write_env_py(os.path.join(outdir, "env.py"))
|
R-Lefebvre/ardupilot
|
libraries/AP_HAL_ChibiOS/hwdef/scripts/chibios_hwdef.py
|
Python
|
gpl-3.0
| 60,196
|
[
"CRYSTAL"
] |
c4bbc7359d834386a868d216870702cd4e0d4ffabe110489a50ecd439dfad69d
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------
# cpu_socket_expansion
# --------------------
# by Phillip Pearson
# TODO check if I need to enlarge the holes on the CPU socket, because the
# 220-1-40-006 datasheet says to have 0.9mm holes. The pins are 0.46mm dia
# though, so 0.8 should probably be OK.
# A board to make it easier to piggyback on the 6502's pins in an Electron or
# BBC Micro. This converts all the 5V signals to 3.3V, for compatibility with
# FPGAs and newer MCUs.
# I spent ages trying to decide whether this should *only* be to piggyback on
# the pins, or whether it should also enable completely replacing the CPU with
# an FPGA, and eventually settled on the latter, using a small CPLD.
# CPU signals (37 signal pins + 3 power pins)
# - D x 8
# - A x 16
# - Inputs: PHI0_IN
# - Inputs that we can pull low: nSO, nRESET, RDY, nIRQ, nNMI
# - Outputs: PHI1_OUT, PHI2_OUT, SYNC, RNW
# - Ignored: NC_XTLO (crystal connection on some chips), NC_BE, NC_nML
# I'm keeping the expansion connector as vague as possible for now. It
# definitely needs 3 power pins, 8 data pins, and 16 address pins, leaving 13
# more pins that should just connect into the CPLD and be defined later.
# To let us tristate the bus for shadow RAM, we want to be able to drive
# A15:13 to 110 on the motherboard (after removing the socketed OS/BASIC ROM),
# which requires 6 pins on the CPLD.
# CPLD pin usage (max 34):
# - clk_16MHz tap from somewhere on the motherboard
# - 13 GPIO on the connector
# - 10 signals on the CPU
# - 6: A13 x 2, A14 x 2, A15 x 2
# - 4: dbuf and abuf /CE and A->B
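# (These add up to 1 + 13 + 10 + 6 + 4 = 34, i.e. the full pin budget above.)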
# Size constraints
# ----------------
# The board can extend past the top of the 6502 about 1" before hitting the
# top of the Electron's case. In the Electron, the keyboard connector is in
# line with pin 13 of the 6502, and starts 59mm to the left of the left row of
# 6502 pins, and has 22 pins with 0.1" spacing, so is 2.2" (55.88mm) long. The
# RF circuitry starts just after pin 13, so ending the board there is probably
# a good plan.
# In the BBC, the keyboard connector is in line with the bottom of the 6502.
# When making a board to plug into this, if you put a
# Pin_Header_Angled_2x20_Pitch2.54mm at (128.7, 78.54), so the left row of
# pins is at x=128.7, and draw a 3mm thick line left from (102.23, 108.96),
# that should be pretty accurate, including space for the connector in between
# the angled headers... i.e. you have 26.47 mm between the left row of your
# connector and the edge of the keyboard connector, and will probably need an
# L-shaped board. The top of the board can extend up to y=53.14, so you have
# about 55mm from the keyboard connector to the top of the case. The RF
# circuitry means the left side of the board has to finish around (102.23 -
# 25.4 * 1.3) = 69.21.
# Daughterboards
# --------------
# I have two ideas right now. #1 is something to let me plug in my
# miniSpartan6+. #2 is a MachXO-based RAM/flash/SD card board that might
# possibly also include a PiTubeDirect socket.
import sys, os
here = os.path.dirname(sys.argv[0])
sys.path.insert(0, os.path.join(here, "../../third_party/myelin-kicad.pretty"))
import myelin_kicad_pcb
Pin = myelin_kicad_pcb.Pin
# cpu socket and header -- pass through, so same pinout
# cpu_1 / CPU1 / cpu_Axx_1 = connection to motherboard
# cpu_2 / CPU2 / cpu_Axx_2 = socketed 6502
cpu_1, cpu_2 = [
myelin_kicad_pcb.Component(
footprint="Housings_DIP:DIP-40_W15.24mm",
identifier="CPU%d" % (cpuid + 1),
value="6502",
desc=(
"adapter to emulate a 600mil 40-pin DIP, e.g. Digikey 1175-1527-5-ND"
if cpuid == 0
else "600mil 40-pin DIP socket, e.g. Digikey 609-4716-ND"
),
pins=[
# Need to disconnect pin 1 on W65C02S; add a jumper between these two nets
Pin( 1, "VSS", ["cpu_GND_VPB_2" if cpuid else "GND"]),
Pin( 2, "RDY", ["cpu_RDY"]),
Pin( 3, "PHI1_OUT", ["cpu_PHI1_OUT"]),
Pin( 4, "nIRQ", ["cpu_nIRQ"]), # pulled up on the motherboard
# Pin 5 is NC on R6502, /ML on W65C02S; only used in multiprocessor systems
Pin( 5, "NC_nML"), # Isolate this
Pin( 6, "nNMI", ["cpu_nNMI"]), # pulled up on the motherboard
Pin( 7, "SYNC", ["cpu_SYNC"]),
Pin( 8, "VCC", ["5V"]),
Pin( 9, "A0", ["cpu_A0"]),
Pin(10, "A1", ["cpu_A1"]),
Pin(11, "A2", ["cpu_A2"]),
Pin(12, "A3", ["cpu_A3"]),
Pin(13, "A4", ["cpu_A4"]),
Pin(14, "A5", ["cpu_A5"]),
Pin(15, "A6", ["cpu_A6"]),
Pin(16, "A7", ["cpu_A7"]),
Pin(17, "A8", ["cpu_A8"]),
Pin(18, "A9", ["cpu_A9"]),
Pin(19, "A10", ["cpu_A10"]),
Pin(20, "A11", ["cpu_A11"]),
Pin(21, "VSS", ["GND"]),
Pin(22, "A12", ["cpu_A12"]),
Pin(23, "A13", ["cpu_A13_%d" % (cpuid + 1)]),
Pin(24, "A14", ["cpu_A14_%d" % (cpuid + 1)]),
Pin(25, "A15", ["cpu_A15_%d" % (cpuid + 1)]),
Pin(26, "D7", ["cpu_D7"]),
Pin(27, "D6", ["cpu_D6"]),
Pin(28, "D5", ["cpu_D5"]),
Pin(29, "D4", ["cpu_D4"]),
Pin(30, "D3", ["cpu_D3"]),
Pin(31, "D2", ["cpu_D2"]),
Pin(32, "D1", ["cpu_D1"]),
Pin(33, "D0", ["cpu_D0"]),
Pin(34, "RnW", ["cpu_RnW"]),
Pin(35, "NC_XTLO", ["cpu_NC_XTLO"]),
Pin(36, "NC_BE", ["cpu_NC_BE"]), # NC on R6502, BE on W65C02S; WDC says to tie to VDD
Pin(37, "PHI0_IN", ["cpu_PHI0_IN"]), # PHI2_IN on W65C02S
Pin(38, "nSO", ["cpu_nSO"]),
Pin(39, "PHI2_OUT", ["cpu_PHI2_OUT"]),
Pin(40, "nRESET", ["cpu_nRESET"]),
],
) for cpuid in range(2)]
cpu_cap = myelin_kicad_pcb.C0805("100n", "GND", "5V", ref="C1")
cpu_VPB_jumper = myelin_kicad_pcb.Component(
footprint="Pin_Headers:Pin_Header_Straight_1x02_Pitch2.54mm",
identifier="JP1",
desc="1x2 0.1 inch male header",
value="VPB",
pins=[
Pin(1, "A", ["cpu_GND_VPB_2"]),
Pin(2, "B", ["GND"]),
],
)
cpu_BE_pullup = myelin_kicad_pcb.R0805("4k7", "cpu_NC_BE", "5V", ref="R1")
cpld_16MHz_port = myelin_kicad_pcb.Component(
footprint="Pin_Headers:Pin_Header_Straight_1x02_Pitch2.54mm",
identifier="M16",
desc="1x2 0.1 inch male header",
value="16MHz",
pins=[
Pin(1, "A", ["clk_16MHz"]),
Pin(2, "B", ["GND"]),
],
)
# 3v3 regulator for buffers and whatever's on the other side of the connector
regulator = myelin_kicad_pcb.Component(
footprint="TO_SOT_Packages_SMD:SOT-89-3",
identifier="REG",
value="MCP1700T-3302E/MB",
desc="3.3V LDO regulator, e.g. Digikey MCP1700T3302EMBCT-ND. Search for the exact part number because there are many variants.",
pins=[
Pin(2, "VIN", ["5V"]),
Pin(3, "VOUT", ["3V3"]),
Pin(1, "GND", ["GND"]),
],
)
reg_in_cap = myelin_kicad_pcb.C0805("1u", "GND", "5V", ref="C2")
reg_out_cap = myelin_kicad_pcb.C0805("1u", "3V3", "GND", ref="C3")
# Helpful power input/output
ext_power = myelin_kicad_pcb.Component(
footprint="Pin_Headers:Pin_Header_Straight_1x03_Pitch2.54mm",
identifier="EXTPWR",
value="ext pwr",
desc="1x3 0.1 inch male header",
pins=[
Pin(1, "A", ["GND"]),
Pin(2, "B", ["3V3"]),
Pin(3, "C", ["5V"]),
],
)
# bidirectional buffer for data lines, with direction fed by RnW and /OE passed to the expansion connector
# so the CPLD on the other side can signal when data is valid.
data_buf = myelin_kicad_pcb.Component(
footprint="Housings_SSOP:SSOP-20_4.4x6.5mm_Pitch0.65mm",
identifier="DBUF",
value="74LVC245APW",
desc="74LVC245 in TSSOP20 with 4.4mm body width, e.g. Digikey 1727-3105-1-ND",
pins=[
Pin( 1, "A->B", ["dbuf_ext_to_cpu"]),
Pin( 2, "A0", ["ext_D7"]),
Pin( 3, "A1", ["ext_D6"]),
Pin( 4, "A2", ["ext_D5"]),
Pin( 5, "A3", ["ext_D4"]),
Pin( 6, "A4", ["ext_D3"]),
Pin( 7, "A5", ["ext_D2"]),
Pin( 8, "A6", ["ext_D1"]),
Pin( 9, "A7", ["ext_D0"]),
Pin(10, "GND", ["GND"]),
Pin(11, "B7", ["cpu_D0"]),
Pin(12, "B6", ["cpu_D1"]),
Pin(13, "B5", ["cpu_D2"]),
Pin(14, "B4", ["cpu_D3"]),
Pin(15, "B3", ["cpu_D4"]),
Pin(16, "B2", ["cpu_D5"]),
Pin(17, "B1", ["cpu_D6"]),
Pin(18, "B0", ["cpu_D7"]),
Pin(19, "nCE", ["dbuf_nCE"]),
Pin(20, "VCC", ["3V3"]),
],
)
dbuf_cap = myelin_kicad_pcb.C0805("100n", "GND", "3V3", ref="C4")
dbuf_nCE_pullup = myelin_kicad_pcb.R0805("10k", "dbuf_nCE", "3V3", ref="R2")
# unidirectional buffer for address lines, cpu -> expansion connector
# *** maybe make this bidirectional for future expansion
addr_buf_lo = myelin_kicad_pcb.Component(
footprint="Housings_SSOP:SSOP-20_4.4x6.5mm_Pitch0.65mm",
identifier="ABUFL",
value="74LVC245APW",
desc="74LVC245 in TSSOP20 with 4.4mm body width, e.g. Digikey 1727-3105-1-ND",
pins=[
Pin( 1, "A->B", ["abuf_ext_to_cpu"]),
Pin( 2, "A0", ["ext_A0"]),
Pin( 3, "A1", ["ext_A1"]),
Pin( 4, "A2", ["ext_A2"]),
Pin( 5, "A3", ["ext_A3"]),
Pin( 6, "A4", ["ext_A4"]),
Pin( 7, "A5", ["ext_A5"]),
Pin( 8, "A6", ["ext_A6"]),
Pin( 9, "A7", ["ext_A7"]),
Pin(10, "GND", ["GND"]),
Pin(11, "B7", ["cpu_A7"]),
Pin(12, "B6", ["cpu_A6"]),
Pin(13, "B5", ["cpu_A5"]),
Pin(14, "B4", ["cpu_A4"]),
Pin(15, "B3", ["cpu_A3"]),
Pin(16, "B2", ["cpu_A2"]),
Pin(17, "B1", ["cpu_A1"]),
Pin(18, "B0", ["cpu_A0"]),
Pin(19, "nCE", ["abuf_nCE"]),
Pin(20, "VCC", ["3V3"]),
],
)
addr_buf_lo_cap = myelin_kicad_pcb.C0805("100n", "GND", "3V3", ref="C5")
abuf_nCE_pullup = myelin_kicad_pcb.R0805("10k", "abuf_nCE", "3V3", ref="R3")
addr_buf_hi = myelin_kicad_pcb.Component(
footprint="Housings_SSOP:SSOP-20_4.4x6.5mm_Pitch0.65mm",
identifier="ABUFH",
value="74LVC245APW",
desc="74LVC245 in TSSOP20 with 4.4mm body width, e.g. Digikey 1727-3105-1-ND",
pins=[
Pin( 1, "A->B", ["abuf_ext_to_cpu"]),
Pin( 2, "A0", ["ext_A8"]),
Pin( 3, "A1", ["ext_A9"]),
Pin( 4, "A2", ["ext_A10"]),
Pin( 5, "A3", ["ext_A11"]),
Pin( 6, "A4", ["ext_A12"]),
Pin( 7, "A5", ["ext_A13"]),
Pin( 8, "A6", ["ext_A14"]),
Pin( 9, "A7", ["ext_A15"]),
Pin(10, "GND", ["GND"]),
Pin(11, "B7", ["cpu_A15_2"]),
Pin(12, "B6", ["cpu_A14_2"]),
Pin(13, "B5", ["cpu_A13_2"]),
Pin(14, "B4", ["cpu_A12"]),
Pin(15, "B3", ["cpu_A11"]),
Pin(16, "B2", ["cpu_A10"]),
Pin(17, "B1", ["cpu_A9"]),
Pin(18, "B0", ["cpu_A8"]),
Pin(19, "nCE", ["abuf_nCE"]),
Pin(20, "VCC", ["3V3"]),
],
)
addr_buf_hi_cap = myelin_kicad_pcb.C0805("100n", "GND", "3V3", ref="C6")
# CPLD to deal with all the other signals, allowing various different
# hardware modules to plug in.
# For an expansion which doesn't include a CPU, the option lines are:
# - 1 data valid output (to enble expansion to write to the data bus)
# - 8 control/flag inputs
# - RDY
# - nIRQ
# - nNMI
# - SYNC
# - RnW
# - PHI2_OUT
# - 16MHz
# - 4 OC outputs: RDY, /SO, /NMI, /IRQ
# For an expansion which *does* include a CPU (i.e. a big FPGA board):
# - nIRQ, nNMI, RDY, nSO inputs
# - SYNC output
# - RnW output, which also controls D bus direction
# - PHI0_IN input, PHI1_OUT and PHI2_OUT outputs
# - 16MHz input
# + 3 spares
cpld = myelin_kicad_pcb.Component(
footprint="myelin-kicad:xilinx_vqg44",
identifier="PLD",
value="XC9572XL-10VQG44C",
desc="Xilinx XC9572XL in 44-pin 0.8mm TQFP package. Any speed or temperature grade is OK.",
buses=[],
pins=[
Pin(39, "P1.2", ["cpu_RDY"]),
Pin(40, "P1.5", ["dbuf_ext_to_cpu"]),
Pin(41, "P1.6", ["cpu_PHI1_OUT"]),
Pin(42, "P1.8", ["abuf_nCE"]),
Pin(43, "P1.9-GCK1", ["cpu_nIRQ"]),
Pin(44, "P1.11-GCK2", ["dbuf_nCE"]),
Pin(1, "P1.14-GCK3", ["cpu_RnW"]),
Pin(2, "P1.15", ["cpu_SYNC"]),
Pin(3, "P1.17", ["cpu_nNMI"]),
Pin(4, "GND", ["GND"]),
Pin(5, "P3.2", ["cpu_PHI0_IN"]),
Pin(6, "P3.5", ["cpu_nSO"]),
Pin(7, "P3.8", ["cpu_PHI2_OUT"]),
Pin(8, "P3.9", ["cpu_nRESET"]),
Pin(9, "TDI", ["cpld_TDI"]),
Pin(10, "TMS", ["cpld_TMS"]),
Pin(11, "TCK", ["cpld_TCK"]),
Pin(12, "P3.11", ["cpu_A15_1"]),
Pin(13, "P3.14", ["cpu_A15_2"]),
Pin(14, "P3.15", ["cpu_A14_1"]),
Pin(15, "VCCINT_3V3", ["3V3"]),
Pin(16, "P3.17", ["cpu_A14_2"]),
Pin(17, "GND", ["GND"]),
Pin(18, "P3.16", ["cpu_A13_1"]),
Pin(19, "P4.2", ["cpu_A13_2"]),
Pin(20, "P4.5", ["clk_16MHz"]),
Pin(21, "P4.8", ["ext_GP0"]),
Pin(22, "P4.11", ["ext_GP2"]),
Pin(23, "P4.14", ["ext_GP1"]),
Pin(24, "TDO", ["cpld_TDO"]),
Pin(25, "GND", ["GND"]),
Pin(26, "VCCIO_2V5_3V3", ["3V3"]),
Pin(27, "P4.15", ["ext_GP3"]),
Pin(28, "P4.17", ["ext_GP4"]),
Pin(29, "P2.2", ["ext_GP5"]),
Pin(30, "P2.5", ["ext_GP6"]),
Pin(31, "P2.6", ["ext_GP7"]),
Pin(32, "P2.8", ["ext_GP8"]),
Pin(33, "P2.9-GSR", ["ext_GP9"]),
Pin(34, "P2.11-GTS2", ["ext_GP12"]),
Pin(35, "VCCINT_3V3", ["3V3"]),
Pin(36, "P2.14-GTS1", ["ext_GP11"]),
Pin(37, "P2.15", ["ext_GP10"]),
Pin(38, "P2.17", ["abuf_ext_to_cpu"]),
],
)
cpld_cap1 = myelin_kicad_pcb.C0805("100n", "3V3", "GND", ref="C7")
cpld_cap2 = myelin_kicad_pcb.C0805("100n", "3V3", "GND", ref="C8")
myelin_kicad_pcb.update_xilinx_constraints(cpld, os.path.join(here, "../cpld/constraints.ucf"))
# altera jtag header, like in the lc-electronics xc9572xl board
# left column: tck tdo tms nc tdi
# right column: gnd vcc nc nc gnd
cpld_jtag = myelin_kicad_pcb.Component(
footprint="Pin_Headers:Pin_Header_Straight_2x05_Pitch2.54mm",
identifier="JTAG1",
value="jtag",
desc="2x5 header for JTAG programming. Use generic 0.1 inch header strip or Digikey ED1543-ND.",
pins=[
Pin(1, "TCK", ["cpld_TCK"]), # top left
Pin(2, "GND", ["GND"]), # top right
Pin(3, "TDO", ["cpld_TDO"]),
Pin(4, "3V3", ["3V3"]),
Pin(5, "TMS", ["cpld_TMS"]),
Pin(6, "NC"),
Pin(7, "NC"),
Pin(8, "NC"),
Pin(9, "TDI", ["cpld_TDI"]),
Pin(10, "GND", ["GND"]),
],
)
# Expansion connector
# - 5v, 3v3, gnd (input)
# - 16 address lines (input)
# - 8 data lines (i/o)
# - 13 configurable lines, depending on the CPLD firmware.
connector = myelin_kicad_pcb.Component(
footprint="Pin_Headers:Pin_Header_Straight_2x20_Pitch2.54mm",
identifier="CON",
value="2x20 connector for daughterboard",
desc="2x20 0.1 inch male header",
pins=[
Pin( "1", "", ["ext_GP0"]),
Pin( "2", "", ["ext_GP1"]),
Pin( "3", "", ["ext_GP2"]),
Pin( "4", "", ["ext_GP3"]),
Pin( "5", "", ["ext_GP4"]),
Pin( "6", "", ["ext_GP5"]),
Pin( "7", "", ["ext_GP6"]),
Pin( "8", "", ["ext_GP7"]),
Pin( "9", "", ["ext_GP8"]),
Pin("10", "", ["ext_GP9"]),
Pin("11", "", ["ext_GP10"]),
Pin("12", "", ["ext_GP11"]),
Pin("13", "", ["ext_GP12"]),
Pin("14", "", ["GND"]),
Pin("15", "", ["3V3"]),
Pin("16", "", ["5V"]),
Pin("17", "", ["ext_D0"]),
Pin("18", "", ["ext_D1"]),
Pin("19", "", ["ext_D2"]),
Pin("20", "", ["ext_D3"]),
Pin("21", "", ["ext_D4"]),
Pin("22", "", ["ext_D5"]),
Pin("23", "", ["ext_D6"]),
Pin("24", "", ["ext_D7"]),
Pin("25", "", ["ext_A0"]),
Pin("26", "", ["ext_A1"]),
Pin("27", "", ["ext_A2"]),
Pin("28", "", ["ext_A3"]),
Pin("29", "", ["ext_A4"]),
Pin("30", "", ["ext_A5"]),
Pin("31", "", ["ext_A6"]),
Pin("32", "", ["ext_A7"]),
Pin("33", "", ["ext_A8"]),
Pin("34", "", ["ext_A9"]),
Pin("35", "", ["ext_A10"]),
Pin("36", "", ["ext_A11"]),
Pin("37", "", ["ext_A12"]),
Pin("38", "", ["ext_A13"]),
Pin("39", "", ["ext_A14"]),
Pin("40", "", ["ext_A15"]),
],
)
gp1_pullup = myelin_kicad_pcb.R0805("10k", "ext_GP1", "3V3", ref="R4")
gp3_pullup = myelin_kicad_pcb.R0805("10k", "ext_GP3", "3V3", ref="R5")
gp4_pullup = myelin_kicad_pcb.R0805("10k", "ext_GP4", "3V3", ref="R6")
staples = [
myelin_kicad_pcb.Component(
footprint="myelin-kicad:via_single",
identifier="staple_single%d" % (n+1),
value="",
pins=[Pin(1, "GND", ["GND"])],
)
for n in range(33)
]
myelin_kicad_pcb.dump_netlist("cpu_socket_expansion.net")
myelin_kicad_pcb.dump_bom("bill_of_materials.txt",
"readable_bill_of_materials.txt")
|
google/myelin-acorn-electron-hardware
|
cpu_socket_expansion/pcb/cpu_socket_expansion.py
|
Python
|
apache-2.0
| 17,964
|
[
"CRYSTAL"
] |
d53dcfdab4813cc9f9f7048ba17bbcecbd8c72aceb1a5d89af2d7268685f02cb
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
import os
import random
import sys
import time
import sgf_wrapper
import coords
import gtp
import numpy as np
from mcts import MCTSNode, MAX_DEPTH
import go
# When to do deterministic move selection. ~30 moves on a 19x19, ~8 on 9x9
TEMPERATURE_CUTOFF = int((go.N * go.N) / 12)
def time_recommendation(move_num, seconds_per_move=5, time_limit=15*60,
decay_factor=0.98):
'''Given the current move number and the 'desired' seconds per move, return
how much time should actually be used. This is intended specifically for
CGOS time controls, which has an absolute 15-minute time limit.
The strategy is to spend the maximum possible moves using seconds_per_move,
and then switch to an exponentially decaying time usage, calibrated so that
we have enough time for an infinite number of moves.'''
# Divide by two since you only play half the moves in a game.
player_move_num = move_num / 2
# Sum of geometric series maxes out at endgame_time seconds.
endgame_time = seconds_per_move / (1 - decay_factor)
if endgame_time > time_limit:
# There is so little main time that we're already in 'endgame' mode.
base_time = time_limit * (1 - decay_factor)
core_moves = 0
else:
# Leave over endgame_time seconds for the end, and play at
# seconds_per_move for as long as possible.
base_time = seconds_per_move
core_moves = (time_limit - endgame_time) / seconds_per_move
return base_time * decay_factor ** max(player_move_num - core_moves, 0)
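# A worked example with the default arguments (numbers are illustrative):
#   endgame_time = 5 / (1 - 0.98) = 250 s, core_moves = (900 - 250) / 5 = 130,
#   so roughly the first 130 of our moves get the full 5 s each, and later moves
#   decay geometrically, e.g. player_move_num 180 gets about 5 * 0.98**50 ~= 1.8 s.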
class MCTSPlayerMixin:
# If `simulations_per_move` is nonzero, it will perform that many reads
# before playing. Otherwise, it uses `seconds_per_move` of wall time.
def __init__(self, network, seconds_per_move=5, simulations_per_move=0,
resign_threshold=-0.90, verbosity=0, two_player_mode=False,
num_parallel=8):
self.network = network
self.seconds_per_move = seconds_per_move
self.simulations_per_move = simulations_per_move
self.verbosity = verbosity
self.two_player_mode = two_player_mode
if two_player_mode:
self.temp_threshold = -1
else:
self.temp_threshold = TEMPERATURE_CUTOFF
self.num_parallel = num_parallel
self.qs = []
self.comments = []
self.searches_pi = []
self.root = None
self.result = 0
self.result_string = None
self.resign_threshold = -abs(resign_threshold)
super().__init__()
def initialize_game(self, position=None):
if position is None:
position = go.Position()
self.root = MCTSNode(position)
self.result = 0
self.result_string = None
self.comments = []
self.searches_pi = []
self.qs = []
def suggest_move(self, position):
''' Used for playing a single game.
For parallel play, use initialize_move, select_leaf,
incorporate_results, and pick_move
'''
start = time.time()
if self.simulations_per_move == 0:
while time.time() - start < self.seconds_per_move:
self.tree_search()
else:
current_readouts = self.root.N
while self.root.N < current_readouts + self.simulations_per_move:
self.tree_search()
if self.verbosity > 0:
print("%d: Searched %d times in %s seconds\n\n" % (
position.n, self.simulations_per_move, time.time() - start), file=sys.stderr)
# print some stats on anything with probability > 1%
if self.verbosity > 2:
print(self.root.describe(), file=sys.stderr)
print('\n\n', file=sys.stderr)
if self.verbosity > 3:
print(self.root.position, file=sys.stderr)
return self.pick_move()
def play_move(self, c):
'''
Notable side effects:
- finalizes the probability distribution according to
this root's visit counts into the class' running tally, `searches_pi`
- Makes the node associated with this move the root, for future
`inject_noise` calls.
'''
if not self.two_player_mode:
self.searches_pi.append(
self.root.children_as_pi(self.root.position.n < self.temp_threshold))
self.qs.append(self.root.Q) # Save our resulting Q.
self.comments.append(self.root.describe())
try:
self.root = self.root.maybe_add_child(coords.to_flat(c))
except go.IllegalMove:
print("Illegal move")
if not self.two_player_mode:
self.searches_pi.pop()
self.qs.pop()
self.comments.pop()
return False
self.position = self.root.position # for showboard
del self.root.parent.children
return True # GTP requires positive result.
def pick_move(self):
'''Picks a move to play, based on MCTS readout statistics.
Highest N is most robust indicator. In the early stage of the game, pick
a move weighted by visit count; later on, pick the absolute max.'''
if self.root.position.n > self.temp_threshold:
fcoord = np.argmax(self.root.child_N)
else:
cdf = self.root.child_N.cumsum()
cdf /= cdf[-1]
selection = random.random()
fcoord = cdf.searchsorted(selection)
assert self.root.child_N[fcoord] != 0
return coords.from_flat(fcoord)
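# A small illustration of the weighted selection (visit counts are hypothetical):
#   child_N = [10, 30, 60, ...] gives cdf = [0.1, 0.4, 1.0, ...], so a uniform
#   draw of 0.55 falls in the third bin and selects flat coordinate 2.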
def tree_search(self, num_parallel=None):
if num_parallel is None:
num_parallel = self.num_parallel
leaves = []
failsafe = 0
while len(leaves) < num_parallel and failsafe < num_parallel * 2:
failsafe += 1
leaf = self.root.select_leaf()
if self.verbosity >= 4:
print(self.show_path_to_root(leaf))
# if game is over, override the value estimate with the true score
if leaf.is_done():
value = 1 if leaf.position.score() > 0 else -1
leaf.backup_value(value, up_to=self.root)
continue
leaf.add_virtual_loss(up_to=self.root)
leaves.append(leaf)
if leaves:
move_probs, values = self.network.run_many(
[leaf.position for leaf in leaves])
for leaf, move_prob, value in zip(leaves, move_probs, values):
leaf.revert_virtual_loss(up_to=self.root)
leaf.incorporate_results(move_prob, value, up_to=self.root)
def show_path_to_root(self, node):
pos = node.position
diff = node.position.n - self.root.position.n
if len(pos.recent) == 0:
return
def fmt(move): return "{}-{}".format('b' if move.color == 1 else 'w',
coords.to_kgs(move.move))
path = " ".join(fmt(move) for move in pos.recent[-diff:])
if node.position.n >= MAX_DEPTH:
path += " (depth cutoff reached) %0.1f" % node.position.score()
elif node.position.is_game_over():
path += " (game over) %0.1f" % node.position.score()
return path
def should_resign(self):
'''Returns true if the player should resign. No further moves should be played.'''
return self.root.Q_perspective < self.resign_threshold
def set_result(self, winner, was_resign):
self.result = winner
if was_resign:
string = "B+R" if winner == go.BLACK else "W+R"
else:
string = self.root.position.result_string()
self.result_string = string
def to_sgf(self, use_comments=True):
assert self.result_string is not None
pos = self.root.position
if use_comments:
comments = self.comments or ['No comments.']
comments[0] = ("Resign Threshold: %0.3f\n" %
self.resign_threshold) + comments[0]
else:
comments = []
return sgf_wrapper.make_sgf(pos.recent, self.result_string,
white_name=os.path.basename(
self.network.save_file) or "Unknown",
black_name=os.path.basename(
self.network.save_file) or "Unknown",
comments=comments)
def is_done(self):
return self.result != 0 or self.root.is_done()
def extract_data(self):
assert len(self.searches_pi) == self.root.position.n
assert self.result != 0
for pwc, pi in zip(go.replay_position(self.root.position, self.result),
self.searches_pi):
yield pwc.position, pi, pwc.result
def chat(self, msg_type, sender, text):
default_response = "Supported commands are 'winrate', 'nextplay', 'fortune', and 'help'."
if self.root is None or self.root.position.n == 0:
return "I'm not playing right now. " + default_response
if 'winrate' in text.lower():
wr = (abs(self.root.Q) + 1.0) / 2.0
color = "Black" if self.root.Q > 0 else "White"
return "{:s} {:.2f}%".format(color, wr * 100.0)
elif 'nextplay' in text.lower():
return "I'm thinking... " + self.root.most_visited_path()
elif 'fortune' in text.lower():
return "You're feeling lucky!"
elif 'help' in text.lower():
return "I can't help much with go -- try ladders! Otherwise: " + default_response
else:
return default_response
class CGOSPlayerMixin(MCTSPlayerMixin):
def suggest_move(self, position):
self.seconds_per_move = time_recommendation(position.n)
return super().suggest_move(position)
|
mlperf/training_results_v0.5
|
v0.5.0/intel/intel_minigo_submission_public_tensorflow/code/minigo/tensorflow/minigo/strategies.py
|
Python
|
apache-2.0
| 10,535
|
[
"VisIt"
] |
baa8f55dfb3a2d521404aef4b0c25b52a7291468b4ca89788fb1a721bc9fbf5a
|
#!/usr/bin/env python
####################################################################################
#
# This script reads the input src-free image and puts fake galaxies and point sources on it.
# This script heavily relies on the demo provided in the GalSim package.
#
####################################################################################
# Copyright (c) 2012-2014 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import sys
import os
from math import *
import numpy as np
import logging
import time
import galsim
import multiprocessing
import utils
#########################
#
# Functions
#
#########################
def _Flux2Mag(flux,ZP):
"""
The conversion from Flux to magnitude defined in SE.
The image has to have exposure time = 1 sec and
the flux is the total counts (ADU or physical units) in the image.
mag = -2.5 * log10(flux) + ZP
Note: this function does not deal with nan.
Parameters:
-`flux`: nd array. the flux of the object.
-`ZP`: float. the zeropoint.
Return:
-`mag`: nd array. the magnitude of the object.
"""
return -2.5 * np.log10(flux) + ZP
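# A worked example (values are illustrative): with ZP = 20.789,
#   _Flux2Mag(1000.0, 20.789) = -2.5 * log10(1000) + 20.789 = 13.289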
def _Mag2Flux(mag, ZP):
"""
The conversion from magnitude to flux defined in SE.
The image has to have exposure time = 1 sec and
the flux is the total counts (ADU or physical units) in the image.
mag = -2.5 * log10(flux) + ZP
Note: this function does not deal with nan.
Parameters:
-`magnitude`: nd array. the magnitude of the object.
-`ZP`: float. the zeropoint.
Return:
-`flux`: nd array. the flux of the object.
"""
return 10.0**((mag - ZP)/-2.5)
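# A worked example (values are illustrative): with ZP = 20.789,
#   _Mag2Flux(23.0, 20.789) = 10**((23.0 - 20.789) / -2.5) ~= 0.13 counts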
# ---
# Set up the worker
# ---
def _worker(targeted_routine, args_and_info, results_info_proc):
"""
This is the generator worker routine for multiprocessing.
Parameters:
-`targeted_routine`: function object. The targeted function you want to run and pass the args_and_info to.
-`args_and_info`: multiprocessing.Queue. It is a queue with (args, info) tuples.
args are the arguements to pass to targeted_routine
info is passed along to the output queue.
-`results_info_proc`: multiprocessing.Queue.
It is a queue storing (result, info, proc) tuples:
result is the return value from targeted_routine
info is passed through from the input queue.
proc is the process name.
"""
for (args, info) in iter(args_and_info.get, 'STOP'):
result = targeted_routine(*args)
results_info_proc.put( (result, info, multiprocessing.current_process().name) )
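# A minimal usage sketch (the queue contents below are hypothetical):
#   tasks, results = multiprocessing.Queue(), multiprocessing.Queue()
#   tasks.put(((path2image, 20.789), "tile-1"))
#   tasks.put('STOP')
#   multiprocessing.Process(target=_worker,
#                           args=(_PntSrcLocator_single, tasks, results)).start()
#   result, info, proc = results.get()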
#########################
#
# Different functions to put the sources on the targeted image
#
#########################
def _PntSrcLocator_single(
path2image,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
stamp_size_arcsec = 20.0,
mag_dict = {"lo":19.0, "hi":23.0 },
hlr_dict = {"lo":0.35 , "hi":0.75 },
fbulge_dict= {"lo":0.5 , "hi":0.9 },
q_dict = {"lo":0.4 , "hi":1.0 },
pos_ang_dict={"lo":0.0 , "hi":180.0},
ngals_arcmin2 = 30.0,
random_seed = 8241573
):
"""
This function reads the path2image and put the fake point sources on it.
The properties of the point sources are just the gaussian convolved with the psf.
There are some redundant configure parameters (e.g., the axis ratio) that we do not pass into the point sources, the reason
for keeping this is just to be consistent with the galaxies configuartion.
Parameters:
-`path2image`: string. The abs path to the image which you want to put the fake galaxies. It is usually
the source free image if you want to estimate the completeness as a function of image. It
can also be the BnB image if you want to simulate the galaxies in the image with Bright and Big
sources.
-`zeropoint`: float. The zeropoint of the image.
-`psf_dict`: dict. The moffat psf configuration. It has to be in the form of {"moffat":{"lo": [value], "high": [value]}}.
-`stamp_size_arcec`: float. The size (in arcmin) the GalSim will create for one single galaxy (or source).
-`mag_dict`: dict. The magnitude configuration of GalSim galaxies in the unit of magnitude.
It has to be in the form of {"lo": [value], "high": [value]}.
-`hlr_dict`: dict. The half light radius configuration of GalSim galaxies in the unit of arcsec.
It has to be in the form of {"lo": [value], "high": [value]}.
-`fbulge_dict`: dict. The configuration of the fraction of the bulge component.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
-`q_dict`: dict. The axis ratio configuration of GalSim galaxies.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
and q=1 means spherical.
-`pos_ang_dict`: dict. The position angle configuration of GalSim galaxies in the unit of degree.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,180.0].
Moreover, it is counter-clockwise with +x is 0 degree.
-`ngals_arcmin2`: float. The projected number per arcmin square of the galaxies you want to simulate.
-`random_seed`: int. The random seed of the random generator.
Returns:
-`image`: galsim image. It is the simulated image outputed by Galsim.
-`true_cats`: structured data array. It is the true catalog simulated by Galsim.
"""
# ---
# sanitize the parameters
# ---
# zeropoint
zeropoint = float(zeropoint)
# psf
moffat_beta = float(psf_dict["moffat"]["beta"]) # the moffat psf beta, see https://www.gnu.org/software/gnuastro/manual/html_node/PSF.html
moffat_fwhm = float(psf_dict["moffat"]["fwhm"]) # arcsec
stamp_size_arcsec= float(stamp_size_arcsec) # arcsec.
# galaxy and galaxy properties set up
# galaxy master - magnitude / hlr / bulge fraction / axis_ratio / pos_ang
# magnitude master
mag_lo = float(mag_dict["lo"]) # the magnitude range
mag_hi = float(mag_dict["hi"]) # tha magnitude range
# hlr master
hlr_lo = float(hlr_dict["lo"]) # arcsec
hlr_hi = float(hlr_dict["hi"]) # arcsec
# fbulge_master
fbulge_lo = float(fbulge_dict["lo"]) # lower limit fraction of bulge in [0,1]
fbulge_hi = float(fbulge_dict["hi"]) # upper limit fraction of bulge in [0,1]
# axis_ratio
q_lo = float(q_dict["lo"]) # the axis ratio min
q_hi = float(q_dict["hi"]) # the axis ratio max
# pos_ang
pos_ang_lo = float(pos_ang_dict["lo"]) # deg, the lower limit of position angle in [0, 180.0]
pos_ang_hi = float(pos_ang_dict["hi"]) # deg, the upper limit of position angle in [0, 180.0]
# ngals density
ngals_arcmin2 = float(ngals_arcmin2) # the density [arcmin^2] of the simualted gals
# ---
# Read in the image
# ---
# read in the full image and set up the properties of the full image
full_image = galsim.fits.read(path2image)
# get the pixel_scale from the CD matrix -> there should be a simpler way, but I don't know it right now.
# multiply by 3600 to convert it from degrees to arcsec.
try:
pixel_scale = sqrt( abs( np.linalg.det(full_image.wcs.cd) ) ) * 3600.0
except:
pixel_scale = sqrt( abs( galsim.fits.FitsHeader(path2image).header["CD1_1"] * \
galsim.fits.FitsHeader(path2image).header["CD2_2"] - \
galsim.fits.FitsHeader(path2image).header["CD1_2"] * \
galsim.fits.FitsHeader(path2image).header["CD2_1"] ) ) * 3600.0
# get the image size -> it seems it is 1-based, hence xmax is the number of pixels.
full_image_xsize = full_image.xmax
full_image_ysize = full_image.ymax
# ---
# ngals
# ---
# ngals
ngals = np.int( full_image_xsize * full_image_ysize * ( pixel_scale / 60.0 )**2 * ngals_arcmin2 ) # the number of galaxies you want to simulate
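# For example (hypothetical numbers), a 10000 x 10000 pixel image at 0.2 arcsec/pixel
# covers ~1111 arcmin^2, so ngals_arcmin2 = 30 gives roughly 33,000 simulated sources.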
# ---
# PSF properties
# ---
# Take the Moffat psf
# Note: You may omit the flux, since the default is flux=1.
psf = galsim.Moffat(flux = 1.0, beta = moffat_beta, fwhm = moffat_fwhm)
# Take the (e1, e2) shape parameters for psf
psf = psf.shear(e1=0.0, e2=0.0)
# ---
# Set the stamp size
# ---
# set up the stamp and stamp properties
# The stamp size in pixels; it has to be a multiple of 2.
stamp_size = np.int(stamp_size_arcsec / pixel_scale) \
if np.int(stamp_size_arcsec / pixel_scale) % 2 == 0 else \
np.int(stamp_size_arcsec / pixel_scale) + 1
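# e.g. a 20 arcsec stamp at a hypothetical 0.2 arcsec/pixel scale gives 100 px (already even);
# an odd result such as 107 px would be bumped to 108 px.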
# copy the image
image_copy = full_image.copy()
# true_cats
# true data type
data_type_in_cat= np.dtype([ ("x_true" , "f8"), ("y_true" , "f8"),
("mag_true" , "f8"), ("flux_true" , "f8") ])
# the catalog order is x, y, mag, flux
true_cats = np.array([], dtype = data_type_in_cat)
# take the random generator first
# rng is the random number generator generated from the Deviation with the random_seed.
# ud is the uniform random number between 0 and 1.
rng = galsim.BaseDeviate(random_seed)
ud = galsim.UniformDeviate(rng)
# ---
# Start simulate the image
# ---
# loop over all point sources
for k in xrange(0, ngals):
# generate the position of the stamp in the full image.
xcen = ud() * full_image_xsize
ycen = ud() * full_image_ysize
# the pixel of gal center in the full image
ixcen = int(floor(xcen+1.0))
iycen = int(floor(ycen+1.0))
# stamp size
stamp_bounds = galsim.BoundsI(ixcen-0.5*stamp_size, ixcen+0.5*stamp_size-1,
iycen-0.5*stamp_size, iycen+0.5*stamp_size-1)
# pnt src properties
pnt_mag = mag_lo + ud() * (mag_hi - mag_lo)
# Point source properties
# set the flux
pnt_flux = _Mag2Flux(mag = pnt_mag, ZP = zeropoint)
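# _Mag2Flux is defined elsewhere in this module; presumably it implements the usual
# relation flux = 10**(-0.4 * (mag - ZP)), e.g. mag = 22.0 at ZP = 30.0 gives
# flux = 10**3.2 ~ 1.6e3 counts. Treat the exact form as an assumption, not a guarantee.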
final = psf.withFlux(pnt_flux)
# stamp the final gal image
stamp = final.drawImage(bounds = stamp_bounds, scale = pixel_scale)
# calc the overlapping bounds
overlapping_bounds = stamp_bounds & full_image.bounds
# add it to the full image
image_copy[overlapping_bounds] = image_copy[overlapping_bounds] + stamp[overlapping_bounds]
# collect pnt properties
pnt_prop = np.array([ ( float(xcen) , float(ycen) ,
float(pnt_mag) , float(pnt_flux) ) ],
dtype = data_type_in_cat )
# append gals in this image
true_cats = np.append(true_cats, pnt_prop)
# return
return image_copy.copy(), true_cats.copy()
def _BulDiskLocator_single(
path2image,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
stamp_size_arcsec = 20.0,
mag_dict = {"lo":19.0, "hi":23.0 },
hlr_dict = {"lo":0.35 , "hi":0.75 },
fbulge_dict= {"lo":0.5 , "hi":0.9 },
q_dict = {"lo":0.4 , "hi":1.0 },
pos_ang_dict={"lo":0.0 , "hi":180.0},
ngals_arcmin2 = 30.0,
random_seed = 8241573
):
"""
This function reads the path2image and put the fake galaxies on it.
The properties of the fake galaxies are configured by several input parameters and they
are convolved with psf before being put on the image.
Parameters:
-`path2image`: string. The abs path to the image which you want to put the fake galaxies. It is usually
the source free image if you want to estimate the completeness as a function of image. It
can also be the BnB image if you want to simulate the galaxies in the image with Bright and Big
sources.
-`zeropoint`: float. The zeropoint of the image.
-`psf_dict`: dict. The moffat psf configuration. It has to be in the form of {"moffat":{"beta": [value], "fwhm": [value]}}, where fwhm is in arcsec.
-`stamp_size_arcsec`: float. The size (in arcsec) of the stamp GalSim will create for one single galaxy (or source).
-`mag_dict`: dict. The magnitude configuration of GalSim galaxies in the unit of magnitude.
It has to be in the form of {"lo": [value], "high": [value]}.
-`hlr_dict`: dict. The half light radius configuration of GalSim galaxies in the unit of arcsec.
It has to be in the form of {"lo": [value], "high": [value]}.
-`fbulge_dict`: dict. The configuration of the fraction of the bulge component.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
-`q_dict`: dict. The axis ratio configuration of GalSim galaxies.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
and q=1 means spherical.
-`pos_ang_dict`: dict. The position angle configuration of GalSim galaxies in the unit of degree.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,180.0].
Moreover, it is counter-clockwise with +x is 0 degree.
-`ngals_arcmin2`: float. The projected number per arcmin square of the galaxies you want to simulate.
-`random_seed`: int. The random seed of the random generator.
Returns:
-`image`: galsim image. It is the simulated image outputed by Galsim.
-`true_cats`: structured data array. It is the true catalog simulated by Galsim.
"""
# ---
# sanitize the parameters
# ---
# zeropoint
zeropoint = float(zeropoint)
# psf
moffat_beta = float(psf_dict["moffat"]["beta"]) # the moffat psf beta, see https://www.gnu.org/software/gnuastro/manual/html_node/PSF.html
moffat_fwhm = float(psf_dict["moffat"]["fwhm"]) # arcsec
stamp_size_arcsec= float(stamp_size_arcsec) # arcsec.
# galaxy and galaxy properties set up
# galaxy master - magnitude / hlr / bulge fraction / axis_ratio / pos_ang
# magnitude master
mag_lo = float(mag_dict["lo"]) # the magnitude range
mag_hi          = float(mag_dict["hi"])                    # the magnitude range
# hlr master
hlr_lo = float(hlr_dict["lo"]) # arcsec
hlr_hi = float(hlr_dict["hi"]) # arcsec
# fbulge_master
fbulge_lo = float(fbulge_dict["lo"]) # lower limit fraction of bulge in [0,1]
fbulge_hi = float(fbulge_dict["hi"]) # upper limit fraction of bulge in [0,1]
# axis_ratio
q_lo = float(q_dict["lo"]) # the axis ratio min
q_hi = float(q_dict["hi"]) # the axis ratio max
# pos_ang
pos_ang_lo = float(pos_ang_dict["lo"]) # deg, the lower limit of position angle in [0, 180.0]
pos_ang_hi = float(pos_ang_dict["hi"]) # deg, the upper limit of position angle in [0, 180.0]
# ngals density
ngals_arcmin2   = float(ngals_arcmin2)                     # the density [arcmin^2] of the simulated gals
# ---
# Read in the image
# ---
# read in the full image and set up the properties of the full image
full_image = galsim.fits.read(path2image)
# get the pixel_scale from the cd matrix -> there should be a simpler way, but I don't know of one right now.
# it has to be multiplied by 3600 to convert it from degrees to arcsec.
try:
pixel_scale = sqrt( abs( np.linalg.det(full_image.wcs.cd) ) ) * 3600.0
except:
pixel_scale = sqrt( abs( galsim.fits.FitsHeader(path2image).header["CD1_1"] * \
galsim.fits.FitsHeader(path2image).header["CD2_2"] - \
galsim.fits.FitsHeader(path2image).header["CD1_2"] * \
galsim.fits.FitsHeader(path2image).header["CD2_1"] ) ) * 3600.0
# get the image size -> it is 1-based, hence xmax gives the number of pixels.
full_image_xsize = full_image.xmax
full_image_ysize = full_image.ymax
# ---
# ngals
# ---
# ngals
ngals = np.int( full_image_xsize * full_image_ysize * ( pixel_scale / 60.0 )**2 * ngals_arcmin2 ) # the number of galaxies you want to simulate
# ---
# PSF properties
# ---
# Take the Moffat psf
# Note: You may omit the flux, since the default is flux=1.
psf = galsim.Moffat(flux = 1.0, beta = moffat_beta, fwhm = moffat_fwhm)
# Take the (e1, e2) shape parameters for psf
psf = psf.shear(e1=0.0, e2=0.0)
# ---
# Set the stamp size
# ---
# set up the stamp and stamp properties
# The stamp size in pixels. It has to be a multiple of 2.
stamp_size = np.int(stamp_size_arcsec / pixel_scale) \
if np.int(stamp_size_arcsec / pixel_scale) % 2 == 0 else \
np.int(stamp_size_arcsec / pixel_scale) + 1
# copy the image
image_copy = full_image.copy()
# true_cats
# true data type
data_type_in_cat= np.dtype([ ("x_true" , "f8"), ("y_true" , "f8"), ("mag_true" , "f8"),
("flux_true", "f8"), ("hlr_true[arcsec]", "f8"), ("fbulge_true", "f8"),
("q_true" , "f8"), ("pos_ang[deg]" , "f8") ])
# the catalog order is x, y, mag, flux, hlr(arcsec), fbulge, q, pos_ang
true_cats = np.array([], dtype = data_type_in_cat)
# take the random generator first
# rng is the random number generator generated from the Deviation with the random_seed.
# ud is the uniform random number between 0 and 1.
rng = galsim.BaseDeviate(random_seed)
ud = galsim.UniformDeviate(rng)
# ---
# Start simulate the image
# ---
# looping all gals
for k in xrange(0, ngals):
# generate the position of the stamp in the full image.
xcen = ud() * full_image_xsize
ycen = ud() * full_image_ysize
# the pixel of gal center in the full image
ixcen = int(floor(xcen+1.0))
iycen = int(floor(ycen+1.0))
# stamp size
stamp_bounds = galsim.BoundsI(ixcen-0.5*stamp_size, ixcen+0.5*stamp_size-1,
iycen-0.5*stamp_size, iycen+0.5*stamp_size-1)
# gal properties
gal_mag = mag_lo + ud() * (mag_hi - mag_lo)
gal_hlr = hlr_lo + ud() * (hlr_hi - hlr_lo)
gal_fbulge = fbulge_lo + ud() * (fbulge_hi - fbulge_lo)
gal_q = q_lo + ud() * (q_hi - q_lo )
gal_pos_ang = pos_ang_lo + ud() * (pos_ang_hi - pos_ang_lo )
# Gal profile
# Galaxy is a bulge + disk with parameters taken from the catalog:
disk = galsim.Exponential(flux = (1.0 - gal_fbulge),
half_light_radius = gal_hlr )
#bulge = galsim.DeVaucouleurs(flux = gal_fbulge,
# half_light_radius = gal_hlr,
# flux_untruncated = False )
bulge = galsim.Gaussian( flux = gal_fbulge,
half_light_radius = gal_hlr )
# sum disk and bulge
gal = disk + bulge
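# Note: the disk carries flux (1.0 - gal_fbulge) and the bulge carries gal_fbulge, so the
# composite profile starts with unit total flux; withFlux() below then sets the real flux.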
'''
bulge = galsim.DeVaucouleurs(flux = 1.0,
half_light_radius = gal_hlr,
flux_untruncated = False )
gal = bulge
'''
# set the flux
gal_flux = _Mag2Flux(mag = gal_mag, ZP = zeropoint)
gal = gal.withFlux(gal_flux)
# set the ellipticity (or shear)
gal = gal.shear(q = gal_q, beta = gal_pos_ang * galsim.degrees)
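# In GalSim, shear(q=..., beta=...) corresponds to a reduced shear of magnitude
# |g| = (1 - q) / (1 + q) at position angle beta, e.g. q = 0.4 gives |g| = 0.6 / 1.4 ~ 0.43.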
# Convolve with psf
final = galsim.Convolve([psf, gal])
# stamp the final gal image
stamp = final.drawImage(bounds = stamp_bounds, scale = pixel_scale)
# calc the overlapping bounds
overlapping_bounds = stamp_bounds & full_image.bounds
# add it to the full image
image_copy[overlapping_bounds] = image_copy[overlapping_bounds] + stamp[overlapping_bounds]
# collect gal properties
gal_prop = np.array([ ( float(xcen) , float(ycen) , float(gal_mag) ,
float(gal_flux), float(gal_hlr) , float(gal_fbulge),
float(gal_q) , float(gal_pos_ang) ) ],
dtype = data_type_in_cat )
# append gals in this image
true_cats = np.append(true_cats, gal_prop)
# return
return image_copy.copy(), true_cats.copy()
def _ModelGalLocator_single(
path2image,
readincat,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
stamp_size_arcsec = 20.0,
mag_dict = {"lo":19.0, "hi":23.0 },
hlr_dict = {"lo":0.35 , "hi":0.75 },
fbulge_dict= {"lo":0.5 , "hi":0.9 },
q_dict = {"lo":0.4 , "hi":1.0 },
pos_ang_dict={"lo":0.0 , "hi":180.0},
ngals_arcmin2 = 30.0,
random_seed = 8241573
):
"""
This function reads the path2image and put the fake galaxies on it.
The properties of the fake galaxies are controlled by the input catalog and they
are convolved with psf before being put on the image.
Parameters:
-`path2image`: string. The abs path to the image which you want to put the fake galaxies. It is usually
the source free image if you want to estimate the completeness as a function of image. It
can also be the BnB image if you want to simulate the galaxies in the image with Bright and Big
sources.
-`readincat`: ndarray. This array consists of the input catalog, including the input values with the field names of
``x_true``, ``y_true``, ``mag_true``, ``hlr_true[arcsec]``, ``fbulge_true``, ``q_true`` and ``pos_ang[deg]``.
-`zeropoint`: float. The zeropoint of the image.
-`psf_dict`: dict. The moffat psf configuration. It has to be in the form of {"moffat":{"beta": [value], "fwhm": [value]}}, where fwhm is in arcsec.
-`stamp_size_arcsec`: float. The size (in arcsec) of the stamp GalSim will create for one single galaxy (or source).
-`mag_dict`: dict. The magnitude configuration of GalSim galaxies in the unit of magnitude.
It has to be in the form of {"lo": [value], "high": [value]}.
-`hlr_dict`: dict. The half light radius configuration of GalSim galaxies in the unit of arcsec.
It has to be in the form of {"lo": [value], "high": [value]}.
-`fbulge_dict`: dict. The configuration of the fraction of the bulge component.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
-`q_dict`: dict. The axis ratio configuration of GalSim galaxies.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
and q=1 means spherical.
-`pos_ang_dict`: dict. The position angle configuration of GalSim galaxies in the unit of degree.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,180.0].
Moreover, it is counter-clockwise with +x is 0 degree.
-`ngals_arcmin2`: float. The projected number per arcmin square of the galaxies you want to simulate.
-`random_seed`: int. The random seed of the random generator.
Returns:
-`image`: galsim image. It is the simulated image outputed by Galsim.
"""
# ---
# sanitize the parameters
# ---
# zeropoint
zeropoint = float(zeropoint)
# psf
moffat_beta = float(psf_dict["moffat"]["beta"]) # the moffat psf beta, see https://www.gnu.org/software/gnuastro/manual/html_node/PSF.html
moffat_fwhm = float(psf_dict["moffat"]["fwhm"]) # arcsec
stamp_size_arcsec= float(stamp_size_arcsec) # arcsec.
# galaxy and galaxy properties set up
# galaxy master - magnitude / hlr / bulge fraction / axis_ratio / pos_ang
# magnitude master
mag_lo = float(mag_dict["lo"]) # the magnitude range
mag_hi          = float(mag_dict["hi"])                    # the magnitude range
# hlr master
hlr_lo = float(hlr_dict["lo"]) # arcsec
hlr_hi = float(hlr_dict["hi"]) # arcsec
# fbulge_master
fbulge_lo = float(fbulge_dict["lo"]) # lower limit fraction of bulge in [0,1]
fbulge_hi = float(fbulge_dict["hi"]) # upper limit fraction of bulge in [0,1]
# axis_ratio
q_lo = float(q_dict["lo"]) # the axis ratio min
q_hi = float(q_dict["hi"]) # the axis ratio max
# pos_ang
pos_ang_lo = float(pos_ang_dict["lo"]) # deg, the lower limit of position angle in [0, 180.0]
pos_ang_hi = float(pos_ang_dict["hi"]) # deg, the upper limit of position angle in [0, 180.0]
# ngals density
ngals_arcmin2   = float(ngals_arcmin2)                     # the density [arcmin^2] of the simulated gals
# ---
# Read in the image
# ---
# read in the full image and set up the properties of the full image
full_image = galsim.fits.read(path2image)
# get the pixel_scale from the cd matrix -> there should be a simpler way, but I don't know of one right now.
# it has to be multiplied by 3600 to convert it from degrees to arcsec.
try:
pixel_scale = sqrt( abs( np.linalg.det(full_image.wcs.cd) ) ) * 3600.0
except:
pixel_scale = sqrt( abs( galsim.fits.FitsHeader(path2image).header["CD1_1"] * \
galsim.fits.FitsHeader(path2image).header["CD2_2"] - \
galsim.fits.FitsHeader(path2image).header["CD1_2"] * \
galsim.fits.FitsHeader(path2image).header["CD2_1"] ) ) * 3600.0
# get the image size -> it is 1-based, hence xmax gives the number of pixels.
full_image_xsize = full_image.xmax
full_image_ysize = full_image.ymax
# ---
# ngals
# ---
# ngals
ngals = len(readincat)
# ---
# PSF properties
# ---
# Take the Moffat psf
# Note: You may omit the flux, since the default is flux=1.
psf = galsim.Moffat(flux = 1.0, beta = moffat_beta, fwhm = moffat_fwhm)
# Take the (e1, e2) shape parameters for psf
psf = psf.shear(e1=0.0, e2=0.0)
# ---
# Set the stamp size
# ---
# set up the stamp and stamp properties
# The stamp size in pixels. It has to be a multiple of 2.
stamp_size = np.int(stamp_size_arcsec / pixel_scale) \
if np.int(stamp_size_arcsec / pixel_scale) % 2 == 0 else \
np.int(stamp_size_arcsec / pixel_scale) + 1
#np.int(10.0 / pixel_scale) + 1
# copy the image
image_copy = full_image.copy()
# true_cats
x_true = readincat["x_true" ].astype(np.float)
y_true = readincat["y_true" ].astype(np.float)
mag_true = readincat["mag_true" ].astype(np.float)
hlr_true_arcsec = readincat["hlr_true[arcsec]"].astype(np.float)
fbulge_true = readincat["fbulge_true" ].astype(np.float)
q_true = readincat["q_true" ].astype(np.float)
pos_ang = readincat["pos_ang[deg]" ].astype(np.float)
# take the random generator first
# rng is the random number generator generated from the Deviation with the random_seed.
# ud is the uniform random number between 0 and 1.
rng = galsim.BaseDeviate(random_seed)
ud = galsim.UniformDeviate(rng)
# ---
# Start simulate the image
# ---
# looping all gals
for k in xrange(0, ngals):
# generate the position of the stamp in the full image.
xcen = x_true[k]
ycen = y_true[k]
# the pixel of gal center in the full image
#ixcen = int(floor(xcen+0.5))
#iycen = int(floor(ycen+0.5))
ixcen = int(floor(xcen+1.0))
iycen = int(floor(ycen+1.0))
# stamp size
stamp_bounds = galsim.BoundsI(ixcen-0.5*stamp_size, ixcen+0.5*stamp_size-1,
iycen-0.5*stamp_size, iycen+0.5*stamp_size-1)
# gal properties
gal_mag = mag_true[k]
gal_hlr = hlr_true_arcsec[k]
gal_fbulge = fbulge_true[k]
gal_q = q_true[k]
gal_pos_ang = pos_ang[k]
# Gal profile
# Galaxy is a bulge + disk with parameters taken from the catalog:
disk = galsim.Exponential(flux = (1.0 - gal_fbulge),
half_light_radius = gal_hlr )
#bulge = galsim.DeVaucouleurs(flux = gal_fbulge,
# half_light_radius = gal_hlr,
# flux_untruncated = False )
bulge = galsim.Gaussian( flux = gal_fbulge,
half_light_radius = gal_hlr )
# sum disk and bulge
gal = disk + bulge
# set the flux
gal_flux = _Mag2Flux(mag = gal_mag, ZP = zeropoint)
gal = gal.withFlux(gal_flux)
# set the ellipticity (or shear)
gal = gal.shear(q = gal_q, beta = gal_pos_ang * galsim.degrees)
# Convolve with psf
final = galsim.Convolve([psf, gal])
# stamp the final gal image
stamp = final.drawImage(bounds = stamp_bounds, scale = pixel_scale)
# calc the overlapping bounds
overlapping_bounds = stamp_bounds & full_image.bounds
# add it to the full image
image_copy[overlapping_bounds] = image_copy[overlapping_bounds] + stamp[overlapping_bounds]
# return
return image_copy.copy()
def _RealGalLocator_single(
path2image,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
stamp_size_arcsec = 20.0,
mag_dict = {"lo":19.0, "hi":23.0 },
hlr_dict = {"lo":0.35 , "hi":0.75 },
fbulge_dict= {"lo":0.5 , "hi":0.9 },
q_dict = {"lo":0.4 , "hi":1.0 },
pos_ang_dict={"lo":0.0 , "hi":180.0},
ngals_arcmin2 = 30.0,
random_seed = 8241573
):
"""
This function reads the path2image and put the fake galaxies on it.
The properties of the fake galaxies are directly taken from COSMOS catalog provided by Galsim team and they
are convolved with psf before being put on the image.
In this routine, we only use pos_ang_dict to re-rotate the galaxies.
Please run `galsim_download_cosmos` provided by GalSim to download the COSMOS catalog - perhaps we should add the input catalog arguments.
Parameters:
-`path2image`: string. The abs path to the image which you want to put the fake galaxies. It is usually
the source free image if you want to estimate the completeness as a function of image. It
can also be the BnB image if you want to simulate the galaxies in the image with Bright and Big
sources.
-`zeropoint`: float. The zeropoint of the image.
-`psf_dict`: dict. The moffat psf configuration. It has to be in the form of {"moffat":{"beta": [value], "fwhm": [value]}}, where fwhm is in arcsec.
-`stamp_size_arcsec`: float. The size (in arcsec) of the stamp GalSim will create for one single galaxy (or source).
-`mag_dict`: dict. The magnitude configuration of GalSim galaxies in the unit of magnitude.
It has to be in the form of {"lo": [value], "high": [value]}.
-`hlr_dict`: dict. The half light radius configuration of GalSim galaxies in the unit of arcsec.
It has to be in the form of {"lo": [value], "high": [value]}.
-`fbulge_dict`: dict. The configuration of the fraction of the bulge component.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
-`q_dict`: dict. The axis ratio configuration of GalSim galaxies.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,1]
and q=1 means spherical.
-`pos_ang_dict`: dict. The position angle configuration of GalSim galaxies in the unit of degree.
It has to be in the form of {"lo": [value], "high": [value]}. Note that the value has to be within [0,180.0].
Moreover, it is counter-clockwise with +x is 0 degree.
-`ngals_arcmin2`: float. The projected number per arcmin square of the galaxies you want to simulate.
-`random_seed`: int. The random seed of the random generator.
Returns:
-`image`: galsim image. It is the simulated image outputed by Galsim.
-`true_cats`: structured data array. It is the true catalog simulated by Galsim.
"""
# ---
# sanitize the parameters
# ---
# zeropoint
zeropoint = float(zeropoint)
# psf
moffat_beta = float(psf_dict["moffat"]["beta"]) # the moffat psf beta, see https://www.gnu.org/software/gnuastro/manual/html_node/PSF.html
moffat_fwhm = float(psf_dict["moffat"]["fwhm"]) # arcsec
stamp_size_arcsec= float(stamp_size_arcsec) # arcsec.
# galaxy and galaxy properties set up
# galaxy master - magnitude / hlr / bulge fraction / axis_ratio / pos_ang
# magnitude master
mag_lo = float(mag_dict["lo"]) # the magnitude range
mag_hi          = float(mag_dict["hi"])                    # the magnitude range
# hlr master
hlr_lo = float(hlr_dict["lo"]) # arcsec
hlr_hi = float(hlr_dict["hi"]) # arcsec
# fbulge_master
fbulge_lo = float(fbulge_dict["lo"]) # lower limit fraction of bulge in [0,1]
fbulge_hi = float(fbulge_dict["hi"]) # upper limit fraction of bulge in [0,1]
# axis_ratio
q_lo = float(q_dict["lo"]) # the axis ratio min
q_hi = float(q_dict["hi"]) # the axis ratio max
# pos_ang
pos_ang_lo = float(pos_ang_dict["lo"]) # deg, the lower limit of position angle in [0, 180.0]
pos_ang_hi = float(pos_ang_dict["hi"]) # deg, the upper limit of position angle in [0, 180.0]
# ngals density
ngals_arcmin2   = float(ngals_arcmin2)                     # the density [arcmin^2] of the simulated gals
# ---
# Read in the image
# ---
# read in the full image and set up the properties of the full image
full_image = galsim.fits.read(path2image)
# get the pixel_scale from the cd matrix -> there should be a simpler way, but I don't know of one right now.
# it has to be multiplied by 3600 to convert it from degrees to arcsec.
try:
pixel_scale = sqrt( abs( np.linalg.det(full_image.wcs.cd) ) ) * 3600.0
except:
pixel_scale = sqrt( abs( galsim.fits.FitsHeader(path2image).header["CD1_1"] * \
galsim.fits.FitsHeader(path2image).header["CD2_2"] - \
galsim.fits.FitsHeader(path2image).header["CD1_2"] * \
galsim.fits.FitsHeader(path2image).header["CD2_1"] ) ) * 3600.0
# get the image size -> it is 1-based, hence xmax gives the number of pixels.
full_image_xsize = full_image.xmax
full_image_ysize = full_image.ymax
# ---
# ngals
# ---
# ngals
ngals = np.int( full_image_xsize * full_image_ysize * ( pixel_scale / 60.0 )**2 * ngals_arcmin2 ) # the number of galaxies you want to simulate
# ---
# PSF properties
# ---
# Take the Moffat psf
# Note: You may omit the flux, since the default is flux=1.
psf = galsim.Moffat(flux = 1.0, beta = moffat_beta, fwhm = moffat_fwhm)
# Take the (e1, e2) shape parameters for psf
psf = psf.shear(e1=0.0, e2=0.0)
# ---
# Set the stamp size
# ---
# set up the stamp and stamp properties
# The stamp size in pixels. It has to be a multiple of 2.
stamp_size = np.int(stamp_size_arcsec / pixel_scale) \
if np.int(stamp_size_arcsec / pixel_scale) % 2 == 0 else \
np.int(stamp_size_arcsec / pixel_scale) + 1
#np.int(10.0 / pixel_scale) + 1
# copy the image
image_copy = full_image.copy()
# true_cats
# true data type
data_type_in_cat= np.dtype([ ("x_true" , "f8"), ("y_true" , "f8"), ("mag_true" , "f8"),
("flux_true", "f8"), ("pos_ang[deg]" , "f8") ])
# the catalog order is x, y, mag, flux, pos_ang
true_cats = np.array([], dtype = data_type_in_cat)
# take the random generator first
# rng is the random number generator generated from the Deviation with the random_seed.
# ud is the uniform random number between 0 and 1.
rng = galsim.BaseDeviate(random_seed)
ud = galsim.UniformDeviate(rng)
# read in real galaxy catalog
real_galaxy_catalog = galsim.RealGalaxyCatalog()
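# With no arguments, galsim.RealGalaxyCatalog() is expected to pick up the COSMOS training
# sample downloaded by the `galsim_download_cosmos` script (see the docstring above); if the
# catalog lives elsewhere, pass file_name/dir explicitly. This default lookup is an
# assumption about the installed GalSim version.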
# ---
# Start simulate the image
# ---
# looping all gals
for k in xrange(0, ngals):
# generate the position of the stamp in the full image.
xcen = ud() * full_image_xsize
ycen = ud() * full_image_ysize
# the pixel of gal center in the full image
ixcen = int(floor(xcen+1.0))
iycen = int(floor(ycen+1.0))
# stamp size
stamp_bounds = galsim.BoundsI(ixcen-0.5*stamp_size, ixcen+0.5*stamp_size-1,
iycen-0.5*stamp_size, iycen+0.5*stamp_size-1)
# gal properties
gal_mag = mag_lo + ud() * (mag_hi - mag_lo)
gal_pos_ang = pos_ang_lo + ud() * (pos_ang_hi - pos_ang_lo )
# Gal profile
# Galaxy is a bulge + disk with parameters taken from the catalog:
gal = galsim.RealGalaxy(real_galaxy_catalog, random = True)
# set the flux
gal_flux = _Mag2Flux(mag = gal_mag, ZP = zeropoint)
gal = gal.withFlux(gal_flux)
# set the rotation
gal = gal.rotate(gal_pos_ang * galsim.degrees)
# Convolve with psf
final = galsim.Convolve([psf, gal])
# stamp the final gal image
stamp = final.drawImage(bounds = stamp_bounds, scale = pixel_scale)
# calc the overlapping bounds
overlapping_bounds = stamp_bounds & full_image.bounds
# add it to the full image
image_copy[overlapping_bounds] = image_copy[overlapping_bounds] + stamp[overlapping_bounds]
# collect gal properties
gal_prop = np.array([ ( float(xcen) , float(ycen) , float(gal_mag) ,
float(gal_flux), float(gal_pos_ang) ) ],
dtype = data_type_in_cat )
# append gals in this image
true_cats = np.append(true_cats, gal_prop)
# return
return image_copy.copy(), true_cats.copy()
#########################
#
# Engine for multiprocessing...
#
#########################
def BulDiskLocator(
path2image,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
nsimimages = 10,
random_seed = 8241573,
args_pssr = utils.argpasser(),
):
"""
:param path2image: The absolute path to the image which you want to put the simulated sources on. This is usually the source free image (SFI), or it can also be the BnB image if you want to simulate the sources on the image where the observed BnB sources are kept. One can use the BnB image to test how the BnB sources affect the detection.
:param psf_dict: The psf configuration. Currently it only supports Moffat PSF with beta parameter of 4.5. ``psf_dict`` must be a dictionary in the form of ``{"moffat":{ "beta": _value_, "fwhm": _value_ } }``, where _value_ of ``fwhm`` is in the unit of arcsec. By default, ``psf_dict = {"moffat":{ "beta": 4.5, "fwhm": img_fwhm } }``.
:param stamp_size_arcsec: The size of the stamp of each simulated source by **GalSim**. The stamp is with the size of ``stamp_size_arcsec`` x ``stamp_size_arcsec`` (``stamp_size_arcsec`` in arcsec) where the **GalSim** will simulate one single source on. By default, it is ``stamp_size_arcsec = 20.0``.
:param mag_dict: The magnitude range which **GalSim** will simulate sources. It must be in the form of ``{"lo": _value_, "hi": _value_}``, where _value_ is expressed in magnitude. By default, it is ``mag_dict = {"lo":20.0, "hi":25.0 }``.
:param hlr_dict: The half light radius configuration of the sources simulated by **GalSim**. It is in the unit of arcsec. It has to be in the form of ``{"lo": _value_, "high": _value_}``. By default, it is ``hlr_dict = {"lo":0.35 , "hi":0.75 }``.
:param fbulge_dict: The configuration of the fraction of the bulge component. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and 1 means the galaxy has zero fraction of light from the disk component. By default, it is ``fbulge_dict = {"lo":0.5 , "hi":0.9 }``.
:param q_dict: The minor-to-major axis ratio configuration of the sources simulated by **GalSim**. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and ``q = 1`` means spherical. By default, it is ``q_dict = {"lo":0.4 , "hi":1.0 }``.
:param pos_ang_dict: The position angle configuration of the sources simulated by **GalSim**. It is in the unit of degree. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,180.0] and it is counter-clockwise with +x is 0 degree. By default, it is ``pos_ang_dict={"lo":0.0 , "hi":180.0 }``.
:param ngals_arcmin2: The projected number of the sources simulated by **GalSim** per arcmin square. You don't want to set this number too high because it will cause blending problems in the source detection. However, you don't want to lose statistical power if you set this number too low. By default, it is ``ngals_arcmin2 = 25.0``.
:param nsimimages: The number of the images you want to simulate. It will be saved in the multi-extension file with the code name ``sims_nameroot``. By default, it is ``nsimimages = 50``.
:param random_seed: The random seed of the random generator. It will be passed to **GalSim** for simulating the sources.
:param sims_nameroot: The code name you want to identify this run of simulation. It is not only the name of the subdirectory for saving the images simulated in this run, but also the code name for **ComEst** to identify the simulation for the remaining analysis pipeline. IMPORTANT: Please use the consistent code name ``sims_nameroot`` for this set of simulated images throughout **ComEst**. By default, it is ``sims_nameroot = "buldisk"``.
:param ncpu: The number of cpu for parallel running. By default, it is ``ncpu = 2``. Please do not set this number higher than the CPU cores you have.
:type path2image: str
:type psf_dict: dict
:type stamp_size_arcsec: float
:type mag_dict: dict
:type hlr_dict: dict
:type fbulge_dict: dict
:type q_dict: dict
:type pos_ang_dict: dict
:type ngals_arcmin2: float
:type nsimimages: int
:type random_seed: int
:type sims_nameroot: str
:type ncpu: int
:returns: ``image_collection`` is the list containing the simulated images and ``true_cats_collection`` is the list containing the information of the mock catalogs (hence ``len(image_collection) = len(true_cats_collection) = nsimimages``). Each element of ``image_collection`` is a simulated image output by Galsim, and each element of ``true_cats_collection`` is the corresponding true catalog.
:rtype: list, list
This function reads the ``path2image`` and puts the fake galaxies on it. In this case the galaxies consist of bulge and disk components. The properties of the fake galaxies are configured by several input parameters and they are convolved with the psf before being put on the image. The simulated sources are uniformly distributed in the CCD (and uniformly within all the provided configuration ranges) with the number density of ``ngals_arcmin2``.
.. seealso:: ``comest.ComEst.BulDiskLocator`` for more details about the configuration.
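Example (a minimal sketch, not taken from the ComEst documentation; it assumes this module's namespace is available and that ``utils.argpasser`` accepts the attributes read by this routine as keyword arguments; the file name and numbers are placeholders)::

    >>> args = utils.argpasser(stamp_size_arcsec = 20.0,
    ...                        mag_dict      = {"lo":20.0, "hi":25.0 },
    ...                        hlr_dict      = {"lo":0.35, "hi":0.75 },
    ...                        fbulge_dict   = {"lo":0.5 , "hi":0.9  },
    ...                        q_dict        = {"lo":0.4 , "hi":1.0  },
    ...                        pos_ang_dict  = {"lo":0.0 , "hi":180.0},
    ...                        ngals_arcmin2 = 25.0, nsimimages = 10, ncpu = 2)
    >>> imgs, cats = BulDiskLocator("sfi.fits", zeropoint = 30.0,
    ...                             psf_dict  = {"moffat":{"beta":4.5, "fwhm":1.0}},
    ...                             args_pssr = args)
    >>> len(imgs), len(cats)
    (10, 10)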
"""
# ---
# extract args_pssr
# ---
stamp_size_arcsec = args_pssr.stamp_size_arcsec
mag_dict = args_pssr.mag_dict
hlr_dict = args_pssr.hlr_dict
fbulge_dict = args_pssr.fbulge_dict
q_dict = args_pssr.q_dict
pos_ang_dict = args_pssr.pos_ang_dict
ngals_arcmin2 = args_pssr.ngals_arcmin2
nsimimages = args_pssr.nsimimages
ncpu = args_pssr.ncpu
# set up the cpu number
if ncpu > multiprocessing.cpu_count():
print
print RuntimeWarning("ncpu:", ncpu, "is larger than total number of cpu:", multiprocessing.cpu_count())
print RuntimeWarning("Using the total number of the cpu:", multiprocessing.cpu_count())
print
nproc = multiprocessing.cpu_count()
else:
nproc = ncpu
# ---
# Start simulate the image one by one
# ---
# set up the task queue
task_queue = multiprocessing.Queue()
# set up the done task
done_queue = multiprocessing.Queue()
# looping over images
for nimage in xrange(nsimimages):
# put the task
task_queue.put(
( (path2image, zeropoint, psf_dict, stamp_size_arcsec, mag_dict, hlr_dict, fbulge_dict, q_dict, pos_ang_dict, ngals_arcmin2, random_seed + nimage),
"simulated %i image" % nimage ) )
# Run the tasks and create done_queue
# Each Process command starts up a parallel process that will keep checking the queue
# for a new task. If there is one there, it grabs it and does it. If not, it waits
# until there is one to grab. When it finds a 'STOP', it shuts down.
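# For reference, a worker compatible with this queue protocol would look roughly like the
# sketch below; the actual ``_worker`` is defined elsewhere in this module and may differ.
#
#     def _worker(func, task_queue, done_queue):
#         for (args, name) in iter(task_queue.get, 'STOP'):
#             result = func(*args)
#             done_queue.put( (result, name, multiprocessing.current_process().name) )
#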
done_queue = multiprocessing.Queue()
for nnk in xrange(nproc):
multiprocessing.Process(target = _worker, args = ( _BulDiskLocator_single, task_queue, done_queue) ).start()
# In the meanwhile, the main process keeps going. We pull each image off of the
# done_queue and put it in the appropriate place on the main image.
# This loop is happening while the other processes are still working on their tasks.
# You'll see that these logging statements get print out as the stamp images are still
# being drawn.
# claim the image
image_collection = []
# claim true_cats_collection
true_cats_collection= []
# claim the processing_info
info_list = []
for nnk in xrange(nsimimages):
single_image_and_cat, processing_info, processing_name = done_queue.get()
print "#", "%s: for file %s was done." % (processing_name, processing_info)
# info_list
info_list.append( int( processing_info.strip("simulated image") ) )
# append image
image_collection.append(single_image_and_cat[0].copy())
# append true catalog
true_cats_collection.append(single_image_and_cat[1].copy())
# Re-order according to the info_list - since the order of the image is important.
image_collection = list(np.array(image_collection )[ np.argsort(info_list) ])
true_cats_collection= list(np.array(true_cats_collection)[ np.argsort(info_list) ])
# Stop the processes
# The 'STOP's could have been put on the task list before starting the processes, or you
# can wait. In some cases it can be useful to clear out the done_queue (as we just did)
# and then add on some more tasks. We don't need that here, but it's perfectly fine to do.
# Once you are done with the processes, putting nproc 'STOP's will stop them all.
# This is important, because the program will keep running as long as there are running
# processes, even if the main process gets to the end. So you do want to make sure to
# add those 'STOP's at some point!
for nnk in xrange(nproc):
task_queue.put('STOP')
# return
return image_collection, true_cats_collection
def ModelGalLocator(
path2image,
readincat,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
random_seed = 8241573,
args_pssr = utils.argpasser(),
):
"""
:param path2image: The absolute path to the image which you want to put the simulated sources on. This is usually the source free image (SFI), or it can also be the BnB image if you want to simulate the sources on the image where the observed BnB sources are kept. One can use the BnB image to test how the BnB sources affect the detection.
:param readincat: ndarray with the shape of (nsimimages, len(field names)), where field names are listed below. This array consists of the input catalog, including the input values with the field names of
``x_true``, ``y_true``, ``mag_true``, ``hlr_true[arcsec]``, ``fbulge_true``, ``q_true`` and ``pos_ang[deg]``.
:param psf_dict: The psf configuration. Currently it only supports Moffat PSF with beta parameter of 4.5. ``psf_dict`` must be a dictionary in the form of ``{"moffat":{ "beta": _value_, "fwhm": _value_ } }``, where _value_ of ``fwhm`` is in the unit of arcsec. By default, ``psf_dict = {"moffat":{ "beta": 4.5, "fwhm": img_fwhm } }``.
:param stamp_size_arcsec: The size of the stamp of each simulated source by **GalSim**. The stamp is with the size of ``stamp_size_arcsec`` x ``stamp_size_arcsec`` (``stamp_size_arcsec`` in arcsec) where the **GalSim** will simulate one single source on. By default, it is ``stamp_size_arcsec = 20.0``.
:param mag_dict: The magnitude range which **GalSim** will simulate sources. It must be in the form of ``{"lo": _value_, "hi": _value_}``, where _value_ is expressed in magnitude. By default, it is ``mag_dict = {"lo":20.0, "hi":25.0 }``.
:param hlr_dict: The half light radius configuration of the sources simulated by **GalSim**. It is in the unit of arcsec. It has to be in the form of ``{"lo": _value_, "high": _value_}``. By default, it is ``hlr_dict = {"lo":0.35 , "hi":0.75 }``.
:param fbulge_dict: The configuration of the fraction of the bulge component. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and 1 means the galaxy has zero fraction of light from the disk component. By default, it is ``fbulge_dict = {"lo":0.5 , "hi":0.9 }``.
:param q_dict: The minor-to-major axis ratio configuration of the sources simulated by **GalSim**. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and ``q = 1`` means spherical. By default, it is ``q_dict = {"lo":0.4 , "hi":1.0 }``.
:param pos_ang_dict: The position angle configuration of the sources simulated by **GalSim**. It is in the unit of degree. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,180.0] and it is counter-clockwise with +x is 0 degree. By default, it is ``pos_ang_dict={"lo":0.0 , "hi":180.0 }``.
:param ngals_arcmin2: The projected number of the sources simulated by **GalSim** per arcmin square. You don't want to set this number too high because it will cause blending problems in the source detection. However, you don't want to lose statistical power if you set this number too low. By default, it is ``ngals_arcmin2 = 25.0``.
:param nsimimages: The number of the images you want to simulate. It will be saved in the multi-extension file with the code name ``sims_nameroot``. By default, it is ``nsimimages = 50``.
:param random_seed: The random seed of the random generator. It will be passed to **GalSim** for simulating the sources.
:param sims_nameroot: The code name you want to identify this run of simulation. It is not only the name of the subdirectory for saving the images simulated in this run, but also the code name for **ComEst** to identify the simulation for the remaining analysis pipeline. IMPORTANT: Please use the consistent code name ``sims_nameroot`` for this set of simulated images throughout **ComEst**. By default, it is ``sims_nameroot = "buldisk"``.
:param ncpu: The number of cpu for parallel running. By default, it is ``ncpu = 2``. Please do not set this number higher than the CPU cores you have.
:type path2image: str
:type readincat: ndarray
:type psf_dict: dict
:type stamp_size_arcsec: float
:type mag_dict: dict
:type hlr_dict: dict
:type fbulge_dict: dict
:type q_dict: dict
:type pos_ang_dict: dict
:type ngals_arcmin2: float
:type nsimimages: int
:type random_seed: int
:type sims_nameroot: str
:type ncpu: int
:returns: ``image_collection`` is the list containing the simulated images, one per input catalog (hence ``len(image_collection) = len(readincat)``). Each element is a simulated image output by Galsim.
:rtype: list
This function reads the ``path2image`` and puts the fake galaxies on it. In this case the galaxies consist of bulge and disk components. The properties of the fake galaxies are taken from the input catalog ``readincat`` (one catalog per simulated image) and they are convolved with the psf before being put on the image.
.. seealso:: ``comest.ComEst.BulDiskLocator`` for more details about the configuration.
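Example (a minimal sketch; the dtype field names are exactly the ones this routine reads back, while the file name and numbers are placeholders)::

    >>> import numpy as np
    >>> dt   = np.dtype([("x_true", "f8"), ("y_true", "f8"), ("mag_true", "f8"),
    ...                  ("hlr_true[arcsec]", "f8"), ("fbulge_true", "f8"),
    ...                  ("q_true", "f8"), ("pos_ang[deg]", "f8")])
    >>> cat0 = np.array([(1024.0, 2048.0, 22.5, 0.5, 0.7, 0.8, 45.0)], dtype = dt)
    >>> # one input catalog per output image; len(readincat) sets the number of images
    >>> imgs = ModelGalLocator("sfi.fits", readincat = [cat0], zeropoint = 30.0,
    ...                        psf_dict  = {"moffat":{"beta":4.5, "fwhm":1.0}},
    ...                        args_pssr = utils.argpasser())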
"""
# ---
# extract args_pssr
# ---
stamp_size_arcsec = args_pssr.stamp_size_arcsec
mag_dict = args_pssr.mag_dict
hlr_dict = args_pssr.hlr_dict
fbulge_dict = args_pssr.fbulge_dict
q_dict = args_pssr.q_dict
pos_ang_dict = args_pssr.pos_ang_dict
ngals_arcmin2 = args_pssr.ngals_arcmin2
#nsimimages = args_pssr.nsimimages
ncpu = args_pssr.ncpu
# set up the cpu number
if ncpu > multiprocessing.cpu_count():
print
print RuntimeWarning("ncpu:", ncpu, "is larger than total number of cpu:", multiprocessing.cpu_count())
print RuntimeWarning("Using the total number of the cpu:", multiprocessing.cpu_count())
print
nproc = multiprocessing.cpu_count()
else:
nproc = ncpu
# ---
# Start simulate the image one by one
# ---
# set up the task queue
task_queue = multiprocessing.Queue()
# set up the done task
done_queue = multiprocessing.Queue()
# get the nsimimages
nsimimages = len(readincat)
# looping over images
for nimage in xrange(nsimimages):
# put the task
task_queue.put(
( (path2image, readincat[nimage], zeropoint, psf_dict, stamp_size_arcsec, mag_dict, hlr_dict, fbulge_dict, q_dict, pos_ang_dict, ngals_arcmin2, random_seed + nimage),
"simulated %i image" % nimage ) )
# Run the tasks and create done_queue
# Each Process command starts up a parallel process that will keep checking the queue
# for a new task. If there is one there, it grabs it and does it. If not, it waits
# until there is one to grab. When it finds a 'STOP', it shuts down.
done_queue = multiprocessing.Queue()
for nnk in xrange(nproc):
multiprocessing.Process(target = _worker, args = ( _ModelGalLocator_single, task_queue, done_queue) ).start()
# In the meanwhile, the main process keeps going. We pull each image off of the
# done_queue and put it in the appropriate place on the main image.
# This loop is happening while the other processes are still working on their tasks.
# You'll see that these logging statements get print out as the stamp images are still
# being drawn.
# claim the image
image_collection = []
# claim true_cats_collection
true_cats_collection= []
# claim the processing_info
info_list = []
for nnk in xrange(nsimimages):
single_image_and_cat, processing_info, processing_name = done_queue.get()
print "#", "%s: for file %s was done." % (processing_name, processing_info)
# info_list
info_list.append( int( processing_info.strip("simulated image") ) )
# append image
image_collection.append(single_image_and_cat.copy())
#image_collection.append(single_image_and_cat[0].copy())
# append true catalog
#true_cats_collection.append(single_image_and_cat[1].copy())
# Re-order according to the info_list - since the order of the image is important.
image_collection = list(np.array(image_collection )[ np.argsort(info_list) ])
#true_cats_collection= list(np.array(true_cats_collection)[ np.argsort(info_list) ])
# Stop the processes
# The 'STOP's could have been put on the task list before starting the processes, or you
# can wait. In some cases it can be useful to clear out the done_queue (as we just did)
# and then add on some more tasks. We don't need that here, but it's perfectly fine to do.
# Once you are done with the processes, putting nproc 'STOP's will stop them all.
# This is important, because the program will keep running as long as there are running
# processes, even if the main process gets to the end. So you do want to make sure to
# add those 'STOP's at some point!
for nnk in xrange(nproc):
task_queue.put('STOP')
# return
return image_collection
def RealGalLocator(
path2image,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
random_seed = 8241573,
args_pssr = utils.argpasser(),
):
"""
:param path2image: The absolute path to the image which you want to put the simulated sources on. This is usually the source free image (SFI), or it can also be the BnB image if you want to simulate the sources on the image where the observed BnB sources are kept. One can use the BnB image to test how the BnB sources affect the detection.
:param psf_dict: The psf configuration. Currently it only supports Moffat PSF with beta parameter of 4.5. ``psf_dict`` must be a dictionary in the form of ``{"moffat":{ "beta": _value_, "fwhm": _value_ } }``, where _value_ of ``fwhm`` is in the unit of arcsec. By default, ``psf_dict = {"moffat":{ "beta": 4.5, "fwhm": img_fwhm } }``.
:param stamp_size_arcsec: The size of the stamp of each simulated source by **GalSim**. The stamp is with the size of ``stamp_size_arcsec`` x ``stamp_size_arcsec`` (``stamp_size_arcsec`` in arcsec) where the **GalSim** will simulate one single source on. By default, it is ``stamp_size_arcsec = 20.0``.
:param mag_dict: The magnitude range which **GalSim** will simulate sources. It must be in the form of ``{"lo": _value_, "hi": _value_}``, where _value_ is expressed in magnitude. By default, it is ``mag_dict = {"lo":20.0, "hi":25.0 }``.
:param hlr_dict: The half light radius configuration of the sources simulated by **GalSim**. It is in the unit of arcsec. It has to be in the form of ``{"lo": _value_, "high": _value_}``. By default, it is ``hlr_dict = {"lo":0.35 , "hi":0.75 }``.
:param fbulge_dict: The configuration of the fraction of the bulge component. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and 1 means the galaxy has zero fraction of light from the disk component. By default, it is ``fbulge_dict = {"lo":0.5 , "hi":0.9 }``.
:param q_dict: The minor-to-major axis ratio configuration of the sources simulated by **GalSim**. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and ``q = 1`` means spherical. By default, it is ``q_dict = {"lo":0.4 , "hi":1.0 }``.
:param pos_ang_dict: The position angle configuration of the sources simulated by **GalSim**. It is in the unit of degree. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,180.0] and it is counter-clockwise with +x is 0 degree. By default, it is ``pos_ang_dict={"lo":0.0 , "hi":180.0 }``.
:param ngals_arcmin2: The projected number of the sources simulated by **GalSim** per arcmin square. You don't want to set this number too high because it will cause blending problems in the source detection. However, you don't want to lose statistical power if you set this number too low. By default, it is ``ngals_arcmin2 = 25.0``.
:param nsimimages: The number of the images you want to simulate. It will be saved in the multi-extension file with the code name ``sims_nameroot``. By default, it is ``nsimimages = 50``.
:param random_seed: The random seed of the random generator. It will be passed to **GalSim** for simulating the sources.
:param sims_nameroot: The code name you want to identify this run of simulation. It is not only the name of the subdirectory for saving the images simulated in this run, but also the code name for **ComEst** to identify the simulation for the remaining analysis pipeline. IMPORTANT: Please use the consistent code name ``sims_nameroot`` for this set of simulated images throughout **ComEst**. By default, it is ``sims_nameroot = "buldisk"``.
:param ncpu: The number of cpu for parallel running. By default, it is ``ncpu = 2``. Please do not set this number higher than the CPU cores you have.
:type path2image: str
:type psf_dict: dict
:type stamp_size_arcsec: float
:type mag_dict: dict
:type hlr_dict: dict
:type fbulge_dict: dict
:type q_dict: dict
:type pos_ang_dict: dict
:type ngals_arcmin2: float
:type nsimimages: int
:type random_seed: int
:type sims_nameroot: str
:type ncpu: int
:returns: ``image_collection`` is the list containing the simulated images and ``true_cats_collection`` is the list containing the information of the mock catalogs (hence ``len(image_collection) = len(true_cats_collection) = nsimimages``). Each element of ``image_collection`` is a simulated image output by Galsim, and each element of ``true_cats_collection`` is the corresponding true catalog.
:rtype: list, list
This function reads the ``path2image`` and puts the fake galaxies on it. The properties of the fake galaxies are directly taken from the **COSMOS** catalog provided by the **Galsim** team and they are convolved with the psf before being put on the image. In this routine, we only use ``pos_ang_dict`` to re-rotate the galaxies. Please run ``galsim_download_cosmos`` provided by **GalSim** to download the **COSMOS** catalog. Please note that since we are resampling from the observed catalog, the galaxy shape configurations ``hlr_dict``, ``fbulge_dict`` and ``q_dict`` do _NOT_ apply to this set of simulations. But for the sake of consistency, this routine still requires these input configurations. The simulated sources are uniformly distributed in the CCD with the number density of ``ngals_arcmin2``.
.. seealso:: ``comest.ComEst.RealGalLocator`` for more details about the configuration.
.. todo:: Perhaps we should add the input catalog arguments in the future.
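Example (a minimal sketch; run ``galsim_download_cosmos`` once beforehand so that ``galsim.RealGalaxyCatalog()`` can locate the COSMOS catalog; the file name and numbers are placeholders)::

    >>> imgs, cats = RealGalLocator("sfi.fits", zeropoint = 30.0,
    ...                             psf_dict  = {"moffat":{"beta":4.5, "fwhm":1.0}},
    ...                             args_pssr = utils.argpasser())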
"""
# ---
# extract args_pssr
# ---
stamp_size_arcsec = args_pssr.stamp_size_arcsec
mag_dict = args_pssr.mag_dict
hlr_dict = args_pssr.hlr_dict
fbulge_dict = args_pssr.fbulge_dict
q_dict = args_pssr.q_dict
pos_ang_dict = args_pssr.pos_ang_dict
ngals_arcmin2 = args_pssr.ngals_arcmin2
nsimimages = args_pssr.nsimimages
ncpu = args_pssr.ncpu
# set up the cpu number
if ncpu > multiprocessing.cpu_count():
print
print RuntimeWarning("ncpu:", ncpu, "is larger than total number of cpu:", multiprocessing.cpu_count())
print RuntimeWarning("Using the total number of the cpu:", multiprocessing.cpu_count())
print
nproc = multiprocessing.cpu_count()
else:
nproc = ncpu
# ---
# Start simulate the image one by one
# ---
# set up the task queue
task_queue = multiprocessing.Queue()
# set up the done task
done_queue = multiprocessing.Queue()
# looping over images
for nimage in xrange(nsimimages):
# put the task
task_queue.put(
( (path2image, zeropoint, psf_dict, stamp_size_arcsec, mag_dict, hlr_dict, fbulge_dict, q_dict, pos_ang_dict, ngals_arcmin2, random_seed + nimage),
"simulated %i image" % nimage ) )
# Run the tasks and create done_queue
# Each Process command starts up a parallel process that will keep checking the queue
# for a new task. If there is one there, it grabs it and does it. If not, it waits
# until there is one to grab. When it finds a 'STOP', it shuts down.
done_queue = multiprocessing.Queue()
for nnk in xrange(nproc):
multiprocessing.Process(target = _worker, args = ( _RealGalLocator_single, task_queue, done_queue) ).start()
# In the meanwhile, the main process keeps going. We pull each image off of the
# done_queue and put it in the appropriate place on the main image.
# This loop is happening while the other processes are still working on their tasks.
# You'll see that these logging statements get print out as the stamp images are still
# being drawn.
# claim the image
image_collection = []
# claim true_cats_collection
true_cats_collection= []
# claim the processing_info
info_list = []
for nnk in xrange(nsimimages):
single_image_and_cat, processing_info, processing_name = done_queue.get()
print "#", "%s: for file %s was done." % (processing_name, processing_info)
# info_list
info_list.append( int( processing_info.strip("simulated image") ) )
# append image
image_collection.append(single_image_and_cat[0].copy())
# append true catalog
true_cats_collection.append(single_image_and_cat[1].copy())
# Re-order according to the info_list - since the order of the image is important.
image_collection = list(np.array(image_collection )[ np.argsort(info_list) ])
true_cats_collection= list(np.array(true_cats_collection)[ np.argsort(info_list) ])
# Stop the processes
# The 'STOP's could have been put on the task list before starting the processes, or you
# can wait. In some cases it can be useful to clear out the done_queue (as we just did)
# and then add on some more tasks. We don't need that here, but it's perfectly fine to do.
# Once you are done with the processes, putting nproc 'STOP's will stop them all.
# This is important, because the program will keep running as long as there are running
# processes, even if the main process gets to the end. So you do want to make sure to
# add those 'STOP's at some point!
for nnk in xrange(nproc):
task_queue.put('STOP')
# return
return image_collection, true_cats_collection
def PntSrcLocator(
path2image,
zeropoint = 20.789,
psf_dict = {"moffat":{ "beta": 4.5, "fwhm": 1.6} },
random_seed = 8241573,
args_pssr = utils.argpasser(),
):
"""
:param path2image: The absolute path to the image which you want to put the simulated sources on. This is usually the source free image (SFI), or it can also be the BnB image if you want to simulate the sources on the image where the observed BnB sources are kept. One can use the BnB image to test how the BnB sources affect the detection.
:param psf_dict: The psf configuration. Currently it only supports Moffat PSF with beta parameter of 4.5. ``psf_dict`` must be a dictionary in the form of ``{"moffat":{ "beta": _value_, "fwhm": _value_ } }``, where _value_ of ``fwhm`` is in the unit of arcsec. By default, ``psf_dict = {"moffat":{ "beta": 4.5, "fwhm": img_fwhm } }``.
:param stamp_size_arcsec: The size of the stamp of each simulated source by **GalSim**. The stamp is with the size of ``stamp_size_arcsec`` x ``stamp_size_arcsec`` (``stamp_size_arcsec`` in arcsec) where the **GalSim** will simulate one single source on. By default, it is ``stamp_size_arcsec = 20.0``.
:param mag_dict: The magnitude range which **GalSim** will simulate sources. It must be in the form of ``{"lo": _value_, "hi": _value_}``, where _value_ is expressed in magnitude. By default, it is ``mag_dict = {"lo":20.0, "hi":25.0 }``.
:param hlr_dict: The half light radius configuration of the sources simulated by **GalSim**. It is in the unit of arcsec. It has to be in the form of ``{"lo": _value_, "high": _value_}``. By default, it is ``hlr_dict = {"lo":0.35 , "hi":0.75 }``.
:param fbulge_dict: The configuration of the fraction of the bulge component. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and 1 means the galaxy has zero fraction of light from the disk component. By default, it is ``fbulge_dict = {"lo":0.5 , "hi":0.9 }``.
:param q_dict: The minor-to-major axis ratio configuration of the sources simulated by **GalSim**. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,1] and ``q = 1`` means spherical. By default, it is ``q_dict = {"lo":0.4 , "hi":1.0 }``.
:param pos_ang_dict: The position angle configuration of the sources simulated by **GalSim**. It is in the unit of degree. It must be in the form of ``{"lo": _value_, "high": _value_}``. Note that the _value_ has to be within [0,180.0] and it is counter-clockwise with +x is 0 degree. By default, it is ``pos_ang_dict={"lo":0.0 , "hi":180.0 }``.
:param ngals_arcmin2: The projected number of the sources simulated by **GalSim** per arcmin square. You don't want to set this number too high because it will cause blending problems in the source detection. However, you don't want to lose statistical power if you set this number too low. By default, it is ``ngals_arcmin2 = 25.0``.
:param nsimimages: The number of the images you want to simulate. It will be saved in the multi-extension file with the code name ``sims_nameroot``. By default, it is ``nsimimages = 50``.
:param random_seed: The random seed of the random generator. It will be passed to **GalSim** for simulating the sources.
:param sims_nameroot: The code name you want to identify this run of simulation. It is not only the name of the subdirectory for saving the images simulated in this run, but also the code name for **ComEst** to identify the simulation for the remaining analysis pipeline. IMPORTANT: Please use the consistent code name ``sims_nameroot`` for this set of simulated images throughout **ComEst**. By default, it is ``sims_nameroot = "buldisk"``.
:param ncpu: The number of cpu for parallel running. By default, it is ``ncpu = 2``. Please do not set this number higher than the CPU cores you have.
:type path2image: str
:type psf_dict: dict
:type stamp_size_arcsec: float
:type mag_dict: dict
:type hlr_dict: dict
:type fbulge_dict: dict
:type q_dict: dict
:type pos_ang_dict: dict
:type ngals_arcmin2: float
:type nsimimages: int
:type random_seed: int
:type sims_nameroot: str
:type ncpu: int
:returns: ``image_collection`` is the list containing the simulated images and ``true_cats_collection`` is the list containing the information of the mock catalogs (hence ``len(image_collection) = len(true_cats_collection) = nsimimages``). Each element of ``image_collection`` is a simulated image output by Galsim, and each element of ``true_cats_collection`` is the corresponding true catalog.
:rtype: list, list
This function reads the ``path2image`` and puts the fake point sources on it. In this case the sources are point sources (e.g., stars) convolved with the given PSF. Please note that since we are simulating point sources, the actual shape of the simulated sources is just the PSF with the given ``img_fwhm``. Therefore the configurations of ``hlr_dict``, ``fbulge_dict``, ``q_dict`` and ``pos_ang_dict`` do _NOT_ apply here. But for the sake of consistency, this routine still requires these input configurations. The simulated sources are uniformly distributed in the CCD with the number density of ``ngals_arcmin2``.
.. seealso:: ``comest.ComEst.PntSrcLocator`` for more details about the configuration.
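Example (a minimal sketch; the file name and numbers are placeholders; for point sources only the PSF, ``mag_dict`` and ``ngals_arcmin2`` shape the output, but the remaining attributes are still read from ``args_pssr``)::

    >>> imgs, cats = PntSrcLocator("sfi.fits", zeropoint = 30.0,
    ...                            psf_dict  = {"moffat":{"beta":4.5, "fwhm":1.0}},
    ...                            args_pssr = utils.argpasser())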
"""
# ---
# extract args_pssr
# ---
stamp_size_arcsec = args_pssr.stamp_size_arcsec
mag_dict = args_pssr.mag_dict
hlr_dict = args_pssr.hlr_dict
fbulge_dict = args_pssr.fbulge_dict
q_dict = args_pssr.q_dict
pos_ang_dict = args_pssr.pos_ang_dict
ngals_arcmin2 = args_pssr.ngals_arcmin2
nsimimages = args_pssr.nsimimages
ncpu = args_pssr.ncpu
# set up the cpu number
if ncpu > multiprocessing.cpu_count():
print
print RuntimeWarning("ncpu:", ncpu, "is larger than total number of cpu:", multiprocessing.cpu_count())
print RuntimeWarning("Using the total number of the cpu:", multiprocessing.cpu_count())
print
nproc = multiprocessing.cpu_count()
else:
nproc = ncpu
# ---
# Start simulate the image one by one
# ---
# set up the task queue
task_queue = multiprocessing.Queue()
# set up the done task
done_queue = multiprocessing.Queue()
# looping over images
for nimage in xrange(nsimimages):
# put the task
task_queue.put(
( (path2image, zeropoint, psf_dict, stamp_size_arcsec, mag_dict, hlr_dict, fbulge_dict, q_dict, pos_ang_dict, ngals_arcmin2, random_seed + nimage),
"simulated %i image" % nimage ) )
# Run the tasks and create done_queue
# Each Process command starts up a parallel process that will keep checking the queue
# for a new task. If there is one there, it grabs it and does it. If not, it waits
# until there is one to grab. When it finds a 'STOP', it shuts down.
done_queue = multiprocessing.Queue()
for nnk in xrange(nproc):
multiprocessing.Process(target = _worker, args = ( _PntSrcLocator_single, task_queue, done_queue) ).start()
# In the meanwhile, the main process keeps going. We pull each image off of the
# done_queue and put it in the appropriate place on the main image.
# This loop is happening while the other processes are still working on their tasks.
    # You'll see that these logging statements get printed out as the stamp images are still
# being drawn.
# claim the image
image_collection = []
# claim true_cats_collection
true_cats_collection= []
# claim the processing_info
info_list = []
for nnk in xrange(nsimimages):
single_image_and_cat, processing_info, processing_name = done_queue.get()
print "#", "%s: for file %s was done." % (processing_name, processing_info)
# info_list
info_list.append( int( processing_info.strip("simulated image") ) )
# append image
image_collection.append(single_image_and_cat[0].copy())
# append true catalog
true_cats_collection.append(single_image_and_cat[1].copy())
# Re-order according to the info_list - since the order of the image is important.
image_collection = list(np.array(image_collection )[ np.argsort(info_list) ])
true_cats_collection= list(np.array(true_cats_collection)[ np.argsort(info_list) ])
# Stop the processes
# The 'STOP's could have been put on the task list before starting the processes, or you
# can wait. In some cases it can be useful to clear out the done_queue (as we just did)
# and then add on some more tasks. We don't need that here, but it's perfectly fine to do.
# Once you are done with the processes, putting nproc 'STOP's will stop them all.
# This is important, because the program will keep running as long as there are running
# processes, even if the main process gets to the end. So you do want to make sure to
# add those 'STOP's at some point!
for nnk in xrange(nproc):
task_queue.put('STOP')
# return
return image_collection, true_cats_collection
|
inonchiu/ComEst
|
comest/SrcPlacer.py
|
Python
|
mit
| 78,803
|
[
"Galaxy",
"Gaussian"
] |
e3a6496d58a5efede203e3cb09c0e9725e6944cd1d3434ddeb71a421f8ca9e90
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
***********************************
**espresso.esutil.UniformOnSphere**
***********************************
"""
from espresso import pmi
from _espresso import esutil_UniformOnSphere
class UniformOnSphereLocal(esutil_UniformOnSphere):
pass
if pmi.isController:
class UniformOnSphere(object):
__metaclass__ = pmi.Proxy
"""A random variate that generates 3D vectors that are uniformly
distributed on a sphere."""
pmiproxydefs = dict(
cls = 'espresso.esutil.UniformOnSphereLocal',
localcall = [ '__call__' ],
)
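# A minimal usage sketch (assumption: running on the pmi controller; the exact
# constructor arguments of the underlying C++ object may differ):
#
#   rng = UniformOnSphere()
#   vec = rng()   # a 3D vector drawn uniformly from the unit sphere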
|
BackupTheBerlios/espressopp
|
src/esutil/UniformOnSphere.py
|
Python
|
gpl-3.0
| 1,464
|
[
"ESPResSo"
] |
b681cd48c900af33c72979e35fdfc2458f1bcf66163b6e8dca5ecf0260be7f76
|
"""
Light Actors
~~~~~~~~~~~~
Positional lights in PyVista have customizable beam shapes, see the
:ref:`ref_light_beam_shape_example` example. Spotlights are special in
the sense that they are unidirectional lights with a finite position,
so they can be visualized using a cone.
This is exactly the purpose of a ``vtk.vtkLightActor``, the
functionality of which can be enabled for spotlights:
"""
# sphinx_gallery_thumbnail_number = 1
import numpy as np
import pyvista as pv
from pyvista import examples
cow = examples.download_cow()
cow.rotate_x(90)
plotter = pv.Plotter(lighting='none', window_size=(1000, 1000))
plotter.add_mesh(cow, color='white')
floor = pv.Plane(center=(*cow.center[:2], cow.bounds[-2]),
i_size=30, j_size=25)
plotter.add_mesh(floor, color='green')
UFO = pv.Light(position=(0, 0, 10), focal_point=(0, 0, 0), color='white')
UFO.positional = True
UFO.cone_angle = 40
UFO.exponent = 10
UFO.intensity = 3
UFO.show_actor()
plotter.add_light(UFO)
# enable shadows to better demonstrate lighting
plotter.enable_shadows()
plotter.camera_position = [(28, 30, 22), (0.77, 0, -0.44), (0, 0, 1)]
plotter.show()
###############################################################################
# Light actors can be very useful when designing complex scenes where
# spotlights are involved in lighting.
plotter = pv.Plotter(lighting='none')
plane = pv.Plane(i_size=4, j_size=4)
plotter.add_mesh(plane, color='white')
rot120 = np.array([[-0.5, -np.sqrt(3)/2, 0], [np.sqrt(3)/2, -0.5, 0], [0, 0, 1]])
position = (-1.5, -1.5, 3)
focus = (-0.5, -0.5, 0)
colors = ['red', 'lime', 'blue']
for color in colors:
position = rot120 @ position
focus = rot120 @ focus
light = pv.Light(position=position, focal_point=focus, color=color)
light.positional = True
light.cone_angle = 15
light.show_actor()
plotter.add_light(light)
plotter.show()
###############################################################################
# One thing to watch out for is that the light actors are represented such that
# their cone has a fixed height. This implies that for very large cone angles
# we typically end up with enormous light actors, in which case setting a manual
# camera position before rendering is usually a good idea. Increasing the first
# example's cone angle and omitting the manual camera positioning exemplifies
# the problem:
plotter = pv.Plotter(lighting='none')
plotter.add_mesh(cow, color='white')
floor = pv.Plane(center=(*cow.center[:2], cow.bounds[-2]),
i_size=30, j_size=25)
plotter.add_mesh(floor, color='green')
UFO = pv.Light(position=(0, 0, 10), focal_point=(0, 0, 0), color='white')
UFO.positional = True
UFO.cone_angle = 89
UFO.exponent = 10
UFO.intensity = 3
UFO.show_actor()
plotter.add_light(UFO)
plotter.show()
|
akaszynski/vtkInterface
|
examples/04-lights/actors.py
|
Python
|
mit
| 2,810
|
[
"VTK"
] |
5c0bcfb596cd9a2ca3d4ee5dc8791817000b9eb2fe579132dda94f66f3cfcee5
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto
from pyscf.nao import nao, conv_yzx2xyz_c
mol = gto.M( verbose = 1,
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''', basis = 'cc-pvdz',)
conv = conv_yzx2xyz_c(mol)
sv = nao(gto=mol)
class KnowValues(unittest.TestCase):
def test_verify(self):
print(mol.spin, __file__)
def test_gto2sv(self):
""" Test transformation of the radial orbitals from GTO to NAO type"""
psi_log = sv.ao_log.psi_log
self.assertEqual((sv.natoms,sv.norbs,len(psi_log)), (3,24,2))
rr = sv.ao_log.rr
self.assertEqual(len(rr), 1024)
dr = np.log(rr[1]/rr[0])
for mu2ff in psi_log:
for ff in mu2ff:
norm = (ff**2*rr**3).sum()*dr
self.assertAlmostEqual(norm, 1.0)
def test_atom2sv(self):
""" Test costructing a skeleton for later use to define spherical grid with pySCF """
dl = [ [1, [1.0, 0.44, 2.0]], [8, [0.0, 0.0, 0.1]], [1, [0.0, 0.0, -2.0]]]
sv = nao(xyz_list=dl)
self.assertEqual(sv.natoms, len(dl))
for ia,a in enumerate(dl):
self.assertEqual(sv.sp2charge[sv.atom2sp[ia]], a[0])
self.assertTrue(np.all(sv.atom2coord[ia,:]==a[1]))
self.assertTrue(sv.atom2s is None)
def test_overlap_gto_vs_nao(self):
""" Test computation of overlaps computed between NAOs against overlaps computed between GTOs"""
from pyscf.nao.m_overlap_am import overlap_am
oref = conv.conv_yzx2xyz_2d(mol.intor_symmetric('cint1e_ovlp_sph'))
over = sv.overlap_coo(funct=overlap_am).toarray()
self.assertTrue(abs(over-oref).sum()<5e-9)
def test_laplace_gto_vs_nao(self):
""" Test computation of kinetic energy between NAOs against those computed between GTOs"""
from pyscf.nao.m_laplace_am import laplace_am
tref = conv.conv_yzx2xyz_2d(mol.intor_symmetric('int1e_kin'))
tkin = (-0.5*sv.overlap_coo(funct=laplace_am)).toarray()
self.assertTrue(abs(tref-tkin).sum()/len(tkin)<5e-9)
def test_energy_nuc_gto_vs_nao(self):
""" Test computation of matrix elements of nuclear-electron attraction """
sv = nao(gto=mol)
e_nao = sv.energy_nuc()
e_gto = mol.energy_nuc()
self.assertAlmostEqual(e_nao, e_gto)
if __name__ == "__main__":
unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0001_nao.py
|
Python
|
apache-2.0
| 2,947
|
[
"PySCF"
] |
ae0b261f84e922e2849d88115dc686f1a48f2f11c3d9e24992b992914f80f3fc
|
"""Learn to estimate functions from examples. (Chapters 18-20)"""
from .utils import (
removeall, unique, product, argmax, argmax_random_tie, isclose,
dotproduct, vector_add, scalar_vector_product, weighted_sample_with_replacement,
weighted_sampler, num_or_str, normalize, clip, sigmoid, print_table, DataFile
)
import copy
import heapq
import math
import random
# XXX statistics.mode is not quite the same as the old utils.mode:
# it insists on there being a unique most-frequent value. Code using mode
# needs to be revisited, or we need to restore utils.mode.
from statistics import mean, mode
from collections import defaultdict
# ______________________________________________________________________________
def rms_error(predictions, targets):
return math.sqrt(ms_error(predictions, targets))
def ms_error(predictions, targets):
return mean([(p - t)**2 for p, t in zip(predictions, targets)])
def mean_error(predictions, targets):
return mean([abs(p - t) for p, t in zip(predictions, targets)])
def manhattan_distance(predictions, targets):
return sum([abs(p - t) for p, t in zip(predictions, targets)])
def mean_boolean_error(predictions, targets):
return mean([(p != t) for p, t in zip(predictions, targets)])
# ______________________________________________________________________________
class DataSet:
"""A data set for a machine learning problem. It has the following fields:
d.examples A list of examples. Each one is a list of attribute values.
d.attrs A list of integers to index into an example, so example[attr]
gives a value. Normally the same as range(len(d.examples[0])).
d.attrnames Optional list of mnemonic names for corresponding attrs.
d.target The attribute that a learning algorithm will try to predict.
By default the final attribute.
d.inputs The list of attrs without the target.
d.values A list of lists: each sublist is the set of possible
values for the corresponding attribute. If initially None,
it is computed from the known examples by self.setproblem.
If not None, an erroneous value raises ValueError.
d.distance A function from a pair of examples to a nonnegative number.
Should be symmetric, etc. Defaults to mean_boolean_error
since that can handle any field types.
d.name Name of the data set (for output display only).
d.source URL or other source where the data came from.
Normally, you call the constructor and you're done; then you just
access fields like d.examples and d.target and d.inputs."""
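    # For instance, the 'zoo' dataset defined near the end of this module is
    # built with DataSet(name='zoo', target='type', exclude=['name'],
    # attrnames=...), which reads 'zoo.csv' from the data directory, predicts
    # the 'type' column and drops 'name' from the inputs.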
def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
inputs=None, values=None, distance=mean_boolean_error,
name='', source='', exclude=()):
"""Accepts any of DataSet's fields. Examples can also be a
string or file from which to parse examples using parse_csv.
Optional parameter: exclude, as documented in .setproblem().
>>> DataSet(examples='1, 2, 3')
<DataSet(): 1 examples, 3 attributes>
"""
self.name = name
self.source = source
self.values = values
self.distance = distance
if values is None:
self.got_values_flag = False
else:
self.got_values_flag = True
# Initialize .examples from string or list or data directory
if isinstance(examples, str):
self.examples = parse_csv(examples)
elif examples is None:
self.examples = parse_csv(DataFile(name + '.csv').read())
else:
self.examples = examples
# Attrs are the indices of examples, unless otherwise stated.
if attrs is None and self.examples is not None:
attrs = list(range(len(self.examples[0])))
self.attrs = attrs
# Initialize .attrnames from string, list, or by default
if isinstance(attrnames, str):
self.attrnames = attrnames.split()
else:
self.attrnames = attrnames or attrs
self.setproblem(target, inputs=inputs, exclude=exclude)
def setproblem(self, target, inputs=None, exclude=()):
"""Set (or change) the target and/or inputs.
This way, one DataSet can be used multiple ways. inputs, if specified,
is a list of attributes, or specify exclude as a list of attributes
to not use in inputs. Attributes can be -n .. n, or an attrname.
Also computes the list of possible values, if that wasn't done yet."""
self.target = self.attrnum(target)
exclude = map(self.attrnum, exclude)
if inputs:
self.inputs = removeall(self.target, inputs)
else:
self.inputs = [a for a in self.attrs
if a != self.target and a not in exclude]
if not self.values:
self.values = list(map(unique, zip(*self.examples)))
self.check_me()
def check_me(self):
"Check that my fields make sense."
assert len(self.attrnames) == len(self.attrs)
assert self.target in self.attrs
assert self.target not in self.inputs
assert set(self.inputs).issubset(set(self.attrs))
if self.got_values_flag:
# only check if values are provided while initializing DataSet
list(map(self.check_example, self.examples))
def add_example(self, example):
"Add an example to the list of examples, checking it first."
self.check_example(example)
self.examples.append(example)
def check_example(self, example):
"Raise ValueError if example has any invalid values."
if self.values:
for a in self.attrs:
if example[a] not in self.values[a]:
raise ValueError('Bad value %s for attribute %s in %s' %
(example[a], self.attrnames[a], example))
def attrnum(self, attr):
"Returns the number used for attr, which can be a name, or -n .. n-1."
if isinstance(attr, str):
return self.attrnames.index(attr)
elif attr < 0:
return len(self.attrs) + attr
else:
return attr
def sanitize(self, example):
"Return a copy of example, with non-input attributes replaced by None."
return [attr_i if i in self.inputs else None
for i, attr_i in enumerate(example)]
def __repr__(self):
return '<DataSet(%s): %d examples, %d attributes>' % (
self.name, len(self.examples), len(self.attrs))
# ______________________________________________________________________________
def parse_csv(input, delim=','):
r"""Input is a string consisting of lines, each line has comma-delimited
fields. Convert this into a list of lists. Blank lines are skipped.
Fields that look like numbers are converted to numbers.
The delim defaults to ',' but '\t' and None are also reasonable values.
>>> parse_csv('1, 2, 3 \n 0, 2, na')
[[1, 2, 3], [0, 2, 'na']]
"""
lines = [line for line in input.splitlines() if line.strip()]
return [list(map(num_or_str, line.split(delim))) for line in lines]
# ______________________________________________________________________________
class CountingProbDist:
"""A probability distribution formed by observing and counting examples.
If p is an instance of this class and o is an observed value, then
there are 3 main operations:
p.add(o) increments the count for observation o by 1.
p.sample() returns a random element from the distribution.
p[o] returns the probability for o (as in a regular ProbDist)."""
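    # A small worked example (plain comment, not a doctest): with
    # p = CountingProbDist('aab'), p['a'] is 2/3 and p['b'] is 1/3;
    # after p.add('b') both probabilities become 1/2.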
def __init__(self, observations=[], default=0):
"""Create a distribution, and optionally add in some observations.
By default this is an unsmoothed distribution, but saying default=1,
for example, gives you add-one smoothing."""
self.dictionary = {}
self.n_obs = 0.0
self.default = default
self.sampler = None
for o in observations:
self.add(o)
def add(self, o):
"Add an observation o to the distribution."
self.smooth_for(o)
self.dictionary[o] += 1
self.n_obs += 1
self.sampler = None
def smooth_for(self, o):
"""Include o among the possible observations, whether or not
it's been observed yet."""
if o not in self.dictionary:
self.dictionary[o] = self.default
self.n_obs += self.default
self.sampler = None
def __getitem__(self, item):
"Return an estimate of the probability of item."
self.smooth_for(item)
return self.dictionary[item] / self.n_obs
# (top() and sample() are not used in this module, but elsewhere.)
def top(self, n):
"Return (count, obs) tuples for the n most frequent observations."
return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
def sample(self):
"Return a random sample from the distribution."
if self.sampler is None:
self.sampler = weighted_sampler(list(self.dictionary.keys()),
list(self.dictionary.values()))
return self.sampler()
# ______________________________________________________________________________
def PluralityLearner(dataset):
"""A very dumb algorithm: always pick the result that was most popular
in the training data. Makes a baseline for comparison."""
most_popular = mode([e[dataset.target] for e in dataset.examples])
def predict(example):
"Always return same result: the most popular from the training set."
return most_popular
return predict
# ______________________________________________________________________________
def NaiveBayesLearner(dataset):
"""Just count how many times each value of each input attribute
occurs, conditional on the target value. Count the different
target values too."""
targetvals = dataset.values[dataset.target]
target_dist = CountingProbDist(targetvals)
attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr])
for gv in targetvals
for attr in dataset.inputs}
for example in dataset.examples:
targetval = example[dataset.target]
target_dist.add(targetval)
for attr in dataset.inputs:
attr_dists[targetval, attr].add(example[attr])
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(targetval):
return (target_dist[targetval] *
product(attr_dists[targetval, attr][example[attr]]
for attr in dataset.inputs))
return argmax(targetvals, key=class_probability)
return predict
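# Illustrative usage: nb = NaiveBayesLearner(iris) builds the counting
# distributions from the iris examples, and nb(iris.sanitize(iris.examples[0]))
# then returns the most probable class for that (input-only) example.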
# ______________________________________________________________________________
def NearestNeighborLearner(dataset, k=1):
"k-NearestNeighbor: the k nearest neighbors vote."
def predict(example):
"Find the k closest, and have them vote for the best."
best = heapq.nsmallest(k, ((dataset.distance(e, example), e)
for e in dataset.examples))
return mode(e[dataset.target] for (d, e) in best)
return predict
# ______________________________________________________________________________
class DecisionFork:
"""A fork of a decision tree holds an attribute to test, and a dict
of branches, one for each of the attribute's values."""
def __init__(self, attr, attrname=None, branches=None):
"Initialize by saying what attribute this node tests."
self.attr = attr
self.attrname = attrname or attr
self.branches = branches or {}
def __call__(self, example):
"Given an example, classify it using the attribute and the branches."
attrvalue = example[self.attr]
return self.branches[attrvalue](example)
def add(self, val, subtree):
"Add a branch. If self.attr = val, go to the given subtree."
self.branches[val] = subtree
def display(self, indent=0):
name = self.attrname
print('Test', name)
for (val, subtree) in self.branches.items():
print(' ' * 4 * indent, name, '=', val, '==>', end=' ')
subtree.display(indent + 1)
def __repr__(self):
return ('DecisionFork(%r, %r, %r)'
% (self.attr, self.attrname, self.branches))
class DecisionLeaf:
"A leaf of a decision tree holds just a result."
def __init__(self, result):
self.result = result
def __call__(self, example):
return self.result
def display(self, indent=0):
print('RESULT =', self.result)
def __repr__(self):
return repr(self.result)
# ______________________________________________________________________________
def DecisionTreeLearner(dataset):
"[Figure 18.5]"
target, values = dataset.target, dataset.values
def decision_tree_learning(examples, attrs, parent_examples=()):
if len(examples) == 0:
return plurality_value(parent_examples)
elif all_same_class(examples):
return DecisionLeaf(examples[0][target])
elif len(attrs) == 0:
return plurality_value(examples)
else:
A = choose_attribute(attrs, examples)
tree = DecisionFork(A, dataset.attrnames[A])
for (v_k, exs) in split_by(A, examples):
subtree = decision_tree_learning(
exs, removeall(A, attrs), examples)
tree.add(v_k, subtree)
return tree
def plurality_value(examples):
"""Return the most popular target value for this set of examples.
(If target is binary, this is the majority; otherwise plurality.)"""
popular = argmax_random_tie(values[target],
key=lambda v: count(target, v, examples))
return DecisionLeaf(popular)
def count(attr, val, examples):
"Count the number of examples that have attr = val."
        return sum(e[attr] == val for e in examples)
def all_same_class(examples):
"Are all these examples in the same target class?"
class0 = examples[0][target]
return all(e[target] == class0 for e in examples)
def choose_attribute(attrs, examples):
"Choose the attribute with the highest information gain."
return argmax_random_tie(attrs,
key=lambda a: information_gain(a, examples))
def information_gain(attr, examples):
"Return the expected reduction in entropy from splitting by attr."
def I(examples):
return information_content([count(target, v, examples)
for v in values[target]])
N = float(len(examples))
remainder = sum((len(examples_i) / N) * I(examples_i)
for (v, examples_i) in split_by(attr, examples))
return I(examples) - remainder
def split_by(attr, examples):
"Return a list of (val, examples) pairs for each val of attr."
return [(v, [e for e in examples if e[attr] == v])
for v in values[attr]]
return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
"Number of bits to represent the probability distribution in values."
probabilities = normalize(removeall(0, values))
return sum(-p * math.log2(p) for p in probabilities)
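# For example, information_content([6, 6]) is 1.0 bit (an even split), while
# information_content([12, 0]) is 0.0 bits (a pure node).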
# ______________________________________________________________________________
# A decision list is implemented as a list of (test, value) pairs.
def DecisionListLearner(dataset):
"""[Figure 18.11]"""
def decision_list_learning(examples):
if not examples:
return [(True, False)]
t, o, examples_t = find_examples(examples)
if not t:
raise Failure
return [(t, o)] + decision_list_learning(examples - examples_t)
def find_examples(examples):
"""Find a set of examples that all have the same outcome under
some test. Return a tuple of the test, outcome, and examples."""
raise NotImplementedError
def passes(example, test):
"Does the example pass the test?"
raise NotImplementedError
def predict(example):
"Predict the outcome for the first passing test."
for test, outcome in predict.decision_list:
if passes(example, test):
return outcome
predict.decision_list = decision_list_learning(set(dataset.examples))
return predict
# ______________________________________________________________________________
def NeuralNetLearner(dataset, hidden_layer_sizes=[3],
learning_rate=0.01, epoches=100):
"""
Layered feed-forward network.
hidden_layer_sizes: List of number of hidden units per hidden layer
    learning_rate: Learning rate of gradient descent
epoches: Number of passes over the dataset
"""
i_units = len(dataset.inputs)
o_units = 1 # As of now, dataset.target gives only one index.
# construct a network
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net,
learning_rate, epoches)
def predict(example):
# Input nodes
i_nodes = learned_net[0]
# Activate input layer
for v, n in zip(example, i_nodes):
n.value = v
# Forward pass
for layer in learned_net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Hypothesis
o_nodes = learned_net[-1]
pred = [o_nodes[i].value for i in range(o_units)]
return 1 if pred[0] >= 0.5 else 0
return predict
class NNUnit:
"""
Single Unit of Multiple Layer Neural Network
inputs: Incoming connections
weights: weights to incoming connections
"""
    def __init__(self, weights=None, inputs=None):
        self.weights = weights or []
        self.inputs = inputs or []
        self.value = None
        self.activation = sigmoid
def network(input_units, hidden_layer_sizes, output_units):
"""
    Create a directed acyclic network with the given number of layers.
    hidden_layer_sizes: list of the number of neuron units in each hidden
    layer, excluding the input and output layers.
"""
# Check for PerceptronLearner
if hidden_layer_sizes:
layers_sizes = [input_units] + hidden_layer_sizes + [output_units]
else:
layers_sizes = [input_units] + [output_units]
net = [[NNUnit() for n in range(size)]
for size in layers_sizes]
n_layers = len(net)
# Make Connection
for i in range(1, n_layers):
for n in net[i]:
for k in net[i-1]:
n.inputs.append(k)
n.weights.append(0)
return net
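# For example, network(2, [3], 1) builds three layers of sizes [2, 3, 1]; every
# unit is connected to all units of the previous layer, with incoming weights
# initialised to 0 (BackPropagationLearner later randomises them).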
def BackPropagationLearner(dataset, net, learning_rate, epoches):
"[Figure 18.23] The back-propagation algorithm for multilayer network"
# Initialise weights
for layer in net:
for node in layer:
node.weights = [random.uniform(-0.5, 0.5)
for i in range(len(node.weights))]
examples = dataset.examples
'''
As of now dataset.target gives an int instead of list,
Changing dataset class will have effect on all the learners.
Will be taken care of later
'''
idx_t = [dataset.target]
idx_i = dataset.inputs
n_layers = len(net)
o_nodes = net[-1]
i_nodes = net[0]
for epoch in range(epoches):
# Iterate over each example
for e in examples:
i_val = [e[i] for i in idx_i]
t_val = [e[i] for i in idx_t]
# Activate input layer
for v, n in zip(i_val, i_nodes):
n.value = v
# Forward pass
for layer in net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Initialize delta
delta = [[] for i in range(n_layers)]
# Compute outer layer delta
o_units = len(o_nodes)
err = [t_val[i] - o_nodes[i].value
for i in range(o_units)]
delta[-1] = [(o_nodes[i].value) * (1 - o_nodes[i].value) *
(err[i]) for i in range(o_units)]
# Backward pass
h_layers = n_layers - 2
for i in range(h_layers, 0, -1):
layer = net[i]
h_units = len(layer)
nx_layer = net[i+1]
# weights from each ith layer node to each i + 1th layer node
w = [[node.weights[k] for node in nx_layer]
for k in range(h_units)]
delta[i] = [(layer[j].value) * (1 - layer[j].value) *
dotproduct(w[j], delta[i+1])
for j in range(h_units)]
# Update weights
for i in range(1, n_layers):
layer = net[i]
inc = [node.value for node in net[i-1]]
units = len(layer)
for j in range(units):
layer[j].weights = vector_add(layer[j].weights,
scalar_vector_product(
learning_rate * delta[i][j], inc))
return net
def PerceptronLearner(dataset, learning_rate=0.01, epoches=100):
"""Logistic Regression, NO hidden layer"""
i_units = len(dataset.inputs)
o_units = 1 # As of now, dataset.target gives only one index.
hidden_layer_sizes = []
raw_net = network(i_units, hidden_layer_sizes, o_units)
learned_net = BackPropagationLearner(dataset, raw_net, learning_rate, epoches)
def predict(example):
# Input nodes
i_nodes = learned_net[0]
# Activate input layer
for v, n in zip(example, i_nodes):
n.value = v
# Forward pass
for layer in learned_net[1:]:
for node in layer:
inc = [n.value for n in node.inputs]
in_val = dotproduct(inc, node.weights)
node.value = node.activation(in_val)
# Hypothesis
o_nodes = learned_net[-1]
pred = [o_nodes[i].value for i in range(o_units)]
return 1 if pred[0] >= 0.5 else 0
return predict
# ______________________________________________________________________________
def Linearlearner(dataset, learning_rate=0.01, epochs=100):
"""Define with learner = Linearlearner(data); infer with learner(x)."""
idx_i = dataset.inputs
idx_t = dataset.target # As of now, dataset.target gives only one index.
examples = dataset.examples
# X transpose
    X_col = [[example[i] for example in examples] for i in idx_i]  # vertical columns of X
    # Add dummy
    ones = [1 for i in range(len(examples))]
    X_col = [ones] + X_col
    # Initialize random weights
    w = [random.uniform(-0.5, 0.5) for i in range(len(idx_i) + 1)]
for epoch in range(epochs):
err = []
# Pass over all examples
for example in examples:
            x = [example[i] for i in idx_i]
x = [1] + x
y = dotproduct(w, x)
t = example[idx_t]
err.append(t - y)
# update weights
for i in range(len(w)):
            w[i] = w[i] + learning_rate * dotproduct(err, X_col[i])
def predict(example):
x = [1] + example
return dotproduct(w, x)
return predict
# ______________________________________________________________________________
def EnsembleLearner(learners):
"""Given a list of learning algorithms, have them vote."""
def train(dataset):
predictors = [learner(dataset) for learner in learners]
def predict(example):
return mode(predictor(example) for predictor in predictors)
return predict
return train
# ______________________________________________________________________________
def AdaBoost(L, K):
"""[Figure 18.34]"""
def train(dataset):
examples, target = dataset.examples, dataset.target
N = len(examples)
epsilon = 1. / (2 * N)
w = [1. / N] * N
h, z = [], []
for k in range(K):
h_k = L(dataset, w)
h.append(h_k)
error = sum(weight for example, weight in zip(examples, w)
if example[target] != h_k(example))
# Avoid divide-by-0 from either 0% or 100% error rates:
error = clip(error, epsilon, 1 - epsilon)
for j, example in enumerate(examples):
if example[target] == h_k(example):
w[j] *= error / (1. - error)
w = normalize(w)
z.append(math.log((1. - error) / error))
return WeightedMajority(h, z)
return train
def WeightedMajority(predictors, weights):
"Return a predictor that takes a weighted vote."
def predict(example):
return weighted_mode((predictor(example) for predictor in predictors),
weights)
return predict
def weighted_mode(values, weights):
"""Return the value with the greatest total weight.
>>> weighted_mode('abbaa', [1,2,3,1,2])
'b'"""
totals = defaultdict(int)
for v, w in zip(values, weights):
totals[v] += w
return max(list(totals.keys()), key=totals.get)
# _____________________________________________________________________________
# Adapting an unweighted learner for AdaBoost
def WeightedLearner(unweighted_learner):
"""Given a learner that takes just an unweighted dataset, return
one that takes also a weight for each example. [p. 749 footnote 14]"""
def train(dataset, weights):
return unweighted_learner(replicated_dataset(dataset, weights))
return train
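# Illustrative usage: boosted = AdaBoost(WeightedLearner(DecisionTreeLearner), K=5)
# gives a learner that takes a DataSet, e.g. predict = boosted(restaurant).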
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
def weighted_replicate(seq, weights, n):
"""Return n selections from seq, with the count of each element of
seq proportional to the corresponding weight (filling in fractions
randomly).
>>> weighted_replicate('ABC', [1,2,1], 4)
['A', 'B', 'B', 'C']"""
assert len(seq) == len(weights)
weights = normalize(weights)
wholes = [int(w * n) for w in weights]
fractions = [(w * n) % 1 for w in weights]
return (flatten([x] * nx for x, nx in zip(seq, wholes)) +
weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))
def flatten(seqs): return sum(seqs, [])
# _____________________________________________________________________________
# Functions for testing learners on examples
def test(predict, dataset, examples=None, verbose=0):
"Return the proportion of the examples that are NOT correctly predicted."
if examples is None:
examples = dataset.examples
if len(examples) == 0:
return 0.0
right = 0.0
for example in examples:
desired = example[dataset.target]
output = predict(dataset.sanitize(example))
if output == desired:
right += 1
if verbose >= 2:
print(' OK: got %s for %s' % (desired, example))
elif verbose:
print('WRONG: got %s, expected %s for %s' % (
output, desired, example))
return 1 - (right / len(examples))
def train_and_test(dataset, start, end):
"""Reserve dataset.examples[start:end] for test; train on the remainder."""
start = int(start)
end = int(end)
examples = dataset.examples
train = examples[:start] + examples[end:]
val = examples[start:end]
return train, val
def cross_validation(learner, size, dataset, k=10, trials=1):
"""Do k-fold cross_validate and return their mean.
That is, keep out 1/k of the examples for testing on each of k runs.
Shuffle the examples first; If trials>1, average over several shuffles.
Returns Training error, Validataion error"""
if k is None:
k = len(dataset.examples)
if trials > 1:
trial_errT = 0
trial_errV = 0
for t in range(trials):
            errT, errV = cross_validation(learner, size, dataset,
                                          k=k, trials=1)
trial_errT += errT
trial_errV += errV
return trial_errT / trials, trial_errV / trials
else:
fold_errT = 0
fold_errV = 0
n = len(dataset.examples)
examples = dataset.examples
for fold in range(k):
random.shuffle(dataset.examples)
train_data, val_data = train_and_test(dataset, fold * (n / k),
(fold + 1) * (n / k))
dataset.examples = train_data
h = learner(dataset, size)
fold_errT += test(h, dataset, train_data)
fold_errV += test(h, dataset, val_data)
# Reverting back to original once test is completed
dataset.examples = examples
return fold_errT / k, fold_errV / k
def cross_validation_wrapper(learner, dataset, k=10, trials=1):
"""
Fig 18.8
Return the optimal value of size having minimum error
    on the validation set
    err_train: a training error array, indexed by size
    err_val: a validation error array, indexed by size
"""
err_val = []
err_train = []
size = 1
while True:
errT, errV = cross_validation(learner, size, dataset, k)
# Check for convergence provided err_val is not empty
if (err_val and isclose(err_val[-1], errV, rel_tol=1e-6)):
best_size = size
return learner(dataset, best_size)
err_val.append(errV)
err_train.append(errT)
print(err_val)
size += 1
def leave_one_out(learner, dataset, size=None):
"Leave one out cross-validation over the dataset."
return cross_validation(learner, size, dataset, k=len(dataset.examples))
def learningcurve(learner, dataset, trials=10, sizes=None):
if sizes is None:
sizes = list(range(2, len(dataset.examples) - 10, 2))
    def score(learner, size):
        random.shuffle(dataset.examples)
        train_data, val_data = train_and_test(dataset, size, len(dataset.examples))
        dataset.examples = train_data
        h = learner(dataset)
        dataset.examples = train_data + val_data
        return test(h, dataset, val_data)
return [(size, mean([score(learner, size) for t in range(trials)]))
for size in sizes]
# ______________________________________________________________________________
# The rest of this file gives datasets for machine learning problems.
orings = DataSet(name='orings', target='Distressed',
attrnames="Rings Distressed Temp Pressure Flightnum")
zoo = DataSet(name='zoo', target='type', exclude=['name'],
attrnames="name hair feathers eggs milk airborne aquatic " +
"predator toothed backbone breathes venomous fins legs tail " +
"domestic catsize type")
iris = DataSet(name="iris", target="class",
attrnames="sepal-len sepal-width petal-len petal-width class")
# ______________________________________________________________________________
# The Restaurant example from [Figure 18.2]
def RestaurantDataSet(examples=None):
"Build a DataSet of Restaurant waiting examples. [Figure 18.3]"
return DataSet(name='restaurant', target='Wait', examples=examples,
attrnames='Alternate Bar Fri/Sat Hungry Patrons Price ' +
'Raining Reservation Type WaitEstimate Wait')
restaurant = RestaurantDataSet()
def T(attrname, branches):
branches = {value: (child if isinstance(child, DecisionFork)
else DecisionLeaf(child))
for value, child in branches.items()}
return DecisionFork(restaurant.attrnum(attrname), attrname, branches)
""" [Figure 18.2]
A decision tree for deciding whether to wait for a table at a restaurant.
"""
waiting_decision_tree = T('Patrons',
{'None': 'No', 'Some': 'Yes', 'Full':
T('WaitEstimate',
{'>60': 'No', '0-10': 'Yes',
'30-60':
T('Alternate', {'No':
T('Reservation', {'Yes': 'Yes', 'No':
T('Bar', {'No': 'No',
'Yes': 'Yes'
})}),
'Yes':
T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}),
'10-30':
T('Hungry', {'No': 'Yes', 'Yes':
T('Alternate',
{'No': 'Yes', 'Yes':
T('Raining', {'No': 'No', 'Yes': 'Yes'})
})})})})
def SyntheticRestaurant(n=20):
"Generate a DataSet with n examples."
def gen():
example = list(map(random.choice, restaurant.values))
example[restaurant.target] = waiting_decision_tree(example)
return example
return RestaurantDataSet([gen() for i in range(n)])
# ______________________________________________________________________________
# Artificial, generated datasets.
def Majority(k, n):
"""Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(int(sum(bits) > k / 2))
examples.append(bits)
return DataSet(name="majority", examples=examples)
def Parity(k, n, name="parity"):
"""Return a DataSet with n k-bit examples of the parity problem:
k random bits followed by a 1 if an odd number of bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(sum(bits) % 2)
examples.append(bits)
return DataSet(name=name, examples=examples)
def Xor(n):
"""Return a DataSet with n examples of 2-input xor."""
return Parity(2, n, name="xor")
def ContinuousXor(n):
"2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."
examples = []
for i in range(n):
x, y = [random.uniform(0.0, 2.0) for i in '12']
examples.append([x, y, int(x) != int(y)])
return DataSet(name="continuous xor", examples=examples)
# ______________________________________________________________________________
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
NearestNeighborLearner, DecisionTreeLearner],
datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
Majority(7, 100), Parity(7, 100), Xor(100)],
k=10, trials=1):
"""Compare various learners on various datasets using cross-validation.
Print results as a table."""
print_table([[a.__name__.replace('Learner', '')] +
[cross_validation(a, d, k, trials) for d in datasets]
for a in algorithms],
header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
|
andres-root/AIND
|
Therm1/Planning/Project/aimacode/learning.py
|
Python
|
mit
| 35,994
|
[
"NEURON"
] |
7dacb8892de3ce6f4c861c98ffcaea3b326168374c844c6be19dd2a84a1b44e1
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Neurongrouper
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Specials.Simulaters.Simulater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
SYS.addDo('Neurongrouper','Neurongroup','Neurongrouping','Neurongrouped')
#</DefineAugmentation>
#<ImportSpecificModules>
Classer=DecorationModule
from ShareYourSystem.Standards.Itemizers import Networker
from ShareYourSystem.Standards.Recorders import Tracer,Moniter
#</ImportSpecificModules>
#<DefineLocals>
class PostletsClass(Networker.NetworkerClass):pass
class PreletsClass(Networker.NetworkerClass):pass
NeurongroupPostTeamKeyStr="Postlets"
NeurongroupPreTeamKeyStr="Prelets"
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class NeurongrouperClass(BaseClass):
def default_init(self,
_NeurongroupingBrianDict=None,
_NeurongroupingStatesDict=None,
_NeurongroupingSpikesDict=None,
_NeurongroupedBrianVariable=None,
_NeurongroupedDeriveTracersList=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_neurongroup(
self
):
#/########################/#
# Import brian
# adapt the shape of the NeurongroupingBrianDict
#debug
'''
self.debug(
[
'We adapt the shape of NeurongroupingBrianDict',
('self.',self,[
'NeurongroupingBrianDict'
])
]
)
'''
#Check
if 'N' not in self.NeurongroupingBrianDict:
self.NeurongroupingBrianDict['N']=self.SimulatingUnitsInt
else:
self.SimulatingUnitsInt=self.NeurongroupingBrianDict['N']
#Check
if 'model' not in self.NeurongroupingBrianDict:
self.NeurongroupingBrianDict['model']=''
#/##################/#
# Set finally the Neurongroup
#
#Check
if self.NeurongroupingBrianDict['model']!="" or self.NeurongroupingBrianDict['N']>0:
#maybe should import
from brian2 import NeuronGroup
#debug
self.debug(
[
'We set the Neurongroup',
('self.',self,[
'NeurongroupingBrianDict'
])
]
)
#init
self.NeurongroupedBrianVariable=NeuronGroup(
**self.NeurongroupingBrianDict
)
else:
#return
return
#/##################/#
# team Traces first all the brian variables
#
#debug
self.debug(
[
'We simulate with neurongroup',
'adapt the initial conditions of all the brian variables',
'so first we team Traces and put Tracers inside'
]
)
#Check
if 'Traces' not in self.TeamDict:
NeurongroupedTracesDeriveTeamer=self.team(
'Traces'
).TeamedValueVariable
else:
NeurongroupedTracesDeriveTeamer=self.TeamDict[
'Traces'
]
#map
self.NeurongroupedDeriveTracersList=map(
lambda __TraceStr:
NeurongroupedTracesDeriveTeamer.manage(
Tracer.TracerPrefixStr+__TraceStr,
{
'TracingKeyVariable':getattr(
self.NeurongroupedBrianVariable,
__TraceStr
),
'TraceKeyStr':__TraceStr
}
).ManagedValueVariable,
self.NeurongroupedBrianVariable.equations._equations.keys()
)
#/##################/#
# Now analyze the NeurongroupingStatesDict to set Moniters
#
#debug
self.debug(
[
'We analyze the NeurongroupingStatesDict',
('self.',self,['NeurongroupingStatesDict'])
]
)
#get
NeurongroupedTracesMoniterKeyStrsList=Moniter.MoniterClass.DoingAttributeVariablesOrderedDict.keys()
#map
self.NeurongroupedDeriveMonitersList=SYS.flat(
map(
lambda __DeriveMoniter,__SampleTuplesList:
map(
lambda __SampleTuple:
__DeriveMoniter.manage(
__SampleTuple[0],
SYS.match(
NeurongroupedTracesMoniterKeyStrsList,
__SampleTuple[1:]
)
).ManagedValueVariable,
__SampleTuplesList
),
map(
lambda __KeyStr:
NeurongroupedTracesDeriveTeamer.ManagementDict[
Tracer.TracerPrefixStr+__KeyStr
].team('Samples').TeamedValueVariable,
self.NeurongroupingStatesDict.keys()
),
self.NeurongroupingStatesDict.values()
)
)
#/##################/#
# Set Monitors inside
#
#Check
if len(NeurongroupedTracesMoniterKeyStrsList)>0:
#debug
self.debug(
[
'We set the brian monitor inside'
]
)
#import
from brian2 import StateMonitor
#map
self.NeurongroupedDeriveStateMonitorsList=map(
lambda __NeurongroupedDeriveMoniter:
__NeurongroupedDeriveMoniter.set(
'MonitBrianVariable',
StateMonitor(
#NeuronGroup
self.NeurongroupedBrianVariable,
#varname
__NeurongroupedDeriveMoniter.ParentDeriveTeamerVariable.ParentDeriveTeamerVariable.TraceKeyStr,
#record
__NeurongroupedDeriveMoniter.MoniteringLabelIndexIntsArray
)
).MonitBrianVariable,
self.NeurongroupedDeriveMonitersList
)
#/##################/#
# team Events
#
"""
def propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable):
#debug
self.debug(
[
'We have grand parents',
'map(type,self.ParentedDeriveTeamersList) is '+str(
map(type,self.ParentedDeriveTeamersList))
]
)
#Check
if type(self.ParentTopDeriveTeamerVariable)==SYS.BrianerClass:
#alias
self.NeurongroupDeriveBrianerVariable=self.ParentTopDeriveTeamerVariable
else:
#index
self.NeurongroupDeriveBrianerVariable=self.ParentedDeriveTeamersList[
map(
type,
self.ParentedDeriveTeamersList
).index(SYS.BrianerClass)
]
#manage self
self.NeurongroupDeriveBrianerVariable.TeamDict[
self.ParentTopDeriveTeamerVariable.Module.BrianPopulationTeamKeyStr
].manage(self)
#call the base method
BaseClass.propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable)
"""
def mimic_simulate(
self
):
#/##################/#
# Team States first all the brian variables
#
#debug
self.debug(
[
				'We simulate with neurongroup',
'adapt the initial conditions of all the brian variables',
'so first we team in States'
]
)
#Check
if Tracer.TracerTracesTeamKeyStr not in self.TeamDict:
self.team(Tracer.TracerTracesTeamKeyStr)
#map
map(
lambda __TraceStr:
self.TeamDict[
Tracer.TracerTracesTeamKeyStr
].manage(
Tracer.TracerPrefixStr+__TraceStr,
{
'TracingKeyVariable':getattr(
self.NeurongroupedBrianVariable,
__TraceStr
)
}
),
self.NeurongroupedBrianVariable.equations._equations.keys()
)
#/##################/#
# Call the base method
#
#simulate
BaseClass.simulate(self)
#</DefineClass>
#<DefineLocals>
#set
#SpikesClass.ManagingValueClass=Moniter.MoniterClass
#StatesClass.ManagingValueClass=Moniter.MoniterClass
#update
#NeurongrouperClass.TeamingClassesDict.update(
# {
# 'Spikes':SpikesClass,
# 'States':StatesClass
# }
#)
#</DefineLocals>
#</DefinePrint>
NeurongrouperClass.PrintingClassSkipKeyStrsList.extend(
[
'NeurongroupingBrianDict',
'NeurongroupingStatesDict',
'NeurongroupingSpikesDict',
'NeurongroupedBrianVariable',
'NeurongroupedDeriveTracersList',
'NeurongroupedDeriveMonitersList'
]
)
#<DefinePrint>
|
Ledoux/ShareYourSystem
|
Pythonlogy/draft/Simulaters/Neurongrouper/__init__.py
|
Python
|
mit
| 7,319
|
[
"Brian"
] |
47fcd492b9d3c07994c8e4f753d6e49434ee7dff848dd8266055c6b3ff2dc431
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2008-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
"""Dialog for listing payment categories"""
import collections
import gtk
from kiwi.datatypes import ValidationError
from kiwi.ui.forms import ColorField, ChoiceField, TextField
from stoqlib.api import api
from stoqlib.domain.payment.category import PaymentCategory
from stoqlib.domain.payment.payment import Payment
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.lib.colorutils import get_random_color
from stoqlib.lib.decorators import cached_property
from stoqlib.lib.message import yesno
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class PaymentCategoryEditor(BaseEditor):
model_name = _('Payment Category')
model_type = PaymentCategory
confirm_widgets = ['name']
_category_type_values = [
(_('Payable'), PaymentCategory.TYPE_PAYABLE),
(_('Receivable'), PaymentCategory.TYPE_RECEIVABLE)
]
@cached_property()
def fields(self):
return collections.OrderedDict(
name=TextField(_('Name'), proxy=True),
color=ColorField(_('Color'), proxy=True),
category_type=ChoiceField(
_('Type'), data_type=int,
values=self._category_type_values, proxy=True),
)
def __init__(self, store, model=None,
category_type=None, visual_mode=False):
self._category_type = category_type or PaymentCategory.TYPE_PAYABLE
BaseEditor.__init__(self, store, model, visual_mode=visual_mode)
if category_type is not None:
self.category_type.set_sensitive(False)
#
# BaseEditor
#
def validate_confirm(self):
category_type = self.model.category_type
if (not self.edit_mode or
self._original_category_type == category_type):
return True
payments = self.store.find(Payment,
category=self.model)
payments_count = payments.count()
if (payments_count > 0 and not
yesno(_("Changing the payment type will remove this category "
"from %s payments. Are you sure?") % payments_count,
gtk.RESPONSE_NO, _("Change"), _("Don't change"))):
return False
for p in payments:
p.category = None
return True
def create_model(self, store):
used_colors = set([
pc.color for pc in store.find(PaymentCategory)])
color = get_random_color(ignore=used_colors)
return PaymentCategory(name=u'',
color=color,
category_type=self._category_type,
store=store)
def setup_proxies(self):
self.name.grab_focus()
self._original_category_type = self.model.category_type
#
# Kiwi Callbacks
#
def on_name__validate(self, widget, new_name):
if not new_name:
return ValidationError(
_(u"The payment category should have name."))
if self.model.check_unique_value_exists(PaymentCategory.name,
new_name):
return ValidationError(
_(u"The payment category '%s' already exists.") % new_name)
def test(): # pragma nocover
creator = api.prepare_test()
retval = run_dialog(PaymentCategoryEditor, None, creator.store, None)
creator.store.confirm(retval)
if __name__ == '__main__': # pragma nocover
test()
|
andrebellafronte/stoq
|
stoqlib/gui/editors/paymentcategoryeditor.py
|
Python
|
gpl-2.0
| 4,450
|
[
"VisIt"
] |
b7321b7a80b9a86a3d0339eff4dcfd6ff96564305824917cde5fec1bdafc0fd4
|
#! /usr/bin/env python
"""
Module containing functions for cubes frame registration.
"""
__author__ = 'C. A. Gomez Gonzalez, V. Christiaens, G. Ruane, R. Farkas'
__all__ = ['frame_shift',
'cube_shift',
'frame_center_radon',
'frame_center_satspots',
'cube_recenter_satspots',
'cube_recenter_radon',
'cube_recenter_dft_upsampling',
'cube_recenter_2dfit',
'cube_recenter_via_speckles']
import numpy as np
import warnings
from packaging import version
try:
import cv2
no_opencv = False
except ImportError:
msg = "Opencv python bindings are missing."
warnings.warn(msg, ImportWarning)
no_opencv = True
from hciplot import plot_frames
from scipy.ndimage import fourier_shift
from scipy.ndimage import shift
import skimage
from skimage.transform import radon
if version.parse(skimage.__version__) <= version.parse('0.17.0'):
from skimage.feature import register_translation as cc_center
else:
from skimage.registration import phase_cross_correlation as cc_center
from multiprocessing import cpu_count
from matplotlib import pyplot as plt
from . import frame_crop
from ..config import time_ini, timing, Progressbar
from ..config.utils_conf import vip_figsize, check_array
from ..config.utils_conf import pool_map, iterable
from ..stats import frame_basic_stats
from ..var import (get_square, frame_center, get_annulus_segments,
fit_2dmoffat, fit_2dgaussian, fit_2dairydisk,
fit_2d2gaussian, cube_filter_lowpass, cube_filter_highpass)
from ..preproc import cube_crop_frames
def frame_shift(array, shift_y, shift_x, imlib='vip-fft',
interpolation='lanczos4', border_mode='reflect'):
""" Shifts a 2D array by shift_y, shift_x. Boundaries are filled with zeros.
Parameters
----------
array : numpy ndarray
Input 2d array.
shift_y, shift_x: float
Shifts in y and x directions.
imlib : {'opencv', 'ndimage-fourier', 'ndimage-interp', 'vip-fft'}, str opt
Library or method used for performing the image shift.
'ndimage-fourier' or 'vip-fft': does a fourier shift operation and
preserves better the pixel values - therefore the flux and photometry
(wrapper of scipy.ndimage.fourier_shift). Interpolation-based shift
('opencv' and 'ndimage-interp') is faster but less accurate than the
fourier shift. 'opencv' is recommended when speed is critical.
interpolation : str, optional
Only used in case of imlib is set to 'opencv' or 'ndimage-interp'
(Scipy.ndimage), where the images are shifted via interpolation.
For Scipy.ndimage the options are: 'nearneig', bilinear', 'biquadratic',
'bicubic', 'biquartic' or 'biquintic'. The 'nearneig' interpolation is
the fastest and the 'biquintic' the slowest. The 'nearneig' is the
poorer option for interpolation of noisy astronomical images.
For Opencv the options are: 'nearneig', 'bilinear', 'bicubic' or
'lanczos4'. The 'nearneig' interpolation is the fastest and the
        'lanczos4' the slowest and most accurate. 'lanczos4' is the default for
Opencv and 'biquartic' for Scipy.ndimage.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
Returns
-------
array_shifted : numpy ndarray
Shifted 2d array.
"""
check_array(array, dim=2)
image = array.copy()
if imlib == 'ndimage-fourier' or imlib == 'vip-fft':
shift_val = (shift_y, shift_x)
array_shifted = fourier_shift(np.fft.fftn(image), shift_val)
array_shifted = np.fft.ifftn(array_shifted)
array_shifted = array_shifted.real
elif imlib == 'ndimage-interp':
if interpolation == 'nearneig':
order = 0
elif interpolation == 'bilinear':
order = 1
elif interpolation == 'biquadratic':
order = 2
elif interpolation == 'bicubic':
order = 3
elif interpolation == 'biquartic' or interpolation == 'lanczos4':
order = 4
elif interpolation == 'biquintic':
order = 5
else:
raise ValueError('Scipy.ndimage interpolation method not '
'recognized')
if border_mode not in ['reflect', 'nearest', 'constant', 'mirror',
'wrap']:
raise ValueError('`border_mode` not recognized')
array_shifted = shift(image, (shift_y, shift_x), order=order,
mode=border_mode)
elif imlib == 'opencv':
if no_opencv:
msg = 'Opencv python bindings cannot be imported. Install opencv or'
msg += ' set imlib to ndimage-fourier or ndimage-interp'
raise RuntimeError(msg)
if interpolation == 'bilinear':
intp = cv2.INTER_LINEAR
elif interpolation == 'bicubic':
intp = cv2.INTER_CUBIC
elif interpolation == 'nearneig':
intp = cv2.INTER_NEAREST
elif interpolation == 'lanczos4':
intp = cv2.INTER_LANCZOS4
else:
raise ValueError('Opencv interpolation method not recognized')
if border_mode == 'mirror':
bormo = cv2.BORDER_REFLECT_101 # gfedcb|abcdefgh|gfedcba
elif border_mode == 'reflect':
bormo = cv2.BORDER_REFLECT # fedcba|abcdefgh|hgfedcb
elif border_mode == 'wrap':
bormo = cv2.BORDER_WRAP # cdefgh|abcdefgh|abcdefg
elif border_mode == 'constant':
bormo = cv2.BORDER_CONSTANT # iiiiii|abcdefgh|iiiiiii
elif border_mode == 'nearest':
bormo = cv2.BORDER_REPLICATE # aaaaaa|abcdefgh|hhhhhhh
else:
raise ValueError('`border_mode` not recognized')
image = np.float32(image)
y, x = image.shape
M = np.float32([[1, 0, shift_x], [0, 1, shift_y]])
array_shifted = cv2.warpAffine(image, M, (x, y), flags=intp,
borderMode=bormo)
else:
raise ValueError('Image transformation library not recognized')
return array_shifted
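# Illustrative usage (``image`` is any 2d numpy array):
#
#   shifted = frame_shift(image, shift_y=0.5, shift_x=0.25)          # FFT-based (default)
#   shifted = frame_shift(image, 0.5, 0.25, imlib='opencv',
#                         interpolation='bicubic')                    # faster, interpolated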
def cube_shift(cube, shift_y, shift_x, imlib='vip-fft',
interpolation='lanczos4', border_mode='reflect'):
""" Shifts the X-Y coordinates of a cube or 3D array by x and y values.
Parameters
----------
cube : numpy ndarray, 3d
Input cube.
shift_y, shift_x: float, list of floats or np.ndarray of floats
        Shifts in y and x directions for each frame. If a single value is
given then all the frames will be shifted by the same amount.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
border_mode : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
Returns
-------
cube_out : numpy ndarray, 3d
Cube with shifted frames.
"""
check_array(cube, dim=3)
nfr = cube.shape[0]
cube_out = np.zeros_like(cube)
if isinstance(shift_x, (int, float)):
shift_x = np.ones((nfr)) * shift_x
if isinstance(shift_y, (int, float)):
shift_y = np.ones((nfr)) * shift_y
for i in range(cube.shape[0]):
cube_out[i] = frame_shift(cube[i], shift_y[i], shift_x[i], imlib,
interpolation, border_mode)
return cube_out
def frame_center_satspots(array, xy, subi_size=19, sigfactor=6, shift=False,
imlib='vip-fft', interpolation='lanczos4',
fit_type='moff', border_mode='reflect', debug=False,
verbose=True):
""" Finds the center of a frame with waffle/satellite spots (e.g. for
VLT/SPHERE). The method used to determine the center is by centroiding the
4 spots via a 2d Gaussian fit and finding the intersection of the
lines they create (see Notes). This method is very sensitive to the SNR of
the satellite spots, therefore thresholding of the background pixels is
    performed. If the results are too extreme, the debug parameter will allow you to
see in depth what is going on with the fit (maybe you'll need to adjust the
sigfactor for the background pixels thresholding).
Parameters
----------
array : numpy ndarray, 2d
Image or frame.
xy : tuple of 4 tuples of 2 elements
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
shift : bool, optional
If True the image is shifted.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
debug : bool, optional
If True debug information is printed and plotted.
verbose : bool, optional
If True the intersection and shifts information is printed out.
Returns
-------
array_rec
Shifted images. *Only returned if ``shift=True``.*
shifty, shiftx
Shift Y,X to get to the true center.
Notes
-----
linear system:
.. code-block: none
A1 * x + B1 * y = C1
A2 * x + B2 * y = C2
Cramer's rule - solution can be found in determinants:
.. code-block: none
x = Dx/D
y = Dy/D
where D is main determinant of the system:
.. code-block: none
A1 B1
A2 B2
and Dx and Dy can be found from matrices:
.. code-block: none
C1 B1
C2 B2
and
.. code-block: none
A1 C1
A2 C2
C column consequently substitutes the coef. columns of x and y
L stores our coefs A, B, C of the line equations.
.. code-block: none
For D: L1[0] L1[1] for Dx: L1[2] L1[1] for Dy: L1[0] L1[2]
L2[0] L2[1] L2[2] L2[1] L2[0] L2[2]
"""
def line(p1, p2):
""" produces coefs A, B, C of line equation by 2 points
"""
A = (p1[1] - p2[1])
B = (p2[0] - p1[0])
C = (p1[0] * p2[1] - p2[0] * p1[1])
return A, B, -C
def intersection(L1, L2):
""" finds intersection point (if any) of 2 lines provided by coefs
"""
D = L1[0] * L2[1] - L1[1] * L2[0]
Dx = L1[2] * L2[1] - L1[1] * L2[2]
Dy = L1[0] * L2[2] - L1[2] * L2[0]
if D != 0:
x = Dx / D
y = Dy / D
return x, y
else:
return None
# --------------------------------------------------------------------------
check_array(array, dim=2)
if fit_type not in ['gaus','moff']:
raise TypeError('fit_type is not recognized')
if not isinstance(xy, (tuple, list)) or len(xy) != 4:
raise TypeError('Input waffle spot coordinates in wrong format (must '
'be a tuple of 4 tuples)')
cy, cx = frame_center(array)
centx = []
centy = []
subims = []
for i in range(len(xy)):
sim, y, x = get_square(array, subi_size, xy[i][1], xy[i][0],
position=True, verbose=False)
if fit_type=='gaus':
cent2dgy, cent2dgx = fit_2dgaussian(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
else:
cent2dgy, cent2dgx = fit_2dmoffat(sim, crop=False, threshold=True,
sigfactor=sigfactor, debug=debug,
full_output=False)
centx.append(cent2dgx + x)
centy.append(cent2dgy + y)
subims.append(sim)
cent2dgx_1, cent2dgx_2, cent2dgx_3, cent2dgx_4 = centx
cent2dgy_1, cent2dgy_2, cent2dgy_3, cent2dgy_4 = centy
si1, si2, si3, si4 = subims
if debug:
plot_frames((si1, si2, si3, si4), colorbar=True)
print('Centroids X,Y:')
print(cent2dgx_1, cent2dgy_1)
print(cent2dgx_2, cent2dgy_2)
print(cent2dgx_3, cent2dgy_3)
print(cent2dgx_4, cent2dgy_4)
L1 = line([cent2dgx_1, cent2dgy_1], [cent2dgx_4, cent2dgy_4])
L2 = line([cent2dgx_2, cent2dgy_2], [cent2dgx_3, cent2dgy_3])
R = intersection(L1, L2)
msgerr = "Check that the order of the tuples in `xy` is correct and"
msgerr += " the satellite spots have good S/N"
if R is not None:
shiftx = cx - R[0]
shifty = cy - R[1]
if np.abs(shiftx) < cx * 2 and np.abs(shifty) < cy * 2:
if debug or verbose:
print('Intersection coordinates (X,Y):', R[0], R[1], '\n')
print('Shifts (X,Y): {:.3f}, {:.3f}'.format(shiftx, shifty))
if shift:
array_rec = frame_shift(array, shifty, shiftx, imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
return array_rec, shifty, shiftx, centy, centx
else:
return shifty, shiftx
else:
raise RuntimeError("Too large shifts. " + msgerr)
else:
raise RuntimeError("Something went wrong, no intersection found. " +
msgerr)
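# Added illustrative sketch (not part of the original module): builds a frame
# with four synthetic Gaussian satellite spots in an X configuration and
# recovers the star offset with ``frame_center_satspots``. Spot separations,
# widths and the injected offset are arbitrary assumptions.
def _example_frame_center_satspots():
    """Recover the centre shift of a synthetic satellite-spot frame."""
    import numpy as np
    ny = nx = 101
    yy, xx = np.mgrid[:ny, :nx]
    star_y, star_x = 50.6, 49.6          # true (off-centre) star position
    frame = np.zeros((ny, nx))
    for dy, dx in ((25, -25), (25, 25), (-25, -25), (-25, 25)):
        frame += np.exp(-((yy - (star_y + dy)) ** 2 +
                          (xx - (star_x + dx)) ** 2) / (2 * 2.0 ** 2))
    frame += np.random.normal(scale=1e-2, size=frame.shape)
    # approximate spot coordinates (x, y): top-left, top-right, bottom-left,
    # bottom-right, as expected for an X configuration
    xy = ((25, 75), (75, 75), (25, 25), (75, 25))
    shift_y, shift_x = frame_center_satspots(frame, xy, subi_size=15,
                                             shift=False, verbose=False)
    return shift_y, shift_x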
def cube_recenter_satspots(array, xy, subi_size=19, sigfactor=6, plot=True,
fit_type='moff', lbda=None, border_mode='constant',
debug=False, verbose=True, full_output=False):
""" Function analog to frame_center_satspots but for image sequences. It
actually will call frame_center_satspots for each image in the cube. The
function also returns the shifted images (not recommended to use when the
shifts are of a few percents of a pixel) and plots the histogram of the
shifts and calculate its statistics. This is important to assess the
dispersion of the star center by using artificial waffle/satellite spots
(like those in VLT/SPHERE images) and evaluate the uncertainty of the
position of the center. The use of the shifted images is not recommended.
Parameters
----------
array : numpy ndarray, 3d
Input cube.
xy : tuple of 4 tuples of 2 elements
Tuple with coordinates X,Y of the 4 satellite spots. When the spots are
in an X configuration, the order is the following: top-left, top-right,
bottom-left and bottom-right. When the spots are in an + (cross-like)
configuration, the order is the following: top, right, left, bottom.
If wavelength vector is not provided, assumes all sat spots of the cube
are at a similar location. If wavelength is provided, only coordinates
of the sat spots in the first channel should be provided. The boxes
location in other channels will be scaled accordingly.
subi_size : int, optional
Size of subimage where the fitting is done.
sigfactor : int, optional
The background pixels will be thresholded before fitting a 2d Gaussian
to the data using sigma clipped statistics. All values smaller than
(MEDIAN + sigfactor*STDDEV) will be replaced by small random Gaussian
noise.
plot : bool, optional
Whether to plot the shifts.
fit_type: str, optional {'gaus','moff'}
Type of 2d fit to infer the centroid of the satellite spots.
lbda: 1d array or list, opt
Wavelength vector. If provided, the subimages will be scaled accordingly
to follow the motion of the satellite spots.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'constant'.
debug : bool, optional
If True debug information is printed and plotted (fit and residuals,
intersections and shifts). This has to be used carefully as it can
produce too much output and plots.
verbose : bool, optional
Whether to print to stdout the timing and additional info.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
Returns
-------
array_rec
The shifted cube.
shift_y, shift_x
[full_output==True] Shifts Y,X to get to the true center for each image.
sat_y, sat_x
[full_output==True] Y,X positions of the satellite spots in each image.
Order: top-left, top-right, bottom-left and bottom-right.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
shift_x = np.zeros((n_frames))
shift_y = np.zeros((n_frames))
sat_y = np.zeros([n_frames,4])
sat_x = np.zeros([n_frames,4])
array_rec = []
if lbda is not None:
cy, cx = frame_center(array[0])
final_xy = []
rescal = lbda/lbda[0]
for i in range(n_frames):
xy_new = []
for s in range(4):
xy_new.append((cx+rescal[i]*(xy[s][0]-cx),cy+rescal[i]*(xy[s][1]-cy)))
xy_new = tuple(xy_new)
final_xy.append(xy_new)
else:
final_xy = [xy for i in range(n_frames)]
if verbose:
print("Final xy positions for sat spots:", final_xy)
print('Looping through the frames, fitting the intersections:')
for i in Progressbar(range(n_frames), verbose=verbose):
res = frame_center_satspots(array[i], final_xy[i], debug=debug, shift=True,
subi_size=subi_size, sigfactor=sigfactor,
fit_type=fit_type, verbose=False,
border_mode=border_mode)
array_rec.append(res[0])
shift_y[i] = res[1]
shift_x[i] = res[2]
sat_y[i] = res[3]
sat_x[i] = res[4]
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(shift_x, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(shift_y, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(shift_x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(shift_y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if verbose:
msg1 = 'MEAN X,Y: {:.3f}, {:.3f}'
print(msg1.format(np.mean(shift_x), np.mean(shift_y)))
msg2 = 'MEDIAN X,Y: {:.3f}, {:.3f}'
print(msg2.format(np.median(shift_x), np.median(shift_y)))
msg3 = 'STDDEV X,Y: {:.3f}, {:.3f}'
print(msg3.format(np.std(shift_x), np.std(shift_y)))
array_rec = np.array(array_rec)
if full_output:
return array_rec, shift_y, shift_x, sat_y, sat_x
else:
return array_rec
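# Added illustrative sketch (not part of the original module): applies the
# satellite-spot recentering to a tiny synthetic cube. Frame sizes, injected
# offsets and spot positions are arbitrary assumptions.
def _example_cube_recenter_satspots():
    """Recenter a two-frame synthetic satellite-spot cube (illustrative only)."""
    import numpy as np
    ny = nx = 101
    yy, xx = np.mgrid[:ny, :nx]
    offsets = [(0.4, -0.3), (-0.2, 0.5)]              # (dy, dx) per frame
    cube = np.zeros((len(offsets), ny, nx))
    for i, (dy0, dx0) in enumerate(offsets):
        for dy, dx in ((25, -25), (25, 25), (-25, -25), (-25, 25)):
            cube[i] += np.exp(-((yy - (50 + dy0 + dy)) ** 2 +
                                (xx - (50 + dx0 + dx)) ** 2) / (2 * 2.0 ** 2))
    cube += np.random.normal(scale=1e-2, size=cube.shape)
    xy = ((25, 75), (75, 75), (25, 25), (75, 25))
    cube_rec, sy, sx, _, _ = cube_recenter_satspots(cube, xy, subi_size=15,
                                                    plot=False, verbose=False,
                                                    full_output=True)
    return cube_rec, sy, sx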
def frame_center_radon(array, cropsize=None, hsize=0.4, step=0.01,
mask_center=None, nproc=None, satspots_cfg=None,
full_output=False, verbose=True, plot=True, debug=False):
""" Finding the center of a broadband (co-added) frame with speckles and
satellite spots elongated towards the star (center). We use the radon
transform implementation from scikit-image.
Parameters
----------
array : numpy ndarray
Input 2d array or image.
cropsize : None or odd int, optional
Size in pixels of the cropped central area of the input array that will
be used. It should be large enough to contain the bright elongated
speckle or satellite spots.
hsize : float, optional
Size of the box for the grid search. The frame is shifted to each
direction from the center in a hsize length with a given step.
step : float, optional
The step of the coordinates change.
mask_center : None or int, optional
If None the central area of the frame is kept. If int a centered zero
mask will be applied to the frame. By default the center isn't masked.
nproc : int, optional
Number of processes for parallel computing. If None the number of
processes will be set to cpu_count()/2.
satspots_cfg: None or str ('x' or '+'), opt
If satellite spots are present, provide a string corresponding to the
configuration of the satellite spots: either as a cross ('x') or as a
plus sign ('+'). Leave to None if no satellite spots present. Usually
the Radon transform centering works better if bright satellite spots
are present.
verbose : bool optional
Whether to print to stdout some messages and info.
plot : bool, optional
Whether to plot the radon cost function.
debug : bool, optional
Whether to print and plot intermediate info.
Returns
-------
[full_output=True] 2d np array
Radon cost function surface is returned if full_output set to True
optimy, optimx : float
Values of the Y, X coordinates of the center of the frame based on the
radon optimization. (always returned)
Notes
-----
Based on Pueyo et al. 2014: http://arxiv.org/abs/1409.6388
"""
from .cosmetics import frame_crop
if array.ndim != 2:
raise TypeError('Input array is not a frame or 2d array')
if verbose:
start_time = time_ini()
frame = array.copy()
ori_cent, _ = frame_center(frame)
if cropsize is not None:
if not cropsize%2:
raise TypeError("If not None, cropsize should be odd integer")
frame = frame_crop(frame, cropsize, verbose=False)
listyx = np.linspace(start=-hsize, stop=hsize, num=int(2*hsize/step)+1,
endpoint=True)
if not mask_center:
radint = 0
else:
if not isinstance(mask_center, int):
raise TypeError
radint = mask_center
coords = [(y, x) for y in listyx for x in listyx]
cent, _ = frame_center(frame)
frame = get_annulus_segments(frame, radint, cent-radint, mode="mask")[0]
if debug:
if satspots_cfg is not None:
samples = 10
if satspots_cfg == 'x':
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
elif satspots_cfg == '+':
theta = np.hstack((np.linspace(start=-5, stop=5, num=samples,
endpoint=False),
np.linspace(start=85, stop=95, num=samples,
endpoint=False),
np.linspace(start=175, stop=185, num=samples,
endpoint=False),
np.linspace(start=265, stop=275, num=samples,
endpoint=False)))
else:
msg = "If not None, satspots_cfg can only be 'x' or '+'."
raise ValueError(msg)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
else:
theta = np.linspace(start=0, stop=360, num=int(cent*2),
endpoint=False)
sinogram = radon(frame, theta=theta, circle=True)
plot_frames((frame, sinogram))
print(np.sum(np.abs(sinogram[int(cent), :])))
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
costf = []
for coord in coords:
res = _radon_costf(frame, cent, radint, coord, satspots_cfg)
costf.append(res)
costf = np.array(costf)
elif nproc > 1:
res = pool_map(nproc, _radon_costf, frame, cent, radint,
iterable(coords), satspots_cfg)
costf = np.array(res)
if verbose:
msg = 'Done {} radon transform calls distributed in {} processes'
print(msg.format(len(coords), nproc))
cost_bound = costf.reshape(listyx.shape[0], listyx.shape[0])
if plot:
plt.contour(cost_bound, cmap='CMRmap', origin='lower', linewidths=1)
plt.imshow(cost_bound, cmap='CMRmap', origin='lower',
interpolation='nearest')
plt.colorbar()
plt.grid('off')
plt.show()
# argm = np.argmax(costf) # index of 1st max in 1d cost function 'surface'
# optimy, optimx = coords[argm]
# maxima in the 2d cost function surface
num_max = np.where(cost_bound == cost_bound.max())[0].shape[0]
ind_maximay, ind_maximax = np.where(cost_bound == cost_bound.max())
argmy = ind_maximay[int(np.ceil(num_max/2)) - 1]
argmx = ind_maximax[int(np.ceil(num_max/2)) - 1]
y_grid = np.array(coords)[:, 0].reshape(listyx.shape[0], listyx.shape[0])
x_grid = np.array(coords)[:, 1].reshape(listyx.shape[0], listyx.shape[0])
optimy = y_grid[argmy, 0]+(ori_cent-cent)/2
optimx = x_grid[0, argmx]+(ori_cent-cent)/2
if verbose:
print('Cost function max: {}'.format(costf.max()))
print('Cost function # maxima: {}'.format(num_max))
msg = 'Finished grid search radon optimization. Y={:.5f}, X={:.5f}'
print(msg.format(optimy, optimx))
timing(start_time)
if full_output:
return cost_bound, optimy, optimx
else:
return optimy, optimx
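# Added illustrative sketch (not part of the original module): runs the Radon
# grid search on a synthetic, centrally peaked frame. The frame content and
# the (deliberately coarse) hsize/step values are arbitrary assumptions.
def _example_frame_center_radon():
    """Estimate the centre of a synthetic frame with the Radon cost function."""
    import numpy as np
    ny = nx = 61
    yy, xx = np.mgrid[:ny, :nx]
    r = np.sqrt((yy - 30.0) ** 2 + (xx - 30.0) ** 2)
    frame = np.exp(-r / 10.0) + np.random.normal(scale=1e-3, size=(ny, nx))
    opt_y, opt_x = frame_center_radon(frame, cropsize=51, hsize=0.2, step=0.1,
                                      nproc=1, verbose=False, plot=False)
    return opt_y, opt_x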
def _radon_costf(frame, cent, radint, coords, satspots_cfg=None):
""" Radon cost function used in frame_center_radon().
"""
frame_shifted = frame_shift(frame, coords[0], coords[1])
frame_shifted_ann = get_annulus_segments(frame_shifted, radint,
cent-radint, mode="mask")[0]
if satspots_cfg is None:
theta = np.linspace(start=0, stop=360, num=frame_shifted_ann.shape[0],
endpoint=False)
elif satspots_cfg == 'x':
samples = 10
theta = np.hstack((np.linspace(start=40, stop=50, num=samples,
endpoint=False),
np.linspace(start=130, stop=140, num=samples,
endpoint=False),
np.linspace(start=220, stop=230, num=samples,
endpoint=False),
np.linspace(start=310, stop=320, num=samples,
endpoint=False)))
else:
samples = 10
theta = np.hstack((np.linspace(start=-5, stop=5, num=samples,
endpoint=False),
np.linspace(start=85, stop=95, num=samples,
endpoint=False),
np.linspace(start=175, stop=185, num=samples,
endpoint=False),
np.linspace(start=265, stop=275, num=samples,
endpoint=False)))
sinogram = radon(frame_shifted_ann, theta=theta, circle=True)
costf = np.sum(np.abs(sinogram[int(cent), :]))
return costf
def cube_recenter_radon(array, full_output=False, verbose=True, imlib='vip-fft',
interpolation='lanczos4', border_mode='reflect',
**kwargs):
""" Recenters a cube looping through its frames and calling the
``frame_center_radon`` function.
Parameters
----------
array : numpy ndarray
Input 3d array or cube.
full_output : {False, True}, bool optional
If True the recentered cube is returned along with the y and x shifts.
verbose : {True, False}, bool optional
Whether to print timing and intermediate information to stdout.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
kwargs:
Additional optional parameters from vip_hci.preproc.frame_center_radon
function, such as cropsize, hsize, step, satspots_cfg, mask_center,
nproc or debug.
Returns
-------
array_rec : 3d ndarray
Recentered cube.
y, x : 1d arrays of floats
[full_output] Shifts in y and x.
"""
check_array(array, dim=3)
if verbose:
start_time = time_ini()
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
y[i], x[i] = frame_center_radon(array[i], verbose=False, plot=False,
**kwargs)
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
if verbose:
timing(start_time)
if full_output:
return array_rec, y, x
else:
return array_rec
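# Added illustrative sketch (not part of the original module): recenters a
# two-frame synthetic cube with the Radon-based grid search. The coarse hsize
# and step values are arbitrary assumptions that keep the run time short.
def _example_cube_recenter_radon():
    """Recenter a tiny synthetic cube with ``cube_recenter_radon``."""
    import numpy as np
    ny = nx = 61
    yy, xx = np.mgrid[:ny, :nx]
    cube = np.zeros((2, ny, nx))
    for i, (cy, cx) in enumerate(((30.2, 29.8), (29.9, 30.3))):
        r = np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2)
        cube[i] = np.exp(-r / 10.0)
    cube += np.random.normal(scale=1e-3, size=cube.shape)
    return cube_recenter_radon(cube, full_output=True, verbose=False,
                               cropsize=51, hsize=0.2, step=0.1, nproc=1)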
def cube_recenter_dft_upsampling(array, center_fr1=None, negative=False,
fwhm=4, subi_size=None, upsample_factor=100,
imlib='vip-fft', interpolation='lanczos4',
mask=None, border_mode='reflect',
full_output=False, verbose=True, nproc=1,
save_shifts=False, debug=False, plot=True):
""" Recenters a cube of frames using the DFT upsampling method as
proposed in Guizar et al. 2008 and implemented in the
``register_translation`` function from scikit-image.
The algorithm (DFT upsampling) obtains an initial estimate of the
cross-correlation peak by an FFT and then refines the shift estimation by
upsampling the DFT only in a small neighborhood of that estimate by means
of a matrix-multiply DFT.
Parameters
----------
array : numpy ndarray
Input cube.
center_fr1 = (cy_1, cx_1) : Tuple, optional
Coordinates of the center of the subimage for fitting a 2d Gaussian and
centroiding the 1st frame.
negative : bool, optional
If True the centroiding of the 1st frames is done with a negative
2d Gaussian fit.
fwhm : float, optional
FWHM size in pixels.
subi_size : int or None, optional
Size of the square subimage sides in pixels, used to centroid to first
frame. If subi_size is None then the first frame is assumed to be
centered already.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
upsample_factor : int, optional
Upsampling factor (default 100). Images will be registered to within
1/upsample_factor of a pixel.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
[Note: only used if version of skimage >= 0.18.0]
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
debug : bool, optional
Whether to print to stdout the shifts or not.
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_recentered : numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
Notes
-----
Using the implementation from scikit-image of the algorithm described in
Guizar-Sicairos et al. "Efficient subpixel image registration algorithms,"
Opt. Lett. 33, 156-158 (2008). This algorithm registers two images (2-D
rigid translation) within a fraction of a pixel specified by the user.
Instead of computing a zero-padded FFT (fast Fourier transform), this code
uses selective upsampling by a matrix-multiply DFT (discrete FT) to
dramatically reduce computation time and memory without sacrificing
accuracy. With this procedure all the image points are used to compute the
upsampled cross-correlation in a very small neighborhood around its peak.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
if mask is not None:
if mask.shape[-1]!=array.shape[-1] or mask.shape[-2]!=array.shape[-2]:
msg = "If provided, mask should have same shape as frames"
raise TypeError(msg)
n_frames, sizey, sizex = array.shape
if subi_size is not None:
if center_fr1 is None:
print('`cx_1` or `cy_1` not provided')
print('Using the coordinates of the 1st frame center for '
'the Gaussian 2d fit')
cy_1, cx_1 = frame_center(array[0])
else:
cy_1, cx_1 = center_fr1
if not isinstance(subi_size, int):
raise ValueError('subi_size must be an integer or None')
if subi_size < fwhm:
raise ValueError('`subi_size` (value in pixels) is too small')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
n_frames = array.shape[0]
x = np.zeros((n_frames))
y = np.zeros((n_frames))
array_rec = array.copy()
cy, cx = frame_center(array[0])
# Centroiding first frame with 2d gaussian and shifting
msg0 = "The rest of the frames will be shifted by cross-correlation wrt the" \
" 1st"
if subi_size is not None:
y1, x1 = _centroid_2dg_frame(array_rec, 0, subi_size, cy_1, cx_1,
negative, debug, fwhm)
x[0] = cx - x1
y[0] = cy - y1
array_rec[0] = frame_shift(array_rec[0], shift_y=y[0], shift_x=x[0],
imlib=imlib, interpolation=interpolation)
if verbose:
msg = "Shift for first frame X,Y=({:.3f}, {:.3f})"
print(msg.format(x[0], y[0]))
print(msg0)
if debug:
titd = "original / shifted 1st frame subimage"
plot_frames((frame_crop(array[0], subi_size, verbose=False),
frame_crop(array_rec[0], subi_size, verbose=False)),
grid=True, title=titd)
else:
if verbose:
print("The first frame is assumed to be well centered wrt the"
"center of the array")
print(msg0)
x[0] = 0
y[0] = 0
# Finding the shifts with DFT upsampling of each frame wrt the first
if nproc == 1:
for i in Progressbar(range(1, n_frames), desc="frames", verbose=verbose):
y[i], x[i], array_rec[i] = _shift_dft(array_rec, array, i,
upsample_factor, mask,
interpolation, imlib,
border_mode)
elif nproc > 1:
res = pool_map(nproc, _shift_dft, array_rec, array,
iterable(range(1, n_frames)), upsample_factor, mask,
interpolation, imlib, border_mode)
res = np.array(res)
y[1:] = res[:,0]
x[1:] = res[:,1]
array_rec[1:] = [frames for frames in res[:,2]]
if debug:
print("\nShifts in X and Y")
for i in range(n_frames):
print(x[i], y[i])
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if save_shifts:
np.savetxt('recent_dft_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
return array_rec, y, x
else:
return array_rec
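# Added illustrative sketch (not part of the original module): registers a
# synthetic cube of sub-pixel shifted Gaussian PSFs against its first frame.
# PSF width and injected shifts are arbitrary assumptions.
def _example_cube_recenter_dft_upsampling():
    """Register a synthetic PSF cube with the DFT upsampling method."""
    import numpy as np
    ny = nx = 65
    yy, xx = np.mgrid[:ny, :nx]
    shifts = [(0.0, 0.0), (0.7, -0.4), (-0.3, 0.6)]   # (dy, dx) wrt centre
    cube = np.zeros((len(shifts), ny, nx))
    for i, (dy, dx) in enumerate(shifts):
        cube[i] = np.exp(-((yy - (32 + dy)) ** 2 +
                           (xx - (32 + dx)) ** 2) / (2 * 2.0 ** 2))
    cube += np.random.normal(scale=1e-3, size=cube.shape)
    # subi_size=None -> the first frame is assumed to be already centred
    return cube_recenter_dft_upsampling(cube, subi_size=None, full_output=True,
                                        verbose=False, plot=False)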
def _shift_dft(array_rec, array, frnum, upsample_factor, mask, interpolation,
imlib, border_mode):
"""
Function used in cube_recenter_dft_upsampling.
"""
if version.parse(skimage.__version__) > version.parse('0.17.0'):
shift_yx = cc_center(array_rec[0], array[frnum],
upsample_factor=upsample_factor, reference_mask=mask,
return_error=False)
else:
shift_yx = cc_center(array_rec[0], array[frnum],
upsample_factor=upsample_factor)
y_i, x_i = shift_yx
array_rec_i = frame_shift(array[frnum], shift_y=y_i, shift_x=x_i,
imlib=imlib, interpolation=interpolation,
border_mode=border_mode)
return y_i, x_i, array_rec_i
def cube_recenter_2dfit(array, xy=None, fwhm=4, subi_size=5, model='gauss',
nproc=1, imlib='vip-fft', interpolation='lanczos4',
offset=None, negative=False, threshold=False,
sigfactor=2, fix_neg=False, params_2g=None,
border_mode='reflect', save_shifts=False,
full_output=False, verbose=True, debug=False,
plot=True):
""" Recenters the frames of a cube. The shifts are found by fitting a 2d
Gaussian or Moffat to a subimage centered at ``xy``. This assumes the frames
don't have too large shifts (>5px). The frames are shifted using the
function frame_shift().
Parameters
----------
array : numpy ndarray
Input cube.
xy : tuple of integers or floats
Integer coordinates of the center of the subimage (wrt the original frame).
For the double gaussian fit with fixed negative gaussian, this should
correspond to the exact location of the center of the negative gaussian
(e.g. the center of the coronagraph mask) - in that case a tuple of
floats is also accepted.
fwhm : float or numpy ndarray
FWHM size in pixels, either one value (float) that will be the same for
the whole cube, or an array of floats with the same dimension as the
0th dim of array, containing the fwhm for each channel (e.g. in the case
of an ifs cube, where the fwhm varies with wavelength)
subi_size : int, optional
Size of the square subimage sides in pixels.
model : str, optional
Sets the type of fit to be used. 'gauss' for a 2d Gaussian fit,
'moff' for a 2d Moffat fit, 'airy' for a 2d Airy disk fit, and
'2gauss' for a 2d double Gaussian (positive+negative) fit.
nproc : int or None, optional
Number of processes (>1) for parallel computing. If 1 then it runs in
serial. If None the number of processes will be set to (cpu_count()/2).
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
offset : tuple of floats, optional
If None the region of the frames used for the 2d Gaussian/Moffat fit is
shifted to the center of the images (2d arrays). If a tuple is given it
serves as the offset of the fitted area wrt the center of the 2d arrays.
negative : bool, optional
If True a negative 2d Gaussian/Moffat fit is performed.
fix_neg: bool, optional
In case of a double gaussian fit, whether to fix the parameters of the
negative gaussian. If True, they should be provided in params_2g.
params_2g: None or dictionary, optional
In case of a double gaussian fit, dictionary with either fixed or first
guess parameters for the double gaussian. E.g.:
params_2g = {'fwhm_neg': 3.5, 'fwhm_pos': (3.5,4.2), 'theta_neg': 48.,
'theta_pos':145., 'neg_amp': 0.5}
fwhm_neg: float or tuple with fwhm of neg gaussian
fwhm_pos: can be a tuple for x and y axes of pos gaussian (replaces fwhm)
theta_neg: trigonometric angle of the x axis of the neg gaussian (deg)
theta_pos: trigonometric angle of the x axis of the pos gaussian (deg)
neg_amp: amplitude of the neg gaussian wrt the amp of the positive one
Note: it is always recommended to provide theta_pos and theta_neg for a
better fit.
threshold : bool, optional
If True the background pixels (estimated using sigma clipped statistics)
will be replaced by small random Gaussian noise (recommended for 2g).
sigfactor: float, optional
If thresholding is performed, set the threshold in terms of
gaussian sigma in the subimage (will depend on your cropping size).
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
save_shifts : bool, optional
Whether to save the shifts to a file in disk.
full_output : bool, optional
Whether to return 2 1d arrays of shifts along with the recentered cube
or not.
verbose : bool, optional
Whether to print to stdout the timing or not.
debug : bool, optional
If True the details of the fitting are shown. Won't work when the cube
contains >20 frames (as it might produce an extremely long output).
plot : bool, optional
If True, the shifts are plotted.
Returns
-------
array_rec: numpy ndarray
The recentered cube.
y : numpy ndarray
[full_output=True] 1d array with the shifts in y.
x : numpy ndarray
[full_output=True] 1d array with the shifts in x.
"""
if verbose:
start_time = time_ini()
check_array(array, dim=3)
n_frames, sizey, sizex = array.shape
if not isinstance(subi_size, int):
raise ValueError('`subi_size` must be an integer')
if sizey % 2 == 0:
if subi_size % 2 != 0:
subi_size += 1
print('`subi_size` is odd (while frame size is even)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
else:
if subi_size % 2 == 0:
subi_size += 1
print('`subi_size` is even (while frame size is odd)')
print('Setting `subi_size` to {} pixels'.format(subi_size))
if isinstance(fwhm, (float, int, np.float32, np.float64)):
fwhm = np.ones(n_frames) * fwhm
if debug and array.shape[0] > 20:
msg = 'Debug with a big array will produce a very long output. '
msg += 'Try with less than 20 frames in debug mode'
raise RuntimeWarning(msg)
if xy is not None:
pos_x, pos_y = xy
cond = model != '2gauss'
if (not isinstance(pos_x, int) or not isinstance(pos_y, int)) and cond:
raise TypeError('`xy` must be a tuple of integers')
else:
pos_y, pos_x = frame_center(array[0])
cy, cx = frame_center(array[0])
array_rec = np.empty_like(array)
if model == 'gauss':
func = _centroid_2dg_frame
elif model == 'moff':
func = _centroid_2dm_frame
elif model == 'airy':
func = _centroid_2da_frame
elif model == '2gauss':
func = _centroid_2d2g_frame
else:
raise ValueError('model not recognized')
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
if nproc == 1:
res = []
print('2d {}-fitting'.format(model))
for i in Progressbar(range(n_frames), desc="frames", verbose=verbose):
if model == "2gauss":
args = [array, i, subi_size, pos_y, pos_x, debug, fwhm[i],
fix_neg, params_2g, threshold, sigfactor]
else:
args = [array, i, subi_size, pos_y, pos_x, negative, debug,
fwhm[i], threshold, sigfactor]
res.append(func(*args))
res = np.array(res)
elif nproc > 1:
if model == "2gauss":
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
debug, iterable(fwhm), fix_neg, params_2g, threshold,
sigfactor]
else:
args = [array, iterable(range(n_frames)), subi_size, pos_y, pos_x,
negative, debug, iterable(fwhm), threshold, sigfactor]
res = pool_map(nproc, func, *args)
res = np.array(res)
y = cy - res[:, 0]
x = cx - res[:, 1]
if model == "2gauss" and not fix_neg:
y_neg = res[:, 2]
x_neg = res[:, 3]
fwhm_x = res[:, 4]
fwhm_y = res[:, 5]
fwhm_neg_x = res[:, 6]
fwhm_neg_y = res[:, 7]
theta = res[:, 8]
theta_neg = res[:, 9]
amp_pos = res[:,10]
amp_neg = res[:, 11]
if offset is not None:
offx, offy = offset
y -= offy
x -= offx
for i in Progressbar(range(n_frames), desc="Shifting", verbose=verbose):
if debug:
print("\nShifts in X and Y")
print(x[i], y[i])
array_rec[i] = frame_shift(array[i], y[i], x[i], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
if verbose:
timing(start_time)
if plot:
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n_frames))
la = 'Histogram'
_ = plt.hist(x, bins=b, alpha=0.5, label=la + ' shifts X')
_ = plt.hist(y, bins=b, alpha=0.5, label=la + ' shifts Y')
if model == "2gauss" and not fix_neg:
_ = plt.hist(cx-x_neg, bins=b, alpha=0.5,
label=la + ' shifts X (neg gaussian)')
_ = plt.hist(cy-y_neg, bins=b, alpha=0.5,
label=la + ' shifts Y (neg gaussian)')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
plt.figure(figsize=vip_figsize)
plt.plot(y, 'o-', label='shifts in y', alpha=0.5)
plt.plot(x, 'o-', label='shifts in x', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
if save_shifts:
np.savetxt('recent_gauss_shifts.txt', np.transpose([y, x]), fmt='%f')
if full_output:
if model == "2gauss" and not fix_neg:
return (array_rec, y, x, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x,
fwhm_neg_y, theta, theta_neg, amp_pos, amp_neg)
return array_rec, y, x
else:
return array_rec
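# Added illustrative sketch (not part of the original module): recenters a
# synthetic cube of slightly shifted Gaussian PSFs with a 2d Gaussian fit.
# PSF width, shifts and the subimage size are arbitrary assumptions.
def _example_cube_recenter_2dfit():
    """Recenter a synthetic PSF cube with ``cube_recenter_2dfit``."""
    import numpy as np
    ny = nx = 65
    yy, xx = np.mgrid[:ny, :nx]
    shifts = [(0.5, -0.2), (-0.4, 0.3), (0.1, 0.6)]
    cube = np.zeros((len(shifts), ny, nx))
    for i, (dy, dx) in enumerate(shifts):
        cube[i] = np.exp(-((yy - (32 + dy)) ** 2 +
                           (xx - (32 + dx)) ** 2) / (2 * 2.0 ** 2))
    cube += np.random.normal(scale=1e-3, size=cube.shape)
    return cube_recenter_2dfit(cube, xy=(32, 32), fwhm=4.0, subi_size=9,
                               model='gauss', full_output=True, verbose=False,
                               plot=False, debug=False)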
def _centroid_2dg_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d gaussian fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative gaussian fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False, fwhmx=fwhm, fwhmy=fwhm,
threshold=threshold, sigfactor=sigfactor, debug=debug,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2dm_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d moffat fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dmoffat(sub_image, crop=False, fwhm=fwhm, debug=debug,
threshold=threshold, sigfactor=sigfactor,
full_output=False)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2da_frame(cube, frnum, size, pos_y, pos_x, negative, debug,
fwhm, threshold=False, sigfactor=1):
""" Finds the centroid by using a 2d Airy disk fitting in one frame from a
cube.
"""
sub_image, y1, x1 = get_square(cube[frnum], size=size, y=pos_y, x=pos_x,
position=True)
# negative fit
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dairydisk(sub_image, crop=False, fwhm=fwhm,
threshold=threshold, sigfactor=sigfactor,
full_output=False, debug=debug)
y_i = y1 + y_i
x_i = x1 + x_i
return y_i, x_i
def _centroid_2d2g_frame(cube, frnum, size, pos_y, pos_x, debug=False, fwhm=4,
fix_neg=True, params_2g=None, threshold=False,
sigfactor=1):
""" Finds the centroid by using a 2d double gaussian (positive+negative)
fitting in one frame from a cube. To be called from within
cube_recenter_2dfit().
"""
size = min(cube[frnum].shape[0],cube[frnum].shape[1],size)
if isinstance(params_2g,dict):
fwhm_neg = params_2g.get('fwhm_neg', 0.8*fwhm)
fwhm_pos = params_2g.get('fwhm_pos', 2*fwhm)
theta_neg = params_2g.get('theta_neg', 0.)
theta_pos = params_2g.get('theta_pos', 0.)
neg_amp = params_2g.get('neg_amp', 1)
res_DF = fit_2d2gaussian(cube[frnum], crop=True, cent=(pos_x,pos_y),
cropsize=size, fwhm_neg=fwhm_neg, fwhm_pos=fwhm_pos,
neg_amp=neg_amp, fix_neg=fix_neg, theta_neg=theta_neg,
theta_pos=theta_pos, threshold=threshold,
sigfactor=sigfactor, full_output=True, debug=debug)
y_i = res_DF['centroid_y']
x_i = res_DF['centroid_x']
if not fix_neg:
y_neg = res_DF['centroid_y_neg']
x_neg = res_DF['centroid_x_neg']
fwhm_x = res_DF['fwhm_x']
fwhm_y = res_DF['fwhm_y']
fwhm_neg_x = res_DF['fwhm_x_neg']
fwhm_neg_y = res_DF['fwhm_y_neg']
theta = res_DF['theta']
theta_neg = res_DF['theta_neg']
amp_pos = res_DF['amplitude']
amp_neg = res_DF['amplitude_neg']
return (y_i, x_i, y_neg, x_neg, fwhm_x, fwhm_y, fwhm_neg_x, fwhm_neg_y,
theta, theta_neg, amp_pos, amp_neg)
return y_i, x_i
# TODO: make parameter names match the API
def cube_recenter_via_speckles(cube_sci, cube_ref=None, alignment_iter=5,
gammaval=1, min_spat_freq=0.5, max_spat_freq=3,
fwhm=4, debug=False, recenter_median=False,
fit_type='gaus', negative=True, crop=True,
subframesize=21, mask=None, imlib='vip-fft',
interpolation='lanczos4', border_mode='reflect',
plot=True, full_output=False):
""" Registers frames based on the median speckle pattern. Optionally centers
based on the position of the vortex null in the median frame. Images are
filtered to isolate speckle spatial frequencies.
Parameters
----------
cube_sci : numpy ndarray
Science cube.
cube_ref : numpy ndarray
Reference cube (e.g. for NIRC2 data in RDI mode).
alignment_iter : int, optional
Number of alignment iterations (recomputes median after each iteration).
gammaval : int, optional
Applies a gamma correction to emphasize speckles (useful for faint
stars).
min_spat_freq : float, optional
Spatial frequency for low pass filter.
max_spat_freq : float, optional
Spatial frequency for high pass filter.
fwhm : float, optional
Full width at half maximum.
debug : bool, optional
Outputs extra info.
recenter_median : bool, optional
Recenter the frames at each iteration based on a 2d fit.
fit_type : str, optional
If recenter_median is True, this is the model to which the image is
fitted to for recentering. 'gaus' works well for NIRC2_AGPM data.
'ann' works better for NACO+AGPM data.
negative : bool, optional
If True, uses a negative gaussian fit to determine the center of the
median frame.
crop: bool, optional
Whether to calculate the recentering on a cropped version of the cube
that is speckle-dominated (recommended).
subframesize : int, optional
Sub-frame window size used. Should cover the region where speckles are
the dominant noise source.
mask: 2D np.ndarray, optional
Binary mask indicating where the cross-correlation should be calculated
in the images. If provided, should be the same size as array frames.
imlib : str, optional
Image processing library to use.
interpolation : str, optional
Interpolation method to use.
border_mode : {'reflect', 'nearest', 'constant', 'mirror', 'wrap'}
Points outside the boundaries of the input are filled accordingly.
With 'reflect', the input is extended by reflecting about the edge of
the last pixel. With 'nearest', the input is extended by replicating the
last pixel. With 'constant', the input is extended by filling all values
beyond the edge with zeros. With 'mirror', the input is extended by
reflecting about the center of the last pixel. With 'wrap', the input is
extended by wrapping around to the opposite edge. Default is 'reflect'.
plot : bool, optional
If True, the shifts are plotted.
full_output: bool, optional
Whether to return more variables, useful for debugging.
Returns
-------
if full_output is False, returns:
cube_reg_sci: Registered science cube (numpy 3d ndarray)
If cube_ref is not None, also returns:
cube_reg_ref: Ref. cube registered to science frames (np 3d ndarray)
If full_output is True, returns in addition to the above:
cube_sci_lpf: Low+high-pass filtered science cube (np 3d ndarray)
cube_stret: Cube with stretched values used for cross-corr (np 3d ndarray)
cum_x_shifts_sci: Vector of x shifts for science frames (np 1d array)
cum_y_shifts_sci: Vector of y shifts for science frames (np 1d array)
And if cube_ref is not None, also returns:
cum_x_shifts_ref: Vector of x shifts for ref. frames.
cum_y_shifts_ref: Vector of y shifts for ref. frames.
"""
n, y, x = cube_sci.shape
check_array(cube_sci, dim=3)
if recenter_median and fit_type not in {'gaus','ann'}:
raise TypeError("fit type not recognized. Should be 'ann' or 'gaus'")
if crop and not subframesize < y/2.:
raise ValueError('`subframesize` is too large')
if cube_ref is not None:
ref_star = True
nref = cube_ref.shape[0]
else:
ref_star = False
if crop:
cube_sci_subframe = cube_crop_frames(cube_sci, subframesize,
verbose=False)
if ref_star:
cube_ref_subframe = cube_crop_frames(cube_ref, subframesize,
verbose=False)
else:
subframesize = cube_sci.shape[-1]
cube_sci_subframe = cube_sci.copy()
if ref_star:
cube_ref_subframe = cube_ref.copy()
ceny, cenx = frame_center(cube_sci_subframe[0])
print('Sub frame shape: {}'.format(cube_sci_subframe.shape))
print('Center pixel: ({}, {})'.format(ceny, cenx))
# Filtering cubes. Will be used for alignment purposes
cube_sci_lpf = cube_sci_subframe.copy()
if ref_star:
cube_ref_lpf = cube_ref_subframe.copy()
cube_sci_lpf = cube_sci_lpf + np.abs(np.min(cube_sci_lpf))
if ref_star:
cube_ref_lpf = cube_ref_lpf + np.abs(np.min(cube_ref_lpf))
median_size = int(fwhm * max_spat_freq)
# Remove spatial frequencies <0.5 lam/D and >3lam/D to isolate speckles
cube_sci_hpf = cube_filter_highpass(cube_sci_lpf, 'median-subt',
median_size=median_size, verbose=False)
if min_spat_freq>0:
cube_sci_lpf = cube_filter_lowpass(cube_sci_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_sci_lpf = cube_sci_hpf
if ref_star:
cube_ref_hpf = cube_filter_highpass(cube_ref_lpf, 'median-subt',
median_size=median_size,
verbose=False)
if min_spat_freq>0:
cube_ref_lpf = cube_filter_lowpass(cube_ref_hpf, 'gauss',
fwhm_size=min_spat_freq * fwhm,
verbose=False)
else:
cube_ref_lpf = cube_ref_hpf
if ref_star:
alignment_cube = np.zeros((1 + n + nref, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
alignment_cube[(n + 1):(n + 2 + nref), :, :] = cube_ref_lpf
else:
alignment_cube = np.zeros((1 + n, subframesize, subframesize))
alignment_cube[1:(n + 1), :, :] = cube_sci_lpf
n_frames = alignment_cube.shape[0] # 1+n or 1+n+nref
cum_y_shifts = 0
cum_x_shifts = 0
for i in range(alignment_iter):
alignment_cube[0] = np.median(alignment_cube[1:(n + 1)], axis=0)
if recenter_median:
# Recenter the median frame using a 2d fit
if fit_type == 'gaus':
crop_sz = int(fwhm)
else:
crop_sz = int(6*fwhm)
if not crop_sz%2:
crop_sz+=1
sub_image, y1, x1 = get_square(alignment_cube[0], size=crop_sz,
y=ceny, x=cenx, position=True)
if fit_type == 'gaus':
if negative:
sub_image = -sub_image + np.abs(np.min(-sub_image))
y_i, x_i = fit_2dgaussian(sub_image, crop=False,
threshold=False, sigfactor=1,
debug=debug, full_output=False)
elif fit_type == 'ann':
y_i, x_i, rad = _fit_2dannulus(sub_image, fwhm=fwhm, crop=False,
hole_rad=0.5, sampl_cen=0.1,
sampl_rad=0.2, ann_width=0.5,
unc_in=2.)
yshift = ceny - (y1 + y_i)
xshift = cenx - (x1 + x_i)
alignment_cube[0] = frame_shift(alignment_cube[0, :, :], yshift,
xshift, imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
# center the cube with stretched values
cube_stret = np.log10((np.abs(alignment_cube) + 1) ** gammaval)
if mask is not None and crop:
mask_tmp = frame_crop(mask, subframesize)
else:
mask_tmp = mask
res = cube_recenter_dft_upsampling(cube_stret, (ceny, cenx), fwhm=fwhm,
subi_size=None, full_output=True,
verbose=False, plot=False,
mask=mask_tmp, imlib=imlib,
interpolation=interpolation)
_, y_shift, x_shift = res
sqsum_shifts = np.sum(np.sqrt(y_shift ** 2 + x_shift ** 2))
print('Square sum of shift vecs: ' + str(sqsum_shifts))
for j in range(1, n_frames):
alignment_cube[j] = frame_shift(alignment_cube[j], y_shift[j],
x_shift[j], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
cum_y_shifts += y_shift
cum_x_shifts += x_shift
cube_reg_sci = cube_sci.copy()
cum_y_shifts_sci = cum_y_shifts[1:(n + 1)]
cum_x_shifts_sci = cum_x_shifts[1:(n + 1)]
for i in range(n):
cube_reg_sci[i] = frame_shift(cube_sci[i], cum_y_shifts_sci[i],
cum_x_shifts_sci[i], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
if plot:
plt.figure(figsize=vip_figsize)
plt.plot(cum_x_shifts_sci, 'o-', label='Shifts in x', alpha=0.5)
plt.plot(cum_y_shifts_sci, 'o-', label='Shifts in y', alpha=0.5)
plt.legend(loc='best')
plt.grid('on', alpha=0.2)
plt.ylabel('Pixels')
plt.xlabel('Frame number')
plt.figure(figsize=vip_figsize)
b = int(np.sqrt(n))
la = 'Histogram'
_ = plt.hist(cum_x_shifts_sci, bins=b, alpha=0.5, label=la+' shifts X')
_ = plt.hist(cum_y_shifts_sci, bins=b, alpha=0.5, label=la+' shifts Y')
plt.legend(loc='best')
plt.ylabel('Bin counts')
plt.xlabel('Pixels')
if ref_star:
cube_reg_ref = cube_ref.copy()
cum_y_shifts_ref = cum_y_shifts[(n + 1):]
cum_x_shifts_ref = cum_x_shifts[(n + 1):]
for i in range(nref):
cube_reg_ref[i] = frame_shift(cube_ref[i], cum_y_shifts_ref[i],
cum_x_shifts_ref[i], imlib=imlib,
interpolation=interpolation,
border_mode=border_mode)
if ref_star:
if full_output:
return (cube_reg_sci, cube_reg_ref, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci, cum_x_shifts_ref,
cum_y_shifts_ref)
else:
return (cube_reg_sci, cube_reg_ref)
else:
if full_output:
return (cube_reg_sci, cube_sci_lpf, cube_stret,
cum_x_shifts_sci, cum_y_shifts_sci)
else:
return cube_reg_sci
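# Added illustrative sketch (not part of the original module): aligns a small
# synthetic speckle-like cube. The cube content and parameter values are
# arbitrary assumptions; real inputs would be coronagraphic frames sharing a
# common speckle pattern.
def _example_cube_recenter_via_speckles():
    """Align a synthetic speckle-dominated cube (illustrative only)."""
    import numpy as np
    from scipy.ndimage import shift as nd_shift
    ny = nx = 65
    base = np.random.normal(size=(ny, nx))            # common "speckle" pattern
    offsets = [(0.0, 0.0), (0.6, -0.4), (-0.5, 0.3)]  # (dy, dx) per frame
    cube = np.array([nd_shift(base, off) for off in offsets])
    return cube_recenter_via_speckles(cube, fwhm=4.0, subframesize=21,
                                      alignment_iter=2, plot=False)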
def _fit_2dannulus(array, fwhm=4, crop=False, cent=None, cropsize=15,
hole_rad=0.5, sampl_cen=0.1, sampl_rad=None, ann_width=0.5,
unc_in=2.):
"""Finds the center the center of a donut-shape signal (e.g. a coronagraphic
PSF) by fitting an annulus, using a grid of positions for the center and
radius of the annulus. The best fit is found by maximizing the mean flux
measured in the annular mask. Requires the image to be already roughly
centered (by an uncertainty provided by unc_in).
Parameters
----------
array : array_like
Image with a single donut-like source, already approximately at the
center of the frame.
fwhm : float
Gaussian PSF full width half maximum from fitting (in pixels).
hole_rad: float, opt
First estimate of the hole radius (in terms of fwhm). The grid search
on the radius of the optimal annulus goes from 0.5 to 2 times hole_rad.
Note: for the AGPM PSF of VLT/NACO, the optimal hole_rad ~ 0.5FWHM.
sampl_cen: float, opt
Precision of the grid sampling to find the center of the annulus (in
pixels)
sampl_rad: float, opt or None.
Precision of the grid sampling to find the optimal radius of the
annulus (in pixels). If set to None, there is no grid search for the
optimal radius of the annulus, the value given by hole_rad is used.
ann_width: float, opt
Width of the annulus in FWHM; default is 0.5 FWHM.
unc_in: float, opt
Initial uncertainty on the center location (with respect to center of
input subframe) in pixels; this will set the grid width.
Returns
-------
mean_y : float
Source centroid y position on the full image from fitting.
mean_x : float
Source centroid x position on the full image from fitting.
if sampl_rad is not None, also returns final_hole_rad:
final_hole_rad : float
Best fit radius of the hole, in terms of fwhm.
"""
if cent is None:
ceny, cenx = frame_center(array)
else:
cenx, ceny = cent
if crop:
x_sub_px = cenx%1
y_sub_px = ceny%1
imside = array.shape[0]
psf_subimage, suby, subx = get_square(array, min(cropsize, imside),
int(ceny), int(cenx),
position=True)
ceny, cenx = frame_center(psf_subimage)
ceny+=y_sub_px
cenx+=x_sub_px
else:
psf_subimage = array.copy()
ann_sz = ann_width*fwhm
grid_sh_x = np.arange(-unc_in,unc_in,sampl_cen)
grid_sh_y = np.arange(-unc_in,unc_in,sampl_cen)
if sampl_rad is None:
rads = [hole_rad*fwhm]
else:
rads = np.arange(0.5*hole_rad*fwhm,2*hole_rad*fwhm,sampl_rad)
flux_ann = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
best_rad = np.zeros([grid_sh_x.shape[0],grid_sh_y.shape[0]])
for ii, xx in enumerate(grid_sh_x):
for jj, yy in enumerate(grid_sh_y):
tmp_tmp = frame_shift(array,yy,xx)
for rr, rad in enumerate(rads):
# mean flux in the annulus
tmp = frame_basic_stats(tmp_tmp, 'annulus',inner_radius=rad,
size=ann_sz, plot=False)
if tmp > flux_ann[ii,jj]:
flux_ann[ii,jj] = tmp
best_rad[ii,jj] = rad
i_max,j_max = np.unravel_index(np.argmax(flux_ann),flux_ann.shape)
mean_x = cenx - grid_sh_x[i_max]
mean_y = ceny - grid_sh_y[j_max]
if sampl_rad is None:
return mean_y, mean_x
else:
final_hole_rad = best_rad[i_max,j_max]/fwhm
return mean_y, mean_x, final_hole_rad
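# Added illustrative sketch (not part of the original module): recovers the
# centre of a synthetic donut-shaped PSF with the annulus grid search. Ring
# radius, width and grid sampling are arbitrary assumptions kept small so the
# search stays cheap.
def _example_fit_2dannulus():
    """Fit an annulus to a synthetic donut-shaped signal (illustrative only)."""
    import numpy as np
    ny = nx = 41
    yy, xx = np.mgrid[:ny, :nx]
    r = np.sqrt((yy - 20.4) ** 2 + (xx - 19.7) ** 2)
    donut = np.exp(-(r - 4.0) ** 2 / (2 * 1.5 ** 2))   # ring of radius ~4 px
    return _fit_2dannulus(donut, fwhm=4.0, hole_rad=1.0, sampl_cen=0.2,
                          sampl_rad=None, unc_in=1.)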
|
vortex-exoplanet/VIP
|
vip_hci/preproc/recentering.py
|
Python
|
mit
| 72,782
|
[
"Gaussian"
] |
5d5d5baf5efe413e7bf991c1d5694fe919e281ae4a65a0d65173f37829e35c45
|
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""left navigation pane"""
import logging
logger = logging.getLogger('camelot.view.controls.navpane2')
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QMenu
from PyQt4.QtGui import QFrame
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QToolBox
from PyQt4.QtGui import QDockWidget
from PyQt4.QtGui import QVBoxLayout
from camelot.admin.action.application_action import ApplicationActionGuiContext
from camelot.admin.section import Section, SectionItem
from camelot.core.utils import variant_to_pyobject
from camelot.view.model_thread import post
from camelot.view.controls.modeltree import ModelItem
from camelot.view.controls.modeltree import ModelTree
class PaneSection(QWidget):
def __init__(self, parent, section, workspace):
super(PaneSection, self).__init__(parent)
self._items = []
self._workspace = workspace
self._section = section
layout = QVBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
section_tree = ModelTree(parent=self)
# i hate the sunken frame style
section_tree.setFrameShape(QFrame.NoFrame)
section_tree.setFrameShadow(QFrame.Plain)
section_tree.contextmenu = QMenu(self)
section_tree.setContextMenuPolicy(Qt.CustomContextMenu)
section_tree.customContextMenuRequested.connect(self.create_context_menu)
section_tree.setObjectName( 'SectionTree' )
section_tree.itemClicked.connect( self._item_clicked )
section_tree.setWordWrap( False )
layout.addWidget( section_tree )
self.setLayout(layout)
post( section.get_items, self.set_items )
@QtCore.pyqtSlot(object)
def set_items(self, items, parent = None):
logger.debug('setting items for current navpane section')
section_tree = self.findChild(QtGui.QWidget, 'SectionTree')
if section_tree:
if parent is None:
# take a copy, so the copy can be extended
self._items = list(i for i in items)
section_tree.clear()
section_tree.clear_model_items()
parent = section_tree
if not items: return
for item in items:
label = item.get_verbose_name()
icon = item.get_icon()
model_item = ModelItem( parent,
[unicode(label)],
item )
if icon:
model_item.set_icon(icon.getQIcon())
section_tree.modelitems.append( model_item )
if isinstance( item, Section ):
child_items = item.get_items()
self.set_items( child_items, parent = model_item )
self._items.extend( child_items )
section_tree.resizeColumnToContents( 0 )
def create_context_menu(self, point):
logger.debug('creating context menu')
section_tree = self.findChild(QtGui.QWidget, 'SectionTree')
if section_tree:
item = section_tree.itemAt(point)
if item:
section_tree.contextmenu.clear()
for mode in item.section_item.get_modes():
action = mode.render( self )
action.triggered.connect( self._action_triggered )
section_tree.contextmenu.addAction( action )
section_tree.setCurrentItem(item)
section_tree.contextmenu.popup(section_tree.mapToGlobal(point))
@QtCore.pyqtSlot(bool)
def _action_triggered( self, _checked ):
action = self.sender()
mode_name = variant_to_pyobject( action.data() )
self._run_current_action( mode_name )
@QtCore.pyqtSlot(QtGui.QTreeWidgetItem, int)
def _item_clicked(self, _item, _column):
self._run_current_action()
def _run_current_action( self, mode_name=None ):
section_tree = self.findChild(QtGui.QWidget, 'SectionTree')
if section_tree:
item = section_tree.currentItem()
index = section_tree.indexFromItem(item)
parent = index.parent()
if parent.row() >= 0:
section = self._items[parent.row()]
section_item = section.items[index.row()]
else:
section_item = self._items[index.row()]
if not isinstance( section_item, SectionItem ):
return
gui_context = ApplicationActionGuiContext()
gui_context.mode_name = mode_name
gui_context.workspace = self._workspace
gui_context.admin = self._section.admin
section_item.get_action().gui_run( gui_context )
class NavigationPane(QDockWidget):
def __init__(self, app_admin, workspace, parent):
super(NavigationPane, self).__init__(parent)
self._workspace = workspace
self.app_admin = app_admin
tb = QToolBox()
tb.setFrameShape(QFrame.NoFrame)
tb.layout().setContentsMargins(0,0,0,0)
tb.layout().setSpacing(1)
tb.setObjectName('toolbox')
tb.setMouseTracking(True)
# hack for removing the dock title bar
self.setTitleBarWidget(QWidget())
self.setWidget(tb)
self.setFeatures(QDockWidget.NoDockWidgetFeatures)
self.app_admin.sections_changed_signal.connect(self.update_sections)
self.update_sections()
def wheelEvent(self, wheel_event):
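        # A QWheelEvent.delta() is expressed in eighths of a degree and a
        # standard wheel notch is 15 degrees (delta == 120), so dividing by
        # 8 * 15 maps one notch to one toolbox step; the sign is flipped so
        # that scrolling down moves to the next section.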
steps = -1 * wheel_event.delta() / (8 * 15)
toolbox = self.findChild(QtGui.QWidget, 'toolbox')
if steps and toolbox:
current_index = toolbox.currentIndex()
toolbox.setCurrentIndex( max( 0, min( current_index + steps, toolbox.count() ) ) )
@QtCore.pyqtSlot()
def update_sections(self):
post(self.app_admin.get_sections, self.set_sections)
def get_sections(self):
return self._sections
@QtCore.pyqtSlot(object)
def set_sections(self, sections):
logger.debug('setting navpane sections')
if not sections:
self.setMaximumWidth(0)
return
toolbox = self.findChild(QtGui.QWidget, 'toolbox')
animation = QtCore.QPropertyAnimation(toolbox, 'minimumWidth', self)
animation.setDuration( 500 )
animation.setStartValue( 0 )
animation.setEndValue( 220 )
if self._workspace:
animation.finished.connect(self._workspace._background_widget.makeInteractive)
animation.start()
# perform QToolBox clean up: QToolBox won't delete its items, so we have
# to remove and delete them explicitly
count = toolbox.count()
while count:
item = toolbox.widget(count-1)
toolbox.removeItem(count-1)
item.deleteLater()
count -= 1
for section in sections:
# TODO: old navpane used translation here
name = unicode( section.get_verbose_name() )
icon = section.get_icon().getQIcon()
pwdg = PaneSection(toolbox, section, self._workspace)
toolbox.addItem(pwdg, icon, name)
toolbox.setCurrentIndex(0)
# WARNING: hardcoded width
#self._toolbox.setMinimumWidth(220)
|
jeroendierckx/Camelot
|
camelot/view/controls/navpane2.py
|
Python
|
gpl-2.0
| 8,619
|
[
"VisIt"
] |
b2114f74e15e636a1b82fd99ebdf56e4607a75b29da9973abd0c308e0fdeccc3
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gaussian mixture models Operations."""
# TODO(xavigonzalvo): Factor out covariance matrix operations to make
# code reusable for different types (e.g. diag).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.embedding_ops import embedding_lookup
from tensorflow.python.summary import summary
# Machine epsilon.
MEPS = np.finfo(float).eps
FULL_COVARIANCE = 'full'
DIAG_COVARIANCE = 'diag'
def _covariance(x, diag):
"""Defines the covariance operation of a matrix.
Args:
x: a matrix Tensor. Dimension 0 should contain the number of examples.
diag: if True, it computes the diagonal covariance.
Returns:
A Tensor representing the covariance of x. In the case of
diagonal matrix just the diagonal is returned.
"""
num_points = math_ops.to_float(array_ops.shape(x)[0])
x -= math_ops.reduce_mean(x, 0, keep_dims=True)
if diag:
cov = math_ops.reduce_sum(
math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
else:
cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
return cov
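# Illustrative NumPy sketch of what the op above computes, assuming x is a
# 2-D float array of shape [num_points, dims]:
#
#   xc = x - x.mean(axis=0, keepdims=True)
#   full_cov = xc.T.dot(xc) / (len(x) - 1)                          # [dims, dims]
#   diag_cov = (xc ** 2).sum(axis=0, keepdims=True) / (len(x) - 1)  # [1, dims]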
def _init_clusters_random(data, num_clusters, random_seed):
"""Does random initialization of clusters.
Args:
data: a list of Tensors with a matrix of data, each row is an example.
num_clusters: an integer with the number of clusters.
random_seed: Seed for PRNG used to initialize seeds.
Returns:
A Tensor with num_clusters random rows of data.
"""
assert isinstance(data, list)
num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
with ops.control_dependencies(
[check_ops.assert_less_equal(num_clusters, num_data)]):
indices = random_ops.random_uniform(
[num_clusters],
minval=0,
maxval=math_ops.cast(num_data, dtypes.int64),
seed=random_seed,
dtype=dtypes.int64)
indices = math_ops.cast(indices, dtypes.int32) % num_data
clusters_init = embedding_lookup(data, indices, partition_strategy='div')
return clusters_init
class GmmAlgorithm(object):
"""Tensorflow Gaussian mixture model clustering class."""
CLUSTERS_WEIGHT = 'alphas'
CLUSTERS_VARIABLE = 'clusters'
CLUSTERS_COVS_VARIABLE = 'clusters_covs'
def __init__(self,
data,
num_classes,
initial_means=None,
params='wmc',
covariance_type=FULL_COVARIANCE,
random_seed=0):
"""Constructor.
Args:
data: a list of Tensors with data, each row is a new example.
num_classes: number of clusters.
initial_means: a Tensor with a matrix of means. If None, means are
computed by sampling randomly.
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covariances.
covariance_type: one of "full", "diag".
random_seed: Seed for PRNG used to initialize seeds.
Raises:
Exception if covariance type is unknown.
"""
self._params = params
self._random_seed = random_seed
self._covariance_type = covariance_type
if self._covariance_type not in [DIAG_COVARIANCE, FULL_COVARIANCE]:
raise Exception( # pylint: disable=g-doc-exception
'programmer error: Invalid covariance type: %s' %
self._covariance_type)
# Create sharded variables for multiple shards. The following
# lists are indexed by shard.
# Probability per example in a class.
num_shards = len(data)
self._probs = [None] * num_shards
# Prior probability.
self._prior_probs = [None] * num_shards
# Membership weights w_{ik} where "i" is the i-th example and "k"
# is the k-th mixture.
self._w = [None] * num_shards
# Number of examples in a class.
self._points_in_k = [None] * num_shards
first_shard = data[0]
self._dimensions = array_ops.shape(first_shard)[1]
self._num_classes = num_classes
# Small value to guarantee that covariances are invertible.
self._min_var = array_ops.diag(
array_ops.ones(array_ops.stack([self._dimensions]))) * 1e-3
self._create_variables(data, initial_means)
# Operations of partial statistics for the computation of the means.
self._w_mul_x = []
# Operations of partial statistics for the computation of the covariances.
self._w_mul_x2 = []
self._define_graph(data)
def _create_variables(self, data, initial_means=None):
"""Initializes GMM algorithm.
Args:
data: a list of Tensors with data, each row is a new example.
initial_means: a Tensor with a matrix of means.
"""
first_shard = data[0]
# Initialize means: num_classes X 1 X dimensions.
if initial_means is not None:
self._means = variables.Variable(
array_ops.expand_dims(initial_means, 1),
name=self.CLUSTERS_VARIABLE,
validate_shape=False,
dtype=dtypes.float32)
else:
# Sample data randomly
self._means = variables.Variable(
array_ops.expand_dims(
_init_clusters_random(data, self._num_classes, self._random_seed),
1),
name=self.CLUSTERS_VARIABLE,
validate_shape=False)
# Initialize covariances.
if self._covariance_type == FULL_COVARIANCE:
cov = _covariance(first_shard, False) + self._min_var
# A matrix per class, num_classes X dimensions X dimensions
covs = array_ops.tile(
array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
elif self._covariance_type == DIAG_COVARIANCE:
cov = _covariance(first_shard, True) + self._min_var
# A diagonal per row, num_classes X dimensions.
covs = array_ops.tile(
array_ops.expand_dims(array_ops.diag_part(cov), 0),
[self._num_classes, 1])
self._covs = variables.Variable(
covs, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
# Mixture weights, representing the probability that a randomly
# selected unobservable data (in EM terms) was generated by component k.
self._alpha = variables.Variable(
array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
name=self.CLUSTERS_WEIGHT,
validate_shape=False)
def training_ops(self):
"""Returns the training operation."""
return self._train_ops
def alphas(self):
return self._alpha
def clusters(self):
"""Returns the clusters with dimensions num_classes X 1 X num_dimensions."""
return self._means
def covariances(self):
"""Returns the covariances matrices."""
return self._covs
def assignments(self):
"""Returns a list of Tensors with the matrix of assignments per shard."""
ret = []
for w in self._w:
ret.append(math_ops.argmax(w, 1))
return ret
def scores(self):
"""Returns the distances to each class.
Returns:
A tuple with two Tensors. The first contains the distance to
each class. The second contains the distance to the assigned
class.
"""
return (self._all_scores, self._scores)
def _define_graph(self, data):
"""Define graph for a single iteration.
Args:
data: a list of Tensors defining the training data.
"""
for shard_id, shard in enumerate(data):
self._num_examples = array_ops.shape(shard)[0]
shard = array_ops.expand_dims(shard, 0)
self._define_log_prob_operation(shard_id, shard)
self._define_prior_log_prob_operation(shard_id)
self._define_expectation_operation(shard_id)
self._define_partial_maximization_operation(shard_id, shard)
self._define_maximization_operation(len(data))
self._define_distance_to_clusters(data)
def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilties per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
diff = shard - self._means
cholesky = linalg_ops.cholesky(self._covs + self._min_var)
log_det_covs = 2.0 * math_ops.reduce_sum(
math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
x_mu_cov = math_ops.square(
linalg_ops.matrix_triangular_solve(
cholesky, array_ops.transpose(
diff, perm=[0, 2, 1]), lower=True))
diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
* math_ops.log(2 * np.pi) + log_det_covs)
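# For reference, the expression above is the per-class multivariate normal
# log-density
#   log N(x | mu_k, Sigma_k) = -0.5 * [ (x - mu_k)^T Sigma_k^{-1} (x - mu_k)
#                                       + d * log(2*pi) + log|Sigma_k| ],
# where the quadratic form comes from solving L z = (x - mu_k) with the
# Cholesky factor L (Sigma_k = L L^T) and summing z**2, and
# log|Sigma_k| = 2 * sum(log(diag(L))).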
def _define_diag_covariance_probs(self, shard_id, shard):
"""Defines the diagonal covariance probabilities per example in a class.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
Returns a matrix num_examples * num_classes.
"""
# num_classes X 1
# TODO(xavigonzalvo): look into alternatives to log for
# reparametrization of variance parameters.
det_expanded = math_ops.reduce_sum(
math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
diff = shard - self._means
x2 = math_ops.square(diff)
cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
# num_classes X num_examples
x2_cov = math_ops.matmul(x2, cov_expanded)
x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
self._probs[shard_id] = -0.5 * (
math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
array_ops.transpose(det_expanded) + x2_cov)
def _define_log_prob_operation(self, shard_id, shard):
"""Probability per example in a class.
Updates a matrix with dimension num_examples X num_classes.
Args:
shard_id: id of the current shard.
shard: current data shard, 1 X num_examples X dimensions.
"""
# TODO(xavigonzalvo): Use the pdf defined in
# third_party/tensorflow/contrib/distributions/python/ops/gaussian.py
if self._covariance_type == FULL_COVARIANCE:
self._define_full_covariance_probs(shard_id, shard)
elif self._covariance_type == DIAG_COVARIANCE:
self._define_diag_covariance_probs(shard_id, shard)
self._probs[shard_id] += math_ops.log(self._alpha)
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.
Updates a vector where each item is the prior probability of an
input example.
Args:
shard_id: id of current shard_id.
"""
self._prior_probs[shard_id] = math_ops.reduce_logsumexp(
self._probs[shard_id], axis=1, keep_dims=True)
def _define_expectation_operation(self, shard_id):
# Shape broadcasting.
probs = array_ops.expand_dims(self._probs[shard_id], 0)
# Membership weights are computed as:
# w_{ik} = \frac{\alpha_k f(\mathbf{y_i}|\mathbf{\theta}_k)}
# {\sum_{m=1}^{K}\alpha_mf(\mathbf{y_i}|\mathbf{\theta}_m)}
# where "i" is the i-th example, "k" is the k-th mixture, theta are
# the model parameters and y_i the observations.
# These are defined for each shard.
self._w[shard_id] = array_ops.reshape(
math_ops.exp(probs - self._prior_probs[shard_id]),
array_ops.stack([self._num_examples, self._num_classes]))
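# For reference: since self._probs holds log(alpha_k) + log N(x_i | theta_k)
# and self._prior_probs is its logsumexp over k, the update above is a
# numerically stable per-row softmax. A NumPy sketch of the same E-step,
# assuming log_p has shape [n, k] and scipy.special.logsumexp is available:
#
#   w = np.exp(log_p - logsumexp(log_p, axis=1, keepdims=True))  # rows sum to 1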
def _define_partial_maximization_operation(self, shard_id, shard):
"""Computes the partial statistics of the means and covariances.
Args:
shard_id: current shard id.
shard: current data shard, 1 X num_examples X dimensions.
"""
# Soft assignment of each data point to each of the clusters.
self._points_in_k[shard_id] = math_ops.reduce_sum(
self._w[shard_id], 0, keep_dims=True)
# Partial means.
w_mul_x = array_ops.expand_dims(
math_ops.matmul(
self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
1)
self._w_mul_x.append(w_mul_x)
# Partial covariances.
x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
x_trans = array_ops.transpose(x, perm=[0, 2, 1])
x_mul_w = array_ops.concat([
array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
for k in range(self._num_classes)
], 0)
self._w_mul_x2.append(math_ops.matmul(x_mul_w, x))
def _define_maximization_operation(self, num_batches):
"""Maximization operations."""
# TODO(xavigonzalvo): some of these operations could be moved to C++.
# Compute the effective number of data points assigned to component k.
with ops.control_dependencies(self._w):
points_in_k = array_ops.squeeze(
math_ops.add_n(self._points_in_k), squeeze_dims=[0])
# Update alpha.
if 'w' in self._params:
final_points_in_k = points_in_k / num_batches
num_examples = math_ops.to_float(math_ops.reduce_sum(final_points_in_k))
self._alpha_op = self._alpha.assign(final_points_in_k /
(num_examples + MEPS))
else:
self._alpha_op = control_flow_ops.no_op()
self._train_ops = [self._alpha_op]
# Update means.
points_in_k_expanded = array_ops.reshape(points_in_k,
[self._num_classes, 1, 1])
if 'm' in self._params:
self._means_op = self._means.assign(
math_ops.div(
math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
else:
self._means_op = control_flow_ops.no_op()
# means are (num_classes x 1 x dims)
# Update covariances.
with ops.control_dependencies([self._means_op]):
b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
new_covs = []
for k in range(self._num_classes):
mean = self._means.value()[k, :, :]
square_mean = math_ops.matmul(mean, mean, transpose_a=True)
new_cov = b[k, :, :] - square_mean + self._min_var
if self._covariance_type == FULL_COVARIANCE:
new_covs.append(array_ops.expand_dims(new_cov, 0))
elif self._covariance_type == DIAG_COVARIANCE:
new_covs.append(
array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
new_covs = array_ops.concat(new_covs, 0)
if 'c' in self._params:
# Train operations don't need to take care of the means
# because the covariances already depend on them.
with ops.control_dependencies([self._means_op, new_covs]):
self._train_ops.append(
state_ops.assign(
self._covs, new_covs, validate_shape=False))
def _define_distance_to_clusters(self, data):
"""Defines the Mahalanobis distance to the assigned Gaussian."""
# TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
# mean) from log probability function.
self._all_scores = []
for shard in data:
all_scores = []
shard = array_ops.expand_dims(shard, 0)
for c in xrange(self._num_classes):
if self._covariance_type == FULL_COVARIANCE:
cov = self._covs[c, :, :]
elif self._covariance_type == DIAG_COVARIANCE:
cov = array_ops.diag(self._covs[c, :])
inverse = linalg_ops.matrix_inverse(cov + self._min_var)
inv_cov = array_ops.tile(
array_ops.expand_dims(inverse, 0),
array_ops.stack([self._num_examples, 1, 1]))
diff = array_ops.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
m_left = math_ops.matmul(diff, inv_cov)
all_scores.append(
math_ops.sqrt(
math_ops.matmul(
m_left, array_ops.transpose(
diff, perm=[0, 2, 1]))))
self._all_scores.append(
array_ops.reshape(
array_ops.concat(all_scores, 1),
array_ops.stack([self._num_examples, self._num_classes])))
# Distance to the associated class.
self._all_scores = array_ops.concat(self._all_scores, 0)
assignments = array_ops.concat(self.assignments(), 0)
rows = math_ops.to_int64(math_ops.range(0, self._num_examples))
indices = array_ops.concat(
[array_ops.expand_dims(rows, 1), array_ops.expand_dims(assignments, 1)],
1)
self._scores = array_ops.gather_nd(self._all_scores, indices)
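# For reference: each entry of self._all_scores is the Mahalanobis distance
#   d(x_i, k) = sqrt( (x_i - mu_k)^T Sigma_k^{-1} (x_i - mu_k) ),
# and self._scores gathers, for every example i, the distance to its assigned
# cluster argmax_k w_{ik}.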
def _define_loglikelihood_operation(self):
"""Defines the total log-likelihood of current iteration."""
self._ll_op = []
for prior_probs in self._prior_probs:
self._ll_op.append(math_ops.reduce_sum(math_ops.log(prior_probs)))
summary.scalar('ll', math_ops.reduce_sum(self._ll_op))
def gmm(inp,
initial_clusters,
num_clusters,
random_seed,
covariance_type=FULL_COVARIANCE,
params='wmc'):
"""Creates the graph for Gaussian mixture model (GMM) clustering.
Args:
inp: An input tensor or list of input tensors
initial_clusters: Specifies the clusters used during
initialization. Can be a tensor or numpy array, or a function
that generates the clusters. Can also be "random" to specify
that clusters should be chosen randomly from input data. Note: the
argument type is kept flexible to be consistent with skflow.
num_clusters: number of clusters.
random_seed: Python integer. Seed for PRNG used to initialize centers.
covariance_type: one of "diag", "full".
params: Controls which parameters are updated in the training
process. Can contain any combination of "w" for weights, "m" for
means, and "c" for covars.
Returns:
Note: tuple of lists returned to be consistent with skflow
A tuple consisting of:
all_scores: A matrix (or list of matrices) of dimensions (num_input,
num_clusters) where the value is the distance of an input vector and a
cluster center.
assignments: A vector (or list of vectors). Each element in the vector
corresponds to an input row in 'inp' and specifies the cluster id
corresponding to the input.
scores: Similar to assignments but specifies the distance to the
assigned cluster instead.
training_op: an op that runs an iteration of training.
"""
initial_means = None
if initial_clusters != 'random' and not isinstance(initial_clusters,
ops.Tensor):
initial_means = constant_op.constant(initial_clusters, dtype=dtypes.float32)
# Implementation of GMM.
inp = inp if isinstance(inp, list) else [inp]
gmm_tool = GmmAlgorithm(inp, num_clusters, initial_means, params,
covariance_type, random_seed)
training_ops = gmm_tool.training_ops()
assignments = gmm_tool.assignments()
all_scores, scores = gmm_tool.scores()
return [all_scores], [assignments], [scores], control_flow_ops.group(
*training_ops)
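# Illustrative usage sketch (TF-1.x graph-mode API; assumes
# `import tensorflow as tf` and `import numpy as np`):
#
#   x = tf.constant(np.random.rand(500, 2), dtype=tf.float32)
#   all_scores, assignments, scores, train_op = gmm(
#       x, initial_clusters='random', num_clusters=3, random_seed=0)
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for _ in range(20):  # run a few EM iterations
#       sess.run(train_op)
#     labels = sess.run(assignments)  # per-shard cluster ids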
|
mengxn/tensorflow
|
tensorflow/contrib/factorization/python/ops/gmm_ops.py
|
Python
|
apache-2.0
| 20,164
|
[
"Gaussian"
] |
adafd6a94af81db61f8361075262b6b96431a1024036c25abc3e3a8d384529eb
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import numpy as np
from time import time
import argparse
parser = argparse.ArgumentParser(description="Benchmark P3M simulations. "
"Save the results to a CSV file.")
parser.add_argument("--particles_per_core", metavar="N", action="store",
type=int, default=1000, required=False,
help="Number of particles in the simulation box")
parser.add_argument("--volume_fraction", metavar="FRAC", action="store",
type=float, default=0.25, required=False,
help="Fraction of the simulation box volume occupied by "
"particles (range: [0.01-0.74], default: 0.25)")
parser.add_argument("--prefactor", metavar="PREFACTOR", action="store",
type=float, default=4., required=False,
help="P3M prefactor (default: 4)")
group = parser.add_mutually_exclusive_group()
group.add_argument("--output", metavar="FILEPATH", action="store",
type=str, required=False, default="benchmarks.csv",
help="Output file (default: benchmarks.csv)")
group.add_argument("--visualizer", action="store_true",
help="Starts the visualizer (for debugging purposes)")
args = parser.parse_args()
# process and check arguments
n_proc = int(os.environ.get("OMPI_COMM_WORLD_SIZE", 1))
n_part = n_proc * args.particles_per_core
measurement_steps = int(np.round(5e5 / args.particles_per_core, -1))
n_iterations = 30
assert args.prefactor > 0, "prefactor must be a positive number"
assert args.volume_fraction > 0, "volume_fraction must be a positive number"
assert args.volume_fraction < np.pi / (3 * np.sqrt(2)), \
"volume_fraction exceeds the physical limit of sphere packing (~0.74)"
if not args.visualizer:
assert(measurement_steps >= 50), \
"{} steps per tick are too short".format(measurement_steps)
import espressomd
from espressomd import thermostat
from espressomd import electrostatics
if args.visualizer:
from espressomd import visualization
from threading import Thread
required_features = ["P3M", "LENNARD_JONES", "MASS"]
espressomd.assert_features(required_features)
print(espressomd.features())
# Interaction parameters (Lennard-Jones, Coulomb)
#############################################################
species = ["anion", "cation"]
types = {"anion": 0, "cation": 0}
charges = {"anion": -1.0, "cation": 1.0}
lj_sigmas = {"anion": 1.0, "cation": 1.0}
lj_epsilons = {"anion": 1.0, "cation": 1.0}
WCA_cut = 2.**(1. / 6.)
lj_cuts = {"anion": WCA_cut * lj_sigmas["anion"],
"cation": WCA_cut * lj_sigmas["cation"]}
masses = {"anion": 1.0, "cation": 1.0}
# System parameters
#############################################################
# volume of N spheres with radius r: N * (4/3*pi*r^3)
lj_sig = (lj_sigmas["cation"] + lj_sigmas["anion"]) / 2
box_l = (n_part * 4. / 3. * np.pi * (lj_sig / 2.)**3
/ args.volume_fraction)**(1. / 3.)
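# For reference, the formula above solves
# volume_fraction = n_part * (4/3) * pi * (lj_sig / 2)**3 / box_l**3 for box_l.
# For example, with n_part = 1000, lj_sig = 1.0 and volume_fraction = 0.25 the
# sphere volume is about 0.5236, so box_l is about
# (1000 * 0.5236 / 0.25)**(1/3), i.e. roughly 12.8.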
# System
#############################################################
system = espressomd.System(box_l=3 * (box_l,))
system.cell_system.set_domain_decomposition(use_verlet_lists=True)
# PRNG seeds
#############################################################
system.random_number_generator_state = list(range(
n_proc * (system._get_PRNG_state_size() + 1)))
# Integration parameters
#############################################################
system.time_step = 0.01
system.cell_system.skin = .4
system.thermostat.turn_off()
#############################################################
# Setup System #
#############################################################
# Interaction setup
#############################################################
for i in range(len(species)):
ion1 = species[i]
for j in range(i, len(species)):
ion2 = species[j]
lj_sig = (lj_sigmas[ion1] + lj_sigmas[ion2]) / 2
lj_cut = (lj_cuts[ion1] + lj_cuts[ion2]) / 2
lj_eps = (lj_epsilons[ion1] * lj_epsilons[ion2])**(1. / 2.)
system.non_bonded_inter[types[ion1],
types[ion2]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
# Particle setup
#############################################################
for i in range(0, n_part, len(species)):
for t in species:
system.part.add(pos=np.random.random(3) * system.box_l,
q=charges[t], type=types[t], mass=masses[t])
#############################################################
# Warmup Integration #
#############################################################
energy = system.analysis.energy()
print("Before Minimization: E_total = {}".format(energy["total"]))
system.minimize_energy.init(f_max=1000, gamma=30.0,
max_steps=1000, max_displacement=0.05)
system.minimize_energy.minimize()
system.minimize_energy.minimize()
energy = system.analysis.energy()
print("After Minimization: E_total = {}".format(energy["total"]))
system.integrator.set_vv()
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# tuning and equilibration
system.integrator.run(min(3 * measurement_steps, 1000))
print("Tune skin: {}".format(system.cell_system.tune_skin(
min_skin=0.4, max_skin=1.6, tol=0.05, int_steps=100,
adjust_max_skin=True)))
system.integrator.run(min(3 * measurement_steps, 3000))
print("Tune p3m")
p3m = electrostatics.P3M(prefactor=args.prefactor, accuracy=1e-4)
system.actors.add(p3m)
system.integrator.run(min(3 * measurement_steps, 3000))
print("Tune skin: {}".format(system.cell_system.tune_skin(
min_skin=1.0, max_skin=1.6, tol=0.05, int_steps=100,
adjust_max_skin=True)))
if not args.visualizer:
# print initial energies
energies = system.analysis.energy()
print(energies)
# time integration loop
print("Timing every {} steps".format(measurement_steps))
main_tick = time()
all_t = []
for i in range(n_iterations):
tick = time()
system.integrator.run(measurement_steps)
tock = time()
t = (tock - tick) / measurement_steps
print("step {}, time = {:.2e}, verlet: {:.2f}"
.format(i, t, system.cell_system.get_state()["verlet_reuse"]))
all_t.append(t)
main_tock = time()
# average time
all_t = np.array(all_t)
avg = np.average(all_t)
ci = 1.96 * np.std(all_t) / np.sqrt(len(all_t) - 1)
print("average: {:.3e} +/- {:.3e} (95% C.I.)".format(avg, ci))
# print final energies
energies = system.analysis.energy()
print(energies)
# write report
cmd = " ".join(x for x in sys.argv[1:] if not x.startswith("--output"))
report = ('"{script}","{arguments}",{cores},"{mpi}",{mean:.3e},'
'{ci:.3e},{n},{dur:.1f}\n'.format(
script=os.path.basename(sys.argv[0]), arguments=cmd,
cores=n_proc, dur=main_tock - main_tick, n=measurement_steps,
mpi="OMPI_COMM_WORLD_SIZE" in os.environ, mean=avg, ci=ci))
if not os.path.isfile(args.output):
report = ('"script","arguments","cores","MPI","mean","ci",'
'"nsteps","duration"\n' + report)
with open(args.output, "a") as f:
f.write(report)
else:
# use visualizer
visualizer = visualization.openGLLive(system)
visualizer.run(1)
|
mkuron/espresso
|
maintainer/benchmarks/p3m.py
|
Python
|
gpl-3.0
| 8,176
|
[
"ESPResSo"
] |
b3e7a65c2d008f4dc8c9ab3c1fb8f08240fd123fa892a310eec6cfd06cd8cfb6
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2007 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
""" Editors definitions for products"""
import collections
from decimal import Decimal
import gtk
from kiwi.currency import currency
from kiwi.datatypes import ValidationError
from kiwi.ui.forms import TextField
from stoqdrivers.enum import TaxType
from stoqlib.api import api
from stoqlib.domain.product import (ProductSupplierInfo, Product,
ProductComponent,
ProductQualityTest, Storable,
ProductManufacturer, ProductAttribute)
from stoqlib.domain.sellable import (Sellable,
SellableTaxConstant)
from stoqlib.gui.base.dialogs import run_dialog
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.gui.editors.sellableeditor import SellableEditor
from stoqlib.lib.decorators import cached_property
from stoqlib.lib.defaults import quantize, MAX_INT
from stoqlib.lib.message import info
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
#
# Slaves
#
class TemporaryProductComponent(object):
def __init__(self, product=None, component=None, quantity=Decimal(1),
design_reference=u''):
self.product = product
self.component = component
self.quantity = quantity
self.design_reference = design_reference
if self.component is not None:
# keep these values in memory in order to speed up the
# data access
sellable = self.component.sellable
self.id = sellable.id
self.code = sellable.code
self.description = sellable.get_description()
self.category = sellable.get_category_description()
self.unit = sellable.unit_description
self.production_cost = self.component.get_production_cost()
def _get_product_component(self, store):
return store.find(ProductComponent,
product=self.product, component=self.component).one()
#
# Public API
#
def get_total_production_cost(self):
return quantize(self.production_cost * self.quantity)
def delete_product_component(self, store):
component = self._get_product_component(store)
if component is not None:
# FIXME: bug 5581 Check if we can really remove this object when
# working with synced databases
store.remove(component)
def add_or_update_product_component(self, store):
component = self._get_product_component(store)
if component is not None:
# updating
component.quantity = self.quantity
component.design_reference = self.design_reference
else:
# adding
ProductComponent(product=self.product,
component=self.component,
quantity=self.quantity,
design_reference=self.design_reference,
store=store)
#
# Quality Test Editor & Slave
#
class QualityTestEditor(BaseEditor):
model_name = _('Quality Test')
model_type = ProductQualityTest
gladefile = 'QualityTestEditor'
proxy_widgets = ['description', 'test_type']
confirm_widgets = ['description']
def __init__(self, store, model=None, product=None):
self._product = product
BaseEditor.__init__(self, store=store, model=model)
def _setup_widgets(self):
self.sizegroup1.add_widget(self.decimal_value)
self.sizegroup1.add_widget(self.boolean_value)
self.test_type.prefill([(value, key)
for key, value in ProductQualityTest.types.items()])
self.boolean_value.prefill([(_('True'), True), (_(('False')), False)])
# Editing values
if self.model.test_type == ProductQualityTest.TYPE_BOOLEAN:
self.boolean_value.select(self.model.get_boolean_value())
else:
min_value, max_value = self.model.get_range_value()
self.min_value.set_value(min_value)
self.max_value.set_value(max_value)
def create_model(self, store):
return ProductQualityTest(product=self._product, store=store)
def setup_proxies(self):
self._setup_widgets()
self.proxy = self.add_proxy(self.model, self.proxy_widgets)
def on_confirm(self):
if self.model.test_type == ProductQualityTest.TYPE_BOOLEAN:
self.model.set_boolean_value(self.boolean_value.read())
else:
self.model.set_range_value(self.min_value.read(),
self.max_value.read())
#
# Callbacks
#
def on_test_type__changed(self, widget):
if self.model.test_type == ProductQualityTest.TYPE_BOOLEAN:
self.boolean_value.show()
self.decimal_value.hide()
else:
self.boolean_value.hide()
self.decimal_value.show()
#
# Product Supplier Editor & Slave
#
class ProductSupplierEditor(BaseEditor):
model_name = _('Product Supplier')
model_type = ProductSupplierInfo
gladefile = 'ProductSupplierEditor'
proxy_widgets = ('base_cost', 'icms', 'notes', 'lead_time',
'minimum_purchase', 'supplier_code')
confirm_widgets = ['base_cost', 'icms', 'lead_time', 'minimum_purchase',
'supplier_code']
def _setup_widgets(self):
unit = self.model.product.sellable.unit
if unit is None:
description = _(u'Unit(s)')
else:
description = unit.description
self.unit_label.set_text(description)
self.base_cost.set_digits(sysparam.get_int('COST_PRECISION_DIGITS'))
self.base_cost.set_adjustment(
gtk.Adjustment(lower=0, upper=MAX_INT, step_incr=1))
self.minimum_purchase.set_adjustment(
gtk.Adjustment(lower=0, upper=MAX_INT, step_incr=1))
#
# BaseEditor hooks
#
def setup_proxies(self):
self._setup_widgets()
self.proxy = self.add_proxy(self.model, self.proxy_widgets)
def validate_confirm(self):
return self.base_cost.read() > 0
#
# Kiwi handlers
#
def on_minimum_purchase__validate(self, entry, value):
if not value or value <= Decimal(0):
return ValidationError(_("Minimum purchase must be greater than "
"zero."))
def on_base_cost__validate(self, entry, value):
if not value or value <= currency(0):
return ValidationError(_("Value must be greater than zero."))
def on_lead_time__validate(self, entry, value):
if value < 1:
return ValidationError(_("Lead time must be greater or equal one "
"day"))
def on_supplier_code__validate(self, entry, value):
if not value:
return
d = {self.model_type.supplier_id: self.model.supplier.id,
self.model_type.supplier_code: value}
supplier_info = self.model.check_unique_tuple_exists(d)
if supplier_info is not None:
desc = supplier_info.product.sellable.description
return ValidationError(
_("This code already exists for this supplier "
"on product '%s'") % (desc, ))
#
# Editors
#
class ProductComponentEditor(BaseEditor):
gladefile = 'ProductComponentEditor'
proxy_widgets = ['quantity', 'design_reference']
title = _(u'Product Component')
model_type = TemporaryProductComponent
def _setup_widgets(self):
self.component_description.set_text(self.model.description)
self.quantity.set_adjustment(
gtk.Adjustment(lower=0, upper=MAX_INT, step_incr=1,
page_incr=10))
# set a default quantity value for new components
if not self.model.quantity:
self.quantity.set_value(1)
#
# BaseEditor
#
def setup_proxies(self):
self._setup_widgets()
self.proxy = self.add_proxy(
self.model, ProductComponentEditor.proxy_widgets)
def validate_confirm(self):
return self.quantity.read() > 0
#
# Kiwi Callbacks
#
def on_quantity__validate(self, widget, value):
if not value > 0:
# FIXME: value < upper bound
return ValidationError(_(u'The component quantity must be '
'greater than zero.'))
class ProductEditor(SellableEditor):
model_name = _('Product')
model_type = Product
help_section = 'product'
ui_form_name = u'product'
product_widgets = ['product_type_str']
proxy_widgets = SellableEditor.proxy_widgets + product_widgets
def __init__(self, store, model=None, visual_mode=False,
product_type=Product.TYPE_COMMON, template=None, wizard=None):
"""
:param product_type: one of the available
:attr:`stoqlib.domain.product.Product.product_types` that
will be used when creating a new one
:param template: a product to use as a template when creating
a new one. Some properties will be copied from it.
"""
self._template = template
self._product_type = product_type
self._wizard = wizard
SellableEditor.__init__(self, store, model, visual_mode=visual_mode)
# This can't be done in setup_slaves() as we need to access
# self.main_dialog when setting up the quality test slave
self._add_extra_tabs()
#
# Private
#
def _add_extra_tabs(self):
for tabname, tabslave in self.get_extra_tabs():
self.add_extra_tab(tabname, tabslave)
def _disable_child_widgets(self):
"""This method disables edition of attributes gotten from parent.
"""
widgets = [self.description, self.category_combo, self.cost, self.price,
self.default_sale_cfop, self.unit_combo, self.tax_constant,
self.add_category]
for widget in widgets:
widget.set_property('sensitive', False)
#
# SellableEditor
#
def get_taxes(self):
query = (SellableTaxConstant.tax_type != int(TaxType.SERVICE))
constants = self.store.find(
SellableTaxConstant, query).order_by(SellableTaxConstant.description)
return [(c.description, c) for c in constants]
def setup_slaves(self):
super(ProductEditor, self).setup_slaves()
from stoqlib.gui.slaves.productslave import ProductInformationSlave
info_slave = ProductInformationSlave(self.store, self.model, self.db_form,
visual_mode=self.visual_mode)
self.add_extra_tab(_(u'Details'), info_slave)
def setup_proxies(self):
super(ProductEditor, self).setup_proxies()
self.add_proxy(self.model, self.product_widgets)
if self.model.parent is not None:
self._disable_child_widgets()
def get_extra_tabs(self):
from stoqlib.gui.slaves.productslave import (ProductTaxSlave,
ProductSupplierSlave,
ProductGridSlave)
extra_tabs = []
suppliers_slave = ProductSupplierSlave(self.store, self.model,
self.visual_mode)
extra_tabs.append((_(u'Suppliers'), suppliers_slave))
tax_slave = ProductTaxSlave(self.store, self.model, self.visual_mode)
extra_tabs.append((_(u'Taxes'), tax_slave))
if self.model.product_type == Product.TYPE_GRID:
# If there is a wizard, it means we are creating a new product.
# Store the selected attributes in the database
if self._wizard:
for attribute in self._wizard.attr_list:
ProductAttribute(store=self.store,
product_id=self.model.id,
attribute_id=attribute.id)
attribute_option_slave = ProductGridSlave(self.store, self.model,
self.visual_mode)
extra_tabs.append((_(u'Grid'), attribute_option_slave))
attribute_option_slave.grid_tab_alignment.connect('focus',
self._on_grid_tab_alignment__focus)
return extra_tabs
def setup_widgets(self):
self.cost.set_digits(sysparam.get_int('COST_PRECISION_DIGITS'))
self.description.grab_focus()
def create_model(self, store):
self._model_created = True
sellable = Sellable(store=store)
model = Product(store=store, sellable=sellable)
if self._product_type != Product.TYPE_WITHOUT_STOCK:
storable = Storable(product=model, store=store)
if self._product_type == Product.TYPE_BATCH:
storable.is_batch = True
elif self._product_type == Product.TYPE_WITHOUT_STOCK:
model.manage_stock = False
elif self._product_type == Product.TYPE_CONSIGNED:
model.consignment = True
elif self._product_type == Product.TYPE_GRID:
model.is_grid = True
# Configurable products should not manage stock
model.manage_stock = False
if self._template is not None:
sellable.tax_constant = self._template.sellable.tax_constant
sellable.unit = self._template.sellable.unit
sellable.category = self._template.sellable.category
model.manufacturer = self._template.manufacturer
model.brand = self._template.brand
model.family = self._template.family
model.ncm = self._template.ncm
model.icms_template = self._template.icms_template
model.ipi_template = self._template.ipi_template
for product_attr in self._template.attributes:
ProductAttribute(store=self.store,
product_id=model.id,
attribute_id=product_attr.attribute.id)
for supplier_info in self._template.suppliers:
ProductSupplierInfo(
store=self.store,
product=model,
supplier=supplier_info.supplier)
else:
sellable.tax_constant_id = sysparam.get_object_id(
'DEFAULT_PRODUCT_TAX_CONSTANT')
sellable.unit_id = sysparam.get_object_id('SUGGESTED_UNIT')
return model
def on_confirm(self):
# The user choose not to manage stock for this product, so we must
# remove the storable.
if not self.model.manage_stock and self.model.storable:
self.store.remove(self.model.storable)
# When creating a purchase, we use the supplier cost instead of the one
# in the sellable. If there is only one supplier for this product, also
# update its cost. TODO: What should we do when there is more than one
# supplier?
infos = list(self.model.get_suppliers_info())
if len(infos) == 1:
infos[0].base_cost = self.model.sellable.cost
if self.model.is_grid and self.has_changes():
self.model.update_children_info()
#
# Callbacks
#
def _on_grid_tab_alignment__focus(self, widget, value):
self.model.update_children_description()
class ProductionProductEditor(ProductEditor):
_cost_msg = _(u'Cost must be greater than the sum of the components.')
def _is_valid_cost(self, cost):
if hasattr(self, '_component_slave'):
component_cost = self.component_slave.get_component_cost()
return cost >= component_cost
return True
def create_model(self, store):
model = ProductEditor.create_model(self, store)
model.is_composed = True
return model
def get_extra_tabs(self):
from stoqlib.gui.slaves.productslave import (ProductTaxSlave,
ProductComponentSlave,
ProductQualityTestSlave)
self.component_slave = ProductComponentSlave(self.store, self.model,
self.visual_mode)
tax_slave = ProductTaxSlave(self.store, self.model, self.visual_mode)
quality_slave = ProductQualityTestSlave(self, self.store, self.model,
self.visual_mode)
return [(_(u'Components'), self.component_slave),
(_(u'Taxes'), tax_slave),
(_(u'Quality'), quality_slave),
]
def validate_confirm(self):
if not self._is_valid_cost(self.cost.read()):
info(self._cost_msg)
return False
return True
def on_cost__validate(self, widget, value):
if value <= 0:
return ValidationError(_(u'Cost cannot be zero or negative.'))
if not self._is_valid_cost(value):
return ValidationError(self._cost_msg)
class ProductStockEditor(BaseEditor):
model_name = _('Product')
model_type = Product
gladefile = 'ProductStockEditor'
def setup_slaves(self):
from stoqlib.gui.slaves.productslave import ProductInformationSlave
info_slave = ProductInformationSlave(self.store, self.model,
visual_mode=self.visual_mode)
info_slave.nfe_frame.hide()
self.attach_slave('information_holder', info_slave)
from stoqlib.gui.slaves.sellableslave import SellableDetailsSlave
details_slave = SellableDetailsSlave(self.store, self.model.sellable,
visual_mode=self.visual_mode)
self.attach_slave('details_holder', details_slave)
# Make everything aligned by putting notes_lbl on the same size group
info_slave.left_labels_group.add_widget(details_slave.notes_lbl)
class ProductManufacturerEditor(BaseEditor):
model_name = _('Manufacturer')
model_type = ProductManufacturer
confirm_widgets = ['name']
@cached_property()
def fields(self):
return collections.OrderedDict(
name=TextField(_('Name'), proxy=True, mandatory=True),
code=TextField(_('Code'), proxy=True),
)
def create_model(self, store):
return ProductManufacturer(name=u'', store=store)
def setup_proxies(self):
self.name.grab_focus()
#
# Kiwi Callbacks
#
def on_name__validate(self, widget, new_name):
if not new_name:
return ValidationError(
_("The manufacturer should have a name."))
if self.model.check_unique_value_exists(ProductManufacturer.name,
new_name):
return ValidationError(
_("The manufacturer '%s' already exists.") % new_name)
def on_code__validate(self, widget, new_code):
if self.model.check_unique_value_exists(ProductManufacturer.code,
new_code):
return ValidationError(_("The code '%s' already exists") % new_code)
def test_product(): # pragma nocover
ec = api.prepare_test()
product = ec.create_product()
run_dialog(ProductEditor,
parent=None, store=ec.store, model=product)
if __name__ == '__main__': # pragma nocover
test_product()
|
andrebellafronte/stoq
|
stoqlib/gui/editors/producteditor.py
|
Python
|
gpl-2.0
| 20,617
|
[
"VisIt"
] |
b4ecdfb741cbefb6a2bf60a10e81620930b422eb9467502f4b13c3c5a0b0cc1d
|
# coding: utf8
Paises=(
(4, 'AF', 'AFG', 93, 'Afganistán', 'Asia', '', 'AFN', 'Afgani afgano'),
(8, 'AL', 'ALB', 355, 'Albania', 'Europa', '', 'ALL', 'Lek albanés'),
(10, 'AQ', 'ATA', 672, 'Antártida', 'Antártida', '', '', ''),
(12, 'DZ', 'DZA', 213, 'Argelia', 'África', '', 'DZD', 'Dinar algerino'),
(16, 'AS', 'ASM', 1684, 'Samoa Americana', 'Oceanía', '', '', ''),
(20, 'AD', 'AND', 376, 'Andorra', 'Europa', '', 'EUR', 'Euro'),
(24, 'AO', 'AGO', 244, 'Angola', 'África', '', 'AOA', 'Kwanza angoleño'),
(28, 'AG', 'ATG', 1268, 'Antigua y Barbuda', 'América', 'El Caribe', '', ''),
(31, 'AZ', 'AZE', 994, 'Azerbaiyán', 'Asia', '', 'AZM', 'Manat azerbaiyano'),
(32, 'AR', 'ARG', 54, 'Argentina', 'América', 'América del Sur', 'ARS', 'Peso argentino'),
(36, 'AU', 'AUS', 61, 'Australia', 'Oceanía', '', 'AUD', 'Dólar australiano'),
(40, 'AT', 'AUT', 43, 'Austria', 'Europa', '', 'EUR', 'Euro'),
(44, 'BS', 'BHS', 1242, 'Bahamas', 'América', 'El Caribe', 'BSD', 'Dólar bahameño'),
(48, 'BH', 'BHR', 973, 'Bahréin', 'Asia', '', 'BHD', 'Dinar bahreiní'),
(50, 'BD', 'BGD', 880, 'Bangladesh', 'Asia', '', 'BDT', 'Taka de Bangladesh'),
(51, 'AM', 'ARM', 374, 'Armenia', 'Asia', '', 'AMD', 'Dram armenio'),
(52, 'BB', 'BRB', 1246, 'Barbados', 'América', 'El Caribe', 'BBD', 'Dólar de Barbados'),
(56, 'BE', 'BEL', 32, 'Bélgica', 'Europa', '', 'EUR', 'Euro'),
(60, 'BM', 'BMU', 1441, 'Bermudas', 'América', 'El Caribe', 'BMD', 'Dólar de Bermuda'),
(64, 'BT', 'BTN', 975, 'Bhután', 'Asia', '', 'BTN', 'Ngultrum de Bután'),
(68, 'BO', 'BOL', 591, 'Bolivia', 'América', 'América del Sur', 'BOB', 'Boliviano'),
(70, 'BA', 'BIH', 387, 'Bosnia y Herzegovina', 'Europa', '', 'BAM', 'Marco convertible de Bosnia-Herzegovina'),
(72, 'BW', 'BWA', 267, 'Botsuana', 'África', '', 'BWP', 'Pula de Botsuana'),
(74, 'BV', 'BVT', 0, 'Isla Bouvet', '', '', '', ''),
(76, 'BR', 'BRA', 55, 'Brasil', 'América', 'América del Sur', 'BRL', 'Real brasileño'),
(84, 'BZ', 'BLZ', 501, 'Belice', 'América', 'América Central', 'BZD', 'Dólar de Belice'),
(86, 'IO', 'IOT', 0, 'Territorio Británico del Océano Índico', '', '', '', ''),
(90, 'SB', 'SLB', 677, 'Islas Salomón', 'Oceanía', '', 'SBD', 'Dólar de las Islas Salomón'),
(92, 'VG', 'VGB', 1284, 'Islas Vírgenes Británicas', 'América', 'El Caribe', '', ''),
(96, 'BN', 'BRN', 673, 'Brunéi', 'Asia', '', 'BND', 'Dólar de Brunéi'),
(100, 'BG', 'BGR', 359, 'Bulgaria', 'Europa', '', 'BGN', 'Lev búlgaro'),
(104, 'MM', 'MMR', 95, 'Myanmar', 'Asia', '', 'MMK', 'Kyat birmano'),
(108, 'BI', 'BDI', 257, 'Burundi', 'África', '', 'BIF', 'Franco burundés'),
(112, 'BY', 'BLR', 375, 'Bielorrusia', 'Europa', '', 'BYR', 'Rublo bielorruso'),
(116, 'KH', 'KHM', 855, 'Camboya', 'Asia', '', 'KHR', 'Riel camboyano'),
(120, 'CM', 'CMR', 237, 'Camerún', 'África', '', '', ''),
(124, 'CA', 'CAN', 1, 'Canadá', 'América', 'América del Norte', 'CAD', 'Dólar canadiense'),
(132, 'CV', 'CPV', 238, 'Cabo Verde', 'África', '', 'CVE', 'Escudo caboverdiano'),
(136, 'KY', 'CYM', 1345, 'Islas Caimán', 'América', 'El Caribe', 'KYD', 'Dólar caimano de Islas Caimán'),
(140, 'CF', 'CAF', 236, 'República Centroafricana', 'África', '', '', ''),
(144, 'LK', 'LKA', 94, 'Sri Lanka', 'Asia', '', 'LKR', 'Rupia de Sri Lanka'),
(148, 'TD', 'TCD', 235, 'Chad', 'África', '', '', ''),
(152, 'CL', 'CHL', 56, 'Chile', 'América', 'América del Sur', 'CLP', 'Peso chileno'),
(156, 'CN', 'CHN', 86, 'China', 'Asia', '', 'CNY', 'Yuan Renminbi de China'),
(158, 'TW', 'TWN', 886, 'Taiwán', 'Asia', '', 'TWD', 'Dólar taiwanés'),
(162, 'CX', 'CXR', 61, 'Isla de Navidad', 'Oceanía', '', '', ''),
(166, 'CC', 'CCK', 61, 'Islas Cocos', 'Oceanía', '', '', ''),
(170, 'CO', 'COL', 57, 'Colombia', 'América', 'América del Sur', 'COP', 'Peso colombiano'),
(174, 'KM', 'COM', 269, 'Comoras', 'África', '', 'KMF', 'Franco comoriano de Comoras'),
(175, 'YT', 'MYT', 262, 'Mayotte', 'África', '', '', ''),
(178, 'CG', 'COG', 242, 'Congo', 'África', '', '', ''),
(180, 'CD', 'COD', 243, 'República Democrática del Congo', 'África', '', 'CDF', 'Franco congoleño'),
(184, 'CK', 'COK', 682, 'Islas Cook', 'Oceanía', '', '', ''),
(188, 'CR', 'CRI', 506, 'Costa Rica', 'América', 'América Central', 'CRC', 'Colón costarricense'),
(191, 'HR', 'HRV', 385, 'Croacia', 'Europa', '', 'HRK', 'Kuna croata'),
(192, 'CU', 'CUB', 53, 'Cuba', 'América', 'El Caribe', 'CUP', 'Peso cubano'),
(196, 'CY', 'CYP', 357, 'Chipre', 'Europa', '', 'CYP', 'Libra chipriota'),
(203, 'CZ', 'CZE', 420, 'República Checa', 'Europa', '', 'CZK', 'Koruna checa'),
(204, 'BJ', 'BEN', 229, 'Benín', 'África', '', '', ''),
(208, 'DK', 'DNK', 45, 'Dinamarca', 'Europa', '', 'DKK', 'Corona danesa'),
(212, 'DM', 'DMA', 1767, 'Dominica', 'América', 'El Caribe', '', ''),
(214, 'DO', 'DOM', 1809, 'República Dominicana', 'América', 'El Caribe', 'DOP', 'Peso dominicano'),
(218, 'EC', 'ECU', 593, 'Ecuador', 'América', 'América del Sur', '', ''),
(222, 'SV', 'SLV', 503, 'El Salvador', 'América', 'América Central', 'SVC', 'Colón salvadoreño'),
(226, 'GQ', 'GNQ', 240, 'Guinea Ecuatorial', 'África', '', '', ''),
(231, 'ET', 'ETH', 251, 'Etiopía', 'África', '', 'ETB', 'Birr etíope'),
(232, 'ER', 'ERI', 291, 'Eritrea', 'África', '', 'ERN', 'Nakfa eritreo'),
(233, 'EE', 'EST', 372, 'Estonia', 'Europa', '', 'EEK', 'Corona estonia'),
(234, 'FO', 'FRO', 298, 'Islas Feroe', 'Europa', '', '', ''),
(238, 'FK', 'FLK', 500, 'Islas Malvinas', 'América', 'América del Sur', 'FKP', 'Libra malvinense'),
(239, 'GS', 'SGS', 0, 'Islas Georgias del Sur y Sandwich del Sur', 'América', 'América del Sur', '', ''),
(242, 'FJ', 'FJI', 679, 'Fiyi', 'Oceanía', '', 'FJD', 'Dólar fijiano'),
(246, 'FI', 'FIN', 358, 'Finlandia', 'Europa', '', 'EUR', 'Euro'),
(248, 'AX', 'ALA', 0, 'Islas Gland', 'Europa', '', '', ''),
(250, 'FR', 'FRA', 33, 'Francia', 'Europa', '', 'EUR', 'Euro'),
(254, 'GF', 'GUF', 0, 'Guayana Francesa', 'América', 'América del Sur', '', ''),
(258, 'PF', 'PYF', 689, 'Polinesia Francesa', 'Oceanía', '', '', ''),
(260, 'TF', 'ATF', 0, 'Territorios Australes Franceses', '', '', '', ''),
(262, 'DJ', 'DJI', 253, 'Yibuti', 'África', '', 'DJF', 'Franco yibutiano'),
(266, 'GA', 'GAB', 241, 'Gabón', 'África', '', '', ''),
(268, 'GE', 'GEO', 995, 'Georgia', 'Europa', '', 'GEL', 'Lari georgiano'),
(270, 'GM', 'GMB', 220, 'Gambia', 'África', '', 'GMD', 'Dalasi gambiano'),
(275, 'PS', 'PSE', 0, 'Palestina', 'Asia', '', '', ''),
(276, 'DE', 'DEU', 49, 'Alemania', 'Europa', '', 'EUR', 'Euro'),
(288, 'GH', 'GHA', 233, 'Ghana', 'África', '', 'GHC', 'Cedi ghanés'),
(292, 'GI', 'GIB', 350, 'Gibraltar', 'Europa', '', 'GIP', 'Libra de Gibraltar'),
(296, 'KI', 'KIR', 686, 'Kiribati', 'Oceanía', '', '', ''),
(300, 'GR', 'GRC', 30, 'Grecia', 'Europa', '', 'EUR', 'Euro'),
(304, 'GL', 'GRL', 299, 'Groenlandia', 'América', 'América del Norte', '', ''),
(308, 'GD', 'GRD', 1473, 'Granada', 'América', 'El Caribe', '', ''),
(312, 'GP', 'GLP', 0, 'Guadalupe', 'América', 'El Caribe', '', ''),
(316, 'GU', 'GUM', 1671, 'Guam', 'Oceanía', '', '', ''),
(320, 'GT', 'GTM', 502, 'Guatemala', 'América', 'América Central', 'GTQ', 'Quetzal guatemalteco'),
(324, 'GN', 'GIN', 224, 'Guinea', 'África', '', 'GNF', 'Franco guineano'),
(328, 'GY', 'GUY', 592, 'Guyana', 'América', 'América del Sur', 'GYD', 'Dólar guyanés'),
(332, 'HT', 'HTI', 509, 'Haití', 'América', 'El Caribe', 'HTG', 'Gourde haitiano'),
(334, 'HM', 'HMD', 0, 'Islas Heard y McDonald', 'Oceanía', '', '', ''),
(336, 'VA', 'VAT', 39, 'Ciudad del Vaticano', 'Europa', '', '', ''),
(340, 'HN', 'HND', 504, 'Honduras', 'América', 'América Central', 'HNL', 'Lempira hondureño'),
(344, 'HK', 'HKG', 852, 'Hong Kong', 'Asia', '', 'HKD', 'Dólar de Hong Kong'),
(348, 'HU', 'HUN', 36, 'Hungría', 'Europa', '', 'HUF', 'Forint húngaro'),
(352, 'IS', 'ISL', 354, 'Islandia', 'Europa', '', 'ISK', 'Króna islandesa'),
(356, 'IN', 'IND', 91, 'India', 'Asia', '', 'INR', 'Rupia india'),
(360, 'ID', 'IDN', 62, 'Indonesia', 'Asia', '', 'IDR', 'Rupiah indonesia'),
(364, 'IR', 'IRN', 98, 'Irán', 'Asia', '', 'IRR', 'Rial iraní'),
(368, 'IQ', 'IRQ', 964, 'Iraq', 'Asia', '', 'IQD', 'Dinar iraquí'),
(372, 'IE', 'IRL', 353, 'Irlanda', 'Europa', '', 'EUR', 'Euro'),
(376, 'IL', 'ISR', 972, 'Israel', 'Asia', '', 'ILS', 'Nuevo shéquel israelí'),
(380, 'IT', 'ITA', 39, 'Italia', 'Europa', '', 'EUR', 'Euro'),
(384, 'CI', 'CIV', 225, 'Costa de Marfil', 'África', '', '', ''),
(388, 'JM', 'JAM', 1876, 'Jamaica', 'América', 'El Caribe', 'JMD', 'Dólar jamaicano'),
(392, 'JP', 'JPN', 81, 'Japón', 'Asia', '', 'JPY', 'Yen japonés'),
(398, 'KZ', 'KAZ', 7, 'Kazajstán', 'Asia', '', 'KZT', 'Tenge kazajo'),
(400, 'JO', 'JOR', 962, 'Jordania', 'Asia', '', 'JOD', 'Dinar jordano'),
(404, 'KE', 'KEN', 254, 'Kenia', 'África', '', 'KES', 'Chelín keniata'),
(408, 'KP', 'PRK', 850, 'Corea del Norte', 'Asia', '', 'KPW', 'Won norcoreano'),
(410, 'KR', 'KOR', 82, 'Corea del Sur', 'Asia', '', 'KRW', 'Won surcoreano'),
(414, 'KW', 'KWT', 965, 'Kuwait', 'Asia', '', 'KWD', 'Dinar kuwaití'),
(417, 'KG', 'KGZ', 996, 'Kirguistán', 'Asia', '', 'KGS', 'Som kirguís de Kirguistán'),
(418, 'LA', 'LAO', 856, 'Laos', 'Asia', '', 'LAK', 'Kip lao'),
(422, 'LB', 'LBN', 961, 'Líbano', 'Asia', '', 'LBP', 'Libra libanesa'),
(426, 'LS', 'LSO', 266, 'Lesotho', 'África', '', 'LSL', 'Loti lesotense'),
(428, 'LV', 'LVA', 371, 'Letonia', 'Europa', '', 'LVL', 'Lat letón'),
(430, 'LR', 'LBR', 231, 'Liberia', 'África', '', 'LRD', 'Dólar liberiano'),
(434, 'LY', 'LBY', 218, 'Libia', 'África', '', 'LYD', 'Dinar libio'),
(438, 'LI', 'LIE', 423, 'Liechtenstein', 'Europa', '', '', ''),
(440, 'LT', 'LTU', 370, 'Lituania', 'Europa', '', 'LTL', 'Litas lituano'),
(442, 'LU', 'LUX', 352, 'Luxemburgo', 'Europa', '', 'EUR', 'Euro'),
(446, 'MO', 'MAC', 853, 'Macao', 'Asia', '', 'MOP', 'Pataca de Macao'),
(450, 'MG', 'MDG', 261, 'Madagascar', 'África', '', 'MGA', 'Ariary malgache'),
(454, 'MW', 'MWI', 265, 'Malaui', 'África', '', 'MWK', 'Kwacha malauiano'),
(458, 'MY', 'MYS', 60, 'Malasia', 'Asia', '', 'MYR', 'Ringgit malayo'),
(462, 'MV', 'MDV', 960, 'Maldivas', 'Asia', '', 'MVR', 'Rufiyaa maldiva'),
(466, 'ML', 'MLI', 223, 'Malí', 'África', '', '', ''),
(470, 'MT', 'MLT', 356, 'Malta', 'Europa', '', 'MTL', 'Lira maltesa'),
(474, 'MQ', 'MTQ', 0, 'Martinica', 'América', 'El Caribe', '', ''),
(478, 'MR', 'MRT', 222, 'Mauritania', 'África', '', 'MRO', 'Ouguiya mauritana'),
(480, 'MU', 'MUS', 230, 'Mauricio', 'África', '', 'MUR', 'Rupia mauricia'),
(484, 'MX', 'MEX', 52, 'México', 'América', 'América del Norte', 'MXN', 'Peso mexicano'),
(492, 'MC', 'MCO', 377, 'Mónaco', 'Europa', '', '', ''),
(496, 'MN', 'MNG', 976, 'Mongolia', 'Asia', '', 'MNT', 'Tughrik mongol'),
(498, 'MD', 'MDA', 373, 'Moldavia', 'Europa', '', 'MDL', 'Leu moldavo'),
(499, 'ME', 'MNE', 382, 'Montenegro', 'Europa', '', '', ''),
(500, 'MS', 'MSR', 1664, 'Montserrat', 'América', 'El Caribe', '', ''),
(504, 'MA', 'MAR', 212, 'Marruecos', 'África', '', 'MAD', 'Dirham marroquí'),
(508, 'MZ', 'MOZ', 258, 'Mozambique', 'África', '', 'MZM', 'Metical mozambiqueño'),
(512, 'OM', 'OMN', 968, 'Omán', 'Asia', '', 'OMR', 'Rial omaní'),
(516, 'NA', 'NAM', 264, 'Namibia', 'África', '', 'NAD', 'Dólar namibio'),
(520, 'NR', 'NRU', 674, 'Nauru', 'Oceanía', '', '', ''),
(524, 'NP', 'NPL', 977, 'Nepal', 'Asia', '', 'NPR', 'Rupia nepalesa'),
(528, 'NL', 'NLD', 31, 'Países Bajos', 'Europa', '', 'EUR', 'Euro'),
(530, 'AN', 'ANT', 599, 'Antillas Holandesas', 'América', 'El Caribe', 'ANG', 'Florín antillano neerlandés'),
(533, 'AW', 'ABW', 297, 'Aruba', 'América', 'El Caribe', 'AWG', 'Florín arubeño'),
(540, 'NC', 'NCL', 687, 'Nueva Caledonia', 'Oceanía', '', '', ''),
(548, 'VU', 'VUT', 678, 'Vanuatu', 'Oceanía', '', 'VUV', 'Vatu vanuatense'),
(554, 'NZ', 'NZL', 64, 'Nueva Zelanda', 'Oceanía', '', 'NZD', 'Dólar neozelandés'),
(558, 'NI', 'NIC', 505, 'Nicaragua', 'América', 'América Central', 'NIO', 'Córdoba nicaragüense'),
(562, 'NE', 'NER', 227, 'Níger', 'África', '', '', ''),
(566, 'NG', 'NGA', 234, 'Nigeria', 'África', '', 'NGN', 'Naira nigeriana'),
(570, 'NU', 'NIU', 683, 'Niue', 'Oceanía', '', '', ''),
(574, 'NF', 'NFK', 0, 'Isla Norfolk', 'Oceanía', '', '', ''),
(578, 'NO', 'NOR', 47, 'Noruega', 'Europa', '', 'NOK', 'Corona noruega'),
(580, 'MP', 'MNP', 1670, 'Islas Marianas del Norte', 'Oceanía', '', '', ''),
(581, 'UM', 'UMI', 0, 'Islas Ultramarinas de Estados Unidos', '', '', '', ''),
(583, 'FM', 'FSM', 691, 'Micronesia', 'Oceanía', '', '', ''),
(584, 'MH', 'MHL', 692, 'Islas Marshall', 'Oceanía', '', '', ''),
(585, 'PW', 'PLW', 680, 'Palaos', 'Oceanía', '', '', ''),
(586, 'PK', 'PAK', 92, 'Pakistán', 'Asia', '', 'PKR', 'Rupia pakistaní'),
(591, 'PA', 'PAN', 507, 'Panamá', 'América', 'América Central', 'PAB', 'Balboa panameña'),
(598, 'PG', 'PNG', 675, 'Papúa Nueva Guinea', 'Oceanía', '', 'PGK', 'Kina de Papúa Nueva Guinea'),
(600, 'PY', 'PRY', 595, 'Paraguay', 'América', 'América del Sur', 'PYG', 'Guaraní paraguayo'),
(604, 'PE', 'PER', 51, 'Perú', 'América', 'América del Sur', 'PEN', 'Nuevo sol peruano'),
(608, 'PH', 'PHL', 63, 'Filipinas', 'Asia', '', 'PHP', 'Peso filipino'),
(612, 'PN', 'PCN', 870, 'Islas Pitcairn', 'Oceanía', '', '', ''),
(616, 'PL', 'POL', 48, 'Polonia', 'Europa', '', 'PLN', 'zloty polaco'),
(620, 'PT', 'PRT', 351, 'Portugal', 'Europa', '', 'EUR', 'Euro'),
(624, 'GW', 'GNB', 245, 'Guinea-Bissau', 'África', '', '', ''),
(626, 'TL', 'TLS', 670, 'Timor Oriental', 'Asia', '', '', ''),
(630, 'PR', 'PRI', 1, 'Puerto Rico', 'América', 'El Caribe', '', ''),
(634, 'QA', 'QAT', 974, 'Qatar', 'Asia', '', 'QAR', 'Rial qatarí'),
(638, 'RE', 'REU', 262, 'Reunión', 'África', '', '', ''),
(642, 'RO', 'ROU', 40, 'Rumania', 'Europa', '', 'RON', 'Leu rumano'),
(643, 'RU', 'RUS', 7, 'Rusia', 'Asia', '', 'RUB', 'Rublo ruso'),
(646, 'RW', 'RWA', 250, 'Ruanda', 'África', '', 'RWF', 'Franco ruandés'),
(654, 'SH', 'SHN', 290, 'Santa Helena', 'África', '', 'SHP', 'Libra de Santa Helena'),
(659, 'KN', 'KNA', 1869, 'San Cristóbal y Nieves', 'América', 'El Caribe', '', ''),
(660, 'AI', 'AIA', 1264, 'Anguila', 'América', 'El Caribe', '', ''),
(662, 'LC', 'LCA', 1758, 'Santa Lucía', 'América', 'El Caribe', '', ''),
(666, 'PM', 'SPM', 508, 'San Pedro y Miquelón', 'América', 'América del Norte', '', ''),
(670, 'VC', 'VCT', 1784, 'San Vicente y las Granadinas', 'América', 'El Caribe', '', ''),
(674, 'SM', 'SMR', 378, 'San Marino', 'Europa', '', '', ''),
(678, 'ST', 'STP', 239, 'Santo Tomé y Príncipe', 'África', '', 'STD', 'Dobra de Santo Tomé y Príncipe'),
(682, 'SA', 'SAU', 966, 'Arabia Saudí', 'Asia', '', 'SAR', 'Riyal saudí'),
(686, 'SN', 'SEN', 221, 'Senegal', 'África', '', '', ''),
(688, 'RS', 'SRB', 381, 'Serbia', 'Europa', '', '', ''),
(690, 'SC', 'SYC', 248, 'Seychelles', 'África', '', 'SCR', 'Rupia de Seychelles'),
(694, 'SL', 'SLE', 232, 'Sierra Leona', 'África', '', 'SLL', 'Leone de Sierra Leona'),
(702, 'SG', 'SGP', 65, 'Singapur', 'Asia', '', 'SGD', 'Dólar de Singapur'),
(703, 'SK', 'SVK', 421, 'Eslovaquia', 'Europa', '', 'SKK', 'Corona eslovaca'),
(704, 'VN', 'VNM', 84, 'Vietnam', 'Asia', '', 'VND', 'Dong vietnamita'),
(705, 'SI', 'SVN', 386, 'Eslovenia', 'Europa', '', '', ''),
(706, 'SO', 'SOM', 252, 'Somalia', 'África', '', 'SOS', 'Chelín somalí'),
(710, 'ZA', 'ZAF', 27, 'Sudáfrica', 'África', '', 'ZAR', 'Rand sudafricano'),
(716, 'ZW', 'ZWE', 263, 'Zimbabue', 'África', '', 'ZWL', 'Dólar zimbabuense'),
(724, 'ES', 'ESP', 34, 'España', 'Europa', '', 'EUR', 'Euro'),
(732, 'EH', 'ESH', 0, 'Sahara Occidental', 'África', '', '', ''),
(736, 'SD', 'SDN', 249, 'Sudán', 'África', '', 'SDD', 'Dinar sudanés'),
(740, 'SR', 'SUR', 597, 'Surinam', 'América', 'América del Sur', 'SRD', 'Dólar surinamés'),
(744, 'SJ', 'SJM', 0, 'Svalbard y Jan Mayen', 'Europa', '', '', ''),
(748, 'SZ', 'SWZ', 268, 'Suazilandia', 'África', '', 'SZL', 'Lilangeni suazi'),
(752, 'SE', 'SWE', 46, 'Suecia', 'Europa', '', 'SEK', 'Corona sueca'),
(756, 'CH', 'CHE', 41, 'Suiza', 'Europa', '', 'CHF', 'Franco suizo'),
(760, 'SY', 'SYR', 963, 'Siria', 'Asia', '', 'SYP', 'Libra siria'),
(762, 'TJ', 'TJK', 992, 'Tayikistán', 'Asia', '', 'TJS', 'Somoni tayik de Tayikistán'),
(764, 'TH', 'THA', 66, 'Tailandia', 'Asia', '', 'THB', 'Baht tailandés'),
(768, 'TG', 'TGO', 228, 'Togo', 'África', '', '', ''),
(772, 'TK', 'TKL', 690, 'Tokelau', 'Oceanía', '', '', ''),
(776, 'TO', 'TON', 676, 'Tonga', 'Oceanía', '', 'TOP', 'Pa''anga tongano'),
(780, 'TT', 'TTO', 1868, 'Trinidad y Tobago', 'América', 'El Caribe', 'TTD', 'Dólar de Trinidad y Tobago'),
(784, 'AE', 'ARE', 971, 'Emiratos Árabes Unidos', 'Asia', '', 'AED', 'Dirham de los Emiratos Árabes Unidos'),
(788, 'TN', 'TUN', 216, 'Túnez', 'África', '', 'TND', 'Dinar tunecino'),
(792, 'TR', 'TUR', 90, 'Turquía', 'Asia', '', 'TRY', 'Lira turca'),
(795, 'TM', 'TKM', 993, 'Turkmenistán', 'Asia', '', 'TMM', 'Manat turcomano'),
(796, 'TC', 'TCA', 1649, 'Islas Turcas y Caicos', 'América', 'El Caribe', '', ''),
(798, 'TV', 'TUV', 688, 'Tuvalu', 'Oceanía', '', '', ''),
(800, 'UG', 'UGA', 256, 'Uganda', 'África', '', 'UGX', 'Chelín ugandés'),
(804, 'UA', 'UKR', 380, 'Ucrania', 'Europa', '', 'UAH', 'Grivna ucraniana'),
(807, 'MK', 'MKD', 389, 'Macedonia', 'Europa', '', 'MKD', 'Denar macedonio'),
(818, 'EG', 'EGY', 20, 'Egipto', 'África', '', 'EGP', 'Libra egipcia'),
(826, 'GB', 'GBR', 44, 'Reino Unido', 'Europa', '', 'GBP', 'Libra esterlina libra de Gran Bretaña'),
(834, 'TZ', 'TZA', 255, 'Tanzania', 'África', '', 'TZS', 'Chelín tanzano'),
(840, 'US', 'USA', 1, 'Estados Unidos', 'América', 'América del Norte', 'USD', 'Dólar estadounidense'),
(850, 'VI', 'VIR', 1340, 'Islas Vírgenes de los Estados Unidos', 'América', 'El Caribe', '', ''),
(854, 'BF', 'BFA', 226, 'Burkina Faso', 'África', '', '', ''),
(858, 'UY', 'URY', 598, 'Uruguay', 'América', 'América del Sur', 'UYU', 'Peso uruguayo'),
(860, 'UZ', 'UZB', 998, 'Uzbekistán', 'Asia', '', 'UZS', 'Som uzbeko'),
(862, 'VE', 'VEN', 58, 'Venezuela', 'América', 'América del Sur', 'VEB', 'Bolívar venezolano'),
(876, 'WF', 'WLF', 681, 'Wallis y Futuna', 'Oceanía', '', '', ''),
(882, 'WS', 'WSM', 685, 'Samoa', 'Oceanía', '', 'WST', 'Tala samoana'),
(887, 'YE', 'YEM', 967, 'Yemen', 'Asia', '', 'YER', 'Rial yemení de Yemen'),
(894, 'ZM', 'ZMB', 260, 'Zambia', 'África', '', 'ZMK', 'Kwacha zambiano')
)
|
jredrejo/bancal
|
web2py/applications/bancal/modules/paises.py
|
Python
|
gpl-3.0
| 18,468
|
[
"BWA"
] |
8a44c74b8b79dd6dd5871e988c54df637a6b7ce55741004d6e1c03c9fd543307
|
#!/usr/bin/env python
import sys
import http.client
import requests
import glob, os
import os.path, time, datetime
import re
class CodeSync:
ErrorCounter = 0
Executed = []
def query(server, port, uri, projects):
conn = http.client.HTTPConnection(server, port)
conn.request("bGET", uri)
r = conn.getresponse()
content = r.getheader("Content-type")
if content == "application/octet-stream":
return r.read()
else:
for line in r.read().decode("utf-8").splitlines():
parts = line.split("\t")
localObject = {}
# Empty line
if parts[0] == "":
continue
# The line is defining the hosting server
if parts[0] == "S":
continue
# The line is defining the version to be used
elif parts[0] == "V":
if parts[1] != 'v2':
print("Operation aborted:")
print("CodeSync server is running CodeSync."+parts[1]+" which is not supported locally.")
return 0
else:
continue
# The line is defining a pre-built query to sync a folder
elif parts[0] == "D" or parts[0] == "D+":
localObject = CodeSync.objectFromQuery("D", parts[2], projects)
# The line is defining a pre-built query to sync a file
elif parts[0] == "F" or parts[0] == "F+":
localObject = CodeSync.objectFromQuery("F", parts[2], projects)
# Pulling from server, locally creating/updating
dt = time.mktime(datetime.datetime.strptime(parts[1], "%Y-%m-%d %H:%M:%S").timetuple())
if (not localObject['exists']) or (localObject['lastedit'] < dt):
print("PULL-" + localObject['type'] + "\t" + localObject['path'])
if(localObject['type'] == "D"):
try:
os.makedirs(localObject['path'])
except FileExistsError:
pass
else:
pfur = uri.split("/")
pfur = "/" + pfur[1] + "/" + pfur[2] + "/pull/file/" + localObject['serverPath']
fcontent = CodeSync.query(server, port, pfur, projects)
fh = open(localObject['path'], "wb")
fh.write(fcontent)
fh.close()
# Pushing to server
elif localObject['lastedit'] > dt:
pfur = uri.split("/")
fullObj = "folder"
if localObject["type"] == "F" or localObject["type"] == "F+":
fullObj = "file"
pfur = "/" + pfur[1] + "/" + pfur[2] + "/push/" + fullObj + "/" + localObject['serverPath']
CodeSync.pushObject(server, port, localObject['type'], pfur, localObject['path'])
try:
CodeSync.Executed.append([localObject['type'], localObject['serverPath'], localObject['path']])
except:
pass
def pushObject(server, port, otype, query, localPath):
success = False
error = ""
if otype == "D":
r = requests.post("http://" + server + query)
if "PUSH-SUCCESSFUL" in r.text:
success = True
else:
error = r.text.split('\r\n')
if otype == "F":
files = {'file': open(localPath, 'rb')}
r = requests.post("http://" + server + query, files=files)
if "PUSH-SUCCESSFUL" in r.text:
success = True
print(r.text)
else:
error = r.text.split('\r\n')
if success:
print("PUSH-" + otype + "\t" + localPath)
else:
CodeSync.ErrorCounter += 1
print("*ERR-" + otype + "\t Failed to push " + localPath)
print("\t FAILED QUERY: " + "http://" + server + query)
for line in error:
print("\t " + str(line))
def objectFromQuery(otype, query, projectsToPath):
result = {
'code': 0,
'type': '',
'path': '',
'serverPath': '',
'project': '',
'extension': '',
'exists': False,
'lastedit': 0,
}
if otype == "D":
result['path'] = re.search('(.*)/pull/folder/(.*)', query).group(2)
result['type'] = 'D'
elif otype == "F":
result['path'] = re.search('(.*)/pull/file/(.*)', query).group(2)
result['type'] = 'F'
result['extension'] = result['path'].split(".")[1]
else:
return { 'code': 1 }
result['serverPath'] = result['path']
result['project'] = result['path'].split("/")[0]
result['path'] = projectsToPath[result['project']] + "/" + result['path']
result['exists'] = os.path.exists(result['path'])
if result['exists']:
result['lastedit'] = os.path.getmtime(result['path'])
return result
class ConsoleInterface:
version = 0
device = "device"
server = "Unknown"
defaultProjectsRoot = "/"
projects = {}
srvPort = 80
mode = ""
obj = ""
op = []
queries = []
def __init__(self):
self.version = 2
self.loadCfg()
def loadCfg(self):
cfg = open('codesync.cfg')
for line in cfg.read().splitlines():
fields = line.split("\t")
if fields[0] == "server":
self.server = fields[1]
elif fields[0] == "device":
self.device = fields[1]
elif fields[0] == "default-projects-root":
self.defaultProjectsRoot = fields[1]
elif fields[0] == "project":
self.projects[fields[1]] = fields[2]
def process(self, args):
last = ''
error = 0
if len(args) <= 1:
self.showHelp()
for arg in args:
if error > 0:
break
if arg == 'codesync.py' or arg == 'codesync':
continue
if arg == '-v2':
# Force version to version2
self.version = 2
elif arg == '-port':
# Set TCP port for HTTP connection
last = arg
elif arg == '-op':
# Perform a query manually
last = arg
elif arg == '-sync':
# Perform some kind of synchronization
last = arg
elif arg == '--help':
# Show help
self.showHelp()
break
else:
# Process as input value rather than symbol
if last == '':
error = 1
elif last == '-port':
self.srvPort = arg
elif last == '-op':
self.mode = "Manual"
self.op.append(arg)
elif last == '-sync':
if arg == 'all':
self.mode = 'SyncAll'
elif arg == 'project':
last = '-sync project'
elif arg == 'folder':
last = '-sync folder'
elif arg == 'file':
last = '-sync file'
elif last == '-sync project':
self.mode = 'SyncProject'
self.obj = arg
elif last == '-sync folder':
self.mode = 'SyncFolder'
self.obj = arg
elif last == '-sync file':
self.mode = 'SyncFile'
self.obj = arg
else:
error = 1
if error > 0:
self.showHelp(True)
else:
self.buildQuery()
self.launchOps()
def buildQuery(self):
if self.mode == "Manual":
return self.op
elif self.mode == "SyncAll":
return self.createQuerySyncAll()
elif self.mode == "SyncProject":
return self.createQuerySyncProject(self.obj)
elif self.mode == "SyncFolder":
return self.createQuerySyncFolder(self.obj)
elif self.mode == "SyncFile":
return self.createQuerySyncFile(self.obj)
else:
return 1000
def createQuerySyncAll(self):
lastResult = 0
for key in self.projects:
value = self.projects[key]
currentResult = self.createQuerySyncProject(key)
if currentResult > lastResult:
lastResult = currentResult
return lastResult
def createQuerySyncProject(self, name):
self.op.append("/pull/project/" + name)
return 0
def createQuerySyncFolder(self, name):
self.op.append("/pull/folder/" + name)
return 0
def createQuerySyncFile(self, name):
self.op.append("/pull/file/" + name)
return 0
def launchOps(self):
devstr = "/v" + str(self.version) + "/device:" + self.device
# Looping through root operations
for operation in self.op:
CodeSync.query(self.server, self.srvPort, devstr + operation, self.projects)
def showHelp(self, badInput=False):
if badInput:
print("Unrecognized syntax. Here's the help:")
print("")
print("CodeSync v" + str(self.version) + " client is working OK")
print("For more information about CodeSync please visit: ")
print(" * http://alessandromaggio.com/project/codesync/")
print(" * http://github.com/alessandromaggio/codesync")
print("")
print("")
print("Quick reference: ")
print("python codesync.py [-v2] { -op url |")
print(" -sync {all | project name |")
print(" folder path |")
print(" file path }")
print(" }")
print("")
print(" -v2 Force CodeSync to work with version 2")
print(" -op url Execute a custom query (the url)")
print(" -sync")
print(" project Synchronize an entire project")
print(" folder Synchronize a folder")
print(" file Synchronize a single file")
print("")
print("You can tune default settings by editing codesync.cfg")
try:
console = ConsoleInterface()
console.process(sys.argv)
except Exception as e:
print("CodeSync experienced an error")
print("The exception details are: " + str(e))
|
alessandromaggio/codesync
|
client/codesync.py
|
Python
|
gpl-3.0
| 11,269
|
[
"VisIt"
] |
e3629db03eef3af8b377711f451f26b0058f8efcac1feba00c2909a41e7e1c38
|
import scipy.stats
import numpy
def poly_transform(u):
v = numpy.copy(u)
v[0] = 10**(v[0]*10 - 5)
v[1:] = 20*v[1:] - 10
return v
class PolyModel(object):
"""
A gaussian (or other) distribution of width 'syserror' around a polynomial
of the given degree.
degree = 1:
y is distributed independently of x.
The parameter 'a' specifies the location of the distribution.
degree = 2:
y is correlated with x according to a line, whose parameters are
y ~ N(a + b*x; syserror)
degree = 3:
same as above with a quadratic function.
"""
def __init__(self, degree, rv_type = scipy.stats.norm):
self.degree = degree
self.parameter_names = ['syserror'] + ['a', 'b', 'c', 'd', 'e', 'f', 'g'][:degree]
self.chains = None
def poly_model(v):
sys_error = v[0]
params = v[1:]
poly = numpy.poly1d(params)
def model(x):
y_model = poly(x)
rv_sys = rv_type(y_model, sys_error)
return rv_sys
return model
def poly_likelihood(v):
model = poly_model(v)
like = 0
for k, c in self.chains:
x = c[:,0]
y = c[:,1]
w = c[:,2] if c.shape[1] > 2 else 1
rv = model(x)
prob = (rv.pdf(y) * w).mean()
if prob == 0:
print 'parameters %s ruled out by object %s' % (str(v), k)
return -1e100
like += numpy.log(prob)
return like
self.transform = poly_transform
self.loglikelihood = poly_likelihood
self.model = poly_model
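# --- Illustrative usage sketch (not part of the original module) ------------
# Assumes `chains` is an iterable of (label, array) pairs whose columns are
# x, y and an optional per-sample weight, which is what poly_likelihood above
# consumes. All data below is synthetic.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    fake_chains = [
        ('obj-a', numpy.column_stack([rng.normal(1.0, 0.1, 200),
                                      rng.normal(2.0, 0.3, 200)])),
        ('obj-b', numpy.column_stack([rng.normal(3.0, 0.1, 200),
                                      rng.normal(4.0, 0.3, 200)])),
    ]
    m = PolyModel(degree=2)              # line plus Gaussian scatter
    m.chains = fake_chains
    u = numpy.array([0.5, 0.55, 0.55])   # point on the unit cube
    v = m.transform(u)                   # -> [syserror, slope, intercept]
    print(m.loglikelihood(v))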
|
JohannesBuchner/syscorr
|
syscorr/poly.py
|
Python
|
agpl-3.0
| 1,405
|
[
"Gaussian"
] |
62ca768bf40662b0066c94a5010faf4db4ecc319a79a558346d7ca1ef8b0b621
|
import json
import requests
from django.conf import settings
from django.db.models.signals import post_save, pre_delete, post_delete
from django.dispatch import receiver
from karrot.conversations.models import Conversation
from karrot.groups import roles, stats
from karrot.groups.emails import prepare_user_became_editor_email, prepare_user_lost_editor_role_email
from karrot.groups.models import Group, GroupMembership, Trust
from karrot.history.models import History, HistoryTypus
from karrot.utils import frontend_urls
@receiver(post_save, sender=Group)
def group_created(sender, instance, created, **kwargs):
"""Ensure every group has a conversation."""
if not created:
return
group = instance
conversation = Conversation.objects.get_or_create_for_target(group)
conversation.sync_users(group.members.all())
@receiver(pre_delete, sender=Group)
def group_deleted(sender, instance, **kwargs):
"""Delete the conversation when the group is deleted."""
group = instance
conversation = Conversation.objects.get_for_target(group)
if conversation:
conversation.delete()
@receiver(post_save, sender=GroupMembership)
def group_member_added(sender, instance, created, **kwargs):
if not created:
return
group = instance.group
user = instance.user
conversation = Conversation.objects.get_or_create_for_target(group)
conversation.join(user, muted=False)
stats.group_joined(group)
@receiver(pre_delete, sender=GroupMembership)
def group_member_removed(sender, instance, **kwargs):
group = instance.group
user = instance.user
conversation = Conversation.objects.get_for_target(group)
if conversation:
conversation.leave(user)
stats.group_left(group)
@receiver(post_save, sender=Trust)
def trust_given(sender, instance, created, **kwargs):
if not created:
return
membership = instance.membership
relevant_trust = Trust.objects.filter(membership=membership)
trust_threshold = membership.group.trust_threshold_for_newcomer()
if relevant_trust.count() >= trust_threshold and roles.GROUP_EDITOR not in membership.roles:
membership.add_roles([roles.GROUP_EDITOR])
membership.save()
History.objects.create(
typus=HistoryTypus.MEMBER_BECAME_EDITOR,
group=membership.group,
users=[membership.user],
payload={
'threshold': trust_threshold,
},
)
prepare_user_became_editor_email(user=membership.user, group=membership.group).send()
stats.member_became_editor(membership.group)
stats.trust_given(membership.group)
@receiver(post_delete, sender=Trust)
def trust_revoked(sender, instance, **kwargs):
membership = instance.membership
relevant_trust = Trust.objects.filter(membership=membership)
trust_threshold = membership.group.trust_threshold_for_newcomer()
if relevant_trust.count() < trust_threshold and roles.GROUP_EDITOR in membership.roles:
membership.remove_roles([roles.GROUP_EDITOR])
membership.save()
History.objects.create(
typus=HistoryTypus.USER_LOST_EDITOR_ROLE,
group=membership.group,
users=[membership.user],
payload={
'threshold': trust_threshold,
},
)
prepare_user_lost_editor_role_email(user=membership.user, group=membership.group).send()
stats.user_lost_editor_role(membership.group)
stats.trust_revoked(membership.group)
@receiver(pre_delete, sender=GroupMembership)
def remove_trust(sender, instance, **kwargs):
membership = instance
Trust.objects.filter(given_by=membership.user, membership__group=membership.group).delete()
@receiver(post_save, sender=Group)
def notify_chat_on_group_creation(sender, instance, created, **kwargs):
"""Send notifications to admin chat"""
if not created:
return
group = instance
webhook_url = getattr(settings, 'ADMIN_CHAT_WEBHOOK', None)
if webhook_url is None:
return
group_url = frontend_urls.group_preview_url(group)
message_data = {
'text': f':tada: A new group has been created on **{settings.SITE_NAME}**! [Visit {group.name}]({group_url})',
}
response = requests.post(webhook_url, data=json.dumps(message_data), headers={'Content-Type': 'application/json'})
response.raise_for_status()
|
yunity/yunity-core
|
karrot/groups/receivers.py
|
Python
|
agpl-3.0
| 4,447
|
[
"VisIt"
] |
a776caa2a6a0c03650cc380fb83bfc5edc66fb9d2385ea0d5afabe9c40dd8eff
|
# -*-python-*-
#
# Copyright (C) 1999-2006 The ViewCVS Group. All Rights Reserved.
#
# By using this file, you agree to the terms and conditions set forth in
# the LICENSE.html file which can be found at the top level of the ViewVC
# distribution or at http://viewvc.org/license-1.html.
#
# For more information, visit http://viewvc.org/
#
# -----------------------------------------------------------------------
#
# idiff: display differences between files highlighting intraline changes
#
# -----------------------------------------------------------------------
from __future__ import generators
import difflib
import sys
import re
import ezt
import cgi
def sidebyside(fromlines, tolines, context):
"""Generate side by side diff"""
### for some reason mdiff chokes on \n's in input lines
line_strip = lambda line: line.rstrip("\n")
fromlines = map(line_strip, fromlines)
tolines = map(line_strip, tolines)
gap = False
for fromdata, todata, flag in difflib._mdiff(fromlines, tolines, context):
if fromdata is None and todata is None and flag is None:
gap = True
else:
from_item = _mdiff_split(flag, fromdata)
to_item = _mdiff_split(flag, todata)
yield _item(gap=ezt.boolean(gap), columns=(from_item, to_item))
gap = False
_re_mdiff = re.compile("\0([+-^])(.*?)\1")
def _mdiff_split(flag, (line_number, text)):
"""Break up row from mdiff output into segments"""
segments = []
pos = 0
while True:
m = _re_mdiff.search(text, pos)
if not m:
segments.append(_item(text=cgi.escape(text[pos:]), type=None))
break
if m.start() > pos:
segments.append(_item(text=cgi.escape(text[pos:m.start()]), type=None))
if m.group(1) == "+":
segments.append(_item(text=cgi.escape(m.group(2)), type="add"))
elif m.group(1) == "-":
segments.append(_item(text=cgi.escape(m.group(2)), type="remove"))
elif m.group(1) == "^":
segments.append(_item(text=cgi.escape(m.group(2)), type="change"))
pos = m.end()
return _item(segments=segments, line_number=line_number)
def unified(fromlines, tolines, context):
"""Generate unified diff"""
diff = difflib.Differ().compare(fromlines, tolines)
lastrow = None
for row in _trim_context(diff, context):
if row[0].startswith("? "):
yield _differ_split(lastrow, row[0])
lastrow = None
else:
if lastrow:
yield _differ_split(lastrow, None)
lastrow = row
if lastrow:
yield _differ_split(lastrow, None)
def _trim_context(lines, context_size):
"""Trim context lines that don't surround changes from Differ results
yields (line, leftnum, rightnum, gap) tuples"""
# circular buffer to hold context lines
context_buffer = [None] * (context_size or 0)
context_start = context_len = 0
# number of context lines left to print after encountering a change
context_owed = 0
# current line numbers
leftnum = rightnum = 0
# whether context lines have been dropped
gap = False
for line in lines:
row = save = None
if line.startswith("- "):
leftnum = leftnum + 1
row = line, leftnum, None
context_owed = context_size
elif line.startswith("+ "):
rightnum = rightnum + 1
row = line, None, rightnum
context_owed = context_size
else:
if line.startswith(" "):
leftnum = leftnum + 1
rightnum = rightnum + 1
if context_owed > 0:
context_owed = context_owed - 1
elif context_size is not None:
save = True
row = line, leftnum, rightnum
if save:
# don't yield row right away, store it in buffer
context_buffer[(context_start + context_len) % context_size] = row
if context_len == context_size:
context_start = (context_start + 1) % context_size
gap = True
else:
context_len = context_len + 1
else:
# yield row, but first drain stuff in buffer
context_len == context_size
while context_len:
yield context_buffer[context_start] + (gap,)
gap = False
context_start = (context_start + 1) % context_size
context_len = context_len - 1
yield row + (gap,)
gap = False
_re_differ = re.compile(r"[+-^]+")
def _differ_split(row, guide):
"""Break row into segments using guide line"""
line, left_number, right_number, gap = row
if left_number and right_number:
type = ""
elif left_number:
type = "remove"
elif right_number:
type = "add"
segments = []
pos = 2
if guide:
assert guide.startswith("? ")
for m in _re_differ.finditer(guide, pos):
if m.start() > pos:
segments.append(_item(text=cgi.escape(line[pos:m.start()]), type=None))
segments.append(_item(text=cgi.escape(line[m.start():m.end()]),
type="change"))
pos = m.end()
segments.append(_item(text=cgi.escape(line[pos:]), type=None))
return _item(gap=ezt.boolean(gap), type=type, segments=segments,
left_number=left_number, right_number=right_number)
class _item:
def __init__(self, **kw):
vars(self).update(kw)
try:
### Using difflib._mdiff function here was the easiest way of obtaining
### intraline diffs for use in ViewVC, but it doesn't exist prior to
### Python 2.4 and is not part of the public difflib API, so for now
### fall back if it doesn't exist.
difflib._mdiff
except AttributeError:
sidebyside = None
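# --- Illustrative usage sketch (not part of the original module) ------------
# How the unified() generator above is typically consumed; the input lines
# are made up. It runs in the same environment the module already assumes
# (Python 2 with the ezt module importable).
if __name__ == '__main__':
    _from = ["one\n", "two\n", "three\n"]
    _to = ["one\n", "2\n", "three\n"]
    for _row in unified(_from, _to, context=1):
        _text = "".join([_seg.text for _seg in _row.segments])
        sys.stdout.write("%-6s %s" % (_row.type or "ctx", _text))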
|
foresthz/fusion5.1
|
www/scm/viewvc/lib/idiff.py
|
Python
|
gpl-2.0
| 5,475
|
[
"VisIt"
] |
07b26cc410fd10a707d66313e639697858fbc589509e44b59e397d6dd87ce07f
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.datastream_v1.services.datastream import DatastreamAsyncClient
from google.cloud.datastream_v1.services.datastream import DatastreamClient
from google.cloud.datastream_v1.services.datastream import pagers
from google.cloud.datastream_v1.services.datastream import transports
from google.cloud.datastream_v1.types import datastream
from google.cloud.datastream_v1.types import datastream_resources
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DatastreamClient._get_default_mtls_endpoint(None) is None
assert (
DatastreamClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
DatastreamClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DatastreamClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DatastreamClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert DatastreamClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [DatastreamClient, DatastreamAsyncClient,])
def test_datastream_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "datastream.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.DatastreamGrpcTransport, "grpc"),
(transports.DatastreamGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_datastream_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [DatastreamClient, DatastreamAsyncClient,])
def test_datastream_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "datastream.googleapis.com:443"
def test_datastream_client_get_transport_class():
transport = DatastreamClient.get_transport_class()
available_transports = [
transports.DatastreamGrpcTransport,
]
assert transport in available_transports
transport = DatastreamClient.get_transport_class("grpc")
assert transport == transports.DatastreamGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatastreamClient, transports.DatastreamGrpcTransport, "grpc"),
(
DatastreamAsyncClient,
transports.DatastreamGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DatastreamClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatastreamClient)
)
@mock.patch.object(
DatastreamAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatastreamAsyncClient),
)
def test_datastream_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(DatastreamClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(DatastreamClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(DatastreamClient, transports.DatastreamGrpcTransport, "grpc", "true"),
(
DatastreamAsyncClient,
transports.DatastreamGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(DatastreamClient, transports.DatastreamGrpcTransport, "grpc", "false"),
(
DatastreamAsyncClient,
transports.DatastreamGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DatastreamClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatastreamClient)
)
@mock.patch.object(
DatastreamAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatastreamAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_datastream_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [DatastreamClient, DatastreamAsyncClient])
@mock.patch.object(
DatastreamClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DatastreamClient)
)
@mock.patch.object(
DatastreamAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DatastreamAsyncClient),
)
def test_datastream_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DatastreamClient, transports.DatastreamGrpcTransport, "grpc"),
(
DatastreamAsyncClient,
transports.DatastreamGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_datastream_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(DatastreamClient, transports.DatastreamGrpcTransport, "grpc", grpc_helpers),
(
DatastreamAsyncClient,
transports.DatastreamGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_datastream_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_datastream_client_client_options_from_dict():
with mock.patch(
"google.cloud.datastream_v1.services.datastream.transports.DatastreamGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DatastreamClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(DatastreamClient, transports.DatastreamGrpcTransport, "grpc", grpc_helpers),
(
DatastreamAsyncClient,
transports.DatastreamGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_datastream_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"datastream.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="datastream.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [datastream.ListConnectionProfilesRequest, dict,]
)
def test_list_connection_profiles(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListConnectionProfilesResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_connection_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListConnectionProfilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListConnectionProfilesPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_connection_profiles_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
client.list_connection_profiles()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListConnectionProfilesRequest()
@pytest.mark.asyncio
async def test_list_connection_profiles_async(
transport: str = "grpc_asyncio",
request_type=datastream.ListConnectionProfilesRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListConnectionProfilesResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_connection_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListConnectionProfilesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListConnectionProfilesAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_connection_profiles_async_from_dict():
await test_list_connection_profiles_async(request_type=dict)
def test_list_connection_profiles_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListConnectionProfilesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
call.return_value = datastream.ListConnectionProfilesResponse()
client.list_connection_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_connection_profiles_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListConnectionProfilesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListConnectionProfilesResponse()
)
await client.list_connection_profiles(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_connection_profiles_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListConnectionProfilesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_connection_profiles(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_connection_profiles_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_connection_profiles(
datastream.ListConnectionProfilesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_connection_profiles_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListConnectionProfilesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListConnectionProfilesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_connection_profiles(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_connection_profiles_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_connection_profiles(
datastream.ListConnectionProfilesRequest(), parent="parent_value",
)
def test_list_connection_profiles_pager(transport_name: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
next_page_token="abc",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[], next_page_token="def",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[datastream_resources.ConnectionProfile(),],
next_page_token="ghi",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_connection_profiles(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, datastream_resources.ConnectionProfile) for i in results
)
def test_list_connection_profiles_pages(transport_name: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
next_page_token="abc",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[], next_page_token="def",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[datastream_resources.ConnectionProfile(),],
next_page_token="ghi",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
),
RuntimeError,
)
pages = list(client.list_connection_profiles(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_connection_profiles_async_pager():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
next_page_token="abc",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[], next_page_token="def",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[datastream_resources.ConnectionProfile(),],
next_page_token="ghi",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
),
RuntimeError,
)
async_pager = await client.list_connection_profiles(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, datastream_resources.ConnectionProfile) for i in responses
)
@pytest.mark.asyncio
async def test_list_connection_profiles_async_pages():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_connection_profiles),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
next_page_token="abc",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[], next_page_token="def",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[datastream_resources.ConnectionProfile(),],
next_page_token="ghi",
),
datastream.ListConnectionProfilesResponse(
connection_profiles=[
datastream_resources.ConnectionProfile(),
datastream_resources.ConnectionProfile(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_connection_profiles(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [datastream.GetConnectionProfileRequest, dict,]
)
def test_get_connection_profile(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.ConnectionProfile(
name="name_value",
display_name="display_name_value",
oracle_profile=datastream_resources.OracleProfile(
hostname="hostname_value"
),
static_service_ip_connectivity=None,
)
response = client.get_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.ConnectionProfile)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_get_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
client.get_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetConnectionProfileRequest()
@pytest.mark.asyncio
async def test_get_connection_profile_async(
transport: str = "grpc_asyncio", request_type=datastream.GetConnectionProfileRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.ConnectionProfile(
name="name_value", display_name="display_name_value",
)
)
response = await client.get_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.ConnectionProfile)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_get_connection_profile_async_from_dict():
await test_get_connection_profile_async(request_type=dict)
def test_get_connection_profile_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetConnectionProfileRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
call.return_value = datastream_resources.ConnectionProfile()
client.get_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_connection_profile_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetConnectionProfileRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.ConnectionProfile()
)
await client.get_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_connection_profile_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.ConnectionProfile()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_connection_profile(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_connection_profile_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_connection_profile(
datastream.GetConnectionProfileRequest(), name="name_value",
)
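# Illustrative sketch (not a test): the two supported calling conventions the
# flattened/error tests above exercise. A method accepts either a request
# object or individual flattened keyword arguments, never both at once. The
# resource name below is a placeholder.
def _example_get_connection_profile_calling_conventions(client):
    name = "projects/my-project/locations/us-central1/connectionProfiles/my-cp"
    # 1) Pass a fully formed request object.
    profile_a = client.get_connection_profile(
        request=datastream.GetConnectionProfileRequest(name=name)
    )
    # 2) Or pass the flattened field(s) as keyword arguments.
    profile_b = client.get_connection_profile(name=name)
    return profile_a, profile_b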
@pytest.mark.asyncio
async def test_get_connection_profile_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.ConnectionProfile()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_connection_profile(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_connection_profile_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_connection_profile(
datastream.GetConnectionProfileRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [datastream.CreateConnectionProfileRequest, dict,]
)
def test_create_connection_profile(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
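# Illustrative sketch (not a test): consuming the long-running operation that
# create_connection_profile returns. The client wraps the raw
# operations_pb2.Operation in a future; result() blocks until the backend
# finishes and returns the created ConnectionProfile. The resource names,
# hostname, and timeout below are placeholders.
def _example_create_connection_profile_lro(client, parent):
    operation = client.create_connection_profile(
        parent=parent,
        connection_profile=datastream_resources.ConnectionProfile(
            display_name="my profile",
            oracle_profile=datastream_resources.OracleProfile(
                hostname="db.example.com"
            ),
        ),
        connection_profile_id="my-cp",
    )
    return operation.result(timeout=300)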
def test_create_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
client.create_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateConnectionProfileRequest()
@pytest.mark.asyncio
async def test_create_connection_profile_async(
transport: str = "grpc_asyncio",
request_type=datastream.CreateConnectionProfileRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_connection_profile_async_from_dict():
await test_create_connection_profile_async(request_type=dict)
def test_create_connection_profile_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreateConnectionProfileRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_connection_profile_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreateConnectionProfileRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_connection_profile_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_connection_profile(
parent="parent_value",
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
connection_profile_id="connection_profile_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].connection_profile
mock_val = datastream_resources.ConnectionProfile(name="name_value")
assert arg == mock_val
arg = args[0].connection_profile_id
mock_val = "connection_profile_id_value"
assert arg == mock_val
def test_create_connection_profile_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_connection_profile(
datastream.CreateConnectionProfileRequest(),
parent="parent_value",
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
connection_profile_id="connection_profile_id_value",
)
@pytest.mark.asyncio
async def test_create_connection_profile_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_connection_profile(
parent="parent_value",
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
connection_profile_id="connection_profile_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].connection_profile
mock_val = datastream_resources.ConnectionProfile(name="name_value")
assert arg == mock_val
arg = args[0].connection_profile_id
mock_val = "connection_profile_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_connection_profile_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_connection_profile(
datastream.CreateConnectionProfileRequest(),
parent="parent_value",
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
connection_profile_id="connection_profile_id_value",
)
@pytest.mark.parametrize(
"request_type", [datastream.UpdateConnectionProfileRequest, dict,]
)
def test_update_connection_profile(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.UpdateConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
client.update_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.UpdateConnectionProfileRequest()
@pytest.mark.asyncio
async def test_update_connection_profile_async(
transport: str = "grpc_asyncio",
request_type=datastream.UpdateConnectionProfileRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.UpdateConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_connection_profile_async_from_dict():
await test_update_connection_profile_async(request_type=dict)
def test_update_connection_profile_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.UpdateConnectionProfileRequest()
request.connection_profile.name = "connection_profile.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"connection_profile.name=connection_profile.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_connection_profile_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.UpdateConnectionProfileRequest()
request.connection_profile.name = "connection_profile.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"connection_profile.name=connection_profile.name/value",
) in kw["metadata"]
def test_update_connection_profile_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_connection_profile(
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].connection_profile
mock_val = datastream_resources.ConnectionProfile(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
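# Illustrative sketch (not a test): a partial update using update_mask, the
# pattern the flattened update tests above exercise. Only fields listed in the
# FieldMask paths are changed on the server; the resource name and new display
# name are placeholders.
def _example_update_connection_profile_display_name(client, profile_name):
    operation = client.update_connection_profile(
        connection_profile=datastream_resources.ConnectionProfile(
            name=profile_name, display_name="renamed profile"
        ),
        update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
    )
    return operation.result()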
def test_update_connection_profile_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_connection_profile(
datastream.UpdateConnectionProfileRequest(),
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_connection_profile_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_connection_profile(
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].connection_profile
mock_val = datastream_resources.ConnectionProfile(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_connection_profile_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_connection_profile(
datastream.UpdateConnectionProfileRequest(),
connection_profile=datastream_resources.ConnectionProfile(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type", [datastream.DeleteConnectionProfileRequest, dict,]
)
def test_delete_connection_profile(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
client.delete_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteConnectionProfileRequest()
@pytest.mark.asyncio
async def test_delete_connection_profile_async(
transport: str = "grpc_asyncio",
request_type=datastream.DeleteConnectionProfileRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_connection_profile_async_from_dict():
await test_delete_connection_profile_async(request_type=dict)
def test_delete_connection_profile_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeleteConnectionProfileRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_connection_profile_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeleteConnectionProfileRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_connection_profile_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_connection_profile(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_connection_profile_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_connection_profile(
datastream.DeleteConnectionProfileRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_connection_profile_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_connection_profile(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_connection_profile_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_connection_profile(
datastream.DeleteConnectionProfileRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [datastream.DiscoverConnectionProfileRequest, dict,]
)
def test_discover_connection_profile(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.discover_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.DiscoverConnectionProfileResponse(
oracle_rdbms=datastream_resources.OracleRdbms(
oracle_schemas=[
datastream_resources.OracleSchema(schema="schema_value")
]
),
)
response = client.discover_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DiscoverConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream.DiscoverConnectionProfileResponse)
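# Illustrative sketch (not a test): inspecting a DiscoverConnectionProfileResponse
# such as the one mocked above. Which oneof (for example oracle_rdbms) is
# populated depends on the type of connection profile being discovered.
def _example_collect_discovered_oracle_schemas(response):
    # Collect discovered Oracle schema names (empty if another oneof is set).
    return [
        oracle_schema.schema
        for oracle_schema in response.oracle_rdbms.oracle_schemas
    ]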
def test_discover_connection_profile_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.discover_connection_profile), "__call__"
) as call:
client.discover_connection_profile()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DiscoverConnectionProfileRequest()
@pytest.mark.asyncio
async def test_discover_connection_profile_async(
transport: str = "grpc_asyncio",
request_type=datastream.DiscoverConnectionProfileRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.discover_connection_profile), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.DiscoverConnectionProfileResponse()
)
response = await client.discover_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DiscoverConnectionProfileRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream.DiscoverConnectionProfileResponse)
@pytest.mark.asyncio
async def test_discover_connection_profile_async_from_dict():
await test_discover_connection_profile_async(request_type=dict)
def test_discover_connection_profile_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DiscoverConnectionProfileRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.discover_connection_profile), "__call__"
) as call:
call.return_value = datastream.DiscoverConnectionProfileResponse()
client.discover_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_discover_connection_profile_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DiscoverConnectionProfileRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.discover_connection_profile), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.DiscoverConnectionProfileResponse()
)
await client.discover_connection_profile(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [datastream.ListStreamsRequest, dict,])
def test_list_streams(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListStreamsResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_streams(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListStreamsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListStreamsPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_streams_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
client.list_streams()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListStreamsRequest()
@pytest.mark.asyncio
async def test_list_streams_async(
transport: str = "grpc_asyncio", request_type=datastream.ListStreamsRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListStreamsResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_streams(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListStreamsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListStreamsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_streams_async_from_dict():
await test_list_streams_async(request_type=dict)
def test_list_streams_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListStreamsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
call.return_value = datastream.ListStreamsResponse()
client.list_streams(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_streams_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListStreamsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListStreamsResponse()
)
await client.list_streams(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_streams_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListStreamsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_streams(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_streams_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_streams(
datastream.ListStreamsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_streams_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListStreamsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_streams(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_streams_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_streams(
datastream.ListStreamsRequest(), parent="parent_value",
)
def test_list_streams_pager(transport_name: str = "grpc"):
client = DatastreamClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamsResponse(
streams=[
datastream_resources.Stream(),
datastream_resources.Stream(),
datastream_resources.Stream(),
],
next_page_token="abc",
),
datastream.ListStreamsResponse(streams=[], next_page_token="def",),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(),], next_page_token="ghi",
),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(), datastream_resources.Stream(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_streams(request={})
assert pager._metadata == metadata
        results = list(pager)
assert len(results) == 6
assert all(isinstance(i, datastream_resources.Stream) for i in results)
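# Illustrative sketch (not a test): consuming the synchronous pager returned by
# list_streams. Iterating the pager yields Stream items and fetches follow-up
# pages transparently; ``.pages`` exposes the raw ListStreamsResponse objects.
# The parent value is a placeholder.
def _example_consume_list_streams_pager(client):
    parent = "projects/my-project/locations/us-central1"
    streams = list(client.list_streams(parent=parent))
    page_tokens = [
        page.raw_page.next_page_token
        for page in client.list_streams(parent=parent).pages
    ]
    return streams, page_tokens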
def test_list_streams_pages(transport_name: str = "grpc"):
client = DatastreamClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_streams), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamsResponse(
streams=[
datastream_resources.Stream(),
datastream_resources.Stream(),
datastream_resources.Stream(),
],
next_page_token="abc",
),
datastream.ListStreamsResponse(streams=[], next_page_token="def",),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(),], next_page_token="ghi",
),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(), datastream_resources.Stream(),],
),
RuntimeError,
)
pages = list(client.list_streams(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_streams_async_pager():
    client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_streams), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamsResponse(
streams=[
datastream_resources.Stream(),
datastream_resources.Stream(),
datastream_resources.Stream(),
],
next_page_token="abc",
),
datastream.ListStreamsResponse(streams=[], next_page_token="def",),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(),], next_page_token="ghi",
),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(), datastream_resources.Stream(),],
),
RuntimeError,
)
async_pager = await client.list_streams(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, datastream_resources.Stream) for i in responses)
@pytest.mark.asyncio
async def test_list_streams_async_pages():
    client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_streams), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamsResponse(
streams=[
datastream_resources.Stream(),
datastream_resources.Stream(),
datastream_resources.Stream(),
],
next_page_token="abc",
),
datastream.ListStreamsResponse(streams=[], next_page_token="def",),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(),], next_page_token="ghi",
),
datastream.ListStreamsResponse(
streams=[datastream_resources.Stream(), datastream_resources.Stream(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_streams(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [datastream.GetStreamRequest, dict,])
def test_get_stream(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.Stream(
name="name_value",
display_name="display_name_value",
state=datastream_resources.Stream.State.NOT_STARTED,
customer_managed_encryption_key="customer_managed_encryption_key_value",
backfill_all=datastream_resources.Stream.BackfillAllStrategy(
oracle_excluded_objects=datastream_resources.OracleRdbms(
oracle_schemas=[
datastream_resources.OracleSchema(schema="schema_value")
]
)
),
)
response = client.get_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.Stream)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == datastream_resources.Stream.State.NOT_STARTED
assert (
response.customer_managed_encryption_key
== "customer_managed_encryption_key_value"
)
def test_get_stream_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
client.get_stream()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetStreamRequest()
@pytest.mark.asyncio
async def test_get_stream_async(
transport: str = "grpc_asyncio", request_type=datastream.GetStreamRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.Stream(
name="name_value",
display_name="display_name_value",
state=datastream_resources.Stream.State.NOT_STARTED,
customer_managed_encryption_key="customer_managed_encryption_key_value",
)
)
response = await client.get_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.Stream)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == datastream_resources.Stream.State.NOT_STARTED
assert (
response.customer_managed_encryption_key
== "customer_managed_encryption_key_value"
)
@pytest.mark.asyncio
async def test_get_stream_async_from_dict():
await test_get_stream_async(request_type=dict)
def test_get_stream_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetStreamRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
call.return_value = datastream_resources.Stream()
client.get_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_stream_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetStreamRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.Stream()
)
await client.get_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_stream_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.Stream()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_stream(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_stream_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_stream(
datastream.GetStreamRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_stream_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.Stream()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_stream(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_stream_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_stream(
datastream.GetStreamRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [datastream.CreateStreamRequest, dict,])
def test_create_stream(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_stream_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
client.create_stream()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateStreamRequest()
@pytest.mark.asyncio
async def test_create_stream_async(
transport: str = "grpc_asyncio", request_type=datastream.CreateStreamRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_stream_async_from_dict():
await test_create_stream_async(request_type=dict)
def test_create_stream_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreateStreamRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_stream_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreateStreamRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_stream_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_stream(
parent="parent_value",
stream=datastream_resources.Stream(name="name_value"),
stream_id="stream_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].stream
mock_val = datastream_resources.Stream(name="name_value")
assert arg == mock_val
arg = args[0].stream_id
mock_val = "stream_id_value"
assert arg == mock_val
def test_create_stream_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_stream(
datastream.CreateStreamRequest(),
parent="parent_value",
stream=datastream_resources.Stream(name="name_value"),
stream_id="stream_id_value",
)
@pytest.mark.asyncio
async def test_create_stream_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_stream(
parent="parent_value",
stream=datastream_resources.Stream(name="name_value"),
stream_id="stream_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].stream
mock_val = datastream_resources.Stream(name="name_value")
assert arg == mock_val
arg = args[0].stream_id
mock_val = "stream_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_stream_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_stream(
datastream.CreateStreamRequest(),
parent="parent_value",
stream=datastream_resources.Stream(name="name_value"),
stream_id="stream_id_value",
)
@pytest.mark.parametrize("request_type", [datastream.UpdateStreamRequest, dict,])
def test_update_stream(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.UpdateStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_stream_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
client.update_stream()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.UpdateStreamRequest()
@pytest.mark.asyncio
async def test_update_stream_async(
transport: str = "grpc_asyncio", request_type=datastream.UpdateStreamRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.UpdateStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_stream_async_from_dict():
await test_update_stream_async(request_type=dict)
def test_update_stream_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.UpdateStreamRequest()
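    # For update requests the routing parameter is derived from the nested
    # resource name, hence the dotted "stream.name" key asserted below.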
request.stream.name = "stream.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "stream.name=stream.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_stream_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.UpdateStreamRequest()
request.stream.name = "stream.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "stream.name=stream.name/value",) in kw["metadata"]
def test_update_stream_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_stream(
stream=datastream_resources.Stream(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].stream
mock_val = datastream_resources.Stream(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_stream_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_stream(
datastream.UpdateStreamRequest(),
stream=datastream_resources.Stream(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_stream_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_stream(
stream=datastream_resources.Stream(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].stream
mock_val = datastream_resources.Stream(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_stream_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_stream(
datastream.UpdateStreamRequest(),
stream=datastream_resources.Stream(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [datastream.DeleteStreamRequest, dict,])
def test_delete_stream(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_stream_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
client.delete_stream()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteStreamRequest()
@pytest.mark.asyncio
async def test_delete_stream_async(
transport: str = "grpc_asyncio", request_type=datastream.DeleteStreamRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteStreamRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_stream_async_from_dict():
await test_delete_stream_async(request_type=dict)
def test_delete_stream_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeleteStreamRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_stream_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeleteStreamRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_stream(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_stream_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_stream(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_stream_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_stream(
datastream.DeleteStreamRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_stream_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_stream), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_stream(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_stream_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_stream(
datastream.DeleteStreamRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [datastream.GetStreamObjectRequest, dict,])
def test_get_stream_object(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.StreamObject(
name="name_value", display_name="display_name_value",
)
response = client.get_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetStreamObjectRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.StreamObject)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_get_stream_object_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
client.get_stream_object()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetStreamObjectRequest()
@pytest.mark.asyncio
async def test_get_stream_object_async(
transport: str = "grpc_asyncio", request_type=datastream.GetStreamObjectRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.StreamObject(
name="name_value", display_name="display_name_value",
)
)
response = await client.get_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetStreamObjectRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.StreamObject)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_get_stream_object_async_from_dict():
await test_get_stream_object_async(request_type=dict)
def test_get_stream_object_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetStreamObjectRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
call.return_value = datastream_resources.StreamObject()
client.get_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_stream_object_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetStreamObjectRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.StreamObject()
)
await client.get_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_stream_object_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.StreamObject()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_stream_object(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_stream_object_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_stream_object(
datastream.GetStreamObjectRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_stream_object_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_stream_object), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.StreamObject()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_stream_object(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_stream_object_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_stream_object(
datastream.GetStreamObjectRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [datastream.LookupStreamObjectRequest, dict,])
def test_lookup_stream_object(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_stream_object), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.StreamObject(
name="name_value", display_name="display_name_value",
)
response = client.lookup_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.LookupStreamObjectRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.StreamObject)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
def test_lookup_stream_object_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_stream_object), "__call__"
) as call:
client.lookup_stream_object()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.LookupStreamObjectRequest()
@pytest.mark.asyncio
async def test_lookup_stream_object_async(
transport: str = "grpc_asyncio", request_type=datastream.LookupStreamObjectRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_stream_object), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.StreamObject(
name="name_value", display_name="display_name_value",
)
)
response = await client.lookup_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.LookupStreamObjectRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.StreamObject)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
@pytest.mark.asyncio
async def test_lookup_stream_object_async_from_dict():
await test_lookup_stream_object_async(request_type=dict)
def test_lookup_stream_object_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.LookupStreamObjectRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_stream_object), "__call__"
) as call:
call.return_value = datastream_resources.StreamObject()
client.lookup_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_lookup_stream_object_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.LookupStreamObjectRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.lookup_stream_object), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.StreamObject()
)
await client.lookup_stream_object(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [datastream.ListStreamObjectsRequest, dict,])
def test_list_stream_objects(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListStreamObjectsResponse(
next_page_token="next_page_token_value",
)
response = client.list_stream_objects(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListStreamObjectsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListStreamObjectsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_stream_objects_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
client.list_stream_objects()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListStreamObjectsRequest()
@pytest.mark.asyncio
async def test_list_stream_objects_async(
transport: str = "grpc_asyncio", request_type=datastream.ListStreamObjectsRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListStreamObjectsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_stream_objects(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListStreamObjectsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListStreamObjectsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_stream_objects_async_from_dict():
await test_list_stream_objects_async(request_type=dict)
def test_list_stream_objects_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListStreamObjectsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
call.return_value = datastream.ListStreamObjectsResponse()
client.list_stream_objects(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_stream_objects_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListStreamObjectsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListStreamObjectsResponse()
)
await client.list_stream_objects(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_stream_objects_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListStreamObjectsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_stream_objects(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_stream_objects_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_stream_objects(
datastream.ListStreamObjectsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_stream_objects_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListStreamObjectsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_stream_objects(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_stream_objects_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_stream_objects(
datastream.ListStreamObjectsRequest(), parent="parent_value",
)
def test_list_stream_objects_pager(transport_name: str = "grpc"):
client = DatastreamClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
next_page_token="abc",
),
datastream.ListStreamObjectsResponse(
stream_objects=[], next_page_token="def",
),
datastream.ListStreamObjectsResponse(
stream_objects=[datastream_resources.StreamObject(),],
next_page_token="ghi",
),
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
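        # The pager retains this metadata so the same routing header is re-sent
        # with every subsequent page request.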
pager = client.list_stream_objects(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, datastream_resources.StreamObject) for i in results)
def test_list_stream_objects_pages(transport_name: str = "grpc"):
client = DatastreamClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
next_page_token="abc",
),
datastream.ListStreamObjectsResponse(
stream_objects=[], next_page_token="def",
),
datastream.ListStreamObjectsResponse(
stream_objects=[datastream_resources.StreamObject(),],
next_page_token="ghi",
),
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
),
RuntimeError,
)
pages = list(client.list_stream_objects(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_stream_objects_async_pager():
    client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
next_page_token="abc",
),
datastream.ListStreamObjectsResponse(
stream_objects=[], next_page_token="def",
),
datastream.ListStreamObjectsResponse(
stream_objects=[datastream_resources.StreamObject(),],
next_page_token="ghi",
),
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
),
RuntimeError,
)
async_pager = await client.list_stream_objects(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, datastream_resources.StreamObject) for i in responses)
@pytest.mark.asyncio
async def test_list_stream_objects_async_pages():
    client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_stream_objects),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
next_page_token="abc",
),
datastream.ListStreamObjectsResponse(
stream_objects=[], next_page_token="def",
),
datastream.ListStreamObjectsResponse(
stream_objects=[datastream_resources.StreamObject(),],
next_page_token="ghi",
),
datastream.ListStreamObjectsResponse(
stream_objects=[
datastream_resources.StreamObject(),
datastream_resources.StreamObject(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_stream_objects(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [datastream.StartBackfillJobRequest, dict,])
def test_start_backfill_job(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.StartBackfillJobResponse()
response = client.start_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.StartBackfillJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream.StartBackfillJobResponse)
def test_start_backfill_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
client.start_backfill_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.StartBackfillJobRequest()
@pytest.mark.asyncio
async def test_start_backfill_job_async(
transport: str = "grpc_asyncio", request_type=datastream.StartBackfillJobRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.StartBackfillJobResponse()
)
response = await client.start_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.StartBackfillJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream.StartBackfillJobResponse)
@pytest.mark.asyncio
async def test_start_backfill_job_async_from_dict():
await test_start_backfill_job_async(request_type=dict)
def test_start_backfill_job_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.StartBackfillJobRequest()
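    # The proto field is named "object"; the generated surface appends a trailing
    # underscore so the attribute does not shadow the Python builtin.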
request.object_ = "object/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
call.return_value = datastream.StartBackfillJobResponse()
client.start_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "object=object/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_start_backfill_job_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.StartBackfillJobRequest()
request.object_ = "object/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.StartBackfillJobResponse()
)
await client.start_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "object=object/value",) in kw["metadata"]
def test_start_backfill_job_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.StartBackfillJobResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.start_backfill_job(object_="object__value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].object_
mock_val = "object__value"
assert arg == mock_val
def test_start_backfill_job_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.start_backfill_job(
datastream.StartBackfillJobRequest(), object_="object__value",
)
@pytest.mark.asyncio
async def test_start_backfill_job_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.start_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.StartBackfillJobResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.start_backfill_job(object_="object__value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].object_
mock_val = "object__value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_start_backfill_job_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.start_backfill_job(
datastream.StartBackfillJobRequest(), object_="object__value",
)
@pytest.mark.parametrize("request_type", [datastream.StopBackfillJobRequest, dict,])
def test_stop_backfill_job(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.StopBackfillJobResponse()
response = client.stop_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.StopBackfillJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream.StopBackfillJobResponse)
def test_stop_backfill_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
client.stop_backfill_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.StopBackfillJobRequest()
@pytest.mark.asyncio
async def test_stop_backfill_job_async(
transport: str = "grpc_asyncio", request_type=datastream.StopBackfillJobRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.StopBackfillJobResponse()
)
response = await client.stop_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.StopBackfillJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream.StopBackfillJobResponse)
@pytest.mark.asyncio
async def test_stop_backfill_job_async_from_dict():
await test_stop_backfill_job_async(request_type=dict)
def test_stop_backfill_job_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.StopBackfillJobRequest()
request.object_ = "object/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
call.return_value = datastream.StopBackfillJobResponse()
client.stop_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "object=object/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_stop_backfill_job_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.StopBackfillJobRequest()
request.object_ = "object/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.StopBackfillJobResponse()
)
await client.stop_backfill_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "object=object/value",) in kw["metadata"]
def test_stop_backfill_job_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.StopBackfillJobResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.stop_backfill_job(object_="object__value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].object_
mock_val = "object__value"
assert arg == mock_val
def test_stop_backfill_job_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.stop_backfill_job(
datastream.StopBackfillJobRequest(), object_="object__value",
)
@pytest.mark.asyncio
async def test_stop_backfill_job_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.stop_backfill_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.StopBackfillJobResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.stop_backfill_job(object_="object__value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].object_
mock_val = "object__value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_stop_backfill_job_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.stop_backfill_job(
datastream.StopBackfillJobRequest(), object_="object__value",
)
@pytest.mark.parametrize("request_type", [datastream.FetchStaticIpsRequest, dict,])
def test_fetch_static_ips(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.FetchStaticIpsResponse(
static_ips=["static_ips_value"], next_page_token="next_page_token_value",
)
response = client.fetch_static_ips(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.FetchStaticIpsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.FetchStaticIpsPager)
assert response.static_ips == ["static_ips_value"]
assert response.next_page_token == "next_page_token_value"
def test_fetch_static_ips_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
client.fetch_static_ips()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.FetchStaticIpsRequest()
@pytest.mark.asyncio
async def test_fetch_static_ips_async(
transport: str = "grpc_asyncio", request_type=datastream.FetchStaticIpsRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.FetchStaticIpsResponse(
static_ips=["static_ips_value"],
next_page_token="next_page_token_value",
)
)
response = await client.fetch_static_ips(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.FetchStaticIpsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.FetchStaticIpsAsyncPager)
assert response.static_ips == ["static_ips_value"]
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_fetch_static_ips_async_from_dict():
await test_fetch_static_ips_async(request_type=dict)
def test_fetch_static_ips_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.FetchStaticIpsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
call.return_value = datastream.FetchStaticIpsResponse()
client.fetch_static_ips(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_fetch_static_ips_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.FetchStaticIpsRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.FetchStaticIpsResponse()
)
await client.fetch_static_ips(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_fetch_static_ips_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.FetchStaticIpsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.fetch_static_ips(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_fetch_static_ips_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.fetch_static_ips(
datastream.FetchStaticIpsRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_fetch_static_ips_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.FetchStaticIpsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.FetchStaticIpsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.fetch_static_ips(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_fetch_static_ips_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.fetch_static_ips(
datastream.FetchStaticIpsRequest(), name="name_value",
)
def test_fetch_static_ips_pager(transport_name: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.FetchStaticIpsResponse(
static_ips=[str(), str(), str(),], next_page_token="abc",
),
datastream.FetchStaticIpsResponse(static_ips=[], next_page_token="def",),
datastream.FetchStaticIpsResponse(
static_ips=[str(),], next_page_token="ghi",
),
datastream.FetchStaticIpsResponse(static_ips=[str(), str(),],),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", ""),)),
)
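        # The client attaches a routing header built from the request's (empty) "name"
        # field; the pager is expected to carry that same metadata for later page fetches.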
pager = client.fetch_static_ips(request={})
assert pager._metadata == metadata
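        # 3 + 0 + 1 + 2 items across the four mocked pages gives 6 results in total.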
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, str) for i in results)
def test_fetch_static_ips_pages(transport_name: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.fetch_static_ips), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.FetchStaticIpsResponse(
static_ips=[str(), str(), str(),], next_page_token="abc",
),
datastream.FetchStaticIpsResponse(static_ips=[], next_page_token="def",),
datastream.FetchStaticIpsResponse(
static_ips=[str(),], next_page_token="ghi",
),
datastream.FetchStaticIpsResponse(static_ips=[str(), str(),],),
RuntimeError,
)
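        # The final page omits next_page_token, hence the trailing empty string below.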
pages = list(client.fetch_static_ips(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_fetch_static_ips_async_pager():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_static_ips), "__call__", new_callable=mock.AsyncMock
) as call:
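        # AsyncMock is used so each page fetch made by the async pager can be awaited;
        # the side_effect values are consumed one per awaited call.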
# Set the response to a series of pages.
call.side_effect = (
datastream.FetchStaticIpsResponse(
static_ips=[str(), str(), str(),], next_page_token="abc",
),
datastream.FetchStaticIpsResponse(static_ips=[], next_page_token="def",),
datastream.FetchStaticIpsResponse(
static_ips=[str(),], next_page_token="ghi",
),
datastream.FetchStaticIpsResponse(static_ips=[str(), str(),],),
RuntimeError,
)
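        # Awaiting the method issues the first RPC, so the pager already exposes the
        # first page's next_page_token before iteration starts.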
async_pager = await client.fetch_static_ips(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, str) for i in responses)
@pytest.mark.asyncio
async def test_fetch_static_ips_async_pages():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.fetch_static_ips), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.FetchStaticIpsResponse(
static_ips=[str(), str(), str(),], next_page_token="abc",
),
datastream.FetchStaticIpsResponse(static_ips=[], next_page_token="def",),
datastream.FetchStaticIpsResponse(
static_ips=[str(),], next_page_token="ghi",
),
datastream.FetchStaticIpsResponse(static_ips=[str(), str(),],),
RuntimeError,
)
pages = []
async for page_ in (await client.fetch_static_ips(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [datastream.CreatePrivateConnectionRequest, dict,]
)
def test_create_private_connection(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
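        # This is a long-running operation: the raw Operation proto returned by the
        # stub is wrapped by the client into an operation future, as asserted below.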
response = client.create_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreatePrivateConnectionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_private_connection_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
client.create_private_connection()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreatePrivateConnectionRequest()
@pytest.mark.asyncio
async def test_create_private_connection_async(
transport: str = "grpc_asyncio",
request_type=datastream.CreatePrivateConnectionRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreatePrivateConnectionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_private_connection_async_from_dict():
await test_create_private_connection_async(request_type=dict)
def test_create_private_connection_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreatePrivateConnectionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_private_connection_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreatePrivateConnectionRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_private_connection_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_private_connection(
parent="parent_value",
private_connection=datastream_resources.PrivateConnection(
name="name_value"
),
private_connection_id="private_connection_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].private_connection
mock_val = datastream_resources.PrivateConnection(name="name_value")
assert arg == mock_val
arg = args[0].private_connection_id
mock_val = "private_connection_id_value"
assert arg == mock_val
def test_create_private_connection_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_private_connection(
datastream.CreatePrivateConnectionRequest(),
parent="parent_value",
private_connection=datastream_resources.PrivateConnection(
name="name_value"
),
private_connection_id="private_connection_id_value",
)
@pytest.mark.asyncio
async def test_create_private_connection_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_private_connection(
parent="parent_value",
private_connection=datastream_resources.PrivateConnection(
name="name_value"
),
private_connection_id="private_connection_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].private_connection
mock_val = datastream_resources.PrivateConnection(name="name_value")
assert arg == mock_val
arg = args[0].private_connection_id
mock_val = "private_connection_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_private_connection_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_private_connection(
datastream.CreatePrivateConnectionRequest(),
parent="parent_value",
private_connection=datastream_resources.PrivateConnection(
name="name_value"
),
private_connection_id="private_connection_id_value",
)
@pytest.mark.parametrize(
"request_type", [datastream.GetPrivateConnectionRequest, dict,]
)
def test_get_private_connection(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.PrivateConnection(
name="name_value",
display_name="display_name_value",
state=datastream_resources.PrivateConnection.State.CREATING,
)
response = client.get_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetPrivateConnectionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.PrivateConnection)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == datastream_resources.PrivateConnection.State.CREATING
def test_get_private_connection_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
client.get_private_connection()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetPrivateConnectionRequest()
@pytest.mark.asyncio
async def test_get_private_connection_async(
transport: str = "grpc_asyncio", request_type=datastream.GetPrivateConnectionRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.PrivateConnection(
name="name_value",
display_name="display_name_value",
state=datastream_resources.PrivateConnection.State.CREATING,
)
)
response = await client.get_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetPrivateConnectionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.PrivateConnection)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == datastream_resources.PrivateConnection.State.CREATING
@pytest.mark.asyncio
async def test_get_private_connection_async_from_dict():
await test_get_private_connection_async(request_type=dict)
def test_get_private_connection_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetPrivateConnectionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
call.return_value = datastream_resources.PrivateConnection()
client.get_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_private_connection_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetPrivateConnectionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.PrivateConnection()
)
await client.get_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_private_connection_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.PrivateConnection()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_private_connection(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_private_connection_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_private_connection(
datastream.GetPrivateConnectionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_private_connection_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.PrivateConnection()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.PrivateConnection()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_private_connection(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_private_connection_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_private_connection(
datastream.GetPrivateConnectionRequest(), name="name_value",
)
@pytest.mark.parametrize(
"request_type", [datastream.ListPrivateConnectionsRequest, dict,]
)
def test_list_private_connections(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListPrivateConnectionsResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_private_connections(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListPrivateConnectionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPrivateConnectionsPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_private_connections_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
client.list_private_connections()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListPrivateConnectionsRequest()
@pytest.mark.asyncio
async def test_list_private_connections_async(
transport: str = "grpc_asyncio",
request_type=datastream.ListPrivateConnectionsRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListPrivateConnectionsResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_private_connections(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListPrivateConnectionsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPrivateConnectionsAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_private_connections_async_from_dict():
await test_list_private_connections_async(request_type=dict)
def test_list_private_connections_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListPrivateConnectionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
call.return_value = datastream.ListPrivateConnectionsResponse()
client.list_private_connections(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_private_connections_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListPrivateConnectionsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListPrivateConnectionsResponse()
)
await client.list_private_connections(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_private_connections_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListPrivateConnectionsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_private_connections(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_private_connections_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_private_connections(
datastream.ListPrivateConnectionsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_private_connections_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListPrivateConnectionsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListPrivateConnectionsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_private_connections(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_private_connections_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_private_connections(
datastream.ListPrivateConnectionsRequest(), parent="parent_value",
)
def test_list_private_connections_pager(transport_name: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
next_page_token="abc",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[], next_page_token="def",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[datastream_resources.PrivateConnection(),],
next_page_token="ghi",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_private_connections(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, datastream_resources.PrivateConnection) for i in results
)
def test_list_private_connections_pages(transport_name: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
next_page_token="abc",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[], next_page_token="def",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[datastream_resources.PrivateConnection(),],
next_page_token="ghi",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
),
RuntimeError,
)
pages = list(client.list_private_connections(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_private_connections_async_pager():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
next_page_token="abc",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[], next_page_token="def",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[datastream_resources.PrivateConnection(),],
next_page_token="ghi",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
),
RuntimeError,
)
async_pager = await client.list_private_connections(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, datastream_resources.PrivateConnection) for i in responses
)
@pytest.mark.asyncio
async def test_list_private_connections_async_pages():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_private_connections),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
next_page_token="abc",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[], next_page_token="def",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[datastream_resources.PrivateConnection(),],
next_page_token="ghi",
),
datastream.ListPrivateConnectionsResponse(
private_connections=[
datastream_resources.PrivateConnection(),
datastream_resources.PrivateConnection(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_private_connections(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [datastream.DeletePrivateConnectionRequest, dict,]
)
def test_delete_private_connection(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeletePrivateConnectionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_private_connection_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
client.delete_private_connection()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeletePrivateConnectionRequest()
@pytest.mark.asyncio
async def test_delete_private_connection_async(
transport: str = "grpc_asyncio",
request_type=datastream.DeletePrivateConnectionRequest,
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeletePrivateConnectionRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_private_connection_async_from_dict():
await test_delete_private_connection_async(request_type=dict)
def test_delete_private_connection_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeletePrivateConnectionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_private_connection_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeletePrivateConnectionRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_private_connection(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_private_connection_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_private_connection(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_private_connection_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_private_connection(
datastream.DeletePrivateConnectionRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_private_connection_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_private_connection), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_private_connection(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_private_connection_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_private_connection(
datastream.DeletePrivateConnectionRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [datastream.CreateRouteRequest, dict,])
def test_create_route(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateRouteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_route_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
client.create_route()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateRouteRequest()
@pytest.mark.asyncio
async def test_create_route_async(
transport: str = "grpc_asyncio", request_type=datastream.CreateRouteRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.CreateRouteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_route_async_from_dict():
await test_create_route_async(request_type=dict)
def test_create_route_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreateRouteRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_route_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.CreateRouteRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_route_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_route(
parent="parent_value",
route=datastream_resources.Route(name="name_value"),
route_id="route_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].route
mock_val = datastream_resources.Route(name="name_value")
assert arg == mock_val
arg = args[0].route_id
mock_val = "route_id_value"
assert arg == mock_val
def test_create_route_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_route(
datastream.CreateRouteRequest(),
parent="parent_value",
route=datastream_resources.Route(name="name_value"),
route_id="route_id_value",
)
@pytest.mark.asyncio
async def test_create_route_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_route(
parent="parent_value",
route=datastream_resources.Route(name="name_value"),
route_id="route_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].route
mock_val = datastream_resources.Route(name="name_value")
assert arg == mock_val
arg = args[0].route_id
mock_val = "route_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_route_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_route(
datastream.CreateRouteRequest(),
parent="parent_value",
route=datastream_resources.Route(name="name_value"),
route_id="route_id_value",
)
@pytest.mark.parametrize("request_type", [datastream.GetRouteRequest, dict,])
def test_get_route(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.Route(
name="name_value",
display_name="display_name_value",
destination_address="destination_address_value",
destination_port=1734,
)
response = client.get_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetRouteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.Route)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.destination_address == "destination_address_value"
assert response.destination_port == 1734
def test_get_route_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
client.get_route()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetRouteRequest()
@pytest.mark.asyncio
async def test_get_route_async(
transport: str = "grpc_asyncio", request_type=datastream.GetRouteRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.Route(
name="name_value",
display_name="display_name_value",
destination_address="destination_address_value",
destination_port=1734,
)
)
response = await client.get_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.GetRouteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datastream_resources.Route)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.destination_address == "destination_address_value"
assert response.destination_port == 1734
@pytest.mark.asyncio
async def test_get_route_async_from_dict():
await test_get_route_async(request_type=dict)
def test_get_route_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetRouteRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
call.return_value = datastream_resources.Route()
client.get_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_route_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.GetRouteRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.Route()
)
await client.get_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_route_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.Route()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_route(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_route_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_route(
datastream.GetRouteRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_route_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream_resources.Route()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream_resources.Route()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_route(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_route_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_route(
datastream.GetRouteRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [datastream.ListRoutesRequest, dict,])
def test_list_routes(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListRoutesResponse(
next_page_token="next_page_token_value", unreachable=["unreachable_value"],
)
response = client.list_routes(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListRoutesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListRoutesPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_list_routes_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
client.list_routes()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListRoutesRequest()
@pytest.mark.asyncio
async def test_list_routes_async(
transport: str = "grpc_asyncio", request_type=datastream.ListRoutesRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListRoutesResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.list_routes(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.ListRoutesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListRoutesAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_list_routes_async_from_dict():
await test_list_routes_async(request_type=dict)
def test_list_routes_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListRoutesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
call.return_value = datastream.ListRoutesResponse()
client.list_routes(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_routes_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.ListRoutesRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListRoutesResponse()
)
await client.list_routes(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_routes_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListRoutesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_routes(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_routes_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_routes(
datastream.ListRoutesRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_routes_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datastream.ListRoutesResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datastream.ListRoutesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_routes(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_routes_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_routes(
datastream.ListRoutesRequest(), parent="parent_value",
)
def test_list_routes_pager(transport_name: str = "grpc"):
client = DatastreamClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListRoutesResponse(
routes=[
datastream_resources.Route(),
datastream_resources.Route(),
datastream_resources.Route(),
],
next_page_token="abc",
),
datastream.ListRoutesResponse(routes=[], next_page_token="def",),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(),], next_page_token="ghi",
),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(), datastream_resources.Route(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_routes(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, datastream_resources.Route) for i in results)
def test_list_routes_pages(transport_name: str = "grpc"):
client = DatastreamClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_routes), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListRoutesResponse(
routes=[
datastream_resources.Route(),
datastream_resources.Route(),
datastream_resources.Route(),
],
next_page_token="abc",
),
datastream.ListRoutesResponse(routes=[], next_page_token="def",),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(),], next_page_token="ghi",
),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(), datastream_resources.Route(),],
),
RuntimeError,
)
pages = list(client.list_routes(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_routes_async_pager():
    client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_routes), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListRoutesResponse(
routes=[
datastream_resources.Route(),
datastream_resources.Route(),
datastream_resources.Route(),
],
next_page_token="abc",
),
datastream.ListRoutesResponse(routes=[], next_page_token="def",),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(),], next_page_token="ghi",
),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(), datastream_resources.Route(),],
),
RuntimeError,
)
async_pager = await client.list_routes(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, datastream_resources.Route) for i in responses)
@pytest.mark.asyncio
async def test_list_routes_async_pages():
    client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_routes), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datastream.ListRoutesResponse(
routes=[
datastream_resources.Route(),
datastream_resources.Route(),
datastream_resources.Route(),
],
next_page_token="abc",
),
datastream.ListRoutesResponse(routes=[], next_page_token="def",),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(),], next_page_token="ghi",
),
datastream.ListRoutesResponse(
routes=[datastream_resources.Route(), datastream_resources.Route(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_routes(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [datastream.DeleteRouteRequest, dict,])
def test_delete_route(request_type, transport: str = "grpc"):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteRouteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_route_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
client.delete_route()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteRouteRequest()
@pytest.mark.asyncio
async def test_delete_route_async(
transport: str = "grpc_asyncio", request_type=datastream.DeleteRouteRequest
):
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datastream.DeleteRouteRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_route_async_from_dict():
await test_delete_route_async(request_type=dict)
def test_delete_route_field_headers():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeleteRouteRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_route_field_headers_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datastream.DeleteRouteRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_route(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_route_flattened():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_route(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_route_flattened_error():
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_route(
datastream.DeleteRouteRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_route_flattened_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_route), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_route(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_route_flattened_error_async():
client = DatastreamAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_route(
datastream.DeleteRouteRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DatastreamGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DatastreamGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatastreamClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.DatastreamGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DatastreamClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DatastreamClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.DatastreamGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DatastreamClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DatastreamGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DatastreamClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DatastreamGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DatastreamGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.DatastreamGrpcTransport, transports.DatastreamGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DatastreamClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.DatastreamGrpcTransport,)
def test_datastream_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DatastreamTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_datastream_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.datastream_v1.services.datastream.transports.DatastreamTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DatastreamTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_connection_profiles",
"get_connection_profile",
"create_connection_profile",
"update_connection_profile",
"delete_connection_profile",
"discover_connection_profile",
"list_streams",
"get_stream",
"create_stream",
"update_stream",
"delete_stream",
"get_stream_object",
"lookup_stream_object",
"list_stream_objects",
"start_backfill_job",
"stop_backfill_job",
"fetch_static_ips",
"create_private_connection",
"get_private_connection",
"list_private_connections",
"delete_private_connection",
"create_route",
"get_route",
"list_routes",
"delete_route",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_datastream_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.datastream_v1.services.datastream.transports.DatastreamTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatastreamTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_datastream_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.datastream_v1.services.datastream.transports.DatastreamTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DatastreamTransport()
adc.assert_called_once()
def test_datastream_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DatastreamClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.DatastreamGrpcTransport, transports.DatastreamGrpcAsyncIOTransport,],
)
def test_datastream_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DatastreamGrpcTransport, grpc_helpers),
(transports.DatastreamGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_datastream_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"datastream.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="datastream.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.DatastreamGrpcTransport, transports.DatastreamGrpcAsyncIOTransport],
)
def test_datastream_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_datastream_host_no_port():
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="datastream.googleapis.com"
),
)
assert client.transport._host == "datastream.googleapis.com:443"
def test_datastream_host_with_port():
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="datastream.googleapis.com:8000"
),
)
assert client.transport._host == "datastream.googleapis.com:8000"
def test_datastream_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DatastreamGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_datastream_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DatastreamGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.DatastreamGrpcTransport, transports.DatastreamGrpcAsyncIOTransport],
)
def test_datastream_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.DatastreamGrpcTransport, transports.DatastreamGrpcAsyncIOTransport],
)
def test_datastream_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_datastream_grpc_lro_client():
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_datastream_grpc_lro_async_client():
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_connection_profile_path():
project = "squid"
location = "clam"
connection_profile = "whelk"
expected = "projects/{project}/locations/{location}/connectionProfiles/{connection_profile}".format(
project=project, location=location, connection_profile=connection_profile,
)
actual = DatastreamClient.connection_profile_path(
project, location, connection_profile
)
assert expected == actual
def test_parse_connection_profile_path():
expected = {
"project": "octopus",
"location": "oyster",
"connection_profile": "nudibranch",
}
path = DatastreamClient.connection_profile_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_connection_profile_path(path)
assert expected == actual
def test_networks_path():
project = "cuttlefish"
network = "mussel"
expected = "projects/{project}/global/networks/{network}".format(
project=project, network=network,
)
actual = DatastreamClient.networks_path(project, network)
assert expected == actual
def test_parse_networks_path():
expected = {
"project": "winkle",
"network": "nautilus",
}
path = DatastreamClient.networks_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_networks_path(path)
assert expected == actual
def test_private_connection_path():
project = "scallop"
location = "abalone"
private_connection = "squid"
expected = "projects/{project}/locations/{location}/privateConnections/{private_connection}".format(
project=project, location=location, private_connection=private_connection,
)
actual = DatastreamClient.private_connection_path(
project, location, private_connection
)
assert expected == actual
def test_parse_private_connection_path():
expected = {
"project": "clam",
"location": "whelk",
"private_connection": "octopus",
}
path = DatastreamClient.private_connection_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_private_connection_path(path)
assert expected == actual
def test_route_path():
project = "oyster"
location = "nudibranch"
private_connection = "cuttlefish"
route = "mussel"
expected = "projects/{project}/locations/{location}/privateConnections/{private_connection}/routes/{route}".format(
project=project,
location=location,
private_connection=private_connection,
route=route,
)
actual = DatastreamClient.route_path(project, location, private_connection, route)
assert expected == actual
def test_parse_route_path():
expected = {
"project": "winkle",
"location": "nautilus",
"private_connection": "scallop",
"route": "abalone",
}
path = DatastreamClient.route_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_route_path(path)
assert expected == actual
def test_stream_path():
project = "squid"
location = "clam"
stream = "whelk"
expected = "projects/{project}/locations/{location}/streams/{stream}".format(
project=project, location=location, stream=stream,
)
actual = DatastreamClient.stream_path(project, location, stream)
assert expected == actual
def test_parse_stream_path():
expected = {
"project": "octopus",
"location": "oyster",
"stream": "nudibranch",
}
path = DatastreamClient.stream_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_stream_path(path)
assert expected == actual
def test_stream_object_path():
project = "cuttlefish"
location = "mussel"
stream = "winkle"
object = "nautilus"
expected = "projects/{project}/locations/{location}/streams/{stream}/objects/{object}".format(
project=project, location=location, stream=stream, object=object,
)
actual = DatastreamClient.stream_object_path(project, location, stream, object)
assert expected == actual
def test_parse_stream_object_path():
expected = {
"project": "scallop",
"location": "abalone",
"stream": "squid",
"object": "clam",
}
path = DatastreamClient.stream_object_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_stream_object_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "whelk"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DatastreamClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "octopus",
}
path = DatastreamClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "oyster"
expected = "folders/{folder}".format(folder=folder,)
actual = DatastreamClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nudibranch",
}
path = DatastreamClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "cuttlefish"
expected = "organizations/{organization}".format(organization=organization,)
actual = DatastreamClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "mussel",
}
path = DatastreamClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "winkle"
expected = "projects/{project}".format(project=project,)
actual = DatastreamClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "nautilus",
}
path = DatastreamClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "scallop"
location = "abalone"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DatastreamClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "squid",
"location": "clam",
}
path = DatastreamClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DatastreamClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DatastreamTransport, "_prep_wrapped_messages"
) as prep:
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DatastreamTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DatastreamClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = DatastreamAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = DatastreamClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(DatastreamClient, transports.DatastreamGrpcTransport),
(DatastreamAsyncClient, transports.DatastreamGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-datastream | tests/unit/gapic/datastream_v1/test_datastream.py | Python | apache-2.0 | 295,710 | ["Octopus"] | 7ccb006918d32b21f2f7b5a76f662c58fe0378e01ca3017f515a46d043749d20 |
# -*- coding: utf-8 -*-
"""Tests for PyBEL-Tools."""
| pybel/pybel-tools | tests/__init__.py | Python | mit | 54 | ["Pybel"] | a9981d19a53a9bba2d376711fa9016aa6e878f7d0ccae04ed685bd1ec3725d36 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 4 20:44:24 2016
@author: ddboline
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import os
import re
import datetime
from subprocess import call, Popen, PIPE
HOMEDIR = os.getenv('HOME')
HOSTNAME = os.uname()[1]
REMOTEHOST = 'ddbolineathome.mooo.com'
POSTGRESTRING = 'postgresql://ddboline:BQGIvkKFZPejrKvX@localhost'
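# daily shows are identified by air date rather than season/episode (see get_dailies_airdate)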
dailies = {
'the_late_show_with_stephen_colbert', 'the_daily_show', 'the_nightly_show', 'at_midnight'
}
def play_file(fname, yad=False):
''' play file using mplayer / mpv '''
downloaded_file = get_remote_file(fname)
_cmd = 'mpv --fs --softvol=yes --softvol-max 1000 --vf='
if yad:
_cmd = '%syadif,' % _cmd
_cmd = '%sdsize=470:-2' % _cmd
if HOSTNAME == 'dilepton2' or HOSTNAME == 'dilepton-tower':
_cmd = '%s -vo xv' % _cmd
_cmd = '%s %s' % (_cmd, downloaded_file)
call(_cmd, shell=True)
def get_length_of_mpg(fname='%s/netflix/mpg/test_roku_0.mpg' % HOMEDIR):
    ''' get length of mpg/avi/mp4 with ffprobe '''
if not os.path.exists(fname):
return -1
command = 'ffprobe %s 2>&1' % fname
_cmd = Popen(command, shell=True, stdout=PIPE, close_fds=True).stdout
nsecs = 0
for line in _cmd:
if hasattr(line, 'decode'):
line = line.decode()
_line = line.split()
if _line[0] == 'Duration:':
items = _line[1].strip(',').split(':')
try:
nhour = int(items[0])
nmin = int(items[1])
nsecs = int(float(items[2])) + nmin * 60 + nhour * 60 * 60
except ValueError:
nsecs = -1
return nsecs
def read_time(fname):
    ''' find duration of mpg/avi/mp4 file using ffprobe '''
downloaded_file = has_been_downloaded(fname)
if not downloaded_file:
return -1
return get_length_of_mpg(downloaded_file)
def print_h_m_s(second):
''' convert time from seconds to hh:mm:ss format '''
hours = int(second / 3600)
minutes = int(second / 60) - hours * 60
seconds = int(second) - minutes * 60 - hours * 3600
return '%02i:%02i:%02i' % (hours, minutes, seconds)
def get_remote_file(fname):
''' download remote file '''
if HOSTNAME == 'dilepton-tower':
return fname
fn_ = fname.split('/')[-1]
downloaded_file = '%s/Downloads/%s' % (HOMEDIR, fn_)
if not os.path.isfile(downloaded_file):
_command = 'scp ddboline@%s:%s %s' % (REMOTEHOST, fname, downloaded_file)
call(_command, shell=True)
return downloaded_file
def get_remote_files(flist):
''' download list of files '''
new_flist = []
for fname in flist:
new_flist.append(get_remote_file(fname))
return new_flist
def has_been_downloaded(fname):
''' determine if file has been downloaded '''
fn_ = fname.split('/')[-1]
if HOSTNAME == 'dilepton-tower':
return fname
downloaded_file = '%s/Downloads/%s' % (HOMEDIR, fn_)
if not os.path.exists(downloaded_file):
return False
return downloaded_file
def remove_remote_file(fname):
''' remove local copy of remote file '''
downloaded_file = '%s/Downloads/%s' % (HOMEDIR, fname.split('/')[-1])
if HOSTNAME != 'dilepton-tower' and os.path.isfile(downloaded_file):
_cmd = 'rm %s' % downloaded_file
call(_cmd, shell=True)
def get_season_episode_from_name(fname, show):
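    ''' parse (season, episode) from a filename such as show_s05_ep12.mp4 (inferred naming); returns (-1, -1) on failure '''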
tmp = fname.split('/')[-1]
if show not in tmp:
return -1, -1
tmp = tmp.split(show)[1]
if '.' not in tmp:
return -1, -1
tmp = tmp.split('.')[0]
if '_' not in tmp:
return -1, -1
tmp = tmp.split('_')
if len(tmp) < 3:
return -1, -1
tmp = tmp[1:3]
try:
season = int(tmp[0].strip('s'))
episode = int(tmp[1].strip('epw'))
return season, episode
except Exception:
return -1, -1
def get_dailies_airdate(fname, show):
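    ''' parse the air date from a daily-show filename such as the_daily_show_20160304.mp4 (inferred naming); returns datetime.date or None '''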
tmp = fname.split('/')[-1]
if show not in tmp:
return None
tmp = tmp.split(show)[1]
if '.' not in tmp:
return None
tmp = tmp.split('.')[0]
if '_' not in tmp:
return None
if len(tmp) < 2:
return None
tmp = tmp.strip('_')
try:
year = int(tmp[:4])
month = int(tmp[4:6])
day = int(tmp[6:8])
return datetime.date(year=year, month=month, day=day)
except Exception as exc:
print(exc, tmp)
return None
def extract_show(fn_, full_path=True):
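    ''' infer (show, type) from a file path, where type is 'tv' or 'movie' '''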
type_ = ''
if 'television' in fn_ or not full_path:
if 'unwatched' not in fn_ and full_path:
show = fn_.split('/')[-3]
type_ = 'tv'
else:
show = fn_.split('/')[-1]
show = re.sub('_s[0-9]+', ' ', show).split()[0]
type_ = 'tv'
for show_ in dailies:
if show_ in show:
show = show_
break
else:
show = fn_.split('/')[-1].split('.')[0]
type_ = 'movie'
return show, type_
class PopenWrapperClass(object):
""" context wrapper around subprocess.Popen """
def __init__(self, command):
""" init fn """
self.command = command
self.pop_ = Popen(self.command, shell=True, stdout=PIPE, close_fds=True)
def __iter__(self):
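        """ iterate over lines of the subprocess stdout """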
return self.pop_.stdout
def __enter__(self):
""" enter fn """
return self.pop_.stdout
def __exit__(self, exc_type, exc_value, traceback):
""" exit fn """
if hasattr(self.pop_, '__exit__'):
efunc = getattr(self.pop_, '__exit__')
return efunc(exc_type, exc_value, traceback)
self.pop_.wait()
if exc_type or exc_value or traceback:
return False
return True
def run_command(command, do_popen=False, turn_on_commands=True, single_line=False):
    ''' wrapper around subprocess.call / Popen '''
if not turn_on_commands:
print(command)
return command
elif do_popen:
if single_line:
with PopenWrapperClass(command) as pop_:
return pop_.read()
else:
return PopenWrapperClass(command)
return call(command, shell=True)
def test_run_command():
""" test run_command """
cmd = 'echo "HELLO"'
out = run_command(cmd, do_popen=True, single_line=True).strip()
print(out, cmd)
assert out == b'HELLO'
def walk_wrapper(direc, callback, arg):
""" wrapper around os.walk for py2/py3 compatibility """
if hasattr(os.path, 'walk'):
os.path.walk(direc, callback, arg)
elif hasattr(os, 'walk'):
for dirpath, dirnames, filenames in os.walk(direc):
callback(arg, dirpath, dirnames + filenames)
| ddboline/movie_collection_app | movie_collection_app/util.py | Python | mit | 6,774 | ["EPW"] | 66a6c30d9540d054b65b4365f4c5f2aef659cf821da565f12939b7e999813984 |
#! /usr/local/bioinfo/python/3.4.3_build2/bin/python
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
import pandas as pd
import argparse
def importBlastOutput():
'''
Import Blast output using pandas read_csv.
    The pandas dataframe is used to build a list of subject ids.
    This function outputs a list to be parsed.
'''
blastResult = pd.read_csv(args.blastHandle, sep = '\t', header = None)
subjectList = list(set(blastResult[1]))
return subjectList
def strand():
'''
    Find out which strand the sequence is on (+ or -) and create a fasta record.
    If the strand is -, the reverse complement of the sequence is given.
    In this particular script the strand information is taken from the subject's
    protein fasta.
'''
# print("---------")
# coordinatesVerified
# print("---------")
if coordinatesVerified[2] == '-':
record = SeqRecord(Seq(str(recordGenome[scaff].seq[ coordinatesVerified[0] : coordinatesVerified[1]].reverse_complement()),
IUPAC.ambiguous_dna),
id = str(protId),
name = recordPep[protId].name,
description = str(recordPep[protId].description )#+ " extrasequence=({0},{1})".format(plusNuc, minusNuc))
)
else:
record = SeqRecord(Seq(str(recordGenome[scaff].seq[ coordinatesVerified[0] : coordinatesVerified[1] ]),
IUPAC.ambiguous_dna),
id = str(protId),
name = recordPep[protId].name,
description = str(recordPep[protId].description )#+ " extrasequence=({0},{1})".format(plusNuc, minusNuc))
)
return record
#####
## Find which strand of the blast match on the subject (reference)
## The input are s.start and s.end
## If s.start < s.end, strand = +
## if s.end < s.start, strand = -
def verifyCoordinates():
'''
    Check that the coordinates, extended by -upStream and +dwStream <int>,
    stay within the limits of the scaffold.
    If they do not, the coordinates are clamped to the start and to the last
    position of the scaffold.
'''
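    ## Hypothetical example: scaffold length 1000, coordinates [950, 990, '+'],
    ## upStream=100, dwStream=50 -> leftCoord = 849, rightCoord clamped to 999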
## Work if strand is + or -
if coordinates[2] == '-':
if (coordinates[1] +args.upStream) > (len(recordGenome[scaff]) - 1) :
## Impose max value of the scaffold
rightCoord = (len(recordGenome[scaff]) - 1)
else:
rightCoord = coordinates[1] +args.upStream
if (coordinates[0] -1 -args.dwStream) < 0 :
## Impose min value of the scaffold
leftCoord = 0
else:
leftCoord = (coordinates[0] -1 -args.dwStream)
if coordinates[2] == '+':
if (coordinates[0] -1 -args.upStream) < 0 :
## Impose min value of the scaffold
leftCoord = 0
else:
leftCoord = (coordinates[0] -1 -args.upStream)
if (coordinates[1] +args.dwStream) > (len(recordGenome[scaff]) - 1) :
## Impose max value of the scaffold
rightCoord = (len(recordGenome[scaff]) - 1)
else:
rightCoord = (coordinates[1] +args.dwStream)
return [leftCoord, rightCoord, coordinates[2]]
def verifyCoordinatesTest():
'''
    Verify that coordinates in the format [start, end, strand]
    do not fall outside the scaffold they belong to
    when the user asks for upstream and downstream sequences.
    This format of writing coordinates is close to that of a GFF file.
'''
if (coordinates[0] -1 -args.upStream) < 1 :
## Impose min value of the scaffold
leftCoord = 1
else:
leftCoord = (coordinates[0] -1 -args.upStream)
if (coordinates[1] +args.dwStream) > (len(recordGenome[scaff]) - 1) :
## Impose max value of the scaffold
rightCoord = (len(recordGenome[scaff]) - 1)
else:
rightCoord = (coordinates[1] +args.dwStream)
return [leftCoord, rightCoord, coordinates[2]]
def __main__():
#################################
## Declare global variables
global args, recordPep, recordGenome, protId, protIdList, prot, locus, scaff
global coordinates, coordinatesVerified
#################################
## Parse arguments
parser = argparse.ArgumentParser(description='''
    Slice a fasta sequence from the results of a blastp,
    using the "Scaffold + coordinates" name of the
    protein to obtain the genomic fasta sequence plus and
    minus some extra nucleotides determined by the user
''')
parser.add_argument("-p", "--subjectProt", dest = "subjectProtHandle",
type = str, default = None,
help = '''
<str> File containing reference (subject) fasta sequence.
File must be in fasta format. In this particular case it
is a protein fasta file.
'''
)
parser.add_argument("-g", "--subjectGenome", dest = "subjectGenometHandle",
type = str, default = None,
help = '''
<str> File containing reference (subject) genome fasta
sequence.
'''
)
parser.add_argument("-b", "--blast", dest = "blastHandle",
type = str, default = None,
help = '''
<str> File containing result from blast.
Format must be tsv, no headers.
'''
)
## Unused
parser.add_argument("-q", "--query", dest = "queryFastaHandle",
type = str, default = None,
help = '''
<str> File containing fasta sequences.
                        It's the same file containing the query sequences
used in the blast
'''
)
parser.add_argument("-o", "--output", dest = "outputHandle",
type = str, default = "output.fasta",
help = '''
<str> Name of output fasta file
defaut= "output.fasta"
'''
)
parser.add_argument("-u", "--upstream", dest = "upStream",
type = int, default = 0,
help = '''
<int> Length of the upstream (promoter) sequence
'''
)
parser.add_argument("-d", "--downstream", dest = "dwStream",
type = int, default = 0,
help = '''
<int> Length of the downstream sequence
'''
)
# parser.add_argument("-f", "--files", metavar="files",
# type=str, default=None,
# nargs='+',
# help='''Put your genotype files in the order you want to treat them\n you can add as many files as'''
# )
## To call the markers use args.marker
    ## To call the outfile use args.outfile
## The files to be treated are in the list args.genotype_files
args=parser.parse_args()
# ##########################################
# ## START SCRIPT HERE
# ## Check if mandatory options are well input
## Load Fastas:
#recordPep = SeqIO.index("/NAS/NGS/Hevea/Genome/Reyan7-33-97/Hbgenome.pep.fas", "fasta")
recordPep = SeqIO.index(args.subjectProtHandle, "fasta")
#recordGenome = SeqIO.index("/NAS/NGS/Hevea/Genome/Reyan7-33-97/Hbgenome.fas", "fasta")
recordGenome = SeqIO.index(args.subjectGenometHandle, "fasta")
## Load Blast result:
blastResult = pd.read_csv(args.blastHandle, sep = '\t', header = None)
    ## Assign the scaffold we will work with and the lengths needed for the tests
    ## This is looped over the entries read from the blast output
# protId = "scaffold4727_3605"
protIdList = importBlastOutput()
## Empty output file
with open(args.outputHandle, "w") as df:
pass
## Parse the list of protein ID
for i in protIdList:
protId = i
        ## Slice the description of the protein record to get the locus, scaffold and coordinates
prot = recordPep[protId]
locus = prot.description[ prot.description.find("locus=") : prot.description.find("length=") - 2 ]
scaff = locus[ locus.find("=") + 1 : locus.find("(") ]
coordinates = locus[ locus.find("(") + 1 : locus.find(")") ].split(",")
coordinates[0], coordinates[1] = int(coordinates[0]), int(coordinates[1])
coordinatesVerified = verifyCoordinates()
## Find strand and create fasta record,
fastaSeq = strand()
print(fastaSeq)
print("Sequence extracted from {0} to {1} in the strand {2}".format(coordinatesVerified[0], coordinatesVerified[1], coordinatesVerified[2]))
with open(args.outputHandle, "a") as df:
SeqIO.write(fastaSeq, df, "fasta")
print("########")
if __name__ == "__main__": __main__()
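## Example invocation (illustrative only; the input file names are assumptions):
##   python 4_extract_genomic_sequences.py -p subject.pep.fas -g subject_genome.fas \
##       -b blast_result.tsv -o output.fasta -u 2000 -d 200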
|
3nrique0/Tools_for_Bioinformatics
|
extract_fasta_from_blast/4_extract_genomic_sequences.py
|
Python
|
gpl-3.0
| 7,728
|
[
"BLAST"
] |
4ec169490930af23490ca9935c55207e8149c4418262745455f1e63201dfd9b7
|
# -*- test-case-name: xmantissa.test -*-
# Copyright 2008 Divmod, Inc. See LICENSE file for details
"""
Public interfaces used in Mantissa.
"""
from zope.interface import Interface, Attribute
from nevow.inevow import IRenderer
class IColumn(Interface):
"""
Represents a column that can be viewed via a scrolling table, and provides
hints & metadata about the column.
"""
def sortAttribute():
"""
@return: a sortable axiom.attribute, or None if this column cannot be
sorted
"""
def extractValue(model, item):
"""
@type model: L{xmantissa.tdb.TabularDataModel}
@param item: the L{axiom.item.Item} from which to extract column value
@return: the underlying value for this column
"""
def getType():
"""
returns a string describing the type of this column, or None
"""
def toComparableValue(value):
"""
Convert a value received from the client into one that can be compared
like-for-like with L{sortAttribute}, when executing an axiom query.
(Callers should note that this is new as of Mantissa 0.6.6, and be
prepared to deal with its absence in legacy code.)
"""
attributeID = Attribute(
"""
An ASCII-encoded str object uniquely describing this column.
""")
class ITemplateNameResolver(Interface):
"""
Loads Nevow document factories from a particular theme based on simple
string names.
"""
def getDocFactory(name, default=None):
"""
Retrieve a Nevow document factory for the given name.
@param name: a short string that names a fragment template for
development purposes.
@return: a Nevow docFactory
"""
class IPreferenceAggregator(Interface):
"""
Allows convenient retrieval of individual preferences
"""
def getPreferenceCollections():
"""
Return a list of all installed L{IPreferenceCollection}s
"""
def getPreferenceValue(key):
"""
Return the value of the preference associated with "key"
"""
class ISearchProvider(Interface):
"""
Represents an Item capable of searching for things
"""
def count(term):
"""
Return the number of items currently associated with the given
(unprocessed) search string
"""
def search(term, keywords=None, count=None, offset=0, sortAscending=True):
"""
Query for items which contain the given term.
@type term: C{unicode}
@param keywords: C{dict} mapping C{unicode} field name to C{unicode}
field contents. Search results will be limited to documents with
fields of these names containing these values.
@type count: C{int} or C{NoneType}
@type offset: C{int}, default is 0
@param sortAscending: should the results be sorted ascendingly
@type sortAscending: boolean
@rtype: L{twisted.internet.defer.Deferred}
@return: a Deferred which will fire with an iterable of
L{search.SearchResult} instances, representing C{count} results for the
unprocessed search represented by C{term}, starting at C{offset}. The
bounds of offset and count will be within the value last returned from
L{count} for this term.
"""
class ISearchAggregator(Interface):
"""
An Item responsible for interleaving and displaying search results
obtained from available ISearchProviders
"""
def count(term):
"""
same as ISearchProvider.count, but queries all search providers
"""
def search(term, keywords, count, offset, sortAscending):
"""
same as ISearchProvider.search, but queries all search providers
"""
def providers():
"""
returns the number of available search providers
"""
class IFulltextIndexer(Interface):
"""
A general interface to a low-level full-text indexer.
"""
def add(document):
"""
Add the given document to this index.
This method may only be called in the batch process (it will
synchronously invoke an indexer method which may block or cause a
segfault).
"""
def remove(document):
"""
Remove the given document from this index.
This method may be called from any process.
"""
class IFulltextIndexable(Interface):
"""
Something which can be indexed for later search.
"""
def uniqueIdentifier():
"""
@return: a C{str} uniquely identifying this item.
"""
def textParts():
"""
@return: an iterable of unicode strings to be indexed as the text of
this item.
"""
def keywordParts():
"""
@return: a C{dict} mapping C{str} to C{unicode} of additional
metadata. It will be possible to search on these fields using
L{ISearchAggregator.search}.
"""
def documentType():
"""
@return: a C{str} uniquely identifying the type of this item. Like
the return value of L{keywordParts}, it will be possible to search
for this using the C{"documentType"} key in the C{keywords} argument
to L{ISearchAggregator.search}.
"""
def sortKey():
"""
@return: A unicode string that will be used as the key when sorting
search results comprised of items of this type.
"""
class ISiteURLGenerator(Interface):
"""
Lowest-level APIs for generating URLs which refer to parts of a Mantissa
site.
"""
def cleartextRoot(hostname=None):
"""
Return the HTTP URL which is at the root of this site.
@param hostname: An optional unicode string which, if specified, will
be used as the hostname in the resulting URL, regardless of other
considerations.
@rtype: L{nevow.url.URL}
"""
def encryptedRoot(hostname=None):
"""
Return the HTTPS URL which is at the root of this site.
@param hostname: An optional unicode string which, if specified, will
be used as the hostname in the resulting URL, regardless of other
considerations.
@rtype: L{nevow.url.URL}
"""
def rootURL(request):
"""
Return the URL for the root of this website which is appropriate to use
in links generated in response to the given request.
@type request: L{twisted.web.http.Request}
@param request: The request which is being responded to.
@rtype: L{URL}
@return: The location at which the root of the resource hierarchy for
this website is available.
NOTE: This function may take an URL (which is the "base" URL, relative
to which all links in the ultimate view context will be interpreted)
instead of a Request in the future.
"""
class IStaticShellContent(Interface):
"""
Represents per-store header/footer content that's used to buttress
the shell template
"""
def getHeader():
"""
Returns stan to be added to the page header. Can return None
if no header is desired.
"""
def getFooter():
"""
Returns stan to be added to the page footer. Can return None
if no footer is desired.
"""
class IViewer(Interface):
def roleIn(userStore):
"""
Retrieve a L{xmantissa.sharing.Role} object for the user that this
viewer represents in the provided user-store.
@param userStore: a store that contains some sharing roles.
@type userStore: L{axiom.store.Store}
@rtype: L{xmantissa.sharing.Role}
"""
class IWebViewer(IViewer):
"""
An object that provides navigation bits for web content produced by
Mantissa applications.
"""
def wrapModel(model):
"""
Converts application-provided model objects to L{IResource} providers.
@param model: An L{Item} or L{SharedProxy}.
"""
class ISiteRootPlugin(Interface):
"""
Plugin Interface for functionality provided at the root of the website.
This interface is queried for on the Store by website.WebSite when
processing an HTTP request. Things which are installed on a Store using
s.powerUp(x, ISiteRootPlugin) will be visible to individual users when
installed on a user's store or visible to the general public when installed
on a top-level store.
"""
def resourceFactory(segments):
"""
This is deprecated; implement L{produceResource} instead.
Get an object that provides IResource.
@type segments: list of str, representing decoded requested URL
segments
@return: None or a two-tuple of the IResource provider and the segments
to pass to its locateChild.
"""
del resourceFactory # It's deprecated, so when we verifyObject, we
# don't want to check for this.
def produceResource(request, segments, webViewer):
"""
Get an object that provides IResource.
@param request: An L{IRequest}.
@type segments: list of str, representing decoded requested URL
segments
@param webViewer: An L{IWebViewer}.
@return: None or a two-tuple of the IResource provider and the segments
to pass to its locateChild.
"""
class IMantissaSite(Interface):
# XXX this documentation is terrible, rephrase to describe something
# abstract.
"""
This is different from ISiteRootPlugin because it is invoked in a different
context. ISiteRootPlugin is a plugin powerup interface that lots of
different things can provide. This is an interface that only the site
needs to provide, for L{CustomizedPublicPage} to do stuff with.
"""
def siteProduceResource(request, segments, webViewer):
"""
Give me a resource based on a bunch of ISiteRootPlugin powerups.
"""
class ISessionlessSiteRootPlugin(Interface):
"""
L{ISessionlessSiteRootPlugin} powerups installed on a site store are
powerups which can produce a resource to respond to a particular request,
even if the browser in question has no session.
This powerup interface exists mainly to allow applications to provide
resources which are not subject to the redirect that L{nevow.guard}
introduces. This can be important for interacting with limited user-agents
that do not support cookies or redirects.
However, this is a temporary workaround, as L{nevow.guard} should have this
redirect requirement removed. See ticket #2494 for details.
"""
def sessionlessProduceResource(request, segments):
"""
Return a 2-tuple of C{(resource, segments)} if a resource can be found
to match this request and its segments, or None.
"""
def resourceFactory(segments):
"""
This is deprecated; implement L{sessionlessProduceResource} instead.
Get an object that provides IResource.
@type segments: list of str, representing decoded requested URL
segments
@return: None or a two-tuple of the IResource provider and the segments
to pass to its locateChild.
"""
del resourceFactory # It's deprecated, so when we verifyObject, we
# don't want to check for this.
class ICustomizable(Interface):
"""
Factory for creating IResource objects which can be customized for
a specific user.
"""
def customizeFor(avatarName):
"""
Retrieve a IResource provider specialized for the given avatar.
@type avatarName: C{unicode}
@param avatarName: The user for whom to return a specialized resource.
@rtype: C{IResource}
@return: A public-page resource, possibly customized for the
indicated user.
"""
class ICustomizablePublicPage(Interface):
"""
Don't use this. Delete it if you notice it still exists but
upgradePublicWeb2To3 has been removed.
"""
class IWebTranslator(Interface):
"""
Provide methods for naming objects on the web, and vice versa.
"""
def fromWebID(webID):
"""
@param webID: A string that identifies an item through this translator.
@return: an Item, or None if no Item is found.
"""
def toWebID(item):
"""
@param item: an item in the same store as this translator.
@return: a string, shorter than 80 characters, which is an opaque
identifier that may be used to look items up through this translator
using fromWebID (or the legacy 'linkFrom')
"""
def linkTo(storeID):
"""
@param storeID: The Store ID of an Axiom item.
@rtype: C{str}
@return: An URL which refers to the item with the given Store ID.
"""
def linkFrom(webID):
"""
The inverse of L{linkTo}
"""
class INavigableElement(Interface):
"""Tab interface used by the web navigation plugin system.
Plugins for this interface are retrieved when generating the navigation
user-interface. Each result has C{getTabs} invoked, after which the
results are merged and the result used to construct various top- and
secondary-level \"tabs\" which can be used to visit different parts of
the application.
"""
def getTabs():
"""Retrieve data about this elements navigation.
This returns a list of C{xmantissa.webnav.Tab}s.
For example, a powerup which wanted to install navigation under the
Divmod tab would return this list:::
[Tab("Divmod", self.storeID, 1.0
children=[
Tab("Summary", self.storeID, 1.0),
Tab("Inbox", self.inbox.storeID, 0.8)
])]
"""
class INavigableFragment(Interface):
"""
Register an adapter to this interface in order to provide web UI content
within the context of the 'private' application with navigation, etc.
You will still need to produce some UI by implementing INavigableElement
and registering a powerup for that as well, which allows users to navigate
to this object.
The primary requirement of this interface is that providers of it also
provide L{nevow.inevow.IRenderer}. The easiest way to achieve this is to
subclass L{nevow.page.Element}.
"""
title = Attribute(
"""
The title of this fragment, which will be used in the <title> tag of
the page displaying it, or otherwise outside the content area of the
fragment but associated with it.
""")
fragmentName = Attribute(
"""
The name of this fragment; a string used to look up the template from
the current theme(s). This is done by implementors of
L{IWebViewer.wrapModel}.
This attribute may be set to None or left unset if you do not want this
type of customization. However, if you do not set it, you must set a
docFactory yourself, and your docFactory will not be changed to
accommodate the user's preferred theme.
""")
docFactory = Attribute(
"""
Nevow-style docFactory object. Must be set if fragmentName is not.
""")
def head():
"""
Provide some additional content to be included in the <head>
section of the page when this fragment is being rendered.
May return None if nothing needs to be added there.
This method is optional. If not implemented, nothing will be added to
the head.
"""
def locateChild(ctx, segments):
"""
INavigableFragments may optionally provide a locateChild method similar
to the one found on L{nevow.inevow.IResource.locateChild}. You may
implement this method if your INavigableFragment contains any resources
which it may need to refer to with hyperlinks when rendered. Please
note that an INavigableFragment may be rendered on any page within an
application, and that hyperlinks to resources returned from this method
must always be to /private/<your-webid>/..., not the current page's
URL, if you are using the default
L{xmantissa.webapp.PrivateApplication} URL dispatcher.
(There is a slight bug in the calling code's handling of Deferreds.
If you wish to delegate to normal child-resource handling, you must
return rend.NotFound exactly, not a Deferred which fires it.)
"""
def setFragmentParent(fragmentParent):
"""
Sets the L{LiveFragment} (or L{LivePage}) which is the logical parent
of this fragment.
See L{nevow.athena._LiveMixin.setFragmentParent}'s docstring for more
information.
"""
def customizeFor(userID):
"""
This method is optional. If not provided, it is the same as returning
'self'.
When a logged-in user is viewing an L{INavigableFragment} provider,
this method will be invoked with that user's ID, and the returned
navigable fragment will be presented to the user instead.
@param userID: a user@host formatted string, indicating what user is
viewing this.
@type userID: L{unicode}
@return: a fragment customized for the provided user-ID.
@rtype: L{INavigableFragment}
"""
class ITab(Interface):
"""
Abstract, non-UI representation of a tab that shows up in the UI. The only
concrete representation is xmantissa.webnav.Tab
"""
class IMessageReceiver(Interface):
"""
An L{IMessageReceiver} is an object that can receive messages via
inter-store messaging, L{xmantissa.interstore}. Share an item with this
interface and it will be able to receive messages queued with
L{xmantissa.interstore.MessageQueue.queueMessage}.
"""
def messageReceived(value, sender, target):
"""
@param value: A value to be sent as the body of the message.
@type value: L{xmantissa.interstore.Value}
@param sender: a L{xmantissa.sharing.Identifier}, identifying the sender
of the message being received.
@param target: a L{xmantissa.sharing.Identifier}, identifying the
object receiving the message, i.e. self.
@return: a value for the response.
@rtype: L{xmantissa.interstore.Value}
"""
class IDeliveryConsequence(Interface):
"""
A provider of L{IDeliveryConsequence} can receive notifications of messages
being successfully handled by a remote store via
L{IMessageReceiver.messageReceived}.
Providers of this interface must also be L{Item}s in Axiom stores so that
they can persist between process invocations along with the queued message
it is waiting for a response to.
"""
def answerReceived(value, originalValue, originalSender, originalTarget):
"""
An answer was received to a message sent via
L{xmantissa.messaging.MessageQueue.queueMessage} with this
L{IDeliveryConsequence} provider as its consequence.
@param value: the value of the answer.
@type value: L{xmantissa.interstore.Value}
@param originalValue: the C{value} argument passed to the
original L{IMessageReceiver.messageReceived} that this is a response
to.
@type originalData: L{xmantissa.interstore.Value}
@param originalSender: the C{sender} argument passed to the original
L{IMessageReceiver.messageReceived} that this is a response to.
@param originalTarget: the C{target} argument passed to the original
L{IMessageReceiver.messageReceived} that this is a response to.
"""
class IMessageRouter(Interface):
"""
An L{IMessageRouter} is an object that can route messages between different
users. No guarantees are provided; all message routing is potentially
unreliable.
The suggested API for applications is
L{xmantissa.interstore.AMPMessenger.messageRemote}. Applications with
specialized serialization needs might use the medium-level
L{xmantissa.messaging.MessageQueue.queueMessage} instead.
It is unlikely that you will want to use this interface unless you are
implementing your own routing mechanism. Any user of this interface should
take care to test the failure cases, since most transports which implement
this interface will, in practice, be (statistically speaking) extremely
reliable and fail only in the most obscure cases.
Application code should always be using something higher level, since this
interface provides no mechanism for transactionality guarantees, and
different providers may only know how to route to a subset of all possible
destinations. For example, the implementation installed on a user store
will only know how to route to that user.
"""
def routeMessage(sender, target, value, messageID):
"""
Route a message to the given target.
@param sender: The description of the shared item that is the sender of
the message.
@type sender: L{xmantissa.sharing.Identifier}
@param target: The description of the shared item that is the target
of the message.
@type target: L{xmantissa.sharing.Identifier}
@param messageID: An identifier for the message, unique to a given
sender.
@type messageID: L{int}
@param value: The value of the message to be delivered.
@type value: L{xmantissa.interstore.Value}
"""
def routeAnswer(originalSender, originalTarget, value, messageID):
"""
Route an answer to a message previously queued to a particular user.
@param originalSender: The original sender of the message; in this
case, the target of the answer.
@type originalSender: L{xmantissa.sharing.Identifier}
@param originalTarget: The original target of the message; in this
        case, the sender of the answer.
@type originalTarget: L{xmantissa.sharing.Identifier}
@param messageID: The unique identifier for the message, as passed to
L{IMessageRouter.routeMessage}.
@type messageID: L{int}
@param value: The value of the answer to be delivered. This is not the
value of the original message, but a separate value describing the
result of processing the message.
@type value: L{xmantissa.interstore.Value}
@return: a L{Deferred} which fires with None if the message is
successfully delivered, or fails with L{MessageTransportError} if the
message could not be delivered.
"""
class IBenefactor(Interface):
"""
Make accounts for users and give them things to use.
"""
def endow(ticket, avatar):
"""
Make a user and return it. Give the newly created user new powerups or
other functionality.
This is only called when the user has confirmed the email address
passed in by receiving a message and clicking on the link in the
provided email.
"""
def deprive(ticket, avatar):
"""
Remove the increment of functionality or privilege that we have previously
bestowed upon the indicated avatar.
"""
class IBenefactorFactory(Interface):
"""A factory which describes and creates IBenefactor providers.
"""
def dependencies():
"""
Return an iterable of other IBenefactorFactory providers that this one
depends upon, and must be installed before this one is invoked.
"""
def parameters():
"""
Return a description of keyword parameters to be passed to instantiate.
@rtype: A list of 4-tuples. The first element of each tuple
is a keyword argument to L{instantiate}. The second describes
the type of prompt to present for this field. The third is a
        one-argument callable which should be invoked with a string the
user supplies and should return the value for this keyword
argument. The fourth is a description of the purpose of this
keyword argument.
"""
def instantiate(**kw):
"""
Create an IBenefactor provider and return it.
"""
class IQ2QService(Interface):
q2qPortNumber = Attribute(
"""
The TCP port number on which to listen for Q2Q connections.
""")
inboundTCPPortNumber = Attribute(
"""
The TCP port number on which to listen for Q2Q data connections.
""")
publicIP = Attribute(
"""
Dotted-quad format string representing the IP address via
which this service is exposed to the public internet.
""")
udpEnabled = Attribute(
"""
A boolean indicating whether or not PTCP connections will be
allowed or attempted.
""")
def listenQ2Q(fromAddress, protocolsToFactories, serverDescription):
"""
@see: L{vertex.q2q.Q2QService.connectQ2Q}
"""
def connectQ2Q(fromAddress, toAddress, protocolName, protocolFactory,
usePrivateCertificate=None, fakeFromDomain=None,
chooser=None):
"""
@see: L{vertex.q2q.Q2QService.connectQ2Q}
"""
class IPreferenceCollection(Interface):
"""
I am an item that groups preferences into logical chunks.
"""
def getPreferences():
"""
Returns a mapping of preference-name->preference-value.
"""
def getSections():
"""
Returns a sequence of INavigableFragments or None. These fragments
will be displayed alongside preferences under this collection's
settings group.
"""
def getPreferenceAttributes():
"""
Returns a sequence of L{xmantissa.liveform.Parameter} instances - one
for each preference. The names of the parameters should correspond
to the attribute names of the preference attributes on this item.
"""
def getTabs():
"""
Like L{ixmantissa.INavigableElement.getTabs}, but for preference tabs
"""
class ITemporalEvent(Interface):
"""
I am an event which happens at a particular time and has a specific duration.
"""
startTime = Attribute("""
An extime.Time. The start-point of this event.
""")
endTime = Attribute("""
An extime.Time. The end-point of this event.
""")
class IDateBook(Interface):
"""
A source of L{IAppointment}s which have times associated with them.
"""
def eventsBetween(startTime, endTime):
"""
Retrieve events which overlap a particular range.
@param startTime: an L{epsilon.extime.Time} that begins a range.
@param endTime: an L{epsilon.extime.Time} that ends a range.
@return: an iterable of L{ITemporalEvent} providers.
"""
class IContactType(Interface):
"""
A means by which communication with a L{Person} might occur. For example,
a telephone number.
"""
allowMultipleContactItems = Attribute("""
C{bool} indicating whether more than one contact item of this type can be
    created for a particular L{Person}.
""")
def getParameters(contactInfoItem):
"""
Return some liveform parameters, one for each piece of information that is
needed to construct a contact info item of this type.
If C{contactInfoItem} is supplied, implementations may return C{None}
to indicate that the given contact item is not editable.
@param contactInfoItem: An existing contact info item of this type, or
C{None}. If not C{None}, then the current values of the contact info
type will be used to provide suitable defaults for the parameters that
are returned.
@type contactInfoItem: L{axiom.item.Item} subclass.
@return: Some liveform parameters or C{None}.
@rtype: C{NoneType} or C{list} of L{xmantissa.liveform.Parameter}.
"""
def createContactItem(person, **parameters):
"""
Create a new instance of this contact type for the given person.
@type person: L{Person}
@param person: The person to whom the contact item pertains.
@param parameters: The form input key/value pairs as returned by the
L{xmantissa.liveform.LiveForm} constructed from L{getParameters}'s
parameter instances.
@return: The created contact item or C{None} if one was not created for
any reason.
"""
def getContactItems(person):
"""
Return an iterator of contact items created by this contact type for
the given person.
@type person: L{Person}
@param person: The person to whom the contact item pertains.
"""
def uniqueIdentifier():
"""
Return a C{unicode} string which, for the lifetime of a single Python
process, uniquely identifies this type of contact information.
"""
def descriptiveIdentifier():
"""
A descriptive name for this type of contact information.
@rtype: C{unicode}
"""
def getEditFormForPerson(person):
"""
Return a L{LiveForm} which will allow the given person's contact items
to be edited.
@type person: L{xmantissa.people.Person}
@rtype: L{xmantissa.liveform.LiveForm}
"""
def editContactItem(contact, **parameters):
"""
Update the given contact item to reflect the new parameters.
@param **parameters: The form input key/value pairs, as produced by the
L{LiveForm} returned by L{getEditFormForPerson}.
"""
def getContactGroup(contactItem):
"""
Return a L{xmantissa.people.ContactGroup} describing the group
affinity of a contact item of the type created by this contact type,
or C{None} if the item doesn't have an explicit group.
@param contactItem: A contact item.
@rtype: L{xmantissa.people.ContactGroup} or C{NoneType}
"""
def getReadOnlyView(contact):
"""
Return an L{IRenderer} which will display the given contact.
"""
class IPeopleFilter(Interface):
"""
Object which collects L{Person} items into a logical group.
"""
filterName = Attribute("""
The name of this filter; something which describes the type of people
included in its group.""")
def getPeopleQueryComparison(store):
"""
Return a query comparison describing the subset of people in the given
store which are included in this group.
@type store: L{axiom.store.Store}
@rtype: L{axiom.iaxiom.IComparison}
"""
class IOrganizerPlugin(Interface):
"""
Powerup which provides additional functionality to Mantissa People.
Organizer plugins add support for new kinds of person data (for example,
one Organizer plugin might add support for contact information: physical
addresses, email addresses, telephone numbers, etc. Another plugin might
retrieve and aggregate blog posts, or provide an interface for configuring
sharing permissions).
"""
name = Attribute('The C{unicode} display name of this plugin.')
def getContactTypes():
"""
Return an iterator of L{IContactType} providers supplied by this
plugin.
"""
def getPeopleFilters():
"""
Return an iterator of L{IPeopleFilter} providers supplied by this
plugin.
"""
def personCreated(person):
"""
Called when a new L{Person} is created.
"""
def personNameChanged(person, oldName):
"""
Called after a L{Person} item's name has been changed.
@type person: L{Person}
@param person: The person whose name is being changed.
@type oldName: C{unicode}
        @param oldName: The previous value of L{Person.name}.
"""
def contactItemCreated(contact):
"""
Called when a new contact item is created.
@param contact: The new contact item. It may be any object returned by
an L{IContactType.createContactItem} implementation.
"""
def contactItemEdited(contact):
"""
Called when an existing contact item has been edited.
@param contact: The contact item.
"""
def personalize(person):
"""
Return some plugin-specific state for the given person.
@param person: A L{xmantissa.person.Person} instance.
@return: Something renderable by Nevow.
"""
class IPersonFragment(Interface):
"""
Deprecated. Nothing in Mantissa cares about this interface.
"""
class IOffering(Interface):
"""
Describes a product, service, application, or other unit of functionality
which can be added to a Mantissa server.
"""
name = Attribute("""
What it is called.
""")
description = Attribute("""
What it is.
""")
siteRequirements = Attribute("""
A list of 2-tuples of (interface, powerupClass) of Axiom Powerups which
will be installed on the Site store when this offering is installed if the
store cannot be adapted to the given interface.
""")
appPowerups = Attribute("""
A list of Axiom Powerups which will be installed on the App store when this
offering is installed. May be None if no App store is required (in this
case, none will be created).
""")
installablePowerups = Attribute("""
A C{list} of three-tuples each of which gives the name, description, and
powerup item class for a unit of functionality which this offering provides
for installation on a user store.
""")
loginInterfaces = Attribute("""
A list of 2-tuples of (interface, description) of interfaces
implemented by avatars provided by this offering, and human
readable descriptions of the service provided by logging into
them. Used by the statistics reporting system to label graphs of
login activity.
""")
themes = Attribute("""
Sequence of L{xmantissa.webtheme.XHTMLDirectoryTheme} instances,
constituting themes that belong to this offering
""")
staticContentPath = Attribute("""
A L{FilePath<twisted.python.filepath.FilePath>} referring to the root of
the static content hierarchy for this offering. This directory will be
served automatically by Mantissa at C{/static/<offering name>/}.
May be C{None} if there is no static content.
""")
version = Attribute("""
L{twisted.python.versions.Version} instance indicating the version of
this offering. If included, the Version's value will be displayed to
users once the offering is installed. Defaults to None.
""")
class IOfferingTechnician(Interface):
"""
Support installation, uninstallation, and inspection of offerings.
"""
def getInstalledOfferingNames():
"""
Return a C{list} of C{unicode} strings giving the names of all
installed L{IOffering}s.
"""
def getInstalledOfferings():
"""
Return a mapping from the names of installed L{IOffering} plugins to
the plugins themselves.
"""
def installOffering(offering):
"""
Install the given offering plugin using the given configuration.
@type offering: L{IOffering}
@param offering: The offering to install.
@raise L{xmantissa.offering.OfferingAlreadyInstalled}: If an offering
with the same name as C{offering} is already installed.
@return: The C{InstalledOffering} item created.
"""
class ISignupMechanism(Interface):
"""
Describe an Item which can be instantiated to add a means of
signing up to a Mantissa server.
"""
name = Attribute("""
What it is called.
""")
description = Attribute("""
What it does.
""")
itemClass = Attribute("""
An Axiom Item subclass which will be instantiated and added to the
site store when this signup mechanism is selected. The class
should implement L{ISessionlessSiteRootPlugin} or
L{ISiteRootPlugin}.
""")
configuration = Attribute("""
XXX EDOC ME
""")
class IProtocolFactoryFactory(Interface):
"""
Powerup interface for Items which can create Twisted protocol factories.
"""
def getFactory():
"""
Return a Twisted protocol factory.
"""
class IBoxReceiverFactory(Interface):
"""
Powerup interface for Items which can create L{IBoxReceiver} providers to
be made accessible via the standard Mantissa AMP server.
"""
protocol = Attribute(
"""
A short string describing the commands (ie, the protocol) provided by
the L{IBoxReceiver} this implementation can create.
It is B{strongly} recommended that this be a versioned, URI-style
identifier, after the fashion of XML namespace specifiers. For
example, the first version of a Divmod, Inc.-provided chat protocol
might use I{https://divmod.com/ns/funny-chat}. As no format
describing AMP command sets (or other protocols built on AMP) is yet
defined, there is no requirement that this URI be resolvable to an
actual resource. Its purpose at this time is merely to be unique.
""")
def getBoxReceiver():
"""
Return an L{IBoxReceiver} which will be hooked up to an AMP connection
and have messages sent to it.
"""
class IParameter(Interface):
"""
Description of a single variable which will take on a value from external
input and be used to perform some calculation or action.
For example, an HTML form is a collection of IParameters, most likely one
per input tag. When POSTed, each input supplies its text value as the
external input to a corresponding IParameter provider and the resulting
collection is used to respond to the POST somehow.
NOTE: This interface is highly unstable and subject to grossly incompatible
changes.
"""
# XXX - These shouldn't be attributes of IParameter, I expect. They are
    # both really view things. Either they go into the template which is
# used for this parameter (as an explanation to a user what the parameter
# is), or some code which creates the view supplies them as parameters to
# that object (in which case, it's probably more of a unique identifier in
# that view context for this parameter). -exarkun
name = Attribute(
"""
A short C{unicode} string uniquely identifying this parameter within
the context of a collection of L{IParameter} providers.
""")
label = Attribute(
"""
        A short C{unicode} string giving a human-readable name for this
        parameter, suitable for display to the user alongside its input.
""")
# XXX - Another thing which belongs on the view. Who even says this will
# be rendered to an HTML form?
type = Attribute(
"""
One of C{liveform.TEXT_INPUT}, C{liveform.PASSWORD_INPUT},
C{liveform.TEXTAREA_INPUT}, C{liveform.FORM_INPUT},
C{liveform.RADIO_INPUT}, or C{liveform.CHECKBOX_INPUT} indicating the
kind of input interface which will be presented for this parameter.
""")
# XXX - This shouldn't be an attribute of IParameter. It's intended to be
# displayed to end users, it belongs in a template.
description = Attribute(
"""
A long C{unicode} string explaining the meaning or purpose of this
parameter. May be C{None} to provide the end user with an unpleasant
experience.
""")
# XXX - At this level, a default should be a structured object, not a
# unicode string. There is presently no way to serialize a structured
# object into the view, though, so we use unicode here.
default = Attribute(
"""
A C{unicode} string which will be initially presented in the view as
the value for this parameter, or C{None} if no such value should be
presented.
""")
def viewFactory(parameter, default):
"""
        @type parameter: L{IParameter} provider
        @param parameter: The parameter for which to create a view.
@param default: An object to return if no view can be created for the
given parameter.
@rtype: L{IParameterView} provider
"""
# XXX - This is most definitely a view thing.
def compact():
"""
Mutate the parameter so that when a view object is created for it, it
is more visually compact than it would otherwise have been.
"""
def fromInputs(inputs):
"""
Extract the value for this parameter from the given submission
dictionary and return a structured value for this parameter.
"""
class IParameterView(IRenderer):
"""
View interface for an individual LiveForm parameter.
"""
patternName = Attribute("""
Short string giving the name of the pattern for this parameter view. Must
be one of C{'text'}, C{'password'}, C{'repeatable-form'} or C{'choice'}.
""")
def setDefaultTemplate(tag):
"""
Called by L{xmantissa.liveform.LiveForm} to specify the default
template for this view.
@type tag: L{nevow.stan.Tag} or C{nevow.stan.Proto}
"""
class IPublicPage(Interface):
"""
Only needed for schema compatibility. This interface should be deleted once
Axiom gains the ability to remove interfaces from existing stores.
"""
class IOneTimePadGenerator(Interface):
"""
An object which can generate single-use pads for authentication purposes.
"""
def generateOneTimePad(userStore):
"""
Generate a one-time pad for the user who lives in the given store.
@param userStore: A user's store.
@type userStore: L{axiom.store.Store}
@rtype: C{str}
"""
class ITerminalServerFactory(Interface):
"""
A factory for L{ITerminalProtocol} providers which can create objects to
handle input from and produce output to a terminal interface.
"""
name = Attribute(
"A short, user-facing C{unicode} string which identifies the "
"functionality provided by this factory.")
def buildTerminalProtocol(shellViewer):
"""
Create and return a new L{ITerminalProtocol} provider to handle
        interaction with a user.
@param shellViewer: An L{IViewer} provider
"""
__all__ = [
'IColumn', 'ITemplateNameResolver', 'IPreferenceAggregator',
'ISearchProvider', 'ISearchAggregator', 'IFulltextIndexer',
'IFulltextIndexable', 'IStaticShellContent', 'ISiteRootPlugin',
'ISessionlessSiteRootPlugin', 'ICustomizable',
'ICustomizablePublicPage', 'IWebTranslator', 'INavigableElement',
'INavigableFragment', 'ITab', 'IBenefactor', 'IBenefactorFactory',
'IQ2QService', 'IPreferenceCollection', 'ITemporalEvent', 'IDateBook',
'IOrganizerPlugin', 'IPersonFragment', 'IOffering', 'ISignupMechanism',
'IProtocolFactoryFactory', 'IParameterView', 'IOneTimePadGenerator',
'ITerminalServerFactory',
]
|
twisted/mantissa
|
xmantissa/ixmantissa.py
|
Python
|
mit
| 43,809
|
[
"VisIt"
] |
b49c26cb40051a861b8e9cc3b24141edb0d1f30fc60e605ac04bfb6b9ea0af9c
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy.misc.doccer import inherit_docstring_from
from scipy import optimize
from scipy import integrate
import scipy.special as sc
from scipy._lib._numpy_compat import broadcast_to
from . import _stats
from ._tukeylambda_stats import (tukeylambda_variance as _tlvar,
tukeylambda_kurtosis as _tlkurt)
from ._distn_infrastructure import (get_distribution_names, _kurtosis,
_lazyselect, _lazywhere, _ncx2_cdf,
_ncx2_log_pdf, _ncx2_pdf,
rv_continuous, _skew, valarray)
from ._constants import _XMIN, _EULER, _ZETA3, _XMAX, _LOGXMAX
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self, x, n):
return 1.0 - sc.smirnov(n, x)
def _ppf(self, q, n):
return sc.smirnovi(n, 1.0 - q)
ksone = ksone_gen(a=0.0, name='ksone')
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self, x):
return 1.0 - sc.kolmogorov(x)
def _sf(self, x):
return sc.kolmogorov(x)
def _ppf(self, q):
return sc.kolmogi(1.0 - q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
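# Usage sketch (comment only, not part of the library): for a two-sided KS
# statistic D computed from n samples, the asymptotic p-value is approximately
#     kstwobign.sf(D * np.sqrt(n))
# which is essentially what scipy.stats.kstest relies on for large n.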
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = np.sqrt(2*np.pi)
_norm_pdf_logC = np.log(_norm_pdf_C)
def _norm_pdf(x):
return np.exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return sc.ndtr(x)
def _norm_logcdf(x):
return sc.log_ndtr(x)
def _norm_ppf(q):
return sc.ndtri(q)
def _norm_sf(x):
return _norm_cdf(-x)
def _norm_logsf(x):
return _norm_logcdf(-x)
def _norm_isf(q):
return -_norm_ppf(q)
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
The survival function, ``norm.sf``, is also referred to as the
Q-function in some contexts (see, e.g.,
`Wikipedia's <https://en.wikipedia.org/wiki/Q-function>`_ definition).
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.standard_normal(self._size)
def _pdf(self, x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self, x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_sf(x)
def _logsf(self, x):
return _norm_logsf(x)
def _ppf(self, q):
return _norm_ppf(q)
def _isf(self, q):
return _norm_isf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(np.log(2*np.pi)+1)
@inherit_docstring_from(rv_continuous)
def fit(self, data, **kwds):
"""%(super)s
This function (norm_gen.fit) uses explicit formulas for the maximum
likelihood estimation of the parameters, so the `optimizer` argument
is ignored.
"""
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
data = np.asarray(data)
if floc is None:
loc = data.mean()
else:
loc = floc
if fscale is None:
scale = np.sqrt(((data - loc)**2).mean())
else:
scale = fscale
return loc, scale
norm = norm_gen(name='norm')
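# Usage sketch (comment only, not part of the library): with loc and scale both
# free, norm.fit reduces to the sample mean and the biased (ddof=0) standard
# deviation, e.g.
#
#     >>> import numpy as np
#     >>> from scipy.stats import norm
#     >>> norm.fit(np.array([1.0, 2.0, 3.0, 4.0]))   # doctest: +SKIP
#     (2.5, 1.118033988749895)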
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x, a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.
`alpha` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
return 1.0/(x**2)/_norm_cdf(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*np.log(x) + _norm_logpdf(a-1.0/x) - np.log(_norm_cdf(a))
def _cdf(self, x, a):
return _norm_cdf(a-1.0/x) / _norm_cdf(a)
def _ppf(self, q, a):
return 1.0/np.asarray(a-sc.ndtri(q*_norm_cdf(a)))
def _stats(self, a):
return [np.inf]*2 + [np.nan]*2
alpha = alpha_gen(a=0.0, name='alpha')
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.cos(2*x)
def _cdf(self, x):
return np.sin(x+np.pi/4)**2.0
def _ppf(self, q):
return np.arcsin(np.sqrt(q))-np.pi/4
def _stats(self):
return 0.0, np.pi*np.pi/16-0.5, 0.0, -2*(np.pi**4 - 96)/(np.pi*np.pi-8)**2
def _entropy(self):
return 1-np.log(2)
anglit = anglit_gen(a=-np.pi/4, b=np.pi/4, name='anglit')
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for ``0 < x < 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
return 1.0/np.pi/np.sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/np.pi*np.arcsin(np.sqrt(x))
def _ppf(self, q):
return np.sin(np.pi/2.0*q)**2.0
def _stats(self):
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
class FitDataError(ValueError):
# This exception is raised by, for example, beta_gen.fit when both floc
# and fscale are fixed and there are values in the data not in the open
# interval (floc, floc+fscale).
def __init__(self, distr, lower, upper):
self.args = (
"Invalid values in `data`. Maximum likelihood "
"estimation with {distr!r} requires that {lower!r} < x "
"< {upper!r} for each x in `data`.".format(
distr=distr, lower=lower, upper=upper),
)
class FitSolverError(RuntimeError):
# This exception is raised by, for example, beta_gen.fit when
# optimize.fsolve returns with ier != 1.
def __init__(self, mesg):
emsg = "Solver for the MLE equations failed to converge: "
emsg += mesg.replace('\n', '')
self.args = (emsg,)
def _beta_mle_a(a, b, n, s1):
# The zeros of this function give the MLE for `a`, with
# `b`, `n` and `s1` given. `s1` is the sum of the logs of
# the data. `n` is the number of data points.
psiab = sc.psi(a + b)
func = s1 - n * (-psiab + sc.psi(a))
return func
def _beta_mle_ab(theta, n, s1, s2):
# Zeros of this function are critical points of
# the maximum likelihood function. Solving this system
# for theta (which contains a and b) gives the MLE for a and b
# given `n`, `s1` and `s2`. `s1` is the sum of the logs of the data,
# and `s2` is the sum of the logs of 1 - data. `n` is the number
# of data points.
a, b = theta
psiab = sc.psi(a + b)
func = [s1 - n * (-psiab + sc.psi(a)),
s2 - n * (-psiab + sc.psi(b))]
return func
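# For reference (comment added for exposition, not part of the library): with
# data x_1..x_n in (0, 1), s1 = sum(log(x_i)) and s2 = sum(log(1 - x_i)), the
# beta(a, b) log-likelihood is stationary where
#     n*(psi(a + b) - psi(a)) + s1 = 0
#     n*(psi(a + b) - psi(b)) + s2 = 0
# which are exactly the residuals returned by _beta_mle_a and _beta_mle_ab.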
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
gamma(a+b) * x**(a-1) * (1-x)**(b-1)
beta.pdf(x, a, b) = ------------------------------------
gamma(a)*gamma(b)
for ``0 < x < 1``, ``a > 0``, ``b > 0``, where ``gamma(z)`` is the gamma
function (`scipy.special.gamma`).
`beta` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, a, b):
return self._random_state.beta(a, b, self._size)
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
lPx = sc.xlog1py(b - 1.0, -x) + sc.xlogy(a - 1.0, x)
lPx -= sc.betaln(a, b)
return lPx
def _cdf(self, x, a, b):
return sc.btdtr(a, b, x)
def _ppf(self, q, a, b):
return sc.btdtri(a, b, q)
def _stats(self, a, b):
mn = a*1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*np.sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*np.sqrt(a + b + 1) / (a + b + 2) / np.sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a, b))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
"""%(super)s
In the special case where both `floc` and `fscale` are given, a
`ValueError` is raised if any value `x` in `data` does not satisfy
`floc < x < floc + fscale`.
"""
# Override rv_continuous.fit, so we can more efficiently handle the
# case where floc and fscale are given.
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
f1 = (kwds.get('f1', None) or kwds.get('fb', None) or
kwds.get('fix_b', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None or fscale is None:
# do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
if f0 is not None and f1 is not None:
# This check is for consistency with `rv_continuous.fit`.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Special case: loc and scale are constrained, so we are fitting
# just the shape parameters. This can be done much more efficiently
# than the method used in `rv_continuous.fit`. (See the subsection
# "Two unknown parameters" in the section "Maximum likelihood" of
# the Wikipedia article on the Beta distribution for the formulas.)
# Normalize the data to the interval [0, 1].
data = (np.ravel(data) - floc) / fscale
if np.any(data <= 0) or np.any(data >= 1):
raise FitDataError("beta", lower=floc, upper=floc + fscale)
xbar = data.mean()
if f0 is not None or f1 is not None:
# One of the shape parameters is fixed.
if f0 is not None:
# The shape parameter a is fixed, so swap the parameters
# and flip the data. We always solve for `a`. The result
# will be swapped back before returning.
b = f0
data = 1 - data
xbar = 1 - xbar
else:
b = f1
# Initial guess for a. Use the formula for the mean of the beta
# distribution, E[x] = a / (a + b), to generate a reasonable
# starting point based on the mean of the data and the given
# value of b.
a = b * xbar / (1 - xbar)
# Compute the MLE for `a` by solving _beta_mle_a.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_a, a,
args=(b, len(data), np.log(data).sum()),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a = theta[0]
if f0 is not None:
# The shape parameter a was fixed, so swap back the
# parameters.
a, b = b, a
else:
# Neither of the shape parameters is fixed.
# s1 and s2 are used in the extra arguments passed to _beta_mle_ab
# by optimize.fsolve.
s1 = np.log(data).sum()
s2 = sc.log1p(-data).sum()
# Use the "method of moments" to estimate the initial
# guess for a and b.
fac = xbar * (1 - xbar) / data.var(ddof=0) - 1
a = xbar * fac
b = (1 - xbar) * fac
# Compute the MLE for a and b by solving _beta_mle_ab.
theta, info, ier, mesg = optimize.fsolve(
_beta_mle_ab, [a, b],
args=(len(data), s1, s2),
full_output=True
)
if ier != 1:
raise FitSolverError(mesg=mesg)
a, b = theta
return a, b, floc, fscale
beta = beta_gen(a=0.0, b=1.0, name='beta')
class betaprime_gen(rv_continuous):
"""A beta prime continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) = x**(a-1) * (1+x)**(-a-b) / beta(a, b)
for ``x > 0``, ``a > 0``, ``b > 0``, where ``beta(a, b)`` is the beta
function (see `scipy.special.beta`).
`betaprime` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, a, b):
sz, rndm = self._size, self._random_state
u1 = gamma.rvs(a, size=sz, random_state=rndm)
u2 = gamma.rvs(b, size=sz, random_state=rndm)
return u1 / u2
def _pdf(self, x, a, b):
return np.exp(self._logpdf(x, a, b))
def _logpdf(self, x, a, b):
return sc.xlogy(a - 1.0, x) - sc.xlog1py(a + b, x) - sc.betaln(a, b)
def _cdf(self, x, a, b):
return sc.betainc(a, b, x/(1.+x))
def _munp(self, n, a, b):
if n == 1.0:
return np.where(b > 1,
a/(b-1.0),
np.inf)
elif n == 2.0:
return np.where(b > 2,
a*(a+1.0)/((b-2.0)*(b-1.0)),
np.inf)
elif n == 3.0:
return np.where(b > 3,
a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
np.inf)
elif n == 4.0:
return np.where(b > 4,
(a*(a + 1.0)*(a + 2.0)*(a + 3.0) /
((b - 4.0)*(b - 3.0)*(b - 2.0)*(b - 1.0))),
np.inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, name='betaprime')
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
`bradford` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / sc.log1p(c)
def _cdf(self, x, c):
return sc.log1p(c*x) / sc.log1p(c)
def _ppf(self, q, c):
return sc.expm1(q * sc.log1p(c)) / c
def _stats(self, c, moments='mv'):
k = np.log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = np.sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= np.sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = (c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3)
+ 6*c*k*k*(3*k-14) + 12*k**3)
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = np.log(1+c)
return k/2.0 - np.log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford')
class burr_gen(rv_continuous):
"""A Burr (Type III) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
burr12 : Burr Type XII distribution
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
`burr` takes ``c`` and ``d`` as shape parameters.
This is the PDF corresponding to the third CDF given in Burr's list;
specifically, it is equation (11) in Burr's paper [1]_.
%(after_notes)s
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
return c * d * (x**(-c - 1.0)) * ((1 + x**(-c))**(-d - 1.0))
def _cdf(self, x, c, d):
return (1 + x**(-c))**(-d)
def _ppf(self, q, c, d):
return (q**(-1.0/d) - 1)**(-1.0/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 - nc, d + nc)
burr = burr_gen(a=0.0, name='burr')
class burr12_gen(rv_continuous):
"""A Burr (Type XII) continuous random variable.
%(before_notes)s
See Also
--------
fisk : a special case of either `burr` or ``burr12`` with ``d = 1``
burr : Burr Type III distribution
Notes
-----
    The probability density function for `burr12` is::
burr12.pdf(x, c, d) = c * d * x**(c-1) * (1+x**(c))**(-d-1)
for ``x > 0``.
`burr12` takes ``c`` and ``d`` as shape parameters.
This is the PDF corresponding to the twelfth CDF given in Burr's list;
specifically, it is equation (20) in Burr's paper [1]_.
%(after_notes)s
The Burr type 12 distribution is also sometimes referred to as
the Singh-Maddala distribution from NIST [2]_.
References
----------
.. [1] Burr, I. W. "Cumulative frequency functions", Annals of
Mathematical Statistics, 13(2), pp 215-232 (1942).
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/b12pdf.htm
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, d):
return np.exp(self._logpdf(x, c, d))
def _logpdf(self, x, c, d):
return np.log(c) + np.log(d) + sc.xlogy(c - 1, x) + sc.xlog1py(-d-1, x**c)
def _cdf(self, x, c, d):
return -sc.expm1(self._logsf(x, c, d))
def _logcdf(self, x, c, d):
return sc.log1p(-(1 + x**c)**(-d))
def _sf(self, x, c, d):
return np.exp(self._logsf(x, c, d))
def _logsf(self, x, c, d):
return sc.xlog1py(-d, x**c)
def _ppf(self, q, c, d):
# The following is an implementation of
# ((1 - q)**(-1.0/d) - 1)**(1.0/c)
# that does a better job handling small values of q.
return sc.expm1(-1/d * sc.log1p(-q))**(1/c)
def _munp(self, n, c, d):
nc = 1. * n / c
return d * sc.beta(1.0 + nc, d - nc)
burr12 = burr12_gen(a=0.0, name='burr12')
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d == 1``.
`fisk` takes ``c`` as a shape parameter.
%(before_notes)s
Notes
-----
The probability density function for `fisk` is::
fisk.pdf(x, c) = c * x**(-c-1) * (1 + x**(-c))**(-2)
for ``x > 0``.
    `fisk` takes ``c`` as a shape parameter.
%(after_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _munp(self, n, c):
return burr_gen._munp(self, n, c, 1.0)
def _entropy(self, c):
return 2 - np.log(c)
fisk = fisk_gen(a=0.0, name='fisk')
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/np.pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi*q-np.pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/np.pi*np.arctan(x)
def _isf(self, q):
return np.tan(np.pi/2.0-np.pi*q)
def _stats(self):
return np.nan, np.nan, np.nan, np.nan
def _entropy(self):
return np.log(4*np.pi)
def _fitstart(self, data, args=None):
# Initialize ML guesses using quartiles instead of moments.
p25, p50, p75 = np.percentile(data, [25, 50, 75])
return p50, (p75 - p25)/2
cauchy = cauchy_gen(name='cauchy')
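# Illustrative sketch (assumes scipy.stats is importable): because the Cauchy
# distribution has no finite moments, cauchy._fitstart seeds the MLE from the
# sample median and half the interquartile range instead of sample moments.
#
#   >>> from scipy.stats import cauchy
#   >>> data = cauchy.rvs(loc=3.0, scale=2.0, size=10000, random_state=12345)
#   >>> loc_hat, scale_hat = cauchy.fit(data)   # roughly (3, 2)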
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x, df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
Special cases of `chi` are:
- ``chi(1, loc, scale)`` is equivalent to `halfnorm`
- ``chi(2, 0, scale)`` is equivalent to `rayleigh`
- ``chi(3, 0, scale)`` is equivalent to `maxwell`
`chi` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
sz, rndm = self._size, self._random_state
return np.sqrt(chi2.rvs(df, size=sz, random_state=rndm))
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
l = np.log(2) - .5*np.log(2)*df - sc.gammaln(.5*df)
return l + sc.xlogy(df - 1., x) - .5*x**2
def _cdf(self, x, df):
return sc.gammainc(.5*df, .5*x**2)
def _ppf(self, q, df):
return np.sqrt(2*sc.gammaincinv(.5*df, q))
def _stats(self, df):
mu = np.sqrt(2)*sc.gamma(df/2.0+0.5)/sc.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/np.asarray(np.power(mu2, 1.5))
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= np.asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x, df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
`chi2` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.chisquare(df, self._size)
def _pdf(self, x, df):
return np.exp(self._logpdf(x, df))
def _logpdf(self, x, df):
return sc.xlogy(df/2.-1, x) - x/2. - sc.gammaln(df/2.) - (np.log(2)*df)/2.
def _cdf(self, x, df):
return sc.chdtr(df, x)
def _sf(self, x, df):
return sc.chdtrc(df, x)
def _isf(self, p, df):
return sc.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*np.sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2')
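# Illustrative check of the note above that chi2 is gamma-distributed with
# shape df/2 and scale 2 (doctest-style sketch; assumes numpy and scipy.stats
# are importable):
#
#   >>> import numpy as np
#   >>> from scipy.stats import chi2, gamma
#   >>> x, df = np.linspace(0.5, 10, 9), 5
#   >>> np.allclose(chi2.pdf(x, df), gamma.pdf(x, df/2.0, scale=2))
#   True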
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/2/np.pi*(1+np.cos(x))
def _cdf(self, x):
return 1.0/2/np.pi*(np.pi + x + np.sin(x))
def _stats(self):
return 0.0, np.pi*np.pi/3.0-2.0, 0.0, -6.0*(np.pi**4-90)/(5.0*(np.pi*np.pi-6)**2)
def _entropy(self):
return np.log(4*np.pi)-1.0
cosine = cosine_gen(a=-np.pi, b=np.pi, name='cosine')
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
`dgamma` takes ``a`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
gm = gamma.rvs(a, size=sz, random_state=rndm)
return gm * np.where(u >= 0.5, 1, -1)
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*sc.gamma(a))*ax**(a-1.0) * np.exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return sc.xlogy(a - 1.0, ax) - ax - np.log(2) - sc.gammaln(a)
def _cdf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5 + fac, 0.5 - fac)
def _sf(self, x, a):
fac = 0.5*sc.gammainc(a, abs(x))
return np.where(x > 0, 0.5-fac, 0.5+fac)
def _ppf(self, q, a):
fac = sc.gammainccinv(a, 1-abs(2*q-1))
return np.where(q > 0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma')
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
`dweibull` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
sz, rndm = self._size, self._random_state
u = rndm.random_sample(size=sz)
w = weibull_min.rvs(c, size=sz, random_state=rndm)
return w * (np.where(u >= 0.5, 1, -1))
def _pdf(self, x, c):
ax = abs(x)
Px = c / 2.0 * ax**(c-1.0) * np.exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return np.log(c) - np.log(2.0) + sc.xlogy(c - 1.0, ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5 * np.exp(-abs(x)**c)
return np.where(x > 0, 1 - Cx1, Cx1)
def _ppf(self, q, c):
fac = 2. * np.where(q <= 0.5, q, 1. - q)
fac = np.power(-np.log(fac), 1.0 / c)
return np.where(q > 0.5, fac, -fac)
def _munp(self, n, c):
return (1 - (n % 2)) * sc.gamma(1.0 + 1.0 * n / c)
# since we know that all odd moments are zeros, return them at once.
# returning Nones from _stats makes the public stats call _munp
# so overall we're saving one or two gamma function evaluations here.
def _stats(self, c):
return 0, None, 0, None
dweibull = dweibull_gen(name='dweibull')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = exp(-x)
for ``x >= 0``.
%(after_notes)s
A common parameterization for `expon` is in terms of the rate parameter
``lambda``, such that ``pdf = lambda * exp(-lambda * x)``. This
parameterization corresponds to using ``scale = 1 / lambda``.
%(example)s
"""
def _rvs(self):
return self._random_state.standard_exponential(self._size)
def _pdf(self, x):
return np.exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -sc.expm1(-x)
def _ppf(self, q):
return -sc.log1p(-q)
def _sf(self, x):
return np.exp(-x)
def _logsf(self, x):
return -x
def _isf(self, q):
return -np.log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
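# Doctest-style sketch of the rate parameterization mentioned in the expon
# docstring (illustrative; assumes numpy and scipy.stats are importable):
# ``scale = 1/lambda`` gives ``pdf(x) = lambda * exp(-lambda * x)``.
#
#   >>> import numpy as np
#   >>> from scipy.stats import expon
#   >>> lam, x = 2.0, 1.5
#   >>> np.allclose(expon.pdf(x, scale=1.0/lam), lam * np.exp(-lam * x))
#   True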
## Exponentially Modified Normal (exponential distribution
## convolved with a Normal).
## This is called an exponentially modified gaussian on wikipedia
class exponnorm_gen(rv_continuous):
"""An exponentially modified Normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponnorm` is::
exponnorm.pdf(x, K) =
1/(2*K) * exp(1/(2 * K**2)) * exp(-x / K) * erfc(-(x - 1/K) / sqrt(2))
where the shape parameter ``K > 0``.
It can be thought of as the sum of a normally distributed random
value with mean ``loc`` and sigma ``scale`` and an exponentially
distributed random number with a pdf proportional to ``exp(-lambda * x)``
where ``lambda = (K * scale)**(-1)``.
%(after_notes)s
An alternative parameterization of this distribution (for example, in
`Wikipedia <http://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution>`_)
involves three parameters, :math:`\mu`, :math:`\lambda` and :math:`\sigma`.
In the present parameterization this corresponds to having ``loc`` and
``scale`` equal to :math:`\mu` and :math:`\sigma`, respectively, and
shape parameter :math:`K = 1/\sigma\lambda`.
.. versionadded:: 0.16.0
%(example)s
"""
def _rvs(self, K):
expval = self._random_state.standard_exponential(self._size) * K
gval = self._random_state.standard_normal(self._size)
return expval + gval
def _pdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
# Avoid overflows; setting np.exp(exparg) to the max float works
# all right here
expval = _lazywhere(exparg < _LOGXMAX, (exparg,), np.exp, _XMAX)
return 0.5 * invK * expval * sc.erfc(-(x - invK) / np.sqrt(2))
def _logpdf(self, x, K):
invK = 1.0 / K
exparg = 0.5 * invK**2 - invK * x
return exparg + np.log(0.5 * invK * sc.erfc(-(x - invK) / np.sqrt(2)))
def _cdf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(x) - np.exp(expval) * _norm_cdf(x - invK)
def _sf(self, x, K):
invK = 1.0 / K
expval = invK * (0.5 * invK - x)
return _norm_cdf(-x) + np.exp(expval) * _norm_cdf(x - invK)
def _stats(self, K):
K2 = K * K
opK2 = 1.0 + K2
skw = 2 * K**3 * opK2**(-1.5)
krt = 6.0 * K2 * K2 * opK2**(-2)
return K, opK2, skw, krt
exponnorm = exponnorm_gen(name='exponnorm')
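# Illustrative sketch of the (mu, sigma, lambda) parameterization described in
# the exponnorm docstring (assumes numpy and scipy.stats are importable): the
# sum of a Normal(mu, sigma) variate and an Exponential(rate=lam) variate
# follows exponnorm(K=1/(sigma*lam), loc=mu, scale=sigma).
#
#   >>> import numpy as np
#   >>> from scipy.stats import exponnorm
#   >>> rng = np.random.RandomState(0)
#   >>> mu, sigma, lam = 1.0, 0.5, 2.0
#   >>> y = rng.normal(mu, sigma, 200000) + rng.exponential(1.0/lam, 200000)
#   >>> K = 1.0 / (sigma * lam)
#   >>> np.allclose(y.mean(), exponnorm.mean(K, loc=mu, scale=sigma), atol=0.02)
#   True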
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
`exponweib` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
negxc = -x**c
exm1c = -sc.expm1(negxc)
logp = (np.log(a) + np.log(c) + sc.xlogy(a - 1.0, exm1c) +
negxc + sc.xlogy(c - 1.0, x))
return logp
def _cdf(self, x, a, c):
exm1c = -sc.expm1(-x**c)
return exm1c**a
def _ppf(self, q, a, c):
return (-sc.log1p(-q**(1.0/a)))**np.asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib')
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1 + x**b - exp(x**b))
for ``x >= 0``, ``b > 0``. Note that this is a different distribution
from the exponential power distribution that is also known under the names
"generalized normal" or "generalized Gaussian".
`exponpow` takes ``b`` as a shape parameter.
%(after_notes)s
References
----------
http://www.math.wm.edu/~leemis/chart/UDR/PDFs/Exponentialpower.pdf
%(example)s
"""
def _pdf(self, x, b):
return np.exp(self._logpdf(x, b))
def _logpdf(self, x, b):
xb = x**b
f = 1 + np.log(b) + sc.xlogy(b - 1.0, x) + xb - np.exp(xb)
return f
def _cdf(self, x, b):
return -sc.expm1(-sc.expm1(x**b))
def _sf(self, x, b):
return np.exp(-sc.expm1(x**b))
def _isf(self, x, b):
return (sc.log1p(-np.log(x)))**(1./b)
def _ppf(self, q, b):
return pow(sc.log1p(-sc.log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow')
class fatiguelife_gen(rv_continuous):
"""A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x, c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
`fatiguelife` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
.. [1] "Birnbaum-Saunders distribution",
http://en.wikipedia.org/wiki/Birnbaum-Saunders_distribution
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, c):
z = self._random_state.standard_normal(self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*np.sqrt(1 + x2)
return t
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return (np.log(x+1) - (x-1)**2 / (2.0*x*c**2) - np.log(2*c) -
0.5*(np.log(2*np.pi) + 3*np.log(x)))
def _cdf(self, x, c):
return _norm_cdf(1.0 / c * (np.sqrt(x) - 1.0/np.sqrt(x)))
def _ppf(self, q, c):
tmp = c*sc.ndtri(q)
return 0.25 * (tmp + np.sqrt(tmp**2 + 4))**2
def _stats(self, c):
# NB: the formula for kurtosis in wikipedia seems to have an error:
# it's 40, not 41. At least it disagrees with the one from Wolfram
# Alpha. And the latter one, below, passes the tests, while the wiki
# one doesn't. So far I haven't had the guts to actually check the
# coefficients from the expressions for the raw moments.
c2 = c*c
mu = c2 / 2.0 + 1.0
den = 5.0 * c2 + 4.0
mu2 = c2*den / 4.0
g1 = 4 * c * (11*c2 + 6.0) / np.power(den, 1.5)
g2 = 6 * c2 * (93*c2 + 40.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife')
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
`foldcauchy` takes ``c`` as a shape parameter.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c, size=self._size,
random_state=self._random_state))
def _pdf(self, x, c):
return 1.0/np.pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/np.pi*(np.arctan(x-c) + np.arctan(x+c))
def _stats(self, c):
return np.inf, np.inf, np.nan, np.nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy')
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
`f` takes ``dfn`` and ``dfd`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd):
return self._random_state.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
return np.exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0 * dfn
m = 1.0 * dfd
lPx = m/2 * np.log(m) + n/2 * np.log(n) + (n/2 - 1) * np.log(x)
lPx -= ((n+m)/2) * np.log(m + n*x) + sc.betaln(n/2, m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return sc.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return sc.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return sc.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v1, v2 = 1. * dfn, 1. * dfd
v2_2, v2_4, v2_6, v2_8 = v2 - 2., v2 - 4., v2 - 6., v2 - 8.
mu = _lazywhere(
v2 > 2, (v2, v2_2),
lambda v2, v2_2: v2 / v2_2,
np.inf)
mu2 = _lazywhere(
v2 > 4, (v1, v2, v2_2, v2_4),
lambda v1, v2, v2_2, v2_4:
2 * v2 * v2 * (v1 + v2_2) / (v1 * v2_2**2 * v2_4),
np.inf)
g1 = _lazywhere(
v2 > 6, (v1, v2_2, v2_4, v2_6),
lambda v1, v2_2, v2_4, v2_6:
(2 * v1 + v2_2) / v2_6 * np.sqrt(v2_4 / (v1 * (v1 + v2_2))),
np.nan)
g1 *= np.sqrt(8.)
g2 = _lazywhere(
v2 > 8, (g1, v2_6, v2_8),
lambda g1, v2_6, v2_8: (8 + g1 * g1 * v2_6) / v2_8,
np.nan)
g2 *= 3. / 2.
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f')
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: regress docs have scale parameter correct, but first parameter
## he gives is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for ``c >= 0``.
`foldnorm` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return c >= 0
def _rvs(self, c):
return abs(self._random_state.standard_normal(self._size) + c)
def _pdf(self, x, c):
return _norm_pdf(x + c) + _norm_pdf(x-c)
def _cdf(self, x, c):
return _norm_cdf(x-c) + _norm_cdf(x+c) - 1.0
def _stats(self, c):
# Regina C. Elandt, Technometrics 3, 551 (1961)
# http://www.jstor.org/stable/1266561
#
c2 = c*c
expfac = np.exp(-0.5*c2) / np.sqrt(2.*np.pi)
mu = 2.*expfac + c * sc.erf(c/np.sqrt(2))
mu2 = c2 + 1 - mu*mu
g1 = 2. * (mu*mu*mu - c2*mu - expfac)
g1 /= np.power(mu2, 1.5)
g2 = c2 * (c2 + 6.) + 3 + 8.*expfac*mu
g2 += (2. * (c2 - 3.) - 3. * mu**2) * mu**2
g2 = g2 / mu2**2.0 - 3.
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm')
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
`frechet_r` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _sf(self, x, c):
return np.exp(-pow(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min')
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
`frechet_l` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x, c-1)*np.exp(-pow(-x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c-1, -x) - pow(-x, c)
def _cdf(self, x, c):
return np.exp(-pow(-x, c))
def _logcdf(self, x, c):
return -pow(-x, c)
def _sf(self, x, c):
return -sc.expm1(-pow(-x, c))
def _ppf(self, q, c):
return -pow(-np.log(q), 1.0/c)
def _munp(self, n, c):
val = sc.gamma(1.0+n*1.0/c)
if int(n) % 2:
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max')
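# Note: ``weibull_min``/``frechet_r`` and ``weibull_max``/``frechet_l`` are
# alias instances of the same generator classes, so each pair is numerically
# identical. Doctest-style sketch (assumes numpy and scipy.stats importable):
#
#   >>> import numpy as np
#   >>> from scipy.stats import weibull_min, frechet_r
#   >>> x = np.linspace(0.1, 3, 7)
#   >>> np.allclose(weibull_min.pdf(x, 1.5), frechet_r.pdf(x, 1.5))
#   True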
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for real ``x`` and ``c > 0``.
`genlogistic` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) - x - (c+1.0)*sc.log1p(np.exp(-x))
def _cdf(self, x, c):
Cx = (1+np.exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -np.log(pow(q, -1.0/c)-1)
return vals
def _stats(self, c):
mu = _EULER + sc.psi(c)
mu2 = np.pi*np.pi/6.0 + sc.zeta(2, c)
g1 = -2*sc.zeta(3, c) + 2*_ZETA3
g1 /= np.power(mu2, 1.5)
g2 = np.pi**4/15.0 + 6*sc.zeta(4, c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic')
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
defined for ``x >= 0`` if ``c >=0``, and for
``0 <= x <= -1/c`` if ``c < 0``.
`genpareto` takes ``c`` as a shape parameter.
For ``c == 0``, `genpareto` reduces to the exponential
distribution, `expon`::
genpareto.pdf(x, c=0) = exp(-x)
For ``c == -1``, `genpareto` is uniform on ``[0, 1]``::
genpareto.cdf(x, c=-1) = x
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
c = np.asarray(c)
self.b = _lazywhere(c < 0, (c,),
lambda c: -1. / c,
np.inf)
return True
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _munp(self, n, c):
def __munp(n, c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return _lazywhere(c != 0, (c,),
lambda c: __munp(n, c),
sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
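# Illustrative check of the ``c == 0`` limit mentioned in the genpareto
# docstring (assumes numpy and scipy.stats are importable): with c=0 the
# distribution reduces to the standard exponential.
#
#   >>> import numpy as np
#   >>> from scipy.stats import genpareto, expon
#   >>> x = np.linspace(0.1, 5, 9)
#   >>> np.allclose(genpareto.pdf(x, 0.0), expon.pdf(x))
#   True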
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a, b, c > 0``.
`genexpon` takes ``a``, ``b`` and ``c`` as shape parameters.
%(after_notes)s
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a + b*(-sc.expm1(-c*x)))*np.exp((-a-b)*x +
b*(-sc.expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -sc.expm1((-a-b)*x + b*(-sc.expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-sc.expm1(-c*x))) + (-a-b)*x+b*(-sc.expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon')
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
Note that several sources and software packages use the opposite
convention for the sign of the shape parameter ``c``.
`genextreme` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = np.where(c > 0, 1.0 / np.maximum(c, _XMIN), np.inf)
self.a = np.where(c < 0, 1.0 / np.minimum(c, -_XMIN), -np.inf)
return np.where(abs(c) == np.inf, 0, 1)
def _loglogcdf(self, x, c):
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: sc.log1p(-c*x)/c, -x)
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
cx = _lazywhere((x == x) & (c != 0), (x, c), lambda x, c: c*x, 0.0)
logex2 = sc.log1p(-cx)
logpex2 = self._loglogcdf(x, c)
pex2 = np.exp(logpex2)
# Handle special cases
np.putmask(logpex2, (c == 0) & (x == -np.inf), 0.0)
logpdf = np.where((cx == 1) | (cx == -np.inf),
-np.inf,
-pex2+logpex2-logex2)
np.putmask(logpdf, (c == 1) & (x == 1), 0.0)
return logpdf
def _logcdf(self, x, c):
return -np.exp(self._loglogcdf(x, c))
def _cdf(self, x, c):
return np.exp(self._logcdf(x, c))
def _sf(self, x, c):
return -sc.expm1(self._logcdf(x, c))
def _ppf(self, q, c):
x = -np.log(-np.log(q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _isf(self, q, c):
x = -np.log(-sc.log1p(-q))
return _lazywhere((x == x) & (c != 0), (x, c),
lambda x, c: -sc.expm1(-c * x) / c, x)
def _stats(self, c):
g = lambda n: sc.gamma(n*c + 1)
g1 = g(1)
g2 = g(2)
g3 = g(3)
g4 = g(4)
g2mg12 = np.where(abs(c) < 1e-7, (c*np.pi)**2.0/6.0, g2-g1**2.0)
gam2k = np.where(abs(c) < 1e-7, np.pi**2.0/6.0,
sc.expm1(sc.gammaln(2.0*c+1.0)-2*sc.gammaln(c + 1.0))/c**2.0)
eps = 1e-14
gamk = np.where(abs(c) < eps, -_EULER, sc.expm1(sc.gammaln(c + 1))/c)
m = np.where(c < -1.0, np.nan, -gamk)
v = np.where(c < -0.5, np.nan, g1**2.0*gam2k)
# skewness
sk1 = np.where(c < -1./3, np.nan,
np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
sk = np.where(abs(c) <= eps**0.29, 12*np.sqrt(6)*_ZETA3/np.pi**3, sk1)
# kurtosis
ku1 = np.where(c < -1./4, np.nan,
(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = np.where(abs(c) <= (eps)**0.23, 12.0/5.0, ku1-3.0)
return m, v, sk, ku
def _fitstart(self, data):
# This is better than the default shape of (1,).
g = _skew(data)
if g < 0:
a = 0.5
else:
a = -0.5
return super(genextreme_gen, self)._fitstart(data, args=(a,))
def _munp(self, n, c):
k = np.arange(0, n+1)
vals = 1.0/c**n * np.sum(
sc.comb(n, k) * (-1)**k * sc.gamma(c*k + 1),
axis=0)
return np.where(c*n > -1, vals, np.inf)
def _entropy(self, c):
return _EULER*(1 - c) + 1
genextreme = genextreme_gen(name='genextreme')
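# Illustrative check of the statement above that genextreme with ``c = 0``
# equals gumbel_r (doctest-style sketch; assumes numpy and scipy.stats are
# importable):
#
#   >>> import numpy as np
#   >>> from scipy.stats import genextreme, gumbel_r
#   >>> x = np.linspace(-3, 5, 9)
#   >>> np.allclose(genextreme.pdf(x, 0.0), gumbel_r.pdf(x))
#   True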
def _digammainv(y):
# Inverse of the digamma function (real positive arguments only).
# This function is used in the `fit` method of `gamma_gen`.
# The function uses either optimize.fsolve or optimize.newton
# to solve `sc.digamma(x) - y = 0`. There is probably room for
# improvement, but currently it works over a wide range of y:
# >>> y = 64*np.random.randn(1000000)
# >>> y.min(), y.max()
# (-311.43592651416662, 351.77388222276869)
# x = [_digammainv(t) for t in y]
# np.abs(sc.digamma(x) - y).max()
# 1.1368683772161603e-13
#
_em = 0.5772156649015328606065120
func = lambda x: sc.digamma(x) - y
if y > -0.125:
x0 = np.exp(y) + 0.5
if y < 10:
# Some experimentation shows that newton reliably converges
# much faster than fsolve in this y range. For larger y,
# newton sometimes fails to converge.
value = optimize.newton(func, x0, tol=1e-10)
return value
elif y > -3:
x0 = np.exp(y/2.332) + 0.08661
else:
x0 = 1.0 / (-y - _em)
value, info, ier, mesg = optimize.fsolve(func, x0, xtol=1e-11,
full_output=True)
if ier != 1:
raise RuntimeError("_digammainv: fsolve failed, y = %r" % y)
return value[0]
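# Quick sanity sketch for _digammainv (module-internal helper; illustrative
# only): it should invert sc.digamma over a wide range of arguments.
#
#   >>> y = 1.3
#   >>> abs(sc.digamma(_digammainv(y)) - y) < 1e-8
#   True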
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = x**(a-1) * exp(-x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
`gamma` has a shape parameter `a` which needs to be set explicitly.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.standard_gamma(a, self._size)
def _pdf(self, x, a):
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return sc.xlogy(a-1.0, x) - x - sc.gammaln(a)
def _cdf(self, x, a):
return sc.gammainc(a, x)
def _sf(self, x, a):
return sc.gammaincc(a, x)
def _ppf(self, q, a):
return sc.gammaincinv(a, q)
def _stats(self, a):
return a, a, 2.0/np.sqrt(a), 6.0/a
def _entropy(self, a):
return sc.psi(a)*(1-a) + a + sc.gammaln(a)
def _fitstart(self, data):
# The skewness of the gamma distribution is `4 / np.sqrt(a)`.
# We invert that to estimate the shape `a` using the skewness
# of the data. The formula is regularized with 1e-8 in the
# denominator to allow for degenerate data where the skewness
# is close to 0.
a = 4 / (1e-8 + _skew(data)**2)
return super(gamma_gen, self)._fitstart(data, args=(a,))
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
f0 = (kwds.get('f0', None) or kwds.get('fa', None) or
kwds.get('fix_a', None))
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is None:
# loc is not fixed. Use the default fit method.
return super(gamma_gen, self).fit(data, *args, **kwds)
# Special case: loc is fixed.
if f0 is not None and fscale is not None:
# This check is for consistency with `rv_continuous.fit`.
# Without this check, this function would just return the
# parameters that were given.
raise ValueError("All parameters fixed. There is nothing to "
"optimize.")
# Fixed location is handled by shifting the data.
data = np.asarray(data)
if np.any(data <= floc):
raise FitDataError("gamma", lower=floc, upper=np.inf)
if floc != 0:
# Don't do the subtraction in-place, because `data` might be a
# view of the input array.
data = data - floc
xbar = data.mean()
# Three cases to handle:
# * shape and scale both free
# * shape fixed, scale free
# * shape free, scale fixed
if fscale is None:
# scale is free
if f0 is not None:
# shape is fixed
a = f0
else:
# shape and scale are both free.
# The MLE for the shape parameter `a` is the solution to:
# np.log(a) - sc.digamma(a) - np.log(xbar) + np.log(data).mean() = 0
s = np.log(xbar) - np.log(data).mean()
func = lambda a: np.log(a) - sc.digamma(a) - s
aest = (3-s + np.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
# The MLE for the scale parameter is just the data mean
# divided by the shape parameter.
scale = xbar / a
else:
# scale is fixed, shape is free
# The MLE for the shape parameter `a` is the solution to:
# sc.digamma(a) - np.log(data).mean() + np.log(fscale) = 0
c = np.log(data).mean() - np.log(fscale)
a = _digammainv(c)
scale = fscale
return a, floc, scale
gamma = gamma_gen(a=0.0, name='gamma')
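# Illustrative sketch of the fixed-location fit path implemented above
# (assumes numpy and scipy.stats are importable): with ``floc=0`` the shape is
# found by the Brent solve and the scale is the sample mean divided by the
# fitted shape.
#
#   >>> import numpy as np
#   >>> from scipy.stats import gamma
#   >>> data = gamma.rvs(3.0, scale=2.0, size=5000, random_state=1234)
#   >>> a_hat, loc_hat, scale_hat = gamma.fit(data, floc=0)   # loc fixed at 0
#   >>> # a_hat and scale_hat should be close to 3 and 2 for this sample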
class erlang_gen(gamma_gen):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma distribution, with
the shape parameter `a` an integer. Note that this restriction is not
enforced by `erlang`. It will, however, generate a warning the first time
a non-integer value is used for the shape parameter.
Refer to `gamma` for examples.
"""
def _argcheck(self, a):
allint = np.all(np.floor(a) == a)
allpos = np.all(a > 0)
if not allint:
# An Erlang distribution shouldn't really have a non-integer
# shape parameter, so warn the user.
warnings.warn(
'The shape parameter of the erlang distribution '
'has been given a non-integer value %r.' % (a,),
RuntimeWarning)
return allpos
def _fitstart(self, data):
# Override gamma_gen_fitstart so that an integer initial value is
# used. (Also regularize the division, to avoid issues when
# _skew(data) is 0 or close to 0.)
a = int(4.0 / (1e-8 + _skew(data)**2))
return super(gamma_gen, self)._fitstart(data, args=(a,))
# Trivial override of the fit method, so we can monkey-patch its
# docstring.
def fit(self, data, *args, **kwds):
return super(erlang_gen, self).fit(data, *args, **kwds)
if fit.__doc__ is not None:
fit.__doc__ = (rv_continuous.fit.__doc__ +
"""
Notes
-----
The Erlang distribution is generally defined to have integer values
for the shape parameter. This is not enforced by the `erlang` class.
When fitting the distribution, it will generally return a non-integer
value for the shape parameter. By using the keyword argument
`f0=<integer>`, the fit method can be constrained to fit the data to
a specific integer shape parameter.
""")
erlang = erlang_gen(a=0.0, name='erlang')
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x >= 0``, ``a > 0``, and ``c != 0``.
`gengamma` takes ``a`` and ``c`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return np.exp(self._logpdf(x, a, c))
def _logpdf(self, x, a, c):
return np.log(abs(c)) + sc.xlogy(c*a - 1, x) - x**c - sc.gammaln(a)
def _cdf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val1, val2)
def _sf(self, x, a, c):
xc = x**c
val1 = sc.gammainc(a, xc)
val2 = sc.gammaincc(a, xc)
return np.where(c > 0, val2, val1)
def _ppf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val1, val2)**(1.0/c)
def _isf(self, q, a, c):
val1 = sc.gammaincinv(a, q)
val2 = sc.gammainccinv(a, q)
return np.where(c > 0, val2, val1)**(1.0/c)
def _munp(self, n, a, c):
# Pochhammer symbol: sc.poch(a, n) = gamma(a+n)/gamma(a)
return sc.poch(a, n*1.0/c)
def _entropy(self, a, c):
val = sc.psi(a)
return a*(1-val) + 1.0/c*val + sc.gammaln(a) - np.log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma')
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) =
2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
`genhalflogistic` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return c > 0
def _pdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = np.asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self, c):
return 2 - (2*c+1)*np.log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic')
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
`gompertz` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return np.log(c) + x - c * sc.expm1(x)
def _cdf(self, x, c):
return -sc.expm1(-c * sc.expm1(x))
def _ppf(self, q, c):
return sc.log1p(-1.0 / c * sc.log1p(-q))
def _entropy(self, c):
return 1.0 - np.log(c) - np.exp(c)*sc.expn(1, c)
gompertz = gompertz_gen(a=0.0, name='gompertz')
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - np.exp(-x)
def _cdf(self, x):
return np.exp(-np.exp(-x))
def _logcdf(self, x):
return -np.exp(-x)
def _ppf(self, q):
return -np.log(-np.log(q))
def _stats(self):
return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
# http://en.wikipedia.org/wiki/Gumbel_distribution
return _EULER + 1.
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
gumbel_l = gumbel_l_gen(name='gumbel_l')
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/np.pi) - sc.log1p(x*x)
def _cdf(self, x):
return 2.0/np.pi*np.arctan(x)
def _ppf(self, q):
return np.tan(np.pi/2*q)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
def _entropy(self):
return np.log(2*np.pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return np.log(2) - x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return np.tanh(x/2.0)
def _ppf(self, q):
return 2*np.arctanh(q)
def _munp(self, n):
if n == 1:
return 2*np.log(2)
if n == 2:
return np.pi*np.pi/3.0
if n == 3:
return 9*_ZETA3
if n == 4:
return 7*np.pi**4 / 15.0
return 2*(1-pow(2.0, 1-n))*sc.gamma(n+1)*sc.zeta(n, 1)
def _entropy(self):
return 2-np.log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
`halfnorm` is a special case of `chi` with ``df == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return abs(self._random_state.standard_normal(size=self._size))
def _pdf(self, x):
return np.sqrt(2.0/np.pi)*np.exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/np.pi) - x*x/2.0
def _cdf(self, x):
return _norm_cdf(x)*2-1.0
def _ppf(self, q):
return sc.ndtri((1+q)/2.0)
def _stats(self):
return (np.sqrt(2.0/np.pi), 1-2.0/np.pi, np.sqrt(2)*(4-np.pi)/(np.pi-2)**1.5,
8*(np.pi-3)/(np.pi-2)**2)
def _entropy(self):
return 0.5*np.log(np.pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 1.0/(np.pi*np.cosh(x))
def _cdf(self, x):
return 2.0/np.pi*np.arctan(np.exp(x))
def _ppf(self, q):
return np.log(np.tan(np.pi*q/2.0))
def _stats(self):
return 0, np.pi*np.pi/4, 0, 2
def _entropy(self):
return np.log(2*np.pi)
hypsecant = hypsecant_gen(name='hypsecant')
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a, b) F[2, 1](c, a; a+b; -z))``
`gausshyper` takes ``a``, ``b``, ``c`` and ``z`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c == c) & (z == z)
def _pdf(self, x, a, b, c, z):
Cinv = sc.gamma(a)*sc.gamma(b)/sc.gamma(a+b)*sc.hyp2f1(c, a, a+b, -z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = sc.beta(n+a, b) / sc.beta(a, b)
num = sc.hyp2f1(c, a+n, a+b+n, -z)
den = sc.hyp2f1(c, a, a+b, -z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper')
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
`invgamma` takes ``a`` as a shape parameter.
`invgamma` is a special case of `gengamma` with ``c == -1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, a):
return np.exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return -(a+1) * np.log(x) - sc.gammaln(a) - 1.0/x
def _cdf(self, x, a):
return sc.gammaincc(a, 1.0 / x)
def _ppf(self, q, a):
return 1.0 / sc.gammainccinv(a, q)
def _sf(self, x, a):
return sc.gammainc(a, 1.0 / x)
def _isf(self, q, a):
return 1.0 / sc.gammaincinv(a, q)
def _stats(self, a, moments='mvsk'):
m1 = _lazywhere(a > 1, (a,), lambda x: 1. / (x - 1.), np.inf)
m2 = _lazywhere(a > 2, (a,), lambda x: 1. / (x - 1.)**2 / (x - 2.),
np.inf)
g1, g2 = None, None
if 's' in moments:
g1 = _lazywhere(
a > 3, (a,),
lambda x: 4. * np.sqrt(x - 2.) / (x - 3.), np.nan)
if 'k' in moments:
g2 = _lazywhere(
a > 4, (a,),
lambda x: 6. * (5. * x - 11.) / (x - 3.) / (x - 4.), np.nan)
return m1, m2, g1, g2
def _entropy(self, a):
return a - (a+1.0) * sc.psi(a) + sc.gammaln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
`invgauss` takes ``mu`` as a shape parameter.
%(after_notes)s
When `mu` is too small, evaluating the cumulative distribution function will be
inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, mu):
return self._random_state.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/np.sqrt(2*np.pi*x**3.0)*np.exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*np.log(2*np.pi) - 1.5*np.log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = np.sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = _norm_cdf(fac*(x-mu)/mu)
C1 += np.exp(1.0/mu) * _norm_cdf(-fac*(x+mu)/mu) * np.exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*np.sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss')
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
`invweibull` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
F.R.S. de Gusmao, E.M.M Ortega and G.M. Cordeiro, "The generalized inverse
Weibull distribution", Stat. Papers, vol. 52, pp. 591-619, 2011.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c):
xc1 = np.power(x, -c - 1.0)
xc2 = np.power(x, -c)
xc2 = np.exp(-xc2)
return c * xc1 * xc2
def _cdf(self, x, c):
xc1 = np.power(x, -c)
return np.exp(-xc1)
def _ppf(self, q, c):
return np.power(-np.log(q), -1.0/c)
def _munp(self, n, c):
return sc.gamma(1 - n / c)
def _entropy(self, c):
return 1+_EULER + _EULER / c - np.log(c)
invweibull = invweibull_gen(a=0, name='invweibull')
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a, b > 0``, where ``phi`` is the normal pdf.
`johnsonsb` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
trm = _norm_pdf(a + b*np.log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b*np.log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0 / (1 + np.exp(-1.0 / b * (_norm_ppf(q) - a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb')
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all ``x`` and ``a, b > 0``, where `phi` is the normal pdf.
`johnsonsu` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a == a)
def _pdf(self, x, a, b):
x2 = x*x
trm = _norm_pdf(a + b * np.log(x + np.sqrt(x2+1)))
return b*1.0/np.sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return _norm_cdf(a + b * np.log(x + np.sqrt(x*x + 1)))
def _ppf(self, q, a, b):
return np.sinh((_norm_ppf(q) - a) / b)
johnsonsu = johnsonsu_gen(name='johnsonsu')
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*np.exp(-abs(x))
def _cdf(self, x):
return np.where(x > 0, 1.0-0.5*np.exp(-x), 0.5*np.exp(x))
def _ppf(self, q):
return np.where(q > 0.5, -np.log(2*(1-q)), np.log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return np.log(2)+1
laplace = laplace_gen(name='laplace')
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
return 1 / np.sqrt(2*np.pi*x) / x * np.exp(-1/(2*x))
def _cdf(self, x):
# Equivalent to 2*norm.sf(np.sqrt(1/x))
return sc.erfc(np.sqrt(0.5 / x))
def _ppf(self, q):
# Equivalent to 1.0/(norm.isf(q/2)**2) or 0.5/(erfcinv(q)**2)
val = -sc.ndtri(q/2)
return 1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy = levy_gen(a=0.0, name="levy")
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x):
ax = abs(x)
return 1/np.sqrt(2*np.pi*ax)/ax*np.exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2 * _norm_cdf(1 / np.sqrt(ax)) - 1
def _ppf(self, q):
val = _norm_ppf((q + 1.0) / 2)
return -1.0 / (val * val)
def _stats(self):
return np.inf, np.inf, np.nan, np.nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(after_notes)s
%(example)s
"""
def _rvs(self, alpha, beta):
def alpha1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (2/np.pi*(np.pi/2 + bTH)*tanTH -
beta*np.log((np.pi/2*W*cosTH)/(np.pi/2 + bTH)))
def beta0func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
return (W/(cosTH/np.tan(aTH) + np.sin(TH)) *
((np.cos(aTH) + np.sin(aTH)*tanTH)/W)**(1.0/alpha))
def otherwise(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
# alpha is not 1 and beta is not 0
val0 = beta*np.tan(np.pi*alpha/2)
th0 = np.arctan(val0)/alpha
val3 = W/(cosTH/np.tan(alpha*(th0 + TH)) + np.sin(TH))
res3 = val3*((np.cos(aTH) + np.sin(aTH)*tanTH -
val0*(np.sin(aTH) - np.cos(aTH)*tanTH))/W)**(1.0/alpha)
return res3
def alphanot1func(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W):
res = _lazywhere(beta == 0,
(alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
beta0func, f2=otherwise)
return res
sz = self._size
alpha = broadcast_to(alpha, sz)
beta = broadcast_to(beta, sz)
TH = uniform.rvs(loc=-np.pi/2.0, scale=np.pi, size=sz,
random_state=self._random_state)
W = expon.rvs(size=sz, random_state=self._random_state)
aTH = alpha*TH
bTH = beta*TH
cosTH = np.cos(TH)
tanTH = np.tan(TH)
res = _lazywhere(alpha == 1, (alpha, beta, TH, aTH, bTH, cosTH, tanTH, W),
alpha1func, f2=alphanot1func)
return res
def _argcheck(self, alpha, beta):
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable')
class logistic_gen(rv_continuous):
"""A logistic (or Sech-squared) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
`logistic` is a special case of `genlogistic` with ``c == 1``.
%(after_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.logistic(size=self._size)
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return -x - 2. * sc.log1p(np.exp(-x))
def _cdf(self, x):
return sc.expit(x)
def _ppf(self, q):
return sc.logit(q)
def _sf(self, x):
return sc.expit(-x)
def _isf(self, q):
return -sc.logit(q)
def _stats(self):
return 0, np.pi*np.pi/3.0, 0, 6.0/5.0
def _entropy(self):
# http://en.wikipedia.org/wiki/Logistic_distribution
return 2.0
logistic = logistic_gen(name='logistic')
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all real ``x`` and ``c > 0``.
`loggamma` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, c):
return np.log(self._random_state.gamma(c, size=self._size))
def _pdf(self, x, c):
return np.exp(c*x-np.exp(x)-sc.gammaln(c))
def _cdf(self, x, c):
return sc.gammainc(c, np.exp(x))
def _ppf(self, q, c):
return np.log(sc.gammaincinv(c, q))
def _stats(self, c):
# See, for example, "A Statistical Study of Log-Gamma Distribution", by
# Ping Shing Chan (thesis, McMaster University, 1993).
mean = sc.digamma(c)
var = sc.polygamma(1, c)
skewness = sc.polygamma(2, c) / np.power(var, 1.5)
excess_kurtosis = sc.polygamma(3, c) / (var*var)
return mean, var, skewness, excess_kurtosis
loggamma = loggamma_gen(name='loggamma')
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
`loglaplace` takes ``c`` as a shape parameter.
%(after_notes)s
References
----------
T.J. Kozubowski and K. Podgorski, "A log-Laplace growth rate model",
The Mathematical Scientist, vol. 28, pp. 49-60, 2003.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = np.where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return np.where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return np.where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _munp(self, n, c):
return c**2 / (c**2 - n**2)
def _entropy(self, c):
return np.log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace')
def _lognorm_logpdf(x, s):
return _lazywhere(x != 0, (x, s),
lambda x, s: -np.log(x)**2 / (2*s**2) - np.log(s*x*np.sqrt(2*np.pi)),
-np.inf)
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
`lognorm` takes ``s`` as a shape parameter.
%(after_notes)s
A common parametrization for a lognormal random variable ``Y`` is in
terms of the mean, ``mu``, and standard deviation, ``sigma``, of the
unique normally distributed random variable ``X`` such that exp(X) = Y.
This parametrization corresponds to setting ``s = sigma`` and ``scale =
exp(mu)``.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self, s):
return np.exp(s * self._random_state.standard_normal(self._size))
def _pdf(self, x, s):
return np.exp(self._logpdf(x, s))
def _logpdf(self, x, s):
return _lognorm_logpdf(x, s)
def _cdf(self, x, s):
return _norm_cdf(np.log(x) / s)
def _logcdf(self, x, s):
return _norm_logcdf(np.log(x) / s)
def _ppf(self, q, s):
return np.exp(s * _norm_ppf(q))
def _sf(self, x, s):
return _norm_sf(np.log(x) / s)
def _logsf(self, x, s):
return _norm_logsf(np.log(x) / s)
def _stats(self, s):
p = np.exp(s*s)
mu = np.sqrt(p)
mu2 = p*(p-1)
g1 = np.sqrt((p-1))*(2+p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5 * (1 + np.log(2*np.pi) + 2 * np.log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm')
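# Illustrative sketch of the (mu, sigma) parametrization described in the
# lognorm docstring (assumes numpy and scipy.stats are importable): if
# X ~ N(mu, sigma), then exp(X) is lognorm with ``s = sigma`` and
# ``scale = np.exp(mu)``.
#
#   >>> import numpy as np
#   >>> from scipy.stats import lognorm, norm
#   >>> mu, sigma, x = 0.7, 1.3, 2.5
#   >>> np.allclose(lognorm.cdf(x, sigma, scale=np.exp(mu)),
#   ...             norm.cdf(np.log(x), loc=mu, scale=sigma))
#   True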
class gilbrat_gen(rv_continuous):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
`gilbrat` is a special case of `lognorm` with ``s = 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return np.exp(self._random_state.standard_normal(self._size))
def _pdf(self, x):
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return _lognorm_logpdf(x, 1.0)
def _cdf(self, x):
return _norm_cdf(np.log(x))
def _ppf(self, q):
return np.exp(_norm_ppf(q))
def _stats(self):
p = np.e
mu = np.sqrt(p)
mu2 = p * (p - 1)
g1 = np.sqrt((p - 1)) * (2 + p)
g2 = np.polyval([1, 2, 3, 0, -6.0], p)
return mu, mu2, g1, g2
def _entropy(self):
return 0.5 * np.log(2 * np.pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
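# Illustrative check that gilbrat is lognorm with ``s = 1``, as stated above
# (doctest-style sketch; assumes numpy and scipy.stats are importable):
#
#   >>> import numpy as np
#   >>> from scipy.stats import gilbrat, lognorm
#   >>> x = np.linspace(0.2, 4, 8)
#   >>> np.allclose(gilbrat.pdf(x), lognorm.pdf(x, 1.0))
#   True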
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = a``, where ``a`` is the parameter used in the
Mathworld description [1]_.
The probability density function for `maxwell` is::
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
%(after_notes)s
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0, size=self._size, random_state=self._random_state)
def _pdf(self, x):
return np.sqrt(2.0/np.pi)*x*x*np.exp(-x*x/2.0)
def _cdf(self, x):
return sc.gammainc(1.5, x*x/2.0)
def _ppf(self, q):
return np.sqrt(2*sc.gammaincinv(1.5, q))
def _stats(self):
val = 3*np.pi-8
return (2*np.sqrt(2.0/np.pi),
3-8/np.pi,
np.sqrt(2)*(32-10*np.pi)/val**1.5,
(-12*np.pi*np.pi + 160*np.pi - 384) / val**2.0)
def _entropy(self):
return _EULER + 0.5*np.log(2*np.pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
`mielke` takes ``k`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q, s*1.0/k)
return pow(qsk/(1.0-qsk), 1.0/s)
mielke = mielke_gen(a=0.0, name='mielke')
class kappa4_gen(rv_continuous):
"""Kappa 4 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for kappa4 is::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
(1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1)
if ``h`` and ``k`` are not equal to 0.
If ``h`` or ``k`` are zero then the pdf can be simplified:
h = 0 and k != 0::
kappa4.pdf(x, h, k) = (1.0 - k*x)**(1.0/k - 1.0)*
exp(-(1.0 - k*x)**(1.0/k))
h != 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*(1.0 - h*exp(-x))**(1.0/h - 1.0)
h = 0 and k = 0::
kappa4.pdf(x, h, k) = exp(-x)*exp(-exp(-x))
kappa4 takes ``h`` and ``k`` as shape parameters.
The kappa4 distribution returns other distributions when certain
``h`` and ``k`` values are used.
+------+-------------+----------------+------------------+
| h | k=0.0 | k=1.0 | -inf<=k<=inf |
+======+=============+================+==================+
| -1.0 | Logistic | | Generalized |
| | | | Logistic(1) |
| | | | |
| | logistic(x) | | |
+------+-------------+----------------+------------------+
| 0.0 | Gumbel | Reverse | Generalized |
| | | Exponential(2) | Extreme Value |
| | | | |
| | gumbel_r(x) | | genextreme(x, k) |
+------+-------------+----------------+------------------+
| 1.0 | Exponential | Uniform | Generalized |
| | | | Pareto |
| | | | |
| | expon(x) | uniform(x) | genpareto(x, -k) |
+------+-------------+----------------+------------------+
(1) There are at least five generalized logistic distributions.
Four are described here:
https://en.wikipedia.org/wiki/Generalized_logistic_distribution
The "fifth" one is the one kappa4 should match which currently
isn't implemented in scipy:
https://en.wikipedia.org/wiki/Talk:Generalized_logistic_distribution
http://www.mathwave.com/help/easyfit/html/analyses/distributions/gen_logistic.html
(2) This distribution is currently not in scipy.
References
----------
J.C. Finney, "Optimization of a Skewed Logistic Distribution With Respect
to the Kolmogorov-Smirnov Test", A Dissertation Submitted to the Graduate
Faculty of the Louisiana State University and Agricultural and Mechanical
College, (August, 2004),
http://etd.lsu.edu/docs/available/etd-05182004-144851/unrestricted/Finney_dis.pdf
J.R.M. Hosking, "The four-parameter kappa distribution". IBM J. Res.
Develop. 38 (3), 251-258 (1994).
B. Kumphon, A. Kaew-Man, P. Seenoi, "A Rainfall Distribution for the Lampao
Site in the Chi River Basin, Thailand", Journal of Water Resource and
Protection, vol. 4, 866-869, (2012).
http://file.scirp.org/pdf/JWARP20121000009_14676002.pdf
C. Winchester, "On Estimation of the Four-Parameter Kappa Distribution", A
Thesis Submitted to Dalhousie University, Halifax, Nova Scotia, (March
2000).
http://www.nlc-bnc.ca/obj/s4/f2/dsk2/ftp01/MQ57336.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, h, k):
condlist = [np.logical_and(h > 0, k > 0),
np.logical_and(h > 0, k == 0),
np.logical_and(h > 0, k < 0),
np.logical_and(h <= 0, k > 0),
np.logical_and(h <= 0, k == 0),
np.logical_and(h <= 0, k < 0)]
def f0(h, k):
return (1.0 - h**(-k))/k
def f1(h, k):
return np.log(h)
def f3(h, k):
a = np.empty(np.shape(h))
a[:] = -np.inf
return a
def f5(h, k):
return 1.0/k
self.a = _lazyselect(condlist,
[f0, f1, f0, f3, f3, f5],
[h, k],
default=np.nan)
def f0(h, k):
return 1.0/k
def f1(h, k):
a = np.empty(np.shape(h))
a[:] = np.inf
return a
self.b = _lazyselect(condlist,
[f0, f1, f1, f0, f1, f1],
[h, k],
default=np.nan)
return h == h
def _pdf(self, x, h, k):
return np.exp(self._logpdf(x, h, k))
def _logpdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*(
1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h-1.0)
logpdf = ...
'''
return (sc.xlog1py(1.0/k - 1.0, -k*x) +
sc.xlog1py(1.0/h - 1.0, -h*(1.0 - k*x)**(1.0/k)))
def f1(x, h, k):
'''pdf = (1.0 - k*x)**(1.0/k - 1.0)*np.exp(-(
1.0 - k*x)**(1.0/k))
logpdf = ...
'''
return sc.xlog1py(1.0/k - 1.0, -k*x) - (1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''pdf = np.exp(-x)*(1.0 - h*np.exp(-x))**(1.0/h - 1.0)
logpdf = ...
'''
return -x + sc.xlog1py(1.0/h - 1.0, -h*np.exp(-x))
def f3(x, h, k):
'''pdf = np.exp(-x-np.exp(-x))
logpdf = ...
'''
return -x - np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _cdf(self, x, h, k):
return np.exp(self._logcdf(x, h, k))
def _logcdf(self, x, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(x, h, k):
'''cdf = (1.0 - h*(1.0 - k*x)**(1.0/k))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*(1.0 - k*x)**(1.0/k))
def f1(x, h, k):
'''cdf = np.exp(-(1.0 - k*x)**(1.0/k))
logcdf = ...
'''
return -(1.0 - k*x)**(1.0/k)
def f2(x, h, k):
'''cdf = (1.0 - h*np.exp(-x))**(1.0/h)
logcdf = ...
'''
return (1.0/h)*sc.log1p(-h*np.exp(-x))
def f3(x, h, k):
'''cdf = np.exp(-np.exp(-x))
logcdf = ...
'''
return -np.exp(-x)
return _lazyselect(condlist,
[f0, f1, f2, f3],
[x, h, k],
default=np.nan)
def _ppf(self, q, h, k):
condlist = [np.logical_and(h != 0, k != 0),
np.logical_and(h == 0, k != 0),
np.logical_and(h != 0, k == 0),
np.logical_and(h == 0, k == 0)]
def f0(q, h, k):
return 1.0/k*(1.0 - ((1.0 - (q**h))/h)**k)
def f1(q, h, k):
return 1.0/k*(1.0 - (-np.log(q))**k)
def f2(q, h, k):
'''ppf = -np.log((1.0 - (q**h))/h)
'''
return -sc.log1p(-(q**h)) + np.log(h)
def f3(q, h, k):
return -np.log(-np.log(q))
return _lazyselect(condlist,
[f0, f1, f2, f3],
[q, h, k],
default=np.nan)
def _stats(self, h, k):
if h >= 0 and k >= 0:
maxr = 5
elif h < 0 and k >= 0:
maxr = int(-1.0/h*k)
elif k < 0:
maxr = int(-1.0/k)
else:
maxr = 5
outputs = [None if r < maxr else np.nan for r in range(1, 5)]
return outputs[:]
kappa4 = kappa4_gen(name='kappa4')
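# Illustrative sketch (not part of the module): numerically checking two of the
# special cases listed in the table of the `kappa4` docstring above, using the
# public ``scipy.stats`` objects.
#
#     >>> import numpy as np
#     >>> from scipy.stats import kappa4, gumbel_r, expon
#     >>> x = np.linspace(0.1, 3.0, 5)
#     >>> np.allclose(kappa4.cdf(x, 0.0, 0.0), gumbel_r.cdf(x))   # h=0, k=0 -> Gumbel
#     True
#     >>> np.allclose(kappa4.cdf(x, 1.0, 0.0), expon.cdf(x))      # h=1, k=0 -> exponential
#     True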
class kappa3_gen(rv_continuous):
"""Kappa 3 parameter distribution.
%(before_notes)s
Notes
-----
The probability density function for `kappa` is::
kappa3.pdf(x, a) =
a*[a + x**a]**(-(a + 1)/a), for ``x > 0``
0.0, for ``x <= 0``
`kappa3` takes ``a`` as a shape parameter and ``a > 0``.
References
----------
P.W. Mielke and E.S. Johnson, "Three-Parameter Kappa Distribution Maximum
Likelihood and Likelihood Ratio Tests", Methods in Weather Research,
701-707, (September, 1973),
http://docs.lib.noaa.gov/rescue/mwr/101/mwr-101-09-0701.pdf
B. Kumphon, "Maximum Entropy and Maximum Likelihood Estimation for the
Three-Parameter Kappa Distribution", Open Journal of Statistics, vol 2,
415-419 (2012)
http://file.scirp.org/pdf/OJS20120400011_95789012.pdf
%(after_notes)s
%(example)s
"""
def _argcheck(self, a):
return a > 0
def _pdf(self, x, a):
return a*(a + x**a)**(-1.0/a-1)
def _cdf(self, x, a):
return x*(a + x**a)**(-1.0/a)
def _ppf(self, q, a):
return (a/(q**-a - 1.0))**(1.0/a)
def _stats(self, a):
outputs = [None if i < a else np.nan for i in range(1, 5)]
return outputs[:]
kappa3 = kappa3_gen(a=0.0, name='kappa3')
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
`nakagami` takes ``nu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/sc.gamma(nu)*(x**(2*nu-1.0))*np.exp(-nu*x*x)
def _cdf(self, x, nu):
return sc.gammainc(nu, nu*x*x)
def _ppf(self, q, nu):
return np.sqrt(1.0/nu*sc.gammaincinv(nu, q))
def _stats(self, nu):
mu = sc.gamma(nu+0.5)/sc.gamma(nu)/np.sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu * (1 - 4*nu*mu2) / 2.0 / nu / np.power(mu2, 1.5)
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami")
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+x)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
`ncx2` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, df, nc):
return self._random_state.noncentral_chisquare(df, nc, self._size)
def _logpdf(self, x, df, nc):
return _ncx2_log_pdf(x, df, nc)
def _pdf(self, x, df, nc):
return _ncx2_pdf(x, df, nc)
def _cdf(self, x, df, nc):
return _ncx2_cdf(x, df, nc)
def _ppf(self, q, df, nc):
return sc.chndtrix(q, df, nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return (df + nc,
2*val,
np.sqrt(8)*(val+nc)/val**1.5,
12.0*(val+2*nc)/val**2.0)
ncx2 = ncx2_gen(a=0.0, name='ncx2')
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) *
df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) *
(df2+df1*x)**(-(df1+df2)/2) *
gamma(df1/2)*gamma(1+df2/2) *
L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) /
(B(v1/2, v2/2) * gamma((v1+v2)/2))
for ``df1, df2, nc > 0``.
`ncf` takes ``df1``, ``df2`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return self._random_state.noncentral_f(dfn, dfd, nc, self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1, n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + sc.gammaln(n1/2.)+sc.gammaln(1+n2/2.)
term -= sc.gammaln((n1+n2)/2.0)
Px = np.exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= sc.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)), n2/2, n1/2-1)
Px /= sc.beta(n1/2, n2/2)
# This function does not have a return. Drop it for now, the generic
# function seems to work OK.
def _cdf(self, x, dfn, dfd, nc):
return sc.ncfdtr(dfn, dfd, nc, x)
def _ppf(self, q, dfn, dfd, nc):
return sc.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn * 1.0/dfd)**n
term = sc.gammaln(n+0.5*dfn) + sc.gammaln(0.5*dfd-n) - sc.gammaln(dfd*0.5)
val *= np.exp(-nc / 2.0+term)
val *= sc.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = np.where(dfd <= 2, np.inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = np.where(dfd <= 4, np.inf, 2*(dfd*1.0/dfn)**2.0 *
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) /
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf')
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
gamma((df+1)/2)
t.pdf(x, df) = ---------------------------------------------------
sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
for ``df > 0``.
`t` takes ``df`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, df):
return self._random_state.standard_t(df, size=self._size)
def _pdf(self, x, df):
r = np.asarray(df*1.0)
Px = np.exp(sc.gammaln((r+1)/2)-sc.gammaln(r/2))
Px /= np.sqrt(r*np.pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = sc.gammaln((r+1)/2)-sc.gammaln(r/2)
lPx -= 0.5*np.log(r*np.pi) + (r+1)/2*np.log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return sc.stdtr(df, x)
def _sf(self, x, df):
return sc.stdtr(df, -x)
def _ppf(self, q, df):
return sc.stdtrit(df, q)
def _isf(self, q, df):
return -sc.stdtrit(df, q)
def _stats(self, df):
mu2 = _lazywhere(df > 2, (df,),
lambda df: df / (df-2.0),
np.inf)
g1 = np.where(df > 3, 0.0, np.nan)
g2 = _lazywhere(df > 4, (df,),
lambda df: 6.0 / (df-4.0),
np.nan)
return 0, mu2, g1, g2
t = t_gen(name='t')
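# Illustrative sketch (not part of the module): typical use of the public
# ``scipy.stats.t`` object, including its convergence to the standard normal
# for large ``df``.
#
#     >>> from scipy.stats import t, norm
#     >>> t.ppf(0.975, df=10)                            # two-sided 95% critical value
#     >>> abs(t.cdf(1.0, df=1e6) - norm.cdf(1.0)) < 1e-5
#     True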
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
df**(df/2) * gamma(df+1)
nct.pdf(x, df, nc) = ----------------------------------------------------
2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
for ``df > 0``.
`nct` takes ``df`` and ``nc`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, df, nc):
return (df > 0) & (nc == nc)
def _rvs(self, df, nc):
sz, rndm = self._size, self._random_state
n = norm.rvs(loc=nc, size=sz, random_state=rndm)
c2 = chi2.rvs(df, size=sz, random_state=rndm)
return n * np.sqrt(df) / np.sqrt(c2)
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*np.log(n) + sc.gammaln(n+1)
trm1 -= n*np.log(2)+nc*nc/2.+(n/2.)*np.log(fac1)+sc.gammaln(n/2.)
Px = np.exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = np.sqrt(2)*nc*x*sc.hyp1f1(n/2+1, 1.5, valF)
trm1 /= np.asarray(fac1*sc.gamma((n+1)/2))
trm2 = sc.hyp1f1((n+1)/2, 0.5, valF)
trm2 /= np.asarray(np.sqrt(fac1)*sc.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return sc.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return sc.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
#
# See D. Hogben, R.S. Pinkham, and M.B. Wilk,
# 'The moments of the non-central t-distribution'
# Biometrika 48, p. 465 (1961).
# e.g. http://www.jstor.org/stable/2332772 (gated)
#
mu, mu2, g1, g2 = None, None, None, None
gfac = sc.gamma(df/2.-0.5) / sc.gamma(df/2.)
c11 = np.sqrt(df/2.) * gfac
c20 = df / (df-2.)
c22 = c20 - c11*c11
mu = np.where(df > 1, nc*c11, np.inf)
mu2 = np.where(df > 2, c22*nc*nc + c20, np.inf)
if 's' in moments:
c33t = df * (7.-2.*df) / (df-2.) / (df-3.) + 2.*c11*c11
c31t = 3.*df / (df-2.) / (df-3.)
mu3 = (c33t*nc*nc + c31t) * c11*nc
g1 = np.where(df > 3, mu3 / np.power(mu2, 1.5), np.nan)
#kurtosis
if 'k' in moments:
c44 = df*df / (df-2.) / (df-4.)
c44 -= c11*c11 * 2.*df*(5.-df) / (df-2.) / (df-3.)
c44 -= 3.*c11**4
c42 = df / (df-4.) - c11*c11 * (df-1.) / (df-3.)
c42 *= 6.*df / (df-2.)
c40 = 3.*df*df / (df-2.) / (df-4.)
mu4 = c44 * nc**4 + c42*nc**2 + c40
g2 = np.where(df > 4, mu4/mu2**2 - 3., np.nan)
return mu, mu2, g1, g2
nct = nct_gen(name="nct")
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
`pareto` takes ``b`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = np.extract(mask, b)
mu = valarray(np.shape(b), value=np.inf)
np.place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = np.extract(mask, b)
mu2 = valarray(np.shape(b), value=np.inf)
np.place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = np.extract(mask, b)
g1 = valarray(np.shape(b), value=np.nan)
vals = 2 * (bt + 1.0) * np.sqrt(bt - 2.0) / ((bt - 3.0) * np.sqrt(bt))
np.place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = np.extract(mask, b)
g2 = valarray(np.shape(b), value=np.nan)
vals = (6.0*np.polyval([1.0, 1.0, -6, -2], bt) /
np.polyval([1.0, -7.0, 12.0, 0.0], bt))
np.place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - np.log(c)
pareto = pareto_gen(a=1.0, name="pareto")
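# Illustrative sketch (not part of the module): the moments of the public
# ``scipy.stats.pareto`` object are finite only for sufficiently large ``b``,
# matching the masks used in ``_stats`` above.
#
#     >>> from scipy.stats import pareto
#     >>> pareto.mean(2.5), pareto.var(2.5)    # finite since b > 1 and b > 2
#     >>> pareto.mean(0.5)                     # inf: the mean does not exist for b <= 1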
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The Lomax distribution is a special case of the Pareto distribution, with
(loc=-1.0).
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
`lomax` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
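# Illustrative sketch (not part of the module): checking the docstring claim
# that `lomax` is `pareto` shifted by ``loc=-1``.
#
#     >>> import numpy as np
#     >>> from scipy.stats import lomax, pareto
#     >>> x = np.linspace(0.0, 5.0, 6)
#     >>> np.allclose(lomax.pdf(x, 2.0), pareto.pdf(x, 2.0, loc=-1.0))
#     True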
class pearson3_gen(rv_continuous):
"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is::
pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
(beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
where::
beta = 2 / (skew * stddev)
alpha = (stddev * beta)**2
zeta = loc - alpha / beta
`pearson3` takes ``skew`` as a shape parameter.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays([1.0], x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.ones(np.shape(skew), dtype=bool)
def _stats(self, skew):
_, _, _, _, _, beta, alpha, zeta = (
self._preprocess([1], skew))
m = zeta + alpha / beta
v = alpha / (beta**2)
s = 2.0 / (alpha**0.5) * np.sign(beta)
k = 6.0 / alpha
return m, v, s, k
def _pdf(self, x, skew):
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
# - sc.gammaln(alpha) - sc.gammaln(a)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
ans[invmask] = np.log(abs(beta)) + gamma._logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
ans[invmask] = gamma._cdf(transx, alpha)
return ans
def _rvs(self, skew):
skew = broadcast_to(skew, self._size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = self._random_state.standard_normal(nsmall)
ans[invmask] = (self._random_state.standard_gamma(alpha, nbig)/beta +
zeta)
if self._size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
ans[invmask] = sc.gammaincinv(alpha, q[invmask])/beta + zeta
return ans
pearson3 = pearson3_gen(name="pearson3")
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
`powerlaw` takes ``a`` as a shape parameter.
%(after_notes)s
`powerlaw` is a special case of `beta` with ``b == 1``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return np.log(a) + sc.xlogy(a - 1, x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*np.log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * np.sqrt((a + 2.0) / a),
6 * np.polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - np.log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw")
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
`powerlognorm` takes ``c`` and ``s`` as shape parameters.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _pdf(self, x, c, s):
return (c/(x*s) * _norm_pdf(np.log(x)/s) *
pow(_norm_cdf(-np.log(x)/s), c*1.0-1.0))
def _cdf(self, x, c, s):
return 1.0 - pow(_norm_cdf(-np.log(x)/s), c*1.0)
def _ppf(self, q, c, s):
return np.exp(-s * _norm_ppf(pow(1.0 - q, 1.0 / c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm")
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
`powernorm` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x) * (_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return np.log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -_norm_ppf(pow(1.0 - q, 1.0 / c))
powernorm = powernorm_gen(name='powernorm')
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
`rdist` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0 - x**2), c / 2.0 - 1) / sc.beta(0.5, c / 2.0)
def _cdf(self, x, c):
term1 = x / sc.beta(0.5, c / 2.0)
res = 0.5 + term1 * sc.hyp2f1(0.5, 1 - c / 2.0, 1.5, x**2)
# There's an issue with hyp2f1, it returns nans near x = +-1, c > 100.
# Use the generic implementation in that case. See gh-1285 for
# background.
if np.any(np.isnan(res)):
return rv_continuous._cdf(self, x, c)
return res
def _munp(self, n, c):
numerator = (1 - (n % 2)) * sc.beta((n + 1.0) / 2, c / 2.0)
return numerator / sc.beta(1. / 2, c / 2.)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist")
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
for ``x >= 0``.
`rayleigh` is a special case of `chi` with ``df == 2``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return chi.rvs(2, size=self._size, random_state=self._random_state)
def _pdf(self, r):
return np.exp(self._logpdf(r))
def _logpdf(self, r):
return np.log(r) - 0.5 * r * r
def _cdf(self, r):
return -sc.expm1(-0.5 * r**2)
def _ppf(self, q):
return np.sqrt(-2 * sc.log1p(-q))
def _sf(self, r):
return np.exp(self._logsf(r))
def _logsf(self, r):
return -0.5 * r * r
def _isf(self, q):
return np.sqrt(-2 * np.log(q))
def _stats(self):
val = 4 - np.pi
return (np.sqrt(np.pi/2),
val/2,
2*(np.pi-3)*np.sqrt(np.pi)/val**1.5,
6*np.pi/val-16/val**2)
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*np.log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
`reciprocal` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = np.log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
return 1.0 / (x * self.d)
def _logpdf(self, x, a, b):
return -np.log(x) - np.log(self.d)
def _cdf(self, x, a, b):
return (np.log(x)-np.log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a, q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0, n) - pow(a*1.0, n))
def _entropy(self, a, b):
return 0.5*np.log(a*b)+np.log(np.log(b/a))
reciprocal = reciprocal_gen(name="reciprocal")
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
`rice` takes ``b`` as a shape parameter.
%(after_notes)s
The Rice distribution describes the length, ``r``, of a 2-D vector
with components ``(U+u, V+v)``, where ``U, V`` are constant, ``u, v``
are independent Gaussian random variables with standard deviation
``s``. Let ``R = (U**2 + V**2)**0.5``. Then the pdf of ``r`` is
``rice.pdf(x, R/s, scale=s)``.
%(example)s
"""
def _argcheck(self, b):
return b >= 0
def _rvs(self, b):
# http://en.wikipedia.org/wiki/Rice_distribution
t = b/np.sqrt(2) + self._random_state.standard_normal(size=(2,) +
self._size)
return np.sqrt((t*t).sum(axis=0))
def _cdf(self, x, b):
return sc.chndtr(np.square(x), 2, np.square(b))
def _ppf(self, q, b):
return np.sqrt(sc.chndtrix(q, 2, np.square(b)))
def _pdf(self, x, b):
# We use (x**2 + b**2)/2 = ((x-b)**2)/2 + xb.
# The factor of np.exp(-xb) is then included in the i0e function
# in place of the modified Bessel function, i0, improving
# numerical stability for large values of xb.
return x * np.exp(-(x-b)*(x-b)/2.0) * sc.i0e(x*b)
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1 + nd2
b2 = b*b/2.0
return (2.0**(nd2) * np.exp(-b2) * sc.gamma(n1) *
sc.hyp1f1(n1, 1, b2))
rice = rice_gen(a=0.0, name="rice")
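# Illustrative sketch (not part of the module): simulating the 2-D vector
# length described in the `rice` docstring and comparing against
# ``rice(b=R/s, scale=s)`` using the public ``scipy.stats`` API.
#
#     >>> import numpy as np
#     >>> from scipy.stats import rice
#     >>> rng = np.random.RandomState(0)
#     >>> U, V, s = 3.0, 4.0, 2.0                       # R = 5
#     >>> r = np.hypot(U + s*rng.randn(100000), V + s*rng.randn(100000))
#     >>> b = np.hypot(U, V) / s                        # shape parameter R/s
#     >>> abs(r.mean() - rice.mean(b, scale=s)) < 0.05
#     True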
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
`recipinvgauss` takes ``mu`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _pdf(self, x, mu):
return 1.0/np.sqrt(2*np.pi*x)*np.exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*np.log(2*np.pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/np.sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-np.exp(2.0/mu)*_norm_cdf(-isqx*trm2)
def _rvs(self, mu):
return 1.0/self._random_state.wald(mu, 1.0, size=self._size)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss')
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(after_notes)s
%(example)s
"""
def _pdf(self, x):
return 2.0/np.pi*np.sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/np.pi*(x*np.sqrt(1-x*x) + np.arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
class skew_norm_gen(rv_continuous):
"""A skew-normal random variable.
%(before_notes)s
Notes
-----
The pdf is::
skewnorm.pdf(x, a) = 2*norm.pdf(x)*norm.cdf(ax)
`skewnorm` takes ``a`` as a skewness parameter
When a=0 the distribution is identical to a normal distribution.
rvs implements the method of [1]_.
%(after_notes)s
%(example)s
References
----------
.. [1] A. Azzalini and A. Capitanio (1999). Statistical applications of the
multivariate skew-normal distribution. J. Roy. Statist. Soc., B 61, 579-602.
http://azzalini.stat.unipd.it/SN/faq-r.html
"""
def _argcheck(self, a):
return np.isfinite(a)
def _pdf(self, x, a):
return 2.*_norm_pdf(x)*_norm_cdf(a*x)
def _rvs(self, a):
u0 = self._random_state.normal(size=self._size)
v = self._random_state.normal(size=self._size)
d = a/np.sqrt(1 + a**2)
u1 = d*u0 + v*np.sqrt(1 - d**2)
return np.where(u0 >= 0, u1, -u1)
def _stats(self, a, moments='mvsk'):
output = [None, None, None, None]
const = np.sqrt(2/np.pi) * a/np.sqrt(1 + a**2)
if 'm' in moments:
output[0] = const
if 'v' in moments:
output[1] = 1 - const**2
if 's' in moments:
output[2] = ((4 - np.pi)/2) * (const/np.sqrt(1 - const**2))**3
if 'k' in moments:
output[3] = (2*(np.pi - 3)) * (const**4/(1 - const**2)**2)
return output
skewnorm = skew_norm_gen(name='skewnorm')
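# Illustrative sketch (not part of the module): for ``a = 0`` the skew-normal
# reduces to the standard normal, and ``rvs`` draws samples with the method
# of [1]_ implemented above.
#
#     >>> import numpy as np
#     >>> from scipy.stats import skewnorm, norm
#     >>> x = np.linspace(-3, 3, 7)
#     >>> np.allclose(skewnorm.pdf(x, 0.0), norm.pdf(x))
#     True
#     >>> skewnorm.rvs(4.0, size=3, random_state=0)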
class trapz_gen(rv_continuous):
"""A trapezoidal continuous random variable.
%(before_notes)s
Notes
-----
The trapezoidal distribution can be represented with an up-sloping line
from ``loc`` to ``(loc + c*scale)``, then constant to ``(loc + d*scale)``
and then downsloping from ``(loc + d*scale)`` to ``(loc+scale)``.
`trapz` takes ``c`` and ``d`` as shape parameters.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _argcheck(self, c, d):
return (c >= 0) & (c <= 1) & (d >= 0) & (d <= 1) & (d >= c)
def _pdf(self, x, c, d):
u = 2 / (d - c + 1)
condlist = [x < c, x <= d, x > d]
choicelist = [u * x / c, u, u * (1 - x) / (1 - d)]
return np.select(condlist, choicelist)
def _cdf(self, x, c, d):
condlist = [x < c, x <= d, x > d]
choicelist = [x**2 / c / (d - c + 1),
(c + 2 * (x - c)) / (d - c + 1),
1 - ((1 - x)**2 / (d - c + 1) / (1 - d))]
return np.select(condlist, choicelist)
def _ppf(self, q, c, d):
qc, qd = self._cdf(c, c, d), self._cdf(d, c, d)
condlist = [q < qc, q <= qd, q > qd]
choicelist = [np.sqrt(q * c * (1 + d - c)),
0.5 * q * (1 + d - c) + 0.5 * c,
1 - np.sqrt((1 - q) * (d - c + 1) * (1 - d))]
return np.select(condlist, choicelist)
trapz = trapz_gen(a=0.0, b=1.0, name="trapz")
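# Illustrative sketch (not part of the module): building the trapezoid
# described in the `trapz` docstring, with the flat part between
# ``loc + c*scale`` and ``loc + d*scale``.
#
#     >>> from scipy.stats import trapz
#     >>> dist = trapz(0.2, 0.8, loc=2.0, scale=10.0)   # supported on [2, 12]
#     >>> dist.pdf(2.0), dist.pdf(7.0), dist.pdf(12.0)  # 0 at the ends, flat in the middle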
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)``
to ``(loc+scale)``.
`triang` takes ``c`` as a shape parameter.
%(after_notes)s
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return self._random_state.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return np.where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return np.where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return np.where(q < c, np.sqrt(c*q), 1-np.sqrt((1-c)*(1-q)))
def _stats(self, c):
return ((c+1.0)/3.0,
(1.0-c+c*c)/18,
np.sqrt(2)*(2*c-1)*(c+1)*(c-2) / (5*np.power((1.0-c+c*c), 1.5)),
-3.0/5.0)
def _entropy(self, c):
return 0.5-np.log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang")
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
`truncexpon` takes ``b`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, b):
self.b = b
return b > 0
def _pdf(self, x, b):
return np.exp(-x)/(-sc.expm1(-b))
def _logpdf(self, x, b):
return -x - np.log(-sc.expm1(-b))
def _cdf(self, x, b):
return sc.expm1(-x)/sc.expm1(-b)
def _ppf(self, q, b):
return -sc.log1p(q*sc.expm1(-b))
def _munp(self, n, b):
# wrong answer with formula, same as in continuous.pdf
# return sc.gamma(n+1) - sc.gammainc(1+n, b)
if n == 1:
return (1-(b+1)*np.exp(-b))/(-sc.expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*np.exp(-b))/(-sc.expm1(-b))
else:
# return generic for higher moments
# return rv_continuous._mom1_sc(self, n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = np.exp(b)
return np.log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon')
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a, b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
`truncnorm` takes ``a`` and ``b`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._sb = _norm_sf(b)
self._sa = _norm_sf(a)
self._delta = np.where(self.a > 0,
-(self._sb - self._sa),
self._nb - self._na)
self._logdelta = np.log(self._delta)
return a != b
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
# XXX Use _lazywhere...
ppf = np.where(self.a > 0,
_norm_isf(q*self._sb + self._sa*(1.0-q)),
_norm_ppf(q*self._nb + self._na*(1.0-q)))
return ppf
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d # correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm')
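# Illustrative sketch (not part of the module): converting clip values for a
# specific mean and standard deviation exactly as described in the `truncnorm`
# docstring, then sampling from the public ``scipy.stats.truncnorm`` object.
#
#     >>> from scipy.stats import truncnorm
#     >>> my_mean, my_std = 10.0, 2.0
#     >>> myclip_a, myclip_b = 8.0, 14.0
#     >>> a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
#     >>> dist = truncnorm(a, b, loc=my_mean, scale=my_std)
#     >>> dist.rvs(size=5, random_state=0)              # all values lie in [8, 14]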
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam=-1)
- logistic (lam=0.0)
- approx Normal (lam=0.14)
- u-shape (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
`tukeylambda` takes ``lam`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lam):
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = np.asarray(sc.tklmbda(x, lam))
Px = Fx**(lam-1.0) + (np.asarray(1-Fx))**(lam-1.0)
Px = 1.0/np.asarray(Px)
return np.where((lam <= 0) | (abs(x) < 1.0/np.asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return sc.tklmbda(x, lam)
def _ppf(self, q, lam):
return sc.boxcox(q, lam) - sc.boxcox1p(-q, lam)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return np.log(pow(p, lam-1)+pow(1-p, lam-1))
return integrate.quad(integ, 0, 1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda')
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return self._random_state.uniform(0.0, 1.0, self._size)
def _pdf(self, x):
return 1.0*(x == x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, kappa) = exp(kappa * cos(x)) / (2*pi*I[0](kappa))
for ``-pi <= x <= pi``, ``kappa > 0``.
`vonmises` takes ``kappa`` as a shape parameter.
%(after_notes)s
See Also
--------
vonmises_line : The same distribution, defined on a [-pi, pi] segment
of the real line.
%(example)s
"""
def _rvs(self, kappa):
return self._random_state.vonmises(0.0, kappa, size=self._size)
def _pdf(self, x, kappa):
return np.exp(kappa * np.cos(x)) / (2*np.pi*sc.i0(kappa))
def _cdf(self, x, kappa):
return _stats.von_mises_cdf(kappa, x)
def _stats_skip(self, kappa):
return 0, None, 0, None
def _entropy(self, kappa):
return (-kappa * sc.i1(kappa) / sc.i0(kappa) +
np.log(2 * np.pi * sc.i0(kappa)))
vonmises = vonmises_gen(name='vonmises')
vonmises_line = vonmises_gen(a=-np.pi, b=np.pi, name='vonmises_line')
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
`wald` is a special case of `invgauss` with ``mu == 1``.
%(after_notes)s
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _rvs(self):
return self._random_state.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
`wrapcauchy` takes ``c`` as a shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*np.pi*(1+c*c-2*c*np.cos(x)))
def _cdf(self, x, c):
output = np.zeros(x.shape, dtype=x.dtype)
val = (1.0+c)/(1.0-c)
c1 = x < np.pi
c2 = 1-c1
xp = np.extract(c1, x)
xn = np.extract(c2, x)
if np.any(xn):
valn = np.extract(c2, np.ones_like(x)*val)
xn = 2*np.pi - xn
yn = np.tan(xn/2.0)
on = 1.0-1.0/np.pi*np.arctan(valn*yn)
np.place(output, c2, on)
if np.any(xp):
valp = np.extract(c1, np.ones_like(x)*val)
yp = np.tan(xp/2.0)
op = 1.0/np.pi*np.arctan(valp*yp)
np.place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*np.arctan(val*np.tan(np.pi*q))
rcmq = 2*np.pi-2*np.arctan(val*np.tan(np.pi*(1-q)))
return np.where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return np.log(2*np.pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*np.pi, name='wrapcauchy')
class gennorm_gen(rv_continuous):
"""A generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gennorm` is [1]_::
beta
gennorm.pdf(x, beta) = --------------- exp(-|x|**beta)
2 gamma(1/beta)
`gennorm` takes ``beta`` as a shape parameter.
For ``beta = 1``, it is identical to a Laplace distribution.
For ``beta = 2``, it is identical to a normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
laplace : Laplace distribution
norm : normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(0.5*beta) - sc.gammaln(1.0/beta) - abs(x)**beta
def _cdf(self, x, beta):
c = 0.5 * np.sign(x)
# evaluating (.5 + c) first prevents numerical cancellation
return (0.5 + c) - c * sc.gammaincc(1.0/beta, abs(x)**beta)
def _ppf(self, x, beta):
c = np.sign(x - 0.5)
# evaluating (1. + c) first prevents numerical cancellation
return c * sc.gammainccinv(1.0/beta, (1.0 + c) - 2.0*c*x)**(1.0/beta)
def _sf(self, x, beta):
return self._cdf(-x, beta)
def _isf(self, x, beta):
return -self._ppf(x, beta)
def _stats(self, beta):
c1, c3, c5 = sc.gammaln([1.0/beta, 3.0/beta, 5.0/beta])
return 0., np.exp(c3 - c1), 0., np.exp(c5 + c1 - 2.0*c3) - 3.
def _entropy(self, beta):
return 1. / beta - np.log(.5 * beta) + sc.gammaln(1. / beta)
gennorm = gennorm_gen(name='gennorm')
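# Illustrative sketch (not part of the module): checking the docstring claims
# that ``beta = 1`` recovers the Laplace distribution and ``beta = 2`` recovers
# the normal distribution with ``scale = 1/sqrt(2)``.
#
#     >>> import numpy as np
#     >>> from scipy.stats import gennorm, laplace, norm
#     >>> x = np.linspace(-3, 3, 7)
#     >>> np.allclose(gennorm.pdf(x, 1), laplace.pdf(x))
#     True
#     >>> np.allclose(gennorm.pdf(x, 2), norm.pdf(x, scale=1/np.sqrt(2)))
#     True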
class halfgennorm_gen(rv_continuous):
"""The upper half of a generalized normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfgennorm` is::
beta
halfgennorm.pdf(x, beta) = ------------- exp(-|x|**beta)
gamma(1/beta)
`gennorm` takes ``beta`` as a shape parameter.
For ``beta = 1``, it is identical to an exponential distribution.
For ``beta = 2``, it is identical to a half normal distribution
(with ``scale=1/sqrt(2)``).
See Also
--------
gennorm : generalized normal distribution
expon : exponential distribution
halfnorm : half normal distribution
References
----------
.. [1] "Generalized normal distribution, Version 1",
https://en.wikipedia.org/wiki/Generalized_normal_distribution#Version_1
%(example)s
"""
def _pdf(self, x, beta):
return np.exp(self._logpdf(x, beta))
def _logpdf(self, x, beta):
return np.log(beta) - sc.gammaln(1.0/beta) - x**beta
def _cdf(self, x, beta):
return sc.gammainc(1.0/beta, x**beta)
def _ppf(self, x, beta):
return sc.gammaincinv(1.0/beta, x)**(1.0/beta)
def _sf(self, x, beta):
return sc.gammaincc(1.0/beta, x**beta)
def _isf(self, x, beta):
return sc.gammainccinv(1.0/beta, x)**(1.0/beta)
def _entropy(self, beta):
return 1.0/beta - np.log(beta) + sc.gammaln(1.0/beta)
halfgennorm = halfgennorm_gen(a=0, name='halfgennorm')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names
| matthewalbani/scipy | scipy/stats/_continuous_distns.py | Python | bsd-3-clause | 140,183 | ["Gaussian"] | 8c3f85ea94f477859b74c4f10f16f96640efcb7d4ad8ea94a81998be34ed900b |
# -*- coding: utf-8 -*-
"""
End-to-end tests for Student's Profile Page.
"""
from contextlib import contextmanager
from datetime import datetime
from unittest import skip
import pytest
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.learner_profile import LearnerProfilePage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class LearnerProfileTestMixin(EventsTestMixin):
"""
Mixin with helper methods for testing learner profile pages.
"""
PRIVACY_PUBLIC = u'all_users'
PRIVACY_PRIVATE = u'private'
PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio']
PRIVATE_PROFILE_FIELDS = ['username']
PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio']
USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed"
def log_in_as_unique_user(self):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def set_public_profile_fields_data(self, profile_page):
"""
Fill in the public profile fields of a user.
"""
# These value_for_dropdown_field method calls used to include
# focus_out = True, but a change in selenium is focusing out of the
# drop down after selection without any more action needed.
profile_page.value_for_dropdown_field('language_proficiencies', 'English')
profile_page.value_for_dropdown_field('country', 'United Arab Emirates')
profile_page.set_value_for_textarea_field('bio', 'Nothing Special')
# Waits here for text to appear/save on bio field
profile_page.wait_for_ajax()
def visit_profile_page(self, username, privacy=None):
"""
Visit a user's profile page and if a privacy is specified and
is different from the displayed value, then set the privacy to that value.
"""
profile_page = LearnerProfilePage(self.browser, username)
# Change the privacy if requested by loading the page and
# changing the drop down
if privacy is not None:
profile_page.visit()
# Change the privacy setting if it is not the desired one already
profile_page.privacy = privacy
# Verify the current setting is as expected
if privacy == self.PRIVACY_PUBLIC:
self.assertEqual(profile_page.privacy, 'all_users')
else:
self.assertEqual(profile_page.privacy, 'private')
if privacy == self.PRIVACY_PUBLIC:
self.set_public_profile_fields_data(profile_page)
# Reset event tracking so that the tests only see events from
# loading the profile page.
self.start_time = datetime.now()
# Load the page
profile_page.visit()
return profile_page
def set_birth_year(self, birth_year):
"""
Set birth year for the current user to the specified value.
"""
account_settings_page = AccountSettingsPage(self.browser)
account_settings_page.visit()
account_settings_page.wait_for_page()
self.assertEqual(
account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year), focus_out=True),
str(birth_year)
)
def verify_profile_page_is_public(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently public.
"""
self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS)
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS)
else:
self.assertEqual(profile_page.editable_fields, [])
def verify_profile_page_is_private(self, profile_page, is_editable=True):
"""
Verify that the profile page is currently private.
"""
if is_editable:
self.assertTrue(profile_page.privacy_field_visible)
self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS)
def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None):
"""
Verifies that the correct view event was captured for the profile page.
"""
actual_events = self.wait_for_events(
start_time=self.start_time,
event_filter={'event_type': 'edx.user.settings.viewed', 'username': requesting_username},
number_of_matches=1)
self.assert_events_match(
[
{
'username': requesting_username,
'event': {
'user_id': int(profile_user_id),
'page': 'profile',
'visibility': unicode(visibility)
}
}
],
actual_events
)
@contextmanager
def verify_pref_change_event_during(self, username, user_id, setting, **kwargs):
"""Assert that a single setting changed event is emitted for the user_api_userpreference table."""
expected_event = {
'username': username,
'event': {
'setting': setting,
'user_id': int(user_id),
'table': 'user_api_userpreference',
'truncated': []
}
}
expected_event['event'].update(kwargs)
event_filter = {
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'username': username,
}
with self.assert_events_match_during(event_filter=event_filter, expected_events=[expected_event]):
yield
def initialize_different_user(self, privacy=None, birth_year=None):
"""
Initialize the profile page for a different test user
"""
username, user_id = self.log_in_as_unique_user()
# Set the privacy for the new user
if privacy is None:
privacy = self.PRIVACY_PUBLIC
self.visit_profile_page(username, privacy=privacy)
# Set the user's year of birth
if birth_year:
self.set_birth_year(birth_year)
# Log the user out
LogoutPage(self.browser).visit()
return username, user_id
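# Illustrative sketch (not one of the tests below): a typical test built on this
# mixin composes the helpers above, e.g.
#
#     username, user_id = self.log_in_as_unique_user()
#     profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
#     self.verify_profile_page_is_public(profile_page)
#     self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)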
class OwnLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify a student's own profile page.
"""
shard = 4
def verify_profile_forced_private_message(self, username, birth_year, message=None):
"""
Verify age limit messages for a user.
"""
if birth_year is None:
birth_year = ""
self.set_birth_year(birth_year=birth_year)
profile_page = self.visit_profile_page(username)
self.assertTrue(profile_page.privacy_field_visible)
if message:
self.assertTrue(profile_page.age_limit_message_present)
else:
self.assertFalse(profile_page.age_limit_message_present)
self.assertIn(message, profile_page.profile_forced_private_message)
@skip("failing on Jenkins")
def test_profile_defaults_to_public(self):
"""
Scenario: Verify that a new user's profile defaults to public.
Given that I am a new user.
When I go to my profile page.
Then I see that the profile visibility is set to public.
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
self.verify_profile_page_is_public(profile_page)
def assert_default_image_has_public_access(self, profile_page):
"""
Assert that profile image has public access.
"""
self.assertTrue(profile_page.profile_has_default_image)
self.assertTrue(profile_page.profile_has_image_with_public_access())
@skip("failing on Jenkins")
def test_make_profile_public(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my private profile page
And I set the profile visibility to public
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as public
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=self.PRIVACY_PRIVATE, new=self.PRIVACY_PUBLIC
):
profile_page.privacy = self.PRIVACY_PUBLIC
# Reload the page and verify that the profile is now public
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_public(profile_page)
def test_make_profile_private(self):
"""
Scenario: Verify that the user can change their privacy.
Given that I am a registered user
And I visit my public profile page
And I set the profile visibility to private
Then a user preference changed event should be recorded
When I reload the page
Then the profile visibility should be shown as private
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
with self.verify_pref_change_event_during(
username, user_id, 'account_privacy', old=None, new=self.PRIVACY_PRIVATE
):
profile_page.privacy = self.PRIVACY_PRIVATE
# Reload the page and verify that the profile is now private
self.browser.refresh()
profile_page.wait_for_page()
self.verify_profile_page_is_private(profile_page)
def test_dashboard_learner_profile_link(self):
"""
Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page.
Given that I am a registered user.
When I go to Dashboard page.
And I click on username dropdown.
Then I see Profile link in the dropdown menu.
When I click on Profile link.
Then I will be navigated to Profile page.
"""
username, __ = self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
self.assertIn('Profile', dashboard_page.tabs_link_text)
dashboard_page.click_my_profile_link()
my_profile_page = LearnerProfilePage(self.browser, username)
my_profile_page.wait_for_page()
def test_fields_on_my_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own private profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to private.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_page_is_private(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
@skip("failing on Jenkins")
def test_fields_on_my_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at her own public profile.
Given that I am a registered user.
And I visit my Profile page.
And I set the profile visibility to public.
And I reload the page.
Then I should see the profile visibility selector dropdown.
Then I see all the profile fields are shown.
And `location`, `language` and `about me` fields are editable.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.verify_profile_page_is_public(profile_page)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC)
def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a dropdown field.
"""
profile_page.value_for_dropdown_field(field_id, new_value, focus_out=True)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode):
"""
Test behaviour of a textarea field.
"""
profile_page.set_value_for_textarea_field(field_id, new_value)
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
self.browser.refresh()
profile_page.wait_for_page()
self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value)
self.assertTrue(profile_page.mode_for_field(field_id), mode)
def test_birth_year_not_set(self):
"""
Verify message if birth year is not set.
Given that I am a registered user.
And birth year is not set for the user.
And I visit my profile page.
Then I should see a message that the profile is private until the year of birth is set.
"""
username, user_id = self.log_in_as_unique_user()
message = "You must specify your birth year before you can share your full profile."
self.verify_profile_forced_private_message(username, birth_year=None, message=message)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_is_under_age(self):
"""
Verify message if user is under age.
Given that I am a registered user.
And birth year is set so that age is less than 13.
And I visit my profile page.
Then I should see a message that the profile is private as I am under thirteen.
"""
username, user_id = self.log_in_as_unique_user()
under_age_birth_year = datetime.now().year - 10
self.verify_profile_forced_private_message(
username,
birth_year=under_age_birth_year,
message='You must be over 13 to share a full profile.'
)
self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE)
def test_user_can_only_see_default_image_for_private_profile(self):
"""
Scenario: Default profile image behaves correctly for under age user.
Given that I am on my profile page with private access
And I can see default image
When I move my cursor to the image
Then I cannot see the upload/remove image text
And I cannot upload/remove the image.
"""
year_of_birth = datetime.now().year - 5
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE)
self.verify_profile_forced_private_message(
username,
year_of_birth,
message='You must be over 13 to share a full profile.'
)
self.assertTrue(profile_page.profile_has_default_image)
self.assertFalse(profile_page.profile_has_image_with_private_access())
def test_user_can_see_default_image_for_public_profile(self):
"""
Scenario: Default profile image behaves correctly for public profile.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
And I am able to upload a new image
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
def test_user_can_upload_the_profile_image_with_success(self):
"""
Scenario: Upload profile image works correctly.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new image via the file uploader
Then I can see the changed image
And I can also see the latest image after reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
profile_page.visit()
self.assertTrue(profile_page.image_upload_success)
def test_user_can_see_error_for_exceeding_max_file_size_limit(self):
"""
Scenario: Upload profile image does not work for > 1MB image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new > 1MB image via the file uploader
Then I can see the error message for the file size limit
And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='larger_image.jpg')
self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_file_size_below_the_min_limit(self):
"""
Scenario: Upload profile image does not work for < 100 Bytes image file.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new < 100 Bytes image via the file uploader
Then I can see the error message for the minimum file size limit
And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='list-icon-visited.png')
self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.")
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_see_error_for_wrong_file_type(self):
"""
Scenario: Upload profile image does not work for wrong file types.
Given that I am on my profile page with public access
And I can see default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I upload a new csv file via the file uploader
Then I can see the error message for the wrong/unsupported file type
And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
profile_page.upload_file(filename='generic_csv.csv')
self.assertEqual(
profile_page.profile_image_message,
"The file must be one of the following types: .gif, .png, .jpeg, .jpg."
)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
self.assert_no_matching_events_were_emitted({
'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME,
'event': {
'setting': 'profile_image_uploaded_at',
'user_id': int(user_id),
}
})
def test_user_can_remove_profile_image(self):
"""
Scenario: Remove profile image works correctly.
Given that I am on my profile page with public access
And I can see the default image
When I move my cursor to the image
Then I can see the upload/remove image text
When I click on the remove image link
Then I can see the default image
And I can still see the default image after page reload.
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
self.assertTrue(profile_page.remove_profile_image())
self.assertTrue(profile_page.profile_has_default_image)
profile_page.visit()
self.assertTrue(profile_page.profile_has_default_image)
def test_user_cannot_remove_default_image(self):
"""
Scenario: Remove profile image does not work for default images.
Given that I am on my profile page with public access
And I can see the default image
When I move my cursor to the image
Then I can see only the upload image text
And I cannot see the remove image text
"""
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
self.assertFalse(profile_page.remove_link_present)
def test_eventing_after_multiple_uploads(self):
"""
Scenario: An event is fired when a user with a profile image uploads another image
Given that I am on my profile page with public access
And I upload a new image via file uploader
When I upload another image via the file uploader
Then two upload events have been emitted
"""
username, user_id = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC)
self.assert_default_image_has_public_access(profile_page)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg')
self.assertTrue(profile_page.image_upload_success)
with self.verify_pref_change_event_during(
username, user_id, 'profile_image_uploaded_at', table='auth_userprofile'
):
profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False)
class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Tests that verify viewing the profile page of a different user.
"""
shard = 4
def test_different_user_private_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's private profile.
Given that I am a registered user.
And I visit a different user's private profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see some of the profile fields are shown.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PRIVATE)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_under_age(self):
"""
Scenario: Verify that an under age user's profile is private to others.
Given that I am a registered user.
And I visit an under age user's profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then I see that only the private fields are shown.
"""
under_age_birth_year = datetime.now().year - 10
different_username, different_user_id = self.initialize_different_user(
privacy=self.PRIVACY_PUBLIC,
birth_year=under_age_birth_year
)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
self.verify_profile_page_is_private(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE)
def test_different_user_public_profile(self):
"""
Scenario: Verify that desired fields are shown when looking at a different user's public profile.
Given that I am a registered user.
And I visit a different user's public profile page.
Then I shouldn't see the profile visibility selector dropdown.
Then all the profile fields are shown.
Also the `location`, `language`, and `about me` fields are not editable.
"""
different_username, different_user_id = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
username, __ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.wait_for_public_fields()
self.verify_profile_page_is_public(profile_page, is_editable=False)
self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC)
def test_badge_share_modal(self):
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
badge = profile_page.badges[0]
badge.display_modal()
badge.close_modal()
@pytest.mark.a11y
class LearnerProfileA11yTest(LearnerProfileTestMixin, AcceptanceTest):
"""
Class to test learner profile accessibility.
"""
def test_editable_learner_profile_a11y(self):
"""
Test the accessibility of the editable version of the profile page
(user viewing her own public profile).
"""
username, _ = self.log_in_as_unique_user()
profile_page = self.visit_profile_page(username)
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('language_proficiencies')
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.make_field_editable('bio')
profile_page.a11y_audit.check_for_accessibility_errors()
def test_read_only_learner_profile_a11y(self):
"""
Test the accessibility of the read-only version of a public profile page
(user viewing someone else's profile page).
"""
# initialize_different_user should cause country, language, and bio to be filled out (since
# privacy is public). It doesn't appear that this is happening, although the method
# works in regular bokchoy tests. Perhaps a problem with phantomjs? So this test is currently
# only looking at a read-only profile page with a username.
different_username, _ = self.initialize_different_user(privacy=self.PRIVACY_PUBLIC)
self.log_in_as_unique_user()
profile_page = self.visit_profile_page(different_username)
profile_page.a11y_audit.check_for_accessibility_errors()
def test_badges_accessibility(self):
"""
Test the accessibility of the badge listings and sharing modal.
"""
username = 'testcert'
AutoAuthPage(self.browser, username=username).visit()
profile_page = self.visit_profile_page(username)
profile_page.display_accomplishments()
profile_page.a11y_audit.check_for_accessibility_errors()
profile_page.badges[0].display_modal()
profile_page.a11y_audit.check_for_accessibility_errors()
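
# The upload tests above assert three constraints on profile images: the file
# must be at least 100 bytes, smaller than 1 MB, and one of .gif/.png/.jpeg/.jpg.
# Below is a minimal, hedged sketch of that validation logic for clarity only;
# the helper name is hypothetical and not part of the edx-platform API, and the
# exact boundary behaviour (>= vs >) is an assumption.
import os

_ALLOWED_IMAGE_EXTENSIONS = ('.gif', '.png', '.jpeg', '.jpg')
_MIN_IMAGE_BYTES = 100
_MAX_IMAGE_BYTES = 1024 * 1024  # 1 MB

def _validate_profile_image(path):
    """Return the error message the tests above expect, or None if the file is acceptable."""
    if os.path.splitext(path)[1].lower() not in _ALLOWED_IMAGE_EXTENSIONS:
        return "The file must be one of the following types: .gif, .png, .jpeg, .jpg."
    size = os.path.getsize(path)
    if size > _MAX_IMAGE_BYTES:
        return "The file must be smaller than 1 MB in size."
    if size < _MIN_IMAGE_BYTES:
        return "The file must be at least 100 bytes in size."
    return None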
| ahmedaljazzar/edx-platform | common/test/acceptance/tests/lms/test_learner_profile.py | Python | agpl-3.0 | 30,502 | ["VisIt"] | 64edae34943cbcf7360b480fd5a0c0ff86c83691ad7272bdca885e7a7880a645 |
"""
Tests for array_handler.py.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2008, Enthought, Inc.
# License: BSD Style.
import unittest
import vtk
import numpy
from tvtk import array_handler
from tvtk import tvtk_base
# FIXME: test_tvtk_base.py is in the local directory so just doing
# from test_tvtk_base import Prop
# should be enough; however, nose 0.9.3 will not find it unless you give
# it the full path. Nose 0.10.3 works fine in this respect.
from tvtk.tests.test_tvtk_base import Prop
def mysum(arr):
val = arr
while type(val) == numpy.ndarray:
val = numpy.sum(val)
return val
class TestArrayHandler(unittest.TestCase):
def _check_arrays(self, arr, vtk_arr):
self.assertEqual(vtk_arr.GetNumberOfTuples(), len(arr))
if len(arr.shape) == 2:
dim1 = arr.shape[1]
self.assertEqual(vtk_arr.GetNumberOfComponents(), dim1)
for i in range(len(arr)):
if dim1 in [1,2,3,4,9]:
res = getattr(vtk_arr, 'GetTuple%s'%dim1)(i)
self.assertEqual(numpy.sum(res - arr[i]), 0)
else:
res = [vtk_arr.GetComponent(i, j) for j in range(dim1)]
self.assertEqual(numpy.sum(res - arr[i]), 0)
else:
if arr.dtype.char == 'c':
for i in range(len(arr)):
self.assertEqual(chr(int(vtk_arr.GetTuple1(i))), arr[i])
else:
for i in range(len(arr)):
self.assertEqual(vtk_arr.GetTuple1(i), arr[i])
def test_array2vtk(self):
"""Test Numeric array to VTK array conversion and vice-versa."""
# Put all the test arrays here.
t_z = []
# Test the different types of arrays.
t_z.append(numpy.array([-128, 0, 127], numpy.int8))
# FIXME: character arrays are a problem since there is no
# unique mapping to a VTK data type and back.
#t_z.append(numpy.array([-128, 0, 127], numpy.character))
t_z.append(numpy.array([-32768, 0, 32767], numpy.int16))
t_z.append(numpy.array([-2147483648, 0, 2147483647], numpy.int32))
t_z.append(numpy.array([0, 255], numpy.uint8))
t_z.append(numpy.array([0, 65535], numpy.uint16))
t_z.append(numpy.array([0, 4294967295], numpy.uint32))
t_z.append(numpy.array([-1.0e38, 0, 1.0e38], 'f'))
t_z.append(numpy.array([-1.0e299, 0, 1.0e299], 'd'))
# Check multi-component arrays.
t_z.append(numpy.array([[1], [2], [300]], 'd'))
t_z.append(numpy.array([[1, 20], [300, 4000]], 'd'))
t_z.append(numpy.array([[1, 2, 3], [4, 5, 6]], 'f'))
t_z.append(numpy.array([[1, 2, 3],[4, 5, 6]], 'd'))
t_z.append(numpy.array([[1, 2, 3, 400],[4, 5, 6, 700]],
'd'))
t_z.append(numpy.array([list(range(9)),list(range(10,19))], 'f'))
# Test if a Python list also works.
t_z.append(numpy.array([[1., 2., 3., 400.],[4, 5, 6, 700]],
'd'))
# Test if arrays with number of components not in [1,2,3,4,9] work.
t_z.append(numpy.array([[1, 2, 3, 400, 5000],
[4, 5, 6, 700, 8000]], 'd'))
t_z.append(numpy.array([list(range(10)), list(range(10,20))], 'd'))
for z in t_z:
vtk_arr = array_handler.array2vtk(z)
# Test for memory leaks.
self.assertEqual(vtk_arr.GetReferenceCount(),
array_handler.BASE_REFERENCE_COUNT)
self._check_arrays(z, vtk_arr)
z1 = array_handler.vtk2array(vtk_arr)
if len(z.shape) == 1:
self.assertEqual(len(z1.shape), 1)
if z.dtype.char != 'c':
#print z1
self.assertEqual(sum(numpy.ravel(z) - numpy.ravel(z1)), 0)
else:
#print z1.astype('c')
self.assertEqual(z, z1.astype('c'))
# Check if type conversion works correctly.
z = numpy.array([-128, 0, 127], numpy.int8)
vtk_arr = vtk.vtkDoubleArray()
ident = id(vtk_arr)
vtk_arr = array_handler.array2vtk(z, vtk_arr)
# Make sure this is the same array!
self.assertEqual(ident, id(vtk_arr))
self._check_arrays(z, vtk_arr)
# Check the vtkBitArray.
vtk_arr = vtk.vtkBitArray()
vtk_arr.InsertNextValue(0)
vtk_arr.InsertNextValue(1)
vtk_arr.InsertNextValue(0)
vtk_arr.InsertNextValue(1)
arr = array_handler.vtk2array(vtk_arr)
self.assertEqual(numpy.sum(arr - [0,1,0,1]), 0)
vtk_arr = array_handler.array2vtk(arr, vtk_arr)
self.assertEqual(vtk_arr.GetValue(0), 0)
self.assertEqual(vtk_arr.GetValue(1), 1)
self.assertEqual(vtk_arr.GetValue(2), 0)
self.assertEqual(vtk_arr.GetValue(3), 1)
# ----------------------------------------
# Test if the array is copied or not.
a = numpy.array([[1, 2, 3],[4, 5, 6]], 'd')
vtk_arr = array_handler.array2vtk(a)
# Change the numpy array and see if the changes are
# reflected in the VTK array.
a[0] = [10.0, 20.0, 30.0]
self.assertEqual(vtk_arr.GetTuple3(0), (10., 20., 30.))
# Make sure the cache is doing its job.
key = vtk_arr.__this__
z = array_handler._array_cache.get(vtk_arr)
self.assertEqual(numpy.sum(z - numpy.ravel(a)), 0.0)
l1 = len(array_handler._array_cache)
# del the Numeric array and see if this still works.
del a
self.assertEqual(vtk_arr.GetTuple3(0), (10., 20., 30.))
# Check the cache -- just making sure.
self.assertEqual(len(array_handler._array_cache), l1)
# Delete the VTK array and see if the cache is cleared.
del vtk_arr
self.assertEqual(len(array_handler._array_cache), l1-1)
self.assertEqual(key in array_handler._array_cache._cache, False)
# Make sure bit arrays are copied.
vtk_arr = vtk.vtkBitArray()
a = numpy.array([0,1,0,1], numpy.int32)
vtk_arr = array_handler.array2vtk(a, vtk_arr)
del a
self.assertEqual(vtk_arr.GetValue(0), 0)
self.assertEqual(vtk_arr.GetValue(1), 1)
self.assertEqual(vtk_arr.GetValue(2), 0)
self.assertEqual(vtk_arr.GetValue(3), 1)
# Make sure the code at least runs for all the non-complex
# numerical dtypes in numpy.
for dtype in (numpy.sctypes['int'] + numpy.sctypes['uint'] +
numpy.sctypes['float']):
array_handler.array2vtk(numpy.zeros((1,), dtype=dtype))
def test_arr2cell_array(self):
"""Test Numeric array to vtkCellArray conversion."""
# Test list of lists.
a = [[0], [1, 2], [3, 4, 5], [6, 7, 8, 9]]
cells = array_handler.array2vtkCellArray(a)
z = numpy.array([1, 0, 2, 1,2, 3, 3,4,5, 4, 6,7,8,9])
arr = array_handler.vtk2array(cells.GetData())
self.assertEqual(numpy.sum(arr - z), 0)
self.assertEqual(len(arr.shape), 1)
self.assertEqual(len(arr), 14)
# Test if optional argument stuff also works.
cells = vtk.vtkCellArray()
ident = id(cells)
cells = array_handler.array2vtkCellArray(a, cells)
self.assertEqual(id(cells), ident)
arr = array_handler.vtk2array(cells.GetData())
self.assertEqual(numpy.sum(arr - z), 0)
self.assertEqual(cells.GetNumberOfCells(), 4)
# Make sure this resets the cell array and does not add to the
# existing list!
cells = array_handler.array2vtkCellArray(a, cells)
self.assertEqual(cells.GetNumberOfCells(), 4)
# Test Numeric array handling.
N = 3
a = numpy.zeros((N,3), numpy.int)
a[:,1] = 1
a[:,2] = 2
cells = array_handler.array2vtkCellArray(a)
arr = array_handler.vtk2array(cells.GetData())
expect = numpy.array([3, 0, 1, 2]*3, numpy.int)
self.assertEqual(numpy.alltrue(numpy.equal(arr, expect)),
True)
self.assertEqual(cells.GetNumberOfCells(), N)
# Test if a list of Numeric arrays of different cell lengths works.
l_a = [a[:,:1], a, a[:2,:2]]
cells = array_handler.array2vtkCellArray(l_a)
arr = array_handler.vtk2array(cells.GetData())
expect = numpy.array([1, 0]*3 + [3, 0, 1, 2]*3 + [2, 0,1]*2, numpy.int)
self.assertEqual(numpy.alltrue(numpy.equal(arr, expect)),
True)
self.assertEqual(cells.GetNumberOfCells(), N*2 + 2)
# This should not take a long while. This merely tests if a
# million cells can be created rapidly.
N = int(1e6)
a = numpy.zeros((N,3), numpy.int)
a[:,1] = 1
a[:,2] = 2
cells = array_handler.array2vtkCellArray(a)
self.assertEqual(cells.GetNumberOfCells(), N)
def test_arr2vtkPoints(self):
"""Test Numeric array to vtkPoints conversion."""
a = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
p = array_handler.array2vtkPoints(a)
self.assertEqual(p.GetPoint(0), (0.0, 0.0, 0.0))
self.assertEqual(p.GetPoint(1), (1.0, 1.0, 1.0))
p = vtk.vtkPoints()
ident = id(p)
p = array_handler.array2vtkPoints(numpy.array(a), p)
self.assertEqual(p.GetPoint(0), (0.0, 0.0, 0.0))
self.assertEqual(p.GetPoint(1), (1.0, 1.0, 1.0))
self.assertEqual(id(p), ident)
self.assertRaises(AssertionError, array_handler.array2vtkPoints,
[0.0, 1.0])
self.assertRaises(AssertionError, array_handler.array2vtkPoints,
[0.0, 1.0, 1.0])
def test_arr2vtkIdList(self):
"""Test array to vtkIdList conversion."""
a = [1, 2, 3, 4, 5]
p = array_handler.array2vtkIdList(a)
for i, j in enumerate(a):
self.assertEqual(p.GetId(i), j)
p = vtk.vtkIdList()
ident = id(p)
p = array_handler.array2vtkIdList(numpy.array(a), p)
for i, j in enumerate(a):
self.assertEqual(p.GetId(i), j)
self.assertEqual(id(p), ident)
self.assertRaises(AssertionError, array_handler.array2vtkIdList,
[[1,2,3]])
def test_get_correct_sig(self):
"""Test multiple signature cases that have array arguments."""
obj = tvtk_base.TVTKBase(vtk.vtkIdTypeArray)
sigs = [ None,
[['vtkDataArray']],
[['int', 'vtkIdList']],
[['int', 'vtkPoints'], ['int', 'int']],
[['int', 'vtkPoints'], ['int']],
[['int'], ['int', 'vtkPoints']],
[['int', 'vtkDataArray'], ['int', 'int']],
[['int', 'vtkDataArray'], ['int', 'int']],
[['vtkIdList', 'vtkCellArray'], ['int', 'vtkPoints'],
['int', 'vtkDataArray']],
[['vtkIdList', 'vtkCellArray'], ['int', 'vtkPoints'],
['int', 'vtkDataArray']],
[['vtkIdTypeArray', 'vtkCellArray'], ['int', 'vtkPoints'],
['int', 'vtkDataArray']],
[['vtkIdTypeArray', 'vtkCellArray'], ['int', 'vtkPoints'],
['int', 'vtkDataArray']],
[['vtkIdTypeArray', 'vtkCellArray'], ['int', 'vtkPoints'],
['int', ('float', 'float', 'float')]],
]
args = [ [1], # No sig info.
['foo'], # One sig.
[1], # One sig.
[1], # Error
[1], # Only one valid sig.
[1,[1,1,1]], # Only one valid sig.
[1, [1,1,1]], # Multiple valid sigs.
[1,1], # No arrays!
[1,1], # No match so returns None.
[1, [1,1,1]], # ambiguous, pick first match.
[numpy.array([1,1]), [1,1,1]], # Match!
[obj, [2,1,2,3]], # TVTK array object, match.
[[2,1,2,3], obj], # TVTK array object, match but has
# wrong argument. Should be caught
# by VTK.
]
res = [ None,
['vtkDataArray'],
['int', 'vtkIdList'],
TypeError,
['int'],
['int', 'vtkPoints'],
['int', 'vtkDataArray'],
None,
None,
['int', 'vtkPoints'],
['vtkIdTypeArray', 'vtkCellArray'],
['vtkIdTypeArray', 'vtkCellArray'],
['vtkIdTypeArray', 'vtkCellArray'],
]
for i in range(len(sigs)):
if res[i] is TypeError:
self.assertRaises(res[i], array_handler.get_correct_sig,
args[i], sigs[i])
else:
s = array_handler.get_correct_sig(args[i], sigs[i])
#print s, res[i]
self.assertEqual(s, res[i])
def test_deref_array(self):
"""Test if dereferencing array args works correctly."""
sigs = [[['vtkDataArray']],
[['vtkFloatArray']],
[['vtkCellArray']],
[['vtkPoints']],
[['int', 'vtkIdList']],
[['int', ('float', 'float'), 'vtkDataArray']],
[['Prop', 'int', 'vtkDataArray']],
[['Points', ('float', 'float', 'float')]]
]
args = [[[1,2,3]],
[[0,0,0]],
[[[1,2,3],[4,5,6]]],
[[[0.,0.,0.], [1.,1.,1.]]],
[1, [1,2,3]],
[1, (0.0, 0.0), [1.0, 1.0, 1.0]],
[Prop(), 1, numpy.array([1.0, 1.0, 1.0])],
[[[1,2,3]], [1,2,3]]
]
r = array_handler.deref_array(args[0], sigs[0])
self.assertEqual(mysum(array_handler.vtk2array(r[0]) -args[0]), 0)
r = array_handler.deref_array(args[1], sigs[1])
self.assertEqual(mysum(array_handler.vtk2array(r[0]) - args[1]), 0)
r = array_handler.deref_array(args[2], sigs[2])
self.assertEqual(r[0].GetNumberOfCells(), 2)
r = array_handler.deref_array(args[3], sigs[3])
self.assertEqual(mysum(array_handler.vtk2array(r[0].GetData()) -
numpy.array(args[3], 'f')), 0)
r = array_handler.deref_array(args[4], sigs[4])
self.assertEqual(r[0], 1)
self.assertEqual(r[1].__class__.__name__, 'vtkIdList')
r = array_handler.deref_array(args[5], sigs[5])
self.assertEqual(r[0], 1)
self.assertEqual(r[1], (0.0, 0.0))
self.assertEqual(mysum(array_handler.vtk2array(r[2]) -args[5][2]), 0)
r = array_handler.deref_array(args[6], sigs[6])
self.assertEqual(r[0].IsA('vtkProperty'), True)
self.assertEqual(r[1], 1)
self.assertEqual(mysum(array_handler.vtk2array(r[2]) -args[6][2]), 0)
r = array_handler.deref_array(args[7], sigs[7])
def test_reference_to_array(self):
"""Does to_array return an existing array instead of a new copy."""
arr = numpy.arange(0.0, 10.0, 0.1)
arr = numpy.reshape(arr, (25, 4))
vtk_arr = array_handler.array2vtk(arr)
arr1 = array_handler.vtk2array(vtk_arr)
# Now make sure these are using the same memory.
arr[0][0] = 100.0
self.assertEqual(arr[0][0], arr1[0][0])
self.assertEqual(arr.shape, arr1.shape)
def test_array_cache(self):
"""Test the ArrayCache class."""
cache = array_handler.ArrayCache()
# Test if len works.
self.assertEqual(len(cache), 0)
arr = numpy.zeros(100, float)
varr = vtk.vtkFloatArray()
# test contains
self.assertEqual(varr not in cache, True)
cache.add(varr, arr)
self.assertEqual(len(cache), 1)
self.assertEqual(varr in cache, True)
# Test the get method.
self.assertEqual(cache.get(varr) is arr, True)
# Test if the cache is cleared when the array is deleted.
del varr
self.assertEqual(len(cache), 0)
def test_id_array(self):
"""Test if a vtkIdTypeArray is converted correctly."""
arr = vtk.vtkIdTypeArray()
arr.SetNumberOfTuples(10)
for i in range(10):
arr.SetValue(i, i)
np = array_handler.vtk2array(arr)
self.assertEqual(numpy.all(np == list(range(10))), True)
if __name__ == "__main__":
unittest.main()
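
# A hedged usage sketch (not invoked by the test suite above): the basic
# NumPy <-> VTK round trip that these tests exercise. It relies on the
# module-level numpy / array_handler imports at the top of this file.
def _example_roundtrip():
    a = numpy.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    vtk_arr = array_handler.array2vtk(a)   # a vtkDoubleArray with 2 tuples of 3 components
    b = array_handler.vtk2array(vtk_arr)   # a NumPy array backed by the same memory
    assert b.shape == a.shape
    return b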
| dmsurti/mayavi | tvtk/tests/test_array_handler.py | Python | bsd-3-clause | 16,704 | ["VTK"] | 0952bf2b6d78138912365fac1c40abb9d065cc414102ba7d99d284166119eb5d |
extensions = dict(
required_params=['training_frame', ('cols', "NULL")],
validate_required_params="",
set_required_params="""
parms$training_frame <- training_frame
if(!missing(cols))
parms$ignored_columns <- .verify_datacols(training_frame, cols)$cols_ignore
""",
set_params="""
# Check if user_y is an acceptable set of user-specified starting points
if( is.data.frame(user_y) || is.matrix(user_y) || is.list(user_y) || is.H2OFrame(user_y) ) {
# Convert user-specified starting points to H2OFrame
if( is.data.frame(user_y) || is.matrix(user_y) || is.list(user_y) ) {
if( !is.data.frame(user_y) && !is.matrix(user_y) ) user_y <- t(as.data.frame(user_y))
user_y <- as.h2o(user_y)
}
parms[["user_y"]] <- user_y
# Set k
if( !(missing(k)) && k!=as.integer(nrow(user_y)) ) {
warning("Argument k is not equal to the number of rows in user-specified Y. Ignoring k. Using specified Y.")
}
if ( !missing(loading_name)) {
warning("Argument loading_name is deprecated. Use representation_name instead.")
}
parms[["k"]] <- as.numeric(nrow(user_y))
# } else if( is.null(user_y) ) {
# if(!missing(init) && parms[["init"]] == "User")
# warning("Initializing Y to a standard Gaussian random matrix.")
# } else
} else if( !is.null(user_y) )
stop("Argument user_y must either be null or a valid user-defined starting Y matrix.")
# Check if user_x is an acceptable set of user-specified starting points
if( is.data.frame(user_x) || is.matrix(user_x) || is.list(user_x) || is.H2OFrame(user_x) ) {
# Convert user-specified starting points to H2OFrame
if( is.data.frame(user_x) || is.matrix(user_x) || is.list(user_x) ) {
if( !is.data.frame(user_x) && !is.matrix(user_x) ) user_x <- t(as.data.frame(user_x))
user_x <- as.h2o(user_x)
}
parms[["user_x"]] <- user_x
# } else if( is.null(user_x) ) {
# if(!missing(init) && parms[["init"]] == "User")
# warning("Initializing X to a standard Gaussian random matrix.")
# } else
} else if( !is.null(user_x) )
stop("Argument user_x must either be null or a valid user-defined starting X matrix.")
""",
module="""
#' Reconstruct Training Data via H2O GLRM Model
#'
#' Reconstruct the training data and impute missing values from the H2O GLRM model
#' by computing the matrix product of X and Y, and transforming back to the original
#' feature space by minimizing each column's loss function.
#'
#' @param object An \linkS4class{H2ODimReductionModel} object that represents the
#' model to be used for reconstruction.
#' @param data An H2OFrame object representing the training data for the H2O GLRM model.
#' Used to set the domain of each column in the reconstructed frame.
#' @param reverse_transform (Optional) A logical value indicating whether to reverse the
#' transformation from model-building by re-scaling columns and adding back the
#' offset to each column of the reconstructed frame.
#' @return Returns an H2OFrame object containing the approximate reconstruction of the
#' training data;
#' @seealso \code{\link{h2o.glrm}} for making an H2ODimReductionModel.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' iris_hf <- as.h2o(iris)
#' iris_glrm <- h2o.glrm(training_frame = iris_hf, k = 4, transform = "STANDARDIZE",
#' loss = "Quadratic", multi_loss = "Categorical", max_iterations = 1000)
#' iris_rec <- h2o.reconstruct(iris_glrm, iris_hf, reverse_transform = TRUE)
#' head(iris_rec)
#' }
#' @export
h2o.reconstruct <- function(object, data, reverse_transform=FALSE) {
url <- paste0('Predictions/models/', object@model_id, '/frames/',h2o.getId(data))
res <- .h2o.__remoteSend(url, method = "POST", reconstruct_train=TRUE, reverse_transform=reverse_transform)
key <- res$model_metrics[[1L]]$predictions$frame_id$name
h2o.getFrame(key)
}
#' Convert Archetypes to Features from H2O GLRM Model
#'
#' Project each archetype in an H2O GLRM model into the corresponding feature
#' space from the H2O training frame.
#'
#' @param object An \linkS4class{H2ODimReductionModel} object that represents the
#' model containing archetypes to be projected.
#' @param data An H2OFrame object representing the training data for the H2O GLRM model.
#' @param reverse_transform (Optional) A logical value indicating whether to reverse the
#' transformation from model-building by re-scaling columns and adding back the
#' offset to each column of the projected archetypes.
#' @return Returns an H2OFrame object containing the projection of the archetypes
#' down into the original feature space, where each row is one archetype.
#' @seealso \code{\link{h2o.glrm}} for making an H2ODimReductionModel.
#' @examples
#' \dontrun{
#' library(h2o)
#' h2o.init()
#' iris_hf <- as.h2o(iris)
#' iris_glrm <- h2o.glrm(training_frame = iris_hf, k = 4, loss = "Quadratic",
#' multi_loss = "Categorical", max_iterations = 1000)
#' iris_parch <- h2o.proj_archetypes(iris_glrm, iris_hf)
#' head(iris_parch)
#' }
#' @export
h2o.proj_archetypes <- function(object, data, reverse_transform=FALSE) {
url <- paste0('Predictions/models/', object@model_id, '/frames/',h2o.getId(data))
res <- .h2o.__remoteSend(url, method = "POST", project_archetypes=TRUE, reverse_transform=reverse_transform)
key <- res$model_metrics[[1L]]$predictions$frame_id$name
h2o.getFrame(key)
}
""",
)
doc = dict(
preamble="""
Generalized low rank decomposition of an H2O data frame
Builds a generalized low rank decomposition of an H2O data frame
""",
params=dict(
cols="(Optional) A vector containing the data columns on which k-means operates."
),
returns="""
an object of class \linkS4class{H2ODimReductionModel}.
""",
seealso="""
\code{\link{h2o.kmeans}, \link{h2o.svd}}, \code{\link{h2o.prcomp}}
""",
references="""
M. Udell, C. Horn, R. Zadeh, S. Boyd (2014). {Generalized Low Rank Models}[https://arxiv.org/abs/1410.0342]. Unpublished manuscript, Stanford Electrical Engineering Department.
N. Halko, P.G. Martinsson, J.A. Tropp. {Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions}[https://arxiv.org/abs/0909.4061]. SIAM Rev., Survey and Review section, Vol. 53, num. 2, pp. 217-288, June 2011.
""",
examples="""
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.uploadFile(path = australia_path)
h2o.glrm(training_frame = australia, k = 5, loss = "Quadratic", regularization_x = "L1",
gamma_x = 0.5, gamma_y = 0, max_iterations = 1000)
"""
)
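
# A hedged companion note (not generated by this module): roughly equivalent
# GLRM usage through the h2o Python API, mirroring the R example above. The
# estimator name and parameters follow h2o's Python bindings; treat the exact
# values and the data path as illustrative only.
#
# import h2o
# from h2o.estimators import H2OGeneralizedLowRankEstimator
#
# h2o.init()
# australia = h2o.import_file("australia.csv")  # any numeric H2OFrame
# glrm = H2OGeneralizedLowRankEstimator(k=5, loss="Quadratic",
#                                       regularization_x="L1",
#                                       gamma_x=0.5, gamma_y=0,
#                                       max_iterations=1000)
# glrm.train(training_frame=australia)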
| michalkurka/h2o-3 | h2o-bindings/bin/custom/R/gen_glrm.py | Python | apache-2.0 | 6,624 | ["Gaussian"] | 8a28d0fe324e268f4529dc6ca1bba1b3b511f35bb2d3cf55b51c0523e6916c77 |
# Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
""" %prog [options] module_or_package
Check that a module satisfies a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
# import this first to avoid builtin namespace pollution
from pylint.checkers import utils
import sys
import os
import tokenize
from warnings import warn
from logilab.common.configuration import UnsupportedAction, OptionsManagerMixIn
from logilab.common.optik_ext import check_csv
from logilab.common.modutils import load_module_from_name, get_module_part
from logilab.common.interface import implements
from logilab.common.textutils import splitstrip
from logilab.common.ureports import Table, Text, Section
from logilab.common.__pkginfo__ import version as common_version
from astroid import MANAGER, nodes, AstroidBuildingException
from astroid.__pkginfo__ import version as astroid_version
from pylint.utils import (
MSG_TYPES, OPTION_RGX,
PyLintASTWalker, UnknownMessage, MessagesHandlerMixIn, ReportsHandlerMixIn,
EmptyReport, WarningScope,
expand_modules, tokenize_module)
from pylint.interfaces import IRawChecker, ITokenChecker, IAstroidChecker
from pylint.checkers import (BaseTokenChecker,
table_lines_from_stats,
initialize as checkers_initialize)
from pylint.reporters import initialize as reporters_initialize
from pylint import config
from pylint.__pkginfo__ import version
def _get_python_path(filepath):
dirname = os.path.dirname(os.path.realpath(
os.path.expanduser(filepath)))
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
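# A hedged worked example (not from the original source): for a file such as
# project/pkg/sub/mod.py, where pkg/ and pkg/sub/ both contain an __init__.py
# but project/ does not, _get_python_path() walks upward past the package
# directories and returns the absolute path of project/, i.e. the directory
# that should be prepended to sys.path before linting.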
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'fatal',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'astroid-error',
'Used when an unexpected error occurred while building the '
'Astroid representation. This is usually accompanied by a '
'traceback. Please report such errors !'),
'F0003': ('ignored builtin module %s',
'ignored-builtin-module',
'Used to indicate that the user asked to analyze a builtin '
'module which has been skipped.'),
'F0004': ('unexpected inferred value %s',
'unexpected-inferred-value',
'Used to indicate that some value of an unexpected type has been '
'inferred.'),
'F0010': ('error while code parsing: %s',
'parse-error',
'Used when an exception occurred while building the Astroid '
'representation which could be handled by astroid.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'raw-checker-failed',
'Used to inform that a built-in module has not been checked '
'using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'bad-inline-option',
'Used when an inline option is either badly formatted or can\'t '
'be used inside modules.'),
'I0011': ('Locally disabling %s',
'locally-disabled',
'Used when an inline option disables a message or a messages '
'category.'),
'I0012': ('Locally enabling %s',
'locally-enabled',
'Used when an inline option enables a message or a messages '
'category.'),
'I0013': ('Ignoring entire file',
'file-ignored',
'Used to inform that the file will not be checked'),
'I0014': ('Used deprecated directive "pylint:disable-all" or '
'"pylint:disable=all"',
'deprecated-disable-all',
'You should preferably use "pylint:skip-file" as this directive '
'has a less confusing name. Do this only if you are sure that '
'all people running Pylint on your code have version >= 0.26'),
'I0020': ('Suppressed %s (from line %d)',
'suppressed-message',
'A message was triggered on a line, but suppressed explicitly '
'by a disable= comment in the file. This message is not '
'generated for messages that are ignored due to configuration '
'settings.'),
'I0021': ('Useless suppression of %s',
'useless-suppression',
'Reported when a message is explicitly disabled for a line or '
'a block of code, but never triggered.'),
'I0022': ('Deprecated pragma "pylint:disable-msg" or "pylint:enable-msg"',
'deprecated-pragma',
'You should preferably use "pylint:disable" or "pylint:enable" '
'instead of the deprecated suppression pragma style '
'"pylint:disable-msg" or "pylint:enable-msg"'),
'E0001': ('%s',
'syntax-error',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'unrecognized-inline-option',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'bad-option-value',
'Used when a bad value for an inline option is encountered.'),
}
class PyLinter(OptionsManagerMixIn, MessagesHandlerMixIn, ReportsHandlerMixIn,
BaseTokenChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
IDE plugin developers: you may have to call
`astroid.builder.MANAGER.astroid_cache.clear()` across runs if you want
to ensure the latest code version is actually checked.
"""
__implements__ = (ITokenChecker,)
name = 'master'
priority = 0
level = 0
msgs = MSGS
may_be_disabled = False
@staticmethod
def make_options():
return (('ignore',
{'type' : 'csv', 'metavar' : '<file>[,<file>...]',
'dest' : 'black_list', 'default' : ('CVS',),
'help' : 'Add files or directories to the blacklist. '
'They should be base names, not paths.'}),
('persistent',
{'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
'level': 1,
'help' : 'Pickle collected data for later comparisons.'}),
('load-plugins',
{'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
'level': 1,
'help' : 'List of plugins (as comma separated values of '
'python modules names) to load, usually to register '
'additional checkers.'}),
('output-format',
{'default': 'text', 'type': 'string', 'metavar' : '<format>',
'short': 'f',
'group': 'Reports',
'help' : 'Set the output format. Available formats are text,'
' parseable, colorized, msvs (visual studio) and html. You '
'can also give a reporter class, eg mypackage.mymodule.'
'MyReporterClass.'}),
('files-output',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports', 'level': 1,
'help' : 'Put messages in a separate file for each module / '
'package specified on the command line instead of printing '
'them on stdout. Reports (if any) will be written in a file '
'name "pylint_global.[txt|html]".'}),
('reports',
{'default': 1, 'type' : 'yn', 'metavar' : '<y_or_n>',
'short': 'r',
'group': 'Reports',
'help' : 'Tells whether to display a full report or only the '
'messages'}),
('evaluation',
{'type' : 'string', 'metavar' : '<python_expression>',
'group': 'Reports', 'level': 1,
'default': '10.0 - ((float(5 * error + warning + refactor + '
'convention) / statement) * 10)',
'help' : 'Python expression which should return a note less \
than 10 (10 is the highest note). You have access to the variables error, \
warning, refactor, convention and statement, which respectively contain the \
number of messages in each category and the total number of statements \
analyzed. This is used by the global evaluation report (RP0004).'}),
('comment',
{'default': 0, 'type' : 'yn', 'metavar' : '<y_or_n>',
'group': 'Reports', 'level': 1,
'help' : 'Add a comment according to your evaluation note. '
'This is used by the global evaluation report (RP0004).'}),
('enable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'e',
'group': 'Messages control',
'help' : 'Enable the message, report, category or checker with the '
'given id(s). You can either give multiple identifier '
'separated by comma (,) or put this option multiple time. '
'See also the "--disable" option for examples. '}),
('disable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'd',
'group': 'Messages control',
'help' : 'Disable the message, report, category or checker '
'with the given id(s). You can either give multiple identifiers'
' separated by comma (,) or put this option multiple times '
'(only on the command line, not in the configuration file '
'where it should appear only once).'
'You can also use "--disable=all" to disable everything first '
'and then reenable specific checks. For example, if you want '
'to run only the similarities checker, you can use '
'"--disable=all --enable=similarities". '
'If you want to run only the classes checker, but have no '
'Warning level messages displayed, use'
'"--disable=all --enable=classes --disable=W"'}),
('msg-template',
{'type' : 'string', 'metavar': '<template>',
#'short': 't',
'group': 'Reports',
'help' : ('Template used to display messages. '
'This is a python new-style format string '
'used to format the message information. '
'See doc for all details')
}), # msg-template
)
option_groups = (
('Messages control', 'Options controlling analysis messages'),
('Reports', 'Options related to output formatting and reporting'),
)
def __init__(self, options=(), reporter=None, option_groups=(),
pylintrc=None):
# some stuff has to be done before ancestors initialization...
#
# checkers / reporter / astroid manager
self.reporter = None
self._reporter_name = None
self._reporters = {}
self._checkers = {}
self._ignore_file = False
# visit variables
self.base_name = None
self.base_file = None
self.current_name = None
self.current_file = None
self.stats = None
# init options
self.options = options + PyLinter.make_options()
self.option_groups = option_groups + PyLinter.option_groups
self._options_methods = {
'enable': self.enable,
'disable': self.disable}
self._bw_options_methods = {'disable-msg': self.disable,
'enable-msg': self.enable}
full_version = '%%prog %s, \nastroid %s, common %s\nPython %s' % (
version, astroid_version, common_version, sys.version)
OptionsManagerMixIn.__init__(self, usage=__doc__,
version=full_version,
config_file=pylintrc or config.PYLINTRC)
MessagesHandlerMixIn.__init__(self)
ReportsHandlerMixIn.__init__(self)
BaseTokenChecker.__init__(self)
# provided reports
self.reports = (('RP0001', 'Messages by category',
report_total_messages_stats),
('RP0002', '% errors / warnings by module',
report_messages_by_module_stats),
('RP0003', 'Messages',
report_messages_stats),
('RP0004', 'Global evaluation',
self.report_evaluation),
)
self.register_checker(self)
self._dynamic_plugins = set()
self.load_provider_defaults()
if reporter:
self.set_reporter(reporter)
def load_default_plugins(self):
checkers_initialize(self)
reporters_initialize(self)
# Make sure to load the default reporter, because
# the option has been set before the plugins had been loaded.
if not self.reporter:
self._load_reporter()
def prepare_import_path(self, args):
"""Prepare sys.path for running the linter checks."""
if len(args) == 1:
sys.path.insert(0, _get_python_path(args[0]))
else:
sys.path.insert(0, os.getcwd())
def cleanup_import_path(self):
"""Revert any changes made to sys.path in prepare_import_path."""
sys.path.pop(0)
def load_plugin_modules(self, modnames):
"""take a list of module names which are pylint plugins and load
and register them
"""
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = load_module_from_name(modname)
module.register(self)
def _load_reporter(self):
name = self._reporter_name.lower()
if name in self._reporters:
self.set_reporter(self._reporters[name]())
else:
qname = self._reporter_name
module = load_module_from_name(get_module_part(qname))
class_name = qname.split('.')[-1]
reporter_class = getattr(module, class_name)
self.set_reporter(reporter_class())
def set_reporter(self, reporter):
"""set the reporter used to display messages and reports"""
self.reporter = reporter
reporter.linter = self
def set_option(self, optname, value, action=None, optdict=None):
"""overridden from configuration.OptionsProviderMixin to handle some
special options
"""
if optname in self._options_methods or \
optname in self._bw_options_methods:
if value:
try:
meth = self._options_methods[optname]
except KeyError:
meth = self._bw_options_methods[optname]
warn('%s is deprecated, replace it by %s' % (
optname, optname.split('-')[0]), DeprecationWarning)
value = check_csv(None, optname, value)
if isinstance(value, (list, tuple)):
for _id in value:
meth(_id)
else:
meth(value)
elif optname == 'output-format':
self._reporter_name = value
# If the reporters are already available, load
# the reporter class.
if self._reporters:
self._load_reporter()
try:
BaseTokenChecker.set_option(self, optname, value, action, optdict)
except UnsupportedAction:
print >> sys.stderr, 'option %s can\'t be read from config file' % \
optname
def register_reporter(self, reporter_class):
self._reporters[reporter_class.name] = reporter_class
# checkers manipulation methods ############################################
def register_checker(self, checker):
"""register a new checker
checker is an object implementing IRawChecker or / and IAstroidChecker
"""
assert checker.priority <= 0, 'checker priority can\'t be >= 0'
self._checkers.setdefault(checker.name, []).append(checker)
for r_id, r_title, r_cb in checker.reports:
self.register_report(r_id, r_title, r_cb, checker)
self.register_options_provider(checker)
if hasattr(checker, 'msgs'):
self.register_messages(checker)
checker.load_defaults()
def disable_noerror_messages(self):
for msgcat, msgids in self._msgs_by_category.iteritems():
if msgcat == 'E':
for msgid in msgids:
self.enable(msgid)
else:
for msgid in msgids:
self.disable(msgid)
def disable_reporters(self):
"""disable all reporters"""
for reporters in self._reports.itervalues():
for report_id, _title, _cb in reporters:
self.disable_report(report_id)
def error_mode(self):
"""error mode: enable only errors; no reports, no persistent"""
self.disable_noerror_messages()
self.disable('miscellaneous')
self.set_option('reports', False)
self.set_option('persistent', False)
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
def process_tokens(self, tokens):
"""process tokens from the current module to search for module/block
level options
"""
comment = tokenize.COMMENT
newline = tokenize.NEWLINE
for (tok_type, _, start, _, line) in tokens:
if tok_type not in (comment, newline):
continue
match = OPTION_RGX.search(line)
if match is None:
continue
if match.group(1).strip() == "disable-all" or \
match.group(1).strip() == 'skip-file':
if match.group(1).strip() == "disable-all":
self.add_message('I0014', line=start[0])
self.add_message('I0013', line=start[0])
self._ignore_file = True
return
try:
opt, value = match.group(1).split('=', 1)
except ValueError:
self.add_message('I0010', args=match.group(1).strip(),
line=start[0])
continue
opt = opt.strip()
if opt in self._options_methods or opt in self._bw_options_methods:
try:
meth = self._options_methods[opt]
except KeyError:
meth = self._bw_options_methods[opt]
# found a "(dis|en)able-msg" pragma: deprecated suppression
self.add_message('deprecated-pragma', line=start[0])
for msgid in splitstrip(value):
try:
if (opt, msgid) == ('disable', 'all'):
self.add_message('I0014', line=start[0])
self.add_message('I0013', line=start[0])
self._ignore_file = True
return
meth(msgid, 'module', start[0])
except UnknownMessage:
self.add_message('E0012', args=msgid, line=start[0])
else:
self.add_message('E0011', args=opt, line=start[0])
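
# A hedged illustration (not part of the original source) of the inline
# pragmas that process_tokens() recognizes inside a checked module:
#   # pylint: disable=W0212,E1101   -> disable the listed messages
#   # pylint: enable=W0212          -> re-enable a message
#   # pylint: skip-file             -> ignore the whole module
# (The exact line range a disable/enable applies to is worked out by
# collect_block_lines() below.)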
def collect_block_lines(self, node, msg_state):
"""walk ast to collect block level options line numbers"""
# recurse on children (depth first)
for child in node.get_children():
self.collect_block_lines(child, msg_state)
first = node.fromlineno
last = node.tolineno
# The first child's line number is used to distinguish disables that are
# the first child of a scoped node from those defined later.
# For instance in the code below:
#
# 1. def meth8(self):
# 2. """test late disabling"""
# 3. # pylint: disable=E1102
# 4. print self.blip
# 5. # pylint: disable=E1101
# 6. print self.bla
#
# E1102 should be disabled from line 1 to 6 while E1101 from line 5 to 6
#
# this is necessary to disable locally messages applying to class /
# function using their fromlineno
if isinstance(node, (nodes.Module, nodes.Class, nodes.Function)) and node.body:
firstchildlineno = node.body[0].fromlineno
else:
firstchildlineno = last
for msgid, lines in msg_state.iteritems():
for lineno, state in lines.items():
original_lineno = lineno
if first <= lineno <= last:
# Set state for all lines for this block, if the
# warning is applied to nodes.
if self.check_message_id(msgid).scope == WarningScope.NODE:
if lineno > firstchildlineno:
state = True
first_, last_ = node.block_range(lineno)
else:
first_ = lineno
last_ = last
for line in xrange(first_, last_+1):
# do not override existing entries
if not line in self._module_msgs_state.get(msgid, ()):
if line in lines: # state change in the same block
state = lines[line]
original_lineno = line
if not state:
self._suppression_mapping[(msgid, line)] = original_lineno
try:
self._module_msgs_state[msgid][line] = state
except KeyError:
self._module_msgs_state[msgid] = {line: state}
del lines[lineno]
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for checkers in self._checkers.itervalues()
for c in checkers if c is not self]
def prepare_checkers(self):
"""return checkers needed for activated messages and reports"""
if not self.config.reports:
self.disable_reporters()
# get needed checkers
neededcheckers = [self]
for checker in self.get_checkers()[1:]:
# fatal errors should not trigger enable / disabling a checker
messages = set(msg for msg in checker.msgs
if msg[0] != 'F' and self.is_message_enabled(msg))
if (messages or
any(self.report_is_enabled(r[0]) for r in checker.reports)):
neededcheckers.append(checker)
checker.active_msgs = messages
return neededcheckers
def should_analyze_file(self, modname, path): # pylint: disable=unused-argument
"""Returns whether or not a module should be checked.
This implementation returns True for all inputs, indicating that all
files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:returns: True if the module should be checked.
:rtype: bool
"""
return True
def check(self, files_or_modules):
"""main checking entry: check a list of files or modules from their
name.
"""
if not isinstance(files_or_modules, (list, tuple)):
files_or_modules = (files_or_modules,)
walker = PyLintASTWalker(self)
checkers = self.prepare_checkers()
tokencheckers = [c for c in checkers if implements(c, ITokenChecker)
and c is not self]
rawcheckers = [c for c in checkers if implements(c, IRawChecker)]
# notify global begin
for checker in checkers:
checker.open()
if implements(checker, IAstroidChecker):
walker.add_checker(checker)
# build ast and check modules or packages
for descr in self.expand_files(files_or_modules):
modname, filepath = descr['name'], descr['path']
if not self.should_analyze_file(modname, filepath):
continue
if self.config.files_output:
reportfile = 'pylint_%s.%s' % (modname, self.reporter.extension)
self.reporter.set_output(open(reportfile, 'w'))
self.set_current_module(modname, filepath)
# get the module representation
astroid = self.get_ast(filepath, modname)
if astroid is None:
continue
self.base_name = descr['basename']
self.base_file = descr['basepath']
self._ignore_file = False
# fix the current file (if the source file was not available or
# if it's actually a c extension)
self.current_file = astroid.file
self.check_astroid_module(astroid, walker, rawcheckers, tokencheckers)
self._add_suppression_messages()
# notify global end
self.set_current_module('')
self.stats['statement'] = walker.nbstatements
checkers.reverse()
for checker in checkers:
checker.close()
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = expand_modules(modules, self.config.black_list)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "F0001":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in MSG_TYPES.itervalues():
self.stats['by_module'][modname][msg_cat] = 0
# XXX hack, to be correct we need to keep module_msgs_state
# for every analyzed module (the problem stands with localized
# messages which are only detected in the .close step)
if modname:
self._module_msgs_state = {}
self._raw_module_msgs_state = {}
self._ignored_msgs = {}
def get_ast(self, filepath, modname):
"""return a ast(roid) representation for a module"""
try:
return MANAGER.ast_from_file(filepath, modname, source=True)
except SyntaxError, ex:
self.add_message('E0001', line=ex.lineno, args=ex.msg)
except AstroidBuildingException, ex:
self.add_message('F0010', args=ex)
except Exception, ex:
import traceback
traceback.print_exc()
self.add_message('F0002', args=(ex.__class__, ex))
def check_astroid_module(self, astroid, walker, rawcheckers, tokencheckers):
"""check a module from its astroid representation, real work"""
# call raw checkers if possible
try:
tokens = tokenize_module(astroid)
except tokenize.TokenError, ex:
self.add_message('E0001', line=ex.args[1][0], args=ex.args[0])
return
if not astroid.pure_python:
self.add_message('I0001', args=astroid.name)
else:
#assert astroid.file.endswith('.py')
# invoke ITokenChecker interface on self to fetch module/block
# level options
self.process_tokens(tokens)
if self._ignore_file:
return False
# walk ast to collect line numbers
for msg, lines in self._module_msgs_state.iteritems():
self._raw_module_msgs_state[msg] = lines.copy()
orig_state = self._module_msgs_state.copy()
self._module_msgs_state = {}
self._suppression_mapping = {}
self.collect_block_lines(astroid, orig_state)
for checker in rawcheckers:
checker.process_module(astroid)
for checker in tokencheckers:
checker.process_tokens(tokens)
# generate events to astroid checkers
walker.walk(astroid)
return True
# IAstroidChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = {'by_module' : {},
'by_msg' : {},
}
for msg_cat in MSG_TYPES.itervalues():
self.stats[msg_cat] = 0
def close(self):
"""close the whole package /module, it's time to make reports !
if persistent run, pickle results for later comparison
"""
if self.base_name is not None:
# load previous results if any
previous_stats = config.load_results(self.base_name)
# XXX code below needs refactoring to be more reporter agnostic
self.reporter.on_close(self.stats, previous_stats)
if self.config.reports:
sect = self.make_reports(self.stats, previous_stats)
if self.config.files_output:
filename = 'pylint_global.' + self.reporter.extension
self.reporter.set_output(open(filename, 'w'))
else:
sect = Section()
if self.config.reports or self.config.output_format == 'html':
self.reporter.display_results(sect)
# save results if persistent run
if self.config.persistent:
config.save_results(self.stats, self.base_name)
else:
self.reporter.on_close(self.stats, {})
# specific reports ########################################################
def _add_suppression_messages(self):
for warning, lines in self._raw_module_msgs_state.iteritems():
for line, enable in lines.iteritems():
if not enable and (warning, line) not in self._ignored_msgs:
self.add_message('I0021', line, None,
(self.get_msg_display_string(warning),))
# don't use iteritems here, _ignored_msgs may be modified by add_message
for (warning, from_), lines in self._ignored_msgs.items():
for line in lines:
self.add_message('I0020', line, None,
(self.get_msg_display_string(warning), from_))
def report_evaluation(self, sect, stats, previous_stats):
"""make the global evaluation report"""
# only rate the code when at least 1 statement was analyzed (usually 0 when
# there is a syntax error preventing pylint from further processing)
if stats['statement'] == 0:
raise EmptyReport()
# get a global note for the code
evaluation = self.config.evaluation
try:
note = eval(evaluation, {}, self.stats)
except Exception, ex:
msg = 'An exception occurred while rating: %s' % ex
else:
stats['global_note'] = note
msg = 'Your code has been rated at %.2f/10' % note
pnote = previous_stats.get('global_note')
if pnote is not None:
msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote)
if self.config.comment:
msg = '%s\n%s' % (msg, config.get_note_message(note))
sect.append(Text(msg))
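
# A hedged worked example of the default evaluation expression (not part of the
# original source): with error=1, warning=2, refactor=0, convention=0 and
# statement=100, the note is 10.0 - ((5*1 + 2 + 0 + 0) / 100.0) * 10 == 9.3.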
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
"""make total errors / warnings report"""
lines = ['type', 'number', 'previous', 'difference']
lines += table_lines_from_stats(stats, previous_stats,
('convention', 'refactor',
'warning', 'error'))
sect.append(Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
"""make messages type report"""
if not stats['by_msg']:
# don't print this report when we didn't detect any errors
raise EmptyReport()
in_order = sorted([(value, msg_id)
for msg_id, value in stats['by_msg'].iteritems()
if not msg_id.startswith('I')])
in_order.reverse()
lines = ('message id', 'occurrences')
for value, msg_id in in_order:
lines += (msg_id, str(value))
sect.append(Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
"""make errors / warnings by modules report"""
if len(stats['by_module']) == 1:
# don't print this report when we are analysing a single module
raise EmptyReport()
by_mod = {}
for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
total = stats[m_type]
for module in stats['by_module'].iterkeys():
mod_total = stats['by_module'][module][m_type]
if total == 0:
percent = 0
else:
percent = float((mod_total)*100) / total
by_mod.setdefault(module, {})[m_type] = percent
sorted_result = []
for module, mod_info in by_mod.iteritems():
sorted_result.append((mod_info['error'],
mod_info['warning'],
mod_info['refactor'],
mod_info['convention'],
module))
sorted_result.sort()
sorted_result.reverse()
lines = ['module', 'error', 'warning', 'refactor', 'convention']
for line in sorted_result:
if line[0] == 0 and line[1] == 0:
break
lines.append(line[-1])
for val in line[:-1]:
lines.append('%.2f' % val)
if len(lines) == 5:
raise EmptyReport()
sect.append(Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
# this may help to import modules using gettext
# XXX syt, actually needed since we don't import code?
from logilab.common.compat import builtins
builtins._ = str
class ArgumentPreprocessingError(Exception):
"""Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
"""look for some options (keys of <search_for>) which have to be processed
before others
values of <search_for> are callback functions to call when the option is
found
"""
i = 0
while i < len(args):
arg = args[i]
if arg.startswith('--'):
try:
option, val = arg[2:].split('=', 1)
except ValueError:
option, val = arg[2:], None
try:
cb, takearg = search_for[option]
del args[i]
if takearg and val is None:
if i >= len(args) or args[i].startswith('-'):
raise ArgumentPreprocessingError(arg)
val = args[i]
del args[i]
cb(option, val)
except KeyError:
i += 1
else:
i += 1
class Run(object):
"""helper class to use as main for pylint :
run(*sys.argv[1:])
"""
LinterClass = PyLinter
option_groups = (
('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
)
def __init__(self, args, reporter=None, exit=True):
self._rcfile = None
self._plugins = []
try:
preprocess_options(args, {
# option: (callback, takearg)
'rcfile': (self.cb_set_rcfile, True),
'load-plugins': (self.cb_add_plugins, True),
})
except ArgumentPreprocessingError, ex:
print >> sys.stderr, 'Argument %s expects a value.' % (ex.args[0],)
sys.exit(32)
self.linter = linter = self.LinterClass((
('rcfile',
{'action' : 'callback', 'callback' : lambda *args: 1,
'type': 'string', 'metavar': '<file>',
'help' : 'Specify a configuration file.'}),
('init-hook',
{'action' : 'callback', 'type' : 'string', 'metavar': '<code>',
'callback' : cb_init_hook, 'level': 1,
'help' : 'Python code to execute, usually for sys.path \
manipulation such as pygtk.require().'}),
('help-msg',
{'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
'callback' : self.cb_help_message,
'group': 'Commands',
'help' : '''Display a help message for the given message id and \
exit. The value may be a comma separated list of message ids.'''}),
('list-msgs',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_list_messages,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's messages."}),
('full-documentation',
{'action' : 'callback', 'metavar': '<msg-id>',
'callback' : self.cb_full_documentation,
'group': 'Commands', 'level': 1,
'help' : "Generate pylint's full documentation."}),
('generate-rcfile',
{'action' : 'callback', 'callback' : self.cb_generate_config,
'group': 'Commands',
'help' : '''Generate a sample configuration file according to \
the current configuration. You can put other options before this one to get \
them in the generated configuration.'''}),
('generate-man',
{'action' : 'callback', 'callback' : self.cb_generate_manpage,
'group': 'Commands',
'help' : "Generate pylint's man page.", 'hide': True}),
('errors-only',
{'action' : 'callback', 'callback' : self.cb_error_mode,
'short': 'E',
'help' : '''In error mode, checkers without error messages are \
disabled and for others, only the ERROR messages are displayed, and no reports \
are done by default'''}),
('profile',
{'type' : 'yn', 'metavar' : '<y_or_n>',
'default': False, 'hide': True,
'help' : 'Profiled execution.'}),
), option_groups=self.option_groups, pylintrc=self._rcfile)
# register standard checkers
linter.load_default_plugins()
# load command line plugins
linter.load_plugin_modules(self._plugins)
# add some help section
linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
linter.add_help_section('Output',
'Using the default text output, the message format is : \n'
' \n'
' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n'
' \n'
                            'There are 5 kinds of message types : \n'
' * (C) convention, for programming standard violation \n'
' * (R) refactor, for bad code smell \n'
' * (W) warning, for python specific problems \n'
' * (E) error, for probable bugs in the code \n'
' * (F) fatal, if an error occurred which prevented pylint from doing further\n'
'processing.\n'
, level=1)
linter.add_help_section('Output status code',
                            'Pylint should leave with the following status code: \n'
' * 0 if everything went fine \n'
' * 1 if a fatal message was issued \n'
' * 2 if an error message was issued \n'
' * 4 if a warning message was issued \n'
' * 8 if a refactor message was issued \n'
' * 16 if a convention message was issued \n'
' * 32 on usage error \n'
' \n'
                            'status 1 to 16 will be bit-ORed so you can know which different categories have\n'
                            'been issued by analysing the pylint output status code\n',
level=1)
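        # Illustrative note (added comment, not pylint API): a wrapper script
        # could decode the bit-ORed exit status along these lines:
        #     status = 6  # e.g. error (2) | warning (4)
        #     issued = [name for bit, name in ((1, 'fatal'), (2, 'error'),
        #                                      (4, 'warning'), (8, 'refactor'),
        #                                      (16, 'convention'))
        #               if status & bit]
        #     # issued == ['error', 'warning']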
# read configuration
linter.disable('W0704')
linter.disable('I0020')
linter.disable('I0021')
linter.read_config_file()
        # are there additional plugins in the file configuration (MASTER/load-plugins)?
config_parser = linter.cfgfile_parser
if config_parser.has_option('MASTER', 'load-plugins'):
plugins = splitstrip(config_parser.get('MASTER', 'load-plugins'))
linter.load_plugin_modules(plugins)
        # now we can load the file config and the command line; plugins (which
        # can provide options) have been registered
linter.load_config_file()
if reporter:
# if a custom reporter is provided as argument, it may be overridden
# by file parameters, so re-set it here, but before command line
# parsing so it's still overrideable by command line option
linter.set_reporter(reporter)
try:
args = linter.load_command_line_configuration(args)
except SystemExit, exc:
if exc.code == 2: # bad options
exc.code = 32
raise
if not args:
print linter.help()
sys.exit(32)
# insert current working directory to the python path to have a correct
# behaviour
linter.prepare_import_path(args)
if self.linter.config.profile:
print >> sys.stderr, '** profiled run'
import cProfile, pstats
cProfile.runctx('linter.check(%r)' % args, globals(), locals(),
'stones.prof')
data = pstats.Stats('stones.prof')
data.strip_dirs()
data.sort_stats('time', 'calls')
data.print_stats(30)
else:
linter.check(args)
linter.cleanup_import_path()
if exit:
sys.exit(self.linter.msg_status)
def cb_set_rcfile(self, name, value):
"""callback for option preprocessing (i.e. before optik parsing)"""
self._rcfile = value
def cb_add_plugins(self, name, value):
"""callback for option preprocessing (i.e. before optik parsing)"""
self._plugins.extend(splitstrip(value))
def cb_error_mode(self, *args, **kwargs):
"""error mode:
* disable all but error messages
* disable the 'miscellaneous' checker which can be safely deactivated in
debug
* disable reports
* do not save execution information
"""
self.linter.error_mode()
def cb_generate_config(self, *args, **kwargs):
"""optik callback for sample config file generation"""
self.linter.generate_config(skipsections=('COMMANDS',))
sys.exit(0)
def cb_generate_manpage(self, *args, **kwargs):
"""optik callback for sample config file generation"""
from pylint import __pkginfo__
self.linter.generate_manpage(__pkginfo__)
sys.exit(0)
def cb_help_message(self, option, optname, value, parser):
"""optik callback for printing some help about a particular message"""
self.linter.help_message(splitstrip(value))
sys.exit(0)
def cb_full_documentation(self, option, optname, value, parser):
"""optik callback for printing full documentation"""
self.linter.print_full_documentation()
sys.exit(0)
def cb_list_messages(self, option, optname, value, parser): # FIXME
"""optik callback for printing available messages"""
self.linter.list_messages()
sys.exit(0)
def cb_init_hook(option, optname, value, parser):
"""exec arbitrary code to set sys.path for instance"""
exec value
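# Illustrative example (added comment): on the command line the init hook is
# passed as arbitrary Python source, typically to adjust sys.path, e.g.
#     pylint --init-hook='import sys; sys.path.append("/my/project")' mypackage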
if __name__ == '__main__':
Run(sys.argv[1:])
|
godfryd/pylint
|
lint.py
|
Python
|
gpl-2.0
| 47,174
|
[
"VisIt"
] |
8e14b90c68b4a95dd3a12d9026519118bc10a5f9a7b05192484aa29b46b0b838
|
"""
Tests for Shopping Cart views
"""
import pytz
from urlparse import urlparse
from decimal import Decimal
import json
from django.http import HttpRequest
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import Group, User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core import mail
from django.core.cache import cache
from pytz import UTC
from freezegun import freeze_time
from datetime import datetime, timedelta
from mock import patch, Mock
import ddt
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase, mixed_store_config
)
from xmodule.modulestore.tests.factories import CourseFactory
from util.date_utils import get_default_time_display
from util.testing import UrlResetMixin
from shoppingcart.views import _can_download_report, _get_date_from_str
from shoppingcart.models import (
Order, CertificateItem, PaidCourseRegistration, CourseRegCodeItem,
Coupon, CourseRegistrationCode, RegistrationCodeRedemption,
DonationConfiguration
)
from student.tests.factories import UserFactory, AdminFactory
from courseware.tests.factories import InstructorFactory
from student.models import CourseEnrollment
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response
from shoppingcart.processors import render_purchase_form_html
from shoppingcart.admin import SoftDeleteCouponAdmin
from shoppingcart.views import initialize_report
from shoppingcart.tests.payment_fake import PaymentFakeView
def mock_render_purchase_form_html(*args, **kwargs):
return render_purchase_form_html(*args, **kwargs)
form_mock = Mock(side_effect=mock_render_purchase_form_html)
def mock_render_to_response(*args, **kwargs):
return render_to_response(*args, **kwargs)
render_mock = Mock(side_effect=mock_render_to_response)
postpay_mock = Mock()
# Since we don't need any XML course fixtures, use a modulestore configuration
# that disables the XML modulestore.
MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False)
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
@ddt.ddt
class ShoppingCartViewsTests(ModuleStoreTestCase):
def setUp(self):
patcher = patch('student.models.tracker')
self.mock_tracker = patcher.start()
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.instructor = AdminFactory.create()
self.cost = 40
self.coupon_code = 'abcde'
self.reg_code = 'qwerty'
self.percentage_discount = 10
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_key = self.course.id
self.course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
#Saving another testing course mode
self.testing_cost = 20
self.testing_course = CourseFactory.create(org='edX', number='888', display_name='Testing Super Course')
self.testing_course_mode = CourseMode(course_id=self.testing_course.id,
mode_slug="honor",
mode_display_name="testing honor cert",
min_price=self.testing_cost)
self.testing_course_mode.save()
verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course')
self.verified_course_key = verified_course.id
self.cart = Order.get_cart_for_user(self.user)
self.addCleanup(patcher.stop)
def get_discount(self, cost):
"""
        This method simply returns the discounted amount
"""
val = Decimal("{0:.2f}".format(Decimal(self.percentage_discount / 100.00) * cost))
return cost - val
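    # Worked example (added comment): with the values set in setUp,
    # percentage_discount == 10 and cost == 40, so val == Decimal('4.00') and
    # the method returns Decimal('36.00'), which is where the 36/54/76/180
    # figures asserted in several tests below come from.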
def add_coupon(self, course_key, is_active, code):
"""
        add a dummy coupon to the models
"""
coupon = Coupon(code=code, description='testing code', course_id=course_key,
percentage_discount=self.percentage_discount, created_by=self.user, is_active=is_active)
coupon.save()
def add_reg_code(self, course_key):
"""
        add a dummy registration code to the models
"""
course_reg_code = CourseRegistrationCode(code=self.reg_code, course_id=course_key, created_by=self.user)
course_reg_code.save()
def add_course_to_user_cart(self, course_key):
"""
        add a course to the user's cart
"""
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, course_key)
return reg_item
def login_user(self):
self.client.login(username=self.user.username, password="password")
def test_add_course_to_cart_anon(self):
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 403)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_billing_details(self):
billing_url = reverse('billing_details')
self.login_user()
# page not found error because order_type is not business
resp = self.client.get(billing_url)
self.assertEqual(resp.status_code, 404)
        # change the order_type to business
self.cart.order_type = 'business'
self.cart.save()
resp = self.client.get(billing_url)
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/billing_details.html')
# check for the default currency in the context
self.assertEqual(context['currency'], 'usd')
self.assertEqual(context['currency_symbol'], '$')
data = {'company_name': 'Test Company', 'company_contact_name': 'JohnDoe',
'company_contact_email': 'john@est.com', 'recipient_name': 'Mocker',
'recipient_email': 'mock@germ.com', 'company_address_line_1': 'DC Street # 1',
'company_address_line_2': '',
'company_city': 'DC', 'company_state': 'NY', 'company_zip': '22003', 'company_country': 'US',
'customer_reference_number': 'PO#23'}
resp = self.client.post(billing_url, data)
self.assertEqual(resp.status_code, 200)
@patch('shoppingcart.views.render_to_response', render_mock)
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
def test_billing_details_with_override_currency_settings(self):
billing_url = reverse('billing_details')
self.login_user()
        # change the order_type to business
self.cart.order_type = 'business'
self.cart.save()
resp = self.client.get(billing_url)
self.assertEqual(resp.status_code, 200)
((template, context), __) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/billing_details.html')
# check for the override currency settings in the context
self.assertEqual(context['currency'], 'PKR')
self.assertEqual(context['currency_symbol'], 'Rs')
def test_same_coupon_code_applied_on_multiple_items_in_the_cart(self):
"""
        test to check that the same coupon code is applied to multiple
        items in the cart.
"""
self.login_user()
# add first course to user cart
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 200)
# add and apply the coupon code to course in the cart
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# now add the same coupon code to the second course(testing_course)
self.add_coupon(self.testing_course.id, True, self.coupon_code)
#now add the second course to cart, the coupon code should be
# applied when adding the second course to the cart
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.testing_course.id.to_deprecated_string()]))
self.assertEqual(resp.status_code, 200)
#now check the user cart and see that the discount has been applied on both the courses
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
#first course price is 40$ and the second course price is 20$
# after 10% discount on both the courses the total price will be 18+36 = 54
self.assertIn('54.00', resp.content)
def test_add_course_to_cart_already_in_cart(self):
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 400)
self.assertIn('The course {0} is already in your cart.'.format(self.course_key.to_deprecated_string()), resp.content)
def test_course_discount_invalid_coupon(self):
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
non_existing_code = "non_existing_code"
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content)
def test_valid_qty_greater_then_one_and_purchase_type_should_business(self):
qty = 2
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['total_cost'], item.unit_cost * qty)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'business')
def test_in_valid_qty_case(self):
# invalid quantity, Quantity must be between 1 and 1000.
qty = 0
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 400)
self.assertIn("Quantity must be between 1 and 1000.", resp.content)
# invalid quantity, Quantity must be an integer.
qty = 'abcde'
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 400)
self.assertIn("Quantity must be an integer.", resp.content)
# invalid quantity, Quantity is not present in request
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id})
self.assertEqual(resp.status_code, 400)
self.assertIn("Quantity must be between 1 and 1000.", resp.content)
def test_valid_qty_but_item_not_found(self):
qty = 2
item_id = '-1'
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item_id, 'qty': qty})
self.assertEqual(resp.status_code, 404)
self.assertEqual('Order item does not exist.', resp.content)
        # now test the case where the item id is not found in the request
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'qty': qty})
self.assertEqual(resp.status_code, 400)
self.assertEqual('Order item not found in request.', resp.content)
def test_purchase_type_should_be_personal_when_qty_is_one(self):
qty = 1
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['total_cost'], item.unit_cost * 1)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'personal')
def test_purchase_type_on_removing_item_and_cart_has_item_with_qty_one(self):
qty = 5
self.add_course_to_user_cart(self.course_key)
item2 = self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
cart = Order.get_cart_for_user(self.user)
cart_items = cart.orderitem_set.all()
test_flag = False
for cartitem in cart_items:
if cartitem.qty == 5:
test_flag = True
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id})
self.assertEqual(resp.status_code, 200)
self.assertTrue(test_flag)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'personal')
def test_billing_details_btn_in_cart_when_qty_is_greater_than_one(self):
qty = 5
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertIn("Billing Details", resp.content)
def test_purchase_type_should_be_personal_when_remove_all_items_from_cart(self):
item1 = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item1.id, 'qty': 2})
self.assertEqual(resp.status_code, 200)
item2 = self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item2.id, 'qty': 5})
self.assertEqual(resp.status_code, 200)
cart = Order.get_cart_for_user(self.user)
cart_items = cart.orderitem_set.all()
test_flag = False
for cartitem in cart_items:
test_flag = True
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]), {'id': cartitem.id})
self.assertEqual(resp.status_code, 200)
self.assertTrue(test_flag)
cart = Order.get_cart_for_user(self.user)
self.assertEqual(cart.order_type, 'personal')
def test_use_valid_coupon_code_and_qty_is_greater_than_one(self):
qty = 5
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': qty})
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.content)
self.assertEqual(data['total_cost'], item.unit_cost * qty)
# use coupon code
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost * qty, 180)
def test_course_discount_invalid_reg_code(self):
self.add_reg_code(self.course_key)
self.add_course_to_user_cart(self.course_key)
non_existing_code = "non_existing_code"
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': non_existing_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(non_existing_code), resp.content)
def test_course_discount_inactive_coupon(self):
self.add_coupon(self.course_key, False, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content)
def test_course_does_not_exist_in_cart_against_valid_coupon(self):
course_key = self.course_key.to_deprecated_string() + 'testing'
self.add_coupon(course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Discount does not exist against code '{0}'.".format(self.coupon_code), resp.content)
def test_course_does_not_exist_in_cart_against_valid_reg_code(self):
course_key = self.course_key.to_deprecated_string() + 'testing'
self.add_reg_code(course_key)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Code '{0}' is not valid for any course in the shopping cart.".format(self.reg_code), resp.content)
def test_cart_item_qty_greater_than_1_against_valid_reg_code(self):
course_key = self.course_key.to_deprecated_string()
self.add_reg_code(course_key)
item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.update_user_cart'), {'ItemId': item.id, 'qty': 4})
self.assertEqual(resp.status_code, 200)
# now update the cart item quantity and then apply the registration code
# it will raise an exception
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 404)
self.assertIn("Cart item quantity should not be greater than 1 when applying activation code", resp.content)
def test_course_discount_for_valid_active_coupon_code(self):
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit price should be updated for that course
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
# after getting 10 percent discount
self.assertEqual(self.cart.total_cost, self.get_discount(self.cost))
# now using the same coupon code against the same order.
# Only one coupon redemption should be allowed per order.
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 400)
self.assertIn("Only one coupon redemption is allowed against an order", resp.content)
def test_course_discount_against_two_distinct_coupon_codes(self):
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit price should be updated for that course
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
# now using another valid active coupon code.
# Only one coupon redemption should be allowed per order.
self.add_coupon(self.course_key, True, 'abxyz')
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': 'abxyz'})
self.assertEqual(resp.status_code, 400)
self.assertIn("Only one coupon redemption is allowed against an order", resp.content)
def test_same_coupons_code_on_multiple_courses(self):
# add two same coupon codes on two different courses
self.add_coupon(self.course_key, True, self.coupon_code)
self.add_coupon(self.testing_course.id, True, self.coupon_code)
self.add_course_to_user_cart(self.course_key)
self.add_course_to_user_cart(self.testing_course.id)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit price should be updated for that course
item = self.cart.orderitem_set.all().select_subclasses()[0]
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
item = self.cart.orderitem_set.all().select_subclasses()[1]
self.assertEquals(item.unit_cost, self.get_discount(self.testing_cost))
def test_soft_delete_coupon(self): # pylint: disable=no-member
self.add_coupon(self.course_key, True, self.coupon_code)
coupon = Coupon(code='TestCode', description='testing', course_id=self.course_key,
percentage_discount=12, created_by=self.user, is_active=True)
coupon.save()
self.assertEquals(coupon.__unicode__(), '[Coupon] code: TestCode course: MITx/999/Robot_Super_Course')
admin = User.objects.create_user('Mark', 'admin+courses@edx.org', 'foo')
admin.is_staff = True
get_coupon = Coupon.objects.get(id=1)
request = HttpRequest()
request.user = admin
setattr(request, 'session', 'session') # pylint: disable=no-member
messages = FallbackStorage(request) # pylint: disable=no-member
setattr(request, '_messages', messages) # pylint: disable=no-member
coupon_admin = SoftDeleteCouponAdmin(Coupon, AdminSite())
test_query_set = coupon_admin.queryset(request)
test_actions = coupon_admin.get_actions(request)
self.assertIn('really_delete_selected', test_actions['really_delete_selected'])
self.assertEqual(get_coupon.is_active, True)
coupon_admin.really_delete_selected(request, test_query_set) # pylint: disable=no-member
for coupon in test_query_set:
self.assertEqual(coupon.is_active, False)
coupon_admin.delete_model(request, get_coupon) # pylint: disable=no-member
self.assertEqual(get_coupon.is_active, False)
coupon = Coupon(code='TestCode123', description='testing123', course_id=self.course_key,
percentage_discount=22, created_by=self.user, is_active=True)
coupon.save()
test_query_set = coupon_admin.queryset(request)
coupon_admin.really_delete_selected(request, test_query_set) # pylint: disable=no-member
for coupon in test_query_set:
self.assertEqual(coupon.is_active, False)
def test_course_free_discount_for_valid_active_reg_code(self):
self.add_reg_code(self.course_key)
self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
        # now activate the user by enrolling him/her in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
        # now test the registration-code-already-used scenario by reusing the same code;
        # the item was removed when the registration code was used the first time
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 400)
self.assertIn("Oops! The code '{0}' you entered is either invalid or expired".format(self.reg_code), resp.content)
@patch('shoppingcart.views.log.debug')
def test_non_existing_coupon_redemption_on_removing_item(self, debug_log):
reg_item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
debug_log.assert_called_with(
'Code redemption does not exist for order item id={0}.'.format(reg_item.id))
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
@patch('shoppingcart.views.log.info')
def test_existing_coupon_redemption_on_removing_item(self, info_log):
self.add_coupon(self.course_key, True, self.coupon_code)
reg_item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
info_log.assert_called_with(
'Coupon "{0}" redemption entry removed for user "{1}" for order item "{2}"'.format(self.coupon_code, self.user, reg_item.id))
@patch('shoppingcart.views.log.info')
def test_reset_redemption_for_coupon(self, info_log):
self.add_coupon(self.course_key, True, self.coupon_code)
reg_item = self.add_course_to_user_cart(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
resp = self.client.post(reverse('shoppingcart.views.reset_code_redemption', args=[]))
self.assertEqual(resp.status_code, 200)
info_log.assert_called_with(
'Coupon redemption entry removed for user {0} for order {1}'.format(self.user, reg_item.id))
@patch('shoppingcart.views.log.info')
def test_coupon_discount_for_multiple_courses_in_cart(self, info_log):
reg_item = self.add_course_to_user_cart(self.course_key)
self.add_coupon(self.course_key, True, self.coupon_code)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
# unit_cost should be updated for that particular course for which coupon code is registered
items = self.cart.orderitem_set.all().select_subclasses()
for item in items:
if item.id == reg_item.id:
self.assertEquals(item.unit_cost, self.get_discount(self.cost))
elif item.id == cert_item.id:
self.assertEquals(item.list_price, None)
# Delete the discounted item, corresponding coupon redemption should
# be removed for that particular discounted item
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 1)
info_log.assert_called_with(
'Coupon "{0}" redemption entry removed for user "{1}" for order item "{2}"'.format(self.coupon_code, self.user, reg_item.id))
@patch('shoppingcart.views.log.info')
def test_delete_certificate_item(self, info_log):
self.add_course_to_user_cart(self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
        # Delete the certificate item; only the remaining item should be left in the cart
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': cert_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 1)
info_log.assert_called_with(
'order item {0} removed for user {1}'.format(cert_item.id, self.user))
@patch('shoppingcart.views.log.info')
def test_remove_coupon_redemption_on_clear_cart(self, info_log):
reg_item = self.add_course_to_user_cart(self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
info_log.assert_called_with(
'Coupon redemption entry removed for user {0} for order {1}'.format(self.user, reg_item.id))
def test_add_course_to_cart_already_registered(self):
CourseEnrollment.enroll(self.user, self.course_key)
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 400)
self.assertIn('You are already registered in course {0}.'.format(self.course_key.to_deprecated_string()), resp.content)
def test_add_nonexistent_course_to_cart(self):
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=['non/existent/course']))
self.assertEqual(resp.status_code, 404)
self.assertIn(_("The course you requested does not exist."), resp.content)
def test_add_course_to_cart_success(self):
self.login_user()
reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()])
resp = self.client.post(reverse('shoppingcart.views.add_course_to_cart', args=[self.course_key.to_deprecated_string()]))
self.assertEqual(resp.status_code, 200)
self.assertTrue(PaidCourseRegistration.contained_in_order(self.cart, self.course_key))
@patch('shoppingcart.views.render_purchase_form_html', form_mock)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_cart(self):
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
((purchase_form_arg_cart,), _) = form_mock.call_args # pylint: disable=redefined-outer-name
purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses()
self.assertIn(reg_item, purchase_form_arg_cart_items)
self.assertIn(cert_item, purchase_form_arg_cart_items)
self.assertEqual(len(purchase_form_arg_cart_items), 2)
((template, context), _) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/shopping_cart.html')
self.assertEqual(len(context['shoppingcart_items']), 2)
self.assertEqual(context['amount'], 80)
self.assertIn("80.00", context['form_html'])
# check for the default currency in the context
self.assertEqual(context['currency'], 'usd')
self.assertEqual(context['currency_symbol'], '$')
@patch('shoppingcart.views.render_purchase_form_html', form_mock)
@patch('shoppingcart.views.render_to_response', render_mock)
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
def test_show_cart_with_override_currency_settings(self):
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
((purchase_form_arg_cart,), _) = form_mock.call_args # pylint: disable=redefined-outer-name
purchase_form_arg_cart_items = purchase_form_arg_cart.orderitem_set.all().select_subclasses()
self.assertIn(reg_item, purchase_form_arg_cart_items)
((template, context), _) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/shopping_cart.html')
# check for the override currency settings in the context
self.assertEqual(context['currency'], 'PKR')
self.assertEqual(context['currency_symbol'], 'Rs')
def test_clear_cart(self):
self.login_user()
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.clear_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 0)
@patch('shoppingcart.views.log.exception')
def test_remove_item(self, exception_log):
self.login_user()
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': reg_item.id})
self.assertEqual(resp.status_code, 200)
self.assertEquals(self.cart.orderitem_set.count(), 1)
self.assertNotIn(reg_item, self.cart.orderitem_set.all().select_subclasses())
self.cart.purchase()
resp2 = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': cert_item.id})
self.assertEqual(resp2.status_code, 200)
exception_log.assert_called_with(
'Cannot remove cart OrderItem id={0}. DoesNotExist or item is already purchased'.format(cert_item.id))
resp3 = self.client.post(reverse('shoppingcart.views.remove_item', args=[]),
{'id': -1})
self.assertEqual(resp3.status_code, 200)
exception_log.assert_called_with(
'Cannot remove cart OrderItem id={0}. DoesNotExist or item is already purchased'.format(-1))
@patch('shoppingcart.views.process_postpay_callback', postpay_mock)
def test_postpay_callback_success(self):
postpay_mock.return_value = {'success': True, 'order': self.cart}
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
self.assertEqual(resp.status_code, 302)
self.assertEqual(urlparse(resp.__getitem__('location')).path,
reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
@patch('shoppingcart.views.process_postpay_callback', postpay_mock)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_postpay_callback_failure(self):
postpay_mock.return_value = {'success': False, 'order': self.cart, 'error_html': 'ERROR_TEST!!!'}
self.login_user()
resp = self.client.post(reverse('shoppingcart.views.postpay_callback', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn('ERROR_TEST!!!', resp.content)
((template, context), _) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/error.html')
self.assertEqual(context['order'], self.cart)
self.assertEqual(context['error_html'], 'ERROR_TEST!!!')
@ddt.data(0, 1)
def test_show_receipt_json(self, num_items):
# Create the correct number of items in the order
for __ in range(num_items):
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase()
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url, HTTP_ACCEPT="application/json")
# Should have gotten a successful response
self.assertEqual(resp.status_code, 200)
# Parse the response as JSON and check the contents
json_resp = json.loads(resp.content)
self.assertEqual(json_resp.get('currency'), self.cart.currency)
self.assertEqual(json_resp.get('purchase_datetime'), get_default_time_display(self.cart.purchase_time))
self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
self.assertEqual(json_resp.get('status'), "purchased")
self.assertEqual(json_resp.get('billed_to'), {
'first_name': self.cart.bill_to_first,
'last_name': self.cart.bill_to_last,
'street1': self.cart.bill_to_street1,
'street2': self.cart.bill_to_street2,
'city': self.cart.bill_to_city,
'state': self.cart.bill_to_state,
'postal_code': self.cart.bill_to_postalcode,
'country': self.cart.bill_to_country
})
self.assertEqual(len(json_resp.get('items')), num_items)
for item in json_resp.get('items'):
self.assertEqual(item, {
'unit_cost': 40,
'quantity': 1,
'line_cost': 40,
'line_desc': 'Honor Code Certificate for course Test Course'
})
def test_show_receipt_json_multiple_items(self):
# Two different item types
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase()
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url, HTTP_ACCEPT="application/json")
# Should have gotten a successful response
self.assertEqual(resp.status_code, 200)
# Parse the response as JSON and check the contents
json_resp = json.loads(resp.content)
self.assertEqual(json_resp.get('total_cost'), self.cart.total_cost)
items = json_resp.get('items')
self.assertEqual(len(items), 2)
self.assertEqual(items[0], {
'unit_cost': 40,
'quantity': 1,
'line_cost': 40,
'line_desc': 'Registration for Course: Robot Super Course'
})
self.assertEqual(items[1], {
'unit_cost': 40,
'quantity': 1,
'line_cost': 40,
'line_desc': 'Honor Code Certificate for course Test Course'
})
def test_receipt_json_refunded(self):
mock_enrollment = Mock()
mock_enrollment.refundable.side_effect = lambda: True
mock_enrollment.course_id = self.verified_course_key
mock_enrollment.user = self.user
CourseMode.objects.create(
course_id=self.verified_course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost
)
cert = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'verified')
self.cart.purchase()
cert.refund_cert_callback(course_enrollment=mock_enrollment)
self.login_user()
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url, HTTP_ACCEPT="application/json")
self.assertEqual(resp.status_code, 200)
json_resp = json.loads(resp.content)
self.assertEqual(json_resp.get('status'), 'refunded')
def test_show_receipt_404s(self):
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase()
user2 = UserFactory.create()
cart2 = Order.get_cart_for_user(user2)
PaidCourseRegistration.add_to_order(cart2, self.course_key)
cart2.purchase()
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[cart2.id]))
self.assertEqual(resp.status_code, 404)
resp2 = self.client.get(reverse('shoppingcart.views.show_receipt', args=[1000]))
self.assertEqual(resp2.status_code, 404)
def test_total_amount_of_purchased_course(self):
self.add_course_to_user_cart(self.course_key)
self.assertEquals(self.cart.orderitem_set.count(), 1)
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
# Total amount of a particular course that is purchased by different users
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
self.assertEqual(total_amount, 36)
self.client.login(username=self.instructor.username, password="test")
cart = Order.get_cart_for_user(self.instructor)
PaidCourseRegistration.add_to_order(cart, self.course_key)
cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
total_amount = PaidCourseRegistration.get_total_amount_of_purchased_item(self.course_key)
self.assertEqual(total_amount, 76)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_valid_coupon_code(self):
self.add_course_to_user_cart(self.course_key)
self.add_coupon(self.course_key, True, self.coupon_code)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.coupon_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn(str(self.get_discount(self.cost)), resp.content)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_reg_code_and_course_registration_scenario(self):
self.add_reg_code(self.course_key)
        # One course in user shopping cart
self.add_course_to_user_cart(self.course_key)
self.assertEquals(self.cart.orderitem_set.count(), 1)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in response.content)
        # now activate the user by enrolling him/her in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_reg_code_with_multiple_courses_and_checkout_scenario(self):
self.add_reg_code(self.course_key)
# Two courses in user shopping cart
self.login_user()
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
item2 = PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
self.assertEquals(self.cart.orderitem_set.count(), 2)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
redeem_url = reverse('register_code_redemption', args=[self.reg_code])
resp = self.client.get(redeem_url)
self.assertEquals(resp.status_code, 200)
# check button text
self.assertTrue('Activate Course Enrollment' in resp.content)
        # now activate the user by enrolling him/her in the course
resp = self.client.post(redeem_url)
self.assertEquals(resp.status_code, 200)
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertIn('Payment', resp.content)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertEqual(context['order'].total_cost, self.testing_cost)
course_enrollment = CourseEnrollment.objects.filter(user=self.user)
self.assertEqual(course_enrollment.count(), 2)
# make sure the enrollment_ids were stored in the PaidCourseRegistration items
# refetch them first since they are updated
        # item1 has been deleted from the cart.
        # The user has been enrolled for item1
item2 = PaidCourseRegistration.objects.get(id=item2.id)
self.assertIsNotNone(item2.course_enrollment)
self.assertEqual(item2.course_enrollment.course_id, self.testing_course.id)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_valid_reg_code(self):
self.add_course_to_user_cart(self.course_key)
self.add_reg_code(self.course_key)
resp = self.client.post(reverse('shoppingcart.views.use_code'), {'code': self.reg_code})
self.assertEqual(resp.status_code, 200)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('0.00', resp.content)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success(self):
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn('80.00', resp.content)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
self.assertFalse(context['any_refunds'])
# check for the default currency settings in the context
self.assertEqual(context['currency_symbol'], '$')
self.assertEqual(context['currency'], 'usd')
@override_settings(PAID_COURSE_REGISTRATION_CURRENCY=['PKR', 'Rs'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_override_currency_settings(self):
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
# check for the override currency settings in the context
self.assertEqual(context['currency_symbol'], 'Rs')
self.assertEqual(context['currency'], 'PKR')
@patch('shoppingcart.views.render_to_response', render_mock)
def test_courseregcode_item_total_price(self):
self.cart.order_type = 'business'
self.cart.save()
CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.assertEquals(CourseRegCodeItem.get_total_amount_of_purchased_item(self.course_key), 80)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_order_type_business(self):
self.cart.order_type = 'business'
self.cart.save()
reg_item = CourseRegCodeItem.add_to_order(self.cart, self.course_key, 2)
self.cart.add_billing_details(company_name='T1Omega', company_contact_name='C1',
company_contact_email='test@t1.com', recipient_email='test@t2.com')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
        # mail is sent to these emails: recipient_email, company_contact_email, order.user.email
self.assertEquals(len(mail.outbox), 3)
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
# when order_type = 'business' the user is not enrolled in the
# course but presented with the enrollment links
self.assertFalse(CourseEnrollment.is_enrolled(self.cart.user, self.course_key))
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn('80.00', resp.content)
# check for the enrollment codes content
self.assertIn('Please send each professional one of these unique registration codes to enroll into the course.', resp.content)
# fetch the newly generated registration codes
course_registration_codes = CourseRegistrationCode.objects.filter(order=self.cart)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
# now check for all the registration codes in the receipt
# and all the codes should be unused at this point
self.assertIn(course_registration_codes[0].code, context['reg_code_info_list'][0]['code'])
self.assertIn(course_registration_codes[1].code, context['reg_code_info_list'][1]['code'])
self.assertFalse(context['reg_code_info_list'][0]['is_redeemed'])
self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])
self.assertIn(self.cart.purchase_time.strftime("%B %d, %Y"), resp.content)
self.assertIn(self.cart.company_name, resp.content)
self.assertIn(self.cart.company_contact_name, resp.content)
self.assertIn(self.cart.company_contact_email, resp.content)
self.assertIn(self.cart.recipient_email, resp.content)
self.assertIn("Invoice #{order_id}".format(order_id=self.cart.id), resp.content)
self.assertIn('You have successfully purchased <b>{total_registration_codes} course registration codes'
.format(total_registration_codes=context['total_registration_codes']), resp.content)
        # now redeem one of the registration codes from the previous order
redeem_url = reverse('register_code_redemption', args=[context['reg_code_info_list'][0]['code']])
        # now activate the user by enrolling him/her in the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
self.assertTrue('View Dashboard' in response.content)
        # now view the receipt page again to see if any registration codes
        # have been redeemed or not
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
((template, context), _) = render_mock.call_args # pylint: disable=redefined-outer-name
self.assertEqual(template, 'shoppingcart/receipt.html')
# now check for all the registration codes in the receipt
        # and one of the codes should be used at this point
self.assertTrue(context['reg_code_info_list'][0]['is_redeemed'])
self.assertFalse(context['reg_code_info_list'][1]['is_redeemed'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_with_upgrade(self):
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
self.login_user()
# When we come from the upgrade flow, we'll have a session variable showing that
s = self.client.session
s['attempting_upgrade'] = True
s.save()
self.mock_tracker.emit.reset_mock() # pylint: disable=maybe-no-member
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
# Once they've upgraded, they're no longer *attempting* to upgrade
attempting_upgrade = self.client.session.get('attempting_upgrade', False)
self.assertFalse(attempting_upgrade)
self.assertEqual(resp.status_code, 200)
self.assertIn('FirstNameTesting123', resp.content)
self.assertIn('80.00', resp.content)
((template, context), _) = render_mock.call_args
# When we come from the upgrade flow, we get these context variables
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
self.assertFalse(context['any_refunds'])
course_enrollment = CourseEnrollment.get_or_create_enrollment(self.user, self.course_key)
course_enrollment.emit_event('edx.course.enrollment.upgrade.succeeded')
self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member
'edx.course.enrollment.upgrade.succeeded',
{
'user_id': course_enrollment.user.id,
'course_id': course_enrollment.course_id.to_deprecated_string(),
'mode': course_enrollment.mode
}
)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_refund(self):
reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
cert_item = CertificateItem.add_to_order(self.cart, self.verified_course_key, self.cost, 'honor')
self.cart.purchase(first='FirstNameTesting123', street1='StreetTesting123')
cert_item.status = "refunded"
cert_item.save()
self.assertEqual(self.cart.total_cost, 40)
self.login_user()
resp = self.client.get(reverse('shoppingcart.views.show_receipt', args=[self.cart.id]))
self.assertEqual(resp.status_code, 200)
self.assertIn('40.00', resp.content)
((template, context), _tmp) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/receipt.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item, context['shoppingcart_items'][0])
self.assertIn(cert_item, context['shoppingcart_items'][1])
self.assertTrue(context['any_refunds'])
@patch('shoppingcart.views.render_to_response', render_mock)
def test_show_receipt_success_custom_receipt_page(self):
cert_item = CertificateItem.add_to_order(self.cart, self.course_key, self.cost, 'honor')
self.cart.purchase()
self.login_user()
receipt_url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(receipt_url)
self.assertEqual(resp.status_code, 200)
((template, _context), _tmp) = render_mock.call_args
self.assertEqual(template, cert_item.single_item_receipt_template)
def _assert_404(self, url, use_post=False):
"""
Helper method to assert that a given url will return a 404 status code
"""
if use_post:
response = self.client.post(url)
else:
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': False})
def test_disabled_paid_courses(self):
"""
Assert that the pages that require ENABLE_PAID_COURSE_REGISTRATION=True return a
HTTP 404 status code when we have this flag turned off
"""
self.login_user()
self._assert_404(reverse('shoppingcart.views.show_cart', args=[]))
self._assert_404(reverse('shoppingcart.views.clear_cart', args=[]))
self._assert_404(reverse('shoppingcart.views.remove_item', args=[]), use_post=True)
self._assert_404(reverse('shoppingcart.views.register_code_redemption', args=["testing"]))
self._assert_404(reverse('shoppingcart.views.use_code', args=[]), use_post=True)
self._assert_404(reverse('shoppingcart.views.update_user_cart', args=[]))
self._assert_404(reverse('shoppingcart.views.reset_code_redemption', args=[]), use_post=True)
self._assert_404(reverse('shoppingcart.views.billing_details', args=[]))
# TODO (ECOM-188): Once we complete the A/B test of separate
# verified/payment flows, we can replace these tests
# with something more general.
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
class ReceiptRedirectTest(UrlResetMixin, ModuleStoreTestCase):
"""Test special-case redirect from the receipt page. """
COST = 40
PASSWORD = 'password'
@patch.dict(settings.FEATURES, {'SEPARATE_VERIFICATION_FROM_PAYMENT': True})
def setUp(self):
super(ReceiptRedirectTest, self).setUp('verify_student.urls')
self.user = UserFactory.create()
self.user.set_password(self.PASSWORD)
self.user.save()
self.course = CourseFactory.create()
self.course_key = self.course.id
self.course_mode = CourseMode(
course_id=self.course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.COST
)
self.course_mode.save()
self.cart = Order.get_cart_for_user(self.user)
self.client.login(
username=self.user.username,
password=self.PASSWORD
)
@patch.dict(settings.FEATURES, {'SEPARATE_VERIFICATION_FROM_PAYMENT': True})
def test_show_receipt_redirect_to_verify_student(self):
# Create other carts first
# This ensures that the order ID and order item IDs do not match
Order.get_cart_for_user(self.user).start_purchase()
Order.get_cart_for_user(self.user).start_purchase()
Order.get_cart_for_user(self.user).start_purchase()
# Purchase a verified certificate
self.cart = Order.get_cart_for_user(self.user)
CertificateItem.add_to_order(
self.cart,
self.course_key,
self.COST,
'verified'
)
self.cart.purchase()
# Set the session flag indicating that the user is in the
# experimental group
session = self.client.session
session['separate-verified'] = True
session.save()
# Visit the receipt page
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url)
# Expect to be redirected to the payment confirmation
# page in the verify_student app
redirect_url = reverse(
'verify_student_payment_confirmation',
kwargs={'course_id': unicode(self.course_key)}
)
redirect_url += '?payment-order-num={order_num}'.format(
order_num=self.cart.id
)
self.assertRedirects(resp, redirect_url)
@patch.dict(settings.FEATURES, {'SEPARATE_VERIFICATION_FROM_PAYMENT': True})
def test_no_redirect_if_not_in_experimental_group(self):
# Purchase a verified certificate
CertificateItem.add_to_order(
self.cart,
self.course_key,
self.COST,
'verified'
)
self.cart.purchase()
# We do NOT set the session flag indicating that the user is in
# the experimental group.
# Visit the receipt page
url = reverse('shoppingcart.views.show_receipt', args=[self.cart.id])
resp = self.client.get(url)
# Since the user is not in the experimental group, expect
# that we see the usual receipt page (no redirect)
self.assertEqual(resp.status_code, 200)
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class ShoppingcartViewsClosedEnrollment(ModuleStoreTestCase):
"""
Test suite for ShoppingcartViews Course Enrollments Closed or not
"""
def setUp(self):
super(ShoppingcartViewsClosedEnrollment, self).setUp()
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.instructor = AdminFactory.create()
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_key = self.course.id
self.course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
self.testing_course = CourseFactory.create(
org='Edx',
number='999',
display_name='Testing Super Course',
metadata={"invitation_only": False}
)
self.course_mode = CourseMode(course_id=self.testing_course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
self.cart = Order.get_cart_for_user(self.user)
self.now = datetime.now(pytz.UTC)
self.tomorrow = self.now + timedelta(days=1)
self.nextday = self.tomorrow + timedelta(days=1)
def login_user(self):
"""
Helper fn to login self.user
"""
self.client.login(username=self.user.username, password="password")
@patch('shoppingcart.views.render_to_response', render_mock)
def test_to_check_that_cart_item_enrollment_is_closed(self):
self.login_user()
reg_item1 = PaidCourseRegistration.add_to_order(self.cart, self.course_key)
PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
# update the testing_course enrollment dates
self.testing_course.enrollment_start = self.tomorrow
self.testing_course.enrollment_end = self.nextday
self.testing_course = self.update_course(self.testing_course, self.user.id)
# testing_course enrollment is closed but the course is in the cart
# so we delete that item from the cart and display the message in the cart
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
((template, context), _tmp) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/shopping_cart.html')
self.assertEqual(context['order'], self.cart)
self.assertIn(reg_item1, context['shoppingcart_items'][0])
self.assertEqual(1, len(context['shoppingcart_items']))
self.assertEqual(True, context['is_course_enrollment_closed'])
self.assertIn(self.testing_course.display_name, context['appended_expired_course_names'])
def test_to_check_that_cart_item_enrollment_is_closed_when_clicking_the_payment_button(self):
self.login_user()
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
PaidCourseRegistration.add_to_order(self.cart, self.testing_course.id)
# update the testing_course enrollment dates
self.testing_course.enrollment_start = self.tomorrow
self.testing_course.enrollment_end = self.nextday
self.testing_course = self.update_course(self.testing_course, self.user.id)
# testing_course enrollment is closed but the course is in the cart
# so we delete that item from the cart and display the message in the cart
resp = self.client.get(reverse('shoppingcart.views.verify_cart'))
self.assertEqual(resp.status_code, 200)
self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
self.assertIn('40.00', resp.content)
def test_is_enrollment_closed_when_order_type_is_business(self):
self.login_user()
self.cart.order_type = 'business'
self.cart.save()
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
CourseRegCodeItem.add_to_order(self.cart, self.testing_course.id, 2)
# update the testing_course enrollment dates
self.testing_course.enrollment_start = self.tomorrow
self.testing_course.enrollment_end = self.nextday
self.testing_course = self.update_course(self.testing_course, self.user.id)
resp = self.client.post(reverse('shoppingcart.views.billing_details'))
self.assertEqual(resp.status_code, 200)
self.assertTrue(json.loads(resp.content)['is_course_enrollment_closed'])
# testing_course enrollment is closed but the course is in the cart
# so we delete that item from the cart and display the message in the cart
resp = self.client.get(reverse('shoppingcart.views.show_cart', args=[]))
self.assertEqual(resp.status_code, 200)
self.assertIn("{course_name} has been removed because the enrollment period has closed.".format(course_name=self.testing_course.display_name), resp.content)
self.assertIn('40.00', resp.content)
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class RegistrationCodeRedemptionCourseEnrollment(ModuleStoreTestCase):
"""
Test suite for RegistrationCodeRedemption Course Enrollments
"""
def setUp(self, **kwargs):
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_key = self.course.id
self.course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
def login_user(self):
"""
Helper fn to login self.user
"""
self.client.login(username=self.user.username, password="password")
def test_registration_redemption_post_request_ratelimited(self):
"""
Try (and fail) registration code redemption 30 times
        in a row on a non-existing registration code post request
"""
cache.clear()
url = reverse('register_code_redemption', args=['asdasd'])
self.login_user()
for i in xrange(30): # pylint: disable=unused-variable
response = self.client.post(url)
self.assertEquals(response.status_code, 404)
# then the rate limiter should kick in and give a HttpForbidden response
response = self.client.post(url)
self.assertEquals(response.status_code, 403)
# now reset the time to 5 mins from now in future in order to unblock
reset_time = datetime.now(UTC) + timedelta(seconds=300)
with freeze_time(reset_time):
response = self.client.post(url)
self.assertEquals(response.status_code, 404)
cache.clear()
def test_registration_redemption_get_request_ratelimited(self):
"""
Try (and fail) registration code redemption 30 times
        in a row on a non-existing registration code get request
"""
cache.clear()
url = reverse('register_code_redemption', args=['asdasd'])
self.login_user()
for i in xrange(30): # pylint: disable=unused-variable
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
# then the rate limiter should kick in and give a HttpForbidden response
response = self.client.get(url)
self.assertEquals(response.status_code, 403)
# now reset the time to 5 mins from now in future in order to unblock
reset_time = datetime.now(UTC) + timedelta(seconds=300)
with freeze_time(reset_time):
response = self.client.get(url)
self.assertEquals(response.status_code, 404)
cache.clear()
def test_course_enrollment_active_registration_code_redemption(self):
"""
Test for active registration code course enrollment
"""
cache.clear()
instructor = InstructorFactory(course_key=self.course_key)
self.client.login(username=instructor.username, password='test')
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'sale_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data)
self.assertEquals(response.status_code, 200)
# get the first registration from the newly created registration codes
registration_code = CourseRegistrationCode.objects.all()[0].code
redeem_url = reverse('register_code_redemption', args=[registration_code])
self.login_user()
response = self.client.get(redeem_url)
self.assertEquals(response.status_code, 200)
# check button text
self.assertIn('Activate Course Enrollment', response.content)
#now activate the user by enrolling him/her to the course
response = self.client.post(redeem_url)
self.assertEquals(response.status_code, 200)
self.assertIn('View Dashboard', response.content)
#now check that the registration code has already been redeemed and user is already registered in the course
RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)
response = self.client.get(redeem_url)
self.assertEquals(len(RegistrationCodeRedemption.objects.filter(registration_code__code=registration_code)), 1)
self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content)
#now check that the registration code has already been redeemed
response = self.client.post(redeem_url)
self.assertIn("You've clicked a link for an enrollment code that has already been used.", response.content)
#now check the response of the dashboard page
dashboard_url = reverse('dashboard')
response = self.client.get(dashboard_url)
self.assertEquals(response.status_code, 200)
self.assertIn(self.course.display_name, response.content)
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
@ddt.ddt
class DonationViewTest(ModuleStoreTestCase):
"""Tests for making a donation.
These tests cover both the single-item purchase flow,
as well as the receipt page for donation items.
"""
DONATION_AMOUNT = "23.45"
PASSWORD = "password"
def setUp(self):
"""Create a test user and order. """
super(DonationViewTest, self).setUp()
# Create and login a user
self.user = UserFactory.create()
self.user.set_password(self.PASSWORD)
self.user.save()
result = self.client.login(username=self.user.username, password=self.PASSWORD)
self.assertTrue(result)
# Enable donations
config = DonationConfiguration.current()
config.enabled = True
config.save()
def test_donation_for_org(self):
self._donate(self.DONATION_AMOUNT)
self._assert_receipt_contains("tax purposes")
def test_donation_for_course_receipt(self):
# Create a test course and donate to it
self.course = CourseFactory.create(display_name="Test Course")
self._donate(self.DONATION_AMOUNT, course_id=self.course.id)
# Verify the receipt page
self._assert_receipt_contains("tax purposes")
self._assert_receipt_contains(self.course.display_name)
def test_smallest_possible_donation(self):
self._donate("0.01")
self._assert_receipt_contains("0.01")
@ddt.data(
{},
{"amount": "abcd"},
{"amount": "-1.00"},
{"amount": "0.00"},
{"amount": "0.001"},
{"amount": "0"},
{"amount": "23.45", "course_id": "invalid"}
)
def test_donation_bad_request(self, bad_params):
response = self.client.post(reverse('donation'), bad_params)
self.assertEqual(response.status_code, 400)
def test_donation_requires_login(self):
self.client.logout()
response = self.client.post(reverse('donation'), {'amount': self.DONATION_AMOUNT})
self.assertEqual(response.status_code, 302)
def test_no_such_course(self):
response = self.client.post(
reverse("donation"),
{"amount": self.DONATION_AMOUNT, "course_id": "edx/DemoX/Demo"}
)
self.assertEqual(response.status_code, 400)
@ddt.data("get", "put", "head", "options", "delete")
def test_donation_requires_post(self, invalid_method):
response = getattr(self.client, invalid_method)(
reverse("donation"), {"amount": self.DONATION_AMOUNT}
)
self.assertEqual(response.status_code, 405)
def test_donations_disabled(self):
config = DonationConfiguration.current()
config.enabled = False
config.save()
# Logged in -- should be a 404
response = self.client.post(reverse('donation'))
self.assertEqual(response.status_code, 404)
# Logged out -- should still be a 404
self.client.logout()
response = self.client.post(reverse('donation'))
self.assertEqual(response.status_code, 404)
def _donate(self, donation_amount, course_id=None):
"""Simulate a donation to a course.
This covers the entire payment flow, except for the external
payment processor, which is simulated.
Arguments:
donation_amount (unicode): The amount the user is donating.
Keyword Arguments:
course_id (CourseKey): If provided, make a donation to the specific course.
Raises:
AssertionError
"""
# Purchase a single donation item
# Optionally specify a particular course for the donation
params = {'amount': donation_amount}
if course_id is not None:
params['course_id'] = course_id
url = reverse('donation')
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# Use the fake payment implementation to simulate the parameters
# we would receive from the payment processor.
payment_info = json.loads(response.content)
self.assertEqual(payment_info["payment_url"], "/shoppingcart/payment_fake")
# If this is a per-course donation, verify that we're sending
# the course ID to the payment processor.
if course_id is not None:
self.assertEqual(
payment_info["payment_params"]["merchant_defined_data1"],
unicode(course_id)
)
self.assertEqual(
payment_info["payment_params"]["merchant_defined_data2"],
"donation_course"
)
else:
self.assertEqual(payment_info["payment_params"]["merchant_defined_data1"], "")
self.assertEqual(
payment_info["payment_params"]["merchant_defined_data2"],
"donation_general"
)
processor_response_params = PaymentFakeView.response_post_params(payment_info["payment_params"])
# Use the response parameters to simulate a successful payment
url = reverse('shoppingcart.views.postpay_callback')
response = self.client.post(url, processor_response_params)
self.assertRedirects(response, self._receipt_url)
def _assert_receipt_contains(self, expected_text):
"""Load the receipt page and verify that it contains the expected text."""
resp = self.client.get(self._receipt_url)
self.assertContains(resp, expected_text)
@property
def _receipt_url(self):
order_id = Order.objects.get(user=self.user, status="purchased").id
return reverse("shoppingcart.views.show_receipt", kwargs={"ordernum": order_id})
@override_settings(MODULESTORE=MODULESTORE_CONFIG)
class CSVReportViewsTest(ModuleStoreTestCase):
"""
Test suite for CSV Purchase Reporting
"""
def setUp(self):
self.user = UserFactory.create()
self.user.set_password('password')
self.user.save()
self.cost = 40
self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course')
self.course_key = self.course.id
self.course_mode = CourseMode(course_id=self.course_key,
mode_slug="honor",
mode_display_name="honor cert",
min_price=self.cost)
self.course_mode.save()
self.course_mode2 = CourseMode(course_id=self.course_key,
mode_slug="verified",
mode_display_name="verified cert",
min_price=self.cost)
self.course_mode2.save()
verified_course = CourseFactory.create(org='org', number='test', display_name='Test Course')
self.verified_course_key = verified_course.id
self.cart = Order.get_cart_for_user(self.user)
self.dl_grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP)
self.dl_grp.save()
def login_user(self):
"""
Helper fn to login self.user
"""
self.client.login(username=self.user.username, password="password")
def add_to_download_group(self, user):
"""
Helper fn to add self.user to group that's allowed to download report CSV
"""
user.groups.add(self.dl_grp)
def test_report_csv_no_access(self):
self.login_user()
response = self.client.get(reverse('payment_csv_report'))
self.assertEqual(response.status_code, 403)
def test_report_csv_bad_method(self):
self.login_user()
self.add_to_download_group(self.user)
response = self.client.put(reverse('payment_csv_report'))
self.assertEqual(response.status_code, 400)
@patch('shoppingcart.views.render_to_response', render_mock)
def test_report_csv_get(self):
self.login_user()
self.add_to_download_group(self.user)
response = self.client.get(reverse('payment_csv_report'))
((template, context), unused_kwargs) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/download_report.html')
self.assertFalse(context['total_count_error'])
self.assertFalse(context['date_fmt_error'])
self.assertIn(_("Download CSV Reports"), response.content.decode('UTF-8'))
@patch('shoppingcart.views.render_to_response', render_mock)
def test_report_csv_bad_date(self):
self.login_user()
self.add_to_download_group(self.user)
response = self.client.post(reverse('payment_csv_report'), {'start_date': 'BAD', 'end_date': 'BAD', 'requested_report': 'itemized_purchase_report'})
((template, context), unused_kwargs) = render_mock.call_args
self.assertEqual(template, 'shoppingcart/download_report.html')
self.assertFalse(context['total_count_error'])
self.assertTrue(context['date_fmt_error'])
self.assertIn(_("There was an error in your date input. It should be formatted as YYYY-MM-DD"),
response.content.decode('UTF-8'))
    CORRECT_CSV_NO_DATE_ITEMIZED_PURCHASE = ",1,purchased,1,40,40,usd,Registration for Course: Robot Super Course,"
def test_report_csv_itemized(self):
report_type = 'itemized_purchase_report'
start_date = '1970-01-01'
end_date = '2100-01-01'
PaidCourseRegistration.add_to_order(self.cart, self.course_key)
self.cart.purchase()
self.login_user()
self.add_to_download_group(self.user)
response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date,
'end_date': end_date,
'requested_report': report_type})
self.assertEqual(response['Content-Type'], 'text/csv')
report = initialize_report(report_type, start_date, end_date)
self.assertIn(",".join(report.header()), response.content)
self.assertIn(self.CORRECT_CSV_NO_DATE_ITEMIZED_PURCHASE, response.content)
def test_report_csv_university_revenue_share(self):
report_type = 'university_revenue_share'
start_date = '1970-01-01'
end_date = '2100-01-01'
start_letter = 'A'
end_letter = 'Z'
self.login_user()
self.add_to_download_group(self.user)
response = self.client.post(reverse('payment_csv_report'), {'start_date': start_date,
'end_date': end_date,
'start_letter': start_letter,
'end_letter': end_letter,
'requested_report': report_type})
self.assertEqual(response['Content-Type'], 'text/csv')
report = initialize_report(report_type, start_date, end_date, start_letter, end_letter)
self.assertIn(",".join(report.header()), response.content)
class UtilFnsTest(TestCase):
"""
Tests for utility functions in views.py
"""
def setUp(self):
self.user = UserFactory.create()
def test_can_download_report_no_group(self):
"""
Group controlling perms is not present
"""
self.assertFalse(_can_download_report(self.user))
def test_can_download_report_not_member(self):
"""
User is not part of group controlling perms
"""
Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP).save()
self.assertFalse(_can_download_report(self.user))
def test_can_download_report(self):
"""
User is part of group controlling perms
"""
grp = Group(name=settings.PAYMENT_REPORT_GENERATOR_GROUP)
grp.save()
self.user.groups.add(grp)
self.assertTrue(_can_download_report(self.user))
def test_get_date_from_str(self):
test_str = "2013-10-01"
date = _get_date_from_str(test_str)
self.assertEqual(2013, date.year)
self.assertEqual(10, date.month)
self.assertEqual(1, date.day)
|
UQ-UQx/edx-platform_lti
|
lms/djangoapps/shoppingcart/tests/test_views.py
|
Python
|
agpl-3.0
| 86,720
|
[
"VisIt"
] |
b448b41d3dd8de32c43e5055c10f6b3fdb35d67f63b26d2ea511155a40843c59
|
# Functions removed from main code that MAY still be useful somewhere.
# From Analyse Object
# def save_ranges(self):
# """
# Saves signal/background/transition data ranges for each sample.
# """
# if os.path.isfile(self.param_dir + 'bkg.rng'):
# f = input(('Range files already exist. Do you want to overwrite '
# 'them (old files will be lost)? [Y/n]: '))
# if 'n' in f or 'N' in f:
# print('Ranges not saved. Run self.save_ranges() to try again.')
# return
# bkgrngs = []
# sigrngs = []
# for d in self.data:
# bkgrngs.append(d.sample + ':' + str(d.bkgrng.tolist()))
# sigrngs.append(d.sample + ':' + str(d.sigrng.tolist()))
# bkgrngs = '\n'.join(bkgrngs)
# sigrngs = '\n'.join(sigrngs)
# fb = open(self.param_dir + 'bkg.rng', 'w')
# fb.write(bkgrngs)
# fb.close()
# fs = open(self.param_dir + 'sig.rng', 'w')
# fs.write(sigrngs)
# fs.close()
# return
# def load_ranges(self, bkgrngs=None, sigrngs=None):
# """
# Loads signal/background/transition data ranges for each sample.
# Parameters
# ----------
# bkgrngs : str or None
#         An array of size (2, n) specifying time intervals that are
# background regions.
# sigrngs : str or None
#         An array of size (2, n) specifying time intervals that are
# signal regions.
# Returns
# -------
# None
# """
# if bkgrngs is None:
# bkgrngs = self.param_dir + 'bkg.rng'
# bkgs = open(bkgrngs).readlines()
# samples = []
# bkgrngs = []
# for b in bkgs:
# samples.append(re.match('(.*):{1}(.*)',
# b.strip()).groups()[0])
# bkgrngs.append(eval(re.match('(.*):{1}(.*)',
# b.strip()).groups()[1]))
# for s, rngs in zip(samples, bkgrngs):
# self.data_dict[s].bkgrng = np.array(rngs)
# if sigrngs is None:
# sigrngs = self.param_dir + 'sig.rng'
# sigs = open(sigrngs).readlines()
# samples = []
# sigrngs = []
# for s in sigs:
# samples.append(re.match('(.*):{1}(.*)',
# s.strip()).groups()[0])
# sigrngs.append(eval(re.match('(.*):{1}(.*)',
# s.strip()).groups()[1]))
# for s, rngs in zip(samples, sigrngs):
# self.data_dict[s].sigrng = np.array(rngs)
# # number the signal regions (used for statistics and standard matching)
# for s in self.data:
# # re-create booleans
# s.makerangebools()
# # make trnrng
# s.trn[[0, -1]] = False
# s.trnrng = s.Time[s.trn ^ np.roll(s.trn, 1)]
# # number traces
# n = 1
# for i in range(len(s.sig)-1):
# if s.sig[i]:
# s.ns[i] = n
# if s.sig[i] and ~s.sig[i+1]:
# n += 1
# s.n = int(max(s.ns)) # record number of traces
# return
# From D Object
# def bkgrange(self, rng=None):
# """
# Calculate background boolean array from list of limit pairs.
#     Generate a background boolean array based on a list of [min,max] value
# pairs stored in self.bkgrng.
# If `rng` is supplied, these will be added to the bkgrng list before
# the boolean arrays are calculated.
# Parameters
# ----------
# rng : array_like
#         [min,max] pairs defining the upper and lower limits of background
# regions.
# Returns
# -------
# None
# """
# if rng is not None:
#         if np.array(rng).ndim == 1:
# self.bkgrng = np.append(self.bkgrng, np.array([rng]), 0)
# else:
# self.bkgrng = np.append(self.bkgrng, np.array(rng), 0)
# self.bkg = tuples_2_bool(self.bkgrng, self.Time)
# # self.bkg = np.array([False] * self.Time.size)
# # for lb, ub in self.bkgrng:
# # self.bkg[(self.Time > lb) & (self.Time < ub)] = True
# self.trn = ~self.bkg & ~self.sig # redefine transition regions
# return
# def sigrange(self, rng=None):
# """
# Calculate signal boolean array from list of limit pairs.
#     Generate a signal boolean array based on a list of [min,max] value
#     pairs stored in self.sigrng.
# If `rng` is supplied, these will be added to the sigrng list before
# the boolean arrays are calculated.
# Parameters
# ----------
# rng : array_like
#         [min,max] pairs defining the upper and lower limits of signal
# regions.
# Returns
# -------
# None
# """
# if rng is not None:
#         if np.array(rng).ndim == 1:
# self.sigrng = np.append(self.sigrng, np.array([rng]), 0)
# else:
# self.sigrng = np.append(self.sigrng, np.array(rng), 0)
# self.sig = tuples_2_bool(self.sigrng, self.Time)
# # self.sig = np.array([False] * self.Time.size)
# # for ls, us in self.sigrng:
# # self.sig[(self.Time > ls) & (self.Time < us)] = True
# self.trn = ~self.bkg & ~self.sig # redefine transition regions
# return
# def makerangebools(self):
# """
# Calculate signal and background boolean arrays from lists of limit
# pairs.
# """
# self.sig = tuples_2_bool(self.sigrng, self.Time)
# self.bkg = tuples_2_bool(self.bkgrng, self.Time)
# self.trn = ~self.bkg & ~self.sig
# return
# def separate(self, analytes=None):
# """
# Extract signal and backround data into separate arrays.
# Isolates signal and background signals from raw data for specified
# elements.
# Parameters
# ----------
# analytes : array_like
# list of analyte names (default = all analytes)
# Returns
# -------
# None
# """
# if analytes is None:
# analytes = self.analytes
# self.data['background'] = {}
# self.data['signal'] = {}
# for a in analytes:
# self.data['background'][a] = self.focus[a].copy()
# self.data['background'][a][~self.bkg] = np.nan
# self.data['signal'][a] = self.focus[a].copy()
# self.data['signal'][a][~self.sig] = np.nan
# def bkg_subtract(self, bkgs):
# """
# Subtract provided background from signal (focus stage).
# Results is saved in new 'bkgsub' focus stage
# Parameters
# ----------
# bkgs : dict
# dict containing background values to subtract from
# focus stage of data.
# Returns
# -------
# None
# """
#     if any(a not in bkgs.keys() for a in self.analytes):
# warnings.warn(('Not all analytes have been provided in bkgs.\n' +
# "If you didn't do this on purpose, something is\n" +
# "wrong!"))
# self.data['bkgsub'] = {}
# for a in self.analytes:
# self.data['bkgsub'][a] = self.focus[a] - bkgs[a]
# self.setfocus('bkgsub')
# return
# def bkg_correct(self, mode='constant'):
# """
# Subtract background from signal.
# Subtract constant or polynomial background from all analytes.
# Parameters
# ----------
# mode : str or int
# 'constant' or an int describing the degree of polynomial
# background.
# Returns
# -------
# None
# """
# params = locals()
# del(params['self'])
# self.bkgcorrect_params = params
# self.bkgrange()
# self.sigrange()
# self.separate()
# self.data['bkgsub'] = {}
# if mode == 'constant':
# for c in self.analytes:
# self.data['bkgsub'][c] = \
# (self.data['signal'][c] -
# np.nanmean(self.data['background'][c]))
# if (mode != 'constant'):
# for c in self.analytes:
# p = np.polyfit(self.Time[self.bkg], self.focus[c][self.bkg],
# mode)
# self.data['bkgsub'][c] = \
# (self.data['signal'][c] -
# np.polyval(p, self.Time))
# self.setfocus('bkgsub')
# return
# Helper Functions
# def gauss_inv(y, *p):
# """
# Inverse Gaussian function.
# For determining the x coordinates
# for a given y intensity (i.e. width at a given height).
# Parameters
# ----------
# y : float
# The height at which to calculate peak width.
# *p : parameters unpacked to mu, sigma
# mu: peak center
# sigma: peak width
# Return
# ------
# array_like
# x positions either side of mu where gauss(x) == y.
# """
# mu, sigma = p
# return np.array([mu - 1.4142135623731 * np.sqrt(sigma**2*np.log(1/y)),
# mu + 1.4142135623731 * np.sqrt(sigma**2*np.log(1/y))])
|
oscarbranson/latools
|
latools/latools_graveyard.py
|
Python
|
mit
| 9,555
|
[
"Gaussian"
] |
c7ce87abb3ed43c308a49b67f8288938e2a04d0527f47dbd56c3b4e96a976b80
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013, 2014 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import unittest
from jsonschema import validate
from jsonschema import ValidationError
from freeseer.framework.config.options import ChoiceOption
from freeseer.tests.framework.config.options import OptionTest
class TestChoiceOptionNoDefault(unittest.TestCase, OptionTest):
"""Tests ChoiceOption without a default value."""
valid_success = [
'hello',
'world',
]
valid_failure = [
'hello1',
'1hello',
'w0rld',
]
encode_success = zip(valid_success, valid_success)
decode_success = zip(valid_success, valid_success)
decode_failure = valid_failure
def setUp(self):
self.option = ChoiceOption([
'hello',
'world',
])
def test_schema(self):
"""Tests a ChoiceOption schema method."""
expected = {
'enum': [
'hello',
'world',
],
}
self.assertRaises(ValidationError, validate, 'error', self.option.schema())
self.assertIsNone(validate('world', self.option.schema()))
self.assertDictEqual(self.option.schema(), expected)
class TestChoiceOptionWithDefault(TestChoiceOptionNoDefault):
"""Tests ChoiceOption with a default value."""
def setUp(self):
self.option = ChoiceOption([
'hello',
'world',
], 'hello')
def test_default(self):
"""Tests that the default was set correctly."""
self.assertEqual(self.option.default, 'hello')
def test_schema(self):
"""Tests a ChoiceOption schema method."""
expected = {
'default': 'hello',
'enum': [
'hello',
'world',
],
}
self.assertRaises(ValidationError, validate, 'error', self.option.schema())
self.assertIsNone(validate('world', self.option.schema()))
self.assertDictEqual(self.option.schema(), expected)
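# Illustrative use of the option class under test (a sketch mirroring the expectations in
# TestChoiceOptionWithDefault above; not part of the original test module):
#   option = ChoiceOption(['hello', 'world'], 'hello')
#   option.schema()                     -> {'default': 'hello', 'enum': ['hello', 'world']}
#   validate('world', option.schema())  -> None (valid)
#   validate('error', option.schema())  -> raises ValidationError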
|
Freeseer/freeseer
|
src/freeseer/tests/framework/config/options/test_choice.py
|
Python
|
gpl-3.0
| 2,916
|
[
"VisIt"
] |
7d6d835c86b2e4dd680909c0e2a80ad678a20c06df5aecfd08b38a7f2295bc95
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Example script that generates FEFF input files from a cif file
Remove comment # on write line to actually write files to disk
"""
import os
import argparse
from pymatgen.io.feff.sets import MPXANESSet
from pymatgen.io.cif import CifParser
__author__ = "Alan Dozier"
__credits__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0.2"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__date__ = "April 7, 2013"
def main():
"""
Main method.
"""
parser = argparse.ArgumentParser(description='''
Example script to generate FEFF input files from a cif file
Author: Alan Dozier
Version: 1.0
Last updated: August, 2012''')
parser.add_argument('cif_file', metavar='cif_file', type=str, nargs=1,
help='cif_file to use')
parser.add_argument('central_atom', metavar='central_atom', type=str,
nargs=1, help='symbol of absorbing atom')
parser.add_argument('calc_type', metavar='calc_type', type=str, nargs=1,
help='type of calc, currently XANES or EXAFS')
args = parser.parse_args()
cif_file = args.cif_file[0]
central_atom = args.central_atom[0]
calc_type = args.calc_type[0]
r = CifParser(cif_file)
structure = r.get_structures()[0]
x = MPXANESSet("MaterialsProject")
source = os.path.basename(cif_file)
comment = 'From cif file'
header = MPXANESSet.get_header(x, structure, source, comment)
print("\n\nHEADER\n")
print(header)
tags = MPXANESSet.get_feff_tags(x, calc_type)
print("\n\nPARAMETERS\n")
print(tags)
POT = MPXANESSet.get_feff_pot(x, structure, central_atom)
print("\n\nPOTENTIALS\n")
print(POT)
ATOMS = MPXANESSet.get_feff_atoms(x, structure, central_atom)
print("\n\nATOMS\n")
print(ATOMS)
# x.write_input(structure, calc_type, source, central_atom, comment, "./feffinput")
if __name__ == "__main__":
main()
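# Example invocation (hypothetical file and element names; the three positional arguments
# match the argparse definition above):
#   python feff_input_generation.py LiCoO2.cif Co XANES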
|
tschaume/pymatgen
|
pymatgen/cli/feff_input_generation.py
|
Python
|
mit
| 2,139
|
[
"FEFF",
"pymatgen"
] |
7c682ee96b3b7e03544dc3d9a07999f916340da0b0889021cf63b667318cd8ab
|
import sys, numpy as np, hashlib, copy, cPickle, ctypes, os, os.path as osp
from collections import defaultdict,namedtuple
import __builtin__
import traceback
import cgt
from . import utils
# ================================================================
# Datatypes
# ================================================================
class Dtype: #pylint: disable=W0232
@staticmethod
def canon(dt):
"""
Return canonical string representation of dtype,
using the floating point type that CGT is currently configured for
The following string representations are used: i1,i2,i4,i8, f4,f8,f16 c8,c16,c32
So either we're using single (f4, c8) or double (f8, c16) or quad (f16, c32)
Note that quad precision is very useful for gradient checking
"""
dt = np.dtype(dt)
k = dt.kind
if k=='f':
return cgt.floatX
elif k in 'biu':
return 'i'+str(dt.itemsize)
elif k=='c':
return cgt.complexX
else:
raise ValueError("Invalid dtype %s"%dt)
def as_valid_array(x, dtype=None):
"""
Converts to numpy array and dtype with valid precision
"""
x = np.asarray(x)
x = x.astype(Dtype.canon(x.dtype) if dtype is None else dtype)
return x
def as_valid_tuple(x):
return tuple(as_valid_array(a) for a in x)
# @TUPLES_OF_TENSORS
def as_valid_arg(x):
if isinstance(x, tuple):
return as_valid_tuple(x)
else:
return as_valid_array(x)
class Type(object):
"""
Represents a datatype for Nodes
"""
pass
class TensorType(Type):
"""
Type used to represent computation results (Nodes in the graph)
that are n-dimensional arrays.
Scalars are represented as zero-dimensional arrays
[though we may create a scalar type later for efficiency]
"""
def __init__(self, dtype, ndim):
self.dtype = Dtype.canon(dtype)
self.ndim = ndim
def __repr__(self):
return "Tensor(%s,%s)"%(self.dtype, self.ndim)
def __eq__(self, other):
return self.dtype == other.dtype and self.ndim == other.ndim
def __hash__(self):
return hash((self.dtype, self.ndim))
class TupleType(Type):
"""
A compound type consisting of a tuple of other types
Only tuples of tensors are currently supported
"""
def __init__(self, *eltypes):
assert all(isinstance(eltype, TensorType) for eltype in eltypes) # @TUPLES_OF_TENSORS
self.eltypes = eltypes
self.dtype = 'O'
def __len__(self):
return len(self.eltypes)
def __getitem__(self, i):
return self.eltypes[i]
def __iter__(self):
return iter(self.eltypes)
def __str__(self):
return "Tup(" + ",".join(map(str,self.eltypes))+")"
def __eq__(self, other):
return len(self.eltypes) == len(other.eltypes)\
and all(typ0 == typ1 for (typ0, typ1) in zip(self.eltypes, other.eltypes))
def __hash__(self):
return hash((self.eltypes, self.dtype))
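# For orientation (illustrative only, using the classes defined above):
#   TensorType('f4', 2)                                 # a 2-d float array
#   TensorType('i8', 0)                                 # a scalar, stored as a 0-d array
#   TupleType(TensorType('f4', 2), TensorType('i8', 0)) # a (matrix, scalar) pair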
class Device(object):
"""
Represents a location where a computation is performed
devtype: cpu vs gpu
idx: index of which device
"""
def __init__(self, devtype="cpu", idx=0):
assert isinstance(devtype,str) and isinstance(idx,int)
self.devtype = devtype
self.idx = idx
def __eq__(self, other):
return self.devtype == other.devtype and self.idx == other.idx
def __hash__(self):
return hash((self.devtype, self.idx))
def __repr__(self):
return "%s/%s"%(self.devtype,self.idx)
def _promote(typ1, typ2):
"""
Output type of a floating point operation involving these input types
"""
d1 = typ1[0]
s1 = typ1[1:]
d2 = typ2[0]
s2 = typ2[1:]
if d1 == 'c' or d2 == 'c':
return cgt.complexX
elif d1 == 'f' or d2 == 'f':
return cgt.floatX
elif d1 == 'i' and d2 == 'i':
assert d1 == d2
return d1 + __builtin__.max(s1,s2)
else:
raise ValueError("Don't know what to do with dtypes %s,%s"%(typ1, typ2))
def _promote_multi(xtypes):
"""
_promote with multiple operands
"""
return reduce(_promote, xtypes)
def dtype_kind(dtype):
"""
one of f,c,i
"""
assert isinstance(dtype, str)
return dtype[0]
def _dtype_itemsize(dtype):
"""
size in bytes
"""
return int(dtype[1:])
def _type_to_int(typ1):
"""
integer type of result of operation such as floor that converts to integer
"""
d1 = dtype_kind(typ1)
if d1 == 'f' or d1 == 'c':
return 'i8'
else:
return typ1
# ================================================================
# Computation Graph Nodes
# ================================================================
class Node(object):
"""
Node in the computation graph
"""
counter = 0 # allows unique identification of argument nodes
# Constants
# ----------------------------------------
def __init__(self, typ, op, parents, props=None, fixed_shape=None, name=None):
self.typ = typ
self.op = op
self.parents = parents
self.props = props or {}
self._fixed_shape = fixed_shape
self.name = name
self.counter = Node.counter
Node.counter += 1
def __repr__(self):
if self.op is None:
return "Argument{%s,name='%s'}"%(self.typ,self.name)
else:
return "Result{%s}"%(str(self.op))
# CGT-specific
# ----------------------------------------
def is_argument(self):
"""
Returns whether Node is an argument
"""
return self.op is None
def is_data(self):
"""
Returns whether Node's Op is data
"""
return self.op is not None and self.op.is_data_op
def is_input(self):
"""
Returns whether this node is either an argument or is data
"""
return self.is_argument() or self.is_data()
def get_diff(self):
"""
Returns a sequence of bool indicating whether output is differentiable wrt each input
"""
return [] if self.op is None else self.op.get_diff(len(self.parents))
def is_tensor(self):
"""
Returns whether this node's type (self.typ) is TensorType
"""
return isinstance(self.typ, TensorType)
def is_tuple(self):
"""
Returns whether this node's type (self.typ) is TupleType
"""
return isinstance(self.typ, TupleType)
def is_scalar(self):
return self.is_tensor() and self.ndim==0
def get_hash(self, node2hash):
"""
Return UNIQUE string identifying this Node
"""
if self.is_input():
return str(self.counter)
else:
hashobj = hashlib.md5(self.op.get_hash())
for p in self.parents:
hashobj.update(node2hash[p])
return hashobj.hexdigest()
def clone(self, newparents):
"""
Create a new Node that applies self.op to `newparents`
Preserve annotations on this node (.props)
"""
if self.is_input(): return self
else: return Node(self.typ, self.op, newparents, props = self.props)
def get_fixed_shape(self):
"""
Returns a tuple of int or None. You'll get ints if this is an argument or data node
with fixed shape provided
"""
if self.is_data():
return self.op.get_fixed_shape()
return (None,)*self.ndim if self._fixed_shape is None else self._fixed_shape
# Math Overloads
# ----------------------------------------
__array_priority__ = 1000 # precedence over numpy operators
def __neg__(self):
return Result(ElwiseUnary("neg"), [self])
def __add__(self, other):
return elwise_binary("+", self, other)
def __sub__(self, other):
return elwise_binary("-", self, other)
def __mul__(self, other):
return elwise_binary("*", self, other)
def __div__(self, other):
return elwise_binary("/", self, other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, other):
return elwise_binary("**", self, other)
def __floordiv__(self, other):
return cgt.floor_divide(self, other)
def __gt__(self, other):
return cgt.greater(self, other)
def __ge__(self, other):
return cgt.greater_equal(self, other)
def __lt__(self, other):
return cgt.less(self, other)
def __le__(self, other):
return cgt.less_equal(self, other)
# GOT RID OF __eq__ and __ne__ because they might lead to funny problems when
# people want equality check. No strong opinion on whether they should be included
# def __eq__(self, other):
# return equal(self, other)
# def __ne__(self, other):
# return not_equal(self, other)
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return cgt.constant(other).__sub__(self)
def __rmul__(self, other):
return self.__mul__(other)
def __rdiv__(self, other):
return cgt.constant(other).__div__(self)
def __rtruediv__(self, other):
        return cgt.constant(other).__truediv__(self)
def __rfloordiv__(self, other):
return cgt.constant(other).__floordiv__(self)
def __getitem__(self, slis):
if self.is_tuple():
assert isinstance(slis, int), "TupleType can be only be indexed by an int"
return cgt.tuple_index(self, slis)
else:
return cgt.subtensor(self, slis)
def __iter__(self):
if self.is_tensor():
raise TypeError("Array variable is not iterable")
if self.is_tuple():
return iter(unpack(self))
else:
raise NotImplementedError
def __len__(self):
if isinstance(self.typ, TupleType):
return len(self.typ)
else:
raise ValueError("Node of type Tensor has no __len__")
def __nonzero__(self):
return True
# Properties like numpy ndarray
# ----------------------------------------
@property
def shape(self):
return cgt.shape(self)
@property
def ndim(self):
return self.typ.ndim if isinstance(self.typ, TensorType) else 0
@property
def dtype(self):
return self.typ.dtype
@property
def T(self):
return cgt.transpose(self)
# More math overloads
# ----------------------------------------
def reshape(self, shp):
"see cgt.reshape"
assert isinstance(shp, (list,tuple))
return cgt.reshape(self, shp)
def dot(self, other):
"see cgt.dot"
return cgt.dot(self, other)
def sum(self, axis=None, keepdims=False):
"see cgt.sum"
return cgt.sum(self, axis=axis, keepdims=keepdims)
def prod(self, axis=None, keepdims=False):
"see cgt.prod"
return cgt.prod(self, axis=axis, keepdims=keepdims)
def max(self, axis=None, keepdims=False):
"see cgt.max"
return cgt.max(self, axis=axis, keepdims=keepdims)
def argmax(self, axis=None, keepdims=False):
"see cgt.argmax"
return cgt.argmax(self, axis=axis, keepdims=keepdims)
def mean(self, axis=None, keepdims=False):
"see cgt.mean"
return cgt.mean(self, axis=axis, keepdims=keepdims)
def transpose(self, axes=None):
"see cgt.transpose"
return cgt.transpose(self, axes=axes)
def flatten(self):
"see cgt.flatten"
return cgt.flatten(self)
def dimshuffle(self, pattern):
"see cgt.dimshuffle"
return cgt.dimshuffle(self, pattern)
def _ndarray_type(value):
assert isinstance(value, np.ndarray)
return TensorType(value.dtype, value.ndim)
def _get_value_type(value):
if isinstance(value, np.ndarray):
return TensorType(value.dtype, value.ndim)
elif isinstance(value, tuple):
return TupleType(*map(_get_value_type, value))
def num_components(node):
return len(node.typ) if isinstance(node.typ, TupleType) else 1
class Op(object):
"""
Describes an operation that will be performed on some data.
"""
# attributes that can be overwritten in subclasses
return_type = "byref" # or "byval"
writes_to_input = -1 # whether output is allowed to have same underlying data as input
available_impls = () # python, native_cpu, native_gpu
is_data_op = False
# pylint: disable=W0613
def shp_apply(self, parents):
"""
Return output shapes as a function of input nodes
"""
raise NotImplementedError
def typ_apply(self, parent_types):
"""
Return output types as a function of input types
"""
raise NotImplementedError
def get_diff(self, num_inputs):
"""
Return a list of length len(inputs), specifying which inputs the Op is differentiable with respect to.
"""
assert isinstance(num_inputs, int)
return [True]*num_inputs
def get_expr(self, parent_exprs):
"""
Return string expression for this operation, built from the parent expressions
"""
return "%s(%s)"%(str(self), ",".join(parent_exprs))
def get_hash(self):
"""
Return a string that uniquely identifies the value of this Op.
Should ideally be fixed across program runs
"""
return cPickle.dumps(self.__dict__, -1)+self.__class__.__name__
def get_name(self):
"""
Get a human-readable description of the Op, including its attributes
"""
return type(self).__name__
def get_replacement(self, _newparents, _analysis):
"""
        Return a replacement node computed from the new parents and analysis results, or None if no replacement applies
"""
return None
def pullback(self, inputs, output, goutput): #pylint: disable=W0613
"""
Compute symbolic expressions for derivatives obtained by backpropagation on this Op
Given a function y = f(x_1, x_2, ..., x_k), let J_k denote the Jacobian dy/dx_k
pullback(...) computes gradx_k = J_k^T grady
"""
raise MethodNotDefined
def pushforward(self, inputs, output, goutput):
r"""
Compute symbolic expressions for derivatives obtained by "tangent propagation" on this Op
Given a function y = f(x_1, x_2, ..., x_k), let J_k denote the Jacobian dy/dx_k
        pushforward([x_1, ..., x_k], y, [gradx_1, ..., gradx_k]) := \sum_k J_k gradx_k
"""
raise MethodNotDefined
def spliting(self, inputs):
"""
Return a list [tensor_type_sig, split_specs]
where tensor_type_sig is a string labeling the input and output axes
and split_specs is a list of tuples (axis, split_type, split_args...)
tensor_type_sig is easiest to illustrate with a few examples:
Mul22: i.j , j.k-> i.k
Sum{1} i.j -> i.1
GetSli{0} ij.1.1
"""
raise MethodNotDefined
def get_native_compile_info(self, inputs, devtype):
"""
returns NativeCompileInfo
"""
raise MethodNotDefined
def get_py_func(self, input_types):
"""
Returns python function that implements this operation
"""
raise MethodNotDefined
def get_py_callable(self, input_types):
func = self.get_py_func(input_types)
return PyCallable(self, len(input_types), func)
def __repr__(self):
"""
Get a human-readable description of the Op, including its attributes
"""
return type(self).__name__
def as_node(val_or_node):
"""
If numeric data received, convert to a constant node
"""
if isinstance(val_or_node, Node):
return val_or_node
elif isinstance(val_or_node, np.ndarray) or np.isscalar(val_or_node):
return cgt.constant(val_or_node)
elif isinstance(val_or_node, tuple):
return cgt.make_tuple(*val_or_node)
else:
raise ValueError("expected numeric data or Node, got object of type %s"%type(val_or_node))
def default_props():
props = {}
props["default_device"] = _CONFIG["default_device"]
if _CONFIG["debug"] and "stack" not in props: props["stack"] = traceback.extract_stack()[:-3]
return props
def Result(op, parents, typ=None, props=None, name=None):
"""
Just here as as "damage control" after some refactoring/renaming
"""
parents = map(as_node, parents)
typ = op.typ_apply([parent.typ for parent in parents]) if typ is None else typ
return Node(typ, op, parents, props=props or default_props(), name=name)
def Argument(typ, name=None, fixed_shape=None, props=None):
"""
Just here as as "damage control" after some refactoring/renaming
"""
return Node(typ, None, [], props=props or default_props(), fixed_shape=fixed_shape, name=name)
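# A tiny construction sketch (illustrative; ElwiseUnary is defined elsewhere in this module
# and is the same op used by Node.__neg__ above):
#   x = Argument(TensorType(cgt.floatX, 1), name='x')   # an input node
#   y = Result(ElwiseUnary("neg"), [x])                  # a derived node computing -x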
class GetData(Op):
is_data_op=True
return_type="byval"
available_impls=("python","native_cpu","native_gpu")
def __init__(self, typ):
self.typ = typ
def typ_apply(self, _):
return self.typ
class InMemoryData(GetData):
def __init__(self, value, device=None, fixed_shape_mask=None):
value = as_valid_array(value)
GetData.__init__(self, _ndarray_type(value))
self.device = device or get_config()["default_device"]
self.use_numpy = cgt.get_config()["backend"] == "python"
# use_numpy: whether to store the data as a numpy array or a CppArrayWrapper object
if self.use_numpy:
assert self.device.devtype=="cpu","can only use numpy for cpu. maybe you need to set backend=native?"
else:
self.dataptr = ctypes.c_long(0)
self.set_value(value)
assert self._value.dtype != object
if fixed_shape_mask is None: fixed_shape_mask = (False,)*self._value.ndim
elif fixed_shape_mask == "all": fixed_shape_mask = (True,)*self._value.ndim
self.fixed_shape = tuple(s if bfixed else None for (s, bfixed) in zip(value.shape, fixed_shape_mask))
def get_py_func(self, _):
def f(_):
return self.get_value()
return f
def get_native_compile_info(self, _input_types, _devtype):
code=r"""
CGT_EXPORT_C cgtArray* $function($closure* cldata, cgtArray** reads) {
return *(cgtArray**)cldata->pptr;
}"""
pptr = self.get_pptr()
return NativeCompileInfo(code, closure_triples=[("pptr", ctypes.c_void_p, pptr)],
store_objects=self._value)
def __repr__(self):
return "Data{%s}"%(self.typ)
def get_device(self):
return self.device
def get_value(self):
return self._value if self.use_numpy else self._value.to_numpy()
# XXX use more explicit names
def get_shape(self):
return self._value.shape
def get_size(self):
return self._value.size
def set_value(self, value):
value = value.astype(self.typ.dtype)
if self.use_numpy:
self._value = value.copy()
else:
self._value = cgt.cycgt.CppArrayWrapper.from_numpy(value, self.device.devtype, False) #pylint: disable=E1101
self.dataptr.value = self._value.ptr
def get_pptr(self):
return ctypes.addressof(self.dataptr)
def get_fixed_shape(self):
return self.fixed_shape
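# Illustrative use of InMemoryData (a sketch; assumes the python backend, where values are
# kept as numpy arrays per the use_numpy branch above):
#   W = InMemoryData(np.zeros((3, 4)), fixed_shape_mask="all")
#   W.get_value().shape   -> (3, 4)
#   W.get_fixed_shape()   -> (3, 4)   # every axis pinned because fixed_shape_mask="all"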
def _singleton_ones(dtype, ndim):
return cgt.constant(np.ones((1,)*ndim, dtype))
def make_argument(typ):
if isinstance(typ, TupleType):
        return Argument(TupleType(*typ))
elif isinstance(typ, TensorType):
return Argument(TensorType(typ.dtype, typ.ndim))
else:
raise ValueError("expected Tuple or Tensor. Got %s"%typ)
# ================================================================
# Differentiation
# ================================================================
def differentiably_influences(outputs, nodelist=None):
"""
Return the set of nodes that differentiably influence `outputs`
i.e., the Jacobian doutputs/dnode != 0
in reverse topological sorted order
optionally pass in nodelist=topsorted(outputs)
(save on recomputation of topsort)
"""
if nodelist is None: nodelist = list(topsorted(outputs))
diset = set(outputs)
for node in reversed(nodelist):
if node in diset and not node.is_input():
for (p,d) in utils.safezip(node.parents, node.get_diff()):
if d: diset.add(p)
return diset
def differentiably_influenced_by(wrt, outputs=None, nodelist=None):
"""
Return the set of nodes that are differentiably influenced by outputs,
i.e., the set of x for which Jacobian dx/dwrt is nonzero
"""
assert (outputs is None) != (nodelist is None) # one of these are provided
if nodelist is None: nodelist = list(topsorted(outputs))
dibset = set(wrt)
for node in nodelist:
if any(p in dibset and d for (p,d) in utils.safezip(node.parents, node.get_diff())):
dibset.add(node)
return dibset
def pullback(outputs, goutputs, wrt):
"""
This function propagates derivative information backwards from the outputs of a computation
to the inputs.
All of these operations are performed symbolically, and we construct expressions for derivatives
of inputs in terms of derivatives of outputs.
This function is called 'pullback' as a reference to the similar concept in differential geometry.
More precisely, suppose f is a function with (y_1, y_2, ..., y_k) = f(x_1, x_2, ..., x_n)
Then pullback([x_1,...,x_n], [y_1,...,y_k], [gy_1, ..., gy_k]) := [gx_1, ..., gx_n]
"""
nodelist = list(topsorted(outputs))
dio = differentiably_influences(outputs,nodelist=nodelist)
dibw = differentiably_influenced_by(wrt, nodelist=nodelist)
    # Check that each wrt node differentiably influences some output
    badwrtset = set(wrt).difference(dio)
    if badwrtset:
        raise NonDifferentiable("Outputs not differentiable wrt %s"%badwrtset)
    # Check that each output is differentiably influenced by some wrt node
    badoutset = set(outputs).difference(dibw)
    if badoutset:
        raise NonDifferentiable("Outputs %s not differentiable wrt any of %s"%(badoutset, wrt))
# Map node to a list of gradient terms
# These gradient terms will be summed up when we visit the node, when iterating through the nodes
# in reverse toplogical order
var2gs = defaultdict(list)
for (node, gnode) in utils.safezip(outputs, goutputs):
var2gs[node] = [gnode]
# "active" nodes are the ones that are differentially influenced by the inputs
# and also differentiably influence the outputs. These are the nodes where we need to call the
# "pullback" function to backpropagate derivatives
active = dio.intersection(dibw)
# Iterate through nodes in reverse topological order
for node in reversed(nodelist):
if node not in active: continue
# Once we reach a node, we have already backpropagated from all parents
# So now we can sum up the gradients
if len(var2gs[node]) > 1:
if node.is_tensor():
var2gs[node] = [cgt.add_multi(var2gs[node])]
# There's only one gradient in the list at this point
gnode = var2gs[node][0]
if not node.is_input():
if isinstance(node.op, TupleIndex):
# A little complication that arises when we have a node of Tuple type
# Instead of having a list of gradient terms, we're going to store a list with one element
# and inside that list, we have a list of gradient terms for each tuple element
# Let's say we have a tuple node (y,z) with predecessor x
# x -> (y, z) -> y
# input Result{foo_op} Result{TupleIndex{0}}
# At this point in the code, we just got gy.
# we first set the gradient at (y,z) to [[None,None]]
# then we set the first element to gy to get
# [[gy, None]]
par = node.parents[0]
if par not in var2gs: var2gs[par] = [[None for _ in par.typ]]
var2gs[par][0][node.op.idx] = gnode
else:
gpars = node.op.pullback(node.parents, node, gnode)
diffs = node.get_diff()
for (par,gpar,d) in utils.safezip3(node.parents, gpars,diffs):
assert (gpar is not None) == d # grad is None iff not diff wrt input
if d: var2gs[par].append(gpar)
    # note: we already summed up the gradient terms for the wrt (input) nodes above,
    # so just take the 0th element
return [var2gs[node][0] for node in wrt]
def infer_shape(arr):
"""
Infer the shape of `arr` and return a tuple of int and None
"""
return tuple(x.op.value if isinstance(x.op, Constant) else None for x in CACHER.simplify(cgt.shape(arr)))
def grad(cost, wrt):
"""
Compute the gradient of scalar-valued `cost` with respect to a list of variables `wrt`
"""
assert cost.ndim == 0
single_wrt = not (isinstance(wrt, list) or isinstance(wrt, tuple))
if single_wrt:
wrtl = [wrt]
else:
wrtl = wrt
assert all(x.is_input() for x in wrtl), "Can only differentiate wrt Input nodes."
gout = _singleton_ones(cost.dtype, 0)
retval = pullback([cost], [gout], wrtl)
if single_wrt:
retval = retval[0]
return retval
# ================================================================
# Compilation
# ================================================================
class NativeCompileInfo(object):
"""
Stores the information necessary to create a NativeCallable object
"""
def __init__(self, func_code, closure_triples = None, includes=(), link_flags="",
setup=False, teardown=False, gpu_deref_mask=None, store_objects = (), extra_srcs=()):
"""
func_code : code implementing function
lang : c++ or cuda
        closure_triples: a list of triples (fieldname, ctypes class, value) that will be provided at each call at runtime
        includes: list of strings specifying files to include
        link_flags: string specifying link flags
setup: bool specifying if there's a setup method to call once when building a Callable, which should be called $setup in the code string
teardown: bool specifying if there's a teardown method, called $teardown
gpu_deref_mask : None or tuple of bools specifying which arguments to Op will have data dereferenced on the GPU (i.e., they must be moved to GPU)
store_objects : list of python objects which should be stored somewhere as long as the Callable created from this object exists, e.g. because they own some data it uses
"""
# To be filled in by caller of constructor
self.op_str = None
self.return_type = None
self.n_in = None
#####
self.func_code = func_code
self.closure_triples = closure_triples
self.includes = list(includes)
self.link_flags = link_flags
self.setup = setup
self.teardown = teardown
self.gpu_deref_mask = gpu_deref_mask
self.store_objects = store_objects
self.extra_srcs = extra_srcs
def involves_gpu(self):
return self.gpu_deref_mask is not None
SrcFile = namedtuple("SrcFile", ["lang","code"])
class Callable(object):
"""
Callable object built out of an Op
"""
def call(self, *args):
raise NotImplementedError
@property
def return_type(self):
raise NotImplementedError
@property
def op_str(self):
raise NotImplementedError
@property
def n_in(self):
raise NotImplementedError
class PyCallable(Callable):
"""
Callable object with an underlying python function acting on python objects
"""
def __init__(self, op, n_in, func):
self._op_str = str(op)
self._return_type = op.return_type
self._n_in = n_in
self._func = func
self._kind = "py"
def call(self, *args):
return self._func(*args)
@property
def op_str(self):
return self._op_str
@property
def return_type(self):
return self._return_type
@property
def kind(self):
return self._kind
@property
def func(self):
return self._func
@property
def n_in(self):
return self._n_in
class NativeCallable(object):
"""
Callable object with an underlying function pointer that acts on cgtObject
"""
def __init__(self, n_in, return_type, op_str, fptr, cldata=None,
store_objects=None, setup_fptr=None, teardown_fptr=None):
self._n_in = n_in
self._return_type = return_type
self._op_str = op_str
self.fptr = fptr
self.cldata = cldata
self.store_objects = store_objects
self.teardown_fptr = teardown_fptr
if setup_fptr is not None:
setup_fptr()
self._kind = "native"
def __del__(self):
if self.teardown_fptr is not None:
self.teardown_fptr()
@property
def return_type(self):
return self._return_type
@property
def op_str(self):
return self._op_str
@property
def kind(self):
return self._kind
@property
def n_in(self):
return self._n_in
def _call_byval(self, inputs):
raise Todo
# cgt.cycgt.apply_byval(self.fptr, self.cldata, inputs) #pylint: disable=E1101
def _call_byref(self, inputs, output):
cgt.cycgt.apply_byref(self.fptr, self.cldata, inputs, output) #pylint: disable=E1101
def call(self, *args):
if self._return_type == "byval": self._call_byval(*args)
elif self.return_type == "byref": self._call_byref(*args)
else: raise Unreachable
# ================================================================
# Ops
# ================================================================
# Constants
# ----------------------------------------------------------------
class Constant(Op): #pylint: disable=W0223
available_impls = ("python","native_cpu")
def __init__(self, value):
self.value = value
def get_value(self):
return self.value
class ConstantTensor(Constant):
return_type = "byref"
# XXX for some reason valret version gives rare segfaults
def __init__(self, value):
Constant.__init__(self, as_valid_array(value))
self._hash = None
def get_expr(self, parent_exprs):
return self._value_str()
def __str__(self):
return "Const{%s}"%self._value_str()
def _value_str(self):
ndim = self.value.ndim
return "%g"%self.value if ndim==0 else "%s%g...%s"%("["*ndim, self.value.flat[0], "]"*ndim)
def get_py_func(self, input_types):
def f(_, write):
np.copyto(write, self.value)
return f
# def get_py_func(self, input_types):
# def f(reads):
# return self.value
# return f
# def valret_func(reads):
# return self.value
# def inplace_func(reads, write):
# if isinstance(write, tuple):
# for (arrfrom,arrto) in utils.safezip(self.value,write):
# np.copyto(arrto, arrfrom)
# else:
# np.copyto(write,self.value)
# return PyImpl(inplace_func=inplace_func)
def pullback(self, _inps, _out, _gout):
return []
def shp_apply(self, _inputs):
return [cgt.constant(x) for x in self.value.shape]
def typ_apply(self, input_types):
assert len(input_types)==0
return _ndarray_type(self.value)
def get_hash(self):
if self._hash is None: self._hash = cPickle.dumps(self.value, -1)
return self._hash
def get_closure(self):
assert isinstance(self.value, np.ndarray)
shapeptr = ctypes.cast(self.value.ctypes.shape, ctypes.c_void_p).value
return [
("ndim", ctypes.c_int,self.value.ndim),
("shape",ctypes.c_void_p,shapeptr),
("dtype",ctypes.c_byte,self.value.dtype.num),
("data",ctypes.c_void_p,self.value.ctypes.data)]
def get_native_compile_info(self, input_types, devtype):
code = None
if self.return_type == "byval": code = self._c_code_valret()
elif self.return_type == "byref": code = self._c_code_inplace()
else: raise ValueError
return NativeCompileInfo(func_code=code, closure_triples=self.get_closure(),store_objects=(self.value,))
def _c_code_inplace(self):
if isinstance(self.value, tuple):
raise MethodNotDefined
return r"""
CGT_EXPORT_C void $function($closure* cldata, cgtArray** reads, cgtArray* write) {
cgt_memcpy(cgtCPU, cgtCPU, write->data(), cldata->data, write->nbytes());
}
"""
def _c_code_valret(self):
return r"""
CGT_EXPORT_C cgtArray* $function($closure* cldata, cgtArray** reads) {
auto out = new cgtArray(cldata->ndim, (size_t*)cldata->shape,
(cgtDtype)cldata->dtype, cgtCPU, (void*)cldata->data, false);
return out;
}"""
class ConstantTuple(Constant):
return_type = "byval"
def __init__(self, value):
        Constant.__init__(self, value)
        self._hash = None
def get_expr(self, parent_exprs):
return str(self.value)
def __str__(self):
return "Const{%s}"%str(self.value)
def get_py_func(self, input_types):
def f(_):
return self.value
return f
def shp_apply(self, _inputs):
return tuple(map(cgt.constant, x.shape) for x in self.value)
def typ_apply(self, input_types):
assert len(input_types)==0
return _get_value_type(self.value)
def get_hash(self):
if self._hash is None: self._hash = cPickle.dumps(self.value, -1)
return self._hash
class Fill(Op):
"""
(value, shape...) -> array filled with `value`, with shape `shape`
"""
available_impls = ("python","native_cpu")
def __init__(self, value):
self.value = as_valid_array(value)
assert self.value.ndim ==0
assert self.value.dtype != "O"
self.dtype = self.value.dtype
assert self.value.ndim==0
self.tag = -1 # @TAG_HACK
def get_hash(self):
return cPickle.dumps((self.value,self.tag) ,-1)
def get_diff(self, num_inputs):
return [False]*num_inputs
def __str__(self):
return "Fill{%g}"%self.value
def get_py_func(self, input_types):
def f(reads, write):
write[...] = self.value
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, inputs):
return inputs
def typ_apply(self, input_types):
assert all(map(_isintscalar, input_types)), "Fill Op should have integer scalars as arguments"
return TensorType(self.dtype, len(input_types))
def get_closure(self):
typ = ctypes.c_long if self.value.dtype.kind=='i' else ctypes.c_double
return [("value", typ, self.value.item())]
def get_native_compile_info(self, _input_types, devtype):
assert devtype == "cpu"
outdtype = Dtype.canon(self.value.dtype)
func_code=r"""
CGT_EXPORT_C void $function($closure* cldata, cgtArray** reads, cgtArray* write) {
size_t s = write->size();
%(cdtype)s value = cldata->value;
for (int i=0; i < s; ++i) write->at<%(cdtype)s>(i) = value;
}"""%dict(cdtype = np2c[outdtype])
return NativeCompileInfo(func_code=func_code, closure_triples=self.get_closure())
def _isintscalar(typ):
return typ.dtype[0] == 'i' and typ.ndim == 0
def _list_is_valid_sli(input_types):
return len(input_types)==3 and all(map(_isintscalar, input_types))
class Arange(Op):
"""
(start,stop,step) -> 1D array, just like numpy
"""
available_impls = ("python","native_cpu")
return_type="byval"
def __init__(self, dtype='i8'):
self.dtype = dtype
def get_diff(self, num_inputs):
return [False]*num_inputs
def get_py_func(self, input_types):
def f((start, stop, step)):
return np.arange(start, stop, step, self.dtype)
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, inputs):
start,stop,step = inputs
return [(stop - start)//step]
def typ_apply(self, input_types):
assert _list_is_valid_sli(input_types)
return TensorType(self.dtype, 1)
def get_native_compile_info(self, input_types, devtype):
func_code=r"""
CGT_EXPORT_C cgtArray* $function(void* cldata, cgtArray** reads) {
size_t start=reads[0]->at<size_t>(0),
stop=reads[1]->at<size_t>(0),
step=reads[2]->at<size_t>(0);
size_t size = (stop-start)/step;
cgtArray* out = new cgtArray(1, &size, cgt_i8, cgtCPU);
for (int i=0; i < size; ++i) out->at<size_t>(i) = start+i*step;
return out;
}"""
return NativeCompileInfo(func_code=func_code)
class ScalarRng(Op):
"""
(shape...) -> array filled with iid random numbers, from either uniform or normal distribution
"""
available_impls = ("python",)
def __init__(self, kind):
assert kind in ("uniform","gaussian")
self.kind = kind
def get_diff(self, num_inputs):
return [False]*num_inputs
def __str__(self):
return "Rng{%s}"%self.kind
def get_py_func(self, input_types):
def f(reads, write):
if self.kind == "uniform": write[...] = np.random.rand(*reads)
elif self.kind == "gaussian": write[...] = np.random.randn(*reads)
else: raise RuntimeError
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, inputs):
return inputs
def typ_apply(self, input_types):
return TensorType(cgt.floatX, len(input_types))
def get_native_compile_info(self, input_types, devtype):
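        # NOTE: "native_cpu" is absent from available_impls above, so this path is not selected;
        # the code below mirrors Arange's implementation rather than generating random numbers.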
func_code=r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
size_t start=reads[0]->at<size_t>(0),
stop=reads[1]->at<size_t>(0),
step=reads[2]->at<size_t>(0);
size_t size = (stop-start)/step;
cgtArray* out = new cgtArray(1, &size, cgt_i8, cgtCPU);
for (int i=0; i < size; ++i) out->at<size_t>(i) = start+i*step;
return out;
}"""
return NativeCompileInfo(func_code=func_code)
# Elementwise
# ----------------------------------------------------------------
def _no_grad():
raise NonDifferentiable()
def _nu_sigmoid(x, out=None):
return np.reciprocal(1+np.exp(-x), out=out)
def _nu_iceil(x,out=None):
if out is None:
return np.ceil(x)
else:
np.ceil(x,out)
def _nu_ifloor(x,out=None):
if out is None:
return np.floor(x)
else:
np.floor(x,out)
def _nu_divide(x, y, out=None):
if x.dtype.kind != 'f': x = x.astype(cgt.floatX)
if out is None:
return np.divide(x,y)
else:
np.divide(x,y,out)
UnaryInfo = namedtuple("UnaryInfo", ("short","pyfunc","diff","typeinfo", "gradexpr", "cexpr"))
UNARY_INFO = {
"abs" : UnaryInfo( "abs", np.abs, True, 's', lambda x, y, gy: gy*cgt.sign(x), "fabs(x)"),
"ceil" : UnaryInfo( "ceil", np.ceil, False, 'i', lambda x, y, gy: _no_grad(), "ceil(x)"),
"cos" : UnaryInfo( "cos", np.cos, True, 'f', lambda x, y, gy: -gy*cgt.sin(x), "cos(x)"),
"exp" : UnaryInfo( "exp", np.exp, True, 'f', lambda x, y, gy: gy*cgt.exp(x), "exp(x)"),
"iceil" : UnaryInfo( "iceil", _nu_iceil, False, 'i', lambda x, y, gy: _no_grad(), "(int)ceil(x)"),
"ifloor" : UnaryInfo( "ifloor", _nu_ifloor, False, 'i', lambda x, y, gy: _no_grad(), "(int)floor(x)"),
"log" : UnaryInfo( "log", np.log, True, 'f', lambda x, y, gy: gy/x, "log(x)"),
"neg" : UnaryInfo( "negative", np.negative, True, 's', lambda x, y, gy: -gy, "(-x)"),
"sign" : UnaryInfo( "sign", np.sign, False, 's', lambda x, y, gy: _no_grad(), "2*(x>0)-1"),
"sin" : UnaryInfo( "sin", np.sin, True, 'f', lambda x, y, gy: gy*cgt.cos(x), "sin(x)"),
"square" : UnaryInfo( "square", np.square, True, 's', lambda x, y, gy: 2.0*gy*x, "x*x"),
"sqrt" : UnaryInfo( "sqrt", np.sqrt, True, 'f', lambda x, y, gy: gy/(2.0*y), "sqrt(x)"),
"tanh" : UnaryInfo( "tanh", np.tanh, True, 'f', lambda x, y, gy: gy*(1-cgt.square(y)), "tanh(x)"),
"sigmoid" : UnaryInfo( "sigmoid", _nu_sigmoid, True, 'f', lambda x, y, gy: gy*y*(1-y), "1.0/(1.0+exp(-x))"),
"conj" : UnaryInfo( "conj", np.conj, True, 'c', lambda x, y, gy: cgt.conj(gy), "conj(x)")
}
BinaryInfo = namedtuple("BinaryInfo", ("short", "pyfunc","commutes","diff","typeinfo","gradexpr", "cexpr"))
BINARY_INFO = {
#infix short pyfunc commutes diff typeinfo
"*" : BinaryInfo("multiply", np.multiply, True, (True,True), 'p', lambda x, y, z, gz: [y*gz,x*gz], "x*y"),
"+" : BinaryInfo("add", np.add, True, (True,True), 'p', lambda x, y, z, gz: [gz,gz], "x+y"),
"-" : BinaryInfo("subtract", np.subtract, False, (True,True), 'p', lambda x, y, z, gz: [gz,-gz], "x-y"),
"/" : BinaryInfo("divide", _nu_divide, False, (True,True), 'f', lambda x, y, z, gz: [gz/y,-gz*z/y], "(x+0.0)/y"),
"<" : BinaryInfo("less", np.less, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x<y"),
">" : BinaryInfo("greater", np.greater, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x>y"),
"<=" : BinaryInfo("less_equal", np.less_equal, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x<=y"),
">=" : BinaryInfo("greater_equal", np.greater_equal, False, (False,False), 'i1', lambda x, y, z, gz: _no_grad(), "x>=y"),
"**" : BinaryInfo("power", np.power, False, (True,True), 'p', lambda x, y, z, gz: [gz*y*cgt.power(x,y-1),gz*z*cgt.log(x)],"pow(x,y)"),
"==" : BinaryInfo("equal", lambda x,y,out : np.equal(x,y,out=out), True, (False, False), 'i1', lambda x, y, z, gz: _no_grad(), "x==y"),
"!=" : BinaryInfo("not_equal", lambda x,y,out : np.not_equal(x,y,out=out), True, (False, False), 'i1', lambda x, y, z, gz: _no_grad(), "x!=y"),
}
np2c = {"i1":"int8_t","i2":"int16_t","i4":"int32_t","i8":"int64_t",
"f4":"float","f8":"double","f16":"long double",
"c4" : "float complex", "c8" : "double complex", "c16" : "long double complex"}
class ElwiseUnary(Op):
"""
Elementwise unary operation
"""
available_impls = ("python","native_cpu","native_gpu")
def __init__(self, opname, info=None):
self.opname = opname
self.info = UNARY_INFO[opname] if info is None else info
def get_diff(self, _):
return [self.info.diff]
def __str__(self):
return self.info.short
def get_hash(self):
return utils.hash_seq1(self.opname)
def get_replacement(self, _newparents, _analysis):
return None
def pullback(self, (x,), y, gy): #pylint: disable=W0613
return [self.info.gradexpr(x, y, gy)]
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
typeinfo = self.info.typeinfo
intype = input_types[0].dtype
if typeinfo == 's':
out_type = intype
elif typeinfo == 'i':
out_type = _type_to_int(intype)
elif typeinfo == 'f':
out_type = cgt.floatX
elif typeinfo == 'c':
out_type = cgt.complexX
else:
assert typeinfo in (cgt.floatX, cgt.complexX, 'i1','i2','i4','i8')
out_type = typeinfo
return TensorType(out_type, input_types[0].ndim)
def get_py_func(self,_):
def f(reads, write):
self.info.pyfunc(reads[0], out=write)
return f
def get_native_compile_info(self, input_types, devtype):
info = self.info
out_dtype = self.typ_apply(input_types).dtype
d = dict(cdtype0=np2c[input_types[0].dtype], cdtype1=np2c[out_dtype], cexpr=info.cexpr)
if devtype == "cpu":
code = r"""
static inline %(cdtype1)s scalar_$function(%(cdtype0)s x) {return %(cexpr)s;}
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray* read = reads[0];
int s = read->size();
%(cdtype0)s* readdata = (%(cdtype0)s*)read->data();
%(cdtype1)s* writedata = (%(cdtype1)s*)write->data();
for (int i=0; i < s; ++i) {
writedata[i] = scalar_$function(readdata[i]);
}
}"""%d
return NativeCompileInfo(code, includes=["math.h"], link_flags="-lm")
elif devtype == "gpu":
cuda_code = r"""
#include "cgt_cuda.h"
__forceinline__ __device__ %(cdtype1)s $function(%(cdtype0)s x) {return %(cexpr)s;}
__global__ void ${function}_kernel(const size_t n, const %(cdtype0)s* in, %(cdtype1)s* out) {
CUDA_KERNEL_LOOP(i, n) {
out[i] = $function(in[i]);
}
}
void launchker_$function(size_t n, %(cdtype0)s* x, %(cdtype1)s* y) {
int num_blocks, num_threads;
cgt_get_bt(n, num_blocks, num_threads);
${function}_kernel<<<num_blocks, num_threads>>>(n, x, y);
}
"""%d
cpp_code = """
extern void launchker_${function}(size_t, %(cdtype0)s*, %(cdtype1)s*);
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray* read = reads[0];
size_t n = read->size();
launchker_$function(n, (%(cdtype0)s*)reads[0]->data(), (%(cdtype1)s*)write->data());
}"""%d
return NativeCompileInfo(cpp_code, includes=["math.h"], link_flags="-lm -lcudart",
gpu_deref_mask=(True,), extra_srcs=[SrcFile("cuda",cuda_code)])
else:
raise Unreachable
class ElwiseBinary(Op):
available_impls = ("python","native_cpu","native_gpu")
# +, -, *, /, <, ^, //
def __init__(self, opname, scalar_mask, info=None):
assert opname in BINARY_INFO
self.opname = opname
self.info = BINARY_INFO[opname] if info is None else info
self.scalar_mask = scalar_mask
def get_diff(self, _):
return BINARY_INFO[self.opname].diff
def get_hash(self):
return utils.hash_seq1(self.opname)
def get_expr(self, parent_exprs):
return "(%s %s %s)"%(parent_exprs[0], self.opname, parent_exprs[1])
def __str__(self):
return BINARY_INFO[self.opname].short
def get_replacement(self, parents, analysis):
l,r = parents
node2sv = analysis["node2sv"]
out = None
# The following replacements are allowed to return a scalar constant value
# Before returning, we'll broadcast it back to the right shape
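        # e.g. (sketch) fill(1., shp) * x simplifies to x, and const * const is folded numerically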
if isinstance(l.op,Fill) and not self.scalar_mask[1]:
out=Result(ElwiseBinary(self.opname, (True,False), self.info),
[cgt.constant(l.op.value), r])
elif isinstance(r.op,Fill) and not self.scalar_mask[0]:
out=Result(ElwiseBinary(self.opname, (False,True), self.info),
[l, cgt.constant(r.op.value)])
# if both have single value, apply this operation numerically and fill the result with it
elif l in node2sv and r in node2sv:
out =self.info.pyfunc(node2sv[l], node2sv[r])
        # if l has a single value and r is a constant, apply the operation numerically and return a Constant
        elif l in node2sv and isinstance(r.op, Constant):
            out = py_numeric_apply(self, [node2sv[l], r.op.value])
        # same as previous but swapped
        elif r in node2sv and isinstance(l.op, Constant):
            out = py_numeric_apply(self, [l.op.value, node2sv[r]])
elif self.opname == "*":
if l in node2sv and node2sv[l] == 1: out = r
if l in node2sv and node2sv[l] == -1: out = -r
if r in node2sv and node2sv[r] == 1: out = l
if r in node2sv and node2sv[r] == -1: out = -l
elif self.opname == "+":
if l in node2sv and node2sv[l] == 0: out = r
if r in node2sv and node2sv[r] == 0: out = l
elif self.opname == "**":
if r in node2sv and node2sv[r] == 1: out = l
if out is not None:
outtyp = self.typ_apply([p.typ for p in parents])
out = cgt.cast(out, outtyp.dtype)
if out.ndim==0 and outtyp.ndim>0:
ind4shape = 1 if self.scalar_mask[0] else 0
outshape = analysis["node2shape"][parents[ind4shape]]
out = cgt.fill(out, outshape)
return out
def pullback(self, (x, y), z, gz): #pylint: disable=W0613
gin = BINARY_INFO[self.opname].gradexpr(x, y, z, gz)
return [cgt.sum(gv) if (v.ndim==0 and gv.ndim > 0) else gv for (v,gv) in utils.safezip([x,y],gin)]
def shp_apply(self, inputs):
ind4shape = 1 if self.scalar_mask[0] else 0
return cgt.shape(inputs[ind4shape])
def typ_apply(self, input_types):
assert ((input_types[0].ndim==0) == self.scalar_mask[0]) and ((input_types[1].ndim==0) == self.scalar_mask[1])
if self.scalar_mask==(False,False):
assert input_types[0].ndim == input_types[1].ndim
# assertequaln(cgt.shape(input_types[0]),cgt.shape(input_types[1]),"shape mismatch at elementwise binary operation")
typeinfo = BINARY_INFO[self.opname].typeinfo
if typeinfo == 'p':
out_dtype = _promote(input_types[0].dtype, input_types[1].dtype)
elif typeinfo == 'f':
out_dtype = cgt.floatX
else:
out_dtype = typeinfo
ind4shape = 1 if self.scalar_mask[0] else 0
return TensorType(out_dtype, input_types[ind4shape].ndim)
def get_py_func(self, input_types):
def f(reads, write):
x,y = reads
if self.scalar_mask==(False,False):
if x.shape != y.shape:
raise RuntimeError("mismatched shapes %s %s. Note that implicit broadcasting isn't allowed. Use the broadcast(...) function"%(x.shape, y.shape))
self.info.pyfunc(x,y, out=write)
return f
def get_native_compile_info(self, input_types, devtype):
typ2 = self.typ_apply(input_types)
npdtype0 = input_types[0].dtype
npdtype1 = input_types[1].dtype
npdtype2 = typ2.dtype
ind4shape = 1 if self.scalar_mask[0] else 0
index0 = "0" if self.scalar_mask[0] else "i"
index1 = "0" if self.scalar_mask[1] else "i"
d = dict(cdtype0=np2c[npdtype0],cdtype1=np2c[npdtype1],cdtype2=np2c[npdtype2],
cexpr=self.info.cexpr,index0=index0,index1=index1,ind4shape=ind4shape)
if devtype == "cpu":
code = r"""
static inline %(cdtype2)s scalar_$function(%(cdtype0)s x, %(cdtype1)s y) {return %(cexpr)s;}
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
int s = reads[%(ind4shape)s]->size();
%(cdtype0)s* in0 = (%(cdtype0)s*)reads[0]->data();
%(cdtype1)s* in1 = (%(cdtype1)s*)reads[1]->data();
%(cdtype2)s* out = (%(cdtype2)s*)write->data();
cgt_check(write->size() == s, "Shape error in elementwise binary operation. You might be missing a call to cgt.broadcast(...)");
for (int i=0; i < s; ++i) {
out[i] = scalar_$function(in0[%(index0)s], in1[%(index1)s]);
}
}"""%d
return NativeCompileInfo(func_code=code, includes=["math.h"])
elif devtype == "gpu":
cuda_code = r"""
#include "cgt_cuda.h"
__forceinline__ __device__ %(cdtype2)s $function(%(cdtype0)s x, %(cdtype1)s y) {return %(cexpr)s;}
__global__ void ${function}_kernel(const size_t n, const %(cdtype0)s* x, const %(cdtype1)s* y, %(cdtype2)s* z) {
CUDA_KERNEL_LOOP(i, n) {
z[i] = $function(x[%(index0)s], y[%(index1)s]);
}
}
void launchker_$function(size_t n, %(cdtype0)s* x, %(cdtype1)s* y, %(cdtype2)s* z) {
int num_blocks,num_threads;
cgt_get_bt(n, num_blocks, num_threads);
${function}_kernel<<<num_blocks, num_threads>>>(n, x, y, z);
}
"""%d
cpp_code = """
extern void launchker_${function}(size_t, %(cdtype0)s*, %(cdtype1)s*, %(cdtype2)s*);
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
size_t n = reads[%(ind4shape)s]->size();
launchker_${function}(n, (%(cdtype0)s*)reads[0]->data(), (%(cdtype1)s*)reads[1]->data(), (%(cdtype2)s*)write->data());
}"""%d
return NativeCompileInfo(func_code=cpp_code, includes=["math.h"], link_flags="-lm -lcudart", gpu_deref_mask=(True,True),
extra_srcs=[SrcFile("cuda",cuda_code)])
def elwise_binary(opname, x, y):
(x, y) = map(as_node, (x, y))
scalar_mask = ((x.ndim == 0), (y.ndim == 0))
op = ElwiseBinary(opname, scalar_mask)
if (scalar_mask == (False, False)):
assert (x.ndim == y.ndim)
return Result(op, [x, y])
# Shape manip
# ----------------------------------------------------------------
class Size(Op):
"""
Return an element of the shape of a tensor
"""
return_type = "byval"
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [False]
def __str__(self):
return "Size{%i}"%self.axis
def get_py_func(self, input_types):
def f(reads):
return np.array(reads[0].shape[self.axis])
return f
def pullback(self, inputs, output, goutput):
raise NonDifferentiable
def shp_apply(self, _inputs):
return []
def typ_apply(self, _):
return TensorType('i8',0)
def get_replacement(self, inputs, _analysis):
x = inputs[0]
if x.is_input():
fixed_shape = x.get_fixed_shape()
if fixed_shape[self.axis] is not None:
return cgt.constant(fixed_shape[self.axis])
def get_closure(self):
return [("ax",ctypes.c_int,self.axis)]
def get_native_compile_info(self, input_types, devtype):
code = r"""
CGT_EXPORT_C cgtArray* $function(void* cl0, cgtArray** reads) {
$closure* cl = ($closure*)cl0;
cgtArray* in = reads[0];
cgtArray* out = new cgtArray(0, NULL, cgt_i8, cgtCPU);
out->at<size_t>(0) = in->shape()[cl->ax];
return out;
}"""
return NativeCompileInfo(code,closure_triples = self.get_closure())
class Reshape(Op):
available_impls = ("python","native_cpu")
return_type = "byval"
def get_diff(self, num_inputs):
return [True] + [False]*(num_inputs-1)
def get_py_func(self, input_types):
def f(reads):
out = reads[0].reshape(reads[1:])
if not out.flags.c_contiguous: out = out.copy()
return out
return f
def pullback(self, inputs, _out, gout):
return [cgt.reshape(gout, cgt.shape(inputs[0]))] + [None]*(len(inputs)-1)
def shp_apply(self, inputs):
return inputs[1:]
def typ_apply(self, input_types):
return TensorType(input_types[0].dtype, len(input_types)-1)
def get_closure(self, n_parents):
return [("ndim", ctypes.c_int,n_parents-1)]
def get_native_compile_info(self, input_types, devtype):
code = r"""
CGT_EXPORT_C cgtArray* $function($closure* cldata, cgtArray** reads) {
cgtArray* in = reads[0];
size_t* newshape = new size_t[cldata->ndim];
for (int i=0; i < cldata->ndim; ++i) newshape[i] = static_cast<size_t*>(reads[i+1]->data())[0];
cgtArray* out = new cgtArray(cldata->ndim, newshape, in->dtype(), in->devtype(), in->data(), false);
return out;
}
"""
return NativeCompileInfo(code, closure_triples=self.get_closure(len(input_types)))
class Concatenate(Op):
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, num_inputs):
return [True]*num_inputs
def get_py_func(self, input_types):
def f(reads, write): write[...] = np.concatenate(reads,axis=self.axis)
return f
def pullback(self, inputs, _output, gout):
start = 0
out = []
for x in inputs:
end = start + cgt.size(x, self.axis)
out.append(Result(GetSli(self.axis), [gout, start,end, 1]))
start = end
return out
def shp_apply(self, inputs):
out = cgt.shape(inputs[0])
out[self.axis] = cgt.add_multi([cgt.size(x,self.axis) for x in inputs])
return out
def typ_apply(self, input_types):
return TensorType(_promote_multi([x.dtype for x in input_types]), input_types[0].ndim)
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < in->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
inidxexpr = ",".join(["i%i"%ax for ax in xrange(x.ndim)])
outidxexpr = ",".join([("i%i+n" if ax == self.axis else "i%i")%ax for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
size_t n=0; // value along concat axis
for (int i=0; i < %(n_in)s; ++i) {
cgtArray* in = reads[i];
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = in->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
n += in->shape()[%(axis)s];
}
}
"""%dict(openloops=openloops, closeloops=closeloops, inidxexpr=inidxexpr, outidxexpr=outidxexpr,
n_in=len(input_types), cdtype=np2c[input_types[0].dtype],axis=self.axis)
return NativeCompileInfo(code)
# TODO testme
class Stack(Op):
available_impls = ("python","native_cpu")
def get_diff(self, num_inputs):
return [True for _ in xrange(num_inputs)]
def get_py_func(self, input_types):
def fn(reads, write):
write[:] = np.array(reads)
return fn
def pullback(self, inputs, output, goutput):
return [goutput[i] for i in xrange(len(inputs))]
def shp_apply(self, inputs):
return [cgt.constant(len(inputs))] + cgt.shape(inputs[0])
def typ_apply(self, input_types):
assert utils.allsame(input_types)
return TensorType(input_types[0].dtype, input_types[0].ndim+1)
def get_native_compile_info(self, input_types, devtype):
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
                for (int i=0; i < %(n_in)s; ++i) {
write->at<%(cdtype)s>(i) = reads[i]->at<%(cdtype)s>(0);
}
}
"""%dict(n_in = len(input_types),cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
class Repeat(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = axes
def get_diff(self, num_inputs):
return [True] + [False for _ in xrange(num_inputs-1)]
def get_py_func(self, input_types):
def f(reads, write):
arr = reads[0]
numreps = reads[1:]
shp = arr.shape
assert all(shp[i] == 1 for i in self.axes)
for (ax,numrep) in utils.safezip(self.axes, numreps):
arr = np.repeat(arr, numrep, ax)
np.copyto(write, arr)
return f
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
outidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
inidxexpr = ",".join(["0" if ax in self.axes else "i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *read=reads[0];
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = read->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
def get_replacement(self, parents, analysis):
if parents[0] in analysis["node2sv"]:
value = analysis["node2sv"][parents[0]]
shp = self.shp_apply(parents)
return Result(Fill(value), shp)
def pullback(self, inputs, output, goutput):
return [cgt.sum(goutput, self.axes, keepdims=True)] + [None]*(len(inputs)-1)
def shp_apply(self, inputs):
out = cgt.shape(inputs[0])
for (ax,rep) in utils.safezip(self.axes, inputs[1:]):
out[ax] = rep
return out
def typ_apply(self, input_types):
assert all(x.dtype == "i8" for x in input_types[1:])
return input_types[0]
class Transpose(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = axes
def get_diff(self, _):
return [True]
def get_py_func(self, input_types):
def f(reads, write):
np.copyto(write, reads[0].transpose(self.axes))
return f
def pullback(self, inputs, output, goutput):
return [cgt.transpose(goutput, utils.invert_perm(self.axes))]
def shp_apply(self, inputs):
inshape = cgt.shape(inputs[0])
return [inshape[ax] for ax in self.axes]
def typ_apply(self, input_types):
return input_types[0]
def __str__(self):
return "Transpose{%s}"%",".join(map(str, self.axes))
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
d = {}
d["openloops"] = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
d["closeloops"] = "}"*x.ndim
d["outidxexpr"] = ",".join(["i"+str(i) for i in xrange(x.ndim)])
d["inidxexpr"] = ",".join(["i"+str(i) for i in utils.invert_perm(self.axes)])
d["cdtype"] = np2c[x.dtype]
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *read = reads[0];
%(cdtype)s* indata = (%(cdtype)s*)read->data(), *outdata = (%(cdtype)s*)write->data();
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = read->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}"""%d
return NativeCompileInfo(code)
class Transport(Op):
available_impls = ("native_cpu","native_gpu")
def __init__(self, dev):
self.dev = dev
def typ_apply(self, input_types):
return input_types[0]
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def get_native_compile_info(self, _inputs, _devtype):
# This C code should only be run if the input and output devices differ.
# There should never be any no-op transports.
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtObject** reads, cgtObject* write) {
cgt_copy_object(write, reads[0]);
}
"""
return NativeCompileInfo(code)
# TODO save computation by removing negative freq components
class RFFT(Op):
available_impls = ("python",)
def __init__(self, axes):
self.axes = axes
def get_diff(self, num_inputs):
return [True] + [False]*(num_inputs-1)
def get_py_func(self, input_types):
def f(reads, write):
x = reads[0]
shp = map(int,reads[1:])
np.copyto(write, np.fft.fftn(x,shp,self.axes))
return f
def pullback(self, inputs, _outputs, goutput):
        return [cgt.real(Result(RFFT(self.axes),[goutput]+inputs[1:]))] + [None]*(len(inputs)-1)
def shp_apply(self, inputs):
out = cgt.shape(inputs[0])
for (ax,sz) in utils.safezip(self.axes, inputs[1:]):
out[ax]=sz
return out
def typ_apply(self, input_types):
x = input_types[0]
assert x.dtype==cgt.floatX
return TensorType(cgt.complexX,x.ndim)
class IRFFT(Op):
available_impls = ("python",)
def __init__(self, axes):
self.axes = axes
def get_diff(self, _):
return [True]
def get_py_func(self, input_types):
def f(reads, write):
x = reads[0]
shp = map(int,reads[1:])
slis = [slice(0,None) for _ in xrange(x.ndim)]
for (ax,s) in zip(self.axes,shp): slis[ax] = slice(0, s)
np.copyto(write, np.real(np.fft.ifftn(x,axes=self.axes)[slis]))
return f
def pullback(self, inputs, _outputs, goutput):
        return [Result(IRFFT(self.axes),[goutput])] # XXX is this right?
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, inputs):
return TensorType(cgt.floatX,inputs[0].ndim)
# Reductions
# ----------------------------------------------------------------
def gen_reduction_code(dtype, axes, ndim, reduction_expr, initval):
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < read->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(ndim)])
closeloops = "}"*ndim
inidxexpr = ",".join(["i"+str(i) for i in xrange(ndim)])
outidxexpr = ",".join(["0" if i in axes else "i"+str(i) for i in xrange(ndim)])
d = dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[dtype])
reduction_expr %= d
initval %= d
d["reduction_expr"] = reduction_expr
d["initval"] = initval
return r"""
static inline %(cdtype)s reduction_$function(%(cdtype)s x, %(cdtype)s y) {return %(reduction_expr)s;}
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *read=reads[0];
for (int i=0; i < write->size(); ++i) write->at<%(cdtype)s>(i) = %(initval)s;
%(openloops)s
%(cdtype)s x = write->at<%(cdtype)s>(%(outidxexpr)s);
%(cdtype)s y = read->at<%(cdtype)s>(%(inidxexpr)s) ;
write->at<%(cdtype)s>(%(outidxexpr)s) = reduction_$function(x, y);
%(closeloops)s
}
"""%d
class Sum(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = tuple(axes)
def get_diff(self, _):
return [True]
def __str__(self):
return "Sum{%s}"%(",".join(map(str,self.axes)))
def get_py_func(self, input_types):
def f(reads, write):
reads[0].sum(axis = self.axes or None, out=write, keepdims=True)
return f
def pullback(self, inputs, output, goutput):
return [Result(Repeat(self.axes), [goutput] + [cgt.size(inputs[0],ax) for ax in self.axes])]
def shp_apply(self, inputs):
x = inputs[0]
s = cgt.shape(x)
return [(cgt.constant(1) if i in self.axes else s[i]) for i in xrange(x.ndim)]
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
code = gen_reduction_code(input_types[0].dtype, self.axes, input_types[0].ndim, "x+y","0")
return NativeCompileInfo(code, includes=["string.h"])
class Max(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = tuple(axes)
def get_diff(self, _):
return [True]
def __str__(self):
return "Max{%s}"%(",".join(map(str,self.axes)))
def get_py_func(self, input_types):
def f(reads, write):
reads[0].max(axis=self.axes or None,keepdims=True, out=write)
return f
def pullback(self, inputs, output, goutput):
x = inputs[0]
inputpat = "x"*x.ndim
singpat = "".join(["1" if i in self.axes else "x" for i in xrange(x.ndim)])
bcpat = singpat+","+inputpat
return [cgt.broadcast("*", goutput, cgt.broadcast("==", output, x, bcpat), bcpat)]
# XXX doesn't deal well with corner case
def shp_apply(self, inputs):
x = inputs[0]
s = cgt.shape(x)
return [(cgt.constant(1) if i in self.axes else s[i]) for i in xrange(x.ndim)]
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
code = gen_reduction_code(input_types[0].dtype, self.axes, input_types[0].ndim, "fmax(x,y)", "-std::numeric_limits<%(cdtype)s>::max()")
return NativeCompileInfo(code, includes=["string.h","limits","math.h"])
class Argmax(Op):
available_impls = ("python",)
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [False]
def __str__(self):
return "Argmax{%s}"%self.axis
def get_py_func(self, input_types):
def f(reads, write):
write.flat[:] = reads[0].argmax(axis=self.axis)
return f
def shp_apply(self, inputs):
x = inputs[0]
s = cgt.shape(x)
return [(cgt.constant(1) if i == self.axis else s[i]) for i in xrange(x.ndim)]
def typ_apply(self, inputs):
return TensorType('i8', inputs[0].ndim)
# re: native impl, this is a tricky one, since it requires some scratch space
# to store the max values. probably just do a alloc/dealloc
# Slicing
# ----------------------------------------------------------------
class GetSli(Op):
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [True,False,False,False]
def get_py_func(self, input_types):
def f(reads, write):
x,start,stop,step=reads
if step<0 and stop==-1: stop=None
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices[self.axis] = slice(start,stop,step)
write[:] = x[slices]
return f
def pullback(self, inputs, output, goutput):
z = cgt.zeros_like(inputs[0])
z.op.tag = id(output) # @TAG_HACK
return [Result(IncSli(self.axis), [z] + inputs[1:] + [goutput])] + [None]*3
def shp_apply(self, inputs):
arr, start, stop, step = inputs
s = cgt.shape(arr) #pylint: disable=W0621
newshape = copy.copy(s)
newshape[self.axis] = cgt.ceil_divide(stop - start, step)
return newshape
def typ_apply(self, input_types):
assert _list_is_valid_sli(input_types[1:])
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
outidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
inidxexpr = ",".join([("start + step*i%(ax)s" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *in=reads[0];
size_t start = reads[1]->at<size_t>(0);
size_t step = reads[3]->at<size_t>(0);
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = in->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
class IncSli(Op):
available_impls = ("python","native_cpu")
writes_to_input = 0
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [True,False,True,True]
def get_py_func(self, input_types):
def f(reads, write):
x, start, stop, step, y=reads
if step<0 and stop==-1: stop=None
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices[self.axis] = slice(start,stop,step)
if x.data != write.data:
utils.warn("incsli not inplace!")
np.copyto(write, x)
write[slices] += y
return f
def pullback(self, inputs, output, goutput):
raise NotImplementedError
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(
["for (int i%(ax)s=0; i%(ax)s < inc->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
incidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
outidxexpr = ",".join([("start + step*i%(ax)s" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *in=reads[0], *inc = reads[4];
long start = reads[1]->at<size_t>(0);
long step = reads[3]->at<size_t>(0);
cgt_assert(in->size() == write->size());
if (write->data() != in->data()) cgt_copy_array(write, in);
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) += inc->at<%(cdtype)s>(%(incidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype], incidxexpr=incidxexpr)
return NativeCompileInfo(code)
class GetFancySli(Op):
available_impls = ("python","native_cpu")
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [True,False]
def get_py_func(self, input_types):
def f(reads, write):
x,inds=reads
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices[self.axis] = inds
write[:] = x[slices]
return f
def pullback(self, inputs, output, goutput):
z = cgt.zeros_like(inputs[0])
z.op.tag = id(output) # @TAG_HACK
return [Result(IncFancySli(self.axis), [z, inputs[1], goutput]), None]
def shp_apply(self, inputs):
arr, inds = inputs
s = cgt.shape(arr) #pylint: disable=W0621
newshape = copy.copy(s)
newshape[self.axis] = cgt.size(inds,0)
return newshape
def typ_apply(self, input_types):
assert input_types[1] == TensorType('i8', 1)
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < write->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
outidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
inidxexpr = ",".join([("inds->at<size_t>(i%(ax)s)" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *x=reads[0], *inds=reads[1];
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) = x->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, inidxexpr=inidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
class IncFancySli(Op):
available_impls = ("python","native_cpu")
writes_to_input = 0
def __init__(self, axis):
self.axis = axis
def get_diff(self, _):
return [True,False,True,True]
def get_py_func(self, input_types):
def f(reads, write):
x, inds, y=reads
slices = [slice(None,None,None) for _ in xrange(x.ndim)]
slices2 = [slice(None,None,None) for _ in xrange(x.ndim)]
if x.data != write.data:
utils.warn("incsli not inplace!")
np.copyto(write, x)
for (i,ind) in enumerate(inds):
slices[self.axis]=ind
slices2[self.axis]=i
write[slices] += y[slices2]
return f
def pullback(self, inputs, output, goutput):
raise NotImplementedError
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(
["for (int i%(ax)s=0; i%(ax)s < inc->shape()[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
incidxexpr = ",".join(["i%(ax)s"%dict(ax=ax) for ax in xrange(x.ndim)])
outidxexpr = ",".join([("inds->at<size_t>(i%(ax)s)" if ax==self.axis else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *x=reads[0], *inds=reads[1], *inc = reads[2];
cgt_assert(x->size() == write->size());
if (write->data() != x->data()) cgt_copy_array(write, x);
%(openloops)s
write->at<%(cdtype)s>(%(outidxexpr)s) += inc->at<%(cdtype)s>(%(incidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, closeloops=closeloops,
cdtype=np2c[input_types[0].dtype], incidxexpr=incidxexpr)
return NativeCompileInfo(code)
class GetFlatIndices(Op):
available_impls = ("python","native_cpu")
def get_diff(self, _):
return [True,False]
def get_py_func(self, input_types):
def f(reads, write):
np.copyto(write, reads[0].flat[reads[1]])
return f
def pullback(self, inputs, output, goutput):
x,inds = inputs
ginput = cgt.zeros_like(x)
return [Result(IncFlatIndices(), [ginput, inds, goutput]), None]
def shp_apply(self, inputs):
return cgt.shape(inputs[1])
def typ_apply(self, inputs):
assert inputs[1].ndim == 1 and dtype_kind(inputs[1].dtype) == 'i'
return TensorType(inputs[0].dtype,1)
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xk, cgtArray* z) {
cgtArray *x=xk[0], *k=xk[1];
for (int i=0; i < z->size(); ++i) {
z->at<%(cdtype)s>(i) = x->at<%(cdtype)s>(k->at<size_t>(i));
}
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
class IncFlatIndices(Op):
available_impls = ("python","native_cpu")
writes_to_input = 0
def get_diff(self, _):
return [True,False,True]
def get_py_func(self, input_types):
def f(reads, write):
x,inds,y = reads
if x.data != write.data:
utils.warn("incsli not inplace!")
np.copyto(write, x)
for (i,ind) in enumerate(inds):
write.flat[ind] += y[i]
# This is unvectorized so it gives the right answer when inds are non-unique
# faster vectorized version: write[inds] += y
return f
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xkp, cgtArray* write) {
cgtArray *x=xkp[0], *k=xkp[1], *p=xkp[2];
if (write->data() != x->data()) cgt_memcpy(cgtCPU, cgtCPU, write, x, write->nbytes());
for (int i=0; i < p->size(); ++i) {
write->at<%(cdtype)s>(k->at<size_t>(i)) += p->at<%(cdtype)s>(i);
}
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
class Flip(Op):
available_impls = ("python","native_cpu")
def __init__(self, axes):
self.axes = axes
def get_diff(self, _):
return [True]
def get_py_func(self, input_types):
def f(reads, write):
x = reads[0]
slices = [slice(0,None,None) for _ in xrange(x.ndim)]
for ax in self.axes: slices[ax] = slice(None,None,-1)
np.copyto(write, x[slices])
return f
def pullback(self, inputs, output, goutput):
return [cgt.flip(goutput, self.axes)]
def shp_apply(self, inputs):
return cgt.shape(inputs[0])
def typ_apply(self, input_types):
return input_types[0]
def get_native_compile_info(self, input_types, devtype):
x = input_types[0]
openloops = " ".join(["for (int i%(ax)s=0; i%(ax)s < shape[%(ax)s]; ++i%(ax)s) {"%dict(ax=ax) for ax in xrange(x.ndim)])
closeloops = "}"*x.ndim
inidxexpr = ",".join(["i%i"%ax for ax in xrange(x.ndim)])
outidxexpr = ",".join([("shape[%(ax)s] - 1 - i%(ax)s" if ax in self.axes else "i%(ax)s")%dict(ax=ax) for ax in xrange(x.ndim)])
code = r"""
CGT_EXPORT_C void $function(void* cldata, cgtArray** reads, cgtArray* write) {
cgtArray *in=reads[0], *out=write;
cgt_assert(in->size() == out->size());
const size_t* shape = in->shape();
%(openloops)s
out->at<%(cdtype)s>(%(outidxexpr)s) = in->at<%(cdtype)s>(%(inidxexpr)s);
%(closeloops)s
}
"""%dict(openloops=openloops, outidxexpr=outidxexpr, closeloops=closeloops,
inidxexpr=inidxexpr, cdtype=np2c[input_types[0].dtype])
return NativeCompileInfo(code)
# Linalg
# ----------------------------------------------------------------
class Mul21(Op):
available_impls = ("python","native_cpu")
def __init__(self, tA):
self.tA = tA
def get_py_func(self, input_types):
def f(reads, write):
x,y = reads
if self.tA: x = x.T
x.dot(y, out=write)
return f
def get_replacement(self, inputs, analysis):
if inputs[1] in analysis["node2sv"]:
return cgt.sum(inputs[0],0 if self.tA else 1) * analysis["node2sv"][inputs[1]]
def pullback(self, inputs, _output, goutput):
return [cgt.outer(goutput,inputs[1]), Result(Mul21(not self.tA), [inputs[0],goutput])]
def shp_apply(self, inputs):
assertequal1(cgt.size(inputs[0],0 if self.tA else 1),cgt.size(inputs[1],0),
"shape mismatch at matrix-vector multiplication")
return [cgt.size(inputs[0], 1 if self.tA else 0)]
def typ_apply(self, input_types):
return TensorType(input_types[0].dtype, 1)
def get_closure(self):
return [("tA",ctypes.c_bool, self.tA),("handle", ctypes.c_void_p, 0)]
# gemv docs: https://software.intel.com/en-us/node/520750
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
try:
letter = {"f4":"s","f8":"d","c8":"c","c16":"z"}[npdtype]
except KeyError:
raise MethodNotDefined("Dtype %s not supported by this BLAS. Falling back to numpy"%npdtype)
if devtype == "cpu":
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** Ax, cgtArray* y) {
cgtArray *A=Ax[0], *x=Ax[1];
int lda = A->shape()[1];
int M = A->shape()[0];
int N = A->shape()[1];
const %(cdtype)s alpha=1, beta=0;
int incx = 1, incy = 1;
cblas_%(letter)sgemv(CblasRowMajor, (CBLAS_TRANSPOSE)(cl->tA + 111), M, N, alpha, (%(cdtype)s*)A->data(), lda, (%(cdtype)s*)x->data(),
incx, beta, (%(cdtype)s*)y->data(), incy);
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
elif devtype == "gpu":
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** Ax, cgtArray* y) {
if (!cl->handle) cublasCreate_v2((cublasHandle_t*)&cl->handle);
cgtArray *A=Ax[0], *x=Ax[1];
int lda = A->shape()[1];
int M = A->shape()[0];
int N = A->shape()[1];
const %(cdtype)s alpha=1, beta=0;
int incx = 1, incy = 1;
cublas_%(letter)sgemv((cublasHandle_t)cl->handle, (cublasOperation_t)(!cl->tA), N, M, alpha, (%(cdtype)s*)A->data(), lda, (%(cdtype)s*)x->data(),
incx, beta, (%(cdtype)s*)y->data(), incy);
}"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cblas.h"], link_flags="-lopenblas", closure_triples = self.get_closure())
def get_expr(self, (xexpr,yexpr)):
return u"%s%s \u00D7 %s"%(xexpr, u"\u1d57" if self.tA else "", yexpr)
class Mul22(Op):
@property
def available_impls(self):
return ("python",) if cgt.get_precision() == "quad" else ("python","native_cpu","native_gpu")
def __init__(self, tA, tB):
self.tA = tA
self.tB = tB
def get_py_func(self, input_types):
def f(reads, write):
x,y = reads
if self.tA: x = x.T
if self.tB: y = y.T
x.dot(y, out=write)
return f
def pullback(self, inputs, output, goutput):
"""
mul(F,F) Aij Bjk -> Cik
g[0]: GAij = mul(F,T) GCik Bjk
g[1]: GBjk = mul(T,F) Aij GCik
mul(F,T) Aij Bkj -> Cik
g[0]: GAij = mul(F,F) GCik Bkj
g[1]: GBkj = mul(T,F) GCik Aij
mul(T,F) Aji Bjk -> Cik
g[0]: GAji = mul(F,T) Bjk GCik
g[1]: GBjk = mul(F,F) Aji GCik
mul(T,T) Aji Bkj -> Cik
g[0]: GAji = mul(T,T) Bkj GCik
g[1]: GBkj = mul(T,T) GCik Aji
"""
A,B = inputs
GC = goutput
if (self.tA, self.tB) == (False,False):
return [Result(Mul22(False,True), [GC, B]),
Result(Mul22(True,False), [A, GC])]
elif (self.tA, self.tB) == (False,True):
return [Result(Mul22(False,False), [GC, B]),
Result(Mul22(True,False), [GC, A])]
elif (self.tA, self.tB) == (True,False):
return [Result(Mul22(False,True), [B, GC]),
Result(Mul22(False,False), [A, GC])]
elif (self.tA, self.tB) == (True,True):
return [Result(Mul22(True,True), [B, GC]),
Result(Mul22(True,True), [GC, A])]
def shp_apply(self, inputs):
return [cgt.size(inputs[0], 1 if self.tA else 0),cgt.size(inputs[1],0 if self.tB else 1)]
def typ_apply(self, input_types):
# assertequal1(cgt.size(inputs[0],0 if self.tA else 1),cgt.size(inputs[1],1 if self.tB else 0),
# "shape mismatch at matrix-matrix multiplication")
# TODO put shape check somewhere
assert input_types[0].dtype==cgt.floatX and input_types[1].dtype==cgt.floatX
return input_types[0]
def get_closure(self):
return [("tA",ctypes.c_bool, self.tA), ("tB",ctypes.c_bool, self.tB), ("handle",ctypes.c_void_p, 0)]
# best gemm docs: https://software.intel.com/en-us/node/520775
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
try:
letter = {"f4":"s","f8":"d","c8":"c","c16":"z"}[npdtype]
except KeyError:
raise MethodNotDefined("Dtype %s not supported by this BLAS. Falling back to numpy"%npdtype)
if devtype == "cpu":
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** AB, cgtArray* C) {
cgtArray *A=AB[0], *B=AB[1];
int lda = A->shape()[1], ldb = B->shape()[1], ldc = C->shape()[1];
int M = C->shape()[0];
int N = C->shape()[1];
int K = A->shape()[cl->tA ? 0 : 1];
const %(cdtype)s alpha=1, beta=0;
cblas_%(letter)sgemm(CblasRowMajor, (CBLAS_TRANSPOSE)(cl->tA + 111), (CBLAS_TRANSPOSE)(cl->tB + 111), M, N, K, alpha, (%(cdtype)s*)A->data(), lda, (%(cdtype)s*)B->data(),
ldb, beta, (%(cdtype)s*)C->data(), ldc);
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cblas.h"], link_flags="-lopenblas", closure_triples=self.get_closure())
elif devtype == "gpu":
letter = letter.upper()
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** AB, cgtArray* C) {
if (!cl->handle) cublasCreate_v2((cublasHandle_t*)&cl->handle);
cgtArray *A=AB[0], *B=AB[1];
int lda = A->shape()[1], ldb = B->shape()[1], ldc = C->shape()[1];
int M = C->shape()[0];
int N = C->shape()[1];
int K = A->shape()[cl->tA ? 0 : 1];
const %(cdtype)s alpha=1, beta=0;
CUBLAS_CHECK(cublas%(letter)sgemm_v2((cublasHandle_t)cl->handle, (cublasOperation_t)cl->tB, (cublasOperation_t)cl->tA, N, M, K, &alpha, (%(cdtype)s*)B->data(), ldb, (%(cdtype)s*)A->data(),
lda, &beta, (%(cdtype)s*)C->data(), ldc));
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cublas_v2.h","cgt_cuda.h"], link_flags="-lcublas -lcudart", closure_triples=self.get_closure())
def get_expr(self, (xexpr,yexpr)):
return u"%s%s \u00D7 %s%s"%(xexpr, u"\u1d57" if self.tA else "", yexpr, u"\u1d57" if self.tB else "")
def __repr__(self):
return "Mul22{%s,%s}"%("T" if self.tA else "N", "T" if self.tB else "N")
class BatchedMul22(Op):
available_impls = ("python","native_cpu")
def __init__(self, tA, tB):
self.tA = tA
self.tB = tB
def get_py_func(self, input_types):
def f((x,y), z):
for (xmat, ymat, zmat) in zip(x,y, z):
if self.tA: xmat = xmat.T
if self.tB: ymat = ymat.T
xmat.dot(ymat, out=zmat)
return f
def pullback(self, inputs, output, goutput):
A,B = inputs
GC = goutput
if (self.tA, self.tB) == (False,False):
return [Result(BatchedMul22(False,True), [GC, B]),
Result(BatchedMul22(True,False), [A, GC])]
elif (self.tA, self.tB) == (False,True):
return [Result(BatchedMul22(False,False), [GC, B]),
Result(BatchedMul22(True,False), [GC, A])]
elif (self.tA, self.tB) == (True,False):
return [Result(BatchedMul22(False,True), [B, GC]),
Result(BatchedMul22(False,False), [A, GC])]
elif (self.tA, self.tB) == (True,True):
return [Result(BatchedMul22(True,True), [B, GC]),
Result(BatchedMul22(True,True), [GC, A])]
def shp_apply(self, inputs):
return [cgt.size(inputs[0],0), cgt.size(inputs[0], 2 if self.tA else 1),cgt.size(inputs[1],1 if self.tB else 2)]
def typ_apply(self, input_types):
# assert inputs[0].dtype==cgt.floatX and inputs[1].dtype==cgt.floatX
return input_types[0]
def get_closure(self):
return [("tA",ctypes.c_bool, self.tA), ("tB",ctypes.c_bool, self.tB)]
# <COPIED FROM Mul22> but incremented all dimensions
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
try:
letter = {"f4":"s","f8":"d","c8":"c","c16":"z"}[npdtype]
except KeyError:
raise MethodNotDefined("Dtype %s not supported by this BLAS. Falling back to numpy"%npdtype)
code = r"""
CGT_EXPORT_C void $function($closure* cl, cgtArray** AB, cgtArray* C) {
cgtArray *A=AB[0], *B=AB[1];
int P = A->shape()[0];
int lda = A->shape()[1+1], ldb = B->shape()[1+1], ldc = C->shape()[1+1];
int M = C->shape()[1+0];
int N = C->shape()[1+1];
int K = A->shape()[1+(cl->tA ? 0 : 1)];
const %(cdtype)s alpha=1, beta=0;
for (int i=0; i < P; ++i) {
cblas_%(letter)sgemm(CblasRowMajor, (CBLAS_TRANSPOSE)(cl->tA + 111), (CBLAS_TRANSPOSE)(cl->tB + 111), M, N, K, alpha, (%(cdtype)s*)A->data()+i*A->stride(0), lda,
(%(cdtype)s*)B->data()+i*B->stride(0), ldb, beta, (%(cdtype)s*)C->data()+ i*C->stride(0), ldc);
}
}
"""%dict(letter=letter, cdtype = np2c[npdtype])
return NativeCompileInfo(code, includes=["cblas.h"], link_flags="-lopenblas", closure_triples=self.get_closure())
# </COPIED>
class Outer(Op):
available_impls = ("python","native_cpu")
def get_py_func(self, input_types):
def f(reads, write):
write[:] = np.outer(reads[0], reads[1])
return f
def pullback(self, inputs, _output, goutput):
        return [goutput.dot(inputs[1]), inputs[0].dot(goutput)]
def shp_apply(self, inputs):
return [cgt.size(inputs[0],0), cgt.size(inputs[1],0)]
def typ_apply(self, input_types):
assert input_types[0] == input_types[1]
return TensorType(input_types[0].dtype, 2)
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xy, cgtArray* z) {
cgtArray *x=xy[0], *y=xy[1];
for (int i=0; i < x->size(); ++i) {
for (int j=0; j < y->size(); ++j) {
z->at<%(cdtype)s>(i,j) = x->at<%(cdtype)s>(i) * y->at<%(cdtype)s>(j);
}
}
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
# BLAS 1
# ----------------------------------------------------------------
class Dot(Op):
available_impls = ("python","native_cpu")
return_type = "byref"
def get_py_func(self, input_types):
def f(reads,write):
write[...] = np.dot(reads[0], reads[1])
return f
def pullback(self, inputs, _output, goutput):
x, y = inputs
return [y*goutput, x*goutput]
def shp_apply(self, _):
return []
def typ_apply(self, input_types):
assert input_types[0] == input_types[1]
return TensorType(cgt.floatX, 0)
def get_native_compile_info(self, input_types, devtype):
npdtype = input_types[0].dtype
code = r"""
CGT_EXPORT_C void $function(void**, cgtArray** xy, cgtArray* z) {
cgtArray *x=xy[0], *y=xy[1];
%(cdtype)s out = 0;
for (int i=0; i < x->size(); ++i) {
out += x->at<%(cdtype)s>(i) * y->at<%(cdtype)s>(i);
}
z->at<%(cdtype)s>(0) = out;
}
"""%dict(cdtype = np2c[npdtype])
return NativeCompileInfo(code)
# Composition
# ----------------------------------------------------------------
class Composition(Op):
available_impls = ("python",)
return_type = "byval"
def __init__(self, inputs, outputs):
self._inputs = inputs
self._outputs = outputs
analysis = analyze(outputs)
node2shape = analysis["node2shape"]
self._shp = tuple(node2shape[x] for x in outputs)
        assert all(x.is_input() for x in inputs)
self._nodes = list(topsorted(outputs))
dio = set(differentiably_influences(outputs))
wrt = [x for x in inputs if x in dio]
self._goutput = [Argument(x.typ) for x in outputs]
gwrt = pullback(self._outputs, self._goutput, wrt)
wrtidx = 0
self._gin = []
        for x in inputs:
            if x in dio:
                self._gin.append(gwrt[wrtidx])
                wrtidx += 1
            else:
                self._gin.append(None)
self._diff = [node in dio for node in self._inputs]
self._out_typs = [x.typ for x in outputs]
def get_diff(self, _):
return self._diff
def get_py_func(self, input_types):
# TODO testme
f = cgt.compilation.function(self._inputs, self._outputs)
def py_impl(num_inputs):
return tuple(f(num_inputs))
return py_impl
def pullback(self, inputs, output, goutput):
# repl = {}
# repl.update(utils.safezip(self._inputs, inputs))
# repl.update(utils.safezip(self._outputs, output))
# repl.update(utils.safezip(self._goutput, goutput))
# return clone(self._gin, replace=repl)
        gwrt = pullback([output], [goutput], inputs)
        return gwrt
def shp_apply(self, inputs):
out = clone(self._shp, replace=dict(utils.safezip(self._inputs, inputs)))
return out
def typ_apply(self, input_types):
assert input_types == [x.typ for x in self._inputs]
return TupleType(*self._out_typs)
@property
def n_out(self):
return len(self._outputs)
def shapes(self):
return self._shp
def expand(self, inputs):
return clone(self._outputs, replace=dict(utils.safezip(self._inputs, inputs)))
def get_nodes(self):
return self._nodes
class TupleIndex(Op):
available_impls = ("python","native_cpu","native_gpu")
return_type="byval"
def __init__(self, idx):
self.idx = idx
def get_py_func(self, input_types):
def f(reads):
return reads[0][self.idx]
return f
def shp_apply(self, inputs):
return cgt.shape(inputs[0])[self.idx]
def typ_apply(self, input_types):
intype = input_types[0]
assert isinstance(intype, TupleType)
return intype[self.idx]
def get_closure(self, _inputs):
return [("idx",ctypes.c_int, self.idx)]
def get_native_compile_info(self, input_types, devtype):
code=r"""
CGT_EXPORT_C cgtObject* $function($closure* cldata, cgtTuple** reads) {
return reads[0]->getitem(cldata->idx);
}"""
return NativeCompileInfo(code, closure_triples=self.get_closure(input_types))
class MakeTuple(Op):
available_impls = ("python",)
return_type="byval"
def get_py_func(self, input_types):
def f(inputs):
return tuple(inputs)
return f
def shp_apply(self, inputs):
return tuple(cgt.shape(x) for x in inputs)
def typ_apply(self, input_types):
assert all(isinstance(t, TensorType) for t in input_types), "Can only create tuples of tensors" # @TUPLES_OF_TENSORS
return TupleType(*input_types)
def unpack(tup):
return [Result(TupleIndex(i),[tup]) for i in xrange(len(tup.typ))]
# Assertion and debug operations
# ----------------------------------------------------------------
# XXX currently not being used / tested anywhere
class Assertion(Op):
"""
Assertion gets evaluated when the graph is executed, and it prints out a stack trace on failure
"""
available_impls = ("python",)
def __init__(self, msg):
self.stack = traceback.extract_stack()[:-2]
self.msg = msg
def typ_apply(self, input_types):
x, = input_types
assert x.ndim==0 and x.dtype=='i1'
return TensorType('i8',0)
def shp_apply(self, _):
return []
def get_py_func(self, input_types):
def f(reads, _):
x = reads[0]
if not x.item():
self.display_error()
return f
def display_error(self):
print "Stack trace at failed assertion:"
print "**************************"
traceback.print_list(self.stack)
print "**************************"
raise AssertionError("Assertion failed. Message: %s. Above, you can find the stack trace of the failed node"%self.msg)
class DebugFunc(Op):
"""
Call a function when the graph is executed
"""
available_impls = ("python",)
def __init__(self, yourfunc):
self.yourfunc = yourfunc
def typ_apply(self, _):
return TensorType('i8',0)
def shp_apply(self, _):
return []
def get_py_func(self, input_types):
def f(_, __):
def fn(*reads):
self.yourfunc(*reads)
return f
def assert_(x,msg=None):
dbgnode = Result(Assertion(msg or "(empty)"), [x])
print "assertion", CACHER.simplify1(dbgnode)
# add_debug_node(dbgnode)
def dbg_call(yourfunc, *args):
add_debug_node(Result(DebugFunc(yourfunc), list(args)))
def add_debug_node(x):
if debug_context.global_context is not None:
debug_context.global_context.nodes.append(x)
class debug_context(object):
global_context = None # TODO: what is this?
def __init__(self):
self.nodes = []
def __enter__(self):
assert debug_context.global_context is None, "can only be in one debug context at a time"
debug_context.global_context = self
return self
def __exit__(self, *_args):
debug_context.global_context = None
# ================================================================
# Graph Optimization
# ================================================================
def analyze(outputs):
with disable_cacher():
analysis = init_analysis()
for node in topsorted(outputs):
do_analysis(node, analysis)
return analysis
def simplify_and_analyze(outputs):
assert isinstance(outputs, list)
analysis = init_analysis()
repl = {}
for output in outputs: update_simplify_map(output, analysis, repl)
return [repl[node] for node in outputs], analysis, repl
def process_top_stack_item_and_maybe_get_replacement(stack, analysis, repl): #pylint: disable=W0621
"""
Helper function for update_simplify_map, which performs an update to the
stack, which stores the state of the simplification computation.
Suppose the top element of the stack is `(orig, node)`, where `orig` is
the original node and `node` is simpler than `orig` but not fully simplified.
We can only guarantee that `node` is fully simplified after all of its parents are in the
map `repl`.
    This function iterates over the parents of `node` and looks for one that is not in `repl`.
    If we find one, call it `par`: put `(orig, node)` back on the stack and `(par, par)` on top of it, and return.
If all of the parents are already in `repl`, then we can try to compute a newly simplified version of `orig`.
"""
(orig,node) = stack.pop()
if node.is_input():
return (orig,node)
else:
for par in node.parents:
if par not in repl:
stack.append((orig,node))
stack.append((par,par))
return
newparents = [repl[p] for p in node.parents]
newnode = node.clone(newparents)
newnewnode = maybe_replace(newnode, analysis, repl)
if newnewnode is None:
return (orig,newnode)
else:
assert newnewnode.typ == orig.typ
if newnewnode in repl:
return (orig, newnewnode)
else:
stack.append((orig, newnewnode))
def update_simplify_map(node, analysis, repl):
"""
Non-recursive version of simplification algorithm.
Compute a fully simplified version of `node` and its ancestors
When this function finishes, `repl[node]` is the simplified version of `node`,
and repl[anc] is the simplified version of each node `anc` which is an ancestor of `node`.
    Moreover, `analysis` contains the hash, shape, and scalar-value information (as filled in by `do_analysis`) for each simplified node.
This algorithm is most simply described recursively, and the implementation below is
a conversion of the recursive algorithm into a stack-based algorithm (to avoid
stack overflows).
    (TODO: bring back recursive version for reference; a hedged sketch now follows this function.)
The stack contains pairs `(orig, replacement_candidate)`, where `orig` is a node in the original
graph (i.e., an ancestor of `node`) and `replacement_candidate` is a simplified version of it, but
not necessarily fully simplified. We do a depth-first search on the graph, computing for each node
the simplified version of all its parents, then we try to simplify that node.
One tricky aspect is that once we've simplified the parents, we might apply some identity at that node.
If that happens, we obtain a new node with non-simplified parents, so we put that on the stack.
"""
stack = [(node,node)] #pylint: disable=W0621
while stack:
# Given (orig, node) on top of the stack, we visit one un-simplified parent of node,
# putting it on the stack if necessary. If all parents are already simplified, then we can
# check if any replacements can be applied. If we can, we return this pair and add it to our
# dict `repl` which stores the current replacements.
maybe_pair = process_top_stack_item_and_maybe_get_replacement(stack, analysis, repl)
if maybe_pair:
(orig,node) = maybe_pair #pylint: disable=W0633
# if not node.is_input():
# for shpcmp in node.op.shp_apply(node.parents):
# update_simplify_map(shpcmp, analysis, repl, True)
do_analysis(node, analysis)
repl[orig] = node
repl[node] = node
assert orig.ndim==node.ndim
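# Hedged reference sketch, not part of the original module: the recursion that
# `update_simplify_map` above unrolls onto an explicit stack (see the TODO in its
# docstring). It fills `repl` with roughly the same entries, but can exceed Python's
# recursion limit on deep graphs, which is exactly why the iterative form is used.
def _update_simplify_map_recursive_sketch(node, analysis, repl):
    if node in repl:
        return
    if node.is_input():
        final = node
    else:
        # simplify all parents first, then rebuild this node on top of them
        for par in node.parents:
            _update_simplify_map_recursive_sketch(par, analysis, repl)
        newnode = node.clone([repl[p] for p in node.parents])
        replacement = maybe_replace(newnode, analysis, repl)
        if replacement is None:
            final = newnode
        else:
            # the replacement may itself have un-simplified parents
            _update_simplify_map_recursive_sketch(replacement, analysis, repl)
            final = repl[replacement]
    do_analysis(final, analysis)
    repl[node] = final
    repl[final] = final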
def do_analysis(node, analysis):
node2hash = analysis["node2hash"]
node2shape = analysis["node2shape"]
node2sv = analysis["node2sv"]
# -- HASH --
h = node.get_hash(node2hash)
node2hash[node] = h
analysis["hash2node"][h] = node
# -- SHAPE --
if node.is_input():
node2shape[node] = cgt.shape(node)
elif isinstance(node.op, TupleIndex):
node2shape[node] = node2shape[node.parents[0]][node.op.idx]
else:
newparents = node.parents
node2shape[node] = node.op.shp_apply(newparents)
# assert all([s.dtype == "i8" for s in node2shape[node]])
assert len(node2shape[node]) == node.ndim or isinstance(node.typ,TupleType)
# -- SCALAR VALUE --
if not node.is_input():
op = node.op
if isinstance(op, Fill):
node2sv[node] = op.value
elif isinstance(op, ConstantTensor) and utils.is_singleton(op.value):
node2sv[node] = op.value.flat[0]
elif isinstance(op, Repeat) and newparents[0] in node2sv:
node2sv[node] = node2sv[newparents[0]]
elif isinstance(op, (ElwiseUnary, ElwiseBinary)) and all(p in node2sv for p in newparents):
node2sv[node] = node.op.info.pyfunc(*(node2sv[p] for p in newparents))
VERBOSE_OPTIMIZATION = False
def maybe_replace(node, analysis, repl):
if node.is_input(): return
if isinstance(node.op, Constant): return
# -- CSE --
node2hash = analysis["node2hash"]
h = node.get_hash(node2hash)
if h in analysis["hash2node"]:
newnode = analysis["hash2node"][h]
if VERBOSE_OPTIMIZATION: print "Did CSE", node
assert newnode in repl and newnode.op.__class__ == node.op.__class__
return newnode
parents = node.parents
# -- CONSTANT PROP --
# ASSUMPTION: the only type of nullary ops that we can propagate this way
# are subclasses of Constant
if len(parents) > 0 and all(isinstance(par.op, Constant) for par in parents):
c = cgt.compilation.get_callable(node.op, [par.typ for par in parents], "cpu", True)
try:
out = cgt.constant(py_numeric_apply(node, [p.op.value for p in parents]))
if VERBOSE_OPTIMIZATION: print "Did constant prop on %s"%node.op
return out
except MethodNotDefined:
utils.warn("Couldn't get a python impl of %s"%node.op)
# -- SIZE --
if isinstance(node.op, Size):
s = analysis["node2shape"][parents[0]][node.op.axis]
if not (isinstance(s.op, Size) and s.parents[0] == node.parents[0]):
if VERBOSE_OPTIMIZATION: print "Did size prop"
return s
# -- OP IDENTITY --
maybe_repl = node.op.get_replacement(parents, analysis)
if maybe_repl is not None:
if VERBOSE_OPTIMIZATION: print "Applied op-specific identity for %s"%node.op
return maybe_repl
return None
def simplify(xs):
"""
xs : a variable or list of variables
Compute equivalent expression(s) in which simplifications have been applied
"""
assert isinstance(xs, list)
return simplify_and_analyze(xs)[0]
def simplify1(x):
return simplify([x])[0]
def init_analysis():
return {"node2hash":{},"hash2node":{},"node2shape":{},"node2sv":{},"repl":{}}
class AnalysisCacher(object):
def __init__(self):
self.analysis = init_analysis()
self.repl = {}
def simplify(self, xs):
with disable_cacher(): # not actually necessary but seems reasonable
for x in xs: self.simplify1(x)
return [self.repl[x] for x in xs]
def simplify1(self, x):
assert isinstance(x, Node)
with disable_cacher():
update_simplify_map(x, self.analysis, self.repl)
return self.repl[x]
CACHER = AnalysisCacher()
CACHER_ENABLED = False
class disable_cacher(object):
def __enter__(self):
global CACHER_ENABLED
self.prevstate = CACHER_ENABLED
CACHER_ENABLED = False
def __exit__(self, *args):
global CACHER_ENABLED
CACHER_ENABLED = self.prevstate
def assert1(x, msg=""):
if not CACHER_ENABLED: return
b = CACHER.simplify1(x)
if isinstance(b.op, Constant):
if not b.op.value:
raise AssertionError(msg)
def assertn(xs,msg=""):
if not CACHER_ENABLED: return
bs = CACHER.simplify(xs)
    for b in bs:
        if isinstance(b.op, Constant):
            if not np.all(b.op.value):
                raise AssertionError(msg)
def _noderepr(x):
if isinstance(x.op, ConstantTensor):
return x.op.value.item()
elif isinstance(x.op, ConstantTuple):
return x.op.value
else:
return "?"
def assertequal1(x,y,msg):
if not CACHER_ENABLED: return
x = as_node(x)
y = as_node(y)
simpx = CACHER.simplify1(x)
simpy = CACHER.simplify1(y)
if isinstance(simpx.op,Constant) and isinstance(simpy.op,Constant) and simpx.op.value != simpy.op.value:
raise AssertionError(msg + "\nlhs: %s. rhs: %s"%(_noderepr(simpx), _noderepr(simpy)))
def assertequaln(xs,ys,msg):
if not CACHER_ENABLED: return
xs = map(as_node,xs)
ys = map(as_node,ys)
simpxs = CACHER.simplify(xs)
simpys = CACHER.simplify(ys)
for (x,y) in utils.safezip(simpxs,simpys):
if isinstance(x.op,Constant) and isinstance(y.op,Constant) and x.op.value != y.op.value:
raise AssertionError(msg + "\nlhs: %s. rhs: %s"%(tuple(map(_noderepr,simpxs)), tuple(map(_noderepr,simpys))))
# ================================================================
# Graph Traversal
# ================================================================
def topsorted(outputs):
assert isinstance(outputs, (list,tuple))
marks = {}
out = []
stack = [] #pylint: disable=W0621
# i: node
# jidx = number of children visited so far from that node
# marks: state of each node, which is one of
# 0: haven't visited
# 1: have visited, but not done visiting children
# 2: done visiting children
for x in outputs:
stack.append((x,0))
while stack:
(i,jidx) = stack.pop()
if jidx == 0:
m = marks.get(i,0)
if m == 0:
marks[i] = 1
elif m == 1:
raise ValueError("not a dag")
else:
continue
ps = i.parents
if jidx == len(ps):
marks[i] = 2
out.append(i)
else:
stack.append((i,jidx+1))
j = ps[jidx]
stack.append((j,0))
return out
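# Illustrative trace (toy graph, not real cgt nodes): with input a, nodes b and c whose
# only parent is a, and d with parents [b, c], topsorted([d]) returns [a, b, c, d] --
# every parent precedes the nodes that consume it. Popping a node that is still in
# state 1 (i.e. on the current DFS path) means there is a cycle, hence "not a dag".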
def count_nodes(outputs):
"""
Given a list of output nodes, compute the number of ancestors
"""
if isinstance(outputs, Node): outputs = [outputs]
return len(list(topsorted(outputs)))
def clone(nodes, replace=None):
if isinstance(nodes, Node): return _clone_list([nodes], replace)[0]
else: return _clone_list(list(nodes), replace)
def _clone_list(nodes, replace):
assert isinstance(nodes, list)
if replace is None: replace = {}
else:
assert isinstance(replace, dict)
replace = replace.copy()
for (k,v) in replace.iteritems():
if not isinstance(v, Node):
replace[k] = as_node(v)
for node in topsorted(nodes):
if node in replace:
pass
elif node.is_input():
replace[node] = node
else:
replace[node] = node.clone([replace[p] for p in node.parents])
return [replace[node] for node in nodes]
def alloc_from_shp(shp, typ):
if isinstance(shp, tuple):
return tuple([alloc_from_shp(shpel,typel) for (shpel,typel) in utils.safezip(shp,typ)])
else:
return np.empty(shp,typ.dtype)
def alloc_output(node, vals):
typ = node.typ
shp = get_numeric_shape_fun(node)(vals)
return alloc_from_shp(shp,typ)
def _flatten_lists(lis):
out = []
sizes = []
for li in lis:
out.extend(li)
sizes.append(len(li))
return out,sizes
def _unflatten_list(li,sizes):
start = 0
out = []
for sz in sizes:
out.append(li[start:start+sz])
start += sz
return out
def get_numeric_shape_fun(node):
args = [make_argument(p.typ) for p in node.parents]
# outputs = simplify(node.op.shp_apply(args))
syshape = node.op.shp_apply(args)
if isinstance(syshape, list):
istuple = False
elif isinstance(syshape, tuple):
assert all(isinstance(elem,list) for elem in syshape)
istuple = True
syshape,sizes = _flatten_lists(syshape)
else:
raise ValueError("shape should be a list or tuple of lists. got %s"%syshape)
singletuple = not isinstance(syshape, list)
if singletuple: # XXX
syshape = [cgt.make_tuple(*syshape)]
nodes = topsorted(syshape)
def fn(vals):
node2val = {node:val for (node,val) in utils.safezip(args, vals)}
for node in nodes:
if not node.is_argument():
node2val[node] = py_numeric_apply(node, [node2val[p] for p in node.parents])
nushape = [node2val[node] for node in syshape]
if istuple:
return tuple(_unflatten_list(nushape, sizes))
else:
return nushape
return fn
def py_numeric_apply(node, vals):
try:
callable = cgt.compilation.get_callable(node.op, [par.typ for par in node.parents],"cpu", True)
except MethodNotDefined:
print 'Op %s has no Python implementation' % repr(node.op)
raise
if node.op.return_type == "byval":
out = callable.call(vals)
else:
out = alloc_output(node,vals)
callable.call(vals, out)
return out
class NonDifferentiable(Exception):
pass
class Disconnected(Exception):
pass
class Todo(Exception):
pass
class ShapeError(Exception):
pass
class AllocationError(Exception):
pass
class MethodNotDefined(Exception):
pass
class Unreachable(Exception):
pass
def get_cgt_src_root():
return osp.dirname(osp.dirname(osp.realpath(__file__)))
# ================================================================
# Global config
# ================================================================
_CONFIG = None
def get_config(force_reload = False):
"""
Return the global configuration, which is loaded from your rcfile
and the environment variables CGT_FLAGS
"""
global _CONFIG
if _CONFIG is None or force_reload:
_CONFIG = _load_config()
return _CONFIG
def _load_config():
from thirdparty.configobj import ConfigObj
from thirdparty.validate import Validator
rcfileloc = osp.join(osp.expanduser("~/.cgtrc"))
specfilename = osp.join(get_cgt_src_root(), "cgtrc_spec.ini")
config = ConfigObj(rcfileloc, configspec=specfilename)
val = Validator()
test = config.validate(val,preserve_errors=True)
if test is not True:
for (k,v) in test.items():
if v is not True:
utils.error("%s: %s in %s"%(k,v.message,rcfileloc))
raise ValueError
envflags = os.getenv("CGT_FLAGS")
if envflags:
pairs = envflags.split(",")
for pair in pairs:
lhs,rhs = pair.split("=")
assert lhs in config, "Unrecognized config option %s provided"%lhs
oldrhs = config[lhs]
config[lhs] = rhs
assert isinstance(rhs, (str,bool,int,float,list)), "You set %s=%s but rhs is invalid"%(lhs, rhs)
if isinstance(oldrhs, str): pass
elif isinstance(oldrhs, bool): config[lhs] = config.as_bool(lhs)
elif isinstance(oldrhs, int): config[lhs] = config.as_int(lhs)
elif isinstance(oldrhs, float): config[lhs] = config.as_float(lhs)
elif isinstance(oldrhs, list): config[lhs] = config.as_list(lhs)
config["default_device"] = Device()
cgt.set_precision(config["precision"])
return config
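# Illustrative override via the environment (option names taken from the code above and
# from scoped_update_config's docstring; values are only examples):
#   CGT_FLAGS="precision=double,backend=native" python yourscript.py
# Each comma-separated lhs=rhs pair must name an option that already exists in cgtrc,
# and rhs is coerced to the type of the value it replaces (bool/int/float/list).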
def reset_config():
"""
Reload config from CGT_FLAGS and cgtrc
I.e., discard values set at runtime, e.g. through update_config and set_precision
"""
get_config(True)
def update_config(**kws):
"""
Globally update the provided configuration variables
"""
config = get_config()
for (name,val) in kws.iteritems():
if name not in config:
raise ValueError("%s is not a valid config option"%name)
config[name] = val
class scoped_update_config(object):
"""
example usage:
    with scoped_update_config(precision='single', backend='native', parallel=True):
        ...
    Changes the relevant config variables within the scope of the `with` statement, and changes them
    back when we leave this scope.
"""
def __init__(self, **kw):
self.kw = kw
config = get_config()
self.prevsettings = {}
for k in kw.iterkeys():
if k in config:
self.prevsettings[k] = config[k]
else:
raise ValueError("%s is not a valid config option"%k)
def __enter__(self):
config = get_config()
config.update(self.kw)
cgt.set_precision(config["precision"])
def __exit__(self, *args):
config = get_config()
config.update(self.prevsettings)
# TAGS
# Just a few labels in the code for assumptions we're making now
# which we might change later.
# @TUPLES_OF_TENSORS : assumes all elements of TupleType object are TensorType
# @TAG_HACK : a non-local interaction between inplace optimization and other optimizations.
# Certain operations created by pullback should be performed in place, but other optimizations
# like CSE make that impossible. So we add an extra field that associates arrays of zeros with the node that
# they represent the gradient for, to prevent CSE from cutting out these nodes
# @SHAPE_CHECK : eventually we should check the shape while building up the graph, but this functionality isn't set up in a fully coherent way yet
|
briancheung/cgt
|
cgt/core.py
|
Python
|
mit
| 122,529
|
[
"Gaussian",
"VisIt"
] |
c130844a5b805239320b24d22a31038e7415bb442dc0971a63e50c9e73bbdfc6
|
import os
import sys
import errno
import argparse
from subprocess import call, PIPE, Popen
HOME_DIR = os.environ.get('HOME')
def install_pysimm(prefix):
os.chdir(prefix)
if os.path.isdir('pysimm'):
print('pysimm directory already exists...assuming it is the pysimm repository and continuing...')
else:
call('git clone https://github.com/polysimtools/pysimm', shell=True)
if not os.path.isfile('pysimm/complete_install.py'):
print('assumption about pysimm repository existing was wrong; exiting...')
exit()
call("echo export PYTHONPATH='$PYTHONPATH':{} >> {}".format(os.path.join(prefix, 'pysimm'),
os.path.join(HOME_DIR, '.bashrc')),
shell=True)
call("echo export PATH='$PATH':{} >> {}".format(os.path.join(prefix, 'pysimm', 'bin'),
os.path.join(HOME_DIR, '.bashrc')),
shell=True)
def apt_update():
call('apt-get update', shell=True)
def apt_install(*packages):
call('apt-get -y install {}'.format(' '.join(packages)),
shell=True)
def install_lammps(prefix, *packages):
os.chdir(prefix)
call('git clone -b unstable https://github.com/lammps/lammps.git lammps', shell=True)
os.chdir(os.path.join(prefix,'lammps','src'))
for package in packages:
call('make yes-{}'.format(package), shell=True)
call('make mpi', shell=True)
call("echo export PATH='$PATH':{} >> {}".format(os.path.join(prefix, 'lammps', 'src'),
os.path.join(HOME_DIR,'.bashrc')),
shell=True)
call("echo export LAMMPS_EXEC={} >> {}".format(os.path.join(prefix, 'lammps', 'src', 'lmp_mpi'),
os.path.join(HOME_DIR,'.bashrc')),
shell=True)
def install_ambertools(dir_):
os.chdir(dir_)
call("echo export AMBERHOME={} >> {}".format(dir_, os.path.join(HOME_DIR,'.bashrc')),
shell=True)
os.environ['AMBERHOME'] = dir_
call('./configure gnu', shell=True)
call('make install', shell=True)
call("echo export ANTECHAMBER_EXEC={} >> {}".format(os.path.join(dir_, 'bin', 'antechamber'),
os.path.join(HOME_DIR,'.bashrc')),
shell=True)
def install_openbabel():
apt_install('libopenbabel4', 'libopenbabel-dev', 'openbabel', 'python-openbabel')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--apt-update', dest='apt_update', action='store_true', default=False)
parser.add_argument('--apt-install', dest='apt_install', action='store_true', default=False)
parser.add_argument('--pysimm', dest='pysimm_prefix', default=HOME_DIR)
parser.add_argument('--lammps', dest='lammps_prefix', default=None)
parser.add_argument('--lammps-packages', dest='lammps_packages', nargs='*',
default=['molecule', 'class2', 'kspace', 'user-misc', 'misc', 'qeq', 'manybody'])
parser.add_argument('--amber-tools', dest='ambertools_dir', default=None)
parser.add_argument('--openbabel', dest='openbabel', action='store_true', default=False)
return parser.parse_args()
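# Illustrative invocation (flag names as defined in parse_args above; the paths are only
# examples, and the apt-get steps assume root privileges):
#   python complete_install.py --apt-update --apt-install --pysimm $HOME --lammps $HOME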
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
if __name__ == '__main__':
args = parse_args()
if bool(args.apt_update):
apt_update()
if args.pysimm_prefix:
if args.apt_install:
apt_install('git', 'python-numpy', 'python-matplotlib')
mkdir_p(args.pysimm_prefix)
install_pysimm(args.pysimm_prefix)
if args.lammps_prefix:
if args.apt_install:
apt_install('make git g++', 'libopenmpi-dev', 'openmpi-bin')
mkdir_p(args.lammps_prefix)
install_lammps(args.lammps_prefix, *args.lammps_packages)
if args.ambertools_dir:
if args.apt_install:
apt_install('make', 'csh', 'gfortran', 'libopenmpi-dev', 'openmpi-bin', 'xorg-dev', 'xserver-xorg')
install_ambertools(args.ambertools_dir)
if args.openbabel:
install_openbabel()
os.chdir(HOME_DIR)
|
polysimtools/pysimm
|
complete_install.py
|
Python
|
mit
| 4,322
|
[
"Amber",
"LAMMPS"
] |
4a0f47535b7c57b0853b74850e80d8e49f1f07eb7be9dd34e37a1af9a373cf00
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import re
import sys
import uuid
import warnings
from collections import Counter
from itertools import product
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Union
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver import qcdb
from . import optproc
from .exceptions import TestComparisonError, ValidationError, UpgradeHelper
## Python basis helps
@staticmethod
def _pybuild_basis(mol,
key=None,
target=None,
fitrole='ORBITAL',
other=None,
puream=-1,
return_atomlist=False,
quiet=False):
if key == 'ORBITAL':
key = 'BASIS'
def _resolve_target(key, target):
"""Figure out exactly what basis set was intended by (key, target)
"""
horde = qcdb.libmintsbasisset.basishorde
if not target:
if not key:
key = 'BASIS'
target = core.get_global_option(key)
if target in horde:
return horde[target]
return target
# Figure out what exactly was meant by 'target'.
resolved_target = _resolve_target(key, target)
    # resolved_target needs to be either a string or function for pyconstruct.
    # if a string, pyconstruct searches for a gbs file with that name.
    # if a function, it needs to apply a basis to each atom.
bs, basisdict = qcdb.BasisSet.pyconstruct(mol.to_dict(),
key,
resolved_target,
fitrole,
other,
return_dict=True,
return_atomlist=return_atomlist)
if return_atomlist:
atom_basis_list = []
for atbs in basisdict:
atommol = core.Molecule.from_dict(atbs['molecule'])
lmbs = core.BasisSet.construct_from_pydict(atommol, atbs, puream)
atom_basis_list.append(lmbs)
return atom_basis_list
if isinstance(resolved_target, str):
basisdict['name'] = basisdict['name'].split('/')[-1].replace('.gbs', '')
if callable(resolved_target):
basisdict['name'] = resolved_target.__name__.replace('basisspec_psi4_yo__', '').upper()
if not quiet:
core.print_out(basisdict['message'])
if 'ECP' in basisdict['message']:
core.print_out(' !!! WARNING: ECP capability is in beta. Please check occupations closely. !!!\n\n')
if basisdict['key'] is None:
basisdict['key'] = 'BASIS'
psibasis = core.BasisSet.construct_from_pydict(mol, basisdict, puream)
return psibasis
core.BasisSet.build = _pybuild_basis
## Python wavefunction helps
@staticmethod
def _core_wavefunction_build(mol, basis=None):
if basis is None:
basis = core.BasisSet.build(mol)
elif isinstance(basis, str):
basis = core.BasisSet.build(mol, "ORBITAL", basis)
wfn = core.Wavefunction(mol, basis)
# Set basis for density-fitted calculations to the zero basis...
# ...until the user explicitly provides a DF basis.
wfn.set_basisset("DF_BASIS_SCF", core.BasisSet.zero_ao_basis_set())
return wfn
core.Wavefunction.build = _core_wavefunction_build
def _core_wavefunction_get_scratch_filename(self, filenumber):
""" Given a wavefunction and a scratch file number, canonicalizes the name
so that files can be consistently written and read """
fname = os.path.split(os.path.abspath(core.get_writer_file_prefix(self.molecule().name())))[1]
psi_scratch = core.IOManager.shared_object().get_default_path()
return os.path.join(psi_scratch, fname + '.' + str(filenumber))
core.Wavefunction.get_scratch_filename = _core_wavefunction_get_scratch_filename
@staticmethod
def _core_wavefunction_from_file(wfn_data: Union[str, Dict, Path]) -> core.Wavefunction:
r"""Build Wavefunction from data.
Parameters
----------
wfn_data
If a dict, use data directly. Otherwise, path-like passed to :py:func:`numpy.load`
to read from disk.
Returns
-------
Wavefunction
A deserialized Wavefunction object
"""
# load the wavefunction from file
if isinstance(wfn_data, dict):
pass
elif isinstance(wfn_data, str):
if not wfn_data.endswith(".npy"):
wfn_data = wfn_data + ".npy"
wfn_data = np.load(wfn_data, allow_pickle=True).item()
else:
# Could be path-like or file-like, let `np.load` handle it
wfn_data = np.load(wfn_data, allow_pickle=True).item()
# variable type specific dictionaries to be passed into C++ constructor
wfn_matrix = wfn_data['matrix']
wfn_vector = wfn_data['vector']
wfn_dimension = wfn_data['dimension']
wfn_int = wfn_data['int']
wfn_string = wfn_data['string']
wfn_boolean = wfn_data['boolean']
wfn_float = wfn_data['float']
wfn_floatvar = wfn_data['floatvar']
wfn_matrixarr = wfn_data['matrixarr']
# reconstruct molecule from dictionary representation
wfn_molecule = wfn_data['molecule']
molecule = core.Molecule.from_dict(wfn_molecule)
# get basis set name and spherical harmonics boolean
basis_name = wfn_string['basisname']
if ".gbs" in basis_name:
basis_name = basis_name.split('/')[-1].replace('.gbs', '')
basis_puream = wfn_boolean['basispuream']
basisset = core.BasisSet.build(molecule, 'ORBITAL', basis_name, puream=basis_puream)
# change some variables to psi4 specific data types (Matrix, Vector, Dimension)
for label in wfn_matrix:
array = wfn_matrix[label]
wfn_matrix[label] = core.Matrix.from_array(array, name=label) if array is not None else None
for label in wfn_vector:
array = wfn_vector[label]
wfn_vector[label] = core.Vector.from_array(array, name=label) if array is not None else None
for label in wfn_dimension:
tup = wfn_dimension[label]
wfn_dimension[label] = core.Dimension.from_list(tup, name=label) if tup is not None else None
for label in wfn_matrixarr:
array = wfn_matrixarr[label]
wfn_matrixarr[label] = core.Matrix.from_array(array, name=label) if array is not None else None
# make the wavefunction
wfn = core.Wavefunction(molecule, basisset, wfn_matrix, wfn_vector, wfn_dimension, wfn_int, wfn_string,
wfn_boolean, wfn_float)
# some of the wavefunction's variables can be changed directly
for k, v in wfn_floatvar.items():
wfn.set_variable(k, v)
for k, v in wfn_matrixarr.items():
wfn.set_variable(k, v)
return wfn
core.Wavefunction.from_file = _core_wavefunction_from_file
def _core_wavefunction_to_file(wfn: core.Wavefunction, filename: str = None) -> Dict:
    """Converts a Wavefunction object to a dictionary of NumPy-friendly data
Parameters
----------
wfn
A Wavefunction or inherited class
filename
An optional filename to write the data to
Returns
-------
dict
A dictionary and NumPy representation of the Wavefunction.
"""
    # collect the wavefunction's variables in a dictionary indexed by variable type
# some of the data types have to be made numpy-friendly first
if wfn.basisset().name().startswith("anonymous"):
raise ValidationError("Cannot serialize wavefunction with custom basissets.")
wfn_data = {
'molecule': wfn.molecule().to_dict(),
'matrix': {
'Ca': wfn.Ca().to_array() if wfn.Ca() else None,
'Cb': wfn.Cb().to_array() if wfn.Cb() else None,
'Da': wfn.Da().to_array() if wfn.Da() else None,
'Db': wfn.Db().to_array() if wfn.Db() else None,
'Fa': wfn.Fa().to_array() if wfn.Fa() else None,
'Fb': wfn.Fb().to_array() if wfn.Fb() else None,
'H': wfn.H().to_array() if wfn.H() else None,
'S': wfn.S().to_array() if wfn.S() else None,
'X': wfn.lagrangian().to_array() if wfn.lagrangian() else None,
'aotoso': wfn.aotoso().to_array() if wfn.aotoso() else None,
'gradient': wfn.gradient().to_array() if wfn.gradient() else None,
'hessian': wfn.hessian().to_array() if wfn.hessian() else None
},
'vector': {
'epsilon_a': wfn.epsilon_a().to_array() if wfn.epsilon_a() else None,
'epsilon_b': wfn.epsilon_b().to_array() if wfn.epsilon_b() else None,
'frequencies': wfn.frequencies().to_array() if wfn.frequencies() else None
},
'dimension': {
'doccpi': wfn.doccpi().to_tuple(),
'frzcpi': wfn.frzcpi().to_tuple(),
'frzvpi': wfn.frzvpi().to_tuple(),
'nalphapi': wfn.nalphapi().to_tuple(),
'nbetapi': wfn.nbetapi().to_tuple(),
'nmopi': wfn.nmopi().to_tuple(),
'nsopi': wfn.nsopi().to_tuple(),
'soccpi': wfn.soccpi().to_tuple()
},
'int': {
'nalpha': wfn.nalpha(),
'nbeta': wfn.nbeta(),
'nfrzc': wfn.nfrzc(),
'nirrep': wfn.nirrep(),
'nmo': wfn.nmo(),
'nso': wfn.nso(),
'print': wfn.get_print(),
},
'string': {
'name': wfn.name(),
'module': wfn.module(),
'basisname': wfn.basisset().name()
},
'boolean': {
'PCM_enabled': wfn.PCM_enabled(),
'same_a_b_dens': wfn.same_a_b_dens(),
'same_a_b_orbs': wfn.same_a_b_orbs(),
'density_fitted': wfn.density_fitted(),
'basispuream': wfn.basisset().has_puream()
},
'float': {
'energy': wfn.energy(),
'efzc': wfn.efzc(),
'dipole_field_x': wfn.get_dipole_field_strength()[0],
'dipole_field_y': wfn.get_dipole_field_strength()[1],
'dipole_field_z': wfn.get_dipole_field_strength()[2]
},
'floatvar': wfn.scalar_variables(),
'matrixarr': {k: v.to_array() for k, v in wfn.array_variables().items()}
} # yapf: disable
if filename is not None:
if not filename.endswith('.npy'): filename += '.npy'
np.save(filename, wfn_data, allow_pickle=True)
return wfn_data
core.Wavefunction.to_file = _core_wavefunction_to_file
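# Illustrative round trip (the filename is only an example): for a converged wavefunction `wfn`,
#   wfn.to_file('h2o_scf')                               # writes h2o_scf.npy
#   wfn2 = psi4.core.Wavefunction.from_file('h2o_scf')   # rebuilds an equivalent Wavefunction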
## Python JK helps
@staticmethod
def _core_jk_build(orbital_basis: core.BasisSet, aux: core.BasisSet = None, jk_type: str = None, do_wK: bool = None, memory: int = None) -> core.JK:
"""
Constructs a Psi4 JK object from an input basis.
Parameters
----------
orbital_basis
Orbital basis to use in the JK object.
aux
Optional auxiliary basis set for density-fitted tensors. Defaults
to the DF_BASIS_SCF if set, otherwise the correspond JKFIT basis
to the passed in `orbital_basis`.
jk_type
Type of JK object to build (DF, Direct, PK, etc). Defaults to the
current global SCF_TYPE option.
Returns
-------
JK
Uninitialized JK object.
Example
-------
jk = psi4.core.JK.build(bas)
jk.set_memory(int(5e8)) # 4GB of memory
jk.initialize()
...
    jk.C_left_add(matrix)
jk.compute()
jk.C_clear()
...
"""
optstash = optproc.OptionsState(["SCF_TYPE"])
if jk_type is not None:
core.set_global_option("SCF_TYPE", jk_type)
if aux is None:
if core.get_global_option("SCF_TYPE") == "DF":
aux = core.BasisSet.build(orbital_basis.molecule(), "DF_BASIS_SCF", core.get_option("SCF", "DF_BASIS_SCF"),
"JKFIT", orbital_basis.name(), orbital_basis.has_puream())
else:
aux = core.BasisSet.zero_ao_basis_set()
if (do_wK is None) or (memory is None):
jk = core.JK.build_JK(orbital_basis, aux)
else:
jk = core.JK.build_JK(orbital_basis, aux, bool(do_wK), int(memory))
optstash.restore()
return jk
core.JK.build = _core_jk_build
## Grid Helpers
def _core_vbase_get_np_xyzw(Vpot):
"""
Returns the x, y, z, and weights of a grid as a tuple of NumPy array objects.
"""
x_list = []
y_list = []
z_list = []
w_list = []
    # Loop over every block in the potential
for b in range(Vpot.nblocks()):
# Obtain the block
block = Vpot.get_block(b)
# Obtain the x, y, and z coordinates along with the weight
x_list.append(block.x())
y_list.append(block.y())
z_list.append(block.z())
w_list.append(block.w())
x = np.hstack(x_list)
y = np.hstack(y_list)
z = np.hstack(z_list)
w = np.hstack(w_list)
return (x, y, z, w)
core.VBase.get_np_xyzw = _core_vbase_get_np_xyzw
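# Illustrative usage (assumes a DFT wavefunction, whose V_potential() holds the grid):
#   Vpot = wfn.V_potential()
#   x, y, z, w = Vpot.get_np_xyzw()   # flat NumPy arrays, one entry per grid point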
## Python other helps
def set_options(options_dict: Dict[str, Any], verbose: int = 1) -> None:
"""Sets Psi4 options from an input dictionary.
Parameters
----------
options_dict
Dictionary where keys are "option_name" for global options or
"module_name__option_name" (double underscore separation) for
option local to "module_name". Values are the option value. All
are case insensitive.
verbose
Control print volume.
"""
optionre = re.compile(r'\A(?P<module>\w+__)?(?P<option>\w+)\Z', re.IGNORECASE)
rejected = {}
for k, v, in options_dict.items():
mobj = optionre.match(k.strip())
module = mobj.group('module').upper()[:-2] if mobj.group('module') else None
option = mobj.group('option').upper()
if module:
if ((module, option, v) not in [('SCF', 'GUESS', 'READ')]) and ((module, option) not in [('PCM', 'INPUT')]):
# TODO guess/read exception is for distributed driver. should be handled differently.
try:
core.set_local_option(module, option, v)
except RuntimeError as err:
rejected[k] = (v, err)
if verbose > 1:
print('Setting: core.set_local_option', module, option, v)
if (module, option) == ("PCM", "INPUT"):
pcm_helper(v)
else:
try:
core.set_global_option(option, v)
except RuntimeError as err:
rejected[k] = (v, err)
if verbose > 1:
print('Setting: core.set_global_option', option, v)
if rejected:
raise ValidationError(f'Error setting options: {rejected}')
    # TODO could subclass ValidationError and append rejected so that run_json could handle remnants.
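# Illustrative call showing the key convention documented above (one global option and
# one local to the SCF module; option names and values are only examples):
#   set_options({"basis": "cc-pvdz",
#                "scf_type": "df",
#                "scf__reference": "uhf"})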
def set_module_options(module: str, options_dict: Dict[str, Any]) -> None:
"""
Sets Psi4 module options from a module specification and input dictionary.
"""
warnings.warn(
"Using `psi4.set_module_options(<module>, {keys: vals})` instead of `psi4.set_options({<module>__<keys>: <vals>})` is deprecated, and in 1.5 it will stop working\n",
category=FutureWarning,
stacklevel=2)
for k, v, in options_dict.items():
core.set_local_option(module.upper(), k.upper(), v)
## OEProp helpers
def pcm_helper(block: str):
"""
Passes multiline string *block* to PCMSolver parser.
Parameters
----------
block
multiline string with PCM input in PCMSolver syntax.
"""
import pcmsolver
with NamedTemporaryFile(mode="w+t", delete=True) as fl:
fl.write(block)
fl.flush()
parsed_pcm = pcmsolver.parse_pcm_input(fl.name)
with NamedTemporaryFile(mode="w+t", delete=False) as fl:
fl.write(parsed_pcm)
core.set_local_option("PCM", "PCMSOLVER_PARSED_FNAME", fl.name)
def basname(name):
"""Imitates BasisSet.make_filename() without the gbs extension"""
return name.lower().replace('+', 'p').replace('*', 's').replace('(', '_').replace(')', '_').replace(',', '_')
def temp_circular_import_blocker():
pass
def basis_helper(block, name='', key='BASIS', set_option=True):
"""For PsiAPI mode, forms a basis specification function from *block*
and associates it with keyword *key* under handle *name*. Registers
the basis spec with Psi4 so that it can be applied again to future
molecules. For usage, see mints2, mints9, and cc54 test cases. Unless
*set_option* is False, *name* will be set as current active *key*,
equivalent to `set key name` or `set_option({key: name})`.
"""
key = key.upper()
name = ('anonymous' + str(uuid.uuid4())[:8]) if name == '' else name
cleanbas = basname(name).replace('-', '') # further remove hyphens so can be function name
block = qcel.util.filter_comments(block)
command_lines = re.split('\n', block)
symbol_re = re.compile(r'^\s*assign\s+(?P<symbol>[A-Z]{1,3})\s+(?P<basis>[-+*\(\)\w]+)\s*$', re.IGNORECASE)
label_re = re.compile(
r'^\s*assign\s+(?P<label>(?P<symbol>[A-Z]{1,3})(?:(_\w+)|(\d+))?)\s+(?P<basis>[-+*\(\)\w]+)\s*$',
re.IGNORECASE)
all_re = re.compile(r'^\s*assign\s+(?P<basis>[-+*\(\)\w]+)\s*$', re.IGNORECASE)
basislabel = re.compile(r'\s*\[\s*([-*\(\)\w]+)\s*\]\s*')
def anon(mol, role):
basstrings = {}
# Start by looking for assign lines, and remove them
leftover_lines = []
assignments = False
for line in command_lines:
if symbol_re.match(line):
m = symbol_re.match(line)
mol.set_basis_by_symbol(m.group('symbol'), m.group('basis'), role=role)
assignments = True
elif label_re.match(line):
m = label_re.match(line)
mol.set_basis_by_label(m.group('label'), m.group('basis'), role=role)
assignments = True
elif all_re.match(line):
m = all_re.match(line)
mol.set_basis_all_atoms(m.group('basis'), role=role)
assignments = True
else:
# Ignore blank lines and accumulate remainder
if line and not line.isspace():
leftover_lines.append(line.strip())
# Now look for regular basis set definitions
basblock = list(filter(None, basislabel.split('\n'.join(leftover_lines))))
if len(basblock) == 1:
if not assignments:
# case with no [basname] markers where whole block is contents of gbs file
mol.set_basis_all_atoms(name, role=role)
basstrings[basname(name)] = basblock[0]
else:
message = (
"Conflicting basis set specification: assign lines present but shells have no [basname] label."
"")
raise TestComparisonError(message)
else:
# case with specs separated by [basname] markers
for idx in range(0, len(basblock), 2):
basstrings[basname(basblock[idx])] = basblock[idx + 1]
return basstrings
anon.__name__ = 'basisspec_psi4_yo__' + cleanbas
qcdb.libmintsbasisset.basishorde[name.upper()] = anon
if set_option:
core.set_global_option(key, name)
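# Illustrative PsiAPI call (basis names are only examples; the block uses the `assign`
# syntax handled by `anon` above -- one all-atom assignment plus a per-element override):
#   basis_helper("""
#       assign cc-pvdz
#       assign H sto-3g
#   """, name='mybas')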
core.OEProp.valid_methods = [
'DIPOLE', 'QUADRUPOLE', 'MULLIKEN_CHARGES', 'LOWDIN_CHARGES', 'WIBERG_LOWDIN_INDICES', 'MAYER_INDICES',
'MBIS_CHARGES', 'MO_EXTENTS', 'GRID_FIELD', 'GRID_ESP', 'ESP_AT_NUCLEI', 'NO_OCCUPATIONS'
]
## Option helpers
def _core_set_global_option_python(key, EXTERN):
"""
This is a fairly hacky way to get around EXTERN issues. Effectively we are routing this option Python side through attributes until the general Options overhaul.
"""
if (key != "EXTERN"):
raise ValidationError("Options: set_global_option_python does not recognize keyword %s" % key)
if EXTERN is None:
core.EXTERN = None
core.set_global_option("EXTERN", False)
elif isinstance(EXTERN, core.ExternalPotential):
        # Well this is probably the worst hack I have done, that's saying something
core.EXTERN = EXTERN
core.set_global_option("EXTERN", True)
else:
raise ValidationError("Options: set_global_option_python can either be a NULL or External Potential object")
core.set_global_option_python = _core_set_global_option_python
## QCvar helps
_qcvar_transitions = {
"SCSN-MP2 CORRELATION ENERGY": "SCS(N)-MP2 CORRELATION ENERGY",
"SCSN-MP2 TOTAL ENERGY": "SCS(N)-MP2 TOTAL ENERGY",
"MAYER_INDICES": "MAYER INDICES",
"WIBERG_LOWDIN_INDICES": "WIBERG LOWDIN INDICES",
"LOWDIN_CHARGES": "LOWDIN CHARGES",
"MULLIKEN_CHARGES": "MULLIKEN CHARGES",
}
_qcvar_cancellations = {
"SCSN-MP2 SAME-SPIN CORRELATION ENERGY": ["MP2 SAME-SPIN CORRELATION ENERGY"],
"SCSN-MP2 OPPOSITE-SPIN CORRELATION ENERGY": ["MP2 OPPOSITE-SPIN CORRELATION ENERGY"],
"SCS-CCSD SAME-SPIN CORRELATION ENERGY": ["CCSD SAME-SPIN CORRELATION ENERGY"],
"SCS-CCSD OPPOSITE-SPIN CORRELATION ENERGY": ["CCSD OPPOSITE-SPIN CORRELATION ENERGY"],
"SCS-MP2 SAME-SPIN CORRELATION ENERGY": ["MP2 SAME-SPIN CORRELATION ENERGY"],
"SCS-MP2 OPPOSITE-SPIN CORRELATION ENERGY": ["MP2 OPPOSITE-SPIN CORRELATION ENERGY"],
"SCS(N)-OMP2 CORRELATION ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
"SCS(N)-OMP2 TOTAL ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
"SCSN-OMP2 CORRELATION ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
"SCSN-OMP2 TOTAL ENERGY": ["OMP2 SAME-SPIN CORRELATION ENERGY", "OMP2 OPPOSITE-SPIN CORRELATION ENERGY"],
}
def _qcvar_warnings(key: str) -> str:
if any([key.upper().endswith(" DIPOLE " + cart) for cart in ["X", "Y", "Z"]]):
warnings.warn(
f"Using scalar QCVariable `{key.upper()}` [D] instead of array `{key.upper()[:-2]}` [e a0] is deprecated, and in 1.5 it will stop working\n",
category=FutureWarning,
stacklevel=3)
if any([key.upper().endswith(" QUADRUPOLE " + cart) for cart in ["XX", "YY", "ZZ", "XY", "XZ", "YZ"]]):
warnings.warn(
f"Using scalar QCVariable `{key.upper()}` [D A] instead of array `{key.upper()[:-3]}` [e a0^2] is deprecated, and in 1.5 it will stop working\n",
category=FutureWarning,
stacklevel=3)
if key.upper() in _qcvar_transitions:
warnings.warn(
f"Using QCVariable `{key.upper()}` instead of `{_qcvar_transitions[key.upper()]}` is deprecated, and in 1.5 it will stop working\n",
category=FutureWarning,
stacklevel=3)
return _qcvar_transitions[key.upper()]
if key.upper() in _qcvar_cancellations:
raise UpgradeHelper(key.upper(), "no direct replacement", 1.4, " Consult QCVariables " + ", ".join(_qcvar_cancellations[key.upper()]) + " to recompose the quantity.")
return key
_multipole_order = ["dummy", "dummy", "QUADRUPOLE", "OCTUPOLE", "HEXADECAPOLE"]
for order in range(5, 10):
_multipole_order.append(f"{int(2**order)}-POLE")
def _qcvar_reshape_set(key, val):
"""Reverse `_qcvar_reshape_get` for internal psi4.core.Matrix storage."""
reshaper = None
if key.upper().startswith("MBIS"):
if key.upper().endswith("CHARGES"):
return val
elif key.upper().endswith("DIPOLES"):
reshaper = (-1, 3)
return val.reshape(reshaper)
elif key.upper().endswith("QUADRUPOLES"):
val = val.reshape(-1, 3, 3)
val = np.array([_multipole_compressor(val[iat], 2) for iat in range(len(val))])
return val
elif key.upper().endswith("OCTUPOLES"):
val = val.reshape(-1, 3, 3, 3)
val = np.array([_multipole_compressor(val[iat], 3) for iat in range(len(val))])
return val
elif key.upper().endswith("DIPOLE"):
reshaper = (1, 3)
elif any(key.upper().endswith(p) for p in _multipole_order):
val = _multipole_compressor(val, _multipole_order.index(key.upper().split()[-1]))
reshaper = (1, -1)
elif key.upper() in ["MULLIKEN_CHARGES", "LOWDIN_CHARGES", "MULLIKEN CHARGES", "LOWDIN CHARGES"]:
reshaper = (1, -1)
if reshaper:
return val.reshape(reshaper)
else:
return val
def _qcvar_reshape_get(key, val):
"""For QCVariables where the 2D psi4.core.Matrix shape is unnatural, convert to natural shape in ndarray."""
reshaper = None
if key.upper().startswith("MBIS"):
if key.upper().endswith("CHARGES"):
return val.np
elif key.upper().endswith("DIPOLES"):
reshaper = (-1, 3)
return val.np.reshape(reshaper)
elif key.upper().endswith("QUADRUPOLES"):
val = val.np.reshape(-1, 6)
val = np.array([_multipole_plumper(val[iat], 2) for iat in range(len(val))])
return val
elif key.upper().endswith("OCTUPOLES"):
val = val.np.reshape(-1, 10)
val = np.array([_multipole_plumper(val[iat], 3) for iat in range(len(val))])
return val
elif key.upper().endswith("DIPOLE"):
reshaper = (3, )
elif any(key.upper().endswith(p) for p in _multipole_order):
return _multipole_plumper(val.np.reshape((-1, )), _multipole_order.index(key.upper().split()[-1]))
elif key.upper() in ["MULLIKEN_CHARGES", "LOWDIN_CHARGES", "MULLIKEN CHARGES", "LOWDIN CHARGES"]:
reshaper = (-1, )
if reshaper:
return val.np.reshape(reshaper)
else:
return val
def _multipole_compressor(complete, order):
"""Form flat unique components multipole array from complete Cartesian array.
Parameters
----------
order : int
Multipole order. e.g., 1 for dipole, 4 for hexadecapole.
complete : ndarray
Multipole array, order-dimensional Cartesian array expanded to complete components.
Returns
-------
compressed : ndarray
Multipole array, length (order + 1) * (order + 2) / 2 compressed to unique components.
"""
compressed = []
for ii in range(order + 1):
lx = order - ii
for lz in range(ii + 1):
ly = ii - lz
np_index = []
for xval in range(lx):
np_index.append(0)
for yval in range(ly):
np_index.append(1)
for zval in range(lz):
np_index.append(2)
compressed.append(complete[tuple(np_index)])
assert len(compressed) == ((order + 1) * (order + 2) / 2)
return np.array(compressed)
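# Worked example (order = 2, a quadrupole): the loops above emit (lx, ly, lz) in the
# order XX, XY, XZ, YY, YZ, ZZ, i.e. (2 + 1)(2 + 2)/2 = 6 unique components pulled out
# of the 9 entries of the full 3x3 Cartesian array.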
def _multipole_plumper(compressed: np.ndarray, order: int) -> np.ndarray:
"""Form multidimensional multipole array from unique components array.
Parameters
----------
order
Multipole order. e.g., 1 for dipole, 4 for hexadecapole.
compressed
Multipole array, length (order + 1) * (order + 2) / 2 compressed to unique components.
Returns
-------
complete : numpy.ndarray
Multipole array, order-dimensional Cartesian array expanded to complete components.
"""
shape = tuple([3] * order)
complete = np.zeros(shape)
def compound_index(counter):
# thanks, https://www.pamoc.it/tpc_cart_mom.html Eqn 2.2!
# jn = nz + (ny + nz)(ny + nz + 1) / 2
return int(
counter.get("2", 0) + (counter.get("1", 0) + counter.get("2", 0)) *
(counter.get("1", 0) + counter.get("2", 0) + 1) / 2)
for idx in product("012", repeat=order):
xyz_counts = Counter(idx) # "010" --> {"0": 2, "1": 1}
np_index = tuple(int(x) for x in idx) # ('0', '1') --> (0, 1)
complete[np_index] = compressed[compound_index(xyz_counts)]
return complete
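# Worked example (order = 2): the YZ component has ny = nz = 1, so
# jn = 1 + (1 + 1)(1 + 1 + 1)/2 = 4, which is exactly position 4 in the
# XX, XY, XZ, YY, YZ, ZZ ordering produced by _multipole_compressor above.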
def _core_has_variable(key: str) -> bool:
"""Whether scalar or array QCVariable *key* has been set in global memory."""
return core.has_scalar_variable(key) or core.has_array_variable(key)
def _core_wavefunction_has_variable(cls: core.Wavefunction, key: str) -> bool:
"""Whether scalar or array QCVariable *key* has been set on *self* :class:`psi4.core.Wavefunction`."""
return cls.has_scalar_variable(key) or cls.has_array_variable(key)
def _core_variable(key: str) -> Union[float, core.Matrix, np.ndarray]:
"""Return copy of scalar or array QCVariable *key* from global memory.
Returns
-------
float or numpy.ndarray or Matrix
Scalar variables are returned as floats.
Array variables not naturally 2D (like multipoles) are returned as :class:`numpy.ndarray` of natural dimensionality.
Other array variables are returned as :py:class:`~psi4.core.Matrix` and may have an extra dimension with symmetry information.
Example
-------
>>> psi4.gradient("hf/cc-pvdz")
>>> psi4.variable("CURRENT ENERGY")
-100.00985995185668
>>> psi4.variable("CURRENT DIPOLE")
array([ 0. , 0. , -0.83217802])
>>> psi4.variable("CURRENT GRADIENT")
<psi4.core.Matrix object at 0x12d884fc0>
>>> psi4.variable("CURRENT GRADIENT").np
array([[ 6.16297582e-33, 6.16297582e-33, -9.41037138e-02],
[-6.16297582e-33, -6.16297582e-33, 9.41037138e-02]])
"""
key = _qcvar_warnings(key)
if core.has_scalar_variable(key):
return core.scalar_variable(key)
elif core.has_array_variable(key):
return _qcvar_reshape_get(key, core.array_variable(key))
else:
raise KeyError("psi4.core.variable: Requested variable " + key + " was not set!\n")
def _core_wavefunction_variable(cls: core.Wavefunction, key: str) -> Union[float, core.Matrix, np.ndarray]:
"""Return copy of scalar or array QCVariable *key* from *self* :class:`psi4.core.Wavefunction`.
Returns
-------
float or numpy.ndarray or Matrix
Scalar variables are returned as floats.
Array variables not naturally 2D (like multipoles) are returned as :class:`numpy.ndarray` of natural dimensionality.
Other array variables are returned as :py:class:`~psi4.core.Matrix` and may have an extra dimension with symmetry information.
Example
-------
>>> g, wfn = psi4.gradient("hf/cc-pvdz", return_wfn=True)
>>> wfn.variable("CURRENT ENERGY")
-100.00985995185668
>>> wfn.variable("CURRENT DIPOLE")
array([ 0. , 0. , -0.83217802])
>>> wfn.variable("CURRENT GRADIENT")
<psi4.core.Matrix object at 0x12d884fc0>
>>> wfn.variable("CURRENT GRADIENT").np
array([[ 6.16297582e-33, 6.16297582e-33, -9.41037138e-02],
[-6.16297582e-33, -6.16297582e-33, 9.41037138e-02]])
"""
key = _qcvar_warnings(key)
if cls.has_scalar_variable(key):
return cls.scalar_variable(key)
elif cls.has_array_variable(key):
return _qcvar_reshape_get(key, cls.array_variable(key))
else:
raise KeyError("psi4.core.Wavefunction.variable: Requested variable " + key + " was not set!\n")
def _core_set_variable(key: str, val: Union[core.Matrix, np.ndarray, float]) -> None:
"""Sets scalar or array QCVariable *key* to *val* in global memory."""
if isinstance(val, core.Matrix):
if core.has_scalar_variable(key):
raise ValidationError("psi4.core.set_variable: Target variable " + key + " already a scalar variable!")
else:
core.set_array_variable(key, val)
elif isinstance(val, np.ndarray):
if core.has_scalar_variable(key):
raise ValidationError("psi4.core.set_variable: Target variable " + key + " already a scalar variable!")
else:
core.set_array_variable(key, core.Matrix.from_array(_qcvar_reshape_set(key, val)))
else:
if core.has_array_variable(key):
raise ValidationError("psi4.core.set_variable: Target variable " + key + " already an array variable!")
else:
core.set_scalar_variable(key, val)
# TODO _qcvar_warnings(key)
def _core_wavefunction_set_variable(cls: core.Wavefunction, key: str, val: Union[core.Matrix, np.ndarray, float]) -> None:
"""Sets scalar or array QCVariable *key* to *val* on *cls*."""
if isinstance(val, core.Matrix):
if cls.has_scalar_variable(key):
raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
" already a scalar variable!")
else:
cls.set_array_variable(key, val)
elif isinstance(val, np.ndarray):
if cls.has_scalar_variable(key):
raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
" already a scalar variable!")
else:
cls.set_array_variable(key, core.Matrix.from_array(_qcvar_reshape_set(key, val)))
else:
if cls.has_array_variable(key):
raise ValidationError("psi4.core.Wavefunction.set_variable: Target variable " + key +
" already an array variable!")
else:
cls.set_scalar_variable(key, val)
# TODO _qcvar_warnings(key)
def _core_del_variable(key: str) -> None:
"""Removes scalar or array QCVariable *key* from global memory if present."""
if core.has_scalar_variable(key):
core.del_scalar_variable(key)
elif core.has_array_variable(key):
core.del_array_variable(key)
def _core_wavefunction_del_variable(cls: core.Wavefunction, key: str) -> None:
"""Removes scalar or array QCVariable *key* from *cls* if present."""
if cls.has_scalar_variable(key):
cls.del_scalar_variable(key)
elif cls.has_array_variable(key):
cls.del_array_variable(key)
def _core_variables(include_deprecated_keys: bool = False) -> Dict[str, Union[float, core.Matrix, np.ndarray]]:
"""Return all scalar or array QCVariables from global memory."""
dicary = {**core.scalar_variables(), **{k: _qcvar_reshape_get(k, v) for k, v in core.array_variables().items()}}
if include_deprecated_keys:
for old_key, current_key in _qcvar_transitions.items():
if current_key in dicary:
dicary[old_key] = dicary[current_key]
return dicary
def _core_wavefunction_variables(cls, include_deprecated_keys: bool = False) -> Dict[str, Union[float, core.Matrix, np.ndarray]]:
"""Return all scalar or array QCVariables from *cls*."""
dicary = {**cls.scalar_variables(), **{k: _qcvar_reshape_get(k, v) for k, v in cls.array_variables().items()}}
if include_deprecated_keys:
for old_key, current_key in _qcvar_transitions.items():
if current_key in dicary:
dicary[old_key] = dicary[current_key]
return dicary
core.has_variable = _core_has_variable
core.variable = _core_variable
core.set_variable = _core_set_variable
core.del_variable = _core_del_variable
core.variables = _core_variables
core.Wavefunction.has_variable = _core_wavefunction_has_variable
core.Wavefunction.variable = _core_wavefunction_variable
core.Wavefunction.set_variable = _core_wavefunction_set_variable
core.Wavefunction.del_variable = _core_wavefunction_del_variable
core.Wavefunction.variables = _core_wavefunction_variables
## Psi4 v1.4 Export Deprecations
def _core_get_variable(key):
"""
.. deprecated:: 1.4
Use :py:func:`psi4.variable` instead.
"""
warnings.warn(
"Using `psi4.core.get_variable` instead of `psi4.core.variable` (or `psi4.core.scalar_variable` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.scalar_variable(key)
def _core_get_variables():
"""
.. deprecated:: 1.4
Use :py:func:`psi4.core.variables` instead.
"""
warnings.warn(
"Using `psi4.core.get_variables` instead of `psi4.core.variables` (or `psi4.core.scalar_variables` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.scalar_variables()
def _core_get_array_variable(key):
"""
.. deprecated:: 1.4
Use :py:func:`psi4.variable` instead.
"""
warnings.warn(
"Using `psi4.core.get_array_variable` instead of `psi4.core.variable` (or `psi4.core.array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.array_variable(key)
def _core_get_array_variables():
"""
.. deprecated:: 1.4
Use :py:func:`psi4.core.variables` instead.
"""
warnings.warn(
"Using `psi4.core.get_array_variables` instead of `psi4.core.variables` (or `psi4.core.array_variables` for array variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.array_variables()
core.get_variable = _core_get_variable
core.get_variables = _core_get_variables
core.get_array_variable = _core_get_array_variable
core.get_array_variables = _core_get_array_variables
def _core_wavefunction_get_variable(cls, key):
"""
.. deprecated:: 1.4
Use :py:func:`psi4.core.Wavefunction.variable` instead.
"""
warnings.warn(
"Using `psi4.core.Wavefunction.get_variable` instead of `psi4.core.Wavefunction.variable` (or `psi4.core.Wavefunction.scalar_variable` for scalar variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.scalar_variable(key)
def _core_wavefunction_get_array(cls, key):
"""
.. deprecated:: 1.4
Use :py:func:`psi4.core.Wavefunction.variable` instead.
"""
warnings.warn(
"Using `psi4.core.Wavefunction.get_array` instead of `psi4.core.Wavefunction.variable` (or `psi4.core.Wavefunction.array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.array_variable(key)
def _core_wavefunction_set_array(cls, key, val):
"""
.. deprecated:: 1.4
Use :py:func:`psi4.core.Wavefunction.set_variable` instead.
"""
warnings.warn(
"Using `psi4.core.Wavefunction.set_array` instead of `psi4.core.Wavefunction.set_variable` (or `psi4.core.Wavefunction.set_array_variable` for array variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.set_array_variable(key, val)
def _core_wavefunction_arrays(cls):
"""
.. deprecated:: 1.4
Use :py:func:`psi4.core.Wavefunction.variables` instead.
"""
warnings.warn(
"Using `psi4.core.Wavefunction.arrays` instead of `psi4.core.Wavefunction.variables` (or `psi4.core.Wavefunction.array_variables` for array variables only) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.array_variables()
core.Wavefunction.get_variable = _core_wavefunction_get_variable
core.Wavefunction.get_array = _core_wavefunction_get_array
core.Wavefunction.set_array = _core_wavefunction_set_array
core.Wavefunction.arrays = _core_wavefunction_arrays
def _core_wavefunction_frequencies(cls):
if not hasattr(cls, 'frequency_analysis'):
return None
vibinfo = cls.frequency_analysis
vibonly = qcdb.vib.filter_nonvib(vibinfo)
return core.Vector.from_array(qcdb.vib.filter_omega_to_real(vibonly['omega'].data))
def _core_wavefunction_legacy_frequencies(cls):
"""
.. deprecated:: 1.4
"""
warnings.warn(
"Using `psi4.core.Wavefunction.legacy_frequencies` (accessing c-side member data) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.legacy_frequencies()
def _core_wavefunction_set_frequencies(cls, val):
"""
.. deprecated:: 1.4
"""
warnings.warn(
"Using `psi4.core.Wavefunction.set_frequencies` (accessing c-side member data) instead of `psi4.core.Wavefunction.frequency_analysis` (py-side member data) is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.set_legacy_frequencies(val)
core.Wavefunction.frequencies = _core_wavefunction_frequencies
core.Wavefunction.legacy_frequencies = _core_wavefunction_legacy_frequencies
core.Wavefunction.set_frequencies = _core_wavefunction_set_frequencies
def _core_wavefunction_X(cls):
warnings.warn(
"Using `psi4.core.Wavefunction.X` instead of `psi4.core.Wavefunction.lagrangian` is deprecated, and in 1.5 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return cls.lagrangian()
core.Wavefunction.X = _core_wavefunction_X
## Psi4 v1.3 Export Deprecations
def _core_get_gradient():
"""
.. deprecated:: 1.2
"""
warnings.warn(
"Using `psi4.core.get_gradient` (only used internally for C++ optking; deprecated silently in 1.2) is deprecated, and in 1.5 (or whenever Py optking is adopted) it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.get_legacy_gradient()
def _core_set_gradient(val):
"""
.. deprecated:: 1.2
"""
warnings.warn(
"Using `psi4.core.set_gradient` (only used internally for C++ optking; deprecated silently in 1.2) is deprecated, and in 1.5 (or whenever Py optking is adopted) it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.set_legacy_gradient(val)
core.get_gradient = _core_get_gradient
core.set_gradient = _core_set_gradient
def _core_doublet(A, B, transA, transB):
"""Multiply two matrices together.
.. deprecated:: 1.4
Use :py:func:`psi4.core.doublet` instead.
"""
warnings.warn(
"Using `psi4.core.Matrix.doublet` instead of `psi4.core.doublet` is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.doublet(A, B, transA, transB)
def _core_triplet(A, B, C, transA, transB, transC):
"""Multiply three matrices together.
.. deprecated:: 1.4
Use :py:func:`psi4.core.triplet` instead.
"""
warnings.warn(
"Using `psi4.core.Matrix.triplet` instead of `psi4.core.triplet` is deprecated, and in 1.4 it will stop working\n",
category=FutureWarning,
stacklevel=2)
return core.triplet(A, B, C, transA, transB, transC)
core.Matrix.doublet = staticmethod(_core_doublet)
core.Matrix.triplet = staticmethod(_core_triplet)
|
psi-rking/psi4
|
psi4/driver/p4util/python_helpers.py
|
Python
|
lgpl-3.0
| 43,894
|
[
"Psi4"
] |
1e8912996e9a6580d881d8e17e269d4dc98a510c2e5478a79fcdfe28ba342126
|
# -*- coding: utf-8 -*-
#
# phonopy documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 13 15:11:21 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.imgmath']
#imgmath_image_format = 'svg'
imgmath_image_format = 'png'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'phonopy'
copyright = u'2009, Atsushi Togo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.11.6'
# The full version, including alpha/beta/rc tags.
release = '1.11.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# html_theme = 'nature'
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "Phonopy",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
# 'navbar_links': [
# ("Examples", "examples"),
# ("Link", "http://example.com", True),
# ],
# 'navbar_links': [
# ("Tags", "setting-tags"),
# ("Options", "command-options"),
# ],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': True,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': True,
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
# 'navbar_class': "navbar navbar-inverse",
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
# 'source_link_position': "nav",
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
# 'bootswatch_theme': "united",
'bootswatch_theme': "cosmo",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Phonopy v.%s' % release
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'phonopy-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'phonopydoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'phonopy.tex', u'phonopy manual',
u'Atsushi Togo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
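# Illustrative note (not part of the original configuration): this conf.py is read by
# sphinx-build rather than imported directly. Assuming the usual layout where this
# file lives in the doc/ source directory, the HTML manual would be built with
# something like
#     sphinx-build -b html doc doc/_build/html
# and the LaTeX sources (using the latex_documents entry above) with
#     sphinx-build -b latex doc doc/_build/latex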
|
anttikarttunen/ajk-test
|
doc/conf.py
|
Python
|
bsd-3-clause
| 8,914
|
[
"phonopy"
] |
0dbd7201027c9e7b0f67eee0932d3bae75c6e317a6619f3915d3d5f274d2c974
|
"""Functions to plot epochs data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import warnings
from collections import deque
from functools import partial
import numpy as np
from scipy import ndimage
from ..utils import create_chunks
from ..io.pick import pick_types, channel_type
from ..fixes import Counter
from .utils import _mutable_defaults, tight_layout, _prepare_trellis
from .utils import figure_nobar
def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r'):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs
picks : int | array-like of int | None
The indices of the channels to consider. If None, all good
data channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
colorbar : bool
        Whether or not to display a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
        (data.shape[1] == len(times)).
show : bool
        Whether or not to show the figure at the end.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15)`
cmap : matplotlib colormap
Colormap.
Returns
-------
figs : the list of matplotlib figures
One figure per channel displayed
"""
units, scalings = _mutable_defaults(('units', units),
('scalings', scalings))
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
if list(units.keys()) != list(scalings.keys()):
raise ValueError('Scalings and units must have the same keys.')
picks = np.atleast_1d(picks)
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
figs = list()
for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
this_fig = plt.figure()
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
        if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError('%s type not in scalings and units' % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None:
this_data = this_data[this_order]
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
im = plt.imshow(this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
0, len(data)],
aspect='auto', origin='lower',
vmin=vmin, vmax=vmax, cmap=cmap)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel('Epochs')
ax1.axis('auto')
ax1.axis('tight')
ax1.axvline(0, color='m', linewidth=3, linestyle='--')
ax2.plot(1e3 * evoked.times, scalings[ch_type] * evoked.data[i])
ax2.set_xlabel('Time (ms)')
ax2.set_ylabel(units[ch_type])
ax2.set_ylim([vmin, vmax])
ax2.axvline(0, color='m', linewidth=3, linestyle='--')
if colorbar:
plt.colorbar(im, cax=ax3)
tight_layout(fig=this_fig)
if show:
plt.show()
return figs
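# Illustrative sketch (not part of the original module): typical use of
# plot_image_epochs, assuming an existing `epochs` instance. The hypothetical `order`
# callable receives the times vector and the (n_epochs, n_times) data array, as
# described in the docstring above, and here sorts trials by mean amplitude:
#
#     order_by_mean = lambda times, data: np.argsort(data.mean(axis=1))
#     figs = plot_image_epochs(epochs, picks=[2, 3], sigma=0.5,
#                              order=order_by_mean, show=False)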
def _drop_log_stats(drop_log, ignore=['IGNORED']):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
# XXX: This function should be moved to epochs.py after
# removal of perc return parameter in plot_drop_log()
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any([r in ignore for r in d])])
return perc
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=['IGNORED'],
show=True, return_fig=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
return_fig : bool
This argument is deprecated and will be removed in v0.10.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
perc = _drop_log_stats(drop_log, ignore)
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
fig = plt.figure()
if perc < threshold or len(ch_names) == 0:
plt.text(0, 0, 'No drops')
return fig
counts = 100 * np.array(list(scores.values()), dtype=float) / len(drop_log)
n_plot = min(n_max_plot, len(ch_names))
order = np.flipud(np.argsort(counts))
plt.title('%s: %0.1f%%' % (subject, perc))
x = np.arange(n_plot)
plt.bar(x, counts[order[:n_plot]], color=color, width=width)
plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
horizontalalignment='right')
plt.tick_params(axis='x', which='major', labelsize=10)
plt.ylabel('% of epochs rejected')
plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
plt.grid(True, axis='y')
if show:
plt.show()
if not return_fig:
msg = ("'return_fig=False' will be deprecated in v0.10. "
"Use 'Epochs.drop_log_stats' to get percentages instead.")
warnings.warn(msg, DeprecationWarning)
return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
title_str, axes_handler):
"""Aux functioin"""
this = axes_handler[0]
for ii, data_, ax in zip(epoch_idx, data, axes):
[l.set_data(times, d) for l, d in zip(ax.lines, data_[good_ch_idx])]
if bad_ch_idx is not None:
bad_lines = [ax.lines[k] for k in bad_ch_idx]
[l.set_data(times, d) for l, d in zip(bad_lines,
data_[bad_ch_idx])]
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks([])
ax.set_xticks([])
if vars(ax)[this]['reject'] is True:
# memorizing reject
[l.set_color((0.8, 0.8, 0.8)) for l in ax.lines]
ax.get_figure().canvas.draw()
else:
# forgetting previous reject
for k in axes_handler:
if k == this:
continue
if vars(ax).get(k, {}).get('reject', None) is True:
[l.set_color('k') for l in ax.lines[:len(good_ch_idx)]]
if bad_ch_idx is not None:
[l.set_color('r') for l in ax.lines[-len(bad_ch_idx):]]
ax.get_figure().canvas.draw()
break
def _epochs_navigation_onclick(event, params):
"""Aux function"""
import matplotlib.pyplot as plt
p = params
here = None
if event.inaxes == p['back'].ax:
here = 1
elif event.inaxes == p['next'].ax:
here = -1
elif event.inaxes == p['reject-quit'].ax:
if p['reject_idx']:
p['epochs'].drop_epochs(p['reject_idx'])
plt.close(p['fig'])
plt.close(event.inaxes.get_figure())
if here is not None:
p['idx_handler'].rotate(here)
p['axes_handler'].rotate(here)
this_idx = p['idx_handler'][0]
_draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
p['data'][this_idx],
p['times'], p['axes'], p['title_str'],
p['axes_handler'])
# XXX don't ask me why
p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
"""Aux function"""
reject_color = (0.8, 0.8, 0.8)
ax = event.inaxes
if event.inaxes is None:
return
p = params
here = vars(ax)[p['axes_handler'][0]]
if here.get('reject', None) is False:
idx = here['idx']
if idx not in p['reject_idx']:
p['reject_idx'].append(idx)
[l.set_color(reject_color) for l in ax.lines]
here['reject'] = True
elif here.get('reject', None) is True:
idx = here['idx']
if idx in p['reject_idx']:
p['reject_idx'].pop(p['reject_idx'].index(idx))
good_lines = [ax.lines[k] for k in p['good_ch_idx']]
[l.set_color('k') for l in good_lines]
if p['bad_ch_idx'] is not None:
bad_lines = ax.lines[-len(p['bad_ch_idx']):]
[l.set_color('r') for l in bad_lines]
here['reject'] = False
ax.get_figure().canvas.draw()
def plot_epochs(epochs, epoch_idx=None, picks=None, scalings=None,
title_str='#%003i', show=True, block=False):
""" Visualize single trials using Trellis plot.
Parameters
----------
epochs : instance of Epochs
The epochs object
epoch_idx : array-like | int | None
The epochs to visualize. If None, the first 20 epochs are shown.
Defaults to None.
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | None
Scale factors for the traces. If None, defaults to:
`dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
title_str : None | str
The string formatting to use for axes titles. If None, no titles
will be shown. Defaults expand to ``#001, #002, ...``
show : bool
Whether to show the figure or not.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on a
sub plot.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
scalings = _mutable_defaults(('scalings_plot_raw', None))[0]
if np.isscalar(epoch_idx):
epoch_idx = [epoch_idx]
if epoch_idx is None:
n_events = len(epochs.events)
epoch_idx = list(range(n_events))
else:
n_events = len(epoch_idx)
epoch_idx = epoch_idx[:n_events]
idx_handler = deque(create_chunks(epoch_idx, 20))
if picks is None:
if any('ICA' in k for k in epochs.ch_names):
picks = pick_types(epochs.info, misc=True, ref_meg=False,
exclude=[])
else:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude=[])
if len(picks) < 1:
raise RuntimeError('No appropriate channels found. Please'
' check your picks')
times = epochs.times * 1e3
n_channels = epochs.info['nchan']
types = [channel_type(epochs.info, idx) for idx in
picks]
# preallocation needed for min / max scaling
data = np.zeros((len(epochs.events), n_channels, len(times)))
for ii, epoch in enumerate(epochs.get_data()):
for jj, (this_type, this_channel) in enumerate(zip(types, epoch)):
data[ii, jj] = this_channel / scalings[this_type]
n_events = len(epochs.events)
epoch_idx = epoch_idx[:n_events]
idx_handler = deque(create_chunks(epoch_idx, 20))
# handle bads
bad_ch_idx = None
ch_names = epochs.ch_names
bads = epochs.info['bads']
if any([ch_names[k] in bads for k in picks]):
ch_picked = [k for k in ch_names if ch_names.index(k) in picks]
bad_ch_idx = [ch_picked.index(k) for k in bads if k in ch_names]
good_ch_idx = [p for p in picks if p not in bad_ch_idx]
else:
good_ch_idx = np.arange(n_channels)
fig, axes = _prepare_trellis(len(data[idx_handler[0]]), max_col=5)
axes_handler = deque(list(range(len(idx_handler))))
for ii, data_, ax in zip(idx_handler[0], data[idx_handler[0]], axes):
ax.plot(times, data_[good_ch_idx].T, color='k')
if bad_ch_idx is not None:
ax.plot(times, data_[bad_ch_idx].T, color='r')
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks([])
ax.set_xticks([])
vars(ax)[axes_handler[0]] = {'idx': ii, 'reject': False}
# initialize memory
for this_view, this_inds in zip(axes_handler, idx_handler):
for ii, ax in zip(this_inds, axes):
vars(ax)[this_view] = {'idx': ii, 'reject': False}
tight_layout(fig=fig)
navigation = figure_nobar(figsize=(3, 1.5))
from matplotlib import gridspec
gs = gridspec.GridSpec(2, 2)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[0, 1])
ax3 = plt.subplot(gs[1, :])
params = {
'fig': fig,
'idx_handler': idx_handler,
'epochs': epochs,
'picks': picks,
'times': times,
'scalings': scalings,
'good_ch_idx': good_ch_idx,
'bad_ch_idx': bad_ch_idx,
'axes': axes,
'back': mpl.widgets.Button(ax1, 'back'),
'next': mpl.widgets.Button(ax2, 'next'),
'reject-quit': mpl.widgets.Button(ax3, 'reject-quit'),
'title_str': title_str,
'reject_idx': [],
'axes_handler': axes_handler,
'data': data,
'navigation': navigation,
}
fig.canvas.mpl_connect('button_press_event',
partial(_epochs_axes_onclick, params=params))
navigation.canvas.mpl_connect('button_press_event',
partial(_epochs_navigation_onclick,
params=params))
if show is True:
plt.show(block=block)
return fig
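# Illustrative sketch (not part of the original module): interactive rejection with
# plot_epochs, assuming an existing `epochs` instance. Clicking a trial toggles its
# reject state, and the 'reject-quit' button drops the marked trials via
# epochs.drop_epochs(), as wired up in _epochs_navigation_onclick above.
#
#     fig = plot_epochs(epochs, epoch_idx=range(40), block=True)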
|
effigies/mne-python
|
mne/viz/epochs.py
|
Python
|
bsd-3-clause
| 16,323
|
[
"Gaussian"
] |
ac0d3ee0679fa430bd0b6b3523baa7077d434cf06e8030481a3d4271c45f53e4
|
# multi2.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# This loads in a medium-detail model incorporating
# reac-diff and elec signaling in neurons. The reac-diff model is
# psd_merged31d.g which has only Ca in the dendrites, but is quite
# interesting in the PSD and spine head, but it has lots of
# cross-compartment reactions. The model actually loaded here
# (separate_compts.g) has no cross-compartment reactions, though Ca
# diffuses everywhere. The elec model controls the
# Ca levels in the chem compartments.
# This version uses solvers for both chem and electrical parts.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
proto18.make_Ca()
proto18.make_Ca_conc()
proto18.make_K_AHP()
proto18.make_K_C()
proto18.make_Na()
proto18.make_K_DR()
proto18.make_K_A()
proto18.make_glu()
proto18.make_NMDA()
proto18.make_Ca_NMDA()
proto18.make_NMDA_Ca_conc()
proto18.make_axon()
moose.setCwe( '/library' )
model = moose.Neutral( '/model' )
cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "Neutral" )
return cellId
def loadChem( diffLength ):
chem = moose.Neutral( '/model/chem' )
neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
neuroCompt.separateSpines = 1
neuroCompt.geometryPolicy = 'cylinder'
spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
#print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
modelId = moose.loadModel( 'separate_compts.g', '/model/chem', 'ee' )
#modelId = moose.loadModel( 'psd_merged31d.g', '/model/chem', 'ee' )
neuroCompt.name = 'dend'
spineCompt.name = 'spine'
psdCompt.name = 'psd'
def makeNeuroMeshModel():
diffLength = 10e-6 # Aim for 2 soma compartments.
elec = loadElec()
loadChem( diffLength )
neuroCompt = moose.element( '/model/chem/dend' )
neuroCompt.diffLength = diffLength
neuroCompt.cellPortion( elec, '/model/elec/#' )
for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
if (x.diffConst > 0):
x.diffConst = 1e-11
for x in moose.wildcardFind( '/model/chem/##/Ca' ):
x.diffConst = 1e-10
# Put in dend solvers
ns = neuroCompt.numSegments
ndc = neuroCompt.numDiffCompts
print 'ns = ', ns, ', ndc = ', ndc
assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
assert( ns == 36 ) #
assert( ndc == 278 ) #
nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
nmstoich.compartment = neuroCompt
nmstoich.ksolve = nmksolve
nmstoich.dsolve = nmdsolve
nmstoich.path = "/model/chem/dend/##"
print 'done setting path, numPools = ', nmdsolve.numPools
assert( nmdsolve.numPools == 1 )
assert( nmdsolve.numAllVoxels == ndc )
assert( nmstoich.numAllPools == 1 )
# oddly, numLocalFields does not work.
ca = moose.element( '/model/chem/dend/DEND/Ca' )
assert( ca.numData == ndc )
# Put in spine solvers. Note that these get info from the neuroCompt
spineCompt = moose.element( '/model/chem/spine' )
sdc = spineCompt.mesh.num
print 'sdc = ', sdc
assert( sdc == 13 )
smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
smstoich = moose.Stoich( '/model/chem/spine/stoich' )
smstoich.compartment = spineCompt
smstoich.ksolve = smksolve
smstoich.dsolve = smdsolve
smstoich.path = "/model/chem/spine/##"
print 'spine num Pools = ', smstoich.numAllPools, smdsolve.numPools
assert( smstoich.numAllPools == 35 )
assert( smdsolve.numPools == 30 )
assert( smdsolve.numAllVoxels == sdc )
# Put in PSD solvers. Note that these get info from the neuroCompt
psdCompt = moose.element( '/model/chem/psd' )
pdc = psdCompt.mesh.num
assert( pdc == 13 )
pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
pmstoich.compartment = psdCompt
pmstoich.ksolve = pmksolve
pmstoich.dsolve = pmdsolve
pmstoich.path = "/model/chem/psd/##"
print 'psd num Pools = ', pmstoich.numAllPools, pmdsolve.numPools
assert( pmstoich.numAllPools == 55 )
assert( pmdsolve.numPools == 48 )
assert( pmdsolve.numAllVoxels == pdc )
foo = moose.element( '/model/chem/psd/Ca' )
print 'PSD: numfoo = ', foo.numData
print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels
# Put in junctions between the diffusion solvers
nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
"""
CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
"""
##################################################################
# set up adaptors
aCa = moose.Adaptor( '/model/chem/spine/adaptCa', sdc )
adaptCa = moose.vec( '/model/chem/spine/adaptCa' )
chemCa = moose.vec( '/model/chem/spine/Ca' )
#print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( adaptCa ) == sdc )
assert( len( chemCa ) == sdc )
for i in range( sdc ):
elecCa = moose.element( '/model/elec/spine_head_14_' + str(i+1) + '/NMDA_Ca_conc' )
#print elecCa
moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' )
moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 5e-3 # 520 to 0.0052 mM
#print adaptCa.outputOffset
moose.le( '/model/chem/dend/DEND' )
compts = neuroCompt.elecComptList
begin = neuroCompt.startVoxelInCompt
end = neuroCompt.endVoxelInCompt
aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', len( compts))
adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
#print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
assert( len( chemCa ) == ndc )
for i in zip( compts, adaptCa, begin, end ):
name = i[0].path + '/Ca_conc'
if ( moose.exists( name ) ):
elecCa = moose.element( name )
#print i[2], i[3], ' ', elecCa
#print i[1]
moose.connect( elecCa, 'concOut', i[1], 'input', 'Single' )
for j in range( i[2], i[3] ):
moose.connect( i[1], 'output', chemCa[j], 'setConc', 'Single' )
adaptCa.inputOffset = 0.0 #
adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
adaptCa.scale = 20e-6 # 10 arb units to 2 uM.
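# Illustrative note (not part of the original script): both Adaptor vectors above apply
# a linear map from the electrical Ca signal onto a chemical concentration. Assuming
# the Adaptor computes
#     output = (input - inputOffset) * scale + outputOffset
# the inputOffset of 0.0 passes the raw electrical value through, scale rescales it
# into mM, and the 0.00008 mM outputOffset keeps a zero electrical signal pinned at
# the ~80 nM resting Ca of the chemical model.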
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print "addPlot failed: object is a Neutral: ", objpath
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print "addPlot failed: object not found: ", objpath
return moose.element( '/' )
def makeCaPlots():
graphs = moose.Neutral( '/graphs' )
ca = moose.Neutral( '/graphs/ca' )
addPlot( '/model/elec/soma/Ca_conc', 'getCa', 'ca/somaCa' )
addPlot( '/model/elec/lat_11_2/Ca_conc', 'getCa', 'ca/lat11Ca' )
addPlot( '/model/elec/spine_head_14_4/NMDA_Ca_conc', 'getCa', 'ca/spine4Ca' )
addPlot( '/model/elec/spine_head_14_12/NMDA_Ca_conc', 'getCa', 'ca/spine12Ca' )
def makeElecPlots():
graphs = moose.Neutral( '/graphs' )
elec = moose.Neutral( '/graphs/elec' )
addPlot( '/model/elec/soma', 'getVm', 'elec/somaVm' )
addPlot( '/model/elec/spine_head_14_4', 'getVm', 'elec/spineVm' )
def makeChemPlots():
graphs = moose.Neutral( '/graphs' )
chem = moose.Neutral( '/graphs/chem' )
addPlot( '/model/chem/psd/CaM_dash_Ca4', 'getConc', 'chem/psdCaCam' )
addPlot( '/model/chem/psd/actCaMKII', 'getConc', 'chem/psdActCaMKII' )
addPlot( '/model/chem/spine/CaM_dash_Ca4', 'getConc', 'chem/spineCaCam' )
addPlot( '/model/chem/spine/CaM_dash_Ca4[12]', 'getConc', 'chem/spine12CaCam' )
addPlot( '/model/chem/spine/actCaMKII', 'getConc', 'chem/spineActCaMKII' )
addPlot( '/model/chem/spine/actCaMKII[11]', 'getConc', 'chem/spine12ActCaMKII' )
addPlot( '/model/chem/psd/Ca', 'getConc', 'chem/psdCa' )
addPlot( '/model/chem/psd/Ca[11]', 'getConc', 'chem/psd12Ca' )
addPlot( '/model/chem/spine/Ca[3]', 'getConc', 'chem/spine4Ca' )
addPlot( '/model/chem/spine/Ca[11]', 'getConc', 'chem/spine12Ca' )
#addPlot( '/model/chem/dend/DEND/Ca', 'getConc', 'chem/dendCa' )
#addPlot( '/model/chem/dend/DEND/Ca[20]', 'getConc', 'chem/dendCa20' )
def makeGraphics( cPlotDt, ePlotDt ):
plt.ion()
fig = plt.figure( figsize=(10,16) )
chem = fig.add_subplot( 411 )
chem.set_ylim( 0, 0.006 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
line1, = chem.plot( pos, x.vector, label=x.name )
plt.legend()
elec = fig.add_subplot( 412 )
plt.ylabel( 'Vm (V)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = elec.plot( pos, x.vector, label=x.name )
plt.legend()
ca = fig.add_subplot( 413 )
plt.ylabel( '[Ca] (mM)' )
plt.xlabel( 'time (seconds)' )
for x in moose.wildcardFind( '/graphs/ca/#[ISA=Table]' ):
pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
line1, = ca.plot( pos, x.vector, label=x.name )
plt.legend()
lenplot = fig.add_subplot( 414 )
    plt.ylabel( 'Ca (mM)' )
    plt.xlabel( 'Voxel#' )
spineCa = moose.vec( '/model/chem/spine/Ca' )
dendCa = moose.vec( '/model/chem/dend/DEND/Ca' )
line1, = lenplot.plot( range( len( spineCa ) ), spineCa.conc, label='spine' )
line2, = lenplot.plot( range( len( dendCa ) ), dendCa.conc, label='dend' )
ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ]
line3, = lenplot.plot( range( len( ca ) ), ca, label='elec' )
spineCaM = moose.vec( '/model/chem/spine/CaM_dash_Ca4' )
line4, = lenplot.plot( range( len( spineCaM ) ), spineCaM.conc, label='spineCaM' )
psdCaM = moose.vec( '/model/chem/psd/CaM_dash_Ca4' )
line5, = lenplot.plot( range( len( psdCaM ) ), psdCaM.conc, label='psdCaM' )
plt.legend()
fig.canvas.draw()
raw_input()
'''
for x in moose.wildcardFind( '/graphs/##[ISA=Table]' ):
t = numpy.arange( 0, x.vector.size, 1 )
pylab.plot( t, x.vector, label=x.name )
pylab.legend()
pylab.show()
'''
print 'All done'
def testNeuroMeshMultiscale():
useHsolve = 0
runtime = 0.5
elecDt = 10e-6
chemDt = 0.005
ePlotDt = 0.5e-3
cPlotDt = 0.005
plotName = 'nm.plot'
makeNeuroMeshModel()
print "after model is completely done"
for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
makeChemPlots()
makeElecPlots()
makeCaPlots()
for i in range( 10 ):
moose.setClock( i, elecDt )
for i in range( 10, 20 ):
moose.setClock( i, chemDt )
moose.setClock( 8, ePlotDt )
moose.setClock( 18, cPlotDt )
if useHsolve:
hsolve = moose.HSolve( '/model/elec/hsolve' )
#moose.useClock( 1, '/model/elec/hsolve', 'process' )
hsolve.dt = elecDt
hsolve.target = '/model/elec/compt'
moose.reinit()
#soma = moose.element( '/model/elec/soma' )
'''
else:
moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
moose.useClock( 1, '/model/elec/##[ISA=Compartment]', 'process' )
moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
moose.useClock( 2, '/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
#moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
#moose.useClock( 4, '/model/chem/##[ISA=Adaptor]', 'process' )
moose.useClock( 4, '/model/chem/#/dsolve', 'process' )
moose.useClock( 5, '/model/chem/#/ksolve', 'process' )
moose.useClock( 6, '/model/chem/spine/adaptCa', 'process' )
moose.useClock( 6, '/model/chem/dend/DEND/adaptCa', 'process' )
'''
moose.useClock( 18, '/graphs/chem/#', 'process' )
moose.useClock( 8, '/graphs/elec/#,/graphs/ca/#', 'process' )
moose.element( '/model/elec/soma' ).inject = 2e-10
moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
moose.reinit()
moose.start( runtime )
# moose.element( '/model/elec/soma' ).inject = 0
# moose.start( 0.25 )
makeGraphics( cPlotDt, ePlotDt )
def main():
testNeuroMeshMultiscale()
if __name__ == '__main__':
main()
#
# minimal.py ends here.
|
dilawar/moose-full
|
moose-examples/snippets/MULTI/multi2.py
|
Python
|
gpl-2.0
| 14,932
|
[
"MOOSE"
] |
989342392922d941808ca2b27a0fff0b25363f3c67f3ae914b5cba018e26beed
|
from __future__ import absolute_import
import collections
import os
import sqlite3
from mercurial.i18n import _
from mercurial.node import sha1nodeconstants
from mercurial import (
encoding,
error,
pycompat,
)
from . import gitutil
pygit2 = gitutil.get_pygit2()
_CURRENT_SCHEMA_VERSION = 1
_SCHEMA = (
"""
CREATE TABLE refs (
-- node and name are unique together. There may be more than one name for
-- a given node, and there may be no name at all for a given node (in the
-- case of an anonymous hg head).
node TEXT NOT NULL,
name TEXT
);
-- The "possible heads" of the repository, which we use to figure out
-- if we need to re-walk the changelog.
CREATE TABLE possible_heads (
node TEXT NOT NULL
);
-- The topological heads of the changelog, which hg depends on.
CREATE TABLE heads (
node TEXT NOT NULL
);
-- A total ordering of the changelog
CREATE TABLE changelog (
rev INTEGER NOT NULL PRIMARY KEY,
node TEXT NOT NULL,
p1 TEXT,
p2 TEXT
);
CREATE UNIQUE INDEX changelog_node_idx ON changelog(node);
CREATE UNIQUE INDEX changelog_node_rev_idx ON changelog(rev, node);
-- Changed files for each commit, which lets us dynamically build
-- filelogs.
CREATE TABLE changedfiles (
node TEXT NOT NULL,
filename TEXT NOT NULL,
-- 40 zeroes for deletions
filenode TEXT NOT NULL,
-- to handle filelog parentage:
p1node TEXT,
p1filenode TEXT,
p2node TEXT,
p2filenode TEXT
);
CREATE INDEX changedfiles_nodes_idx
ON changedfiles(node);
PRAGMA user_version=%d
"""
% _CURRENT_SCHEMA_VERSION
)
def _createdb(path):
# print('open db', path)
# import traceback
# traceback.print_stack()
db = sqlite3.connect(encoding.strfromlocal(path))
db.text_factory = bytes
res = db.execute('PRAGMA user_version').fetchone()[0]
# New database.
if res == 0:
for statement in _SCHEMA.split(';'):
db.execute(statement.strip())
db.commit()
elif res == _CURRENT_SCHEMA_VERSION:
pass
else:
raise error.Abort(_(b'sqlite database has unrecognized version'))
db.execute('PRAGMA journal_mode=WAL')
return db
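# Illustrative sketch (not part of the original extension): the tables created above
# can be queried directly once the database exists. A hypothetical lookup of a
# commit's parents by node, mirroring the changelog schema:
#
#     db = _createdb(b'/path/to/.hg/cache/git-commits.sqlite')
#     row = db.execute(
#         'SELECT rev, p1, p2 FROM changelog WHERE node = ?',
#         ('deadbeef' * 5,),
#     ).fetchone()
#     # row is None until _index_repo() has populated the cache.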
_OUR_ORDER = ()
if pygit2:
_OUR_ORDER = (
pygit2.GIT_SORT_TOPOLOGICAL
| pygit2.GIT_SORT_TIME
| pygit2.GIT_SORT_REVERSE
)
_DIFF_FLAGS = 1 << 21 # GIT_DIFF_FORCE_BINARY, which isn't exposed by pygit2
def _find_nearest_ancestor_introducing_node(
db, gitrepo, file_path, walk_start, filenode
):
"""Find the nearest ancestor that introduces a file node.
Args:
db: a handle to our sqlite database.
gitrepo: A pygit2.Repository instance.
file_path: the path of a file in the repo
      walk_start: a pygit2.Oid that is a commit where we should start walking
        for our nearest ancestor.
      filenode: the hexlified SHA of the file node whose introducing commit
        we are looking for.
Returns:
A hexlified SHA that is the commit ID of the next-nearest parent.
"""
assert isinstance(file_path, str), 'file_path must be str, got %r' % type(
file_path
)
assert isinstance(filenode, str), 'filenode must be str, got %r' % type(
filenode
)
parent_options = {
row[0].decode('ascii')
for row in db.execute(
'SELECT node FROM changedfiles '
'WHERE filename = ? AND filenode = ?',
(file_path, filenode),
)
}
inner_walker = gitrepo.walk(walk_start, _OUR_ORDER)
for w in inner_walker:
if w.id.hex in parent_options:
return w.id.hex
raise error.ProgrammingError(
'Unable to find introducing commit for %s node %s from %s',
(file_path, filenode, walk_start),
)
def fill_in_filelog(gitrepo, db, startcommit, path, startfilenode):
"""Given a starting commit and path, fill in a filelog's parent pointers.
Args:
gitrepo: a pygit2.Repository
db: a handle to our sqlite database
startcommit: a hexlified node id for the commit to start at
path: the path of the file whose parent pointers we should fill in.
filenode: the hexlified node id of the file at startcommit
TODO: make filenode optional
"""
assert isinstance(
startcommit, str
), 'startcommit must be str, got %r' % type(startcommit)
assert isinstance(
startfilenode, str
), 'startfilenode must be str, got %r' % type(startfilenode)
visit = collections.deque([(startcommit, startfilenode)])
while visit:
cnode, filenode = visit.popleft()
commit = gitrepo[cnode]
parents = []
for parent in commit.parents:
t = parent.tree
for comp in path.split('/'):
try:
t = gitrepo[t[comp].id]
except KeyError:
break
else:
introducer = _find_nearest_ancestor_introducing_node(
db, gitrepo, path, parent.id, t.id.hex
)
parents.append((introducer, t.id.hex))
p1node = p1fnode = p2node = p2fnode = gitutil.nullgit
for par, parfnode in parents:
found = int(
db.execute(
'SELECT COUNT(*) FROM changedfiles WHERE '
'node = ? AND filename = ? AND filenode = ? AND '
'p1node NOT NULL',
(par, path, parfnode),
).fetchone()[0]
)
if found == 0:
assert par is not None
visit.append((par, parfnode))
if parents:
p1node, p1fnode = parents[0]
if len(parents) == 2:
p2node, p2fnode = parents[1]
if len(parents) > 2:
raise error.ProgrammingError(
b"git support can't handle octopus merges"
)
db.execute(
'UPDATE changedfiles SET '
'p1node = ?, p1filenode = ?, p2node = ?, p2filenode = ? '
'WHERE node = ? AND filename = ? AND filenode = ?',
(p1node, p1fnode, p2node, p2fnode, commit.id.hex, path, filenode),
)
db.commit()
def _index_repo(
gitrepo,
db,
logfn=lambda x: None,
progress_factory=lambda *args, **kwargs: None,
):
# Identify all references so we can tell the walker to visit all of them.
all_refs = gitrepo.listall_references()
possible_heads = set()
prog = progress_factory(b'refs')
for pos, ref in enumerate(all_refs):
if prog is not None:
prog.update(pos)
if not (
ref.startswith('refs/heads/') # local branch
or ref.startswith('refs/tags/') # tag
or ref.startswith('refs/remotes/') # remote branch
or ref.startswith('refs/hg/') # from this extension
):
continue
try:
start = gitrepo.lookup_reference(ref).peel(pygit2.GIT_OBJ_COMMIT)
except ValueError:
# No commit to be found, so we don't care for hg's purposes.
continue
possible_heads.add(start.id)
    # Optimization: if the list of heads hasn't changed, don't
    # re-index the changelog. This doesn't matter on small
# repositories, but on even moderately deep histories (eg cpython)
# this is a very important performance win.
#
# TODO: we should figure out how to incrementally index history
# (preferably by detecting rewinds!) so that we don't have to do a
# full changelog walk every time a new commit is created.
cache_heads = {
pycompat.sysstr(x[0])
for x in db.execute('SELECT node FROM possible_heads')
}
walker = None
cur_cache_heads = {h.hex for h in possible_heads}
if cur_cache_heads == cache_heads:
return
logfn(b'heads mismatch, rebuilding dagcache\n')
for start in possible_heads:
if walker is None:
walker = gitrepo.walk(start, _OUR_ORDER)
else:
walker.push(start)
# Empty out the existing changelog. Even for large-ish histories
# we can do the top-level "walk all the commits" dance very
# quickly as long as we don't need to figure out the changed files
# list.
db.execute('DELETE FROM changelog')
if prog is not None:
prog.complete()
prog = progress_factory(b'commits')
# This walker is sure to visit all the revisions in history, but
# only once.
for pos, commit in enumerate(walker):
if prog is not None:
prog.update(pos)
p1 = p2 = gitutil.nullgit
if len(commit.parents) > 2:
raise error.ProgrammingError(
(
b"git support can't handle octopus merges, "
b"found a commit with %d parents :("
)
% len(commit.parents)
)
if commit.parents:
p1 = commit.parents[0].id.hex
if len(commit.parents) == 2:
p2 = commit.parents[1].id.hex
db.execute(
'INSERT INTO changelog (rev, node, p1, p2) VALUES(?, ?, ?, ?)',
(pos, commit.id.hex, p1, p2),
)
num_changedfiles = db.execute(
"SELECT COUNT(*) from changedfiles WHERE node = ?",
(commit.id.hex,),
).fetchone()[0]
if not num_changedfiles:
files = {}
# I *think* we only need to check p1 for changed files
# (and therefore linkrevs), because any node that would
# actually have this commit as a linkrev would be
# completely new in this rev.
p1 = commit.parents[0].id.hex if commit.parents else None
if p1 is not None:
patchgen = gitrepo.diff(p1, commit.id.hex, flags=_DIFF_FLAGS)
else:
patchgen = commit.tree.diff_to_tree(
swap=True, flags=_DIFF_FLAGS
)
new_files = (p.delta.new_file for p in patchgen)
files = {
nf.path: nf.id.hex
for nf in new_files
if nf.id.raw != sha1nodeconstants.nullid
}
for p, n in files.items():
# We intentionally set NULLs for any file parentage
# information so it'll get demand-computed later. We
# used to do it right here, and it was _very_ slow.
db.execute(
'INSERT INTO changedfiles ('
'node, filename, filenode, p1node, p1filenode, p2node, '
'p2filenode) VALUES(?, ?, ?, ?, ?, ?, ?)',
(commit.id.hex, p, n, None, None, None, None),
)
db.execute('DELETE FROM heads')
db.execute('DELETE FROM possible_heads')
for hid in possible_heads:
h = hid.hex
db.execute('INSERT INTO possible_heads (node) VALUES(?)', (h,))
haschild = db.execute(
'SELECT COUNT(*) FROM changelog WHERE p1 = ? OR p2 = ?', (h, h)
).fetchone()[0]
if not haschild:
db.execute('INSERT INTO heads (node) VALUES(?)', (h,))
db.commit()
if prog is not None:
prog.complete()
def get_index(
gitrepo, logfn=lambda x: None, progress_factory=lambda *args, **kwargs: None
):
cachepath = os.path.join(
pycompat.fsencode(gitrepo.path), b'..', b'.hg', b'cache'
)
if not os.path.exists(cachepath):
os.makedirs(cachepath)
dbpath = os.path.join(cachepath, b'git-commits.sqlite')
db = _createdb(dbpath)
# TODO check against gitrepo heads before doing a full index
# TODO thread a ui.progress call into this layer
_index_repo(gitrepo, db, logfn, progress_factory)
return db
|
jwren/intellij-community
|
plugins/hg4idea/testData/bin/hgext/git/index.py
|
Python
|
apache-2.0
| 11,645
|
[
"Octopus",
"VisIt"
] |
d1c2192e58842c1efab7297b5c8e7fd9cc52538b88b3581492ac880490d4a5c8
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterFileWriter.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Radim Blazek'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
import glob
import tempfile
from osgeo import gdal
from qgis.PyQt.QtCore import QTemporaryFile, QDir
from qgis.core import (QgsRaster,
QgsRasterLayer,
QgsRasterChecker,
QgsRasterPipe,
QgsRasterFileWriter,
QgsRasterProjector)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
class TestQgsRasterFileWriter(unittest.TestCase):
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
self.testDataDir = unitTestDataPath()
self.report = "<h1>Python Raster File Writer Tests</h1>\n"
def write(self, theRasterName):
print(theRasterName)
path = "%s/%s" % (self.testDataDir, theRasterName)
rasterLayer = QgsRasterLayer(path, "test")
if not rasterLayer.isValid():
return False
provider = rasterLayer.dataProvider()
tmpFile = QTemporaryFile()
        tmpFile.open()  # fileName is not available until open() is called
tmpName = tmpFile.fileName()
tmpFile.close()
# do not remove when class is destroyed so that we can read
# the file and see difference
tmpFile.setAutoRemove(False)
fileWriter = QgsRasterFileWriter(tmpName)
pipe = QgsRasterPipe()
if not pipe.set(provider.clone()):
print("Cannot set pipe provider")
return False
projector = QgsRasterProjector()
projector.setCrs(provider.crs(), provider.crs())
if not pipe.insert(2, projector):
print("Cannot set pipe projector")
return False
fileWriter.writeRaster(
pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs())
checker = QgsRasterChecker()
ok = checker.runTest("gdal", tmpName, "gdal", path)
self.report += checker.report()
# All OK, we can delete the file
tmpFile.setAutoRemove(ok)
return ok
def testWrite(self):
for name in glob.glob("%s/raster/*.tif" % self.testDataDir):
baseName = os.path.basename(name)
allOk = True
ok = self.write("raster/%s" % baseName)
if not ok:
allOk = False
reportFilePath = "%s/qgistest.html" % QDir.tempPath()
reportFile = open(reportFilePath, 'a')
reportFile.write(self.report)
reportFile.close()
assert allOk, "Raster file writer test failed"
def testDriverForExtension(self):
self.assertEqual(QgsRasterFileWriter.driverForExtension('tif'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('TIF'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('tIf'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('.tif'), 'GTiff')
self.assertEqual(QgsRasterFileWriter.driverForExtension('img'), 'HFA')
self.assertEqual(QgsRasterFileWriter.driverForExtension('.vrt'), 'VRT')
self.assertEqual(QgsRasterFileWriter.driverForExtension('.jpg'), 'JPEG')
self.assertEqual(QgsRasterFileWriter.driverForExtension('asc'), 'AAIGrid')
self.assertEqual(QgsRasterFileWriter.driverForExtension('not a format'), '')
self.assertEqual(QgsRasterFileWriter.driverForExtension(''), '')
def testExtensionsForFormat(self):
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('not format'), [])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('GTiff'), ['tiff', 'tif'])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('GPKG'), ['gpkg'])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('JPEG'), ['jpg', 'jpeg'])
self.assertCountEqual(QgsRasterFileWriter.extensionsForFormat('AAIGrid'), ['asc'])
def testSupportedFiltersAndFormat(self):
# test with formats in recommended order
formats = QgsRasterFileWriter.supportedFiltersAndFormats(QgsRasterFileWriter.SortRecommended)
self.assertEqual(formats[0].filterString, 'GeoTIFF (*.tif *.TIF *.tiff *.TIFF)')
self.assertEqual(formats[0].driverName, 'GTiff')
self.assertTrue('netCDF' in [f.driverName for f in formats])
# alphabetical sorting
formats2 = QgsRasterFileWriter.supportedFiltersAndFormats(QgsRasterFileWriter.RasterFormatOptions())
self.assertTrue(formats2[0].driverName < formats2[1].driverName)
self.assertCountEqual([f.driverName for f in formats], [f.driverName for f in formats2])
self.assertNotEqual(formats2[0].driverName, 'GTiff')
def testSupportedFormatExtensions(self):
formats = QgsRasterFileWriter.supportedFormatExtensions()
self.assertTrue('tif' in formats)
self.assertFalse('exe' in formats)
self.assertEqual(formats[0], 'tif')
self.assertTrue('nc' in formats)
# alphabetical sorting
formats2 = QgsRasterFileWriter.supportedFormatExtensions(QgsRasterFileWriter.RasterFormatOptions())
self.assertTrue(formats2[1] < formats2[2])
self.assertCountEqual(formats, formats2)
self.assertNotEqual(formats2[0], 'tif')
def testImportIntoGpkg(self):
# init target file
test_gpkg = tempfile.mktemp(suffix='.gpkg', dir=self.testDataDir)
gdal.GetDriverByName('GPKG').Create(test_gpkg, 1, 1, 1)
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'band3_byte_noct_epsg4326.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(test_gpkg)
fw.setOutputFormat('gpkg')
fw.setCreateOptions(['RASTER_TABLE=imported_table', 'APPEND_SUBDATASET=YES'])
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
projector = QgsRasterProjector()
projector.setCrs(provider.crs(), provider.crs())
self.assertTrue(pipe.insert(2, projector))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), 0)
# Check that the test geopackage contains the raster layer and compare
rlayer = QgsRasterLayer('GPKG:%s:imported_table' % test_gpkg)
self.assertTrue(rlayer.isValid())
out_provider = rlayer.dataProvider()
self.assertEqual(provider.block(1, provider.extent(), source.width(), source.height()).data(),
out_provider.block(1, out_provider.extent(), rlayer.width(), rlayer.height()).data())
# remove result file
os.unlink(test_gpkg)
def _testGeneratePyramids(self, pyramidFormat):
tmpName = tempfile.mktemp(suffix='.tif')
source = QgsRasterLayer(os.path.join(self.testDataDir, 'raster', 'byte.tif'), 'my', 'gdal')
self.assertTrue(source.isValid())
provider = source.dataProvider()
fw = QgsRasterFileWriter(tmpName)
fw.setBuildPyramidsFlag(QgsRaster.PyramidsFlagYes)
fw.setPyramidsFormat(pyramidFormat)
fw.setPyramidsList([2])
pipe = QgsRasterPipe()
self.assertTrue(pipe.set(provider.clone()))
projector = QgsRasterProjector()
projector.setCrs(provider.crs(), provider.crs())
self.assertTrue(pipe.insert(2, projector))
self.assertEqual(fw.writeRaster(pipe,
provider.xSize(),
provider.ySize(),
provider.extent(),
provider.crs()), 0)
del fw
ds = gdal.Open(tmpName)
self.assertEqual(ds.GetRasterBand(1).GetOverviewCount(), 1)
fl = ds.GetFileList()
if pyramidFormat == QgsRaster.PyramidsGTiff:
self.assertEqual(len(fl), 2, fl)
self.assertIn('.ovr', fl[1])
elif pyramidFormat == QgsRaster.PyramidsInternal:
self.assertEqual(len(fl), 1, fl)
elif pyramidFormat == QgsRaster.PyramidsErdas:
self.assertEqual(len(fl), 2, fl)
self.assertIn('.aux', fl[1])
os.unlink(tmpName)
def testGeneratePyramidsExternal(self):
return self._testGeneratePyramids(QgsRaster.PyramidsGTiff)
def testGeneratePyramidsInternal(self):
return self._testGeneratePyramids(QgsRaster.PyramidsInternal)
def testGeneratePyramidsErdas(self):
return self._testGeneratePyramids(QgsRaster.PyramidsErdas)
if __name__ == '__main__':
unittest.main()
|
dwadler/QGIS
|
tests/src/python/test_qgsrasterfilewriter.py
|
Python
|
gpl-2.0
| 9,364
|
[
"NetCDF"
] |
13c8f86548928ee866b526ad1ad80183778a46eba2a4906a5d0e1a51179faf84
|
# proxy module
from __future__ import absolute_import
from mayavi.filters.wrapper import *
|
enthought/etsproxy
|
enthought/mayavi/filters/wrapper.py
|
Python
|
bsd-3-clause
| 91
|
[
"Mayavi"
] |
27f7f889b321f6717b06bc560df4cace10921e4843f977a71dcbb83c368492c6
|
# -*- coding: utf-8 -*-
"""
Bok choy acceptance tests for problems in the LMS
See also old lettuce tests in lms/djangoapps/courseware/features/problems.feature
"""
from textwrap import dedent
from flaky import flaky
from ..helpers import UniqueCourseTest
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.problem import ProblemPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import EventsTestMixin
class ProblemsTest(UniqueCourseTest):
"""
Base class for tests of problems in the LMS.
"""
USERNAME = "joe_student"
EMAIL = "joe@example.com"
def setUp(self):
super(ProblemsTest, self).setUp()
self.xqueue_grade_response = None
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with a hierarchy and problems
course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
problem = self.get_problem()
course_fixture.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(problem)
)
).install()
# Auto-auth register for the course.
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False).visit()
def get_problem(self):
""" Subclasses should override this to complete the fixture """
raise NotImplementedError()
class ProblemClarificationTest(ProblemsTest):
"""
Tests the <clarification> element that can be used in problem XML.
"""
def get_problem(self):
"""
Create a problem with a <clarification>
"""
xml = dedent("""
<problem markdown="null">
<text>
<p>
Given the data in Table 7 <clarification>Table 7: "Example PV Installation Costs",
Page 171 of Roberts textbook</clarification>, compute the ROI
<clarification>Return on Investment <strong>(per year)</strong></clarification> over 20 years.
</p>
<numericalresponse answer="6.5">
<textline label="Enter the annual ROI" trailing_text="%" />
</numericalresponse>
</text>
</problem>
""")
return XBlockFixtureDesc('problem', 'TOOLTIP TEST PROBLEM', data=xml)
def test_clarification(self):
"""
Test that we can see the <clarification> tooltips.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'TOOLTIP TEST PROBLEM')
problem_page.click_clarification(0)
self.assertIn('"Example PV Installation Costs"', problem_page.visible_tooltip_text)
problem_page.click_clarification(1)
tooltip_text = problem_page.visible_tooltip_text
self.assertIn('Return on Investment', tooltip_text)
self.assertIn('per year', tooltip_text)
self.assertNotIn('strong', tooltip_text)
class ProblemExtendedHintTest(ProblemsTest, EventsTestMixin):
"""
Test that extended hint features plumb through to the page html and tracking log.
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="B">hint</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>demand-hint1</hint>
<hint>demand-hint2</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'TITLE', data=xml)
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_text[0], u'question text')
problem_page.fill_answer('B')
problem_page.click_check()
self.assertEqual(problem_page.message_text, u'Incorrect: hint')
# Check for corresponding tracking event
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
number_of_matches=1
)
self.assert_events_match(
[{'event': {'hint_label': u'Incorrect',
'trigger_type': 'single',
'student_answer': [u'B'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': 'hint'}]}}],
actual_events)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hint in its div.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): demand-hint2')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): demand-hint1')
# Check corresponding tracking events
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
number_of_matches=3
)
self.assert_events_match(
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'demand-hint2'}},
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'demand-hint1'}}
],
actual_events)
class ProblemHintWithHtmlTest(ProblemsTest, EventsTestMixin):
"""
Tests that hints containing html get rendered properly
"""
def get_problem(self):
"""
Problem with extended hint features.
"""
xml = dedent("""
<problem>
<p>question text</p>
<stringresponse answer="A">
<stringequalhint answer="B">aa <a href="#">bb</a> cc</stringequalhint>
<stringequalhint answer="C"><a href="#">aa bb</a> cc</stringequalhint>
<textline size="20"/>
</stringresponse>
<demandhint>
<hint>aa <a href="#">bb</a> cc</hint>
<hint><a href="#">dd ee</a> ff</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'PROBLEM HTML HINT TEST', data=xml)
@flaky # TODO fix this, see TNL-3183
def test_check_hint(self):
"""
Test clicking Check shows the extended hint in the problem message.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_text[0], u'question text')
problem_page.fill_answer('B')
problem_page.click_check()
self.assertEqual(problem_page.message_text, u'Incorrect: aa bb cc')
problem_page.fill_answer('C')
problem_page.click_check()
self.assertEqual(problem_page.message_text, u'Incorrect: aa bb cc')
# Check for corresponding tracking event
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.feedback_displayed'},
number_of_matches=2
)
self.assert_events_match(
[{'event': {'hint_label': u'Incorrect',
'trigger_type': 'single',
'student_answer': [u'B'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': 'aa <a href="#">bb</a> cc'}]}},
{'event': {'hint_label': u'Incorrect',
'trigger_type': 'single',
'student_answer': [u'C'],
'correctness': False,
'question_type': 'stringresponse',
'hints': [{'text': '<a href="#">aa bb</a> cc'}]}}],
actual_events)
def test_demand_hint(self):
"""
Test clicking hint button shows the demand hint in its div.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): aa bb cc')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (2 of 2): dd ee ff')
problem_page.click_hint()
self.assertEqual(problem_page.hint_text, u'Hint (1 of 2): aa bb cc')
# Check corresponding tracking events
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.problem.hint.demandhint_displayed'},
number_of_matches=3
)
self.assert_events_match(
[
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa <a href="#">bb</a> cc'}},
{'event': {u'hint_index': 1, u'hint_len': 2, u'hint_text': u'<a href="#">dd ee</a> ff'}},
{'event': {u'hint_index': 0, u'hint_len': 2, u'hint_text': u'aa <a href="#">bb</a> cc'}}
],
actual_events)
class ProblemWithMathjax(ProblemsTest):
"""
Tests the <MathJax> used in problem
"""
def get_problem(self):
"""
Create a problem with a <MathJax> in body and hint
"""
xml = dedent(r"""
<problem>
<p>Check mathjax has rendered [mathjax]E=mc^2[/mathjax]</p>
<multiplechoiceresponse>
<choicegroup label="Answer this?" type="MultipleChoice">
<choice correct="true">Choice1 <choicehint>Correct choice message</choicehint></choice>
<choice correct="false">Choice2<choicehint>Wrong choice message</choicehint></choice>
</choicegroup>
</multiplechoiceresponse>
<demandhint>
<hint>mathjax should work1 \(E=mc^2\) </hint>
<hint>mathjax should work2 [mathjax]E=mc^2[/mathjax]</hint>
</demandhint>
</problem>
""")
return XBlockFixtureDesc('problem', 'MATHJAX TEST PROBLEM', data=xml)
def test_mathjax_in_hint(self):
"""
        Test that MathJax has rendered successfully in the problem hint
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, "MATHJAX TEST PROBLEM")
# Verify Mathjax have been rendered
        self.assertTrue(problem_page.mathjax_rendered_in_problem, "MathJax did not render in body")
# The hint button rotates through multiple hints
problem_page.click_hint()
self.assertIn("Hint (1 of 2): mathjax should work1", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in problem hint")
# Rotate the hint and check the problem hint
problem_page.click_hint()
self.assertIn("Hint (2 of 2): mathjax should work2", problem_page.hint_text)
        self.assertTrue(problem_page.mathjax_rendered_in_hint, "MathJax did not render in problem hint")
class ProblemPartialCredit(ProblemsTest):
"""
Makes sure that the partial credit is appearing properly.
"""
def get_problem(self):
"""
Create a problem with partial credit.
"""
xml = dedent("""
<problem>
<p>The answer is 1. Partial credit for -1.</p>
<numericalresponse answer="1" partial_credit="list">
<formulaequationinput label="How many miles away from Earth is the sun? Use scientific notation to answer." />
<responseparam type="tolerance" default="0.01" />
<responseparam partial_answers="-1" />
</numericalresponse>
</problem>
""")
return XBlockFixtureDesc('problem', 'PARTIAL CREDIT TEST PROBLEM', data=xml)
def test_partial_credit(self):
"""
Test that we can see the partial credit value and feedback.
"""
self.courseware_page.visit()
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name, 'PARTIAL CREDIT TEST PROBLEM')
problem_page.fill_answer_numerical('-1')
problem_page.click_check()
self.assertTrue(problem_page.simpleprob_is_partially_correct())
|
chauhanhardik/populo
|
common/test/acceptance/tests/lms/test_lms_problems.py
|
Python
|
agpl-3.0
| 13,347
|
[
"VisIt"
] |
1885e641a7b92e51d9b817242392cd5f9e3486f8e5df0c0c5c86d9da4763fce9
|
from allensdk.model.biophys_sim.config import Config
from allensdk.model.biophys_sim.neuron.hoc_utils import HocUtils
class Utils(HocUtils):
def __init__(self, description=None):
description = Config().load('manifest.json')
super(Utils, self).__init__(description)
self.generate_cell()
def generate_cell(self):
# cell = self.h.cell()
from neuron import h as H
H.load_file("cell_template.hoc")
cell = H.cell()
morphology_path = self.description.manifest.get_path('MORPHOLOGY')
self.generate_morphology(cell, morphology_path)
self.load_cell_parameters(cell)
self.cell = cell
def generate_morphology(self, cell, morph_filename):
h = self.h
swc = self.h.Import3d_SWC_read()
swc.input(morph_filename)
imprt = self.h.Import3d_GUI(swc, 0)
imprt.instantiate(cell)
for seg in cell.soma[0]:
seg.area()
for sec in cell.all:
sec.nseg = 1 + 2 * int(sec.L / 40)
cell.simplify_axon()
for sec in cell.axonal:
sec.L = 30
sec.diam = 1
sec.nseg = 1 + 2 * int(sec.L / 40)
cell.axon[0].connect(cell.soma[0], 0.5, 0)
cell.axon[1].connect(cell.axon[0], 1, 0)
h.define_shape()
def load_cell_parameters(self, cell):
passive = self.description.data['passive'][0]
conditions = self.description.data['conditions'][0]
genome = self.description.data['genome']
# Set passive properties
# cm_dict = dict([(c['section'], c['cm']) for c in passive['cm']])
for sec in cell.all:
# sec.Ra = passive['ra']
# sec.cm = cm_dict[sec.name().split(".")[1][:4]]
sec.insert('pas')
# for seg in sec:
# seg.pas.e = passive["e_pas"]
# Insert channels and set parameters
for p in genome:
sections = [s for s in cell.all if s.name().split(".")[1][:4] == p["section"]]
for sec in sections:
if p["mechanism"] != "":
sec.insert(p["mechanism"])
else:
if p["name"] == 'Ra':
sec.Ra = float(p["value"])
elif p["name"] == "cm":
sec.cm = float(p["value"])
elif p["name"] == "g_pas":
for seg in sec:
seg.pas.g = float(p["value"])
elif p["name"] == "e_pas":
for seg in sec:
seg.pas.e = float(p["value"])
else:
for seg in sec:
seg.pas.g =float(p["value"])
setattr(sec, p["name"], float(p["value"]))
# Set reversal potentials
for erev in conditions['erev']:
sections = [s for s in cell.all if s.name().split(".")[1][:4] == erev["section"]]
for sec in sections:
sec.ena = erev["ena"]
sec.ek = erev["ek"]
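# Illustrative sketch, not part of the original file: the manifest structure that
# load_cell_parameters expects, inferred from the keys read above ("section",
# "mechanism", "name", "value", "ena", "ek"). All values are made-up placeholders.
#
#   description.data['genome'] entries:
#       {"section": "soma", "mechanism": "NaTs2_t", "name": "gbar_NaTs2_t", "value": "0.12"}
#       {"section": "apic", "mechanism": "",        "name": "cm",           "value": "2.0"}
#   description.data['conditions'][0]['erev'] entries:
#       {"section": "soma", "ena": 53.0, "ek": -107.0}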
|
Neurosim-lab/netpyne
|
examples/saveLoadV1/AA/active_cell_utils.py
|
Python
|
mit
| 3,092
|
[
"NEURON"
] |
da38db3c6ecdb81cca53f638f8ae4c504730da88abd04fb3a6f98ce5b598566a
|
#!/usr/bin/env python
#
# SchoolTool - common information systems platform for school administration
# Copyright (c) 2003 Shuttleworth Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
SchoolTool test runner.
Syntax: test.py [options] [pathname-regexp [test-regexp]]
There are two kinds of tests:
- unit tests (or programmer tests) test the internal workings of various
components of the system
- functional tests (acceptance tests, customer tests) test only externally
visible system behaviour
You can choose to run unit tests (this is the default mode), functional tests
(by giving a -f option to test.py) or both (by giving both -u and -f options).
Test cases are located in the directory tree starting at the location of this
script, in subdirectories named 'tests' for unit tests and 'ftests' for
functional tests, in Python modules named 'test*.py'. They are then filtered
according to pathname and test regexes. Alternatively, packages may just have
'tests.py' and 'ftests.py' instead of subpackages 'tests' and 'ftests'
respectively.
A leading "!" in a regexp is stripped and negates the regexp. Pathname
regexp is applied to the whole path (package/package/module.py). Test regexp
is applied to a full test id (package.package.module.class.test_method).
Options:
-h print this help message
-v verbose (print dots for each test run)
-vv very verbose (print test names)
-q quiet (do not print anything on success)
-w enable warnings about omitted test cases
-p show progress bar (can be combined with -v or -vv)
-u select unit tests (default)
-f select functional tests
--level n select only tests at level n or lower
--all-levels select all tests
--list-files list all selected test files
--list-tests list all selected test cases
--list-hooks list all loaded test hooks
--coverage create code coverage reports
"""
#
# This script borrows ideas from Zope 3's test runner heavily. It is smaller
# and cleaner though, at the expense of more limited functionality.
#
import re
import os
import sys
import time
import types
import getopt
import unittest
import traceback
from unittest import TextTestResult
__metaclass__ = type
def stderr(text):
sys.stderr.write(text)
sys.stderr.write("\n")
class Options:
"""Configurable properties of the test runner."""
# test location
basedir = '' # base directory for tests (defaults to
# basedir of argv[0] + 'src'), must be absolute
follow_symlinks = True # should symlinks to subdirectories be
# followed? (hardcoded, may cause loops)
# which tests to run
unit_tests = False # unit tests (default if both are false)
functional_tests = False # functional tests
# test filtering
level = 1 # run only tests at this or lower level
# (if None, runs all tests)
pathname_regex = '' # regexp for filtering filenames
test_regex = '' # regexp for filtering test cases
# actions to take
list_files = False # --list-files
list_tests = False # --list-tests
list_hooks = False # --list-hooks
run_tests = True # run tests (disabled by --list-foo)
# output verbosity
verbosity = 0 # verbosity level (-v)
quiet = 0 # do not print anything on success (-q)
warn_omitted = False # produce warnings when a test case is
# not included in a test suite (-w)
progress = False # show running progress (-p)
coverage = False # produce coverage reports (--coverage)
coverdir = 'coverage' # where to put them (currently hardcoded)
immediate_errors = False # show tracebacks twice (currently hardcoded)
screen_width = 80 # screen width (autodetected)
def compile_matcher(regex):
"""Returns a function that takes one argument and returns True or False.
Regex is a regular expression. Empty regex matches everything. There
    is one exception: if the regex starts with "!", the meaning of it is
reversed.
"""
if not regex:
return lambda x: True
elif regex == '!':
return lambda x: False
elif regex.startswith('!'):
rx = re.compile(regex[1:])
return lambda x: rx.search(x) is None
else:
rx = re.compile(regex)
return lambda x: rx.search(x) is not None
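# A quick illustration (not part of the original script) of the matcher semantics
# implemented above; the path strings are made-up examples.
#
#   compile_matcher('')('pkg/tests.py')          -> True   (empty regex matches everything)
#   compile_matcher('ftests')('pkg/ftests.py')   -> True   (plain regex: substring search)
#   compile_matcher('!ftests')('pkg/tests.py')   -> True   (leading "!" negates the match)
#   compile_matcher('!ftests')('pkg/ftests.py')  -> False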
def walk_with_symlinks(top, func, arg):
"""Like os.path.walk, but follows symlinks on POSIX systems.
If the symlinks create a loop, this function will never finish.
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = os.path.join(top, name)
if os.path.isdir(name):
walk_with_symlinks(name, func, arg)
def get_test_files(cfg):
"""Returns a list of test module filenames."""
matcher = compile_matcher(cfg.pathname_regex)
results = []
test_names = []
if cfg.unit_tests:
test_names.append('tests')
if cfg.functional_tests:
test_names.append('ftests')
baselen = len(cfg.basedir) + 1
def visit(ignored, dir, files):
if os.path.basename(dir) not in test_names:
for name in test_names:
if name + '.py' in files:
path = os.path.join(dir, name + '.py')
if matcher(path[baselen:]):
results.append(path)
return
if '__init__.py' not in files:
stderr("%s is not a package" % dir)
return
for file in files:
if file.startswith('test') and file.endswith('.py'):
path = os.path.join(dir, file)
if matcher(path[baselen:]):
results.append(path)
    if cfg.follow_symlinks:
        walker = walk_with_symlinks
    else:
        # os.path.walk was removed in Python 3; emulate its visitor-style API with os.walk
        def walker(top, func, arg):
            for dirpath, dirnames, filenames in os.walk(top):
                func(arg, dirpath, dirnames + filenames)
walker(cfg.basedir, visit, None)
results.sort()
return results
def import_module(filename, cfg, cov=None):
"""Imports and returns a module."""
filename = os.path.splitext(filename)[0]
modname = filename[len(cfg.basedir):].replace(os.path.sep, '.')
if modname.startswith('.'):
modname = modname[1:]
if cov is not None:
cov.start()
mod = __import__(modname)
if cov is not None:
cov.stop()
components = modname.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def filter_testsuite(suite, matcher, level=None):
"""Returns a flattened list of test cases that match the given matcher."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = []
for test in suite._tests:
if level is not None and getattr(test, 'level', 0) > level:
continue
if isinstance(test, unittest.TestCase):
testname = test.id() # package.module.class.method
if matcher(testname):
results.append(test)
else:
filtered = filter_testsuite(test, matcher, level)
results.extend(filtered)
return results
def get_all_test_cases(module):
"""Returns a list of all test case classes defined in a given module."""
results = []
for name in dir(module):
if not name.startswith('Test'):
continue
item = getattr(module, name)
        if isinstance(item, type) and issubclass(item, unittest.TestCase):
results.append(item)
return results
def get_test_classes_from_testsuite(suite):
"""Returns a set of test case classes used in a test suite."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = set()
for test in suite._tests:
if isinstance(test, unittest.TestCase):
results.add(test.__class__)
else:
classes = get_test_classes_from_testsuite(test)
results.update(classes)
return results
def get_test_cases(test_files, cfg, cov=None):
"""Returns a list of test cases from a given list of test modules."""
matcher = compile_matcher(cfg.test_regex)
results = []
for file in test_files:
module = import_module(file, cfg, cov=cov)
if cov is not None:
cov.start()
test_suite = module.test_suite()
if cov is not None:
cov.stop()
if test_suite is None:
continue
if cfg.warn_omitted:
all_classes = set(get_all_test_cases(module))
classes_in_suite = get_test_classes_from_testsuite(test_suite)
difference = all_classes - classes_in_suite
for test_class in difference:
# surround the warning with blank lines, otherwise it tends
# to get lost in the noise
stderr("\n%s: WARNING: %s not in test suite\n"
% (file, test_class.__name__))
if (cfg.level is not None and
getattr(test_suite, 'level', 0) > cfg.level):
continue
filtered = filter_testsuite(test_suite, matcher, cfg.level)
results.extend(filtered)
return results
def get_test_hooks(test_files, cfg, cov=None):
"""Returns a list of test hooks from a given list of test modules."""
results = []
dirs = set(map(os.path.dirname, test_files))
for dir in list(dirs):
if os.path.basename(dir) == 'ftests':
dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
dirs = list(dirs)
dirs.sort()
for dir in dirs:
filename = os.path.join(dir, 'checks.py')
if os.path.exists(filename):
            module = import_module(filename, cfg, cov=cov)
if cov is not None:
cov.start()
hooks = module.test_hooks()
if cov is not None:
cov.stop()
results.extend(hooks)
return results
class CustomTestResult(TextTestResult):
"""Customised TestResult.
It can show a progress bar, and displays tracebacks for errors and failures
as soon as they happen, in addition to listing them all at the end.
"""
__super = TextTestResult
__super_init = __super.__init__
__super_startTest = __super.startTest
__super_stopTest = __super.stopTest
__super_printErrors = __super.printErrors
def __init__(self, stream, descriptions, verbosity, count, cfg, hooks):
self.__super_init(stream, descriptions, verbosity)
self.count = count
self.cfg = cfg
self.hooks = hooks
if cfg.progress:
self.dots = False
self._lastWidth = 0
self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1
def startTest(self, test):
if self.cfg.progress:
# verbosity == 0: 'xxxx/xxxx (xxx.x%)'
# verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
# verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
n = self.testsRun + 1
self.stream.write("\r%4d" % n)
if self.count:
self.stream.write("/%d (%5.1f%%)"
% (self.count, n * 100.0 / self.count))
if self.showAll: # self.cfg.verbosity == 1
self.stream.write(": ")
elif self.cfg.verbosity:
name = self.getShortDescription(test)
width = len(name)
if width < self._lastWidth:
name += " " * (self._lastWidth - width)
self.stream.write(": %s" % name)
self._lastWidth = width
self.stream.flush()
self.__super_startTest(test)
for hook in self.hooks:
hook.startTest(test)
def stopTest(self, test):
for hook in self.hooks:
hook.stopTest(test)
self.__super_stopTest(test)
def getShortDescription(self, test):
s = self.getDescription(test)
if len(s) > self._maxWidth:
# s is 'testname (package.module.class)'
# try to shorten it to 'testname (...age.module.class)'
# if it is still too long, shorten it to 'testnam...'
# limit case is 'testname (...)'
pos = s.find(" (")
if pos + len(" (...)") > self._maxWidth:
s = s[:self._maxWidth - 3] + "..."
else:
s = "%s...%s" % (s[:pos + 2], s[pos + 5 - self._maxWidth:])
return s
def printErrors(self):
if self.cfg.progress and not (self.dots or self.showAll):
self.stream.writeln()
self.__super_printErrors()
def formatError(self, err):
return "".join(traceback.format_exception(*err))
def printTraceback(self, kind, test, err):
self.stream.writeln()
self.stream.writeln()
self.stream.writeln("%s: %s" % (kind, test))
self.stream.writeln(self.formatError(err))
self.stream.writeln()
def addFailure(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("FAIL", test, err)
self.failures.append((test, self.formatError(err)))
def addError(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("ERROR", test, err)
self.errors.append((test, self.formatError(err)))
class CustomTestRunner(unittest.TextTestRunner):
"""Customised TestRunner.
See CustomisedTextResult for a list of extensions.
"""
__super = unittest.TextTestRunner
__super_init = __super.__init__
__super_run = __super.run
def __init__(self, cfg, hooks=None):
self.__super_init(verbosity=cfg.verbosity)
self.cfg = cfg
if hooks is not None:
self.hooks = hooks
else:
self.hooks = []
def run(self, test):
"""Run the given test case or test suite."""
self.count = test.countTestCases()
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = float(stopTime - startTime)
result.printErrors()
run = result.testsRun
if not self.cfg.quiet:
self.stream.writeln(result.separator2)
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = list(map(len, (result.failures, result.errors)))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
elif not self.cfg.quiet:
self.stream.writeln("OK")
return result
def _makeResult(self):
return CustomTestResult(self.stream, self.descriptions, self.verbosity,
cfg=self.cfg, count=self.count,
hooks=self.hooks)
def main(argv):
"""Main program."""
# Environment
if sys.version_info < (2, 7):
stderr('%s: need Python 2.7 or later' % argv[0])
stderr('your python is %s' % sys.version)
return 1
# Defaults
cfg = Options()
cfg.basedir = os.path.join(os.path.dirname(argv[0]), 'src')
cfg.basedir = os.path.abspath(cfg.basedir)
# Figure out terminal size
try:
import curses
except ImportError:
pass
else:
try:
curses.setupterm()
cols = curses.tigetnum('cols')
if cols > 0:
cfg.screen_width = cols
except (curses.error, TypeError):
# tigetnum() is broken in PyPy3 and raises TypeError
pass
# Option processing
opts, args = getopt.gnu_getopt(argv[1:], 'hvpqufw',
['list-files', 'list-tests', 'list-hooks',
'level=', 'all-levels', 'coverage'])
for k, v in opts:
if k == '-h':
print(__doc__)
return 0
elif k == '-v':
cfg.verbosity += 1
cfg.quiet = False
elif k == '-p':
cfg.progress = True
cfg.quiet = False
elif k == '-q':
cfg.verbosity = 0
cfg.progress = False
cfg.quiet = True
elif k == '-u':
cfg.unit_tests = True
elif k == '-f':
cfg.functional_tests = True
elif k == '-w':
cfg.warn_omitted = True
elif k == '--list-files':
cfg.list_files = True
cfg.run_tests = False
elif k == '--list-tests':
cfg.list_tests = True
cfg.run_tests = False
elif k == '--list-hooks':
cfg.list_hooks = True
cfg.run_tests = False
elif k == '--coverage':
cfg.coverage = True
elif k == '--level':
try:
cfg.level = int(v)
except ValueError:
stderr('%s: invalid level: %s' % (argv[0], v))
                stderr('run %s -h for help' % argv[0])
return 1
elif k == '--all-levels':
cfg.level = None
else:
stderr('%s: invalid option: %s' % (argv[0], k))
            stderr('run %s -h for help' % argv[0])
return 1
if args:
cfg.pathname_regex = args[0]
if len(args) > 1:
cfg.test_regex = args[1]
if len(args) > 2:
stderr('%s: too many arguments: %s' % (argv[0], args[2]))
        stderr('run %s -h for help' % argv[0])
return 1
if not cfg.unit_tests and not cfg.functional_tests:
cfg.unit_tests = True
# Set up the python path
sys.path[0] = cfg.basedir
# Set up tracing before we start importing things
cov = None
if cfg.run_tests and cfg.coverage:
from coverage import Coverage
cov = Coverage(omit=['test.py'])
# Finding and importing
test_files = get_test_files(cfg)
if cov is not None:
cov.start()
if cfg.list_tests or cfg.run_tests:
test_cases = get_test_cases(test_files, cfg, cov=cov)
if cfg.list_hooks or cfg.run_tests:
test_hooks = get_test_hooks(test_files, cfg, cov=cov)
# Configure the logging module
import logging
logging.basicConfig()
logging.root.setLevel(logging.CRITICAL)
# Running
success = True
if cfg.list_files:
baselen = len(cfg.basedir) + 1
print("\n".join([fn[baselen:] for fn in test_files]))
if cfg.list_tests:
print("\n".join([test.id() for test in test_cases]))
if cfg.list_hooks:
print("\n".join([str(hook) for hook in test_hooks]))
if cfg.run_tests:
runner = CustomTestRunner(cfg, test_hooks)
suite = unittest.TestSuite()
suite.addTests(test_cases)
if cov is not None:
cov.start()
run_result = runner.run(suite)
if cov is not None:
cov.stop()
success = run_result.wasSuccessful()
del run_result
if cov is not None:
traced_file_types = ('.py', '.pyx', '.pxi', '.pxd')
modules = []
def add_file(_, path, files):
if 'tests' in os.path.relpath(path, cfg.basedir).split(os.sep):
return
for filename in files:
if filename.endswith(traced_file_types):
modules.append(os.path.join(path, filename))
        if cfg.follow_symlinks:
            walker = walk_with_symlinks
        else:
            # os.path.walk was removed in Python 3; emulate its visitor-style API with os.walk
            def walker(top, func, arg):
                for dirpath, dirnames, filenames in os.walk(top):
                    func(arg, dirpath, dirnames + filenames)
walker(os.path.abspath(cfg.basedir), add_file, None)
try:
cov.xml_report(modules, outfile='coverage.xml')
if cfg.coverdir:
cov.html_report(modules, directory=cfg.coverdir)
finally:
# test runs can take a while, so at least try to print something
cov.report()
# That's all
if success:
return 0
else:
return 1
if __name__ == '__main__':
exitcode = main(sys.argv)
sys.exit(exitcode)
|
lxml/lxml
|
test.py
|
Python
|
bsd-3-clause
| 21,306
|
[
"VisIt"
] |
919868ff05ae43f6440c50f6e2c9f2212281f944da7d35c68fc4a4154be28f58
|
# -*- coding: utf-8 -*-
'''
Python wrapper for the Python requests package.
It is a convenience lib of common code used by
several Gentoo utilities.
Copyright:
(c) 2013 Brian Dolbec
Distributed under the terms of the GPL-2 license
Author(s):
Brian Dolbec <dolsen@gentoo.org>
'''
__version__ = '0.4'
|
dol-sen/ssl-fetch
|
sslfetch/__init__.py
|
Python
|
gpl-2.0
| 334
|
[
"Brian"
] |
393727a4d074766103bba30df7f6965c8f405aa171a874ec0724d8b020ed4867
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import types
import itertools
import subprocess
import ray
from dmlc_tracker.tracker import get_host_ip
from bigdl.orca.data.utils import ray_partitions_get_data_label, process_spark_xshards
from bigdl.orca.ray import RayContext
from bigdl.orca.learn.mxnet.mxnet_runner import MXNetRunner
from bigdl.orca.learn.mxnet.utils import find_free_port
from bigdl.orca.learn.ray_estimator import Estimator as OrcaRayEstimator
def partition_refs_to_creator(partition_refs, shuffle=False):
def data_creator(config, kv):
import mxnet as mx
assert "batch_size" in config, "batch_size must be set in config"
data, label = ray_partitions_get_data_label(ray.get(partition_refs),
allow_tuple=False,
allow_list=False)
train_data_iter = mx.io.NDArrayIter(data=data, label=label,
batch_size=config["batch_size"],
shuffle=shuffle)
if "train_resize_batch_num" in config:
train_data_iter = mx.io.ResizeIter(train_data_iter,
config["train_resize_batch_num"])
return train_data_iter
return data_creator
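# Illustrative sketch, not part of the original module: each MXNet worker calls the
# returned data_creator with its training config and kvstore; "batch_size" must be
# present in config. The values below are placeholders.
#
#   creator = partition_refs_to_creator(partition_refs, shuffle=True)
#   train_iter = creator({"batch_size": 64}, kv)  # -> mx.io.NDArrayIter over the partitions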
class Estimator(object):
@staticmethod
def from_mxnet(*, config, model_creator, loss_creator=None, eval_metrics_creator=None,
validation_metrics_creator=None, num_workers=None, num_servers=None,
runner_cores=None):
"""
MXNet Estimator provides an automatic setup for synchronous distributed MXNet training.
:param config: A dictionary for training configurations. Keys must include the following:
optimizer, optimizer_params, log_interval.
optimizer should be an MXNet optimizer or its string representation.
optimizer_params should be a dict in companion with the optimizer. It can contain
learning_rate and other optimization configurations.
log_interval should be an integer, specifying the interval for logging throughput and
metrics information (if any) during the training process.
You can call create_config to directly create it.
You can specify "seed" in config to set random seed for weight initialization.
You can specify "init" in extra_config to set model initializer for gluon models.
:param model_creator: A function that takes config as argument and returns an MXNet model.
The model can be defined either using MXNet symbolic API or imperative(gluon) API.
:param loss_creator: A function that takes config as argument and returns an MXNet loss.
This is not needed for symbolic API where loss is already defined as model output.
:param eval_metrics_creator: A function that takes config as argument and returns one or
a list of MXNet metrics or corresponding string representations of metrics, for example,
'accuracy'. This is not needed if you don't need evaluation on the training data set.
:param validation_metrics_creator: A function that takes config as argument and returns one
or a list of MXNet metrics or corresponding string representations of metrics, for example,
'accuracy'. This is not needed if you don't have validation data throughout the training.
:param num_workers: The number of workers for distributed training. Default to be the number
of nodes in the cluster.
:param num_servers: The number of servers for distributed training. Default is None and in
this case it would be equal to the number of workers.
:param runner_cores: The number of CPU cores allocated for each MXNet worker and server.
Default is None. You may need to specify this for better performance when you run in
cluster.
:return: MXNet Estimator object.
"""
return MXNetEstimator(config=config, model_creator=model_creator,
loss_creator=loss_creator,
eval_metrics_creator=eval_metrics_creator,
validation_metrics_creator=validation_metrics_creator,
num_workers=num_workers, num_servers=num_servers,
runner_cores=runner_cores)
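# Illustrative sketch, not part of the original module: a minimal way to build and use
# the estimator based only on the arguments documented above. The model, config values
# and data creator are made-up placeholders.
#
#   import mxnet as mx
#
#   def model_creator(config):
#       net = mx.gluon.nn.Dense(1)
#       net.initialize()
#       return net
#
#   config = {"optimizer": "sgd",
#             "optimizer_params": {"learning_rate": 0.01},
#             "log_interval": 10}
#   est = Estimator.from_mxnet(config=config,
#                              model_creator=model_creator,
#                              loss_creator=lambda conf: mx.gluon.loss.L2Loss(),
#                              num_workers=2)
#   stats = est.fit(train_data_creator, epochs=2, batch_size=32)
#   est.shutdown()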
class MXNetEstimator(OrcaRayEstimator):
def __init__(self, config, model_creator, loss_creator=None,
eval_metrics_creator=None, validation_metrics_creator=None,
num_workers=None, num_servers=None, runner_cores=None):
ray_ctx = RayContext.get()
if not num_workers:
num_workers = ray_ctx.num_ray_nodes
self.config = {} if config is None else config
assert isinstance(config, dict), "config must be a dict"
for param in ["optimizer", "optimizer_params", "log_interval"]:
assert param in config, param + " must be specified in config"
self.model_creator = model_creator
self.loss_creator = loss_creator
self.validation_metrics_creator = validation_metrics_creator
self.eval_metrics_creator = eval_metrics_creator
self.num_workers = num_workers
self.num_servers = num_servers if num_servers else self.num_workers
# Generate actor class
        # Add dummy custom resources _mxnet_worker and _mxnet_server to distinguish workers from servers
# if runner_cores is specified so that we can place one worker and one server on a node
# for better performance.
Worker = ray.remote(num_cpus=runner_cores, resources={"_mxnet_worker": 1})(MXNetRunner) \
if runner_cores else ray.remote(MXNetRunner)
Server = ray.remote(num_cpus=runner_cores, resources={"_mxnet_server": 1})(MXNetRunner) \
if runner_cores else ray.remote(MXNetRunner)
# Start runners: workers followed by servers
self.workers = [
Worker.remote()
for i in range(self.num_workers)
]
self.servers = [
Server.remote()
for i in range(self.num_servers)
]
self.runners = self.workers + self.servers
env = {
"DMLC_PS_ROOT_URI": str(get_host_ip()),
"DMLC_PS_ROOT_PORT": str(find_free_port()),
"DMLC_NUM_SERVER": str(self.num_servers),
"DMLC_NUM_WORKER": str(self.num_workers),
}
envs = []
for i in range(self.num_workers):
current_env = env.copy()
current_env['DMLC_ROLE'] = 'worker'
envs.append(current_env)
for i in range(self.num_servers):
current_env = env.copy()
current_env['DMLC_ROLE'] = 'server'
envs.append(current_env)
env['DMLC_ROLE'] = 'scheduler'
modified_env = os.environ.copy()
modified_env.update(env)
# Need to contain system env to run bash
# TODO: Need to kill this process manually?
subprocess.Popen("python -c 'import mxnet'", shell=True, env=modified_env)
ray.get([
runner.setup_distributed.remote(envs[i], self.config,
self.model_creator,
self.loss_creator,
self.validation_metrics_creator,
self.eval_metrics_creator)
for i, runner in enumerate(self.runners)
])
def fit(self, data, epochs=1, batch_size=32, validation_data=None, train_resize_batch_num=None):
"""
Trains an MXNet model given train_data (with val_data) for several epochs.
:param data: An instance of SparkXShards or a function that takes config and kv as
arguments and returns an MXNet DataIter/DataLoader for training.
You can specify data related configurations for this function in the config argument above.
kv is an instance of MXNet distributed key-value store. kv.num_workers and kv.rank
can be used in this function to split data for different workers if necessary.
:param epochs: The number of epochs to train the MXNet model. Default is 1.
:param batch_size: The number of samples per batch for each worker. Default is 32.
:param validation_data: An instance of SparkXShards or a function that takes config and
kv as arguments and returns an MXNet DataIter/DataLoader for validation.
You can specify data related configurations for this function in the config argument above.
kv is an instance of MXNet distributed key-value store. kv.num_workers and kv.rank
can be used in this function to split data for different workers if necessary.
:param train_resize_batch_num: The number of batches per epoch to resize to.
Default is None. You might need to specify this if the size of train_data for each
worker varies. MXNet distributed training would crash when the first worker finishes
the training if the workers have unbalanced training data.
See this issue for more details: https://github.com/apache/incubator-mxnet/issues/17651
"""
if validation_data:
assert self.validation_metrics_creator,\
"Metrics not defined for validation, please specify validation_metrics_creator " \
"when creating the Estimator"
from bigdl.orca.data import SparkXShards
if isinstance(data, SparkXShards):
ray_xshards = process_spark_xshards(data, self.num_workers)
if validation_data is None:
def transform_func(worker, partition_refs):
data_creator = partition_refs_to_creator(partition_refs, shuffle=True)
return worker.train.remote(data_creator,
epochs,
batch_size,
None,
train_resize_batch_num)
worker_stats = ray_xshards.reduce_partitions_for_actors(self.workers,
transform_func)
else:
val_ray_xshards = process_spark_xshards(validation_data, self.num_workers)
def zip_func(worker, this_partition_refs, that_partition_refs):
data_creator = partition_refs_to_creator(this_partition_refs,
shuffle=True)
validation_data_creator = partition_refs_to_creator(that_partition_refs,
shuffle=True)
return worker.train.remote(data_creator,
epochs,
batch_size,
validation_data_creator,
train_resize_batch_num)
worker_stats = ray_xshards.zip_reduce_shards_with_actors(val_ray_xshards,
self.workers,
zip_func)
server_stats = [server.train.remote(None, epochs, batch_size,
None, train_resize_batch_num)
for server in self.servers]
server_stats = ray.get(server_stats)
server_stats = list(itertools.chain.from_iterable(server_stats))
stats = worker_stats + server_stats
else: # data_creator functions; should return Iter or DataLoader
assert isinstance(data, types.FunctionType),\
"train_data should be either an instance of SparkXShards or a callable function"
train_data_list = [data] * self.num_workers
if validation_data:
assert isinstance(validation_data, types.FunctionType),\
"val_data should be either an instance of SparkXShards or a callable function"
val_data_list = [validation_data] * self.num_workers
self.runners = self.workers + self.servers
# For servers, data is not used and thus just input a None value.
train_data_list += [None] * self.num_servers
val_data_list += [None] * self.num_servers
stats = ray.get(
[runner.train.remote(train_data_list[i], epochs, batch_size,
val_data_list[i], train_resize_batch_num)
for i, runner in enumerate(self.runners)])
stats = list(itertools.chain.from_iterable(stats))
return stats
def shutdown(self):
"""
Shuts down runners and releases resources.
"""
for runner in self.runners:
runner.shutdown.remote()
runner.__ray_terminate__.remote()
def predict(self, data, batch_size=32):
"""
Predict is not supported in MXNetEstimator
"""
raise NotImplementedError
def evaluate(self, data, batch_size=32, num_steps=None):
"""
Evaluate is not supported in MXNetEstimator
"""
raise NotImplementedError
def get_model(self):
"""
Get_model is not supported in MXNetEstimator
"""
raise NotImplementedError
def save(self, checkpoint):
"""
Save is not supported in MXNetEstimator
"""
raise NotImplementedError
def load(self, checkpoint):
"""
Load is not supported in MXNetEstimator
"""
raise NotImplementedError
# TODO: add model save and restore
# TODO: add predict, evaluate
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/learn/mxnet/estimator.py
|
Python
|
apache-2.0
| 14,442
|
[
"ORCA"
] |
a98f90cc0ad0907612800e579ad302ff0bed912ff0cce9e633cdc1ad487836c7
|
"""
This class is used to define the plot using the plot attributes.
"""
from DIRAC import S_OK
from DIRAC.MonitoringSystem.Client.Types.PilotSubmissionMonitoring import PilotSubmissionMonitoring
from DIRAC.MonitoringSystem.private.Plotters.BasePlotter import BasePlotter
class PilotSubmissionMonitoringPlotter(BasePlotter):
"""
.. class:: PilotSubmissionMonitoringPlotter
    It is used to create the plots.
    param: str _typeName monitoring type
    param: list _typeKeyFields list of keys that we monitor (list of attributes)
"""
_typeName = "PilotSubmissionMonitoring"
_typeKeyFields = PilotSubmissionMonitoring().keyFields
def _reportNumberOfSubmissions(self, reportRequest):
"""It is used to retrieve the data from the database.
:param dict reportRequest: contains attributes used to create the plot.
:return: S_OK or S_ERROR {'data':value1, 'granularity':value2} value1 is a dictionary, value2 is the bucket length
"""
retVal = self._getTimedData(
startTime=reportRequest["startTime"],
endTime=reportRequest["endTime"],
selectField="NumTotal",
preCondDict=reportRequest["condDict"],
metadataDict=None,
)
if not retVal["OK"]:
return retVal
dataDict, granularity = retVal["Value"]
return S_OK({"data": dataDict, "granularity": granularity})
def _plotNumberOfSubmissions(self, reportRequest, plotInfo, filename):
"""It creates the plot.
:param dict reportRequest: plot attributes
:param dict plotInfo: contains all the data which are used to create the plot
:param str filename:
:return: S_OK or S_ERROR { 'plot' : value1, 'thumbnail' : value2 } value1 and value2 are TRUE/FALSE
"""
metadata = {
"title": "Pilot Submissions by %s" % reportRequest["grouping"],
"starttime": reportRequest["startTime"],
"endtime": reportRequest["endTime"],
"span": plotInfo["granularity"],
"skipEdgeColor": True,
"ylabel": "Submissions",
}
plotInfo["data"] = self._fillWithZero(
granularity=plotInfo["granularity"],
startEpoch=reportRequest["startTime"],
endEpoch=reportRequest["endTime"],
dataDict=plotInfo["data"],
)
return self._generateStackedLinePlot(filename=filename, dataDict=plotInfo["data"], metadata=metadata)
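    # Illustrative sketch, not part of the original plotter: the reportRequest keys read
    # by the methods in this class, shown with made-up values. Real requests are built by
    # the Monitoring/Reporting machinery.
    #
    #   reportRequest = {
    #       "startTime": 1680000000,                  # epoch seconds
    #       "endTime": 1680086400,
    #       "grouping": "Site",                       # placeholder grouping key
    #       "condDict": {"Site": ["LCG.CERN.cern"]},  # placeholder selection
    #   }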
def _reportNumSucceeded(self, reportRequest):
"""It is used to retrieve the data from the database.
:param dict reportRequest: contains attributes used to create the plot.
:return: S_OK or S_ERROR {'data':value1, 'granularity':value2} value1 is a dictionary, value2 is the bucket length
"""
retVal = self._getTimedData(
startTime=reportRequest["startTime"],
endTime=reportRequest["endTime"],
selectField="NumSucceeded",
preCondDict=reportRequest["condDict"],
metadataDict=None,
)
if not retVal["OK"]:
return retVal
dataDict, granularity = retVal["Value"]
return S_OK({"data": dataDict, "granularity": granularity})
def _plotNumSucceeded(self, reportRequest, plotInfo, filename):
"""
        Make a 2-dimensional pilot submission efficiency plot
:param dict reportRequest: Condition to select data
:param dict plotInfo: Data for plot.
:param str filename: File name
"""
metadata = {
"title": "Pilot Submission efficiency by %s" % reportRequest["grouping"],
"starttime": reportRequest["startTime"],
"endtime": reportRequest["endTime"],
"span": plotInfo["granularity"],
}
return self._generateQualityPlot(filename, plotInfo["data"], metadata)
|
DIRACGrid/DIRAC
|
src/DIRAC/MonitoringSystem/private/Plotters/PilotSubmissionMonitoringPlotter.py
|
Python
|
gpl-3.0
| 3,909
|
[
"DIRAC"
] |
bb5ea7102efc3598d01b6fb7fadf971bdbadfb4b35e74f007571e4edc7483c73
|
import numpy as np
from ase import Atoms
from gpaw import GPAW, FermiDirac
from gpaw.test import equal
calc = GPAW(nbands=1)#, txt=None)
atoms = Atoms('He', pbc=True, calculator=calc)
atoms.center(vacuum=3)
e0 = atoms.get_potential_energy()
niter0 = calc.get_number_of_iterations()
try:
calc.get_fermi_level()
except ValueError:
pass # It *should* raise an error
else:
    raise RuntimeError('get_fermi_level should not be possible for width=0')
calc.set(nbands=3, convergence={'bands':2})
atoms.get_potential_energy()
homo, lumo = calc.get_homo_lumo()
equal(homo, -15.4473, 0.01)
equal(lumo, -0.2566, 0.01)
calc.write('test.gpw')
assert np.all(GPAW('test.gpw', txt=None).get_homo_lumo() == (homo, lumo))
ef = calc.get_fermi_level()
equal(ef, -7.85196, 0.01)
calc.set(occupations=FermiDirac(0.1))
e1 = atoms.get_potential_energy()
niter1 = calc.get_number_of_iterations()
ef = calc.get_fermi_level()
equal(ef, -7.85196, 0.01)
calc.write('test.gpw')
equal(GPAW('test.gpw', txt=None).get_fermi_level(), ef, 1e-8)
|
qsnake/gpaw
|
gpaw/test/fermilevel.py
|
Python
|
gpl-3.0
| 1,024
|
[
"ASE",
"GPAW"
] |
43ae76d73da9f70a058cc5fe78a020e7239a907aa31ea0ac978ed1b96d78127a
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Perform DNA-DNA alignment using BLAST, NUCMER and BLAT. Keep the interface the
same and does parallelization both in core and on grid.
"""
import os.path as op
import sys
import shutil
import logging
from jcvi.utils.cbook import depends
from jcvi.apps.base import (
OptionParser,
ActionDispatcher,
sh,
get_abs_path,
which,
mkdir,
)
@depends
def run_formatdb(infile=None, outfile=None, dbtype="nucl"):
cmd = "makeblastdb"
cmd += " -dbtype {0} -in {1}".format(dbtype, infile)
sh(cmd)
@depends
def run_blat(
infile=None,
outfile=None,
db="UniVec_Core",
pctid=95,
hitlen=50,
cpus=16,
overwrite=True,
):
cmd = "pblat -threads={0}".format(cpus) if which("pblat") else "blat"
cmd += " {0} {1} -out=blast8 {2}".format(db, infile, outfile)
sh(cmd)
blatfile = outfile
filtered_blatfile = outfile + ".P{0}L{1}".format(pctid, hitlen)
run_blast_filter(
infile=blatfile, outfile=filtered_blatfile, pctid=pctid, hitlen=hitlen
)
if overwrite:
shutil.move(filtered_blatfile, blatfile)
@depends
def run_vecscreen(infile=None, outfile=None, db="UniVec_Core", pctid=None, hitlen=None):
"""
BLASTN parameters reference:
http://www.ncbi.nlm.nih.gov/VecScreen/VecScreen_docs.html
"""
db = get_abs_path(db)
nin = db + ".nin"
run_formatdb(infile=db, outfile=nin)
cmd = "blastn"
cmd += " -task blastn"
cmd += " -query {0} -db {1} -out {2}".format(infile, db, outfile)
cmd += " -penalty -5 -gapopen 4 -gapextend 4 -dust yes -soft_masking true"
cmd += " -searchsp 1750000000000 -evalue 0.01 -outfmt 6 -num_threads 8"
sh(cmd)
@depends
def run_megablast(
infile=None,
outfile=None,
db=None,
wordsize=None,
pctid=98,
hitlen=100,
best=None,
evalue=0.01,
task="megablast",
cpus=16,
):
assert db, "Need to specify database fasta file."
db = get_abs_path(db)
nin = db + ".nin"
nin00 = db + ".00.nin"
nin = nin00 if op.exists(nin00) else (db + ".nin")
run_formatdb(infile=db, outfile=nin)
cmd = "blastn"
cmd += " -query {0} -db {1} -out {2}".format(infile, db, outfile)
cmd += " -evalue {0} -outfmt 6 -num_threads {1}".format(evalue, cpus)
cmd += " -task {0}".format(task)
if wordsize:
cmd += " -word_size {0}".format(wordsize)
if pctid:
cmd += " -perc_identity {0}".format(pctid)
if best:
cmd += " -max_target_seqs {0}".format(best)
sh(cmd)
if pctid and hitlen:
blastfile = outfile
filtered_blastfile = outfile + ".P{0}L{1}".format(pctid, hitlen)
run_blast_filter(
infile=blastfile, outfile=filtered_blastfile, pctid=pctid, hitlen=hitlen
)
shutil.move(filtered_blastfile, blastfile)
def run_blast_filter(infile=None, outfile=None, pctid=95, hitlen=50):
from jcvi.formats.blast import filter
logging.debug("Filter BLAST result (pctid={0}, hitlen={1})".format(pctid, hitlen))
pctidopt = "--pctid={0}".format(pctid)
hitlenopt = "--hitlen={0}".format(hitlen)
filter([infile, pctidopt, hitlenopt])
def main():
actions = (
("blast", "run blastn using query against reference"),
("blat", "run blat using query against reference"),
("blasr", "run blasr on a set of pacbio reads"),
("nucmer", "run nucmer using query against reference"),
("last", "run last using query against reference"),
("lastgenome", "run whole genome LAST"),
("lastgenomeuniq", "run whole genome LAST and screen for 1-to-1 matches"),
("minimap", "run minimap2 aligner"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def minimap(args):
"""
%prog minimap ref.fasta query.fasta
Wrap minimap2 aligner using query against sequences. When query and ref
    are the same, we are in "self-scan" mode (e.g. useful for finding internal
    duplications resulting from mis-assemblies).
"""
from jcvi.apps.grid import MakeManager
from jcvi.formats.fasta import Fasta
p = OptionParser(minimap.__doc__)
p.add_option(
"--chunks",
type="int",
default=2000000,
help="Split ref.fasta into chunks of size in self-scan mode",
)
p.set_outdir(outdir="outdir")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, query = args
chunks = opts.chunks
outdir = opts.outdir
if ref != query:
raise NotImplementedError
# "self-scan" mode
# build faidx (otherwise, parallel make may complain)
sh("samtools faidx {}".format(ref))
f = Fasta(ref)
mkdir(outdir)
mm = MakeManager()
for name, size in f.itersizes():
start = 0
for end in range(chunks, size, chunks):
fafile = op.join(outdir, "{}_{}_{}.fa".format(name, start + 1, end))
cmd = "samtools faidx {} {}:{}-{} -o {}".format(
ref, name, start + 1, end, fafile
)
mm.add(ref, fafile, cmd)
paffile = fafile.rsplit(".", 1)[0] + ".paf"
cmd = "minimap2 -P {} {} > {}".format(fafile, fafile, paffile)
mm.add(fafile, paffile, cmd)
epsfile = fafile.rsplit(".", 1)[0] + ".eps"
cmd = "minidot {} > {}".format(paffile, epsfile)
mm.add(paffile, epsfile, cmd)
start += chunks
mm.write()
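# Illustrative sketch, not part of the original module: invoking the self-scan mode above
# through the ActionDispatcher CLI; file names and chunk size are placeholders.
#
#   python -m jcvi.apps.align minimap genome.fasta genome.fasta --chunks 1000000
#
# This emits a makefile (via MakeManager) that splits the reference into chunks, runs
# "minimap2 -P" on each chunk against itself, and draws a dot plot per chunk with minidot.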
def nucmer(args):
"""
%prog nucmer ref.fasta query.fasta
Run NUCMER using query against reference. Parallel implementation derived
from: <https://github.com/fritzsedlazeck/sge_mummer>
"""
from itertools import product
from jcvi.apps.grid import MakeManager
from jcvi.formats.base import split
p = OptionParser(nucmer.__doc__)
p.add_option(
"--chunks", type="int", help="Split both query and subject into chunks"
)
p.set_params(prog="nucmer", params="-l 100 -c 500")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
ref, query = args
cpus = opts.cpus
nrefs = nqueries = opts.chunks or int(cpus ** 0.5)
refdir = ref.split(".")[0] + "-outdir"
querydir = query.split(".")[0] + "-outdir"
reflist = split([ref, refdir, str(nrefs)]).names
querylist = split([query, querydir, str(nqueries)]).names
mm = MakeManager()
for i, (r, q) in enumerate(product(reflist, querylist)):
pf = "{0:04d}".format(i)
cmd = "nucmer -maxmatch"
cmd += " {0}".format(opts.extra)
cmd += " {0} {1} -p {2}".format(r, q, pf)
deltafile = pf + ".delta"
mm.add((r, q), deltafile, cmd)
print(cmd)
mm.write()
def blasr(args):
"""
%prog blasr ref.fasta fofn
Run blasr on a set of PacBio reads. This is based on a divide-and-conquer
strategy described below.
"""
from more_itertools import grouper
from jcvi.apps.grid import MakeManager
p = OptionParser(blasr.__doc__)
p.set_cpus(cpus=8)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
reffasta, fofn = args
flist = sorted([x.strip() for x in open(fofn)])
h5list = []
mm = MakeManager()
for i, fl in enumerate(grouper(flist, 3)):
chunkname = "chunk{0:03d}".format(i)
fn = chunkname + ".fofn"
h5 = chunkname + ".cmp.h5"
fw = open(fn, "w")
print("\n".join(fl), file=fw)
fw.close()
cmd = "pbalign {0} {1} {2}".format(fn, reffasta, h5)
cmd += " --nproc {0} --forQuiver --tmpDir .".format(opts.cpus)
mm.add((fn, reffasta), h5, cmd)
h5list.append(h5)
# Merge h5, sort and repack
allh5 = "all.cmp.h5"
tmph5 = "tmp.cmp.h5"
cmd_merge = "cmph5tools.py merge --outFile {0}".format(allh5)
cmd_merge += " " + " ".join(h5list)
cmd_sort = "cmph5tools.py sort --deep {0} --tmpDir .".format(allh5)
cmd_repack = "h5repack -f GZIP=1 {0} {1}".format(allh5, tmph5)
cmd_repack += " && mv {0} {1}".format(tmph5, allh5)
mm.add(h5list, allh5, [cmd_merge, cmd_sort, cmd_repack])
# Quiver
pf = reffasta.rsplit(".", 1)[0]
variantsgff = pf + ".variants.gff"
consensusfasta = pf + ".consensus.fasta"
cmd_faidx = "samtools faidx {0}".format(reffasta)
cmd = "quiver -j 32 {0}".format(allh5)
cmd += " -r {0} -o {1} -o {2}".format(reffasta, variantsgff, consensusfasta)
mm.add(allh5, consensusfasta, [cmd_faidx, cmd])
mm.write()
def get_outfile(reffasta, queryfasta, suffix="blast", outdir=None):
q = op.basename(queryfasta).split(".")[0]
r = op.basename(reffasta).split(".")[0]
outfile = ".".join((q, r, suffix))
if outdir:
outfile = op.join(outdir, outfile)
return outfile
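# Illustrative sketch, not part of the original module: example outputs of get_outfile
# for made-up file names.
#
#   get_outfile("athaliana.fasta", "grape.fasta")                -> "grape.athaliana.blast"
#   get_outfile("athaliana.fasta", "grape.fasta", suffix="blat") -> "grape.athaliana.blat"
#   get_outfile("ref.fa", "qry.fa", outdir="out")                -> "out/qry.ref.blast"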
def blat(args):
"""
%prog blat ref.fasta query.fasta
Calls blat and filters BLAST hits.
"""
p = OptionParser(blat.__doc__)
p.set_align(pctid=95, hitlen=30)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
reffasta, queryfasta = args
blastfile = get_outfile(reffasta, queryfasta, suffix="blat")
run_blat(
infile=queryfasta,
outfile=blastfile,
db=reffasta,
pctid=opts.pctid,
hitlen=opts.hitlen,
cpus=opts.cpus,
overwrite=False,
)
return blastfile
def blast(args):
"""
%prog blast ref.fasta query.fasta
Calls blast and then filters the BLAST hits. The default task is megablast.
"""
task_choices = ("blastn", "blastn-short", "dc-megablast", "megablast", "vecscreen")
p = OptionParser(blast.__doc__)
p.set_align(pctid=0, evalue=0.01)
p.add_option("--wordsize", type="int", help="Word size")
p.add_option("--best", default=1, type="int", help="Only look for best N hits")
p.add_option(
"--task", default="megablast", choices=task_choices, help="Task of the blastn"
)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
reffasta, queryfasta = args
blastfile = get_outfile(reffasta, queryfasta)
run_megablast(
infile=queryfasta,
outfile=blastfile,
db=reffasta,
wordsize=opts.wordsize,
pctid=opts.pctid,
evalue=opts.evalue,
hitlen=None,
best=opts.best,
task=opts.task,
cpus=opts.cpus,
)
return blastfile
def lastgenome(args):
"""
%prog genome_A.fasta genome_B.fasta
Run LAST by calling LASTDB, LASTAL. The script runs the following steps:
$ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa
$ lastal -E0.05 -C2 Chr10A-NEAR Chr10A.fa -fTAB > Chr10A.Chr10A.tab
$ last-dotplot Chr10A.Chr10A.tab
"""
from jcvi.apps.grid import MakeManager
p = OptionParser(lastgenome.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gA, gB = args
mm = MakeManager()
bb = lambda x: op.basename(x).rsplit(".", 1)[0]
gA_pf, gB_pf = bb(gA), bb(gB)
# Build LASTDB
dbname = "-".join((gA_pf, "NEAR"))
dbfile = dbname + ".suf"
build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbname, gA)
mm.add(gA, dbfile, build_db_cmd)
# Run LASTAL
tabfile = "{}.{}.tab".format(gA_pf, gB_pf)
lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB)
lastal_cmd += " -fTAB > {}".format(tabfile)
mm.add([dbfile, gB], tabfile, lastal_cmd)
mm.write()
def lastgenomeuniq(args):
"""
%prog genome_A.fasta genome_B.fasta
Run LAST by calling LASTDB, LASTAL and LAST-SPLIT. The recipe is based on
tutorial here:
<https://github.com/mcfrith/last-genome-alignments>
The script runs the following steps:
$ lastdb -P0 -uNEAR -R01 Chr10A-NEAR Chr10A.fa
$ lastal -E0.05 -C2 Chr10A-NEAR Chr10B.fa | last-split -m1 | maf-swap | last-split -m1 -fMAF > Chr10A.Chr10B.1-1.maf
$ maf-convert -n blasttab Chr10A.Chr10B.1-1.maf > Chr10A.Chr10B.1-1.blast
Works with LAST v959.
"""
from jcvi.apps.grid import MakeManager
p = OptionParser(lastgenomeuniq.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gA, gB = args
mm = MakeManager()
bb = lambda x: op.basename(x).rsplit(".", 1)[0]
gA_pf, gB_pf = bb(gA), bb(gB)
# Build LASTDB
dbname = "-".join((gA_pf, "NEAR"))
dbfile = dbname + ".suf"
build_db_cmd = "lastdb -P0 -uNEAR -R01 {} {}".format(dbname, gA)
mm.add(gA, dbfile, build_db_cmd)
# Run LASTAL
maffile = "{}.{}.1-1.maf".format(gA_pf, gB_pf)
lastal_cmd = "lastal -E0.05 -C2 {} {}".format(dbname, gB)
lastal_cmd += " | last-split -m1"
lastal_cmd += " | maf-swap"
lastal_cmd += " | last-split -m1 -fMAF > {}".format(maffile)
mm.add([dbfile, gB], maffile, lastal_cmd)
# Convert to BLAST format
blastfile = maffile.replace(".maf", ".blast")
convert_cmd = "maf-convert -n blasttab {} > {}".format(maffile, blastfile)
mm.add(maffile, blastfile, convert_cmd)
mm.write()
@depends
def run_lastdb(
infile=None, outfile=None, mask=False, lastdb_bin="lastdb", dbtype="nucl"
):
outfilebase = outfile.rsplit(".", 1)[0]
db = "-p " if dbtype == "prot" else ""
mask = "-c " if mask else ""
cmd = "{0} {1}{2}{3} {4}".format(lastdb_bin, db, mask, outfilebase, infile)
sh(cmd)
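# Example of the command assembled above (illustrative): with infile="subject.fa",
# outfile="subject.prj", dbtype="prot" and mask=True this runs
#   lastdb -p -c subject subject.fa
# while the nucleotide, unmasked default reduces to
#   lastdb subject subject.fa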
def last(args, dbtype=None):
"""
%prog database.fasta query.fasta
Run LAST by calling LASTDB and LASTAL. LAST program available:
<http://last.cbrc.jp>
Works with LAST-719.
"""
p = OptionParser(last.__doc__)
p.add_option(
"--dbtype",
default="nucl",
choices=("nucl", "prot"),
help="Molecule type of subject database",
)
p.add_option("--path", help="Specify LAST path")
p.add_option(
"--mask", default=False, action="store_true", help="Invoke -c in lastdb"
)
p.add_option(
"--format",
default="BlastTab",
choices=("TAB", "MAF", "BlastTab", "BlastTab+"),
help="Output format",
)
p.add_option(
"--minlen",
default=0,
type="int",
help="Filter alignments by how many bases match",
)
p.add_option("--minid", default=0, type="int", help="Minimum sequence identity")
p.set_cpus()
p.set_outdir()
p.set_params()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
subject, query = args
path = opts.path
cpus = opts.cpus
if not dbtype:
dbtype = opts.dbtype
getpath = lambda x: op.join(path, x) if path else x
lastdb_bin = getpath("lastdb")
lastal_bin = getpath("lastal")
subjectdb = subject.rsplit(".", 1)[0]
run_lastdb(
infile=subject,
outfile=subjectdb + ".prj",
mask=opts.mask,
lastdb_bin=lastdb_bin,
dbtype=dbtype,
)
u = 2 if opts.mask else 0
cmd = "{0} -u {1}".format(lastal_bin, u)
cmd += " -P {0} -i3G".format(cpus)
cmd += " -f {0}".format(opts.format)
cmd += " {0} {1}".format(subjectdb, query)
minlen = opts.minlen
minid = opts.minid
extra = opts.extra
assert minid != 100, "Perfect match not yet supported"
mm = minid / (100 - minid)
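# Rationale (explanatory note): with the match score fixed at 1 via -r1,
# using minid/(100 - minid) as the mismatch and gap penalties means an
# alignment only keeps a positive score when its identity exceeds roughly
# minid percent (e.g. minid=95 gives penalties of 19).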
if minlen:
extra += " -e{0}".format(minlen)
if minid:
extra += " -r1 -q{0} -a{0} -b{0}".format(mm)
if extra:
cmd += " " + extra.strip()
lastfile = get_outfile(subject, query, suffix="last", outdir=opts.outdir)
sh(cmd, outfile=lastfile)
return lastfile
if __name__ == "__main__":
main()
|
tanghaibao/jcvi
|
jcvi/apps/align.py
|
Python
|
bsd-2-clause
| 15,771
|
[
"BLAST"
] |
92b91427409bfd5ccffed8647b92cb3aa22bffddeae4063884b316b070477937
|
""" This fixer changes all occurrences of the form 'module.member' from the
global dictionary MAPPING's keys and replaces them with the corresponding
value. It adds the proper imports to make it available in the script.
For example this code::
import orange
import orngSVM
data = orange.ExampleTable("iris")
learner = orngSVM.SVMLearner(name='svm')
will be replaced with::
import Orange.data
import Orange.classification.svm
data = Orange.data.Table('iris')
learner = Orange.classification.svm.SVMLearner(name='svm')
Try to add as many name mappings as possible (this fixer is preferred over,
and will run before, the fix_orange_imports fixer).
"""
from lib2to3 import fixer_base
from lib2to3 import pytree
from lib2to3.fixer_util import Name, Dot, touch_import
# Keys must be in the form 'orange.name', not 'name' or 'orange.bla.name'.
# If the value names a dotted name inside the package, the package and the
# name must be separated by ':', e.g. Orange.classification:Classifier.GetValue
# indicates that Classifier.GetValue is a name inside the package
# Orange.classification. Do not use Orange.classification.Classifier.GetValue,
# because that would be interpreted as if Orange.classification.Classifier
# were itself a package.
#
MAPPING = {"orange.ExampleTable": "Orange.data.Table",
"orange.Example": "Orange.data.Instance",
"orange.Domain": "Orange.data.Domain",
"orange.Value": "Orange.data.Value",
"orange.VarTypes": "Orange.feature.Type",
"orange.Variable": "Orange.feature.Descriptor",
"orange.EnumVariable": "Orange.feature.Discrete",
"orange.FloatVariable": "Orange.feature.Continuous",
"orange.StringVariable": "Orange.feature.String",
"orange.PythonVariable": "Orange.feature.Python",
"orngOutlier.OutlierDetection" : "Orange.data.outliers.OutlierDetection",
"orange.newmetaid": "Orange.feature:Descriptor.new_meta_id",
"orange.SymMatrix": "Orange.misc.SymMatrix",
"orange.ClassifierFromVar": "Orange.classification.ClassifierFromVar",
"orange.ClassifierFromVarFD": "Orange.classification.ClassifierFromVarFD",
"orange.GetValue": "Orange.classification:Classifier.GetValue",
"orange.GetProbabilities": "Orange.classification:Classifier.GetProbabilities",
"orange.GetBoth": "Orange.classification:Classifier.GetBoth",
"orange.Distribution": "Orange.statistics.distribution.Distribution",
"orange.DiscDistribution": "Orange.statistics.distribution.Discrete",
"orange.ContDistribution": "Orange.statistics.distribution.Continuous",
"orange.GaussianDistribution": "Orange.statistics.distribution.Gaussian",
"orange.DomainDistributions": "Orange.statistics.distribution.Domain",
"orange.BasicAttrStat": "Orange.statistics.basic.Variable",
"orange.DomainBasicAttrStat": "Orange.statistics.basic.Domain",
"orange.ContingencyAttrAttr": "Orange.statistics.contingency.VarVar",
"orange.ContingencyClass": "Orange.statistics.contingency.Class",
"orange.ContingencyAttrClass": "Orange.statistics.contingency.VarClass",
"orange.ContingencyClassAttr": "Orange.statistics.contingency.ClassVar",
"orange.DomainContingency": "Orange.statistics.contingency.Domain",
"orange.Contingency": "Orange.statistics.contingency.Table",
"orange.MeasureAttribute": "Orange.feature.scoring.Score",
"orange.MeasureAttributeFromProbabilities": "Orange.feature.scoring.ScoreFromProbabilities",
"orange.MeasureAttribute_gainRatio": "Orange.feature.scoring.GainRatio",
"orange.MeasureAttribute_relief": "Orange.feature.scoring.Relief",
"orange.MeasureAttribute_info": "Orange.feature.scoring.InfoGain",
"orange.MeasureAttribute_gini": "Orange.feature.scoring.Gini",
"orange.CostMatrix" : "Orange.misc.CostMatrix",
"orange.MeasureAttribute_relevance": "Orange.feature.scoring.Relevance",
"orange.MeasureAttribute_cost": "Orange.feature.scoring.Cost",
"orange.MeasureAttribute_MSE": "Orange.feature.scoring.MSE",
"orange.EntropyDiscretization": "Orange.feature.discretization.Entropy",
"orange.EquiDistDiscretization": "Orange.feature.discretization.EqualWidth",
"orange.EquiNDiscretization": "Orange.feature.discretization.EqualFreq",
"orange.BiModalDiscretization": "Orange.feature.discretization.BiModal",
"orngFSS.attMeasure": "Orange.feature.scoring.score_all",
"orngFSS.bestNAtts": "Orange.feature.selection.top_rated",
"orngFSS.attsAbovethreshold": "Orange.feature.selection.above_threshold",
"orngFSS.selectBestNAtts": "Orange.feature.selection.select",
"orngFSS.selectAttsAboveThresh": "Orange.feature.selection.select_above_threshold",
"orngFSS.filterRelieff": "Orange.feature.selection.select_relief",
"orngFSS.FilterAttsAboveThresh": "Orange.feature.selection.FilterAboveThreshold",
"orngFSS.FilterAttsAboveThresh_Class": "Orange.feature.selection.FilterAboveThreshold",
"orngFSS.FilterBestNAtts": "Orange.feature.selection.FilterBestN",
"orngFSS.FilterBestNAtts_Class": "Orange.feature.selection.FilterBestN",
"orngFSS.FilterRelief": "Orange.feature.selection.FilterRelief",
"orngFSS.FilterRelief_Class": "Orange.feature.selection.FilterRelief",
"orngFSS.FilteredLearner": "Orange.feature.selection.FilteredLearner",
"orngFSS.FilteredLearner_Class": "Orange.feature.selection.FilteredLearner",
"orngFSS.FilteredClassifier": "Orange.feature.selection.FilteredClassifier",
"orngFSS.StepwiseLearner_Class": "Orange.classification.wrappers.StepwiseLearner",
"orngFSS.StepwiseLearner": "Orange.classification.wrappers.StepwiseLearner",
"orange.Imputer": "Orange.feature.imputation.Imputer",
"orange.ImputerConstructor": "Orange.feature.imputation.Constructor",
"orange.ImputerConstructor_minimal": "Orange.feature.imputation.MinimalConstructor",
"orange.ImputerConstructor_maximal": "Orange.feature.imputation.MaximalConstructor",
"orange.ImputerConstructor_average": "Orange.feature.imputation.AverageConstructor",
"orange.ImputerConstructor_model": "Orange.feature.imputation.ModelConstructor",
"orange.ImputerConstructor_asValue": "Orange.feature.imputation.AsValueConstructor",
"orange.ImputerConstructor_random": "Orange.feature.imputation.Random",
"orange.Imputer_defaults": "Orange.feature.imputation.Defaults",
"orange.Imputer_model": "Orange.feature.imputation.Model",
"orange.Imputer_random": "Orange.feature.imputation.Random",
"orange.Imputer_random": "Orange.feature.imputation.AsValue",
"orange.ImputeClassifier": "Orange.feature.imputation.Classifier",
"orange.ExamplesDistance": "Orange.distance.Distance",
"orange.ExamplesDistance_Normalized": "Orange.distance.DistanceNormalized",
"orange.ExamplesDistanceConstructor": "Orange.distance.DistanceConstructor",
"orange.ExamplesDistance_Hamming": "Orange.distance.HammingDistance",
"orange.ExamplesDistance_DTW": "Orange.distance.DTWDistance",
"orange.ExamplesDistance_Euclidean": "Orange.distance.EuclideanDistance",
"orange.ExamplesDistance_Manhattan": "Orange.distance.ManhattanDistance",
"orange.ExamplesDistance_Maximal": "Orange.distance.MaximalDistance",
"orange.ExamplesDistance_Relief": "Orange.distance.ReliefDistance",
"orange.ExamplesDistanceConstructor_DTW": "Orange.distance.DTW",
"orange.ExamplesDistanceConstructor_Euclidean": "Orange.distance.Euclidean",
"orange.ExamplesDistanceConstructor_Hamming": "Orange.distance.Hamming",
"orange.ExamplesDistanceConstructor_Manhattan": "Orange.distance.Manhattan",
"orange.ExamplesDistanceConstructor_Maximal": "Orange.distance.Maximal",
"orange.ExamplesDistanceConstructor_Relief": "Orange.distance.Relief",
"orngClustering.ExamplesDistanceConstructor_PearsonR": "Orange.distance.PearsonR",
"orngClustering.ExamplesDistance_PearsonR": "Orange.distance.PearsonRDistance",
"orngClustering.ExamplesDistanceConstructor_SpearmanR": "Orange.distance.SpearmanR",
"orngClustering.ExamplesDistance_SpearmanR": "Orange.distance.SpearmanRDistance",
"orngClustering.KMeans": "Orange.clustering.kmeans.Clustering",
"orngClustering.kmeans_init_random": "Orange.clustering.kmeans.init_random",
"orngClustering.kmeans_init_diversity": "Orange.clustering.kmeans.init_diversity",
"orngClustering.KMeans_init_hierarchicalClustering": "Orange.clustering.kmeans.init_hclustering",
"orngClustering.data_center": "Orange.clustering.kmeans.data_center",
"orngClustering.plot_silhouette": "Orange.clustering.kmeans.plot_silhouette",
"orngClustering.score_distance_to_centroids": "Orange.clustering.kmeans.score_distance_to_centroids",
"orngClustering.score_silhouette": "Orange.clustering.kmeans.score_silhouette",
"orange.HierarchicalClustering": "Orange.clustering.hierarchical.HierarchicalClustering",
"orange.HierarchicalCluster": "Orange.clustering.hierarchical.HierarchicalCluster",
"orngClustering.hierarchicalClustering": "Orange.clustering.hierarchical.clustering",
"orngClustering.hierarchicalClustering_attributes": "Orange.clustering.hierarchical.clustering_features",
"orngClustering.hierarchicalClustering_clusterList": "Orange.clustering.hierarchical.cluster_to_list",
"orngClustering.hierarchicalClustering_topClusters": "Orange.clustering.hierarchical.top_clusters",
"orngClustering.hierarhicalClustering_topClustersMembership": "Orange.clustering.hierarchical.top_cluster_membership",
"orngClustering.orderLeaves": "Orange.clustering.hierarchical.order_leaves",
"orngClustering.dendrogram_draw": "Orange.clustering.hierarchical.dendrogram_draw",
"orngClustering.DendrogramPlot": "Orange.clustering.hierarchical.DendrogramPlot",
"orngClustering.DendrogramPlotPylab": "Orange.clustering.hierarchical.DendrogramPlotPylab",
"orngSVM.RBFKernelWrapper": "Orange.classification.svm.kernels.RBFKernelWrapper",
"orngSVM.CompositeKernelWrapper": "Orange.classification.svm.kernels.CompositeKernelWrapper",
"orngSVM.KernelWrapper": "Orange.classification.svm.kernels.KernelWrapper",
"orngSVM.DualKernelWrapper": "Orange.classification.svm.kernels.DualKernelWrapper",
"orngSVM.PolyKernelWrapper": "Orange.classification.svm.kernels.PolyKernelWrapper",
"orngSVM.AdditionKernelWrapper": "Orange.classification.svm.kernels.AdditionKernelWrapper",
"orngSVM.MultiplicationKernelWrapper": "Orange.classification.svm.kernels.MultiplicationKernelWrapper",
"orngSVM.SparseLinKernel": "Orange.classification.svm.kernels.SparseLinKernel",
"orngSVM.BagOfWords": "Orange.classification.svm.kernels.BagOfWords",
"orngSVM.SVMLearner": "Orange.classification.svm.SVMLearner",
"orngSVM.SVMLearnerEasy": "Orange.classification.svm.SVMLearnerEasy",
"orngSVM.SVMLearnerSparse": "Orange.classification.svm.SVMLearnerSparse",
"orange.kNNLearner":"Orange.classification.knn.kNNLearner",
"orange.kNNClassifier":"Orange.classification.knn.kNNClassifier",
"orange.FindNearest_BruteForce":"Orange.classification.knn.FindNearest",
"orange.FindNearestConstructor_BruteForce":"Orange.classification.knn.FindNearestConstructor",
"orange.P2NN":"Orange.classification.knn.P2NN",
"orange.BayesLearner":"Orange.classification.bayes.NaiveLearner",
"orange.BayesClassifier":"Orange.classification.bayes.NaiveClassifier",
"orngBayes.BayesLearner":"Orange.classification.bayes.NaiveLearner",
"orngBayes.BayesClassifier":"Orange.classification.bayes.NaiveClassifier",
"orngBayes.printModel": "Orange.classification.bayes.printModel",
"orange.TreeLearner": "Orange.classification.tree.TreeLearner",
"orange.TreeClassifier": "Orange.classification.tree.TreeClassifier",
"orange.C45Learner": "Orange.classification.tree.C45Learner",
"orange.C45Classifier": "Orange.classification.tree.C45Classifier",
"orange.C45TreeNode": "Orange.classification.tree.C45Node",
"orange.C45TreeNodeList": "Orange.classification.tree.C45NodeList",
"orange.TreeDescender": "Orange.classification.tree.Descender",
"orange.TreeDescender_UnknownMergeAsBranchSizes": "Orange.classification.tree.Descender_UnknownMergeAsBranchSizes",
"orange.TreeDescender_UnknownMergeAsSelector": "Orange.classification.tree.Descender_UnknownMergeAsSelector",
"orange.TreeDescender_UnknownToBranch": "Orange.classification.tree.Descender_UnknownToBranch",
"orange.TreeDescender_UnknownToCommonBranch": "Orange.classification.tree.Descender_UnknownToCommonBranch",
"orange.TreeDescender_UnknownToCommonSelector":"Orange.classification.tree.Descender_UnknownToCommonSelector",
"orange.TreeExampleSplitter":"Orange.classification.tree.Splitter",
"orange.TreeExampleSplitter_IgnoreUnknowns":"Orange.classification.tree.Splitter_IgnoreUnknowns",
"orange.TreeExampleSplitter_UnknownsAsBranchSizes":"Orange.classification.tree.Splitter_UnknownsAsBranchSizes",
"orange.TreeExampleSplitter_UnknownsAsSelector":"Orange.classification.tree.Splitter_UnknownsAsSelector",
"orange.TreeExampleSplitter_UnknownsToAll":"Orange.classification.tree.Splitter_UnknownsToAll",
"orange.TreeExampleSplitter_UnknownsToBranch":"Orange.classification.tree.Splitter_UnknownsToBranch",
"orange.TreeExampleSplitter_UnknownsToCommon":"Orange.classification.tree.Splitter_UnknownsToCommon",
"orange.TreeExampleSplitter_UnknownsToRandom":"Orange.classification.tree.Splitter_UnknownsToRandom",
"orange.TreeNode":"Orange.classification.tree.Node",
"orange.TreeNodeList":"Orange.classification.tree.NodeList",
"orange.TreePruner":"Orange.classification.tree.Pruner",
"orange.TreePruner_SameMajority":"Orange.classification.tree.Pruner_SameMajority",
"orange.TreePruner_m":"Orange.classification.tree.Pruner_m",
"orange.TreeSplitConstructor":"Orange.classification.tree.SplitConstructor",
"orange.TreeSplitConstructor_Combined":"Orange.classification.tree.SplitConstructor_Combined",
"orange.TreeSplitConstructor_Measure":"Orange.classification.tree.SplitConstructor_Score",
"orange.TreeSplitConstructor_Attribute":"Orange.classification.tree.SplitConstructor_Feature",
"orange.TreeSplitConstructor_ExhaustiveBinary":"Orange.classification.tree.SplitConstructor_ExhaustiveBinary",
"orange.TreeSplitConstructor_OneAgainstOthers":"Orange.classification.tree.SplitConstructor_OneAgainstOthers",
"orange.TreeSplitConstructor_Threshold":"Orange.classification.tree.SplitConstructor_Threshold",
"orange.TreeStopCriteria":"Orange.classification.tree.StopCriteria",
"orange.TreeStopCriteria_Python":"Orange.classification.tree.StopCriteria_Python",
"orange.TreeStopCriteria_common":"Orange.classification.tree.StopCriteria_common",
"orngTree.printTxt": "Orange.classification.tree:TreeClassifier.dump",
"orngTree.printTree": "Orange.classification.tree:TreeClassifier.dump",
"orngTree.dumpTree": "Orange.classification.tree:TreeClassifier.dump",
"orngTree.printDot": "Orange.classification.tree:TreeClassifier.dot",
"orngTree.dotTree": "Orange.classification.tree:TreeClassifier.dot",
"orngTree.dump": "Orange.classification.tree:TreeClassifier.dump",
"orngTree.dot": "Orange.classification.tree:TreeClassifier.dot",
"orngTree.countLeaves": "Orange.classification.tree:TreeClassifier.count_leaves",
"orngTree.countNodes": "Orange.classification.tree:TreeClassifier.count_nodes",
"orngTree.byWhom": "Orange.classification.tree.by_whom",
"orngTree.insertStr": "Orange.classification.tree.insert_str",
"orngTree.insertDot": "Orange.classification.tree.insert_dot",
"orngTree.insertNum": "Orange.classification.tree.insert_num",
"orange.MajorityLearner":"Orange.classification.majority.MajorityLearner",
"orange.DefaultClassifier":"Orange.classification.ConstantClassifier",
"orngSQL.SQLReader": "Orange.data.sql.SQLReader",
"orngSQL.SQLWriter": "Orange.data.sql.SQLWriter",
"orange.LookupLearner":"Orange.classification.lookup.LookupLearner",
"orange.ClassifierByLookupTable":"Orange.classification.lookup.ClassifierByLookupTable",
"orange.ClassifierByLookupTable1":"Orange.classification.lookup.ClassifierByLookupTable1",
"orange.ClassifierByLookupTable2":"Orange.classification.lookup.ClassifierByLookupTable2",
"orange.ClassifierByLookupTable3":"Orange.classification.lookup.ClassifierByLookupTable3",
"orange.ClassifierByExampleTable":"Orange.classification.lookup.ClassifierByDataTable",
"orngLookup.lookupFromBound":"Orange.classification.lookup.lookup_from_bound",
"orngLookup.lookupFromExamples":"Orange.classification.lookup.lookup_from_data",
"orngLookup.lookupFromFunction":"Orange.classification.lookup.lookup_from_function",
"orngLookup.printLookupFunction":"Orange.classification.lookup.dump_lookup_function",
"orange.AssociationRule" : "Orange.associate.AssociationRule",
"orange.AssociationRules" : "Orange.associate.AssociationRules",
"orange.AssociationRulesInducer" : "Orange.associate.AssociationRulesInducer",
"orange.AssociationRulesSparseInducer" : "Orange.associate.AssociationRulesSparseInducer",
"orange.ItemsetNodeProxy" : "Orange.associate.ItemsetNodeProxy",
"orange.ItemsetsSparseInducer" : "Orange.associate.ItemsetsSparseInducer",
"orngCN2.ruleToString": "Orange.classification.rules.rule_to_string",
"orngCN2.LaplaceEvaluator": "Orange.classification.rules.LaplaceEvaluator",
"orngCN2.WRACCEvaluator": "Orange.classification.rules.WRACCEvaluator",
"orngCN2.mEstimate": "Orange.classification.rules.MEstimateEvaluator",
"orngCN2.RuleStopping_apriori": "Orange.classification.rules.Stopping_Apriori",
"orngCN2.LengthValidator": "Orange.classification.rules.LengthValidator",
"orngCN2.supervisedClassCheck": "Orange.classification.rules.supervisedClassCheck",
"orngCN2.CN2Learner": "Orange.classification.rules.CN2Learner",
"orngCN2.CN2Classifier": "Orange.classification.rules.CN2Classifier",
"orngCN2.CN2UnorderedLearner": "Orange.classification.rules.CN2UnorderedLearner",
"orngCN2.CN2UnorderedClassifier": "Orange.classification.rules.CN2UnorderedClassifier",
"orngCN2.RuleClassifier_bestRule": "Orange.classification.rules.Classifier_BestRule",
"orngCN2.CovererAndRemover_multWeights": "Orange.classification.rules.CovererAndRemover_MultWeights",
"orngCN2.CovererAndRemover_addWeights": "Orange.classification.rules.CovererAndRemover_AddWeights",
"orngCN2.rule_in_set": "Orange.classification.rules.rule_in_set",
"orngCN2.rules_equal": "Orange.classification.rules.rules_equal",
"orngCN2.noDuplicates_validator": "Orange.classification.rules.NoDuplicatesValidator",
"orngCN2.ruleSt_setRules": "Orange.classification.rules.Stopping_SetRules",
"orngCN2.CN2SDUnorderedLearner": "Orange.classification.rules.CN2SDUnorderedLearner",
# "orngCN2.avg": "Orange.classification.rules.avg",
# "orngCN2.var": "Orange.classification.rules.var",
# "orngCN2.median": "Orange.classification.rules.median",
# "orngCN2.perc": "Orange.classification.rules.perc",
# "orngCN2.createRandomDataSet": "Orange.classification.rules.createRandomDataSet",
# "orngCN2.compParameters": "Orange.classification.rules.compParameters",
# "orngCN2.computeDists": "Orange.classification.rules.computeDists",
# "orngCN2.createEVDistList": "Orange.classification.rules.createEVDistList",
"orngCN2.CovererAndRemover_Prob": "Orange.classification.rules.CovererAndRemover_Prob",
# "orngCN2.add_sub_rules": "Orange.classification.rules.add_sub_rules",
"orngCN2.CN2EVCUnorderedLearner": "Orange.classification.rules.CN2EVCUnorderedLearner",
"orngMDS.KruskalStress": "Orange.projection.mds.KruskalStress",
"orngMDS.SammonStress": "Orange.projection.mds.SammonStress",
"orngMDS.SgnSammonStress": "Orange.projection.mds.SgnSammonStress",
"orngMDS.SgnRelStress": "Orange.projection.mds.SgnRelStress",
"orngMDS.PointList": "Orange.projection.mds.PointList",
"orngMDS.FloatListList": "Orange.projection.mds.FloatListList",
"orngMDS.PivotMDS": "Orange.projection.mds.PivotMDS",
"orngMDS.MDS": "Orange.projection.mds.MDS",
"orngStat.AP" : "Orange.evaluation.scoring.AP",
"orngStat.AROC" : "Orange.evaluation.scoring.AROC",
"orngStat.AUC" : "Orange.evaluation.scoring.AUC",
"orngStat.AUCWilcoxon" : "Orange.evaluation.scoring.AUCWilcoxon",
"orngStat.AUC_binary" : "Orange.evaluation.scoring.AUC_binary",
"orngStat.AUC_i" : "Orange.evaluation.scoring.AUC_i",
"orngStat.AUC_ij" : "Orange.evaluation.scoring.AUC_ij",
"orngStat.AUC_iterations" : "Orange.evaluation.scoring.AUC_iterations",
"orngStat.AUC_matrix" : "Orange.evaluation.scoring.AUC_matrix",
"orngStat.AUC_multi" : "Orange.evaluation.scoring.AUC_multi",
"orngStat.AUC_pair" : "Orange.evaluation.scoring.AUC_pair",
"orngStat.AUC_single" : "Orange.evaluation.scoring.AUC_single",
"orngStat.AUC_x" : "Orange.evaluation.scoring.AUC_x",
"orngStat.BSS" : "Orange.evaluation.scoring.BSS",
"orngStat.BrierScore" : "Orange.evaluation.scoring.Brier_score",
"orngStat.CA" : "Orange.evaluation.scoring.CA",
"orngStat.CDT" : "Orange.evaluation.scoring.CDT",
"orngStat.ConfusionMatrix" : "Orange.evaluation.scoring.ConfusionMatrix",
"orngStat.F1" : "Orange.evaluation.scoring.F1",
"orngStat.Falpha" : "Orange.evaluation.scoring.Falpha",
"orngStat.Friedman" : "Orange.evaluation.scoring.Friedman",
"orngStat.IS" : "Orange.evaluation.scoring.IS",
"orngStat.IS_ex" : "Orange.evaluation.scoring.IS_ex",
"orngStat.MAE" : "Orange.evaluation.scoring.MAE",
"orngStat.MCC" : "Orange.evaluation.scoring.MCC",
"orngStat.ME" : "Orange.evaluation.scoring.ME",
"orngStat.MSE" : "Orange.evaluation.scoring.MSE",
"orngStat.MSE_old" : "Orange.evaluation.scoring.MSE_old",
"orngStat.McNemar" : "Orange.evaluation.scoring.McNemar",
"orngStat.McNemarOfTwo" : "Orange.evaluation.scoring.McNemar_of_two",
"orngStat.NPV" : "Orange.evaluation.scoring.NPV",
"orngStat.PPV" : "Orange.evaluation.scoring.PPV",
"orngStat.R2" : "Orange.evaluation.scoring.R2",
"orngStat.RAE" : "Orange.evaluation.scoring.RAE",
"orngStat.RMSE" : "Orange.evaluation.scoring.RMSE",
"orngStat.RMSE_old" : "Orange.evaluation.scoring.RMSE_old",
"orngStat.ROCaddPoint" : "Orange.evaluation.scoring.ROC_add_point",
# "orngStat.ROCsFromCDT" : "Orange.evaluation.scoring.ROCs_from_CDT",
"orngStat.ROCslope" : "Orange.evaluation.scoring.ROC_slope",
"orngStat.RRSE" : "Orange.evaluation.scoring.RRSE",
"orngStat.RSE" : "Orange.evaluation.scoring.RSE",
"orngStat.TCbestThresholdsOnROCcurve" : "Orange.evaluation.scoring.TC_best_thresholds_on_ROC_curve",
"orngStat.TCcomputeROC" : "Orange.evaluation.scoring.TC_compute_ROC",
"orngStat.TCthresholdlAverageROC" : "Orange.evaluation.scoring.TC_threshold_average_ROC",
"orngStat.TCverticalAverageROC" : "Orange.evaluation.scoring.TC_vertical_average_ROC",
"orngStat.Wilcoxon" : "Orange.evaluation.scoring.Wilcoxon",
"orngStat.WilcoxonPairs" : "Orange.evaluation.scoring.Wilcoxon_pairs",
# "orngStat.add" : "Orange.evaluation.scoring.add",
# "orngStat.checkArgkw" : "Orange.evaluation.scoring.checkArgkw",
# "orngStat.checkNonZero" : "Orange.evaluation.scoring.checkNonZero",
"orngStat.classProbabilitiesFromRes" : "Orange.evaluation.scoring.class_probabilities_from_res",
"orngStat.compare2AROCs" : "Orange.evaluation.scoring.compare_2_AROCs",
"orngStat.compare2AUCs" : "Orange.evaluation.scoring.compare_2_AUCs",
"orngStat.computeCDT" : "Orange.evaluation.scoring.compute_CDT",
"orngStat.computeCalibrationCurve" : "Orange.evaluation.scoring.compute_calibration_curve",
"orngStat.computeConfusionMatrices" : "Orange.evaluation.scoring.compute_confusion_matrices",
"orngStat.computeLiftCurve" : "Orange.evaluation.scoring.compute_lift_curve",
"orngStat.computeROC" : "Orange.evaluation.scoring.compute_ROC",
"orngStat.compute_CD" : "Orange.evaluation.scoring.compute_CD",
"orngStat.compute_friedman" : "Orange.evaluation.scoring.compute_friedman",
"orngStat.confusionChiSquare" : "Orange.evaluation.scoring.confusion_chi_square",
"orngStat.confusionMatrices" : "Orange.evaluation.scoring.confusion_matrices",
"orngStat.defaultLineTypes" : "Orange.evaluation.scoring.default_line_types",
"orngStat.defaultPointTypes" : "Orange.evaluation.scoring.default_point_types",
# "orngStat.frange" : "Orange.evaluation.scoring.frange",
# "orngStat.gettotsize" : "Orange.evaluation.scoring.gettotsize",
# "orngStat.gettotweight" : "Orange.evaluation.scoring.gettotweight",
"orngStat.graph_ranks" : "Orange.evaluation.scoring.graph_ranks",
"orngStat.isCDTEmpty" : "Orange.evaluation.scoring.is_CDT_empty",
"orngStat.learningCurve2PiCTeX" : "Orange.evaluation.scoring.learning_curve_to_PiCTeX",
"orngStat.learningCurveLearners2PiCTeX" : "Orange.evaluation.scoring.learning_curve_learners_to_PiCTeX",
"orngStat.legend2PiCTeX" : "Orange.evaluation.scoring.legend_to_PiCTeX",
"orngStat.legendLearners2PiCTeX" : "Orange.evaluation.scoring.legend_learners_to_PiCTeX",
# "orngStat.log2" : "Orange.evaluation.scoring.log2",
# "orngStat.math" : "Orange.evaluation.scoring.math",
# "orngStat.numpy" : "Orange.evaluation.scoring.numpy",
# "orngStat.operator" : "Orange.evaluation.scoring.operator",
"orngStat.plotLearningCurve" : "Orange.evaluation.scoring.plot_learning_curve",
"orngStat.plotLearningCurveLearners" : "Orange.evaluation.scoring.plot_learning_curve_learners",
"orngStat.plotMcNemarCurve" : "Orange.evaluation.scoring.plot_McNemar_curve",
"orngStat.plotMcNemarCurveLearners" : "Orange.evaluation.scoring.plot_McNemar_curve_learners",
"orngStat.plotROC" : "Orange.evaluation.scoring.plot_ROC",
"orngStat.plotROCLearners" : "Orange.evaluation.scoring.plot_ROC_learners",
"orngStat.precision" : "Orange.evaluation.scoring.precision",
"orngStat.printSingleROCCurveCoordinates" : "Orange.evaluation.scoring.print_single_ROC_curve_coordinates",
"orngStat.rankDifference" : "Orange.evaluation.scoring.rank_difference",
"orngStat.recall" : "Orange.evaluation.scoring.recall",
"orngStat.regressionError" : "Orange.evaluation.scoring.regression_error",
"orngStat.scottsPi" : "Orange.evaluation.scoring.scotts_pi",
"orngStat.sens" : "Orange.evaluation.scoring.sens",
"orngStat.spec" : "Orange.evaluation.scoring.spec",
"orngStat.splitByIterations" : "Orange.evaluation.scoring.split_by_iterations",
# "orngStat.statc" : "Orange.evaluation.scoring.statc",
"orngStat.statisticsByFolds" : "Orange.evaluation.scoring.statistics_by_folds",
# "orngStat.x" : "Orange.evaluation.scoring.x",
# Orange.selection
"orngMisc.BestOnTheFly":"Orange.utils.selection.BestOnTheFly",
"orngMisc.selectBest":"Orange.utils.selection.select_best",
"orngMisc.selectBestIndex":"Orange.utils.selection.select_best_index",
"orngMisc.compare2_firstBigger":"Orange.utils.selection.compare_first_bigger",
"orngMisc.compare2_firstSmaller":"Orange.utils.selection.compare_first_smaller",
"orngMisc.compare2_lastBigger":"Orange.utils.selection.compare_last_bigger",
"orngMisc.compare2_lastSmaller":"Orange.utils.selection.compare_last_smaller",
"orngMisc.compare2_bigger":"Orange.utils.selection.compare_bigger",
"orngMisc.compare2_smaller":"Orange.utils.selection.compare_smaller",
"orngMisc.Renderer": "Orange.utils.render.Renderer",
"orngMisc.EPSRenderer": "Orange.utils.render.EPSRenderer",
"orngMisc.SVGRenderer": "Orange.utils.render.SVGRenderer",
"orngMisc.PILRenderer": "Orange.utils.render.PILRenderer",
# The rest of orngMisc is handled by fix_orange_imports (maps to Orange.misc)
"orngEnsemble.BaggedLearner":"Orange.ensemble.bagging.BaggedLearner",
"orngEnsemble.BaggedClassifier":"Orange.ensemble.bagging.BaggedClassifier",
"orngEnsemble.BoostedLearner":"Orange.ensemble.boosting.BoostedLearner",
"orngEnsemble.BoostedClassifier":"Orange.ensemble.boosting.BoostedClassifier",
"orngEnsemble.RandomForestClassifier":"Orange.ensemble.forest.RandomForestClassifier",
"orngEnsemble.RandomForestLearner":"Orange.ensemble.forest.RandomForestLearner",
"orngEnsemble.MeasureAttribute_randomForests":"Orange.ensemble.forest.ScoreFeature",
"orngEnsemble.SplitConstructor_AttributeSubset":"Orange.ensemble.forest.SplitConstructor_AttributeSubset",
"orngTest.proportionTest":"Orange.evaluation.testing.proportion_test",
"orngTest.leaveOneOut":"Orange.evaluation.testing.leave_one_out",
"orngTest.crossValidation":"Orange.evaluation.testing.cross_validation",
"orngTest.testWithIndices":"Orange.evaluation.testing.test_with_indices",
"orngTest.learningCurve":"Orange.evaluation.testing.learning_curve",
"orngTest.learningCurveN":"Orange.evaluation.testing.learning_curve_n",
"orngTest.learningCurveWithTestData":"Orange.evaluation.testing.learning_curve_with_test_data",
"orngTest.learnAndTestOnTestData":"Orange.evaluation.testing.learn_and_test_on_test_data",
"orngTest.learnAndTestOnLearnData":"Orange.evaluation.testing.learn_and_test_on_learn_data",
"orngTest.testOnData":"Orange.evaluation.testing.test_on_data",
"orngTest.TestedExample":"Orange.evaluation.testing.TestedExample",
"orngTest.ExperimentResults":"Orange.evaluation.testing.ExperimentResults",
"orngLR.dump":"Orange.classification.logreg.dump",
"orngLR.printOUT":"Orange.classification.logreg.dump",
"orngLR.printOut":"Orange.classification.logreg.dump",
"orngLR.hasDiscreteValues":"Orange.classification.logreg.has_discrete_values",
"orngLR.LogRegLearner":"Orange.classification.logreg.LogRegLearner",
"orngLR.LogRegLearnerClass":"Orange.classification.logreg.LogRegLearner",
"orngLR.Univariate_LogRegLearner":"Orange.classification.logreg.UnivariateLogRegLearner",
"orngLR.Univariate_LogRegLearner_Class":"Orange.classification.logreg.UnivariateLogRegLearner",
"orngLR.Univariate_LogRegClassifier":"Orange.classification.logreg.UnivariateLogRegClassifier",
"orngLR.LogRegLearner_getPriors":"Orange.classification.logreg.LogRegLearnerGetPriors",
"orngLR.LogRegLearnerClass_getPriors":"Orange.classification.logreg.LogRegLearnerGetPriors",
"orngLR.LogRegLearnerClass_getPriors_OneTable":"Orange.classification.logreg.LogRegLearnerGetPriorsOneTable",
"orngLR.Pr":"Orange.classification.logreg.pr",
"orngLR.lh":"Orange.classification.logreg.lh",
"orngLR.diag":"Orange.classification.logreg.diag",
"orngLR.simpleFitter":"Orange.classification.logreg.SimpleFitter",
"orngLR.Pr_bx":"Orange.classification.logreg.pr_bx",
"orngLR.bayesianFitter":"Orange.classification.logreg.BayesianFitter",
"orngLR.StepWiseFSS":"Orange.classification.logreg.StepWiseFSS",
"orngLR.getLikelihood":"Orange.classification.logreg.get_likelihood",
"orngLR.StepWiseFSS_class":"Orange.classification.logreg.StepWiseFSS",
"orngLR.StepWiseFSS_Filter":"Orange.classification.logreg.StepWiseFSSFilter",
"orngLR.StepWiseFSS_Filter_class":"Orange.classification.logreg.StepWiseFSSFilter",
"orngLR.lchisqprob":"Orange.classification.logreg.lchisqprob",
"orngLR.zprob":"Orange.classification.logreg.zprob",
"orange.Preprocessor": "Orange.data.preprocess.Preprocessor",
"orange.Preprocessor_addCensorWeight": "Orange.data.preprocess.AddCensorWeight",
"orange.Preprocessor_addClassNoise": "Orange.data.preprocess.AddClassNoise",
"orange.Preprocessor_addClassWeight": "Orange.data.preprocess.AddClassWeight",
"orange.Preprocessor_addGaussianClassNoise": "Orange.data.preprocess.AddGaussianClassNoise",
"orange.Preprocessor_addGaussianNoise": "Orange.data.preprocess.AddGaussianNoise",
"orange.Preprocessor_addMissing": "Orange.data.preprocess.AddMissing",
"orange.Preprocessor_addMissingClasses": "Orange.data.preprocess.AddMissingClasses",
"orange.Preprocessor_addNoise": "Orange.data.preprocess.AddNoise",
"orange.Preprocessor_discretize": "Orange.data.preprocess.Discretize",
"orange.Preprocessor_drop": "Orange.data.preprocess.Drop",
"orange.Preprocessor_dropMissing": "Orange.data.preprocess.DropMissing",
"orange.Preprocessor_dropMissingClasses": "Orange.data.preprocess.DropMissingClasses",
"orange.Preprocessor_filter": "Orange.data.preprocess.Filter",
"orange.Preprocessor_ignore": "Orange.data.preprocess.Ignore",
"orange.Preprocessor_imputeByLearner": "Orange.data.preprocess.ImputeByLearner",
"orange.Preprocessor_removeDuplicates": "Orange.data.preprocess.RemoveDuplicates",
"orange.Preprocessor_select": "Orange.data.preprocess.Select",
"orange.Preprocessor_shuffle": "Orange.data.preprocess.Shuffle",
"orange.Preprocessor_take": "Orange.data.preprocess.Take",
"orange.Preprocessor_takeMissing": "Orange.data.preprocess.TakeMissing",
"orange.Preprocessor_takeMissingClasses": "Orange.data.preprocess.TakeMissingClasses",
"orange.Discretizer": "Orange.feature.discretization.Discretizer",
"orange.BiModalDiscretizer": "Orange.feature.discretization.BiModalDiscretizer",
"orange.EquiDistDiscretizer": "Orange.feature.discretization.EqualWidthDiscretizer",
"orange.IntervalDiscretizer": "Orange.feature.discretization.IntervalDiscretizer",
"orange.ThresholdDiscretizer": "Orange.feature.discretization.ThresholdDiscretizer",
"orange.Discrete2Continuous": "Orange.feature.discretization.Discrete2Continuous",
"orange.DomainContinuizer": "Orange.data.continuization.DomainContinuizer",
"orange.MakeRandomIndices": "Orange.data.sample.SubsetIndices",
"orange.MakeRandomIndicesN": "Orange.data.sample.SubsetIndicesN",
"orange.MakeRandomIndicesCV": "Orange.data.sample.SubsetIndicesCV",
"orange.MakeRandomIndicesMultiple": "Orange.data.sample.SubsetIndicesMultiple",
"orange.MakeRandomIndices2": "Orange.data.sample.SubsetIndices2",
"orngLinProj.FAST_IMPLEMENTATION": "Orange.projection.linear.FAST_IMPLEMENTATION",
"orngLinProj.SLOW_IMPLEMENTATION": "Orange.projection.linear.SLOW_IMPLEMENTATION",
"orngLinProj.LDA_IMPLEMENTATION": "Orange.projection.linear.LDA_IMPLEMENTATION",
"orngLinProj.LAW_LINEAR": "Orange.projection.linear.LAW_LINEAR",
"orngLinProj.LAW_SQUARE": "Orange.projection.linear.LAW_SQUARE",
"orngLinProj.LAW_GAUSSIAN": "Orange.projection.linear.LAW_GAUSSIAN",
"orngLinProj.LAW_KNN": "Orange.projection.linear.LAW_KNN",
"orngLinProj.LAW_LINEAR_PLUS": "Orange.projection.linear.LAW_LINEAR_PLUS",
"orngLinProj.DR_PCA": "Orange.projection.linear.DR_PCA",
"orngLinProj.DR_SPCA": "Orange.projection.linear.DR_SPCA",
"orngLinProj.DR_PLS": "Orange.projection.linear.DR_PLS",
"orngLinProj.normalize": "Orange.projection.linear.normalize",
"orngLinProj.center": "Orange.projection.linear.center",
"orngLinProj.FreeViz": "Orange.projection.linear.FreeViz",
"orngLinProj.createPLSProjection": "Orange.projection.linear.create_pls_projection",
"orngLinProj.createPCAProjection": "Orange.projection.linear.create_pca_projection",
"orngLinProj.FreeVizClassifier": "Orange.projection.linear.FreeVizClassifier",
"orngLinProj.FreeVizLearner": "Orange.projection.linear.FreeVizLearner",
"orngLinProj.S2NHeuristicLearner": "Orange.projection.linear.S2NHeuristicLearner",
"orngDisc.entropyDiscretization": "Orange.feature.discretization.entropyDiscretization_wrapper",
"orngDisc.EntropyDiscretization": "Orange.feature.discretization.EntropyDiscretization_wrapper",
"orange.ProbabilityEstimatorList": "Orange.statistics.estimate.EstimatorList",
"orange.FilterList": "Orange.data.filter.FilterList",
"orange.Filter": "Orange.data.filter.Filter",
"orange.Filter_conjunction": "Orange.data.filter.Conjunction",
"orange.Filter_disjunction": "Orange.data.filter.Disjunction",
"orange.Filter_hasClassValue": "Orange.data.filter.HasClassValue",
"orange.Filter_hasMeta": "Orange.data.filter.HasMeta",
"orange.Filter_hasSpecial": "Orange.data.filter.HasSpecial",
"orange.Filter_isDefined": "Orange.data.filter.IsDefined",
"orange.Filter_random": "Orange.data.filter.Random",
"orange.Filter_sameValue": "Orange.data.filter.SameValue",
"orange.Filter_values": "Orange.data.filter.Values",
"orange.ValueFilter": "Orange.data.filter.ValueFilter",
"orange.ValueFilterList": "Orange.data.filter.ValueFilterList",
"orange.ValueFilter_discrete": "Orange.data.filter.ValueFilterDiscrete",
"orange.ValueFilter_continuous": "Orange.data.filter.ValueFilterContinuous",
"orange.ValueFilter_string": "Orange.data.filter.ValueFilterString",
# orngEnviron
"orngEnviron.orangeDir": "Orange.utils.environ.install_dir",
"orngEnviron.orangeDocDir": "Orange.utils.environ.doc_install_dir",
"orngEnviron.orangeVer": "Orange.utils.environ.version",
"orngEnviron.canvasDir": "Orange.utils.environ.canvas_install_dir",
"orngEnviron.widgetDir": "Orange.utils.environ.widget_install_dir",
"orngEnviron.picsDir": "Orange.utils.environ.icons_install_dir",
"orngEnviron.addOnsDirSys": "Orange.utils.environ.add_ons_dir",
"orngEnviron.addOnsDirUser": "Orange.utils.environ.add_ons_dir_user",
"orngEnviron.applicationDir": "Orange.utils.environ.application_dir",
"orngEnviron.outputDir": "Orange.utils.environ.output_dir",
"orngEnviron.defaultReportsDir": "Orange.utils.environ.default_reports_dir",
"orngEnviron.orangeSettingsDir": "Orange.utils.environ.orange_settings_dir",
"orngEnviron.widgetSettingsDir": "Orange.utils.environ.widget_settings_dir",
"orngEnviron.canvasSettingsDir": "Orange.utils.environ.canvas_settings_dir",
"orngEnviron.bufferDir": "Orange.utils.environ.buffer_dir",
"orngEnviron.directoryNames": "Orange.utils.environ.directories",
"orngEnviron.samepath": "Orange.utils.environ.samepath",
"orngEnviron.addOrangeDirectoriesToPath": "Orange.utils.environ.add_orange_directories_to_path",
"orngScaleData.getVariableValuesSorted": "Orange.data.preprocess.scaling.get_variable_values_sorted",
"orngScaleData.getVariableValueIndices": "Orange.data.preprocess.scaling.get_variable_value_indices",
"orngScaleData.discretizeDomain": "Orange.data.preprocess.scaling.discretize_domain",
"orngScaleData.orngScaleData": "Orange.data.preprocess.scaling.ScaleData",
"orngScaleLinProjData.orngScaleLinProjData": "Orange.data.preprocess.scaling.ScaleLinProjData",
"orngScalePolyvizData.orngScalePolyvizData": "Orange.data.preprocess.scaling.ScalePolyvizData",
"orngScaleScatterPlotData.orngScaleScatterPlotData": "Orange.data.preprocess.scaling.ScaleScatterPlotData",
"orngEvalAttr.mergeAttrValues": "Orange.feature.scoring.merge_values",
"orngEvalAttr.MeasureAttribute_MDL": "Orange.feature.scoring.MDL",
"orngEvalAttr.MeasureAttribute_MDLClass": "Orange.feature.scoring.MDL",
"orngEvalAttr.MeasureAttribute_Distance": "Orange.feature.scoring.Distance",
"orngEvalAttr.MeasureAttribute_DistanceClass": "Orange.feature.scoring.Distance",
"orngEvalAttr.OrderAttributesByMeasure": "Orange.feature.scoring.OrderAttributes",
"orange.ProbabilityEstimator": "Orange.statistics.estimate.Estimator",
"orange.ProbabilityEstimator_FromDistribution": "Orange.statistics.estimate.EstimatorFromDistribution",
"orange.ProbabilityEstimatorConstructor": "Orange.statistics.estimate.EstimatorConstructor",
"orange.ProbabilityEstimatorConstructor_Laplace": "Orange.statistics.estimate.Laplace",
"orange.ProbabilityEstimatorConstructor_kernel": "Orange.statistics.estimate.Kernel",
"orange.ProbabilityEstimatorConstructor_loess": "Orange.statistics.estimate.Loess",
"orange.ProbabilityEstimatorConstructor_m": "Orange.statistics.estimate.M",
"orange.ProbabilityEstimatorConstructor_relative": "Orange.statistics.estimate.RelativeFrequency",
"orange.ConditionalProbabilityEstimator": "Orange.statistics.estimate.ConditionalEstimator",
"orange.ConditionalProbabilityEstimator_FromDistribution": "Orange.statistics.estimate.ConditionalEstimatorFromDistribution",
"orange.ConditionalProbabilityEstimator_ByRows": "Orange.statistics.estimate.ConditionalEstimatorByRows",
"orange.ConditionalProbabilityEstimatorConstructor_ByRows": "Orange.statistics.estimate.ConditionalByRows",
"orange.ConditionalProbabilityEstimatorConstructor_loess": "Orange.statistics.estimate.ConditionalLoess",
"orange.RandomGenerator": "Orange.misc.Random",
"orange.TransformValue": "Orange.data.utils.TransformValue",
"orange.Ordinal2Continuous": "Orange.data.utils.Ordinal2Continuous",
"orange.Discrete2Continuous": "Orange.data.utils.Discrete2Continuous",
"orange.NormalizeContinuous": "Orange.data.utils.NormalizeContinuous",
"orange.MapIntValue": "Orange.data.utils.MapIntValue",
}
def build_pattern(mapping=MAPPING):
PATTERN = """
power< head=any+
trailer< '.' member=(%s) >
tail=any*
>
"""
return PATTERN % "|".join("'%s'" % key.split(".")[-1] for key in mapping.keys())
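# For illustration: with a two-entry mapping such as
# {"orange.ExampleTable": ..., "orngSVM.SVMLearner": ...} the generated
# pattern's member alternatives become ('ExampleTable'|'SVMLearner'), i.e.
# only the last component of each key is matched here; the owning module is
# checked later in FixChangedNames.transform.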
class FixChangedNames(fixer_base.BaseFix):
mapping = MAPPING
run_order = 1
def compile_pattern(self):
self.PATTERN = build_pattern(self.mapping)
self._modules_to_change = [key.split(".", 1)[0] for key in self.mapping.keys()]
super(FixChangedNames, self).compile_pattern()
def package_tree(self, package):
""" Return pytree tree for accessing the package
Example:
>>> package_tree("Orange.feature.scoring")
[Name('Orange'), trailer('.' 'feature'), trailer('.', 'scoring')]
"""
path = package.split('.')
nodes = [Name(path[0])]
for name in path[1:]:
new = pytree.Node(self.syms.trailer, [Dot(), Name(name)])
nodes.append(new)
return nodes
def transform(self, node, results):
member = results.get("member")
head = results.get("head")
tail = results.get("tail")
module = head[0].value
if member and module in self._modules_to_change:
node = member[0]
head = head[0]
old_name = module + "." + node.value
if old_name not in self.mapping:
return
new_name = unicode(self.mapping[old_name])
if ":" in new_name:
# ':' is the delimiter used to separate module namespace
package = new_name.split(":", 1)[0]
new_name = new_name.replace(":", ".")
else:
package = new_name.rsplit(".", 1)[0]
syms = self.syms
if tail:
tail = [t.clone() for t in tail]
new = self.package_tree(new_name)
new = pytree.Node(syms.power, new + tail, prefix=head.prefix)
# Make sure the proper package is imported
# if ":" in new_name:
# package = new_name.split(":",1)[0]
# else:
# package = new_name.rsplit(".", 1)[0]
def orange_to_root(package):
return "Orange" if package.startswith("Orange.") else package
touch_import(None, orange_to_root(package), node)
return new
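# Worked example (based on the mapping above): a call like orange.GetValue(...)
# matches with module="orange" and member="GetValue"; the ':' in
# "Orange.classification:Classifier.GetValue" marks "Orange.classification" as
# the package part, so the node is rewritten to
# Orange.classification.Classifier.GetValue(...) and touch_import ensures an
# "import Orange" statement is present in the script.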
|
yzl0083/orange
|
Orange/fixes/fix_changed_names.py
|
Python
|
gpl-3.0
| 47,037
|
[
"Gaussian"
] |
37f08df11745c6cada261cc677ba8c4baeb0d9adf06a46a5769c968fe348e0bd
|